| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 996k |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable (⌀) |
| content | string | length 4 to 996k |
| avg_line_length | float64 | 1.33 to 58.2k |
| max_line_length | int64 | 2 to 323k |
| alphanum_fraction | float64 | 0 to 0.97 |
| content_no_comment | string | length 0 to 946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
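
Each record below follows this schema. As a quick orientation, here is a minimal sketch of how a shard with these columns might be inspected; the parquet file name, and the use of pandas at all, are assumptions rather than anything stated by the dataset itself.

```python
# Minimal sketch, assuming the rows are available locally as a parquet shard
# named "data.parquet" (hypothetical path) with the columns listed above.
import pandas as pd

df = pd.read_parquet("data.parquet")

# Every row shown in this sample has lang == "Python" and ext == "py".
py = df[df["lang"] == "Python"]
print(py[["hexsha", "size", "max_stars_count", "alphanum_fraction"]].head())

# content_no_comment is the comment-stripped twin of content, so the length
# difference is a rough measure of how much comment text was removed.
py = py.assign(removed=py["content"].str.len() - py["content_no_comment"].str.len())
print(py["removed"].describe())
```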

Row 1 | hexsha: 790de44435a03662547382c8bd4e98cc8e5b9c87 | size: 118 | ext: py | lang: Python
max_stars: backEnd/app/api/auth/exceptions.py @ HenryYDJ/flaskAPI (head 9947fbad1050d6ba29c29e365c561689ea3e22d8), licenses ["MIT"], stars: null, star events: null to null
max_issues: backEnd/app/api/auth/exceptions.py @ HenryYDJ/flaskAPI (head 9947fbad1050d6ba29c29e365c561689ea3e22d8), licenses ["MIT"], issues: 14, issue events: 2022-03-27T13:34:58.000Z to 2022-03-31T14:37:19.000Z
max_forks: backEnd/app/api/auth/exceptions.py @ HenryYDJ/flaskAPI (head 9947fbad1050d6ba29c29e365c561689ea3e22d8), licenses ["MIT"], forks: null, fork events: null to null

content:

```python
class TokenNotFound(Exception):
"""
Indicates that a token could not be found in the database
"""
pass
```

avg_line_length: 23.6 | max_line_length: 61 | alphanum_fraction: 0.661017

content_no_comment:

```python
class TokenNotFound(Exception):
pass
```

is_comment_constant_removed: true | is_sharp_comment_removed: true
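
Row 1's content is a bare exception class. A class like this is typically registered with a Flask error handler; the sketch below is illustrative only, and the app object, route, and JSON payload are assumptions rather than code from HenryYDJ/flaskAPI.

```python
# Hypothetical usage sketch for TokenNotFound; only the exception class
# itself comes from the row above, everything else is assumed.
from flask import Flask, jsonify

app = Flask(__name__)


class TokenNotFound(Exception):
    """Indicates that a token could not be found in the database."""


@app.errorhandler(TokenNotFound)
def handle_token_not_found(error):
    # 401 tells the client that the presented token is unusable.
    return jsonify({"msg": "Token not found"}), 401


@app.route("/protected")
def protected():
    raise TokenNotFound()  # would normally come from a token lookup
```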

Row 2 | hexsha: 790de4fd9222d64d2d19db56fda267ac1bb55257 | size: 2,678 | ext: py | lang: Python
max_stars: tests/test_base.py @ simba999/dawdle (head c858b6ecffefa1e4415fa79c368149c6f31176fa), licenses ["MIT"], stars: null, star events: null to null
max_issues: tests/test_base.py @ simba999/dawdle (head c858b6ecffefa1e4415fa79c368149c6f31176fa), licenses ["MIT"], issues: null, issue events: null to null
max_forks: tests/test_base.py @ simba999/dawdle (head c858b6ecffefa1e4415fa79c368149c6f31176fa), licenses ["MIT"], forks: null, fork events: null to null

content:

```python
from bson.objectid import ObjectId
from faker import Faker
from flask import url_for
from dawdle import create_app
from dawdle.models.board import Board, BoardType
from dawdle.models.user import User
class TestBase:
@classmethod
def setup_class(cls):
cls.fake = Faker()
cls.app = create_app(testing=True)
cls.app.app_context().push()
cls.client = cls.app.test_client()
cls.password = cls.fake.password()
cls.user = cls.create_user(password=cls.password)
cls.login()
@classmethod
def teardown_class(cls):
cls.clear_db()
@classmethod
def create_user(cls, **kwargs):
user = User()
user.active = kwargs.get('active', True)
user.email = kwargs.get('email', cls.fake.email())
user.initials = kwargs.get(
'initials',
cls.fake.pystr(min_chars=1, max_chars=4),
).upper()
user.name = kwargs.get('name', cls.fake.name())
user.password = User.encrypt_password(
kwargs.get('password', cls.fake.password()),
)
return user.save()
@classmethod
def create_boards(cls, owner_id, min_boards=1, max_boards=1):
num = cls.fake.pyint(min_boards, max_boards)
boards = []
for n in range(num):
boards.append(cls.create_board(owner_id=owner_id))
return boards
@classmethod
def create_board(cls, **kwargs):
board = Board()
board.created_by = kwargs.get('created_by', ObjectId())
board.name = kwargs.get(
'name',
cls.fake.pystr(min_chars=1, max_chars=256),
)
board.owner_id = kwargs.get('owner_id', ObjectId())
board.type = kwargs.get('type', BoardType.PERSONAL.id)
return board.save()
@classmethod
def as_new_user(cls):
password = cls.fake.password()
user = cls.create_user(password=password)
cls.login(email=user.email, password=password)
return user, password
@classmethod
def login(cls, **kwargs):
email = kwargs.get('email', cls.user.email)
password = kwargs.get('password', cls.password)
data = {'email': email, 'password': password}
cls.client.post(url_for('auth.login_POST'), data=data)
cls.logged_in = cls.user.email == email and cls.password == password
@classmethod
def logout(cls):
cls.client.get(url_for('auth.logout_GET'))
cls.logged_in = False
@classmethod
def clear_db(cls):
Board.objects.delete()
User.objects.delete()
def setup_method(self):
if not self.logged_in:
self.login()
```

avg_line_length: 29.428571 | max_line_length: 76 | alphanum_fraction: 0.612024

content_no_comment:

```python
from bson.objectid import ObjectId
from faker import Faker
from flask import url_for
from dawdle import create_app
from dawdle.models.board import Board, BoardType
from dawdle.models.user import User
class TestBase:
@classmethod
def setup_class(cls):
cls.fake = Faker()
cls.app = create_app(testing=True)
cls.app.app_context().push()
cls.client = cls.app.test_client()
cls.password = cls.fake.password()
cls.user = cls.create_user(password=cls.password)
cls.login()
@classmethod
def teardown_class(cls):
cls.clear_db()
@classmethod
def create_user(cls, **kwargs):
user = User()
user.active = kwargs.get('active', True)
user.email = kwargs.get('email', cls.fake.email())
user.initials = kwargs.get(
'initials',
cls.fake.pystr(min_chars=1, max_chars=4),
).upper()
user.name = kwargs.get('name', cls.fake.name())
user.password = User.encrypt_password(
kwargs.get('password', cls.fake.password()),
)
return user.save()
@classmethod
def create_boards(cls, owner_id, min_boards=1, max_boards=1):
num = cls.fake.pyint(min_boards, max_boards)
boards = []
for n in range(num):
boards.append(cls.create_board(owner_id=owner_id))
return boards
@classmethod
def create_board(cls, **kwargs):
board = Board()
board.created_by = kwargs.get('created_by', ObjectId())
board.name = kwargs.get(
'name',
cls.fake.pystr(min_chars=1, max_chars=256),
)
board.owner_id = kwargs.get('owner_id', ObjectId())
board.type = kwargs.get('type', BoardType.PERSONAL.id)
return board.save()
@classmethod
def as_new_user(cls):
password = cls.fake.password()
user = cls.create_user(password=password)
cls.login(email=user.email, password=password)
return user, password
@classmethod
def login(cls, **kwargs):
email = kwargs.get('email', cls.user.email)
password = kwargs.get('password', cls.password)
data = {'email': email, 'password': password}
cls.client.post(url_for('auth.login_POST'), data=data)
cls.logged_in = cls.user.email == email and cls.password == password
@classmethod
def logout(cls):
cls.client.get(url_for('auth.logout_GET'))
cls.logged_in = False
@classmethod
def clear_db(cls):
Board.objects.delete()
User.objects.delete()
def setup_method(self):
if not self.logged_in:
self.login()
```

is_comment_constant_removed: true | is_sharp_comment_removed: true
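
Row 2's content is a shared pytest-style base class. A concrete test module would build on it roughly as sketched below; the board.boards_GET endpoint and the assertions are assumptions for illustration, not code from simba999/dawdle.

```python
# Hypothetical test built on the TestBase helpers above; the endpoint name
# and the expected status code are assumptions, not part of the repository.
from flask import url_for

from tests.test_base import TestBase


class TestBoards(TestBase):
    def test_new_user_sees_their_own_boards(self):
        user, password = self.as_new_user()        # fresh, logged-in user
        boards = self.create_boards(user.id, min_boards=2, max_boards=5)

        response = self.client.get(url_for("board.boards_GET"))  # assumed route

        assert response.status_code == 200
        body = response.get_data(as_text=True)
        assert all(str(board.id) in body for board in boards)
```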

Row 3 | hexsha: 790de63139a64e545b16f27828fa93eba0d5ea70 | size: 15,589 | ext: py | lang: Python
max_stars: src/wordle_cheater/interface.py @ edsq/wordle-cheater (head 216929bbc89da23693d434acbea7e0b89cbf698c), licenses ["MIT"], stars: null, star events: null to null
max_issues: src/wordle_cheater/interface.py @ edsq/wordle-cheater (head 216929bbc89da23693d434acbea7e0b89cbf698c), licenses ["MIT"], issues: 6, issue events: 2022-03-12T01:01:38.000Z to 2022-03-27T01:34:40.000Z
max_forks: src/wordle_cheater/interface.py @ edsq/wordle-cheater (head 216929bbc89da23693d434acbea7e0b89cbf698c), licenses ["MIT"], forks: null, fork events: null to null

content:

```python
"""Interfaces for interactively entering guesses."""
import curses
import time
import click
from wordle_cheater.interface_base import WordleCheaterUI
class CursesInterface(WordleCheaterUI):
"""Interface for using the curses library to enter guesses and display solutions.
Attributes
----------
guesses : list of WordleLetter objects
The currently entered guesses.
entering_letters : bool
Whether or not we are currently entering guesses.
"""
@classmethod
def init_and_run(cls, *args, **kwargs):
"""Instantiate and run `self.main()` using `curses.wrapper`.
Parameters
----------
*args : tuple
Positional arguments to be passed to the CursesInterface constructor.
**kwargs : dict, optional
Keyword arguments to be passed to the CursesInterface constructor.
Returns
-------
CursesInterface object
An instance of the CursesInterface class.
"""
ui = cls(*args, **kwargs)
curses.wrapper(ui.main)
return ui
def main(self, stdscr):
"""Run the interface.
Should typically be called using `curses.wrapper`.
Parameters
----------
stdscr : curses.Window object
The curses screen which the user interacts with.
"""
self.stdscr = stdscr
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK) # White on black
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_YELLOW) # Black on yellow
curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_GREEN) # Black on green
curses.init_pair(4, curses.COLOR_BLACK, curses.COLOR_RED) # Black on red
height, width = stdscr.getmaxyx()
self.results_window = curses.newwin(
height - 12, width, 12, 0
) # window for printing results
x0 = width // 2 - 3
y0 = 5
self.print_title()
self.enter_letters(x0=x0, y0=y0)
self.print_results()
self.set_cursor_visibility(False)
self.get_key()
def center_print(self, y, string, *args, **kwargs):
"""Print in the center of the screen.
Parameters
----------
y : int
The vertical location at which to print.
string : str
The string to print.
*args : tuple
Additional arguments to be passed to `stdscr.addstr`.
**kwargs : dict, optional
Keyword arguments to be passed to `stdscr.addstr`.
"""
height, width = self.stdscr.getmaxyx()
str_length = len(string)
x_mid = width // 2
self.stdscr.addstr(y, x_mid - str_length // 2, string, *args, **kwargs)
def print_title(self):
"""Print title and instructions."""
self.center_print(1, "Wordle Cheater :(", curses.A_BOLD)
self.center_print(2, "Enter guesses below.")
self.center_print(3, "spacebar: change color", curses.A_DIM)
def print_results(self, sep=" "):
"""Print possible solutions given guesses.
Parameters
----------
sep : str, optional
The string to display between each possible solution.
"""
height, width = self.results_window.getmaxyx()
max_rows = height - 1 # -1 to account for "Possible solutions" header
cols = width // (5 + len(sep))
out_str = self.get_results_string(max_rows=max_rows, max_cols=cols, sep=sep)
self.results_window.clear()
self.results_window.addstr(0, 0, "Possible solutions:", curses.A_UNDERLINE)
self.results_window.addstr(1, 0, out_str)
self.results_window.refresh()
def print(self, x, y, string, c=None):
"""Print `string` at coordinates `x`, `y`.
Parameters
----------
x : int
Horizontal position at which to print the string.
y : int
Height at which to print the string.
string : str
The string to print.
c : str, {None, 'black', 'yellow', 'green', 'red'}
The color in which to print. Must be one of
['black', 'yellow', 'green', 'red'] or None. If `c` is None, it should
print in the default color pair.
"""
if c is None:
self.stdscr.addstr(y, x, string)
elif c == "black":
self.stdscr.addstr(y, x, string, curses.color_pair(1))
elif c == "yellow":
self.stdscr.addstr(y, x, string, curses.color_pair(2))
elif c == "green":
self.stdscr.addstr(y, x, string, curses.color_pair(3))
elif c == "red":
self.stdscr.addstr(y, x, string, curses.color_pair(4))
else:
raise ValueError(
"`c` must be one of ['black', 'yellow', 'green', 'red'] or none."
)
def sleep(self, ms):
"""Temporarily suspend execution.
Parameters
----------
ms : int
            Number of milliseconds before execution resumes.
"""
curses.napms(ms)
self.stdscr.refresh()
def move_cursor(self, x, y):
"""Move cursor to position `x`, `y`.
Parameters
----------
x : int
Desired horizontal position of cursor.
y : int
Desired vertical position of cursor.
"""
self.stdscr.move(y, x)
def set_cursor_visibility(self, visible):
"""Set cursor visibility.
Parameters
----------
visible : bool
Whether or not the cursor is visible.
"""
curses.curs_set(visible)
def get_key(self):
"""Get a key press.
Returns
-------
key : str
The key that was pressed.
"""
return self.stdscr.getkey()
def is_enter(self, key):
"""Check if `key` is the enter/return key.
Parameters
----------
key : str
The key to check.
Returns
-------
is_enter : bool
True if `key` is the enter or return key, False otherwise.
"""
if key == curses.KEY_ENTER or key == "\n" or key == "\r":
return True
else:
return False
def is_backspace(self, key):
"""Check if `key` is the backspace/delete key.
Parameters
----------
key : str
The key to check.
Returns
-------
is_backspace : bool
True if `key` is the backspace or delete key, False otherwise.
"""
if key == curses.KEY_BACKSPACE or key == "\b" or key == "\x7f":
return True
else:
return False
class ClickInterface(WordleCheaterUI):
"""Interface for using Click alone to enter letters and see solutions.
Parameters
----------
max_rows : int, optional
The maximum rows of possible solutions to print.
max_cols : int, optional
The maximum columns of possible solutions to print.
x0 : int, optional
The leftmost position where guesses will be entered.
y0 : int, optional
The topmost position where guesses will be entered.
esc : str, optional
The ANSI escape code for the terminal.
Attributes
----------
guesses : list of WordleLetter
The currently entered guesses.
entering_letters : bool
Whether or not we are currently entering guesses.
max_rows : int, optional
The maximum rows of possible solutions to print.
max_cols : int, optional
The maximum columns of possible solutions to print.
x0 : int, optional
The leftmost position where guesses will be entered.
y0 : int, optional
The topmost position where guesses will be entered.
esc : str, optional
The ANSI escape code for the terminal.
line_lengths : list of int
The highest x value we've printed to per line. For example, if we've printed
two lines, the first one up to x=5 and the second up to x=3, then
`line_lengths = [5, 3]`.
curs_xy
"""
def __init__(self, max_rows=10, max_cols=8, x0=4, y0=4, esc="\033"):
self.max_rows = max_rows # Maximum rows of results to print
self.max_cols = max_cols # Maximum columns of results to print
self.x0 = x0 # Initial x position of guesses
self.y0 = y0 # Initial y position of guesses
self.esc = esc # ANSI escape code
self._curs_xy = (0, 0) # cursor position
self.line_lengths = [0] # Highest x values we've hit per line
super().__init__()
@property
def curs_xy(self):
"""Location of cursor."""
return self._curs_xy
@curs_xy.setter
def curs_xy(self, xy):
"""Update max line lengths when we update cursor position."""
x, y = xy
if y > len(self.line_lengths) - 1:
self.line_lengths += [0 for i in range(y - len(self.line_lengths) + 1)]
if x > self.line_lengths[y]:
self.line_lengths[y] = x
self._curs_xy = xy
def main(self):
"""Run the interface."""
try:
self.print_title()
self.enter_letters(x0=self.x0, y0=self.y0)
self.print_results()
finally:
self.set_cursor_visibility(True)
def print_title(self):
"""Print title and instructions."""
self.print(0, 0, "Wordle Cheater :(", bold=True)
self.print(0, 1, "Enter guesses below.")
self.print(0, 2, "spacebar: change color", dim=True)
def print_results(self):
"""Print possible solutions given guesses."""
# If we're still entering letters, don't do anything
if self.entering_letters:
return
out_str = self.get_results_string(
max_rows=self.max_rows, max_cols=self.max_cols, sep=" "
)
self.move_cursor(0, self.curs_xy[1] + 1)
click.secho("Possible solutions:", underline=True)
click.echo(out_str)
def print(self, x, y, string, c=None, *args, **kwargs):
"""Print `string` at coordinates `x`, `y`.
Parameters
----------
x : int
Horizontal position at which to print the string.
y : int
Height at which to print the string.
string : str
The string to print.
c : str, {None, 'black', 'yellow', 'green', 'red'}
The color in which to print. Must be one of
['black', 'yellow', 'green', 'red'] or None. If `c` is None, it should
print in the default color pair.
*args : tuple
Additional arguments to be passed to `click.secho`.
**kwargs : dict, optional
Keyword arguments to be passed to `click.secho`.
"""
# Move cursor to x, y so we can print there
self.move_cursor(x, y)
if c is None:
click.secho(string, nl=False, *args, **kwargs)
elif c == "black":
click.secho(string, fg="white", bg="black", nl=False)
elif c == "yellow":
click.secho(string, fg="black", bg="yellow", nl=False)
elif c == "green":
click.secho(string, fg="black", bg="green", nl=False)
elif c == "red":
click.secho(string, fg="black", bg="red", nl=False)
else:
raise ValueError(
"`c` must be one of ['black', 'yellow', 'green', 'red'] or none."
)
self.curs_xy = (self.curs_xy[0] + len(string), self.curs_xy[1])
def sleep(self, ms):
"""Temporarily suspend execution.
Parameters
----------
ms : int
            Number of milliseconds before execution resumes.
"""
time.sleep(ms / 1000)
def move_cursor(self, x, y):
"""Move cursor to position `x`, `y`.
Parameters
----------
x : int
Desired horizontal position of cursor.
y : int
Desired vertical position of cursor.
"""
# Check if we want to move cursor up (decreasing y)
if self.curs_xy[1] > y:
click.echo(f"{self.esc}[{self.curs_xy[1] - y}A", nl=False)
# Check if we want to move cursor down (increasing y)
elif self.curs_xy[1] < y:
# Check if we need to add new lines to screen
if len(self.line_lengths) - 1 < y:
# First arrow down as far as possible
click.echo(
f"{self.esc}[{(len(self.line_lengths) - 1) - self.curs_xy[1]}B",
nl=False,
)
# Now add blank lines
click.echo("\n" * (y - (len(self.line_lengths) - 1)), nl=False)
# New line, so definitely need to print spaces to move x
click.echo(" " * x, nl=False)
self.curs_xy = (x, y)
return
else:
# Should just arrow down to not overwrite stuff
click.echo(f"{self.esc}[{y - self.curs_xy[1]}B", nl=False)
# Check if we want to move cursor left (decreasing x)
if self.curs_xy[0] > x:
click.echo(f"{self.esc}[{self.curs_xy[0] - x}D", nl=False)
# Check if we want to move cursor right (increasing x)
elif self.curs_xy[0] < x:
# Check if we need to add space to right of cursor
if self.line_lengths[y] > x:
# First arrow to the right as far as possible
click.echo(
f"{self.esc}[{self.line_lengths[y] - self.curs_xy[0]}C", nl=False
)
# Now add blank spaces
click.echo(" " * (x - self.line_lengths[y]), nl=False)
else:
# Should just arrow to right to not overwrite stuff
click.echo(f"{self.esc}[{x - self.curs_xy[0]}C", nl=False)
self.curs_xy = (x, y)
def set_cursor_visibility(self, visible):
"""Set cursor visibility.
Parameters
----------
visible : bool
Whether or not the cursor is visible.
"""
if visible:
click.echo(f"{self.esc}[?25h", nl=False)
else:
click.echo(f"{self.esc}[?25l", nl=False)
def get_key(self):
"""Get a key press.
Returns
-------
key : str
The key that was pressed.
"""
return click.getchar()
def is_enter(self, key):
"""Check if `key` is the enter/return key.
Parameters
----------
key : str
The key to check.
Returns
-------
is_enter : bool
True if `key` is the enter or return key, False otherwise.
"""
if key == "\r" or key == "\n":
return True
else:
return False
def is_backspace(self, key):
"""Check if `key` is the backspace/delete key.
Parameters
----------
key : str
The key to check.
Returns
-------
is_backspace : bool
True if `key` is the backspace or delete key, False otherwise.
"""
if key == "\b" or key == "\x7f":
return True
else:
return False
if __name__ == "__main__":
# curses_ui = CursesInterface()
# curses.wrapper(curses_ui.main)
click_ui = ClickInterface()
click_ui.main()
```

avg_line_length: 30.447266 | max_line_length: 87 | alphanum_fraction: 0.545641

content_no_comment:

```python
import curses
import time
import click
from wordle_cheater.interface_base import WordleCheaterUI
class CursesInterface(WordleCheaterUI):
@classmethod
def init_and_run(cls, *args, **kwargs):
ui = cls(*args, **kwargs)
curses.wrapper(ui.main)
return ui
def main(self, stdscr):
self.stdscr = stdscr
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_YELLOW)
curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_GREEN)
curses.init_pair(4, curses.COLOR_BLACK, curses.COLOR_RED)
height, width = stdscr.getmaxyx()
self.results_window = curses.newwin(
height - 12, width, 12, 0
)
x0 = width // 2 - 3
y0 = 5
self.print_title()
self.enter_letters(x0=x0, y0=y0)
self.print_results()
self.set_cursor_visibility(False)
self.get_key()
def center_print(self, y, string, *args, **kwargs):
height, width = self.stdscr.getmaxyx()
str_length = len(string)
x_mid = width // 2
self.stdscr.addstr(y, x_mid - str_length // 2, string, *args, **kwargs)
def print_title(self):
self.center_print(1, "Wordle Cheater :(", curses.A_BOLD)
self.center_print(2, "Enter guesses below.")
self.center_print(3, "spacebar: change color", curses.A_DIM)
def print_results(self, sep=" "):
height, width = self.results_window.getmaxyx()
max_rows = height - 1
cols = width // (5 + len(sep))
out_str = self.get_results_string(max_rows=max_rows, max_cols=cols, sep=sep)
self.results_window.clear()
self.results_window.addstr(0, 0, "Possible solutions:", curses.A_UNDERLINE)
self.results_window.addstr(1, 0, out_str)
self.results_window.refresh()
def print(self, x, y, string, c=None):
if c is None:
self.stdscr.addstr(y, x, string)
elif c == "black":
self.stdscr.addstr(y, x, string, curses.color_pair(1))
elif c == "yellow":
self.stdscr.addstr(y, x, string, curses.color_pair(2))
elif c == "green":
self.stdscr.addstr(y, x, string, curses.color_pair(3))
elif c == "red":
self.stdscr.addstr(y, x, string, curses.color_pair(4))
else:
raise ValueError(
"`c` must be one of ['black', 'yellow', 'green', 'red'] or none."
)
def sleep(self, ms):
curses.napms(ms)
self.stdscr.refresh()
def move_cursor(self, x, y):
self.stdscr.move(y, x)
def set_cursor_visibility(self, visible):
curses.curs_set(visible)
def get_key(self):
return self.stdscr.getkey()
def is_enter(self, key):
if key == curses.KEY_ENTER or key == "\n" or key == "\r":
return True
else:
return False
def is_backspace(self, key):
if key == curses.KEY_BACKSPACE or key == "\b" or key == "\x7f":
return True
else:
return False
class ClickInterface(WordleCheaterUI):
def __init__(self, max_rows=10, max_cols=8, x0=4, y0=4, esc="\033"):
self.max_rows = max_rows
self.max_cols = max_cols
self.x0 = x0
self.y0 = y0
self.esc = esc
self._curs_xy = (0, 0)
self.line_lengths = [0]
super().__init__()
@property
def curs_xy(self):
return self._curs_xy
@curs_xy.setter
def curs_xy(self, xy):
x, y = xy
if y > len(self.line_lengths) - 1:
self.line_lengths += [0 for i in range(y - len(self.line_lengths) + 1)]
if x > self.line_lengths[y]:
self.line_lengths[y] = x
self._curs_xy = xy
def main(self):
try:
self.print_title()
self.enter_letters(x0=self.x0, y0=self.y0)
self.print_results()
finally:
self.set_cursor_visibility(True)
def print_title(self):
self.print(0, 0, "Wordle Cheater :(", bold=True)
self.print(0, 1, "Enter guesses below.")
self.print(0, 2, "spacebar: change color", dim=True)
def print_results(self):
# If we're still entering letters, don't do anything
if self.entering_letters:
return
out_str = self.get_results_string(
max_rows=self.max_rows, max_cols=self.max_cols, sep=" "
)
self.move_cursor(0, self.curs_xy[1] + 1)
click.secho("Possible solutions:", underline=True)
click.echo(out_str)
def print(self, x, y, string, c=None, *args, **kwargs):
# Move cursor to x, y so we can print there
self.move_cursor(x, y)
if c is None:
click.secho(string, nl=False, *args, **kwargs)
elif c == "black":
click.secho(string, fg="white", bg="black", nl=False)
elif c == "yellow":
click.secho(string, fg="black", bg="yellow", nl=False)
elif c == "green":
click.secho(string, fg="black", bg="green", nl=False)
elif c == "red":
click.secho(string, fg="black", bg="red", nl=False)
else:
raise ValueError(
"`c` must be one of ['black', 'yellow', 'green', 'red'] or none."
)
self.curs_xy = (self.curs_xy[0] + len(string), self.curs_xy[1])
def sleep(self, ms):
time.sleep(ms / 1000)
def move_cursor(self, x, y):
# Check if we want to move cursor up (decreasing y)
if self.curs_xy[1] > y:
click.echo(f"{self.esc}[{self.curs_xy[1] - y}A", nl=False)
# Check if we want to move cursor down (increasing y)
elif self.curs_xy[1] < y:
# Check if we need to add new lines to screen
if len(self.line_lengths) - 1 < y:
# First arrow down as far as possible
click.echo(
f"{self.esc}[{(len(self.line_lengths) - 1) - self.curs_xy[1]}B",
nl=False,
)
# Now add blank lines
click.echo("\n" * (y - (len(self.line_lengths) - 1)), nl=False)
# New line, so definitely need to print spaces to move x
click.echo(" " * x, nl=False)
self.curs_xy = (x, y)
return
else:
# Should just arrow down to not overwrite stuff
click.echo(f"{self.esc}[{y - self.curs_xy[1]}B", nl=False)
# Check if we want to move cursor left (decreasing x)
if self.curs_xy[0] > x:
click.echo(f"{self.esc}[{self.curs_xy[0] - x}D", nl=False)
# Check if we want to move cursor right (increasing x)
elif self.curs_xy[0] < x:
# Check if we need to add space to right of cursor
if self.line_lengths[y] > x:
# First arrow to the right as far as possible
click.echo(
f"{self.esc}[{self.line_lengths[y] - self.curs_xy[0]}C", nl=False
)
# Now add blank spaces
click.echo(" " * (x - self.line_lengths[y]), nl=False)
else:
# Should just arrow to right to not overwrite stuff
click.echo(f"{self.esc}[{x - self.curs_xy[0]}C", nl=False)
self.curs_xy = (x, y)
def set_cursor_visibility(self, visible):
if visible:
click.echo(f"{self.esc}[?25h", nl=False)
else:
click.echo(f"{self.esc}[?25l", nl=False)
def get_key(self):
return click.getchar()
def is_enter(self, key):
if key == "\r" or key == "\n":
return True
else:
return False
def is_backspace(self, key):
if key == "\b" or key == "\x7f":
return True
else:
return False
if __name__ == "__main__":
# curses_ui = CursesInterface()
# curses.wrapper(curses_ui.main)
click_ui = ClickInterface()
click_ui.main()
```

is_comment_constant_removed: true | is_sharp_comment_removed: true
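
Row 3's content implements two front ends over a shared WordleCheaterUI base class. Driving them looks roughly like the sketch below; the command-line flag is an assumption, while init_and_run() and ClickInterface.main() come from the module's own docstrings and __main__ block.

```python
# Sketch of launching the two interfaces defined above; only the
# --use-curses flag is invented here for illustration.
import sys

from wordle_cheater.interface import ClickInterface, CursesInterface

if __name__ == "__main__":
    if "--use-curses" in sys.argv:
        # init_and_run() wraps main() in curses.wrapper internally.
        CursesInterface.init_and_run()
    else:
        ClickInterface(max_rows=10, max_cols=8).main()
```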

Row 4 | hexsha: 790de63c43567ce6ab59197e8f261a90fee5e92c | size: 4,359 | ext: py | lang: Python
max_stars: src/dataset_creator.py @ WZX1998/facial-recognition (head 458b42e24aadda0df449c8a3170c2d3d0d01e5d8), licenses ["MIT"], stars: 95, star events: 2018-10-15T15:47:00.000Z to 2021-11-16T05:29:45.000Z
max_issues: src/dataset_creator.py @ niloyniil/facial_emotion_recognition__EMOJIFIER (head 3284445abde438c0ae77807eeaf53bb5d1e06308), licenses ["MIT"], issues: 10, issue events: 2019-12-16T21:52:26.000Z to 2022-02-10T01:35:51.000Z
max_forks: src/dataset_creator.py @ niloyniil/facial_emotion_recognition__EMOJIFIER (head 3284445abde438c0ae77807eeaf53bb5d1e06308), licenses ["MIT"], forks: 33, fork events: 2018-10-16T06:45:23.000Z to 2022-02-19T09:04:58.000Z

content:

```python
import logging
import pickle
import os
import sys
import json
import cv2
import numpy as np
import glob
import tqdm
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
import src
from src.__init__ import *
def image_reader(image_path_list):
image = cv2.imread(image_path_list[0], 0)
image = cv2.resize(image, (48, 48))
image = np.expand_dims(image, axis=0)
for img_path in image_path_list[1:]:
image = np.concatenate(
(
image,
np.expand_dims(
cv2.resize(cv2.imread(img_path, 0), (48, 48)),
axis=0
)
),
axis=0
)
return image
def image_label_generator(emotion_map):
labels = []
_i = 0
image_lists = []
for k, v in tqdm.tqdm(emotion_map.items()):
path = os.path.join(FACE_IMAGES_PATH, k)
logger.debug('reading images at path: {}'.format(path))
image_list = glob.glob(path+'/*.png')
logger.debug('length images list: {}'.format(len(image_list)))
image_lists.append(image_list)
labels.extend([v]*len(image_list))
    images = np.vstack([image_reader(image_list) for image_list in image_lists])
return images, labels
def train_test_splitter(images, labels):
dataset = [(image, label) for image, label in zip(images, labels)]
dataset_size = len(dataset)
trainset_size = int(.8 * dataset_size)
testset_size = dataset_size - trainset_size
logger.debug('Dataset size: {}'.format(dataset_size))
np.random.shuffle(dataset)
# PAY ATTENTION HERE: YOU CAN ALSO ADD DEV-SET :)
trainset, testset = dataset[:trainset_size], dataset[trainset_size:]
logger.debug('Trainset size: {}, Testset size: {}'.format(
len(trainset), len(testset)
))
    logger.debug('concatenating the train images on axis 0')
    train_image = np.vstack([tr[0] for tr in tqdm.tqdm(trainset[:])])
    logger.debug('concatenating the train labels on axis 0')
    train_label = [tr[1] for tr in tqdm.tqdm(trainset[:])]
    logger.info('concatenating the test images on axis 0')
    test_image = np.vstack([te[0] for te in tqdm.tqdm(testset[:])])
    logger.debug('concatenating the test labels on axis 0')
test_label = [te[1] for te in tqdm.tqdm(testset[:])]
logger.debug('train-images-shape: {}, test-images-shape: {}'.format(
train_image.shape, test_image.shape
))
return (train_image, train_label), (test_image, test_label)
def create_dataset(images, labels):
images = np.reshape(images, (-1, 48*48))
logger.debug('images-shape: {}, length-labels: {}'.format(
images.shape, len(labels)
))
train, test = train_test_splitter(images, labels)
train_dict = {
'data': train[0],
'labels': train[1]
}
test_dict = {
'data': test[0],
'labels': test[1]
}
with open(os.path.join(DATASET_SAVE_PATH, 'train_batch_0'), 'wb') as file:
pickle.dump(train_dict, file)
logger.info('dataset: trainset-dict pickled and saved at {}'.format(DATASET_SAVE_PATH))
with open(os.path.join(DATASET_SAVE_PATH, 'test_batch_0'), 'wb') as file:
pickle.dump(test_dict, file)
logger.info('dataset: testset-dict pickled and saved at {}'.format(DATASET_SAVE_PATH))
logger.info('dataset created :)')
def condition_satisfied(emotion_map):
for emotion_class in emotion_map.keys():
path = os.path.join(FACE_IMAGES_PATH, emotion_class)
if not os.path.exists(path):
logger.error('Please capture images for "{}" emotion-class as well'.format(
emotion_class
))
logger.error('FAIL.')
return False
return True
if __name__ == '__main__':
logger = logging.getLogger('emojifier.dataset_creator')
FACE_IMAGES_PATH = os.path.join(os.path.dirname(__file__), os.pardir, 'images')
DATASET_SAVE_PATH = os.path.join(os.path.dirname(__file__), os.pardir, 'dataset')
if not os.path.exists(DATASET_SAVE_PATH):
os.makedirs(DATASET_SAVE_PATH)
if condition_satisfied(EMOTION_MAP):
_images, _labels = image_label_generator(EMOTION_MAP)
create_dataset(_images, _labels)
```

avg_line_length: 29.653061 | max_line_length: 95 | alphanum_fraction: 0.628585

content_no_comment:

```python
import logging
import pickle
import os
import sys
import json
import cv2
import numpy as np
import glob
import tqdm
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
import src
from src.__init__ import *
def image_reader(image_path_list):
image = cv2.imread(image_path_list[0], 0)
image = cv2.resize(image, (48, 48))
image = np.expand_dims(image, axis=0)
for img_path in image_path_list[1:]:
image = np.concatenate(
(
image,
np.expand_dims(
cv2.resize(cv2.imread(img_path, 0), (48, 48)),
axis=0
)
),
axis=0
)
return image
def image_label_generator(emotion_map):
labels = []
_i = 0
image_lists = []
for k, v in tqdm.tqdm(emotion_map.items()):
path = os.path.join(FACE_IMAGES_PATH, k)
logger.debug('reading images at path: {}'.format(path))
image_list = glob.glob(path+'/*.png')
logger.debug('length images list: {}'.format(len(image_list)))
image_lists.append(image_list)
labels.extend([v]*len(image_list))
    images = np.vstack([image_reader(image_list) for image_list in image_lists])
return images, labels
def train_test_splitter(images, labels):
dataset = [(image, label) for image, label in zip(images, labels)]
dataset_size = len(dataset)
trainset_size = int(.8 * dataset_size)
testset_size = dataset_size - trainset_size
logger.debug('Dataset size: {}'.format(dataset_size))
np.random.shuffle(dataset)
trainset, testset = dataset[:trainset_size], dataset[trainset_size:]
logger.debug('Trainset size: {}, Testset size: {}'.format(
len(trainset), len(testset)
))
    logger.debug('concatenating the train images on axis 0')
    train_image = np.vstack([tr[0] for tr in tqdm.tqdm(trainset[:])])
    logger.debug('concatenating the train labels on axis 0')
    train_label = [tr[1] for tr in tqdm.tqdm(trainset[:])]
    logger.info('concatenating the test images on axis 0')
    test_image = np.vstack([te[0] for te in tqdm.tqdm(testset[:])])
    logger.debug('concatenating the test labels on axis 0')
test_label = [te[1] for te in tqdm.tqdm(testset[:])]
logger.debug('train-images-shape: {}, test-images-shape: {}'.format(
train_image.shape, test_image.shape
))
return (train_image, train_label), (test_image, test_label)
def create_dataset(images, labels):
images = np.reshape(images, (-1, 48*48))
logger.debug('images-shape: {}, length-labels: {}'.format(
images.shape, len(labels)
))
train, test = train_test_splitter(images, labels)
train_dict = {
'data': train[0],
'labels': train[1]
}
test_dict = {
'data': test[0],
'labels': test[1]
}
with open(os.path.join(DATASET_SAVE_PATH, 'train_batch_0'), 'wb') as file:
pickle.dump(train_dict, file)
logger.info('dataset: trainset-dict pickled and saved at {}'.format(DATASET_SAVE_PATH))
with open(os.path.join(DATASET_SAVE_PATH, 'test_batch_0'), 'wb') as file:
pickle.dump(test_dict, file)
logger.info('dataset: testset-dict pickled and saved at {}'.format(DATASET_SAVE_PATH))
logger.info('dataset created :)')
def condition_satisfied(emotion_map):
for emotion_class in emotion_map.keys():
path = os.path.join(FACE_IMAGES_PATH, emotion_class)
if not os.path.exists(path):
logger.error('Please capture images for "{}" emotion-class as well'.format(
emotion_class
))
logger.error('FAIL.')
return False
return True
if __name__ == '__main__':
logger = logging.getLogger('emojifier.dataset_creator')
FACE_IMAGES_PATH = os.path.join(os.path.dirname(__file__), os.pardir, 'images')
DATASET_SAVE_PATH = os.path.join(os.path.dirname(__file__), os.pardir, 'dataset')
if not os.path.exists(DATASET_SAVE_PATH):
os.makedirs(DATASET_SAVE_PATH)
if condition_satisfied(EMOTION_MAP):
_images, _labels = image_label_generator(EMOTION_MAP)
create_dataset(_images, _labels)
```

is_comment_constant_removed: true | is_sharp_comment_removed: true
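
Row 4's content pickles a training and a test batch of flattened 48x48 face crops. Reading a batch back looks roughly like the sketch below; the keys, file name, and image shape come from the script itself, while the relative dataset path is an assumption.

```python
# Sketch of loading a batch written by create_dataset() above. The keys
# ('data', 'labels'), the file name 'train_batch_0', and the flattened
# 48*48 layout are taken from the script; the directory path is assumed.
import os
import pickle

import numpy as np

batch_path = os.path.join("dataset", "train_batch_0")  # assumed location
with open(batch_path, "rb") as fh:
    batch = pickle.load(fh)

images = np.asarray(batch["data"]).reshape(-1, 48, 48)  # back to 2-D crops
labels = batch["labels"]
print(images.shape, len(labels))
```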

Row 5 | hexsha: 790de642b09bcd930cbaa741c4bda368a30467f8 | size: 10,160 | ext: py | lang: Python
max_stars: Autocoders/Python/src/fprime_ac/parsers/XmlSerializeParser.py @ WaterBubbles/fprime (head dfb1e72bfbe4f21c0014bf78d762fddb3b5c39a1), licenses ["Apache-2.0"], stars: 2, star events: 2021-02-23T06:56:03.000Z to 2021-02-23T07:03:53.000Z
max_issues: Autocoders/Python/src/fprime_ac/parsers/XmlSerializeParser.py @ WaterBubbles/fprime (head dfb1e72bfbe4f21c0014bf78d762fddb3b5c39a1), licenses ["Apache-2.0"], issues: 9, issue events: 2021-02-21T07:27:44.000Z to 2021-02-21T07:27:58.000Z
max_forks: Autocoders/Python/src/fprime_ac/parsers/XmlSerializeParser.py @ WaterBubbles/fprime (head dfb1e72bfbe4f21c0014bf78d762fddb3b5c39a1), licenses ["Apache-2.0"], forks: 1, fork events: 2021-02-23T17:10:44.000Z to 2021-02-23T17:10:44.000Z

content:

```python
#!/usr/bin/env python3
# ===============================================================================
# NAME: XmlSerializeParser.py
#
# DESCRIPTION: This class parses the XML serializable types files.
#
# USAGE:
#
# AUTHOR: reder
# EMAIL: reder@jpl.nasa.gov
# DATE CREATED : June 4, 2013
#
# Copyright 2013, California Institute of Technology.
# ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged.
# ===============================================================================
#
# Python standard modules
#
import hashlib
import logging
import os
import sys
from lxml import etree
from fprime_ac.utils import ConfigManager
from fprime_ac.utils.buildroot import (
BuildRootCollisionException,
BuildRootMissingException,
locate_build_root,
)
from fprime_ac.utils.exceptions import FprimeXmlException
#
# Python extension modules and custom interfaces
#
#
# Universal globals used within module go here.
# (DO NOT USE MANY!)
#
# Global logger init. below.
PRINT = logging.getLogger("output")
DEBUG = logging.getLogger("debug")
format_dictionary = {
"U8": "%u",
"I8": "%d",
"U16": "%u",
"I16": "%d",
"U32": "%u",
"I32": "%d",
"U64": "%lu",
"I64": "%ld",
"F32": "%g",
"F64": "%g",
"bool": "%s",
"string": "%s",
"ENUM": "%d",
}
#
class XmlSerializeParser:
"""
    An XML parser class that uses lxml.etree to consume XML
    serializable type documents. The class is instantiated with
    an XML file name.
"""
def __init__(self, xml_file=None):
"""
Given a well formed XML file (xml_file), read it and turn it into
a big string.
"""
self.__root = None
self.__name = ""
self.__namespace = None
# List of C++ include files for serializable *.hpp file
self.__include_header_files = []
# List of XML serializable description dependencies
self.__includes = []
# List of XML enum type files
self.__include_enum_files = []
# List of XML array type files
self.__include_array_files = []
# Comment block of text for serializable
self.__comment = ""
# List of (name, type, comment) tuples
self.__members = []
# Type ID for serialized type
self.__type_id = None
#
if os.path.isfile(xml_file) == False:
stri = "ERROR: Could not find specified XML file %s." % xml_file
raise OSError(stri)
fd = open(xml_file)
xml_file = os.path.basename(xml_file)
# xml_file = os.path.basename(xml_file)
self.__xml_filename = xml_file
self.__config = ConfigManager.ConfigManager.getInstance()
#
xml_parser = etree.XMLParser(remove_comments=True)
element_tree = etree.parse(fd, parser=xml_parser)
# Validate new imports using their root tag as a key to find what schema to use
rng_file = self.__config.get(
"schema", element_tree.getroot().tag.lower()
).lstrip("/")
try:
rng_file = locate_build_root(rng_file)
except (BuildRootMissingException, BuildRootCollisionException) as bre:
stri = "ERROR: Could not find specified RNG file {}. {}".format(
rng_file,
str(bre),
)
raise OSError(stri)
file_handler = open(rng_file)
relax_parsed = etree.parse(file_handler)
file_handler.close()
relax_compiled = etree.RelaxNG(relax_parsed)
# 2/3 conversion
if not relax_compiled.validate(element_tree):
msg = "XML file {} is not valid according to schema {}.".format(
xml_file, rng_file
)
raise FprimeXmlException(msg)
serializable = element_tree.getroot()
if serializable.tag != "serializable":
PRINT.info("%s is not a serializable definition file" % xml_file)
sys.exit(-1)
print("Parsing Serializable %s" % serializable.attrib["name"])
self.__name = serializable.attrib["name"]
if "namespace" in serializable.attrib:
self.__namespace = serializable.attrib["namespace"]
else:
self.__namespace = None
if "typeid" in serializable.attrib:
self.__type_id = serializable.attrib["typeid"]
else:
self.__type_id = None
for serializable_tag in serializable:
if serializable_tag.tag == "comment":
self.__comment = serializable_tag.text.strip()
elif serializable_tag.tag == "include_header":
self.__include_header_files.append(serializable_tag.text)
elif serializable_tag.tag == "import_serializable_type":
self.__includes.append(serializable_tag.text)
elif serializable_tag.tag == "import_enum_type":
self.__include_enum_files.append(serializable_tag.text)
elif serializable_tag.tag == "import_array_type":
self.__include_array_files.append(serializable_tag.text)
elif serializable_tag.tag == "members":
for member in serializable_tag:
if member.tag != "member":
PRINT.info(
"%s: Invalid tag %s in serializable member definition"
% (xml_file, member.tag)
)
sys.exit(-1)
n = member.attrib["name"]
t = member.attrib["type"]
if "size" in list(member.attrib.keys()):
if t == "ENUM":
PRINT.info(
"%s: Member %s: arrays of enums not supported yet!"
% (xml_file, n)
)
sys.exit(-1)
s = member.attrib["size"]
if not s.isdigit():
PRINT.info(
"{}: Member {}: size must be a number".format(
xml_file, n
)
)
sys.exit(-1)
else:
s = None
if "format" in list(member.attrib.keys()):
f = member.attrib["format"]
else:
if t in list(format_dictionary.keys()):
f = format_dictionary[t]
else: # Must be included type, which will use toString method
f = "%s"
if t == "string":
if s is None:
PRINT.info(
"%s: member %s string must specify size tag"
% (xml_file, member.tag)
)
sys.exit(-1)
if "comment" in list(member.attrib.keys()):
c = member.attrib["comment"]
else:
c = None
for member_tag in member:
if member_tag.tag == "enum" and t == "ENUM":
en = member_tag.attrib["name"]
enum_members = []
for mem in member_tag:
mn = mem.attrib["name"]
if "value" in list(mem.attrib.keys()):
v = mem.attrib["value"]
else:
v = None
if "comment" in list(mem.attrib.keys()):
mc = mem.attrib["comment"].strip()
else:
mc = None
enum_members.append((mn, v, mc))
t = ((t, en), enum_members)
else:
PRINT.info(
"%s: Invalid member tag %s in serializable member %s"
% (xml_file, member_tag.tag, n)
)
sys.exit(-1)
self.__members.append((n, t, s, f, c))
#
# Generate a type id here using SHA256 algorithm and XML stringified file.
#
if not "typeid" in serializable.attrib:
s = etree.tostring(element_tree.getroot())
h = hashlib.sha256(s)
n = h.hexdigest()
self.__type_id = "0x" + n.upper()[-8:]
def get_typeid(self):
"""
Return a generated type ID from contents of XML file.
"""
return self.__type_id
def get_xml_filename(self):
"""
Return the original XML filename parsed.
"""
return self.__xml_filename
def get_name(self):
return self.__name
def get_namespace(self):
return self.__namespace
def get_include_header_files(self):
"""
        Return the list of C++ include files for the serializable *.hpp file.
"""
return self.__include_header_files
def get_includes(self):
"""
Returns a list of all imported XML serializable files.
"""
return self.__includes
def get_include_enums(self):
"""
Returns a list of all imported XML enum files.
"""
return self.__include_enum_files
def get_include_arrays(self):
"""
Returns a list of all imported XML array files.
"""
return self.__include_array_files
def get_comment(self):
"""
Return text block string of comment for serializable class.
"""
return self.__comment
def get_members(self):
"""
Returns a list of member (name, type, optional size, optional format, optional comment) needed.
"""
return self.__members
```

avg_line_length: 34.324324 | max_line_length: 103 | alphanum_fraction: 0.500197

content_no_comment:

```python
import hashlib
import logging
import os
import sys
from lxml import etree
from fprime_ac.utils import ConfigManager
from fprime_ac.utils.buildroot import (
BuildRootCollisionException,
BuildRootMissingException,
locate_build_root,
)
from fprime_ac.utils.exceptions import FprimeXmlException
PRINT = logging.getLogger("output")
DEBUG = logging.getLogger("debug")
format_dictionary = {
"U8": "%u",
"I8": "%d",
"U16": "%u",
"I16": "%d",
"U32": "%u",
"I32": "%d",
"U64": "%lu",
"I64": "%ld",
"F32": "%g",
"F64": "%g",
"bool": "%s",
"string": "%s",
"ENUM": "%d",
}
class XmlSerializeParser:
def __init__(self, xml_file=None):
self.__root = None
self.__name = ""
self.__namespace = None
self.__include_header_files = []
self.__includes = []
self.__include_enum_files = []
self.__include_array_files = []
self.__comment = ""
self.__members = []
self.__type_id = None
if os.path.isfile(xml_file) == False:
stri = "ERROR: Could not find specified XML file %s." % xml_file
raise OSError(stri)
fd = open(xml_file)
xml_file = os.path.basename(xml_file)
self.__xml_filename = xml_file
self.__config = ConfigManager.ConfigManager.getInstance()
xml_parser = etree.XMLParser(remove_comments=True)
element_tree = etree.parse(fd, parser=xml_parser)
rng_file = self.__config.get(
"schema", element_tree.getroot().tag.lower()
).lstrip("/")
try:
rng_file = locate_build_root(rng_file)
except (BuildRootMissingException, BuildRootCollisionException) as bre:
stri = "ERROR: Could not find specified RNG file {}. {}".format(
rng_file,
str(bre),
)
raise OSError(stri)
file_handler = open(rng_file)
relax_parsed = etree.parse(file_handler)
file_handler.close()
relax_compiled = etree.RelaxNG(relax_parsed)
if not relax_compiled.validate(element_tree):
msg = "XML file {} is not valid according to schema {}.".format(
xml_file, rng_file
)
raise FprimeXmlException(msg)
serializable = element_tree.getroot()
if serializable.tag != "serializable":
PRINT.info("%s is not a serializable definition file" % xml_file)
sys.exit(-1)
print("Parsing Serializable %s" % serializable.attrib["name"])
self.__name = serializable.attrib["name"]
if "namespace" in serializable.attrib:
self.__namespace = serializable.attrib["namespace"]
else:
self.__namespace = None
if "typeid" in serializable.attrib:
self.__type_id = serializable.attrib["typeid"]
else:
self.__type_id = None
for serializable_tag in serializable:
if serializable_tag.tag == "comment":
self.__comment = serializable_tag.text.strip()
elif serializable_tag.tag == "include_header":
self.__include_header_files.append(serializable_tag.text)
elif serializable_tag.tag == "import_serializable_type":
self.__includes.append(serializable_tag.text)
elif serializable_tag.tag == "import_enum_type":
self.__include_enum_files.append(serializable_tag.text)
elif serializable_tag.tag == "import_array_type":
self.__include_array_files.append(serializable_tag.text)
elif serializable_tag.tag == "members":
for member in serializable_tag:
if member.tag != "member":
PRINT.info(
"%s: Invalid tag %s in serializable member definition"
% (xml_file, member.tag)
)
sys.exit(-1)
n = member.attrib["name"]
t = member.attrib["type"]
if "size" in list(member.attrib.keys()):
if t == "ENUM":
PRINT.info(
"%s: Member %s: arrays of enums not supported yet!"
% (xml_file, n)
)
sys.exit(-1)
s = member.attrib["size"]
if not s.isdigit():
PRINT.info(
"{}: Member {}: size must be a number".format(
xml_file, n
)
)
sys.exit(-1)
else:
s = None
if "format" in list(member.attrib.keys()):
f = member.attrib["format"]
else:
if t in list(format_dictionary.keys()):
f = format_dictionary[t]
else:
f = "%s"
if t == "string":
if s is None:
PRINT.info(
"%s: member %s string must specify size tag"
% (xml_file, member.tag)
)
sys.exit(-1)
if "comment" in list(member.attrib.keys()):
c = member.attrib["comment"]
else:
c = None
for member_tag in member:
if member_tag.tag == "enum" and t == "ENUM":
en = member_tag.attrib["name"]
enum_members = []
for mem in member_tag:
mn = mem.attrib["name"]
if "value" in list(mem.attrib.keys()):
v = mem.attrib["value"]
else:
v = None
if "comment" in list(mem.attrib.keys()):
mc = mem.attrib["comment"].strip()
else:
mc = None
enum_members.append((mn, v, mc))
t = ((t, en), enum_members)
else:
PRINT.info(
"%s: Invalid member tag %s in serializable member %s"
% (xml_file, member_tag.tag, n)
)
sys.exit(-1)
self.__members.append((n, t, s, f, c))
if not "typeid" in serializable.attrib:
s = etree.tostring(element_tree.getroot())
h = hashlib.sha256(s)
n = h.hexdigest()
self.__type_id = "0x" + n.upper()[-8:]
def get_typeid(self):
return self.__type_id
def get_xml_filename(self):
return self.__xml_filename
def get_name(self):
return self.__name
def get_namespace(self):
return self.__namespace
def get_include_header_files(self):
return self.__include_header_files
def get_includes(self):
return self.__includes
def get_include_enums(self):
return self.__include_enum_files
def get_include_arrays(self):
return self.__include_array_files
def get_comment(self):
return self.__comment
def get_members(self):
return self.__members
```

is_comment_constant_removed: true | is_sharp_comment_removed: true
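
Row 5's content parses F Prime serializable XML definitions. The sketch below shows the kind of document the parser walks (serializable, comment, members, member tags) and the getters it exposes; the member names are made up, and a real run also needs the F Prime build root configured so the RelaxNG schema can be located.

```python
# Illustrative only: the XML structure mirrors the tags XmlSerializeParser
# looks for, but validation against the RelaxNG schema requires a configured
# F Prime build root, so this is not a standalone runnable example.
from fprime_ac.parsers.XmlSerializeParser import XmlSerializeParser

EXAMPLE_XML = """\
<serializable name="Telemetry" namespace="Ref">
    <comment>Example serializable type</comment>
    <members>
        <member name="counter" type="U32" comment="running packet count"/>
        <member name="label" type="string" size="40"/>
    </members>
</serializable>
"""

with open("Telemetry.xml", "w") as fh:   # hypothetical scratch file
    fh.write(EXAMPLE_XML)

parser = XmlSerializeParser("Telemetry.xml")
print(parser.get_name(), parser.get_namespace())
print(parser.get_members())  # [(name, type, size, format, comment), ...]
print(parser.get_typeid())   # SHA-256-derived when no typeid attribute is given
```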

Row 6 | hexsha: 790de64f669e9144b97ada11646d47281ef20116 | size: 7,916 | ext: py | lang: Python
max_stars: main.py @ RomeroBarata/upt (head 6f953e7a61c31cf608aef9a77b0af3ae8e1f6594), licenses ["BSD-3-Clause"], stars: 75, star events: 2021-12-05T23:56:11.000Z to 2022-03-31T16:02:33.000Z
max_issues: main.py @ RomeroBarata/upt (head 6f953e7a61c31cf608aef9a77b0af3ae8e1f6594), licenses ["BSD-3-Clause"], issues: 8, issue events: 2021-12-10T07:56:25.000Z to 2022-03-25T12:16:55.000Z
max_forks: main.py @ RomeroBarata/upt (head 6f953e7a61c31cf608aef9a77b0af3ae8e1f6594), licenses ["BSD-3-Clause"], forks: 16, fork events: 2021-12-07T16:34:19.000Z to 2022-03-28T12:46:27.000Z

content:

```python
"""
Utilities for training, testing and caching results
for HICO-DET and V-COCO evaluations.
Fred Zhang <frederic.zhang@anu.edu.au>
The Australian National University
Australian Centre for Robotic Vision
"""
import os
import sys
import torch
import random
import warnings
import argparse
import numpy as np
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.utils.data import DataLoader, DistributedSampler
from upt import build_detector
from utils import custom_collate, CustomisedDLE, DataFactory
warnings.filterwarnings("ignore")
def main(rank, args):
dist.init_process_group(
backend="nccl",
init_method="env://",
world_size=args.world_size,
rank=rank
)
# Fix seed
seed = args.seed + dist.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
torch.cuda.set_device(rank)
trainset = DataFactory(name=args.dataset, partition=args.partitions[0], data_root=args.data_root)
testset = DataFactory(name=args.dataset, partition=args.partitions[1], data_root=args.data_root)
train_loader = DataLoader(
dataset=trainset,
collate_fn=custom_collate, batch_size=args.batch_size,
num_workers=args.num_workers, pin_memory=True, drop_last=True,
sampler=DistributedSampler(
trainset,
num_replicas=args.world_size,
rank=rank)
)
test_loader = DataLoader(
dataset=testset,
collate_fn=custom_collate, batch_size=1,
num_workers=args.num_workers, pin_memory=True, drop_last=False,
sampler=torch.utils.data.SequentialSampler(testset)
)
args.human_idx = 0
if args.dataset == 'hicodet':
object_to_target = train_loader.dataset.dataset.object_to_verb
args.num_classes = 117
elif args.dataset == 'vcoco':
object_to_target = list(train_loader.dataset.dataset.object_to_action.values())
args.num_classes = 24
upt = build_detector(args, object_to_target)
if os.path.exists(args.resume):
print(f"=> Rank {rank}: continue from saved checkpoint {args.resume}")
checkpoint = torch.load(args.resume, map_location='cpu')
upt.load_state_dict(checkpoint['model_state_dict'])
else:
print(f"=> Rank {rank}: start from a randomly initialised model")
engine = CustomisedDLE(
upt, train_loader,
max_norm=args.clip_max_norm,
num_classes=args.num_classes,
print_interval=args.print_interval,
find_unused_parameters=True,
cache_dir=args.output_dir
)
if args.cache:
if args.dataset == 'hicodet':
engine.cache_hico(test_loader, args.output_dir)
elif args.dataset == 'vcoco':
engine.cache_vcoco(test_loader, args.output_dir)
return
if args.eval:
if args.dataset == 'vcoco':
raise NotImplementedError(f"Evaluation on V-COCO has not been implemented.")
ap = engine.test_hico(test_loader)
# Fetch indices for rare and non-rare classes
num_anno = torch.as_tensor(trainset.dataset.anno_interaction)
rare = torch.nonzero(num_anno < 10).squeeze(1)
non_rare = torch.nonzero(num_anno >= 10).squeeze(1)
print(
f"The mAP is {ap.mean():.4f},"
f" rare: {ap[rare].mean():.4f},"
f" none-rare: {ap[non_rare].mean():.4f}"
)
return
for p in upt.detector.parameters():
p.requires_grad = False
param_dicts = [{
"params": [p for n, p in upt.named_parameters()
if "interaction_head" in n and p.requires_grad]
}]
optim = torch.optim.AdamW(
param_dicts, lr=args.lr_head,
weight_decay=args.weight_decay
)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optim, args.lr_drop)
# Override optimiser and learning rate scheduler
engine.update_state_key(optimizer=optim, lr_scheduler=lr_scheduler)
engine(args.epochs)
@torch.no_grad()
def sanity_check(args):
dataset = DataFactory(name='hicodet', partition=args.partitions[0], data_root=args.data_root)
args.human_idx = 0; args.num_classes = 117
object_to_target = dataset.dataset.object_to_verb
upt = build_detector(args, object_to_target)
if args.eval:
upt.eval()
image, target = dataset[0]
outputs = upt([image], [target])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--lr-head', default=1e-4, type=float)
parser.add_argument('--batch-size', default=2, type=int)
parser.add_argument('--weight-decay', default=1e-4, type=float)
parser.add_argument('--epochs', default=20, type=int)
parser.add_argument('--lr-drop', default=10, type=int)
parser.add_argument('--clip-max-norm', default=0.1, type=float)
parser.add_argument('--backbone', default='resnet50', type=str)
parser.add_argument('--dilation', action='store_true')
parser.add_argument('--position-embedding', default='sine', type=str, choices=('sine', 'learned'))
parser.add_argument('--repr-dim', default=512, type=int)
parser.add_argument('--hidden-dim', default=256, type=int)
parser.add_argument('--enc-layers', default=6, type=int)
parser.add_argument('--dec-layers', default=6, type=int)
parser.add_argument('--dim-feedforward', default=2048, type=int)
parser.add_argument('--dropout', default=0.1, type=float)
parser.add_argument('--nheads', default=8, type=int)
parser.add_argument('--num-queries', default=100, type=int)
parser.add_argument('--pre-norm', action='store_true')
parser.add_argument('--no-aux-loss', dest='aux_loss', action='store_false')
parser.add_argument('--set-cost-class', default=1, type=float)
parser.add_argument('--set-cost-bbox', default=5, type=float)
parser.add_argument('--set-cost-giou', default=2, type=float)
parser.add_argument('--bbox-loss-coef', default=5, type=float)
parser.add_argument('--giou-loss-coef', default=2, type=float)
parser.add_argument('--eos-coef', default=0.1, type=float,
help="Relative classification weight of the no-object class")
parser.add_argument('--alpha', default=0.5, type=float)
parser.add_argument('--gamma', default=0.2, type=float)
parser.add_argument('--dataset', default='hicodet', type=str)
parser.add_argument('--partitions', nargs='+', default=['train2015', 'test2015'], type=str)
parser.add_argument('--num-workers', default=2, type=int)
parser.add_argument('--data-root', default='./hicodet')
# training parameters
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--port', default='1234', type=str)
parser.add_argument('--seed', default=66, type=int)
parser.add_argument('--pretrained', default='', help='Path to a pretrained detector')
parser.add_argument('--resume', default='', help='Resume from a model')
parser.add_argument('--output-dir', default='checkpoints')
parser.add_argument('--print-interval', default=500, type=int)
parser.add_argument('--world-size', default=1, type=int)
parser.add_argument('--eval', action='store_true')
parser.add_argument('--cache', action='store_true')
parser.add_argument('--sanity', action='store_true')
parser.add_argument('--box-score-thresh', default=0.2, type=float)
parser.add_argument('--fg-iou-thresh', default=0.5, type=float)
parser.add_argument('--min-instances', default=3, type=int)
parser.add_argument('--max-instances', default=15, type=int)
args = parser.parse_args()
print(args)
if args.sanity:
sanity_check(args)
sys.exit()
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = args.port
mp.spawn(main, nprocs=args.world_size, args=(args,))
```

avg_line_length: 37.875598 | max_line_length: 102 | alphanum_fraction: 0.676983

content_no_comment:

```python
import os
import sys
import torch
import random
import warnings
import argparse
import numpy as np
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.utils.data import DataLoader, DistributedSampler
from upt import build_detector
from utils import custom_collate, CustomisedDLE, DataFactory
warnings.filterwarnings("ignore")
def main(rank, args):
dist.init_process_group(
backend="nccl",
init_method="env://",
world_size=args.world_size,
rank=rank
)
seed = args.seed + dist.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
torch.cuda.set_device(rank)
trainset = DataFactory(name=args.dataset, partition=args.partitions[0], data_root=args.data_root)
testset = DataFactory(name=args.dataset, partition=args.partitions[1], data_root=args.data_root)
train_loader = DataLoader(
dataset=trainset,
collate_fn=custom_collate, batch_size=args.batch_size,
num_workers=args.num_workers, pin_memory=True, drop_last=True,
sampler=DistributedSampler(
trainset,
num_replicas=args.world_size,
rank=rank)
)
test_loader = DataLoader(
dataset=testset,
collate_fn=custom_collate, batch_size=1,
num_workers=args.num_workers, pin_memory=True, drop_last=False,
sampler=torch.utils.data.SequentialSampler(testset)
)
args.human_idx = 0
if args.dataset == 'hicodet':
object_to_target = train_loader.dataset.dataset.object_to_verb
args.num_classes = 117
elif args.dataset == 'vcoco':
object_to_target = list(train_loader.dataset.dataset.object_to_action.values())
args.num_classes = 24
upt = build_detector(args, object_to_target)
if os.path.exists(args.resume):
print(f"=> Rank {rank}: continue from saved checkpoint {args.resume}")
checkpoint = torch.load(args.resume, map_location='cpu')
upt.load_state_dict(checkpoint['model_state_dict'])
else:
print(f"=> Rank {rank}: start from a randomly initialised model")
engine = CustomisedDLE(
upt, train_loader,
max_norm=args.clip_max_norm,
num_classes=args.num_classes,
print_interval=args.print_interval,
find_unused_parameters=True,
cache_dir=args.output_dir
)
if args.cache:
if args.dataset == 'hicodet':
engine.cache_hico(test_loader, args.output_dir)
elif args.dataset == 'vcoco':
engine.cache_vcoco(test_loader, args.output_dir)
return
if args.eval:
if args.dataset == 'vcoco':
raise NotImplementedError(f"Evaluation on V-COCO has not been implemented.")
ap = engine.test_hico(test_loader)
num_anno = torch.as_tensor(trainset.dataset.anno_interaction)
rare = torch.nonzero(num_anno < 10).squeeze(1)
non_rare = torch.nonzero(num_anno >= 10).squeeze(1)
print(
f"The mAP is {ap.mean():.4f},"
f" rare: {ap[rare].mean():.4f},"
f" none-rare: {ap[non_rare].mean():.4f}"
)
return
for p in upt.detector.parameters():
p.requires_grad = False
param_dicts = [{
"params": [p for n, p in upt.named_parameters()
if "interaction_head" in n and p.requires_grad]
}]
optim = torch.optim.AdamW(
param_dicts, lr=args.lr_head,
weight_decay=args.weight_decay
)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optim, args.lr_drop)
engine.update_state_key(optimizer=optim, lr_scheduler=lr_scheduler)
engine(args.epochs)
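# Editor's note -- illustrative sketch, not from the original source: the freeze/select
# pattern used in main() above (detector frozen, only interaction-head parameters
# optimised), demonstrated on a throwaway module; all names here are hypothetical.
def _demo_freeze_and_select(lr_head=1e-4):
    import torch.nn as nn
    model = nn.ModuleDict({
        "detector": nn.Linear(4, 4),
        "interaction_head": nn.Linear(4, 2),
    })
    for p in model["detector"].parameters():
        p.requires_grad = False
    params = [p for n, p in model.named_parameters()
              if "interaction_head" in n and p.requires_grad]
    # only the interaction-head weights end up in the optimiser
    return torch.optim.AdamW([{"params": params}], lr=lr_head)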
@torch.no_grad()
def sanity_check(args):
dataset = DataFactory(name='hicodet', partition=args.partitions[0], data_root=args.data_root)
args.human_idx = 0; args.num_classes = 117
object_to_target = dataset.dataset.object_to_verb
upt = build_detector(args, object_to_target)
if args.eval:
upt.eval()
image, target = dataset[0]
outputs = upt([image], [target])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--lr-head', default=1e-4, type=float)
parser.add_argument('--batch-size', default=2, type=int)
parser.add_argument('--weight-decay', default=1e-4, type=float)
parser.add_argument('--epochs', default=20, type=int)
parser.add_argument('--lr-drop', default=10, type=int)
parser.add_argument('--clip-max-norm', default=0.1, type=float)
parser.add_argument('--backbone', default='resnet50', type=str)
parser.add_argument('--dilation', action='store_true')
parser.add_argument('--position-embedding', default='sine', type=str, choices=('sine', 'learned'))
parser.add_argument('--repr-dim', default=512, type=int)
parser.add_argument('--hidden-dim', default=256, type=int)
parser.add_argument('--enc-layers', default=6, type=int)
parser.add_argument('--dec-layers', default=6, type=int)
parser.add_argument('--dim-feedforward', default=2048, type=int)
parser.add_argument('--dropout', default=0.1, type=float)
parser.add_argument('--nheads', default=8, type=int)
parser.add_argument('--num-queries', default=100, type=int)
parser.add_argument('--pre-norm', action='store_true')
parser.add_argument('--no-aux-loss', dest='aux_loss', action='store_false')
parser.add_argument('--set-cost-class', default=1, type=float)
parser.add_argument('--set-cost-bbox', default=5, type=float)
parser.add_argument('--set-cost-giou', default=2, type=float)
parser.add_argument('--bbox-loss-coef', default=5, type=float)
parser.add_argument('--giou-loss-coef', default=2, type=float)
parser.add_argument('--eos-coef', default=0.1, type=float,
help="Relative classification weight of the no-object class")
parser.add_argument('--alpha', default=0.5, type=float)
parser.add_argument('--gamma', default=0.2, type=float)
parser.add_argument('--dataset', default='hicodet', type=str)
parser.add_argument('--partitions', nargs='+', default=['train2015', 'test2015'], type=str)
parser.add_argument('--num-workers', default=2, type=int)
parser.add_argument('--data-root', default='./hicodet')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--port', default='1234', type=str)
parser.add_argument('--seed', default=66, type=int)
parser.add_argument('--pretrained', default='', help='Path to a pretrained detector')
parser.add_argument('--resume', default='', help='Resume from a model')
parser.add_argument('--output-dir', default='checkpoints')
parser.add_argument('--print-interval', default=500, type=int)
parser.add_argument('--world-size', default=1, type=int)
parser.add_argument('--eval', action='store_true')
parser.add_argument('--cache', action='store_true')
parser.add_argument('--sanity', action='store_true')
parser.add_argument('--box-score-thresh', default=0.2, type=float)
parser.add_argument('--fg-iou-thresh', default=0.5, type=float)
parser.add_argument('--min-instances', default=3, type=int)
parser.add_argument('--max-instances', default=15, type=int)
args = parser.parse_args()
print(args)
if args.sanity:
sanity_check(args)
sys.exit()
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = args.port
mp.spawn(main, nprocs=args.world_size, args=(args,))
| true
| true
|
790de8a08741e4d115e76027b4a83092d0197e46
| 149
|
py
|
Python
|
showcase/post/admin.py
|
EvanPatrick423/showcase
|
779d73740c977ae7e7d7fa130ceea004013fc5e2
|
[
"MIT"
] | null | null | null |
showcase/post/admin.py
|
EvanPatrick423/showcase
|
779d73740c977ae7e7d7fa130ceea004013fc5e2
|
[
"MIT"
] | 6
|
2020-06-12T18:49:43.000Z
|
2021-09-22T19:11:03.000Z
|
showcase/post/admin.py
|
EvanPatrick423/showcase
|
779d73740c977ae7e7d7fa130ceea004013fc5e2
|
[
"MIT"
] | 1
|
2020-06-08T18:03:11.000Z
|
2020-06-08T18:03:11.000Z
|
from django.contrib import admin
from .models import Post, Reply
# Register your models here.
admin.site.register(Post)
admin.site.register(Reply)
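# Editor's note -- illustrative sketch, not part of the original file: a minimal
# ModelAdmin customisation. It is defined but not registered here; it would replace
# the bare register() call via admin.site.register(Post, PostAdmin) or the
# @admin.register(Post) decorator. The class name and fields are hypothetical.
class PostAdmin(admin.ModelAdmin):
    # "id" is the implicit primary key, so it is available on any model
    list_display = ("id",)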
| 18.625
| 32
| 0.791946
|
from django.contrib import admin
from .models import Post, Reply
admin.site.register(Post)
admin.site.register(Reply)
| true
| true
|
790dea0f54387804d817c82c1733143d3d140ea9
| 220
|
py
|
Python
|
filter_plugins/env_json_map.py
|
paulrbr-fl/ansible-clever
|
b731b96649a95825576060e8821e247b99aa8f2d
|
[
"MIT"
] | 7
|
2020-10-12T16:25:30.000Z
|
2021-02-26T15:47:17.000Z
|
filter_plugins/env_json_map.py
|
paulrbr-fl/ansible-clever
|
b731b96649a95825576060e8821e247b99aa8f2d
|
[
"MIT"
] | 1
|
2020-10-12T16:00:35.000Z
|
2020-10-12T16:00:35.000Z
|
filter_plugins/env_json_map.py
|
paulrbr-fl/ansible-clever
|
b731b96649a95825576060e8821e247b99aa8f2d
|
[
"MIT"
] | 2
|
2020-12-08T10:17:41.000Z
|
2021-06-03T09:32:49.000Z
|
#!/usr/bin/env python
class FilterModule(object):
def filters(self):
return {'json_env_map': self.json_env_map}
def json_env_map(self, env):
return [{'name': k, 'value': str(v)} for k,v in env.items()]
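# Editor's note -- illustrative usage, not part of the original plugin. In a playbook
# the filter would typically be applied to an env mapping, e.g.
#   "{{ {'PORT': 8080, 'DEBUG': true} | json_env_map }}"
# yielding a list of {'name': ..., 'value': ...} dicts with stringified values.
if __name__ == '__main__':
    # quick local check of the filter outside Ansible
    print(FilterModule().filters()['json_env_map']({'PORT': 8080, 'DEBUG': True}))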
| 24.444444
| 66
| 0.65
|
class FilterModule(object):
def filters(self):
return {'json_env_map': self.json_env_map}
def json_env_map(self, env):
return [{'name': k, 'value': str(v)} for k,v in env.items()]
| true
| true
|
790deb812a406c2b661121d481d09392eca52b4d
| 27,013
|
py
|
Python
|
python/ccxt/bigone.py
|
tssujt/ccxt
|
95a8befe3540043bac408b36794342b0a9e724cd
|
[
"MIT"
] | null | null | null |
python/ccxt/bigone.py
|
tssujt/ccxt
|
95a8befe3540043bac408b36794342b0a9e724cd
|
[
"MIT"
] | null | null | null |
python/ccxt/bigone.py
|
tssujt/ccxt
|
95a8befe3540043bac408b36794342b0a9e724cd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 2
except NameError:
basestring = str # Python 3
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidNonce
class bigone (Exchange):
def describe(self):
return self.deep_extend(super(bigone, self).describe(), {
'id': 'bigone',
'name': 'BigONE',
'countries': ['GB'],
'version': 'v2',
'has': {
'fetchTickers': True,
'fetchOpenOrders': True,
'fetchMyTrades': True,
'fetchDepositAddress': True,
'withdraw': True,
'fetchOHLCV': False,
'createMarketOrder': False,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/42803606-27c2b5ec-89af-11e8-8d15-9c8c245e8b2c.jpg',
'api': {
'public': 'https://big.one/api/v2',
'private': 'https://big.one/api/v2/viewer',
},
'www': 'https://big.one',
'doc': 'https://open.big.one/docs/api.html',
'fees': 'https://help.big.one/hc/en-us/articles/115001933374-BigONE-Fee-Policy',
'referral': 'https://b1.run/users/new?code=D3LLBVFT',
},
'api': {
'public': {
'get': [
'ping', # timestamp in nanoseconds
'markets',
'markets/{symbol}/depth',
'markets/{symbol}/trades',
'markets/{symbol}/ticker',
'orders',
'orders/{id}',
'tickers',
'trades',
],
},
'private': {
'get': [
'accounts',
'orders',
'orders/{order_id}',
],
'post': [
'orders',
'orders/{order_id}/cancel',
'orders/cancel_all',
],
},
},
'fees': {
'trading': {
'maker': 0.1 / 100,
'taker': 0.1 / 100,
},
'funding': {
# HARDCODING IS DEPRECATED THE FEES BELOW ARE TO BE REMOVED SOON
'withdraw': {
'BTC': 0.002,
'ETH': 0.01,
'EOS': 0.01,
'ZEC': 0.002,
'LTC': 0.01,
'QTUM': 0.01,
# 'INK': 0.01 QTUM,
# 'BOT': 0.01 QTUM,
'ETC': 0.01,
'GAS': 0.0,
'BTS': 1.0,
'GXS': 0.1,
'BITCNY': 1.0,
},
},
},
'exceptions': {
'codes': {
'401': AuthenticationError,
'10030': InvalidNonce, # {"message":"invalid nonce, nonce should be a 19bits number","code":10030}
},
'detail': {
'Internal server error': ExchangeNotAvailable,
},
},
})
def fetch_markets(self):
response = self.publicGetMarkets()
markets = response['data']
result = []
self.options['marketsByUuid'] = {}
for i in range(0, len(markets)):
#
# { uuid: "550b34db-696e-4434-a126-196f827d9172",
# quoteScale: 3,
# quoteAsset: { uuid: "17082d1c-0195-4fb6-8779-2cdbcb9eeb3c",
# symbol: "USDT",
# name: "TetherUS" },
# name: "BTC-USDT",
# baseScale: 5,
# baseAsset: { uuid: "0df9c3c3-255a-46d7-ab82-dedae169fba9",
# symbol: "BTC",
# name: "Bitcoin" } }}
#
market = markets[i]
id = market['name']
uuid = market['uuid']
baseId = market['baseAsset']['symbol']
quoteId = market['quoteAsset']['symbol']
base = self.common_currency_code(baseId)
quote = self.common_currency_code(quoteId)
symbol = base + '/' + quote
precision = {
'amount': market['baseScale'],
'price': market['quoteScale'],
}
entry = {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision['amount']),
'max': math.pow(10, precision['amount']),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': math.pow(10, precision['price']),
},
'cost': {
'min': None,
'max': None,
},
},
'info': market,
}
self.options['marketsByUuid'][uuid] = entry
result.append(entry)
return result
def parse_ticker(self, ticker, market=None):
#
# [
# {
# "volume": "190.4925000000000000",
# "open": "0.0777371200000000",
# "market_uuid": "38dd30bf-76c2-4777-ae2a-a3222433eef3",
# "market_id": "ETH-BTC",
# "low": "0.0742925600000000",
# "high": "0.0789150000000000",
# "daily_change_perc": "-0.3789180767180466680525339760",
# "daily_change": "-0.0002945600000000",
# "close": "0.0774425600000000", # last price
# "bid": {
# "price": "0.0764777900000000",
# "amount": "6.4248000000000000"
# },
# "ask": {
# "price": "0.0774425600000000",
# "amount": "1.1741000000000000"
# }
# }
# ]
#
if market is None:
marketId = self.safe_string(ticker, 'market_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = None
if market is not None:
symbol = market['symbol']
timestamp = self.milliseconds()
close = self.safe_float(ticker, 'close')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker['bid'], 'price'),
'bidVolume': self.safe_float(ticker['bid'], 'amount'),
'ask': self.safe_float(ticker['ask'], 'price'),
'askVolume': self.safe_float(ticker['ask'], 'amount'),
'vwap': None,
'open': self.safe_float(ticker, 'open'),
'close': close,
'last': close,
'previousClose': None,
'change': self.safe_float(ticker, 'daily_change'),
'percentage': self.safe_float(ticker, 'daily_change_perc'),
'average': None,
'baseVolume': self.safe_float(ticker, 'volume'),
'quoteVolume': None,
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicGetMarketsSymbolTicker(self.extend({
'symbol': market['id'],
}, params))
return self.parse_ticker(response['data'], market)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetTickers(params)
tickers = response['data']
result = {}
for i in range(0, len(tickers)):
ticker = self.parse_ticker(tickers[i])
symbol = ticker['symbol']
result[symbol] = ticker
return result
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
response = self.publicGetMarketsSymbolDepth(self.extend({
'symbol': self.market_id(symbol),
}, params))
return self.parse_order_book(response['data'], None, 'bids', 'asks', 'price', 'amount')
def parse_trade(self, trade, market=None):
#
# { node: { taker_side: "ASK",
# price: "0.0694071600000000",
# market_uuid: "38dd30bf-76c2-4777-ae2a-a3222433eef3",
# market_id: "ETH-BTC",
# inserted_at: "2018-07-14T09:22:06Z",
# id: "19913306",
# amount: "0.8800000000000000" },
# cursor: "Y3Vyc29yOnYxOjE5OTEzMzA2" }
#
node = trade['node']
timestamp = self.parse8601(node['inserted_at'])
price = self.safe_float(node, 'price')
amount = self.safe_float(node, 'amount')
if market is None:
marketId = self.safe_string(node, 'market_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = None
if market is not None:
symbol = market['symbol']
cost = self.cost_to_precision(symbol, price * amount)
side = None
if node['taker_side'] == 'ASK':
side = 'sell'
else:
side = 'buy'
return {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': self.safe_string(node, 'id'),
'order': None,
'type': 'limit',
'side': side,
'price': price,
'amount': amount,
'cost': float(cost),
'fee': None,
'info': trade,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['first'] = limit
response = self.publicGetMarketsSymbolTrades(self.extend(request, params))
#
# {data: {page_info: { start_cursor: "Y3Vyc29yOnYxOjE5OTEzMzA2",
# has_previous_page: True,
# has_next_page: False,
# end_cursor: "Y3Vyc29yOnYxOjIwMDU0NzIw" },
# edges: [{ node: { taker_side: "ASK",
# price: "0.0694071600000000",
# market_uuid: "38dd30bf-76c2-4777-ae2a-a3222433eef3",
# market_id: "ETH-BTC",
# inserted_at: "2018-07-14T09:22:06Z",
# id: "19913306",
# amount: "0.8800000000000000" },
# cursor: "Y3Vyc29yOnYxOjE5OTEzMzA2" },
# { node: { taker_side: "ASK",
# price: "0.0694071600000000",
# market_uuid: "38dd30bf-76c2-4777-ae2a-a3222433eef3",
# market_id: "ETH-BTC",
# inserted_at: "2018-07-14T09:22:07Z",
# id: "19913307",
# amount: "0.3759000000000000" },
# cursor: "Y3Vyc29yOnYxOjE5OTEzMzA3" },
# { node: { taker_side: "ASK",
# price: "0.0694071600000000",
# market_uuid: "38dd30bf-76c2-4777-ae2a-a3222433eef3",
# market_id: "ETH-BTC",
# inserted_at: "2018-07-14T09:22:08Z",
# id: "19913321",
# amount: "0.2197000000000000" },
# cursor: "Y3Vyc29yOnYxOjE5OTEzMzIx" },
#
return self.parse_trades(response['data']['edges'], market, since, limit)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetAccounts(params)
#
# {data: [{locked_balance: "0",
# balance: "0",
# asset_uuid: "04479958-d7bb-40e4-b153-48bd63f2f77f",
# asset_id: "NKC" },
# {locked_balance: "0",
# balance: "0",
# asset_uuid: "04c8da0e-44fd-4d71-aeb0-8f4d54a4a907",
# asset_id: "UBTC" },
# {locked_balance: "0",
# balance: "0",
# asset_uuid: "05bc0d34-4809-4a39-a3c8-3a1851c8d224",
# asset_id: "READ" },
#
result = {'info': response}
balances = response['data']
for i in range(0, len(balances)):
balance = balances[i]
currencyId = balance['asset_id']
code = self.common_currency_code(currencyId)
if currencyId in self.currencies_by_id:
code = self.currencies_by_id[currencyId]['code']
total = self.safe_float(balance, 'balance')
used = self.safe_float(balance, 'locked_balance')
free = None
if total is not None and used is not None:
free = total - used
account = {
'free': free,
'used': used,
'total': total,
}
result[code] = account
return self.parse_balance(result)
def parse_order(self, order, market=None):
#
# {
# "id": 10,
# "market_uuid": "d2185614-50c3-4588-b146-b8afe7534da6",
# "market_uuid": "BTC-EOS", # not sure which one is correct
# "market_id": "BTC-EOS", # not sure which one is correct
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "FILLED"
# }
#
id = self.safe_string(order, 'id')
if market is None:
marketId = self.safe_string(order, 'market_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
marketUuid = self.safe_string(order, 'market_uuid')
if marketUuid in self.options['marketsByUuid']:
market = self.options['marketsByUuid'][marketUuid]
symbol = None
if market is not None:
symbol = market['symbol']
timestamp = self.parse8601(self.safe_string(order, 'inserted_at'))
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'amount')
filled = self.safe_float(order, 'filled_amount')
remaining = max(0, amount - filled)
status = self.parse_order_status(self.safe_string(order, 'state'))
side = self.safe_string(order, 'side')
if side == 'BID':
side = 'buy'
else:
side = 'sell'
return {
'id': id,
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'status': status,
'symbol': symbol,
'type': None,
'side': side,
'price': price,
'cost': None,
'amount': amount,
'filled': filled,
'remaining': remaining,
'trades': None,
'fee': None,
'info': order,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
side = 'BID' if (side == 'buy') else 'ASK'
request = {
'market_id': market['id'], # market uuid d2185614-50c3-4588-b146-b8afe7534da6, required
'side': side, # order side one of "ASK"/"BID", required
'amount': self.amount_to_precision(symbol, amount), # order amount, string, required
'price': self.price_to_precision(symbol, price), # order price, string, required
}
response = self.privatePostOrders(self.extend(request, params))
#
# {
# "data":
# {
# "id": 10,
# "market_uuid": "BTC-EOS",
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "FILLED"
# }
# }
#
order = self.safe_value(response, 'data')
return self.parse_order(order, market)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {'order_id': id}
response = self.privatePostOrdersOrderIdCancel(self.extend(request, params))
#
# {
# "data":
# {
# "id": 10,
# "market_uuid": "BTC-EOS",
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "FILLED"
# }
# }
#
order = response['data']
return self.parse_order(order)
def cancel_all_orders(self, symbol=None, params={}):
self.load_markets()
response = self.privatePostOrdersOrderIdCancel(params)
#
# [
# {
# "id": 10,
# "market_uuid": "d2185614-50c3-4588-b146-b8afe7534da6",
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "FILLED"
# },
# {
# ...
# },
# ]
#
return self.parse_orders(response)
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
request = {'order_id': id}
response = self.privateGetOrdersOrderId(self.extend(request, params))
#
# {
# "id": 10,
# "market_uuid": "d2185614-50c3-4588-b146-b8afe7534da6",
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "FILLED"
# }
#
return self.parse_order(response)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
# NAME DESCRIPTION EXAMPLE REQUIRED
# market_id market id ETH-BTC True
# after ask for the server to return orders after the cursor dGVzdGN1cmVzZQo False
# before ask for the server to return orders before the cursor dGVzdGN1cmVzZQo False
# first slicing count 20 False
# last slicing count 20 False
# side order side one of "ASK"/"BID" False
# state order state one of "CANCELED"/"FILLED"/"PENDING" False
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'market_id': market['id'],
}
if limit is not None:
request['first'] = limit
response = self.privateGetOrders(self.extend(request, params))
#
# {
# "data": {
# "edges": [
# {
# "node": {
# "id": 10,
# "market_id": "ETH-BTC",
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "FILLED"
# },
# "cursor": "dGVzdGN1cmVzZQo="
# }
# ],
# "page_info": {
# "end_cursor": "dGVzdGN1cmVzZQo=",
# "start_cursor": "dGVzdGN1cmVzZQo=",
# "has_next_page": True,
# "has_previous_page": False
# }
# }
# }
#
data = self.safe_value(response, 'data', {})
orders = self.safe_value(data, 'edges', [])
result = []
for i in range(0, len(orders)):
result.append(self.parse_order(orders[i]['node'], market))
return self.filter_by_symbol_since_limit(result, symbol, since, limit)
def parse_order_status(self, status):
statuses = {
'PENDING': 'open',
'FILLED': 'closed',
'CANCELED': 'canceled',
}
return self.safe_string(statuses, status)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders(symbol, since, limit, self.extend({
'state': 'PENDING',
}, params))
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders(symbol, since, limit, self.extend({
'state': 'FILLED',
}, params))
def nonce(self):
return self.microseconds() * 1000
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = self.urls['api'][api] + '/' + self.implode_params(path, params)
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = self.nonce()
request = {
'type': 'OpenAPI',
'sub': self.apiKey,
'nonce': nonce,
}
jwt = self.jwt(request, self.secret)
headers = {
'Authorization': 'Bearer ' + jwt,
}
if method == 'GET':
if query:
url += '?' + self.urlencode(query)
elif method == 'POST':
headers['Content-Type'] = 'application/json'
body = self.json(query)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body):
if not isinstance(body, basestring):
return # fallback to default error handler
if len(body) < 2:
return # fallback to default error handler
if (body[0] == '{') or (body[0] == '['):
response = json.loads(body)
#
# {"errors":{"detail":"Internal server error"}}
# {"errors":[{"message":"invalid nonce, nonce should be a 19bits number","code":10030}],"data":null}
#
error = self.safe_value(response, 'error')
errors = self.safe_value(response, 'errors')
data = self.safe_value(response, 'data')
if error is not None or errors is not None or data is None:
feedback = self.id + ' ' + self.json(response)
code = None
if error is not None:
code = self.safe_integer(error, 'code')
exceptions = self.exceptions['codes']
if errors is not None:
if self.isArray(errors):
code = self.safe_string(errors[0], 'code')
else:
code = self.safe_string(errors, 'detail')
exceptions = self.exceptions['detail']
if code in exceptions:
raise exceptions[code](feedback)
else:
raise ExchangeError(feedback)
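# Editor's note -- illustrative usage sketch, not part of the generated file.
# Calling the public endpoints defined above through ccxt would look roughly like
# this (network access required); the helper name is hypothetical.
def _demo_public_calls():
    import ccxt
    exchange = ccxt.bigone()
    markets = exchange.load_markets()
    ticker = exchange.fetch_ticker('ETH/BTC')
    return markets, ticker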
| 41.367534
| 126
| 0.424018
|
from ccxt.base.exchange import Exchange
try:
basestring
except NameError:
basestring = str
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidNonce
class bigone (Exchange):
def describe(self):
return self.deep_extend(super(bigone, self).describe(), {
'id': 'bigone',
'name': 'BigONE',
'countries': ['GB'],
'version': 'v2',
'has': {
'fetchTickers': True,
'fetchOpenOrders': True,
'fetchMyTrades': True,
'fetchDepositAddress': True,
'withdraw': True,
'fetchOHLCV': False,
'createMarketOrder': False,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/42803606-27c2b5ec-89af-11e8-8d15-9c8c245e8b2c.jpg',
'api': {
'public': 'https://big.one/api/v2',
'private': 'https://big.one/api/v2/viewer',
},
'www': 'https://big.one',
'doc': 'https://open.big.one/docs/api.html',
'fees': 'https://help.big.one/hc/en-us/articles/115001933374-BigONE-Fee-Policy',
'referral': 'https://b1.run/users/new?code=D3LLBVFT',
},
'api': {
'public': {
'get': [
'ping',
'markets',
'markets/{symbol}/depth',
'markets/{symbol}/trades',
'markets/{symbol}/ticker',
'orders',
'orders/{id}',
'tickers',
'trades',
],
},
'private': {
'get': [
'accounts',
'orders',
'orders/{order_id}',
],
'post': [
'orders',
'orders/{order_id}/cancel',
'orders/cancel_all',
],
},
},
'fees': {
'trading': {
'maker': 0.1 / 100,
'taker': 0.1 / 100,
},
'funding': {
'withdraw': {
'BTC': 0.002,
'ETH': 0.01,
'EOS': 0.01,
'ZEC': 0.002,
'LTC': 0.01,
'QTUM': 0.01,
'ETC': 0.01,
'GAS': 0.0,
'BTS': 1.0,
'GXS': 0.1,
'BITCNY': 1.0,
},
},
},
'exceptions': {
'codes': {
'401': AuthenticationError,
'10030': InvalidNonce,
},
'detail': {
'Internal server error': ExchangeNotAvailable,
},
},
})
def fetch_markets(self):
response = self.publicGetMarkets()
markets = response['data']
result = []
self.options['marketsByUuid'] = {}
for i in range(0, len(markets)):
market = markets[i]
id = market['name']
uuid = market['uuid']
baseId = market['baseAsset']['symbol']
quoteId = market['quoteAsset']['symbol']
base = self.common_currency_code(baseId)
quote = self.common_currency_code(quoteId)
symbol = base + '/' + quote
precision = {
'amount': market['baseScale'],
'price': market['quoteScale'],
}
entry = {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision['amount']),
'max': math.pow(10, precision['amount']),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': math.pow(10, precision['price']),
},
'cost': {
'min': None,
'max': None,
},
},
'info': market,
}
self.options['marketsByUuid'][uuid] = entry
result.append(entry)
return result
def parse_ticker(self, ticker, market=None):
if market is None:
marketId = self.safe_string(ticker, 'market_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = None
if market is not None:
symbol = market['symbol']
timestamp = self.milliseconds()
close = self.safe_float(ticker, 'close')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker['bid'], 'price'),
'bidVolume': self.safe_float(ticker['bid'], 'amount'),
'ask': self.safe_float(ticker['ask'], 'price'),
'askVolume': self.safe_float(ticker['ask'], 'amount'),
'vwap': None,
'open': self.safe_float(ticker, 'open'),
'close': close,
'last': close,
'previousClose': None,
'change': self.safe_float(ticker, 'daily_change'),
'percentage': self.safe_float(ticker, 'daily_change_perc'),
'average': None,
'baseVolume': self.safe_float(ticker, 'volume'),
'quoteVolume': None,
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicGetMarketsSymbolTicker(self.extend({
'symbol': market['id'],
}, params))
return self.parse_ticker(response['data'], market)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetTickers(params)
tickers = response['data']
result = {}
for i in range(0, len(tickers)):
ticker = self.parse_ticker(tickers[i])
symbol = ticker['symbol']
result[symbol] = ticker
return result
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
response = self.publicGetMarketsSymbolDepth(self.extend({
'symbol': self.market_id(symbol),
}, params))
return self.parse_order_book(response['data'], None, 'bids', 'asks', 'price', 'amount')
def parse_trade(self, trade, market=None):
node = trade['node']
timestamp = self.parse8601(node['inserted_at'])
price = self.safe_float(node, 'price')
amount = self.safe_float(node, 'amount')
if market is None:
marketId = self.safe_string(node, 'market_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = None
if market is not None:
symbol = market['symbol']
cost = self.cost_to_precision(symbol, price * amount)
side = None
if node['taker_side'] == 'ASK':
side = 'sell'
else:
side = 'buy'
return {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': self.safe_string(node, 'id'),
'order': None,
'type': 'limit',
'side': side,
'price': price,
'amount': amount,
'cost': float(cost),
'fee': None,
'info': trade,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['first'] = limit
response = self.publicGetMarketsSymbolTrades(self.extend(request, params))
return self.parse_trades(response['data']['edges'], market, since, limit)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetAccounts(params)
result = {'info': response}
balances = response['data']
for i in range(0, len(balances)):
balance = balances[i]
currencyId = balance['asset_id']
code = self.common_currency_code(currencyId)
if currencyId in self.currencies_by_id:
code = self.currencies_by_id[currencyId]['code']
total = self.safe_float(balance, 'balance')
used = self.safe_float(balance, 'locked_balance')
free = None
if total is not None and used is not None:
free = total - used
account = {
'free': free,
'used': used,
'total': total,
}
result[code] = account
return self.parse_balance(result)
def parse_order(self, order, market=None):
id = self.safe_string(order, 'id')
if market is None:
marketId = self.safe_string(order, 'market_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
marketUuid = self.safe_string(order, 'market_uuid')
if marketUuid in self.options['marketsByUuid']:
market = self.options['marketsByUuid'][marketUuid]
symbol = None
if market is not None:
symbol = market['symbol']
timestamp = self.parse8601(self.safe_string(order, 'inserted_at'))
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'amount')
filled = self.safe_float(order, 'filled_amount')
remaining = max(0, amount - filled)
status = self.parse_order_status(self.safe_string(order, 'state'))
side = self.safe_string(order, 'side')
if side == 'BID':
side = 'buy'
else:
side = 'sell'
return {
'id': id,
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'status': status,
'symbol': symbol,
'type': None,
'side': side,
'price': price,
'cost': None,
'amount': amount,
'filled': filled,
'remaining': remaining,
'trades': None,
'fee': None,
'info': order,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
side = 'BID' if (side == 'buy') else 'ASK'
request = {
'market_id': market['id'],
'side': side,
'amount': self.amount_to_precision(symbol, amount),
'price': self.price_to_precision(symbol, price),
}
response = self.privatePostOrders(self.extend(request, params))
order = self.safe_value(response, 'data')
return self.parse_order(order, market)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {'order_id': id}
response = self.privatePostOrdersOrderIdCancel(self.extend(request, params))
order = response['data']
return self.parse_order(order)
def cancel_all_orders(self, symbol=None, params={}):
self.load_markets()
response = self.privatePostOrdersOrderIdCancel(params)
return self.parse_orders(response)
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
request = {'order_id': id}
response = self.privateGetOrdersOrderId(self.extend(request, params))
return self.parse_order(response)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'market_id': market['id'],
}
if limit is not None:
request['first'] = limit
response = self.privateGetOrders(self.extend(request, params))
data = self.safe_value(response, 'data', {})
orders = self.safe_value(data, 'edges', [])
result = []
for i in range(0, len(orders)):
result.append(self.parse_order(orders[i]['node'], market))
return self.filter_by_symbol_since_limit(result, symbol, since, limit)
def parse_order_status(self, status):
statuses = {
'PENDING': 'open',
'FILLED': 'closed',
'CANCELED': 'canceled',
}
return self.safe_string(statuses, status)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders(symbol, since, limit, self.extend({
'state': 'PENDING',
}, params))
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders(symbol, since, limit, self.extend({
'state': 'FILLED',
}, params))
def nonce(self):
return self.microseconds() * 1000
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = self.urls['api'][api] + '/' + self.implode_params(path, params)
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = self.nonce()
request = {
'type': 'OpenAPI',
'sub': self.apiKey,
'nonce': nonce,
}
jwt = self.jwt(request, self.secret)
headers = {
'Authorization': 'Bearer ' + jwt,
}
if method == 'GET':
if query:
url += '?' + self.urlencode(query)
elif method == 'POST':
headers['Content-Type'] = 'application/json'
body = self.json(query)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body):
if not isinstance(body, basestring):
return
if len(body) < 2:
return
if (body[0] == '{') or (body[0] == '['):
response = json.loads(body)
error = self.safe_value(response, 'error')
errors = self.safe_value(response, 'errors')
data = self.safe_value(response, 'data')
if error is not None or errors is not None or data is None:
feedback = self.id + ' ' + self.json(response)
code = None
if error is not None:
code = self.safe_integer(error, 'code')
exceptions = self.exceptions['codes']
if errors is not None:
if self.isArray(errors):
code = self.safe_string(errors[0], 'code')
else:
code = self.safe_string(errors, 'detail')
exceptions = self.exceptions['detail']
if code in exceptions:
raise exceptions[code](feedback)
else:
raise ExchangeError(feedback)
| true
| true
|
790debf3909097b9e0411b0ed00743e6150e0725
| 5,155
|
py
|
Python
|
code/modbus_server_py3.py
|
NanoDataCenter/nano_data_center
|
76ad521e1a5139a37df80214af1413d2fd4ade60
|
[
"MIT"
] | 2
|
2018-02-21T03:46:51.000Z
|
2019-12-24T16:40:51.000Z
|
code/modbus_server_py3.py
|
NanoDataCenter/nano_data_center
|
76ad521e1a5139a37df80214af1413d2fd4ade60
|
[
"MIT"
] | 7
|
2020-07-16T19:54:08.000Z
|
2022-03-02T03:29:07.000Z
|
code/modbus_server_py3.py
|
NanoDataCenter/nano_data_center
|
76ad521e1a5139a37df80214af1413d2fd4ade60
|
[
"MIT"
] | 2
|
2018-04-16T07:02:35.000Z
|
2020-07-23T21:57:19.000Z
|
import time
import base64
from datetime import datetime
import sys
import json
from redis_support_py3.graph_query_support_py3 import Query_Support
from redis_support_py3.construct_data_handlers_py3 import Generate_Handlers
from modbus_redis_server_py3.modbus_serial_ctrl_py3 import ModbusSerialCtrl
from modbus_redis_server_py3.msg_manager_py3 import MessageManager
from modbus_redis_server_py3.rs485_mgr_py3 import RS485_Mgr
from modbus_redis_server_py3.modbus_serial_ctrl_py3 import ModbusSerialCtrl
from modbus_redis_server_py3.msg_manager_py3 import MessageManager
from modbus_redis_server_py3.modbus_statistics_py3 import Statistic_Handler
#from redis_support_py3.redis_rpc_server_py3 import Redis_Rpc_Server
class Modbus_Server( object ):
def __init__( self, msg_handler,generate_handlers,data_structures,remote_dict): # fill in proceedures
self.msg_handler = msg_handler
self.statistic_handler = Statistic_Handler(generate_handlers,data_structures,remote_dict)
self.rpc_server_handle = generate_handlers.construct_rpc_sever(data_structures["PLC_RPC_SERVER"] )
self.rpc_server_handle.register_call_back( "modbus_relay", self.process_modbus_message)
self.rpc_server_handle.register_call_back( "ping_message", self.process_ping_message)
self.rpc_server_handle.add_time_out_function(self.process_null_msg)
self.rpc_server_handle.start()
def process_ping_message(self, address):
temp = self.msg_handler.ping_devices([address])
return temp[0]["result"]
def process_modbus_message( self,input_msg ):
address = input_msg[0]
self.statistic_handler.process_start_message( address )
failure, retries, output_message = self.msg_handler.process_msg( input_msg )
if failure != 0:
output_message = "@"
self.statistic_handler.log_bad_message( address, retries )
else:
self.statistic_handler.log_good_message( address, retries )
self.statistic_handler.process_end_message()
return output_message
def process_null_msg( self ):
self.statistic_handler.process_null_message()
def find_remotes(qs,link_name):
return_value = {}
query_list = []
query_list = qs.add_match_relationship( query_list,relationship="SITE",label=redis_site["site"] )
query_list = qs.add_match_relationship( query_list,relationship="PLC_SERVER",label=plc_server_name )
query_list = qs.add_match_relationship( query_list, relationship = "IO_LINK",label=link_name)
query_list = qs.add_match_terminal( query_list, relationship = "REMOTE_UNIT")
remote_sets, remote_sources = qs.match_list(query_list)
for i in remote_sources:
return_value[i["modbus_address"]] = i["parameters"]
return return_value
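# Editor's note -- hypothetical illustration, not from the original source: the dict
# returned by find_remotes() maps a modbus address to that remote's parameter block;
# only the "address" key is implied by later use, the other fields are guesses.
_example_remote_dict = {
    100: {"address": 100, "name": "remote_unit_100"},
}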
if __name__ == "__main__":
plc_server_name = sys.argv[1]
file_handle = open("system_data_files/redis_server.json",'r')
data = file_handle.read()
file_handle.close()
redis_site = json.loads(data)
qs = Query_Support( redis_site )
# find data structures
query_list = []
query_list = qs.add_match_relationship( query_list,relationship="SITE",label=redis_site["site"] )
query_list = qs.add_match_relationship( query_list,relationship="PLC_SERVER",label=plc_server_name )
query_list = qs.add_match_terminal( query_list,
relationship = "PACKAGE",
property_mask={"name":"PLC_SERVER_DATA"} )
package_sets, package_sources = qs.match_list(query_list)
package = package_sources[0]
generate_handlers = Generate_Handlers(package,qs)
data_structures = package["data_structures"]
#
# finding IO_LINKS
#
#
query_list = []
query_list = qs.add_match_relationship( query_list,relationship="SITE",label=redis_site["site"] )
query_list = qs.add_match_relationship( query_list,relationship="PLC_SERVER",label=plc_server_name )
query_list = qs.add_match_terminal( query_list, relationship = "IO_LINK")
serial_sets, serial_sources = qs.match_list(query_list)
rs485_interface = RS485_Mgr()
interfaces = {}
for i in serial_sources:
i["handler"] = rs485_interface
interfaces[i["name"]] = i
msg_mgr = MessageManager()
for i,item in interfaces.items():
remote_dict = find_remotes(qs,item["name"])
modbus_serial_ctrl = ModbusSerialCtrl( item, remote_dict)
for j,k in remote_dict.items():
msg_mgr.add_device( k["address"], modbus_serial_ctrl )
#print(msg_mgr.ping_devices([100]))
Modbus_Server( msg_mgr,generate_handlers,data_structures,remote_dict )
| 36.560284
| 107
| 0.671581
|
import time
import base64
from datetime import datetime
import sys
import json
from redis_support_py3.graph_query_support_py3 import Query_Support
from redis_support_py3.construct_data_handlers_py3 import Generate_Handlers
from modbus_redis_server_py3.modbus_serial_ctrl_py3 import ModbusSerialCtrl
from modbus_redis_server_py3.msg_manager_py3 import MessageManager
from modbus_redis_server_py3.rs485_mgr_py3 import RS485_Mgr
from modbus_redis_server_py3.modbus_serial_ctrl_py3 import ModbusSerialCtrl
from modbus_redis_server_py3.msg_manager_py3 import MessageManager
from modbus_redis_server_py3.modbus_statistics_py3 import Statistic_Handler
class Modbus_Server( object ):
def __init__( self, msg_handler,generate_handlers,data_structures,remote_dict):
self.msg_handler = msg_handler
self.statistic_handler = Statistic_Handler(generate_handlers,data_structures,remote_dict)
self.rpc_server_handle = generate_handlers.construct_rpc_sever(data_structures["PLC_RPC_SERVER"] )
self.rpc_server_handle.register_call_back( "modbus_relay", self.process_modbus_message)
self.rpc_server_handle.register_call_back( "ping_message", self.process_ping_message)
self.rpc_server_handle.add_time_out_function(self.process_null_msg)
self.rpc_server_handle.start()
def process_ping_message(self, address):
temp = self.msg_handler.ping_devices([address])
return temp[0]["result"]
def process_modbus_message( self,input_msg ):
address = input_msg[0]
self.statistic_handler.process_start_message( address )
failure, retries, output_message = self.msg_handler.process_msg( input_msg )
if failure != 0:
output_message = "@"
self.statistic_handler.log_bad_message( address, retries )
else:
self.statistic_handler.log_good_message( address, retries )
self.statistic_handler.process_end_message()
return output_message
def process_null_msg( self ):
self.statistic_handler.process_null_message()
def find_remotes(qs,link_name):
return_value = {}
query_list = []
query_list = qs.add_match_relationship( query_list,relationship="SITE",label=redis_site["site"] )
query_list = qs.add_match_relationship( query_list,relationship="PLC_SERVER",label=plc_server_name )
query_list = qs.add_match_relationship( query_list, relationship = "IO_LINK",label=link_name)
query_list = qs.add_match_terminal( query_list, relationship = "REMOTE_UNIT")
remote_sets, remote_sources = qs.match_list(query_list)
for i in remote_sources:
return_value[i["modbus_address"]] = i["parameters"]
return return_value
if __name__ == "__main__":
plc_server_name = sys.argv[1]
file_handle = open("system_data_files/redis_server.json",'r')
data = file_handle.read()
file_handle.close()
redis_site = json.loads(data)
qs = Query_Support( redis_site )
query_list = []
query_list = qs.add_match_relationship( query_list,relationship="SITE",label=redis_site["site"] )
query_list = qs.add_match_relationship( query_list,relationship="PLC_SERVER",label=plc_server_name )
query_list = qs.add_match_terminal( query_list,
relationship = "PACKAGE",
property_mask={"name":"PLC_SERVER_DATA"} )
package_sets, package_sources = qs.match_list(query_list)
package = package_sources[0]
generate_handlers = Generate_Handlers(package,qs)
data_structures = package["data_structures"]
query_list = []
query_list = qs.add_match_relationship( query_list,relationship="SITE",label=redis_site["site"] )
query_list = qs.add_match_relationship( query_list,relationship="PLC_SERVER",label=plc_server_name )
query_list = qs.add_match_terminal( query_list, relationship = "IO_LINK")
serial_sets, serial_sources = qs.match_list(query_list)
rs485_interface = RS485_Mgr()
interfaces = {}
for i in serial_sources:
i["handler"] = rs485_interface
interfaces[i["name"]] = i
msg_mgr = MessageManager()
for i,item in interfaces.items():
remote_dict = find_remotes(qs,item["name"])
modbus_serial_ctrl = ModbusSerialCtrl( item, remote_dict)
for j,k in remote_dict.items():
msg_mgr.add_device( k["address"], modbus_serial_ctrl )
Modbus_Server( msg_mgr,generate_handlers,data_structures,remote_dict )
| true
| true
|
790dec5c9feeac8dfc94fdd375692906bfd8bb4d
| 12,628
|
py
|
Python
|
nndrone/converters.py
|
Tevien/NNDrone
|
76dce457324ea03a8757d74f6403fbf60132294b
|
[
"BSD-3-Clause"
] | 3
|
2017-11-06T11:21:20.000Z
|
2018-07-20T14:47:21.000Z
|
nndrone/converters.py
|
Tevien/NNDrone
|
76dce457324ea03a8757d74f6403fbf60132294b
|
[
"BSD-3-Clause"
] | 4
|
2018-01-12T15:49:40.000Z
|
2018-04-06T02:29:56.000Z
|
nndrone/converters.py
|
Tevien/NNDrone
|
76dce457324ea03a8757d74f6403fbf60132294b
|
[
"BSD-3-Clause"
] | 12
|
2018-01-12T15:46:35.000Z
|
2018-06-21T22:41:56.000Z
|
import numpy as np
import pickle
import math
try:
from utilities import dot_loss, next_batch
except ImportError:
from utilities.utilities import dot_loss, next_batch
class DontCacheRef(Exception):
pass
class BasicConverter(object):
def __init__(self, learning_rate = 0.05, batch_size = 1, num_epochs = 300, threshold = 0.02, add_layer_dynamic = False, layer_to_expand = 0):
# training control
self._learning_rate = learning_rate
self._batchSize = batch_size
self._num_epochs = num_epochs
self._threshold = threshold
self._add_layer_dynamic = add_layer_dynamic
self._layer_to_expand = int(layer_to_expand)
# training history
self._updatedLoss = 1000.0
self._diffs = []
self._losses = []
self._updates = []
self._epoch = 0
def losses(self):
return self._losses
def diffs(self):
return self._diffs
def updates(self):
return self._updates
def save_history(self, fname):
f_train = open(fname, 'wb')
training_data = [self._losses, self._diffs, self._updates]
pickle.dump(training_data, f_train)
f_train.close()
def get_refs(self, base_model, datapoints, scaler = None, conv_1d = False, conv_2d = False, cache_data = True):
try:
if not cache_data:
raise DontCacheRef()
# Return the cached list of reference outputs for the base model
return (self.__datapoints, self.__refs)
except (DontCacheRef, AttributeError) as e:
# Create the list of reference outputs for the base model
if conv_1d and conv_2d:
print('ERROR: conv_1d and conv_2d are mutually exclusive')
return None
refs = []
flattened = []
for point in datapoints:
spoint = point
if scaler and not conv_2d:
spoint = scaler.transform([point])
prob = 0.0
if conv_1d:
prob = base_model.predict_proba(np.expand_dims(np.expand_dims(spoint, axis = 2), axis = 0))[0][0]
elif conv_2d:
# this will match if original model was trained with correct dimensionality
prob = base_model.predict_proba(np.expand_dims(spoint, axis = 0))
else:
prob = base_model.predict_proba(spoint.reshape(1, -1))[0][0]
refs.append(prob)
flattened.append(spoint.flatten().tolist())
self.__datapoints = np.asarray(flattened)
self.__refs = np.asarray(refs)
return (self.__datapoints, self.__refs)
def convert_model(self, drone_model, base_model, datapoints, scaler = None, conv_1d = False, conv_2d = False, cache_data = True, epoch_reset = False):
# Get the list of reference outputs for the base model
datapoints_for_drone, refs = self.get_refs(base_model, datapoints, scaler, conv_1d, conv_2d, cache_data)
inflate = 0 # to inflate the learning without change iterations
if epoch_reset:
self._epoch = 0
avloss = 0
# convert until min epochs are passed and leave only if loss at minima
while (self._epoch < self._num_epochs) or (self._updatedLoss < avloss):
# initialize the total loss for the epoch
epochloss = []
# loop over our data in batches
for (batchX, batchY) in next_batch(datapoints_for_drone, refs, self._batchSize):
batchY = np.array(batchY)
if batchX.shape[0] != self._batchSize:
print('Batch size insufficient (%s), continuing...' % batchY.shape[0])
continue
# Find current output and calculate loss for our graph
preds = drone_model.evaluate_total(batchX, debug = False)
loss, error = dot_loss(preds, batchY)
epochloss.append(loss)
# Update the model
drone_model.update(batchX, batchY, self._learning_rate)
avloss = np.average(epochloss)
diff = 0.0
if self._epoch > 0:
# is the relative improvement of the loss too small, smaller than threshold
diff = math.fabs(avloss - self._losses[-1]) / avloss
self._diffs.append(diff)
self._losses.append(avloss)
update = 0
modify = True if (diff < self._threshold) else False
if modify:
# If it is less than the threshold, is it below
# where we last updated, has the drone learned enough
#
# - skip checks if we have never updated before
# - do at least 6 learning iterations before attempting new update
# - use asymptotic exponential to push model to learn
# until its loss is far enough away from previous update,
inflate += 1 # iterate inflating
modify = True if self._updatedLoss == 1000.0 else (avloss < (self._updatedLoss - (50.0 * (1.0 - np.exp(-0.04 * inflate)) * diff * avloss))) and (inflate > 5)
if modify:
update = 1
inflate = 0
print('Model conversion not sufficient, updating...')
print('Last updated loss: %s' % self._updatedLoss)
self._updatedLoss = avloss
if self._add_layer_dynamic:
drone_model.add_layer_dynamic()
else:
drone_model.expand_layer_dynamic(self._layer_to_expand)
print('Model structure is now:')
drone_model.print_layers()
self._updates.append(update)
print('Epoch: %s, loss %s, diff %.5f, last updated loss %.5f' % (self._epoch, avloss, diff, self._updatedLoss))
# update our loss history list by taking the average loss
# across all batches
if self._epoch == 0: # be consistent at the first epoch
self._losses.append(avloss)
self._diffs.append(math.fabs(avloss - self._updatedLoss) / avloss)
self._updates.append(0)
self._epoch += 1
return drone_model
class AdvancedConverter(object):
def __init__(self, learning_rate = 0.05, batch_size = 1, num_epochs = 300, threshold = 0.02, add_layer_dynamic = False, layer_to_expand = None):
# training control
self._learning_rate = learning_rate
self._batchSize = batch_size
self._num_epochs = num_epochs
self._threshold = threshold
self._add_layer_dynamic = add_layer_dynamic
self.__round_robin = False
if layer_to_expand is None:
self.__round_robin = True
self._layer_to_expand = int(layer_to_expand) if layer_to_expand is not None else None
# training history
self._updatedLoss = 1000.0
self._diffs = []
self._losses = []
self._updates = []
self._epoch = 0
self.__rr_begin = 0
self.__rr_last = 0
def losses(self):
return self._losses
def diffs(self):
return self._diffs
def updates(self):
return self._updates
def round_robin(self, num_layers):
self.__rr_last = self.__rr_begin
self.__rr_begin = np.random.randint(0, num_layers - 1) # careful, expanding last layer will change output number
return self.__rr_last
def save_history(self, fname):
f_train = open(fname, 'wb')
training_data = [self._losses, self._diffs, self._updates]
pickle.dump(training_data, f_train)
f_train.close()
def get_refs(self, base_model, datapoints, scaler = None, cache_data = True):
try:
if not cache_data:
raise DontCacheRef()
# Return the cached list of reference outputs for the base model
return (self.__datapoints, self.__refs)
except(DontCacheRef, AttributeError) as e:
# Create the list of reference outputs for the base model
refs = []
datapoints_for_drone = datapoints
if scaler:
datapoints_for_drone = scaler.transform(datapoints)
for point in datapoints_for_drone:
prob = base_model.predict_proba(point)
refs.append(prob)
self.__datapoints = datapoints_for_drone
self.__refs = refs
return (self.__datapoints, self.__refs)
def convert_model(self, drone_model, base_model, datapoints, scaler = None, cache_data = True, epoch_reset = False):
# Get the list of reference outputs for the base model
datapoints_for_drone, refs = self.get_refs(base_model, datapoints, scaler, cache_data)
inflate = 0 # to inflate the learning without change iterations
if epoch_reset:
self._epoch = 0
avloss = 0
# convert until min epochs are passed and leave only if loss at minima
while (self._epoch < self._num_epochs) or (self._updatedLoss < avloss):
# initialize the total loss for the epoch
epochloss = []
# loop over our data in batches
for (batchX, batchY) in next_batch(datapoints_for_drone, refs, self._batchSize):
batchY = np.array(batchY)
if batchX.shape[0] != self._batchSize:
print('Batch size insufficient ({}), continuing...'.format(batchY.shape[0]))
continue
# Find current output and calculate loss for our graph
preds = drone_model.evaluate_total(batchX, debug = False)
loss, error = dot_loss(preds, batchY)
epochloss.append(loss)
# Update the model
drone_model.update(batchX, batchY, self._learning_rate)
avloss = np.average(epochloss)
diff = 0.0
if self._epoch > 0:
# is the relative improvement of the loss too small, smaller than threshold
diff = math.fabs(avloss - self._losses[-1]) / avloss
self._diffs.append(diff)
self._losses.append(avloss)
update = 0
modify = True if (diff < self._threshold) else False
if modify:
# If it is less than the threshold, is it below
# where we last updated, has the drone learned enough
#
# - skip checks if we have never updated before
# - do at least 6 learning iterations before attempting new update
# - use asymptotic exponential to push model to learn
# until its loss is far enough away from previous update,
inflate += 1 # iterate inflating
modify = True if self._updatedLoss == 1000.0 else (avloss < (self._updatedLoss - (50.0 * (1.0 - np.exp(-0.04 * inflate)) * diff * avloss))) and (inflate > 5)
if modify:
update = 1
inflate = 0
print('Model conversion not sufficient, updating...')
print('Last updated loss: %s' % self._updatedLoss)
self._updatedLoss = avloss
if self._add_layer_dynamic:
drone_model.add_layer_dynamic()
elif self._layer_to_expand is not None:
drone_model.expand_layer_dynamic(self._layer_to_expand)
else:
drone_model.expand_layer_dynamic(self.round_robin(drone_model.num_layers()))
print('Model structure is now:')
drone_model.print_layers()
self._updates.append(update)
print('Epoch: %s, loss %s, diff %.5f, last updated loss %.5f' % (self._epoch, avloss, diff, self._updatedLoss))
# update our loss history list by taking the average loss
# across all batches
if self._epoch == 0: # be consistent at the first epoch
self._losses.append(avloss)
self._diffs.append(math.fabs(avloss - self._updatedLoss) / avloss)
self._updates.append(0)
self._epoch += 1
return drone_model
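# Editor's note -- illustrative numerical sketch, not part of the original module: the
# structural-update trigger used in convert_model() fires only once the average loss
# has dropped far enough below the loss recorded at the previous update, with the
# required margin inflated over successive low-improvement iterations.
def _demo_update_trigger(updated_loss=0.10, avloss=0.09, diff=0.01, inflate=6):
    margin = 50.0 * (1.0 - np.exp(-0.04 * inflate)) * diff * avloss
    return (avloss < (updated_loss - margin)) and (inflate > 5)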
| 45.261649
| 177
| 0.578001
|
import numpy as np
import pickle
import math
try:
from utilities import dot_loss, next_batch
except ImportError:
from utilities.utilities import dot_loss, next_batch
class DontCacheRef(Exception):
pass
class BasicConverter(object):
def __init__(self, learning_rate = 0.05, batch_size = 1, num_epochs = 300, threshold = 0.02, add_layer_dynamic = False, layer_to_expand = 0):
self._learning_rate = learning_rate
self._batchSize = batch_size
self._num_epochs = num_epochs
self._threshold = threshold
self._add_layer_dynamic = add_layer_dynamic
self._layer_to_expand = int(layer_to_expand)
self._updatedLoss = 1000.0
self._diffs = []
self._losses = []
self._updates = []
self._epoch = 0
def losses(self):
return self._losses
def diffs(self):
return self._diffs
def updates(self):
return self._updates
def save_history(self, fname):
f_train = open(fname, 'wb')
training_data = [self._losses, self._diffs, self._updates]
pickle.dump(training_data, f_train)
f_train.close()
def get_refs(self, base_model, datapoints, scaler = None, conv_1d = False, conv_2d = False, cache_data = True):
try:
if not cache_data:
raise DontCacheRef()
return (self.__datapoints, self.__refs)
except (DontCacheRef, AttributeError) as e:
if conv_1d and conv_2d:
print('ERROR: conv_1d and conv_2d are mutually exclusive')
return None
refs = []
flattened = []
for point in datapoints:
spoint = point
if scaler and not conv_2d:
spoint = scaler.transform([point])
prob = 0.0
if conv_1d:
prob = base_model.predict_proba(np.expand_dims(np.expand_dims(spoint, axis = 2), axis = 0))[0][0]
elif conv_2d:
prob = base_model.predict_proba(np.expand_dims(spoint, axis = 0))
else:
prob = base_model.predict_proba(spoint.reshape(1, -1))[0][0]
refs.append(prob)
flattened.append(spoint.flatten().tolist())
self.__datapoints = np.asarray(flattened)
self.__refs = np.asarray(refs)
return (self.__datapoints, self.__refs)
def convert_model(self, drone_model, base_model, datapoints, scaler = None, conv_1d = False, conv_2d = False, cache_data = True, epoch_reset = False):
datapoints_for_drone, refs = self.get_refs(base_model, datapoints, scaler, conv_1d, conv_2d, cache_data)
inflate = 0
if epoch_reset:
self._epoch = 0
avloss = 0
while (self._epoch < self._num_epochs) or (self._updatedLoss < avloss):
epochloss = []
for (batchX, batchY) in next_batch(datapoints_for_drone, refs, self._batchSize):
batchY = np.array(batchY)
if batchX.shape[0] != self._batchSize:
print('Batch size insufficient (%s), continuing...' % batchY.shape[0])
continue
preds = drone_model.evaluate_total(batchX, debug = False)
loss, error = dot_loss(preds, batchY)
epochloss.append(loss)
drone_model.update(batchX, batchY, self._learning_rate)
avloss = np.average(epochloss)
diff = 0.0
if self._epoch > 0:
diff = math.fabs(avloss - self._losses[-1]) / avloss
self._diffs.append(diff)
self._losses.append(avloss)
update = 0
modify = True if (diff < self._threshold) else False
if modify:
inflate += 1
modify = True if self._updatedLoss == 1000.0 else (avloss < (self._updatedLoss - (50.0 * (1.0 - np.exp(-0.04 * inflate)) * diff * avloss))) and (inflate > 5)
if modify:
update = 1
inflate = 0
print('Model conversion not sufficient, updating...')
print('Last updated loss: %s' % self._updatedLoss)
self._updatedLoss = avloss
if self._add_layer_dynamic:
drone_model.add_layer_dynamic()
else:
drone_model.expand_layer_dynamic(self._layer_to_expand)
print('Model structure is now:')
drone_model.print_layers()
self._updates.append(update)
print('Epoch: %s, loss %s, diff %.5f, last updated loss %.5f' % (self._epoch, avloss, diff, self._updatedLoss))
if self._epoch == 0:
self._losses.append(avloss)
self._diffs.append(math.fabs(avloss - self._updatedLoss) / avloss)
self._updates.append(0)
self._epoch += 1
return drone_model
class AdvancedConverter(object):
def __init__(self, learning_rate = 0.05, batch_size = 1, num_epochs = 300, threshold = 0.02, add_layer_dynamic = False, layer_to_expand = None):
self._learning_rate = learning_rate
self._batchSize = batch_size
self._num_epochs = num_epochs
self._threshold = threshold
self._add_layer_dynamic = add_layer_dynamic
self.__round_robin = False
if layer_to_expand is None:
self.__round_robin = True
self._layer_to_expand = int(layer_to_expand) if layer_to_expand is not None else None
self._updatedLoss = 1000.0
self._diffs = []
self._losses = []
self._updates = []
self._epoch = 0
self.__rr_begin = 0
self.__rr_last = 0
def losses(self):
return self._losses
def diffs(self):
return self._diffs
def updates(self):
return self._updates
def round_robin(self, num_layers):
self.__rr_last = self.__rr_begin
self.__rr_begin = np.random.randint(0, num_layers - 1)
return self.__rr_last
def save_history(self, fname):
f_train = open(fname, 'wb')
training_data = [self._losses, self._diffs, self._updates]
pickle.dump(training_data, f_train)
f_train.close()
def get_refs(self, base_model, datapoints, scaler = None, cache_data = True):
try:
if not cache_data:
raise DontCacheRef()
return (self.__datapoints, self.__refs)
except (DontCacheRef, AttributeError) as e:
refs = []
datapoints_for_drone = datapoints
if scaler:
datapoints_for_drone = scaler.transform(datapoints)
for point in datapoints_for_drone:
prob = base_model.predict_proba(point)
refs.append(prob)
self.__datapoints = datapoints_for_drone
self.__refs = refs
return (self.__datapoints, self.__refs)
def convert_model(self, drone_model, base_model, datapoints, scaler = None, cache_data = True, epoch_reset = False):
datapoints_for_drone, refs = self.get_refs(base_model, datapoints, scaler, cache_data)
inflate = 0
if epoch_reset:
self._epoch = 0
avloss = 0
while (self._epoch < self._num_epochs) or (self._updatedLoss < avloss):
epochloss = []
for (batchX, batchY) in next_batch(datapoints_for_drone, refs, self._batchSize):
batchY = np.array(batchY)
if batchX.shape[0] != self._batchSize:
print('Batch size insufficient ({}), continuing...'.format(batchY.shape[0]))
continue
preds = drone_model.evaluate_total(batchX, debug = False)
loss, error = dot_loss(preds, batchY)
epochloss.append(loss)
drone_model.update(batchX, batchY, self._learning_rate)
avloss = np.average(epochloss)
diff = 0.0
if self._epoch > 0:
diff = math.fabs(avloss - self._losses[-1]) / avloss
self._diffs.append(diff)
self._losses.append(avloss)
update = 0
modify = True if (diff < self._threshold) else False
if modify:
inflate += 1
modify = True if self._updatedLoss == 1000.0 else (avloss < (self._updatedLoss - (50.0 * (1.0 - np.exp(-0.04 * inflate)) * diff * avloss))) and (inflate > 5)
if modify:
update = 1
inflate = 0
print('Model conversion not sufficient, updating...')
print('Last updated loss: %s' % self._updatedLoss)
self._updatedLoss = avloss
if self._add_layer_dynamic:
drone_model.add_layer_dynamic()
elif self._layer_to_expand is not None:
drone_model.expand_layer_dynamic(self._layer_to_expand)
else:
drone_model.expand_layer_dynamic(self.round_robin(drone_model.num_layers()))
print('Model structure is now:')
drone_model.print_layers()
self._updates.append(update)
print('Epoch: %s, loss %s, diff %.5f, last updated loss %.5f' % (self._epoch, avloss, diff, self._updatedLoss))
if self._epoch == 0:
self._losses.append(avloss)
self._diffs.append(math.fabs(avloss - self._updatedLoss) / avloss)
self._updates.append(0)
self._epoch += 1
return drone_model
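For reference, save_history above serialises the three parallel per-epoch lists as one pickled payload; a minimal round-trip sketch of that layout follows (the file name and the numbers are made up for illustration).

import pickle

# Fabricated per-epoch records in the [losses, diffs, updates] layout used by save_history.
losses, diffs, updates = [0.92, 0.41, 0.39], [0.0, 1.24, 0.05], [0, 0, 1]

with open('drone_history.pkl', 'wb') as f_train:
    pickle.dump([losses, diffs, updates], f_train)

with open('drone_history.pkl', 'rb') as f_train:
    saved_losses, saved_diffs, saved_updates = pickle.load(f_train)

assert saved_updates == updates  # one flag per epoch marking structural updates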
| true
| true
|
790dee9ae31e2e68f764162bc69de5f0809f90e7
| 2,059
|
py
|
Python
|
resilient-circuits/setup.py
|
ibmresilient/resilient-python-api
|
85e0ff684a88f744645c0ace414f51d769bcc3c2
|
[
"MIT"
] | 28
|
2017-12-22T00:26:59.000Z
|
2022-01-22T14:51:33.000Z
|
resilient-circuits/setup.py
|
ibmresilient/resilient-python-api
|
85e0ff684a88f744645c0ace414f51d769bcc3c2
|
[
"MIT"
] | 18
|
2018-03-06T19:04:20.000Z
|
2022-03-21T15:06:30.000Z
|
resilient-circuits/setup.py
|
ibmresilient/resilient-python-api
|
85e0ff684a88f744645c0ace414f51d769bcc3c2
|
[
"MIT"
] | 28
|
2018-05-01T17:53:22.000Z
|
2022-03-28T09:56:59.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2021. All Rights Reserved.
""" setup.py for resilient-circuits Python module """
import io
from os import path
from setuptools import find_packages, setup
this_directory = path.abspath(path.dirname(__file__))
with io.open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="resilient_circuits",
use_scm_version={"root": "../", "relative_to": __file__},
setup_requires=[
"setuptools_scm < 6.0.0;python_version<'3.0'",
"setuptools_scm >= 6.0.0;python_version>='3.0'"
],
license="MIT",
packages=find_packages(),
include_package_data=True,
# Runtime Dependencies
install_requires=[
"stompest>=2.3.0",
"circuits",
"pytz",
"jinja2~=2.0",
"pysocks",
"filelock>=2.0.5",
"watchdog>=0.9.0, <1.0.0; python_version < '3.6.0'",
"watchdog>=0.9.0; python_version >= '3.6.0'",
"resilient>=42.0.0",
"resilient-lib>=42.0.0"
],
entry_points={
"console_scripts": ["res-action-test = resilient_circuits.bin.res_action_test:main",
"resilient-circuits = resilient_circuits.bin.resilient_circuits_cmd:main"]
},
# PyPI metadata
author="IBM SOAR",
description="Framework used to run IBM SOAR Apps and Integrations.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ibmresilient/resilient-python-api/tree/master/resilient-circuits",
project_urls={
"Documentation": "https://ibm.biz/soar-docs",
"API Docs": "https://ibm.biz/soar-python-docs",
"IBM Community": "https://ibm.biz/soarcommunity",
"Change Log": "https://ibm.biz/resilient-circuits-changes"
},
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.6"
],
keywords="ibm soar resilient circuits resilient-circuits"
)
| 31.676923
| 102
| 0.63186
|
import io
from os import path
from setuptools import find_packages, setup
this_directory = path.abspath(path.dirname(__file__))
with io.open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="resilient_circuits",
use_scm_version={"root": "../", "relative_to": __file__},
setup_requires=[
"setuptools_scm < 6.0.0;python_version<'3.0'",
"setuptools_scm >= 6.0.0;python_version>='3.0'"
],
license="MIT",
packages=find_packages(),
include_package_data=True,
install_requires=[
"stompest>=2.3.0",
"circuits",
"pytz",
"jinja2~=2.0",
"pysocks",
"filelock>=2.0.5",
"watchdog>=0.9.0, <1.0.0; python_version < '3.6.0'",
"watchdog>=0.9.0; python_version >= '3.6.0'",
"resilient>=42.0.0",
"resilient-lib>=42.0.0"
],
entry_points={
"console_scripts": ["res-action-test = resilient_circuits.bin.res_action_test:main",
"resilient-circuits = resilient_circuits.bin.resilient_circuits_cmd:main"]
},
author="IBM SOAR",
description="Framework used to run IBM SOAR Apps and Integrations.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ibmresilient/resilient-python-api/tree/master/resilient-circuits",
project_urls={
"Documentation": "https://ibm.biz/soar-docs",
"API Docs": "https://ibm.biz/soar-python-docs",
"IBM Community": "https://ibm.biz/soarcommunity",
"Change Log": "https://ibm.biz/resilient-circuits-changes"
},
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.6"
],
keywords="ibm soar resilient circuits resilient-circuits"
)
| true
| true
|
790deea8025bfc6d3ad5be3b0c721ae8b17b96fd
| 5,359
|
py
|
Python
|
dev/scratch/sphinx-quickstart/conf.py
|
nrser/qb
|
13b5737afb0f00971793768fcb539907d790d8a5
|
[
"MIT"
] | 1
|
2018-03-23T01:42:52.000Z
|
2018-03-23T01:42:52.000Z
|
dev/scratch/sphinx-quickstart/conf.py
|
nrser/qb
|
13b5737afb0f00971793768fcb539907d790d8a5
|
[
"MIT"
] | 12
|
2016-02-12T08:35:43.000Z
|
2018-03-23T08:05:49.000Z
|
dev/scratch/sphinx-quickstart/conf.py
|
nrser/qb
|
13b5737afb0f00971793768fcb539907d790d8a5
|
[
"MIT"
] | 1
|
2017-07-13T10:10:34.000Z
|
2017-07-13T10:10:34.000Z
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'QB'
copyright = u'2018, NRSER'
author = u'NRSER'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'QBdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'QB.tex', u'QB Documentation',
u'NRSER', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'qb', u'QB Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'QB', u'QB Documentation',
author, 'QB', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| 30.448864
| 79
| 0.639112
|
project = u'QB'
copyright = u'2018, NRSER'
author = u'NRSER'
version = u''
release = u''
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = None
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'sphinx'
html_theme = 'alabaster'
html_static_path = ['_static']
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'QBdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'QB.tex', u'QB Documentation',
u'NRSER', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'qb', u'QB Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'QB', u'QB Documentation',
author, 'QB', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| true
| true
|
790deeb39d6c1199c29d1d1e3b14a36019193573
| 637
|
py
|
Python
|
model-optimizer/mo/pipeline/unified.py
|
monroid/openvino
|
8272b3857ef5be0aaa8abbf7bd0d5d5615dc40b6
|
[
"Apache-2.0"
] | 2,406
|
2020-04-22T15:47:54.000Z
|
2022-03-31T10:27:37.000Z
|
model-optimizer/mo/pipeline/unified.py
|
thomas-yanxin/openvino
|
031e998a15ec738c64cc2379d7f30fb73087c272
|
[
"Apache-2.0"
] | 4,948
|
2020-04-22T15:12:39.000Z
|
2022-03-31T18:45:42.000Z
|
model-optimizer/mo/pipeline/unified.py
|
thomas-yanxin/openvino
|
031e998a15ec738c64cc2379d7f30fb73087c272
|
[
"Apache-2.0"
] | 991
|
2020-04-23T18:21:09.000Z
|
2022-03-31T18:40:57.000Z
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import argparse
from mo.graph.graph import Graph
from mo.pipeline.common import get_ir_version
from mo.utils import class_registration
def unified_pipeline(argv: argparse.Namespace):
graph = Graph(cmd_params=argv, name=argv.model_name, ir_version=get_ir_version(argv))
class_registration.apply_replacements(graph, [
class_registration.ClassType.LOADER,
class_registration.ClassType.FRONT_REPLACER,
class_registration.ClassType.MIDDLE_REPLACER,
class_registration.ClassType.BACK_REPLACER
])
return graph
| 31.85
| 89
| 0.78022
|
import argparse
from mo.graph.graph import Graph
from mo.pipeline.common import get_ir_version
from mo.utils import class_registration
def unified_pipeline(argv: argparse.Namespace):
graph = Graph(cmd_params=argv, name=argv.model_name, ir_version=get_ir_version(argv))
class_registration.apply_replacements(graph, [
class_registration.ClassType.LOADER,
class_registration.ClassType.FRONT_REPLACER,
class_registration.ClassType.MIDDLE_REPLACER,
class_registration.ClassType.BACK_REPLACER
])
return graph
| true
| true
|
790defb2e60f646da18e707569642139f440968e
| 8,837
|
py
|
Python
|
tests/extractcode/extractcode_assert_utils.py
|
doc22940/scancode-toolk
|
588b9a9411730e99d763d715ae9f38575744aaee
|
[
"Apache-2.0",
"CC0-1.0"
] | 1
|
2020-06-24T16:03:52.000Z
|
2020-06-24T16:03:52.000Z
|
tests/extractcode/extractcode_assert_utils.py
|
doc22940/scancode-toolk
|
588b9a9411730e99d763d715ae9f38575744aaee
|
[
"Apache-2.0",
"CC0-1.0"
] | 1
|
2021-06-02T02:50:07.000Z
|
2021-06-02T02:50:07.000Z
|
tests/extractcode/extractcode_assert_utils.py
|
hwpplayers/scancode-toolkit
|
72850bd57a1a841e5a6a6e4120223a00c4189046
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
import os
import ntpath
import posixpath
from commoncode import compat
from commoncode import filetype
from commoncode import fileutils
from commoncode.testcase import FileBasedTesting
from commoncode.system import on_windows
"""
Shared archiving test utils.
"""
def check_size(expected_size, location):
assert expected_size == os.stat(location).st_size
def check_files(test_dir, expected):
"""
Walk test_dir.
Check that all dirs are readable.
Check that all files are:
* non-special,
* readable,
* have a posix path that ends with one of the expected tuple paths.
"""
result = []
locs = []
if filetype.is_file(test_dir):
test_dir = fileutils.parent_directory(test_dir)
test_dir_path = fileutils.as_posixpath(test_dir)
for top, _, files in os.walk(test_dir):
for f in files:
location = os.path.join(top, f)
locs.append(location)
path = fileutils.as_posixpath(location)
path = path.replace(test_dir_path, '').strip('/')
result.append(path)
assert sorted(expected) == sorted(result)
for location in locs:
assert filetype.is_file(location)
assert not filetype.is_special(location)
assert filetype.is_readable(location)
def check_no_error(result):
"""
Check that every ExtractEvent in the `result` list has no error or warning.
"""
for r in result:
assert not r.errors
assert not r.warnings
def is_posixpath(location):
"""
Return True if the `location` path is likely a POSIX-like path using POSIX path
separators (slash or "/") or has no path separator.
Return False if the `location` path is likely a Windows-like path using backslash
as path separators (e.g. "\").
"""
has_slashes = '/' in location
has_backslashes = '\\' in location
# windows paths with drive
if location:
drive, _ = ntpath.splitdrive(location)
if drive:
return False
# a path is always POSIX unless it contains ONLY backslashes
# which is a rough approximation (it could still be posix)
is_posix = True
if has_backslashes and not has_slashes:
is_posix = False
return is_posix
def to_posix(path):
"""
Return a path using the posix path separator given a path that may contain posix
or windows separators, converting \\ to /. NB: this path will still be valid in
the windows explorer (except as a UNC or share name). It will be a valid path
everywhere in Python. It will not be valid for windows command line operations.
"""
is_unicode = isinstance(path, compat.unicode)
ntpath_sep = is_unicode and u'\\' or '\\'
posixpath_sep = is_unicode and u'/' or '/'
if is_posixpath(path):
if on_windows:
return path.replace(ntpath_sep, posixpath_sep)
else:
return path
return path.replace(ntpath_sep, posixpath_sep)
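The slash-versus-backslash heuristic above is easiest to check with concrete paths; the following self-contained restatement (not part of the original test module) mirrors is_posixpath for quick experimentation.

import ntpath

def looks_posix(location):
    # Same rule as is_posixpath above: a Windows drive letter or a backslash-only
    # path is treated as Windows, everything else (including no separator) as POSIX.
    if location and ntpath.splitdrive(location)[0]:
        return False
    return not ('\\' in location and '/' not in location)

assert looks_posix('some/dir/file.txt')
assert looks_posix('file.txt')                # no separator at all counts as POSIX
assert not looks_posix('C:\\temp\\file.txt')  # drive letter means Windows
assert not looks_posix('dir\\file.txt')       # backslash-only means Windows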
class BaseArchiveTestCase(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def check_get_extractors(self, test_file, expected, kinds=()):
from extractcode import archive
test_loc = self.get_test_loc(test_file)
if kinds:
extractors = archive.get_extractors(test_loc, kinds)
else:
extractors = archive.get_extractors(test_loc)
# import typecode
# ft = 'TODO' or typecode.contenttype.get_type(test_loc).filetype_file
# mt = 'TODO' or typecode.contenttype.get_type(test_loc).mimetype_file
fe = fileutils.file_extension(test_loc).lower()
em = ', '.join(e.__module__ + '.' + e.__name__ for e in extractors)
msg = ('%(expected)r == %(extractors)r for %(test_file)s\n'
'with fe:%(fe)r, em:%(em)s' % locals())
assert expected == extractors, msg
def assertRaisesInstance(self, excInstance, callableObj, *args, **kwargs):
"""
This assertion accepts an instance instead of a class for refined
exception testing.
"""
kwargs = kwargs or {}
excClass = excInstance.__class__
try:
callableObj(*args, **kwargs)
except excClass as e:
assert str(e).startswith(str(excInstance))
else:
if hasattr(excClass, '__name__'):
excName = excClass.__name__
else:
excName = str(excClass)
raise self.failureException('%s not raised' % excName)
def check_extract(self, test_function, test_file, expected, expected_warnings=None, check_all=False):
"""
Run the extraction `test_function` on `test_file` checking that a map of
expected paths --> size exist in the extracted target directory.
Does not test the presence of all files unless `check_all` is True.
"""
from extractcode import archive
test_file = self.get_test_loc(test_file)
test_dir = self.get_temp_dir()
warnings = test_function(test_file, test_dir)
if expected_warnings is not None:
assert expected_warnings == warnings
if check_all:
len_test_dir = len(test_dir)
extracted = {path[len_test_dir:]: filetype.get_size(path) for path in fileutils.resource_iter(test_dir, with_dirs=False)}
expected = {os.path.join(test_dir, exp_path): exp_size for exp_path, exp_size in expected.items()}
assert sorted(expected.items()) == sorted(extracted.items())
else:
for exp_path, exp_size in expected.items():
exp_loc = os.path.join(test_dir, exp_path)
msg = '''When extracting: %(test_file)s
With function: %(test_function)r
Failed to find expected path: %(exp_loc)s'''
assert os.path.exists(exp_loc), msg % locals()
if exp_size is not None:
res_size = os.stat(exp_loc).st_size
msg = '''When extracting: %(test_file)s
With function: %(test_function)r
Failed to assert the correct size %(exp_size)d
Got instead: %(res_size)d
for expected path: %(exp_loc)s'''
assert exp_size == res_size, msg % locals()
def collect_extracted_path(self, test_dir):
result = []
td = fileutils.as_posixpath(test_dir)
for t, dirs, files in os.walk(test_dir):
t = fileutils.as_posixpath(t)
for d in dirs:
nd = posixpath.join(t, d).replace(td, '') + '/'
result.append(nd)
for f in files:
nf = posixpath.join(t, f).replace(td, '')
result.append(nf)
result = sorted(result)
return result
def assertExceptionContains(self, text, callableObj, *args, **kwargs):
try:
callableObj(*args, **kwargs)
except Exception as e:
if text not in str(e):
raise self.failureException(
'Exception %(e)r raised, '
'it should contain the text %(text)r '
'and does not' % locals())
else:
raise self.failureException(
'Exception containing %(text)r not raised' % locals())
| 37.927039
| 133
| 0.640715
|
from __future__ import absolute_import
from __future__ import print_function
import os
import ntpath
import posixpath
from commoncode import compat
from commoncode import filetype
from commoncode import fileutils
from commoncode.testcase import FileBasedTesting
from commoncode.system import on_windows
def check_size(expected_size, location):
assert expected_size == os.stat(location).st_size
def check_files(test_dir, expected):
result = []
locs = []
if filetype.is_file(test_dir):
test_dir = fileutils.parent_directory(test_dir)
test_dir_path = fileutils.as_posixpath(test_dir)
for top, _, files in os.walk(test_dir):
for f in files:
location = os.path.join(top, f)
locs.append(location)
path = fileutils.as_posixpath(location)
path = path.replace(test_dir_path, '').strip('/')
result.append(path)
assert sorted(expected) == sorted(result)
for location in locs:
assert filetype.is_file(location)
assert not filetype.is_special(location)
assert filetype.is_readable(location)
def check_no_error(result):
for r in result:
assert not r.errors
assert not r.warnings
def is_posixpath(location):
has_slashes = '/' in location
has_backslashes = '\\' in location
if location:
drive, _ = ntpath.splitdrive(location)
if drive:
return False
is_posix = True
if has_backslashes and not has_slashes:
is_posix = False
return is_posix
def to_posix(path):
is_unicode = isinstance(path, compat.unicode)
ntpath_sep = is_unicode and u'\\' or '\\'
posixpath_sep = is_unicode and u'/' or '/'
if is_posixpath(path):
if on_windows:
return path.replace(ntpath_sep, posixpath_sep)
else:
return path
return path.replace(ntpath_sep, posixpath_sep)
class BaseArchiveTestCase(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def check_get_extractors(self, test_file, expected, kinds=()):
from extractcode import archive
test_loc = self.get_test_loc(test_file)
if kinds:
extractors = archive.get_extractors(test_loc, kinds)
else:
extractors = archive.get_extractors(test_loc)
fe = fileutils.file_extension(test_loc).lower()
em = ', '.join(e.__module__ + '.' + e.__name__ for e in extractors)
msg = ('%(expected)r == %(extractors)r for %(test_file)s\n'
'with fe:%(fe)r, em:%(em)s' % locals())
assert expected == extractors, msg
def assertRaisesInstance(self, excInstance, callableObj, *args, **kwargs):
kwargs = kwargs or {}
excClass = excInstance.__class__
try:
callableObj(*args, **kwargs)
except excClass as e:
assert str(e).startswith(str(excInstance))
else:
if hasattr(excClass, '__name__'):
excName = excClass.__name__
else:
excName = str(excClass)
raise self.failureException('%s not raised' % excName)
def check_extract(self, test_function, test_file, expected, expected_warnings=None, check_all=False):
from extractcode import archive
test_file = self.get_test_loc(test_file)
test_dir = self.get_temp_dir()
warnings = test_function(test_file, test_dir)
if expected_warnings is not None:
assert expected_warnings == warnings
if check_all:
len_test_dir = len(test_dir)
extracted = {path[len_test_dir:]: filetype.get_size(path) for path in fileutils.resource_iter(test_dir, with_dirs=False)}
expected = {os.path.join(test_dir, exp_path): exp_size for exp_path, exp_size in expected.items()}
assert sorted(expected.items()) == sorted(extracted.items())
else:
for exp_path, exp_size in expected.items():
exp_loc = os.path.join(test_dir, exp_path)
msg = '''When extracting: %(test_file)s
With function: %(test_function)r
Failed to find expected path: %(exp_loc)s'''
assert os.path.exists(exp_loc), msg % locals()
if exp_size is not None:
res_size = os.stat(exp_loc).st_size
msg = '''When extracting: %(test_file)s
With function: %(test_function)r
Failed to assert the correct size %(exp_size)d
Got instead: %(res_size)d
for expected path: %(exp_loc)s'''
assert exp_size == res_size, msg % locals()
def collect_extracted_path(self, test_dir):
result = []
td = fileutils.as_posixpath(test_dir)
for t, dirs, files in os.walk(test_dir):
t = fileutils.as_posixpath(t)
for d in dirs:
nd = posixpath.join(t, d).replace(td, '') + '/'
result.append(nd)
for f in files:
nf = posixpath.join(t, f).replace(td, '')
result.append(nf)
result = sorted(result)
return result
def assertExceptionContains(self, text, callableObj, *args, **kwargs):
try:
callableObj(*args, **kwargs)
except Exception as e:
if text not in str(e):
raise self.failureException(
'Exception %(e)r raised, '
'it should contain the text %(text)r '
'and does not' % locals())
else:
raise self.failureException(
'Exception containing %(text)r not raised' % locals())
| true
| true
|
790df00b3d96283fd9f00b4fd1e4b28bb99f7a5c
| 2,817
|
py
|
Python
|
calculation/gmhazard_calc/gmhazard_calc/nz_code/nzs1170p5/nzs_zfactor_2016/ll2z.py
|
ucgmsim/gmhazard
|
d3d90b4c94b3d9605597a3efeccc8523a1e50c0e
|
[
"MIT"
] | null | null | null |
calculation/gmhazard_calc/gmhazard_calc/nz_code/nzs1170p5/nzs_zfactor_2016/ll2z.py
|
ucgmsim/gmhazard
|
d3d90b4c94b3d9605597a3efeccc8523a1e50c0e
|
[
"MIT"
] | 8
|
2021-10-13T02:33:23.000Z
|
2022-03-29T21:01:08.000Z
|
calculation/gmhazard_calc/gmhazard_calc/nz_code/nzs1170p5/nzs_zfactor_2016/ll2z.py
|
ucgmsim/gmhazard
|
d3d90b4c94b3d9605597a3efeccc8523a1e50c0e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
from matplotlib.path import Path
import numpy as np
import pandas as pd
from scipy.interpolate import griddata
from qcore import geo
DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "zdata")
# constant regions and max bounds for faster processing
POLYGONS = [
(os.path.join(DATA, "AucklandPolgonOutline_Points_WGS84.txt"), 0.13),
(os.path.join(DATA, "ChristchurchPolgonOutline_Points_WGS84.txt"), 0.3),
(os.path.join(DATA, "NorthlandPolgonOutline_Points_WGS84.txt"), 0.1),
]
CITY_RADIUS_SEARCH = 2
# contours
Z_VALS = [0.13, 0.15, 0.175, 0.188, 0.20, 0.25, 0.275, 0.30, 0.325, 0.35, 0.375, 0.40, 0.415, 0.425, 0.45, 0.475, 0.50, 0.525, 0.55, 0.575, 0.60]
Z_FORMAT = os.path.join(DATA, "Z_%.3f_points_WGS84.txt")
def ll2z(locations, radius_search=CITY_RADIUS_SEARCH):
"""Computes the z-value for the given lon, lat tuple or
list of lon, lat tuples
:param locations:
:param radius_search: Checks to see if a city is within X km from the given location,
removes the search if value is set to 0
:return: Array of z-values, one for each location specified
"""
try:
multi = bool(len(locations[0]))
except TypeError:
multi = False
locations = [locations]
out = np.zeros(len(locations))
# check if in polygon
for p in POLYGONS:
c = Path(
geo.path_from_corners(
corners=np.loadtxt(p[0]).tolist(), output=None, min_edge_points=4
)
).contains_points(locations)
out = np.where(c, p[1], out)
# check if within specified radius from city
if radius_search > 0:
cities = pd.read_csv(os.path.join(DATA, 'cities_z.csv'), header=None, names=['lon', 'lat', 'city', 'z_value'])
cities_ll = cities[['lon', 'lat']].values
for i, location in enumerate(locations):
dists = geo.get_distances(cities_ll, location[0], location[1])
if np.any(dists < radius_search):
cities['dist'] = dists
city_idx = cities.dist.idxmin()
out[i] = cities.loc[city_idx].z_value
# interpolate contours
nz = []
points_all = []
for z in Z_VALS:
points = np.atleast_2d(np.loadtxt(Z_FORMAT % z))
nz.append(len(points))
points_all.append(points)
points = np.concatenate(points_all)
del points_all
z = griddata(points, np.repeat(Z_VALS, nz), locations, method="linear")
return np.where(out == 0, np.where(np.isnan(z), 0.13, z), out)
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("lon", type=float)
parser.add_argument("lat", type=float)
a = parser.parse_args()
print(ll2z((a.lon, a.lat)))
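A short usage sketch, assuming the file is importable as a module named ll2z and that the zdata files shipped next to it are present; the coordinates are examples only.

# Hypothetical import name; adjust to wherever this module lives in your tree.
from ll2z import ll2z

# A single (lon, lat) site: Christchurch should fall inside the constant polygon (z = 0.3).
print(ll2z((172.63, -43.53)))

# Several sites at once, skipping the city-radius lookup: one z-value per pair is returned.
print(ll2z([(174.78, -41.29), (170.50, -45.87)], radius_search=0))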
| 32.37931
| 145
| 0.636493
|
import os
from matplotlib.path import Path
import numpy as np
import pandas as pd
from scipy.interpolate import griddata
from qcore import geo
DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "zdata")
POLYGONS = [
(os.path.join(DATA, "AucklandPolgonOutline_Points_WGS84.txt"), 0.13),
(os.path.join(DATA, "ChristchurchPolgonOutline_Points_WGS84.txt"), 0.3),
(os.path.join(DATA, "NorthlandPolgonOutline_Points_WGS84.txt"), 0.1),
]
CITY_RADIUS_SEARCH = 2
Z_VALS = [0.13, 0.15, 0.175, 0.188, 0.20, 0.25, 0.275, 0.30, 0.325, 0.35, 0.375, 0.40, 0.415, 0.425, 0.45, 0.475, 0.50, 0.525, 0.55, 0.575, 0.60]
Z_FORMAT = os.path.join(DATA, "Z_%.3f_points_WGS84.txt")
def ll2z(locations, radius_search=CITY_RADIUS_SEARCH):
try:
multi = bool(len(locations[0]))
except TypeError:
multi = False
locations = [locations]
out = np.zeros(len(locations))
for p in POLYGONS:
c = Path(
geo.path_from_corners(
corners=np.loadtxt(p[0]).tolist(), output=None, min_edge_points=4
)
).contains_points(locations)
out = np.where(c, p[1], out)
if radius_search > 0:
cities = pd.read_csv(os.path.join(DATA, 'cities_z.csv'), header=None, names=['lon', 'lat', 'city', 'z_value'])
cities_ll = cities[['lon', 'lat']].values
for i, location in enumerate(locations):
dists = geo.get_distances(cities_ll, location[0], location[1])
if np.any(dists < radius_search):
cities['dist'] = dists
city_idx = cities.dist.idxmin()
out[i] = cities.loc[city_idx].z_value
nz = []
points_all = []
for z in Z_VALS:
points = np.atleast_2d(np.loadtxt(Z_FORMAT % z))
nz.append(len(points))
points_all.append(points)
points = np.concatenate(points_all)
del points_all
z = griddata(points, np.repeat(Z_VALS, nz), locations, method="linear")
return np.where(out == 0, np.where(np.isnan(z), 0.13, z), out)
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("lon", type=float)
parser.add_argument("lat", type=float)
a = parser.parse_args()
print(ll2z((a.lon, a.lat)))
| true
| true
|
790df02ebcc7c2fd723b63bd54ae85b27f97db53
| 1,608
|
py
|
Python
|
ocdskingfisherprocess/cli/commands/new_transform_upgrade_1_0_to_1_1.py
|
matiasSanabria/kingfisher-process
|
88cb768aaa562714c8bd53e05717639faf041501
|
[
"BSD-3-Clause"
] | 1
|
2019-04-11T10:17:32.000Z
|
2019-04-11T10:17:32.000Z
|
ocdskingfisherprocess/cli/commands/new_transform_upgrade_1_0_to_1_1.py
|
matiasSanabria/kingfisher-process
|
88cb768aaa562714c8bd53e05717639faf041501
|
[
"BSD-3-Clause"
] | 282
|
2018-12-20T16:49:22.000Z
|
2022-02-01T00:48:10.000Z
|
ocdskingfisherprocess/cli/commands/new_transform_upgrade_1_0_to_1_1.py
|
matiasSanabria/kingfisher-process
|
88cb768aaa562714c8bd53e05717639faf041501
|
[
"BSD-3-Clause"
] | 7
|
2019-04-15T13:36:18.000Z
|
2021-03-02T16:25:41.000Z
|
import ocdskingfisherprocess.cli.commands.base
import ocdskingfisherprocess.database
from ocdskingfisherprocess.transform import TRANSFORM_TYPE_UPGRADE_1_0_TO_1_1
class NewTransformUpgrade10To11CLICommand(ocdskingfisherprocess.cli.commands.base.CLICommand):
command = 'new-transform-upgrade-1-0-to-1-1'
def configure_subparser(self, subparser):
self.configure_subparser_for_selecting_existing_collection(subparser)
def run_command(self, args):
self.run_command_for_selecting_existing_collection(args)
if self.collection.deleted_at:
print("That collection is deleted!")
return
id = self.database.get_collection_id(
self.collection.source_id,
self.collection.data_version,
self.collection.sample,
transform_from_collection_id=self.collection.database_id,
transform_type=TRANSFORM_TYPE_UPGRADE_1_0_TO_1_1)
if id:
print("Already exists! The ID is {}".format(id))
return
id = self.database.get_or_create_collection_id(self.collection.source_id,
self.collection.data_version,
self.collection.sample,
transform_from_collection_id=self.collection.database_id,
transform_type=TRANSFORM_TYPE_UPGRADE_1_0_TO_1_1)
print("Created! The ID is {}".format(id))
print("Now run transform-collection with that ID.")
| 43.459459
| 112
| 0.636816
|
import ocdskingfisherprocess.cli.commands.base
import ocdskingfisherprocess.database
from ocdskingfisherprocess.transform import TRANSFORM_TYPE_UPGRADE_1_0_TO_1_1
class NewTransformUpgrade10To11CLICommand(ocdskingfisherprocess.cli.commands.base.CLICommand):
command = 'new-transform-upgrade-1-0-to-1-1'
def configure_subparser(self, subparser):
self.configure_subparser_for_selecting_existing_collection(subparser)
def run_command(self, args):
self.run_command_for_selecting_existing_collection(args)
if self.collection.deleted_at:
print("That collection is deleted!")
return
id = self.database.get_collection_id(
self.collection.source_id,
self.collection.data_version,
self.collection.sample,
transform_from_collection_id=self.collection.database_id,
transform_type=TRANSFORM_TYPE_UPGRADE_1_0_TO_1_1)
if id:
print("Already exists! The ID is {}".format(id))
return
id = self.database.get_or_create_collection_id(self.collection.source_id,
self.collection.data_version,
self.collection.sample,
transform_from_collection_id=self.collection.database_id,
transform_type=TRANSFORM_TYPE_UPGRADE_1_0_TO_1_1)
print("Created! The ID is {}".format(id))
print("Now run transform-collection with that ID.")
| true
| true
|
790df0fb4eab5abf961604977a83799295634a26
| 866
|
py
|
Python
|
natural-languages-python.py
|
ErosMLima/python-server-connection
|
a15706a007a95eff64597fa02e64d95b6b2da6a5
|
[
"MIT"
] | 2
|
2020-07-27T06:33:59.000Z
|
2021-02-02T15:17:56.000Z
|
natural-languages-python.py
|
ErosMLima/python-server-connection
|
a15706a007a95eff64597fa02e64d95b6b2da6a5
|
[
"MIT"
] | null | null | null |
natural-languages-python.py
|
ErosMLima/python-server-connection
|
a15706a007a95eff64597fa02e64d95b6b2da6a5
|
[
"MIT"
] | null | null | null |
import emoji
emoji.emojize(':sunglasses:')
# Transformation  # Comment
frase = 'Curso em Videos Python'
frase[9:13]
frase[9:21:2]
frase[:5]
frase[15:]
frase[9::3]
# Curso em Video Python class 9 => review 2 [13/07/2020 14h00m]
# String transformation features
# Objeto.metodo()  # generic "object.method()" call syntax
frase.find('deo')  # find / search
frase.find('Android')  # find / search
frase.replace('Python', 'Android')  # replace
frase.lower()  # all lowercase
frase.capitalize()  # capitalize the first letter
frase.title()
frase = 'Aprenda Python'
frase.rstrip()  # strips whitespace on the right-hand side ("r" stands for right)
frase.lstrip()  # strips only the whitespace on the left
# String splitting features (chains of characters)
frase.split()
'-'.join(frase)  # joins every element of frase using '-' as the separator
| 24.055556
| 94
| 0.702079
|
import emoji
emoji.emojize('\:sunglasses:?')
frase = 'Curso em Videos Python'
frase[9:13]
frase[9:21:2]
frase[:5]
frase[15:]
frase[9::3]
Objeti.Methodo()
frase.find('deo')
frase.find('Android')
frase.replace('Python','Android')
frase.lower()
frase.capitelize()
frase.title()
frase = ['Aprenda Python']
frase.rstrip()
frase.lstrip()
frase.split()
'-'.join(frase)
| true
| true
|
790df18be8449e10282c4e08023378127398a2ee
| 8,687
|
py
|
Python
|
GoogleScraper/commandline.py
|
hnhnarek/GoogleScraper
|
80421366f2fbff2537edc8f29e51f9b75df919a3
|
[
"Apache-2.0"
] | 1
|
2018-11-18T14:36:29.000Z
|
2018-11-18T14:36:29.000Z
|
GoogleScraper/commandline.py
|
hnhnarek/GoogleScraper
|
80421366f2fbff2537edc8f29e51f9b75df919a3
|
[
"Apache-2.0"
] | null | null | null |
GoogleScraper/commandline.py
|
hnhnarek/GoogleScraper
|
80421366f2fbff2537edc8f29e51f9b75df919a3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import argparse
from GoogleScraper.version import __version__
def get_command_line(only_print_help=False):
"""
Parse command line arguments when GoogleScraper is used as a CLI application.
Returns:
The configuration as a dictionary that determines the behaviour of the app.
"""
parser = argparse.ArgumentParser(prog='GoogleScraper',
description='Scrapes the Google, Yandex, Bing and many other search engines by '
'forging http requests that imitate browser searches or by using real '
'browsers controlled by the selenium framework. '
'Multithreading support.',
epilog='GoogleScraper {version}. This program might infringe the TOS of the '
'search engines. Please use it on your own risk. (c) by Nikolai Tschacher'
', 2012-2018. incolumitas.com'.format(version=__version__))
parser.add_argument('-m', '--scrape-method', type=str, default='http',
help='The scraping type. There are currently three types: "http", "selenium" and "http-async". '
'"Http" scrapes with raw http requests, whereas "selenium" uses the selenium framework to '
'remotely control browsers. "http-async" makes use of gevent and is well suited for '
'extremely fast and explosive scraping jobs. You may search more than 1000 requests per '
'second if you have the necessary number of proxies available. ',
choices=('http', 'selenium', 'http-async'))
parser.add_argument('--sel-browser', choices=['firefox', 'chrome'], default='chrome',
help='The browser frontend for selenium scraping mode. Takes only effect if --scrape-method is set to "selenium"')
parser.add_argument('--browser-mode', choices=['normal', 'headless'], default='normal',
help='In which mode the browser is started. Valid values = (normal, headless)')
keyword_group = parser.add_mutually_exclusive_group()
keyword_group.add_argument('-q', '--keyword', type=str, action='store', dest='keyword',
help='The search keyword to scrape for. If you need to scrape multiple keywords, use '
'the --keyword-file flag')
keyword_group.add_argument('--keyword-file', type=str, action='store', default='',
help='Keywords to search for. One keyword per line. Empty lines are ignored. '
'Alternatively, you may specify the path to a Python module (must end with the '
'.py suffix) where the keywords must be held in a dictionary with the name "scrape_'
'jobs".')
parser.add_argument('-o-', '--output-filename', type=str, action='store', default='',
help='The name of the output file. If the file ending is "json", write a json file, if the '
'ending is "csv", write a csv file.')
parser.add_argument('--shell', action='store_true', default=False,
help='Fire up a shell with a loaded sqlalchemy session.')
parser.add_argument('-n', '--num-results-per-page', type=int,
action='store', default=10,
help='The number of results per page. Must be smaller than 100, by default 50 for raw mode and '
'10 for selenium mode. Some search engines ignore this setting.')
parser.add_argument('-p', '--num-pages-for-keyword', type=int, action='store',
default=1,
help='The number of pages to request for each keyword. Each page is requested by a unique '
'connection and if possible by a unique IP (at least in "http" mode).')
parser.add_argument('-z', '--num-workers', type=int, default=1,
action='store',
help='This argument sets the number of browser instances for selenium mode or the number of '
'worker threads in http mode.')
parser.add_argument('-t', '--search-type', type=str, action='store', default='normal',
help='The searchtype to launch. May be normal web search, image search, news search or video '
'search.')
parser.add_argument('--proxy-file', type=str, dest='proxy_file', action='store',
required=False, help='A filename for a list of proxies (supported are HTTP PROXIES, SOCKS4/5) '
'with the following format: "Proxyprotocol (proxy_ip|proxy_host):Port\n"'
'Example file: socks4 127.0.0.1:99\nsocks5 33.23.193.22:1080\n')
parser.add_argument('--config-file', type=str, dest='config_file', action='store',
help='The path to the configuration file for GoogleScraper. Normally you won\'t need this, '
'because GoogleScrape comes shipped with a thoroughly commented configuration file named '
'"scrape_config.py"')
parser.add_argument('--check-detection', type=str, dest='check_detection', action='store',
help='Check if the given search engine blocked you from scraping. Often detection can be determined '
'if you have to solve a captcha.')
parser.add_argument('--simulate', action='store_true', default=False, required=False,
help='''If this flag is set, the scrape job and its estimated length will be printed.''')
loglevel_help = '''
Set the debug level of the application. Use the string representation
instead of the numbers. High numbers will output less, low numbers more.
CRITICAL = 50,
FATAL = CRITICAL,
ERROR = 40,
WARNING = 30,
WARN = WARNING,
INFO = 20,
DEBUG = 10,
NOTSET = 0
'''
parser.add_argument('-v', '--verbosity', '--loglevel',
dest='log_level', default='INFO', type = str.lower,
choices=['debug', 'info', 'warning', 'warn', 'error', 'critical', 'fatal'], help=loglevel_help)
parser.add_argument('--print-results', choices=['all', 'summarize'], default='all',
help='Whether to print all results ("all"), or only print a summary ("summarize")')
parser.add_argument('--view-config', action='store_true', default=False,
help="Print the current configuration to stdout. You may use it to create and tweak your own "
"config file from it.")
parser.add_argument('-V', '--v', '--version', action='store_true', default=False, dest='version',
help='Prints the version of GoogleScraper')
parser.add_argument('--clean', action='store_true', default=False,
help='Cleans all stored data. Please be very careful when you use this flag.')
parser.add_argument('--mysql-proxy-db', action='store',
help="A mysql connection string for proxies to use. Format: mysql://<username>:<password>@"
"<host>/<dbname>. Has precedence over proxy files.")
parser.add_argument('-s', '--search-engines', action='store', default=['google'],
help='What search engines to use (see GoogleScraper --config for all that are supported). If you '
'want to use more than one at the same time, just separate them with commas: "google, bing, '
'yandex". If you want to use all search engines that are available, give \'*\' as '
'argument.')
#custom arguments
parser.add_argument('--proxy_chain_ips', type=str, action='store', default="local",
help='proxy_chain_ips to forward requests')
parser.add_argument('--strict', action='store_true', default=False,
help='Defines strict google / bing search')
parser.add_argument('--no-cache', action='store_true', default=False,
help='Disable caching')
if only_print_help:
parser.print_help()
else:
args = parser.parse_args()
return vars(args)
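Because the parsed arguments come back as a plain dict, callers can treat the result as configuration; a minimal driver sketch follows (assuming GoogleScraper is installed so the import resolves, and run as a script so argparse has a real argv to parse).

from GoogleScraper.commandline import get_command_line

if __name__ == '__main__':
    config = get_command_line()
    # Keys follow the argparse dest names, e.g. scrape_method, keyword, search_engines.
    print(config['scrape_method'], config.get('keyword'))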
| 58.302013
| 138
| 0.574191
|
import argparse
from GoogleScraper.version import __version__
def get_command_line(only_print_help=False):
parser = argparse.ArgumentParser(prog='GoogleScraper',
description='Scrapes the Google, Yandex, Bing and many other search engines by '
'forging http requests that imitate browser searches or by using real '
'browsers controlled by the selenium framework. '
'Multithreading support.',
epilog='GoogleScraper {version}. This program might infringe the TOS of the '
'search engines. Please use it on your own risk. (c) by Nikolai Tschacher'
', 2012-2018. incolumitas.com'.format(version=__version__))
parser.add_argument('-m', '--scrape-method', type=str, default='http',
help='The scraping type. There are currently three types: "http", "selenium" and "http-async". '
'"Http" scrapes with raw http requests, whereas "selenium" uses the selenium framework to '
'remotely control browsers. "http-async" makes use of gevent and is well suited for '
'extremely fast and explosive scraping jobs. You may search more than 1000 requests per '
'second if you have the necessary number of proxies available. ',
choices=('http', 'selenium', 'http-async'))
parser.add_argument('--sel-browser', choices=['firefox', 'chrome'], default='chrome',
help='The browser frontend for selenium scraping mode. Takes only effect if --scrape-method is set to "selenium"')
parser.add_argument('--browser-mode', choices=['normal', 'headless'], default='normal',
help='In which mode the browser is started. Valid values = (normal, headless)')
keyword_group = parser.add_mutually_exclusive_group()
keyword_group.add_argument('-q', '--keyword', type=str, action='store', dest='keyword',
help='The search keyword to scrape for. If you need to scrape multiple keywords, use '
'the --keyword-file flag')
keyword_group.add_argument('--keyword-file', type=str, action='store', default='',
help='Keywords to search for. One keyword per line. Empty lines are ignored. '
'Alternatively, you may specify the path to a Python module (must end with the '
'.py suffix) where the keywords must be held in a dictionary with the name "scrape_'
'jobs".')
parser.add_argument('-o-', '--output-filename', type=str, action='store', default='',
help='The name of the output file. If the file ending is "json", write a json file, if the '
'ending is "csv", write a csv file.')
parser.add_argument('--shell', action='store_true', default=False,
help='Fire up a shell with a loaded sqlalchemy session.')
parser.add_argument('-n', '--num-results-per-page', type=int,
action='store', default=10,
help='The number of results per page. Must be smaller than 100, by default 50 for raw mode and '
'10 for selenium mode. Some search engines ignore this setting.')
parser.add_argument('-p', '--num-pages-for-keyword', type=int, action='store',
default=1,
help='The number of pages to request for each keyword. Each page is requested by a unique '
'connection and if possible by a unique IP (at least in "http" mode).')
parser.add_argument('-z', '--num-workers', type=int, default=1,
action='store',
help='This argument sets the number of browser instances for selenium mode or the number of '
'worker threads in http mode.')
parser.add_argument('-t', '--search-type', type=str, action='store', default='normal',
help='The searchtype to launch. May be normal web search, image search, news search or video '
'search.')
parser.add_argument('--proxy-file', type=str, dest='proxy_file', action='store',
required=False, help='A filename for a list of proxies (supported are HTTP PROXIES, SOCKS4/5) '
'with the following format: "Proxyprotocol (proxy_ip|proxy_host):Port\n"'
'Example file: socks4 127.0.0.1:99\nsocks5 33.23.193.22:1080\n')
parser.add_argument('--config-file', type=str, dest='config_file', action='store',
help='The path to the configuration file for GoogleScraper. Normally you won\'t need this, '
'because GoogleScrape comes shipped with a thoroughly commented configuration file named '
'"scrape_config.py"')
parser.add_argument('--check-detection', type=str, dest='check_detection', action='store',
help='Check if the given search engine blocked you from scraping. Often detection can be determined '
'if you have to solve a captcha.')
parser.add_argument('--simulate', action='store_true', default=False, required=False,
help='''If this flag is set, the scrape job and its estimated length will be printed.''')
loglevel_help = '''
Set the debug level of the application. Use the string representation
instead of the numbers. High numbers will output less, low numbers more.
CRITICAL = 50,
FATAL = CRITICAL,
ERROR = 40,
WARNING = 30,
WARN = WARNING,
INFO = 20,
DEBUG = 10,
NOTSET = 0
'''
parser.add_argument('-v', '--verbosity', '--loglevel',
dest='log_level', default='INFO', type = str.lower,
choices=['debug', 'info', 'warning', 'warn', 'error', 'critical', 'fatal'], help=loglevel_help)
parser.add_argument('--print-results', choices=['all', 'summarize'], default='all',
help='Whether to print all results ("all"), or only print a summary ("summarize")')
parser.add_argument('--view-config', action='store_true', default=False,
help="Print the current configuration to stdout. You may use it to create and tweak your own "
"config file from it.")
parser.add_argument('-V', '--v', '--version', action='store_true', default=False, dest='version',
help='Prints the version of GoogleScraper')
parser.add_argument('--clean', action='store_true', default=False,
help='Cleans all stored data. Please be very careful when you use this flag.')
parser.add_argument('--mysql-proxy-db', action='store',
help="A mysql connection string for proxies to use. Format: mysql://<username>:<password>@"
"<host>/<dbname>. Has precedence over proxy files.")
parser.add_argument('-s', '--search-engines', action='store', default=['google'],
help='What search engines to use (see GoogleScraper --config for all that are supported). If you '
'want to use more than one at the same time, just separate them with commas: "google, bing, '
'yandex". If you want to use all search engines that are available, give \'*\' as '
'argument.')
#custom arguments
parser.add_argument('--proxy_chain_ips', type=str, action='store', default="local",
help='proxy_chain_ips to forward requests')
parser.add_argument('--strict', action='store_true', default=False,
help='Defines strict google / bing search')
parser.add_argument('--no-cache', action='store_true', default=False,
help='Disable caching')
if only_print_help:
parser.print_help()
else:
args = parser.parse_args()
return vars(args)
| true
| true
|
790df2247cdf56f82b55987b1a12b3ee33742514
| 6,396
|
py
|
Python
|
packages/pytea/pytest/benchmarks/transformers/tests/test_tokenization_fsmt.py
|
lego0901/pytea
|
8ede650def2e68f4610ba816451d8b9e28f09f76
|
[
"MIT"
] | 1
|
2020-11-14T06:08:38.000Z
|
2020-11-14T06:08:38.000Z
|
packages/pytea/pytest/benchmarks/transformers/tests/test_tokenization_fsmt.py
|
lego0901/pytea
|
8ede650def2e68f4610ba816451d8b9e28f09f76
|
[
"MIT"
] | null | null | null |
packages/pytea/pytest/benchmarks/transformers/tests/test_tokenization_fsmt.py
|
lego0901/pytea
|
8ede650def2e68f4610ba816451d8b9e28f09f76
|
[
"MIT"
] | 1
|
2020-11-16T23:12:50.000Z
|
2020-11-16T23:12:50.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
from transformers.file_utils import cached_property
from transformers.testing_utils import slow
from transformers.tokenization_fsmt import VOCAB_FILES_NAMES, FSMTTokenizer
from .test_tokenization_common import TokenizerTesterMixin
# using a different tiny model than the one used for default params defined in init to ensure proper testing
FSMT_TINY2 = "stas/tiny-wmt19-en-ru"
class FSMTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = FSMTTokenizer
def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
self.langs = ["en", "ru"]
config = {
"langs": self.langs,
"src_vocab_size": 10,
"tgt_vocab_size": 20,
}
self.src_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["src_vocab_file"])
self.tgt_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["tgt_vocab_file"])
config_file = os.path.join(self.tmpdirname, "tokenizer_config.json")
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.src_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(self.tgt_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(self.merges_file, "w") as fp:
fp.write("\n".join(merges))
with open(config_file, "w") as fp:
fp.write(json.dumps(config))
@cached_property
def tokenizer_ru_en(self):
return FSMTTokenizer.from_pretrained("facebook/wmt19-ru-en")
@cached_property
def tokenizer_en_ru(self):
return FSMTTokenizer.from_pretrained("facebook/wmt19-en-ru")
def test_online_tokenizer_config(self):
"""this just tests that the online tokenizer files get correctly fetched and
loaded via its tokenizer_config.json and it's not slow so it's run by normal CI
"""
tokenizer = FSMTTokenizer.from_pretrained(FSMT_TINY2)
self.assertListEqual([tokenizer.src_lang, tokenizer.tgt_lang], ["en", "ru"])
self.assertEqual(tokenizer.src_vocab_size, 21)
self.assertEqual(tokenizer.tgt_vocab_size, 21)
def test_full_tokenizer(self):
""" Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt """
tokenizer = FSMTTokenizer(self.langs, self.src_vocab_file, self.tgt_vocab_file, self.merges_file)
text = "lower"
bpe_tokens = ["low", "er</w>"]
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + ["<unk>"]
input_bpe_tokens = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_ru_en
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == text + [2]
assert encoded_pair == text + [2] + text_2 + [2]
@slow
def test_match_encode_decode(self):
tokenizer_enc = self.tokenizer_en_ru
tokenizer_dec = self.tokenizer_ru_en
targets = [
[
"Here's a little song I wrote. Don't worry, be happy.",
[2470, 39, 11, 2349, 7222, 70, 5979, 7, 8450, 1050, 13160, 5, 26, 6445, 7, 2],
],
["This is it. No more. I'm done!", [132, 21, 37, 7, 1434, 86, 7, 70, 6476, 1305, 427, 2]],
]
# if data needs to be recreated or added, run:
# import torch
# model = torch.hub.load("pytorch/fairseq", "transformer.wmt19.en-ru", checkpoint_file="model4.pt", tokenizer="moses", bpe="fastbpe")
# for src_text, _ in targets: print(f"""[\n"{src_text}",\n {model.encode(src_text).tolist()}\n],""")
for src_text, tgt_input_ids in targets:
encoded_ids = tokenizer_enc.encode(src_text, return_tensors=None)
self.assertListEqual(encoded_ids, tgt_input_ids)
# and decode backward, using the reversed languages model
decoded_text = tokenizer_dec.decode(encoded_ids, skip_special_tokens=True)
self.assertEqual(decoded_text, src_text)
@slow
def test_tokenizer_lower(self):
tokenizer = FSMTTokenizer.from_pretrained("facebook/wmt19-ru-en", do_lower_case=True)
tokens = tokenizer.tokenize("USA is United States of America")
expected = ["us", "a</w>", "is</w>", "un", "i", "ted</w>", "st", "ates</w>", "of</w>", "am", "er", "ica</w>"]
self.assertListEqual(tokens, expected)
@unittest.skip("FSMTConfig.__init__ requires non-optional args")
def test_torch_encode_plus_sent_to_model(self):
pass
@unittest.skip("FSMTConfig.__init__ requires non-optional args")
def test_np_encode_plus_sent_to_model(self):
pass
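# --- Editor's note (not part of the original test module) ---
# These cases are normally collected via pytest/unittest discovery.  A typical
# invocation (the path below is illustrative of a transformers checkout) is:
#   python -m pytest tests/test_tokenization_fsmt.py -k "full_tokenizer" -q
# Tests marked @slow additionally expect RUN_SLOW=1 in the environment.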
| 38.071429
| 141
| 0.635241
|
import json
import os
import unittest
from transformers.file_utils import cached_property
from transformers.testing_utils import slow
from transformers.tokenization_fsmt import VOCAB_FILES_NAMES, FSMTTokenizer
from .test_tokenization_common import TokenizerTesterMixin
FSMT_TINY2 = "stas/tiny-wmt19-en-ru"
class FSMTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = FSMTTokenizer
def setUp(self):
super().setUp()
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
self.langs = ["en", "ru"]
config = {
"langs": self.langs,
"src_vocab_size": 10,
"tgt_vocab_size": 20,
}
self.src_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["src_vocab_file"])
self.tgt_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["tgt_vocab_file"])
config_file = os.path.join(self.tmpdirname, "tokenizer_config.json")
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.src_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(self.tgt_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(self.merges_file, "w") as fp:
fp.write("\n".join(merges))
with open(config_file, "w") as fp:
fp.write(json.dumps(config))
@cached_property
def tokenizer_ru_en(self):
return FSMTTokenizer.from_pretrained("facebook/wmt19-ru-en")
@cached_property
def tokenizer_en_ru(self):
return FSMTTokenizer.from_pretrained("facebook/wmt19-en-ru")
def test_online_tokenizer_config(self):
tokenizer = FSMTTokenizer.from_pretrained(FSMT_TINY2)
self.assertListEqual([tokenizer.src_lang, tokenizer.tgt_lang], ["en", "ru"])
self.assertEqual(tokenizer.src_vocab_size, 21)
self.assertEqual(tokenizer.tgt_vocab_size, 21)
def test_full_tokenizer(self):
tokenizer = FSMTTokenizer(self.langs, self.src_vocab_file, self.tgt_vocab_file, self.merges_file)
text = "lower"
bpe_tokens = ["low", "er</w>"]
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + ["<unk>"]
input_bpe_tokens = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_ru_en
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == text + [2]
assert encoded_pair == text + [2] + text_2 + [2]
@slow
def test_match_encode_decode(self):
tokenizer_enc = self.tokenizer_en_ru
tokenizer_dec = self.tokenizer_ru_en
targets = [
[
"Here's a little song I wrote. Don't worry, be happy.",
[2470, 39, 11, 2349, 7222, 70, 5979, 7, 8450, 1050, 13160, 5, 26, 6445, 7, 2],
],
["This is it. No more. I'm done!", [132, 21, 37, 7, 1434, 86, 7, 70, 6476, 1305, 427, 2]],
]
# if data needs to be recreated or added, run:
# import torch
# model = torch.hub.load("pytorch/fairseq", "transformer.wmt19.en-ru", checkpoint_file="model4.pt", tokenizer="moses", bpe="fastbpe")
# for src_text, _ in targets: print(f"""[\n"{src_text}",\n {model.encode(src_text).tolist()}\n],""")
for src_text, tgt_input_ids in targets:
encoded_ids = tokenizer_enc.encode(src_text, return_tensors=None)
self.assertListEqual(encoded_ids, tgt_input_ids)
# and decode backward, using the reversed languages model
decoded_text = tokenizer_dec.decode(encoded_ids, skip_special_tokens=True)
self.assertEqual(decoded_text, src_text)
@slow
def test_tokenizer_lower(self):
tokenizer = FSMTTokenizer.from_pretrained("facebook/wmt19-ru-en", do_lower_case=True)
tokens = tokenizer.tokenize("USA is United States of America")
expected = ["us", "a</w>", "is</w>", "un", "i", "ted</w>", "st", "ates</w>", "of</w>", "am", "er", "ica</w>"]
self.assertListEqual(tokens, expected)
@unittest.skip("FSMTConfig.__init__ requires non-optional args")
def test_torch_encode_plus_sent_to_model(self):
pass
@unittest.skip("FSMTConfig.__init__ requires non-optional args")
def test_np_encode_plus_sent_to_model(self):
pass
| true
| true
|
790df3d3edbdef7f3e27cbfab633888f8f7bfa8a
| 3,597
|
py
|
Python
|
milk/supervised/classifier.py
|
luispedro/milk
|
abc2a28b526c199414d42c0a26092938968c3caf
|
[
"MIT"
] | 284
|
2015-01-21T09:07:55.000Z
|
2022-03-19T07:39:17.000Z
|
milk/supervised/classifier.py
|
pursh2002/milk
|
abc2a28b526c199414d42c0a26092938968c3caf
|
[
"MIT"
] | 6
|
2015-04-22T15:17:44.000Z
|
2018-04-22T16:06:24.000Z
|
milk/supervised/classifier.py
|
pursh2002/milk
|
abc2a28b526c199414d42c0a26092938968c3caf
|
[
"MIT"
] | 109
|
2015-02-03T07:39:59.000Z
|
2022-01-16T00:16:13.000Z
|
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2015, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import division
import numpy as np
from .normalise import normaliselabels
from .base import supervised_model
__all__ = ['normaliselabels', 'ctransforms']
class threshold_model(object):
'''
threshold_model
Attributes
----------
threshold : float
threshold value
'''
def __init__(self, threshold=.5):
        self.threshold = threshold
def apply(self, f):
return f >= self.threshold
def __repr__(self):
return 'threshold_model({})'.format(self.threshold)
__str__ = __repr__
class fixed_threshold_learner(object):
def __init__(self, threshold=.5):
self.threshold = threshold
def train(self, features, labels, **kwargs):
return threshold_model(self.threshold)
def __repr__(self):
return 'fixed_threshold_learner({})'.format(self.threshold)
__str__ = __repr__
class ctransforms_model(supervised_model):
'''
model = ctransforms_model(models)
A model that consists of a series of transformations.
See Also
--------
ctransforms
'''
def __init__(self, models):
self.models = models
def apply_many(self, features):
if len(features) == 0:
return features
for m in self.models:
features = m.apply_many(features)
return features
def __repr__(self):
return 'ctransforms_model({})'.format(self.models)
__str__ = __repr__
def __getitem__(self, ix):
return self.models[ix]
def apply(self,features):
for T in self.models:
features = T.apply(features)
return features
class ctransforms(object):
'''
ctransf = ctransforms(c0, c1, c2, ...)
Concatenate transforms.
'''
def __init__(self,*args):
self.transforms = args
def train(self, features, labels, **kwargs):
models = []
model = None
for T in self.transforms:
if model is not None:
features = np.array([model.apply(f) for f in features])
model = T.train(features, labels, **kwargs)
models.append(model)
return ctransforms_model(models)
def __repr__(self):
return 'ctransforms(*{})'.format(self.transforms)
__str__ = __repr__
def set_option(self, opt, val):
idx, opt = opt
self.transforms[idx].set_option(opt,val)
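# --- Illustrative usage sketch (editor's addition, not part of milk) ---
# ctransforms chains learners: each trained stage transforms the features that
# are fed to the next stage's train().  The scaling learner below is a
# hypothetical stand-in; any object exposing train()/apply() would work.
class _example_scaling_learner(object):
    def train(self, features, labels, **kwargs):
        class _model(object):
            def __init__(self, scale):
                self.scale = scale
            def apply(self, f):
                return f / self.scale
        return _model(np.abs(features).max() or 1.0)
# learner = ctransforms(_example_scaling_learner(), fixed_threshold_learner(.5))
# model = learner.train(features, labels)            # features: 2D numpy array
# predictions = [model.apply(f) for f in features]   # boolean array per sample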
| 29.727273
| 80
| 0.666389
|
from __future__ import division
import numpy as np
from .normalise import normaliselabels
from .base import supervised_model
__all__ = ['normaliselabels', 'ctransforms']
class threshold_model(object):
def __init__(self, threshold=.5):
        self.threshold = threshold
def apply(self, f):
return f >= self.threshold
def __repr__(self):
return 'threshold_model({})'.format(self.threshold)
__str__ = __repr__
class fixed_threshold_learner(object):
def __init__(self, threshold=.5):
self.threshold = threshold
def train(self, features, labels, **kwargs):
return threshold_model(self.threshold)
def __repr__(self):
return 'fixed_threshold_learner({})'.format(self.threshold)
__str__ = __repr__
class ctransforms_model(supervised_model):
def __init__(self, models):
self.models = models
def apply_many(self, features):
if len(features) == 0:
return features
for m in self.models:
features = m.apply_many(features)
return features
def __repr__(self):
return 'ctransforms_model({})'.format(self.models)
__str__ = __repr__
def __getitem__(self, ix):
return self.models[ix]
def apply(self,features):
for T in self.models:
features = T.apply(features)
return features
class ctransforms(object):
def __init__(self,*args):
self.transforms = args
def train(self, features, labels, **kwargs):
models = []
model = None
for T in self.transforms:
if model is not None:
features = np.array([model.apply(f) for f in features])
model = T.train(features, labels, **kwargs)
models.append(model)
return ctransforms_model(models)
def __repr__(self):
return 'ctransforms(*{})'.format(self.transforms)
__str__ = __repr__
def set_option(self, opt, val):
idx, opt = opt
self.transforms[idx].set_option(opt,val)
| true
| true
|
790df45ba9086de045cf171d19e9c521fc8111bf
| 17,757
|
py
|
Python
|
cgp.py
|
Pavan-Samtani/CGP-CNN-v2
|
2eede8297542b7551d5ef5bf11aeeaba34bf4f3f
|
[
"MIT"
] | null | null | null |
cgp.py
|
Pavan-Samtani/CGP-CNN-v2
|
2eede8297542b7551d5ef5bf11aeeaba34bf4f3f
|
[
"MIT"
] | null | null | null |
cgp.py
|
Pavan-Samtani/CGP-CNN-v2
|
2eede8297542b7551d5ef5bf11aeeaba34bf4f3f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import time
import numpy as np
import math
import os
# gene[f][c] f:function type, c:connection (nodeID)
class Individual(object):
def __init__(self, net_info, init):
self.net_info = net_info
self.gene = np.zeros((self.net_info.node_num + self.net_info.out_num, self.net_info.max_in_num + 1)).astype(int)
self.is_active = np.empty(self.net_info.node_num + self.net_info.out_num).astype(bool)
self.is_pool = np.empty(self.net_info.node_num + self.net_info.out_num).astype(bool)
self.eval = None
self.size = None
if init:
print('init with specific architectures')
            self.init_gene_with_conv()  # in the case of starting from a convolution-only architecture
else:
self.init_gene() # generate initial individual randomly
def init_gene_with_conv(self):
# initial architecture
arch = ['S_ConvBlock_64_3']
input_layer_num = int(self.net_info.input_num / self.net_info.rows) + 1
output_layer_num = int(self.net_info.out_num / self.net_info.rows) + 1
layer_ids = [((self.net_info.cols - 1 - input_layer_num - output_layer_num) + i) // (len(arch)) for i in
range(len(arch))]
prev_id = 0 # i.e. input layer
current_layer = input_layer_num
        block_ids = []  # do not connect to these node ids
# building convolution net
for i, idx in enumerate(layer_ids):
current_layer += idx
n = current_layer * self.net_info.rows + np.random.randint(self.net_info.rows)
block_ids.append(n)
self.gene[n][0] = self.net_info.func_type.index(arch[i])
col = np.min((int(n / self.net_info.rows), self.net_info.cols))
max_connect_id = col * self.net_info.rows + self.net_info.input_num
min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \
if col - self.net_info.level_back >= 0 else 0
self.gene[n][1] = prev_id
for j in range(1, self.net_info.max_in_num):
self.gene[n][j + 1] = min_connect_id + np.random.randint(max_connect_id - min_connect_id)
prev_id = n + self.net_info.input_num
# output layer
n = self.net_info.node_num
type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num
self.gene[n][0] = np.random.randint(type_num)
col = np.min((int(n / self.net_info.rows), self.net_info.cols))
max_connect_id = col * self.net_info.rows + self.net_info.input_num
min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \
if col - self.net_info.level_back >= 0 else 0
self.gene[n][1] = prev_id
for i in range(1, self.net_info.max_in_num):
self.gene[n][i + 1] = min_connect_id + np.random.randint(max_connect_id - min_connect_id)
block_ids.append(n)
# intermediate node
for n in range(self.net_info.node_num + self.net_info.out_num):
if n in block_ids:
continue
# type gene
type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num
self.gene[n][0] = np.random.randint(type_num)
# connection gene
col = np.min((int(n / self.net_info.rows), self.net_info.cols))
max_connect_id = col * self.net_info.rows + self.net_info.input_num
min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \
if col - self.net_info.level_back >= 0 else 0
for i in range(self.net_info.max_in_num):
self.gene[n][i + 1] = min_connect_id + np.random.randint(max_connect_id - min_connect_id)
self.check_active()
def init_gene(self):
# intermediate node
for n in range(self.net_info.node_num + self.net_info.out_num):
# type gene
type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num
self.gene[n][0] = np.random.randint(type_num)
# connection gene
col = np.min((int(n / self.net_info.rows), self.net_info.cols))
max_connect_id = col * self.net_info.rows + self.net_info.input_num
min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \
if col - self.net_info.level_back >= 0 else 0
for i in range(self.net_info.max_in_num):
self.gene[n][i + 1] = min_connect_id + np.random.randint(max_connect_id - min_connect_id)
self.check_active()
def __check_course_to_out(self, n):
if not self.is_active[n]:
self.is_active[n] = True
t = self.gene[n][0]
if n >= self.net_info.node_num: # output node
in_num = self.net_info.out_in_num[t]
else: # intermediate node
in_num = self.net_info.func_in_num[t]
for i in range(in_num):
if self.gene[n][i + 1] >= self.net_info.input_num:
self.__check_course_to_out(self.gene[n][i + 1] - self.net_info.input_num)
def check_active(self):
# clear
self.is_active[:] = False
# start from output nodes
for n in range(self.net_info.out_num):
self.__check_course_to_out(self.net_info.node_num + n)
def check_pool(self):
is_pool = True
pool_num = 0
for n in range(self.net_info.node_num + self.net_info.out_num):
if self.is_active[n]:
if self.gene[n][0] > 19:
is_pool = False
pool_num += 1
return is_pool, pool_num
def __mutate(self, current, min_int, max_int):
mutated_gene = current
while current == mutated_gene:
mutated_gene = min_int + np.random.randint(max_int - min_int)
return mutated_gene
def mutation(self, mutation_rate=0.01):
active_check = False
for n in range(self.net_info.node_num + self.net_info.out_num):
t = self.gene[n][0]
# mutation for type gene
type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num
if np.random.rand() < mutation_rate and type_num > 1:
self.gene[n][0] = self.__mutate(self.gene[n][0], 0, type_num)
if self.is_active[n]:
active_check = True
# mutation for connection gene
col = np.min((int(n / self.net_info.rows), self.net_info.cols))
max_connect_id = col * self.net_info.rows + self.net_info.input_num
min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \
if col - self.net_info.level_back >= 0 else 0
in_num = self.net_info.func_in_num[t] if n < self.net_info.node_num else self.net_info.out_in_num[t]
for i in range(self.net_info.max_in_num):
if np.random.rand() < mutation_rate and max_connect_id - min_connect_id > 1:
self.gene[n][i + 1] = self.__mutate(self.gene[n][i + 1], min_connect_id, max_connect_id)
if self.is_active[n] and i < in_num:
active_check = True
self.check_active()
return active_check
def neutral_mutation(self, mutation_rate=0.01):
for n in range(self.net_info.node_num + self.net_info.out_num):
t = self.gene[n][0]
# mutation for type gene
type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num
if not self.is_active[n] and np.random.rand() < mutation_rate and type_num > 1:
self.gene[n][0] = self.__mutate(self.gene[n][0], 0, type_num)
# mutation for connection gene
col = np.min((int(n / self.net_info.rows), self.net_info.cols))
max_connect_id = col * self.net_info.rows + self.net_info.input_num
min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \
if col - self.net_info.level_back >= 0 else 0
in_num = self.net_info.func_in_num[t] if n < self.net_info.node_num else self.net_info.out_in_num[t]
for i in range(self.net_info.max_in_num):
if (not self.is_active[n] or i >= in_num) and np.random.rand() < mutation_rate \
and max_connect_id - min_connect_id > 1:
self.gene[n][i + 1] = self.__mutate(self.gene[n][i + 1], min_connect_id, max_connect_id)
self.check_active()
return False
def count_active_node(self):
return self.is_active.sum()
def copy(self, source):
self.net_info = source.net_info
self.gene = source.gene.copy()
self.is_active = source.is_active.copy()
self.eval = source.eval
self.size = source.size
def active_net_list(self):
net_list = [["input", 0, 0]]
active_cnt = np.arange(self.net_info.input_num + self.net_info.node_num + self.net_info.out_num)
active_cnt[self.net_info.input_num:] = np.cumsum(self.is_active)
for n, is_a in enumerate(self.is_active):
if is_a:
t = self.gene[n][0]
if n < self.net_info.node_num: # intermediate node
type_str = self.net_info.func_type[t]
else: # output node
type_str = self.net_info.out_type[t]
connections = [active_cnt[self.gene[n][i + 1]] for i in range(self.net_info.max_in_num)]
net_list.append([type_str] + connections)
return net_list
# CGP with (1 + \lambda)-ES
class CGP(object):
def __init__(self, net_info, eval_func, lam=4, imgSize=32, init=False, bias=0):
self.lam = lam
self.pop = [Individual(net_info, init) for _ in range(1 + self.lam)]
self.eval_func = eval_func
self.num_gen = 0
self.num_eval = 0
self.max_pool_num = int(math.log2(imgSize) - 2)
self.init = init
self.bias = bias
def _evaluation(self, pop, eval_flag):
# create network list
net_lists = []
active_index = np.where(eval_flag)[0]
for i in active_index:
net_lists.append(pop[i].active_net_list())
# evaluation
fp = self.eval_func(net_lists)
for i, j in enumerate(active_index):
if isinstance(fp[i], tuple):
pop[j].eval = fp[i][0]
pop[j].size = fp[i][1]
else:
pop[j].eval = fp[i]
pop[j].size = np.inf
evaluations_acc = np.zeros(len(pop))
evaluations_size = np.zeros(len(pop))
for i in range(len(pop)):
evaluations_acc[i] = pop[i].eval
evaluations_size[i] = pop[i].size
self.num_eval += len(net_lists)
return evaluations_acc, evaluations_size
def _log_data(self, net_info_type='active_only', start_time=0):
log_list = [self.num_gen, self.num_eval, time.time() - start_time, self.pop[0].eval,
self.pop[0].size, self.pop[0].count_active_node()]
if net_info_type == 'active_only':
log_list.append(self.pop[0].active_net_list())
elif net_info_type == 'full':
log_list += self.pop[0].gene.flatten().tolist()
else:
pass
return log_list
def _log_data_children(self, net_info_type='active_only', start_time=0, pop=None):
log_list = [self.num_gen, self.num_eval, time.time() - start_time, pop.eval, pop.size, pop.count_active_node()]
if net_info_type == 'active_only':
log_list.append(pop.active_net_list())
elif net_info_type == 'full':
log_list += pop.gene.flatten().tolist()
else:
pass
return log_list
def load_log(self, log_data):
self.num_gen = int(log_data[0])
self.num_eval = int(log_data[1])
net_info = self.pop[0].net_info
self.pop[0].eval = log_data[3]
self.pop[0].size = log_data[4]
print("Loaded Accuracy:", self.pop[0].eval)
self.pop[0].gene = np.int64(np.array(log_data[6:])).reshape(
(net_info.node_num + net_info.out_num, net_info.max_in_num + 1))
self.pop[0].check_active()
    # Evolution of CGP with a (1 + lambda)-ES.
    # At each iteration:
    #   - Generate lambda offspring in which at least one active node changes (i.e., forced mutation).
    #   - If no offspring improves on the parent, mutate the parent with a neutral
    #     mutation (leaving the active nodes unchanged).
def modified_evolution(self, max_eval=100, mutation_rate=0.01, log_path='./'):
with open(os.path.join(log_path, 'child.txt'), 'a') as fw_c:
writer_c = csv.writer(fw_c, lineterminator='\n')
start_time = time.time()
eval_flag = np.empty(self.lam)
active_num = self.pop[0].count_active_node()
_, pool_num = self.pop[0].check_pool()
if self.init:
pass
            else:  # in the case of not using an init individual
while active_num < self.pop[0].net_info.min_active_num or pool_num > self.max_pool_num:
self.pop[0].mutation(1.0)
active_num = self.pop[0].count_active_node()
_, pool_num = self.pop[0].check_pool()
if self.pop[0].eval is None:
self._evaluation([self.pop[0]], np.array([True]))
print(self._log_data(net_info_type='active_only', start_time=start_time))
while self.num_gen < max_eval:
self.num_gen += 1
# reproduction
for i in range(self.lam):
eval_flag[i] = False
self.pop[i + 1].copy(self.pop[0]) # copy a parent
active_num = self.pop[i + 1].count_active_node()
_, pool_num = self.pop[i + 1].check_pool()
# mutation (forced mutation)
while not eval_flag[i] or active_num < self.pop[
i + 1].net_info.min_active_num or pool_num > self.max_pool_num:
self.pop[i + 1].copy(self.pop[0]) # copy a parent
eval_flag[i] = self.pop[i + 1].mutation(mutation_rate) # mutation
active_num = self.pop[i + 1].count_active_node()
_, pool_num = self.pop[i + 1].check_pool()
# evaluation and selection
evaluations_acc, evaluations_size = self._evaluation(self.pop[1:], eval_flag=eval_flag)
evaluations_argsort = np.argsort(-evaluations_acc)
print(evaluations_acc, evaluations_argsort)
best_arg = evaluations_argsort[0]
# save
f = open(os.path.join(log_path, 'arch_child.txt'), 'a')
writer_f = csv.writer(f, lineterminator='\n')
for c in range(1 + self.lam):
writer_c.writerow(
self._log_data_children(net_info_type='full', start_time=start_time, pop=self.pop[c]))
writer_f.writerow(
self._log_data_children(net_info_type='active_only', start_time=start_time, pop=self.pop[c]))
f.close()
# replace the parent by the best individual
print("Comparing children with parent...")
print(f"Best Child's Accuracy {evaluations_acc[best_arg]}, Parent Accuracy: {self.pop[0].eval}")
if evaluations_acc[best_arg] > self.pop[0].eval:
self.pop[0].copy(self.pop[best_arg + 1])
print("Replacing parent with best child")
elif self.bias > 0:
found = False
print(f"Parent: Accuracy: {self.pop[0].eval}, Size: {self.pop[0].size}")
for i, idx in enumerate(evaluations_argsort):
print(f"Child {i + 1}: Accuracy: {evaluations_acc[idx]}, Size: {evaluations_size[idx]}")
if evaluations_acc[idx] > (self.pop[0].eval - self.bias) and \
evaluations_size[idx] < self.pop[0].size:
print("Replacing parent with child")
self.pop[0].copy(self.pop[idx + 1])
found = True
break
if not found:
self.pop[0].neutral_mutation(mutation_rate) # modify the parent (neutral mutation)
else:
self.pop[0].neutral_mutation(mutation_rate) # modify the parent (neutral mutation)
# display and save log
print(self._log_data(net_info_type='active_only', start_time=start_time))
fw = open(os.path.join(log_path, 'log_cgp.txt'), 'a')
writer = csv.writer(fw, lineterminator='\n')
writer.writerow(self._log_data(net_info_type='full', start_time=start_time))
fa = open(os.path.join(log_path, 'arch.txt'), 'a')
writer_a = csv.writer(fa, lineterminator='\n')
writer_a.writerow(self._log_data(net_info_type='active_only', start_time=start_time))
fw.close()
fa.close()
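# --- Illustrative usage sketch (editor's addition, not part of this file) ---
# CGP expects a network-description object (node/row/column counts, function
# set, level-back, ...) plus an evaluation callable that maps a list of
# active-net lists to fitness values.  All names below are hypothetical
# placeholders, shown only to illustrate the call pattern:
#
#   network_info = MyCgpNetInfo()             # must expose node_num, rows, cols, func_type, ...
#
#   def evaluate(net_lists):                  # one (accuracy, size) pair per candidate network
#       return [(0.0, 0) for _ in net_lists]
#
#   cgp = CGP(network_info, evaluate, lam=2, imgSize=32, bias=0)
#   cgp.modified_evolution(max_eval=10, mutation_rate=0.05, log_path='./logs')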
| 47.862534
| 120
| 0.584108
|
import csv
import time
import numpy as np
import math
import os
class Individual(object):
def __init__(self, net_info, init):
self.net_info = net_info
self.gene = np.zeros((self.net_info.node_num + self.net_info.out_num, self.net_info.max_in_num + 1)).astype(int)
self.is_active = np.empty(self.net_info.node_num + self.net_info.out_num).astype(bool)
self.is_pool = np.empty(self.net_info.node_num + self.net_info.out_num).astype(bool)
self.eval = None
self.size = None
if init:
print('init with specific architectures')
self.init_gene_with_conv()
else:
self.init_gene()
def init_gene_with_conv(self):
arch = ['S_ConvBlock_64_3']
input_layer_num = int(self.net_info.input_num / self.net_info.rows) + 1
output_layer_num = int(self.net_info.out_num / self.net_info.rows) + 1
layer_ids = [((self.net_info.cols - 1 - input_layer_num - output_layer_num) + i) // (len(arch)) for i in
range(len(arch))]
prev_id = 0
current_layer = input_layer_num
block_ids = []
for i, idx in enumerate(layer_ids):
current_layer += idx
n = current_layer * self.net_info.rows + np.random.randint(self.net_info.rows)
block_ids.append(n)
self.gene[n][0] = self.net_info.func_type.index(arch[i])
col = np.min((int(n / self.net_info.rows), self.net_info.cols))
max_connect_id = col * self.net_info.rows + self.net_info.input_num
min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \
if col - self.net_info.level_back >= 0 else 0
self.gene[n][1] = prev_id
for j in range(1, self.net_info.max_in_num):
self.gene[n][j + 1] = min_connect_id + np.random.randint(max_connect_id - min_connect_id)
prev_id = n + self.net_info.input_num
n = self.net_info.node_num
type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num
self.gene[n][0] = np.random.randint(type_num)
col = np.min((int(n / self.net_info.rows), self.net_info.cols))
max_connect_id = col * self.net_info.rows + self.net_info.input_num
min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \
if col - self.net_info.level_back >= 0 else 0
self.gene[n][1] = prev_id
for i in range(1, self.net_info.max_in_num):
self.gene[n][i + 1] = min_connect_id + np.random.randint(max_connect_id - min_connect_id)
block_ids.append(n)
for n in range(self.net_info.node_num + self.net_info.out_num):
if n in block_ids:
continue
type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num
self.gene[n][0] = np.random.randint(type_num)
col = np.min((int(n / self.net_info.rows), self.net_info.cols))
max_connect_id = col * self.net_info.rows + self.net_info.input_num
min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \
if col - self.net_info.level_back >= 0 else 0
for i in range(self.net_info.max_in_num):
self.gene[n][i + 1] = min_connect_id + np.random.randint(max_connect_id - min_connect_id)
self.check_active()
def init_gene(self):
for n in range(self.net_info.node_num + self.net_info.out_num):
type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num
self.gene[n][0] = np.random.randint(type_num)
col = np.min((int(n / self.net_info.rows), self.net_info.cols))
max_connect_id = col * self.net_info.rows + self.net_info.input_num
min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \
if col - self.net_info.level_back >= 0 else 0
for i in range(self.net_info.max_in_num):
self.gene[n][i + 1] = min_connect_id + np.random.randint(max_connect_id - min_connect_id)
self.check_active()
def __check_course_to_out(self, n):
if not self.is_active[n]:
self.is_active[n] = True
t = self.gene[n][0]
if n >= self.net_info.node_num:
in_num = self.net_info.out_in_num[t]
else:
in_num = self.net_info.func_in_num[t]
for i in range(in_num):
if self.gene[n][i + 1] >= self.net_info.input_num:
self.__check_course_to_out(self.gene[n][i + 1] - self.net_info.input_num)
def check_active(self):
self.is_active[:] = False
for n in range(self.net_info.out_num):
self.__check_course_to_out(self.net_info.node_num + n)
def check_pool(self):
is_pool = True
pool_num = 0
for n in range(self.net_info.node_num + self.net_info.out_num):
if self.is_active[n]:
if self.gene[n][0] > 19:
is_pool = False
pool_num += 1
return is_pool, pool_num
def __mutate(self, current, min_int, max_int):
mutated_gene = current
while current == mutated_gene:
mutated_gene = min_int + np.random.randint(max_int - min_int)
return mutated_gene
def mutation(self, mutation_rate=0.01):
active_check = False
for n in range(self.net_info.node_num + self.net_info.out_num):
t = self.gene[n][0]
type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num
if np.random.rand() < mutation_rate and type_num > 1:
self.gene[n][0] = self.__mutate(self.gene[n][0], 0, type_num)
if self.is_active[n]:
active_check = True
col = np.min((int(n / self.net_info.rows), self.net_info.cols))
max_connect_id = col * self.net_info.rows + self.net_info.input_num
min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \
if col - self.net_info.level_back >= 0 else 0
in_num = self.net_info.func_in_num[t] if n < self.net_info.node_num else self.net_info.out_in_num[t]
for i in range(self.net_info.max_in_num):
if np.random.rand() < mutation_rate and max_connect_id - min_connect_id > 1:
self.gene[n][i + 1] = self.__mutate(self.gene[n][i + 1], min_connect_id, max_connect_id)
if self.is_active[n] and i < in_num:
active_check = True
self.check_active()
return active_check
def neutral_mutation(self, mutation_rate=0.01):
for n in range(self.net_info.node_num + self.net_info.out_num):
t = self.gene[n][0]
type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num
if not self.is_active[n] and np.random.rand() < mutation_rate and type_num > 1:
self.gene[n][0] = self.__mutate(self.gene[n][0], 0, type_num)
col = np.min((int(n / self.net_info.rows), self.net_info.cols))
max_connect_id = col * self.net_info.rows + self.net_info.input_num
min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \
if col - self.net_info.level_back >= 0 else 0
in_num = self.net_info.func_in_num[t] if n < self.net_info.node_num else self.net_info.out_in_num[t]
for i in range(self.net_info.max_in_num):
if (not self.is_active[n] or i >= in_num) and np.random.rand() < mutation_rate \
and max_connect_id - min_connect_id > 1:
self.gene[n][i + 1] = self.__mutate(self.gene[n][i + 1], min_connect_id, max_connect_id)
self.check_active()
return False
def count_active_node(self):
return self.is_active.sum()
def copy(self, source):
self.net_info = source.net_info
self.gene = source.gene.copy()
self.is_active = source.is_active.copy()
self.eval = source.eval
self.size = source.size
def active_net_list(self):
net_list = [["input", 0, 0]]
active_cnt = np.arange(self.net_info.input_num + self.net_info.node_num + self.net_info.out_num)
active_cnt[self.net_info.input_num:] = np.cumsum(self.is_active)
for n, is_a in enumerate(self.is_active):
if is_a:
t = self.gene[n][0]
if n < self.net_info.node_num:
type_str = self.net_info.func_type[t]
else:
type_str = self.net_info.out_type[t]
connections = [active_cnt[self.gene[n][i + 1]] for i in range(self.net_info.max_in_num)]
net_list.append([type_str] + connections)
return net_list
class CGP(object):
def __init__(self, net_info, eval_func, lam=4, imgSize=32, init=False, bias=0):
self.lam = lam
self.pop = [Individual(net_info, init) for _ in range(1 + self.lam)]
self.eval_func = eval_func
self.num_gen = 0
self.num_eval = 0
self.max_pool_num = int(math.log2(imgSize) - 2)
self.init = init
self.bias = bias
def _evaluation(self, pop, eval_flag):
net_lists = []
active_index = np.where(eval_flag)[0]
for i in active_index:
net_lists.append(pop[i].active_net_list())
fp = self.eval_func(net_lists)
for i, j in enumerate(active_index):
if isinstance(fp[i], tuple):
pop[j].eval = fp[i][0]
pop[j].size = fp[i][1]
else:
pop[j].eval = fp[i]
pop[j].size = np.inf
evaluations_acc = np.zeros(len(pop))
evaluations_size = np.zeros(len(pop))
for i in range(len(pop)):
evaluations_acc[i] = pop[i].eval
evaluations_size[i] = pop[i].size
self.num_eval += len(net_lists)
return evaluations_acc, evaluations_size
def _log_data(self, net_info_type='active_only', start_time=0):
log_list = [self.num_gen, self.num_eval, time.time() - start_time, self.pop[0].eval,
self.pop[0].size, self.pop[0].count_active_node()]
if net_info_type == 'active_only':
log_list.append(self.pop[0].active_net_list())
elif net_info_type == 'full':
log_list += self.pop[0].gene.flatten().tolist()
else:
pass
return log_list
def _log_data_children(self, net_info_type='active_only', start_time=0, pop=None):
log_list = [self.num_gen, self.num_eval, time.time() - start_time, pop.eval, pop.size, pop.count_active_node()]
if net_info_type == 'active_only':
log_list.append(pop.active_net_list())
elif net_info_type == 'full':
log_list += pop.gene.flatten().tolist()
else:
pass
return log_list
def load_log(self, log_data):
self.num_gen = int(log_data[0])
self.num_eval = int(log_data[1])
net_info = self.pop[0].net_info
self.pop[0].eval = log_data[3]
self.pop[0].size = log_data[4]
print("Loaded Accuracy:", self.pop[0].eval)
self.pop[0].gene = np.int64(np.array(log_data[6:])).reshape(
(net_info.node_num + net_info.out_num, net_info.max_in_num + 1))
self.pop[0].check_active()
def modified_evolution(self, max_eval=100, mutation_rate=0.01, log_path='./'):
with open(os.path.join(log_path, 'child.txt'), 'a') as fw_c:
writer_c = csv.writer(fw_c, lineterminator='\n')
start_time = time.time()
eval_flag = np.empty(self.lam)
active_num = self.pop[0].count_active_node()
_, pool_num = self.pop[0].check_pool()
if self.init:
pass
else:
while active_num < self.pop[0].net_info.min_active_num or pool_num > self.max_pool_num:
self.pop[0].mutation(1.0)
active_num = self.pop[0].count_active_node()
_, pool_num = self.pop[0].check_pool()
if self.pop[0].eval is None:
self._evaluation([self.pop[0]], np.array([True]))
print(self._log_data(net_info_type='active_only', start_time=start_time))
while self.num_gen < max_eval:
self.num_gen += 1
for i in range(self.lam):
eval_flag[i] = False
self.pop[i + 1].copy(self.pop[0])
active_num = self.pop[i + 1].count_active_node()
_, pool_num = self.pop[i + 1].check_pool()
while not eval_flag[i] or active_num < self.pop[
i + 1].net_info.min_active_num or pool_num > self.max_pool_num:
self.pop[i + 1].copy(self.pop[0])
eval_flag[i] = self.pop[i + 1].mutation(mutation_rate)
active_num = self.pop[i + 1].count_active_node()
_, pool_num = self.pop[i + 1].check_pool()
evaluations_acc, evaluations_size = self._evaluation(self.pop[1:], eval_flag=eval_flag)
evaluations_argsort = np.argsort(-evaluations_acc)
print(evaluations_acc, evaluations_argsort)
best_arg = evaluations_argsort[0]
f = open(os.path.join(log_path, 'arch_child.txt'), 'a')
writer_f = csv.writer(f, lineterminator='\n')
for c in range(1 + self.lam):
writer_c.writerow(
self._log_data_children(net_info_type='full', start_time=start_time, pop=self.pop[c]))
writer_f.writerow(
self._log_data_children(net_info_type='active_only', start_time=start_time, pop=self.pop[c]))
f.close()
print("Comparing children with parent...")
print(f"Best Child's Accuracy {evaluations_acc[best_arg]}, Parent Accuracy: {self.pop[0].eval}")
if evaluations_acc[best_arg] > self.pop[0].eval:
self.pop[0].copy(self.pop[best_arg + 1])
print("Replacing parent with best child")
elif self.bias > 0:
found = False
print(f"Parent: Accuracy: {self.pop[0].eval}, Size: {self.pop[0].size}")
for i, idx in enumerate(evaluations_argsort):
print(f"Child {i + 1}: Accuracy: {evaluations_acc[idx]}, Size: {evaluations_size[idx]}")
if evaluations_acc[idx] > (self.pop[0].eval - self.bias) and \
evaluations_size[idx] < self.pop[0].size:
print("Replacing parent with child")
self.pop[0].copy(self.pop[idx + 1])
found = True
break
if not found:
self.pop[0].neutral_mutation(mutation_rate) # modify the parent (neutral mutation)
else:
self.pop[0].neutral_mutation(mutation_rate) # modify the parent (neutral mutation)
# display and save log
print(self._log_data(net_info_type='active_only', start_time=start_time))
fw = open(os.path.join(log_path, 'log_cgp.txt'), 'a')
writer = csv.writer(fw, lineterminator='\n')
writer.writerow(self._log_data(net_info_type='full', start_time=start_time))
fa = open(os.path.join(log_path, 'arch.txt'), 'a')
writer_a = csv.writer(fa, lineterminator='\n')
writer_a.writerow(self._log_data(net_info_type='active_only', start_time=start_time))
fw.close()
fa.close()
| true
| true
|
790df4d87f1f316506b7ed42cc4afd8e8add0212
| 19,206
|
py
|
Python
|
ocs_ci/ocs/ui/views.py
|
keesturam/ocs-ci
|
de85058255b6a50c65889b8077da326be28bdcc7
|
[
"MIT"
] | null | null | null |
ocs_ci/ocs/ui/views.py
|
keesturam/ocs-ci
|
de85058255b6a50c65889b8077da326be28bdcc7
|
[
"MIT"
] | null | null | null |
ocs_ci/ocs/ui/views.py
|
keesturam/ocs-ci
|
de85058255b6a50c65889b8077da326be28bdcc7
|
[
"MIT"
] | null | null | null |
from selenium.webdriver.common.by import By
osd_sizes = ("512", "2048", "4096")
login = {
"ocp_page": "Overview · Red Hat OpenShift Container Platform",
"username": ("inputUsername", By.ID),
"password": ("inputPassword", By.ID),
"click_login": ("//button[text()='Log in']", By.XPATH),
"flexy_kubeadmin": ('a[title="Log in with kube:admin"]', By.CSS_SELECTOR),
}
deployment = {
"click_install_ocs": ('a[data-test-id="operator-install-btn"]', By.CSS_SELECTOR),
"choose_ocs_version": (
'a[data-test="ocs-operator-ocs-catalogsource-openshift-marketplace"]',
By.CSS_SELECTOR,
),
"search_operators": ('input[placeholder="Filter by keyword..."]', By.CSS_SELECTOR),
"operators_tab": ("//button[text()='Operators']", By.XPATH),
"operatorhub_tab": ("OperatorHub", By.LINK_TEXT),
"installed_operators_tab": ("Installed Operators", By.LINK_TEXT),
"storage_cluster_tab": (
'a[data-test-id="horizontal-link-Storage Cluster"]',
By.CSS_SELECTOR,
),
"ocs_operator_installed": (
'a[data-test-operator-row="OpenShift Container Storage"]',
By.CSS_SELECTOR,
),
"search_operator_installed": ('input[data-test-id="item-filter"]', By.CSS_SELECTOR),
"thin_sc": ('a[id="thin-link"]', By.CSS_SELECTOR),
"gp2_sc": ('a[id="gp2-link"]', By.CSS_SELECTOR),
"managed-premium_sc": ('a[id="managed-premium-link"]', By.CSS_SELECTOR),
"osd_size_dropdown": ('button[data-test-id="dropdown-button"]', By.CSS_SELECTOR),
"512": ('button[data-test-dropdown-menu="512Gi"]', By.CSS_SELECTOR),
"2048": ('button[data-test-dropdown-menu="2Ti"]', By.CSS_SELECTOR),
"4096": ('button[data-test-dropdown-menu="4Ti"]', By.CSS_SELECTOR),
"all_nodes": ('input[aria-label="Select all rows"]', By.CSS_SELECTOR),
"wide_encryption": ('//*[@id="cluster-wide-encryption"]', By.XPATH),
"class_encryption": ('//*[@id="storage-class-encryption"]', By.XPATH),
"advanced_encryption": ('//*[@id="advanced-encryption"]', By.XPATH),
"kms_service_name": ('//*[@id="kms-service-name"]', By.XPATH),
"kms_address": ('//*[@id="kms-address"]', By.XPATH),
"kms_address_port": ('//*[@id="kms-address-port"]', By.XPATH),
"kms_token": ('//*[@id="kms-token"]', By.XPATH),
"create_on_review": ("//button[text()='Create']", By.XPATH),
"search_ocs_installed": ('input[data-test-id="item-filter"]', By.CSS_SELECTOR),
"all_nodes_lso": (
'input[id="auto-detect-volume-radio-all-nodes"]',
By.CSS_SELECTOR,
),
"lv_name": ('input[id="create-lvs-volume-set-name"]', By.CSS_SELECTOR),
"sc_name": ('input[id="create-lvs-storage-class-name"]', By.CSS_SELECTOR),
"all_nodes_create_sc": ('input[id="create-lvs-radio-all-nodes"]', By.CSS_SELECTOR),
"storage_class_dropdown_lso": (
'button[id="storage-class-dropdown"]',
By.CSS_SELECTOR,
),
"localblock_sc": ('a[id="localblock-link"]', By.CSS_SELECTOR),
"choose_local_storage_version": (
'a[data-test="local-storage-operator-redhat-operators-openshift-marketplace"]',
By.CSS_SELECTOR,
),
"click_install_lso": ('a[data-test-id="operator-install-btn"]', By.CSS_SELECTOR),
"yes": ("//*[contains(text(), 'Yes')]", By.XPATH),
"next": ("//*[contains(text(), 'Next')]", By.XPATH),
}
deployment_4_6 = {
"click_install_ocs_page": ("//button[text()='Install']", By.XPATH),
"create_storage_cluster": ("//button[text()='Create Storage Cluster']", By.XPATH),
"internal_mode": ('input[value="Internal"]', By.CSS_SELECTOR),
"internal-attached_devices": (
'input[value="Internal - Attached Devices"]',
By.CSS_SELECTOR,
),
"storage_class_dropdown": (
'button[id="ceph-sc-dropdown"]',
By.CSS_SELECTOR,
),
"enable_encryption": ('//span[@class="pf-c-switch__toggle"]', By.XPATH),
"click_install_lso_page": ("//button[text()='Install']", By.XPATH),
"project_dropdown": (
'button[class="pf-c-dropdown__toggle pf-m-plain"]',
By.CSS_SELECTOR,
),
"OpenShift Container Storage": ('a[id="openshift-storage-link"]', By.CSS_SELECTOR),
"Local Storage": ('a[id="openshift-local-storage-link"]', By.CSS_SELECTOR),
}
deployment_4_7 = {
"click_install_ocs_page": ('button[data-test="install-operator"]', By.CSS_SELECTOR),
"create_storage_cluster": ('button[data-test="item-create"]', By.CSS_SELECTOR),
"internal_mode": ('input[data-test="Internal-radio-input"]', By.CSS_SELECTOR),
"internal-attached_devices": (
'input[data-test="Internal - Attached Devices-radio-input"]',
By.CSS_SELECTOR,
),
"storage_class_dropdown": (
'button[data-test="storage-class-dropdown"]',
By.CSS_SELECTOR,
),
"enable_encryption": ('input[data-test="encryption-checkbox"]', By.CSS_SELECTOR),
"click_install_lso_page": ('button[data-test="install-operator"]', By.CSS_SELECTOR),
}
generic_locators = {
"project_selector": (
'button[class="pf-c-dropdown__toggle pf-m-plain"]',
By.CSS_SELECTOR,
),
"select_openshift-storage_project": (
'a[id="openshift-storage-link"]',
By.CSS_SELECTOR,
),
"create_resource_button": ("yaml-create", By.ID),
"search_resource_field": ('input[data-test-id="item-filter"]', By.CSS_SELECTOR),
"first_dropdown_option": (
'a[data-test="dropdown-menu-item-link"]',
By.CSS_SELECTOR,
),
"actions": ('button[data-test-id="actions-menu-button"]', By.CSS_SELECTOR),
"confirm_action": ("confirm-action", By.ID),
"submit_form": ('button[type="submit"]', By.CSS_SELECTOR),
"ocs_operator": ('//h1[text()="OpenShift Container Storage"]', By.XPATH),
"kebab_button": ('button[data-test-id="kebab-button"', By.CSS_SELECTOR),
"resource_status": ('span[data-test="status-text"]', By.CSS_SELECTOR),
"check_first_row_checkbox": ('input[name="checkrow0"]', By.CSS_SELECTOR),
"remove_search_filter": ('button[aria-label="close"]', By.CSS_SELECTOR),
"delete_resource_kebab_button": ('//*[contains(text(), "Delete")]', By.XPATH),
}
ocs_operator_locators = {
"backingstore_page": (
'a[data-test-id="horizontal-link-Backing Store"]',
By.CSS_SELECTOR,
),
"namespacestore_page": (
'a[data-test-id="horizontal-link-Namespace Store"]',
By.CSS_SELECTOR,
),
"bucketclass_page": (
'a[data-test-id="horizontal-link-Bucket Class"]',
By.CSS_SELECTOR,
),
}
mcg_stores = {
"store_name": ('input[data-test*="store-name"]', By.CSS_SELECTOR),
"provider_dropdown": ('button[data-test*="store-provider"]', By.CSS_SELECTOR),
"aws_provider": ("AWS S3-link", By.ID),
"aws_region_dropdown": ("region", By.ID),
"us_east_2_region": ("us-east-2-link", By.ID),
"aws_secret_dropdown": ("secret-dropdown", By.ID),
"aws_secret_search_field": (
'input[data-test-id="dropdown-text-filter"]',
By.CSS_SELECTOR,
),
"target_bucket": ("target-bucket", By.ID),
}
bucketclass = {
"standard_type": ("Standard", By.ID),
"namespace_type": ("Namespace", By.ID),
"bucketclass_name": ("bucketclassname-input", By.ID),
"spread_policy": ('input[data-test="placement-policy-spread1"]', By.CSS_SELECTOR),
"mirror_policy": ('input[data-test="placement-policy-mirror1"]', By.CSS_SELECTOR),
"single_policy": ("Single", By.ID),
"multi_policy": ("Multi", By.ID),
"cache_policy": ("Cache", By.ID),
"nss_dropdown": ('button[data-test="nns-dropdown-toggle"]', By.CSS_SELECTOR),
"nss_option_template": ('button[data-test="{}"]', By.CSS_SELECTOR),
"bs_dropdown": ('button[data-test="nbs-dropdown-toggle"]', By.CSS_SELECTOR),
"first_bs_dropdown_option": (
'button[data-test="mybs-dropdown-item"]',
By.CSS_SELECTOR,
),
"ttl_input": ("ttl-input", By.ID),
"ttl_time_unit_dropdown": ("timetolive-input", By.ID),
"ttl_minute_time_unit_button": ("MIN-link", By.ID),
}
obc = {
"storageclass_dropdown": ("sc-dropdown", By.ID),
"storageclass_text_field": (
'input[placeholder="Select StorageClass"]',
By.CSS_SELECTOR,
),
"bucketclass_dropdown": ("bc-dropdown", By.ID),
"bucketclass_text_field": (
'input[placeholder="Select BucketClass"]',
By.CSS_SELECTOR,
),
"default_bucketclass": ("noobaa-default-bucket-class-link", By.ID),
"obc_name": ("obc-name", By.ID),
"first_obc_link": ('a[class="co-resource-item__resource-name"]', By.CSS_SELECTOR),
"delete_obc": (
'button[data-test-action="Delete Object Bucket Claim"]',
By.CSS_SELECTOR,
),
}
pvc = {
"pvc_project_selector": (
'button[class="pf-c-dropdown__toggle pf-m-plain"]',
By.CSS_SELECTOR,
),
"select_openshift-storage_project": (
'a[id="openshift-storage-link"]',
By.CSS_SELECTOR,
),
"pvc_create_button": ('button[data-test="item-create"]', By.CSS_SELECTOR),
"pvc_storage_class_selector": (
'button[data-test="storageclass-dropdown"]',
By.CSS_SELECTOR,
),
"storage_class_name": ('//*[text()="{}"]', By.XPATH),
"ocs-storagecluster-ceph-rbd": (
'a[id="ocs-storagecluster-ceph-rbd-link"]',
By.CSS_SELECTOR,
),
"ocs-storagecluster-cephfs": (
'a[id="ocs-storagecluster-cephfs-link"]',
By.CSS_SELECTOR,
),
"ocs-storagecluster-ceph-rbd-thick": (
"a[id='ocs-storagecluster-ceph-rbd-thick-link'] div[class='text-muted small']",
By.CSS_SELECTOR,
),
"pvc_name": ('input[data-test="pvc-name"]', By.CSS_SELECTOR),
"ReadWriteOnce": (
'input[data-test="Single User (RWO)-radio-input"]',
By.CSS_SELECTOR,
),
"ReadWriteMany": (
'input[data-test="Shared Access (RWX)-radio-input"]',
By.CSS_SELECTOR,
),
"ReadOnlyMany": ('input[data-test="Read Only (ROX)-radio-input"]', By.CSS_SELECTOR),
"pvc_size": ('input[data-test="pvc-size"]', By.CSS_SELECTOR),
"pvc_create": ('button[data-test="create-pvc"]', By.CSS_SELECTOR),
"pvc_actions": ('button[data-test-id="actions-menu-button"]', By.CSS_SELECTOR),
"pvc_delete": (
'button[data-test-action="Delete PersistentVolumeClaim"]',
By.CSS_SELECTOR,
),
"confirm_pvc_deletion": ('button[data-test="confirm-action"]', By.CSS_SELECTOR),
"search_pvc": ('input[data-test-id="item-filter"]', By.CSS_SELECTOR),
}
pvc_4_7 = {
"test-pvc-fs": ('a[data-test-id="test-pvc-fs"]', By.CSS_SELECTOR),
"test-pvc-rbd": ("a[title='test-pvc-rbd']", By.CSS_SELECTOR),
"Block": ("input[value='Block']", By.CSS_SELECTOR),
"Filesystem": ("input[value='Filesystem']", By.CSS_SELECTOR),
"search-project": ("input[placeholder='Select Project...']", By.CSS_SELECTOR),
"expand_pvc": ("button[data-test-action='Expand PVC']", By.CSS_SELECTOR),
"resize-value": ("//input[@name='requestSizeValue']", By.XPATH),
"expand-btn": ("#confirm-action", By.CSS_SELECTOR),
"pvc-status": (
"dd[data-test-id='pvc-status'] span[data-test='status-text']",
By.CSS_SELECTOR,
),
"test-project-link": ("//a[normalize-space()='{}']", By.XPATH),
"expected-capacity": (
"//dd[contains(text(),'{}') and @data-test='pvc-requested-capacity']",
By.XPATH,
),
"new-capacity": (
"//dd[contains(text(),'{}') and @data-test-id='pvc-capacity']",
By.XPATH,
),
}
pvc_4_8 = {
"ReadWriteMany": ("input[value='ReadWriteMany']", By.CSS_SELECTOR),
"pvc_actions": ("button[aria-label='Actions']", By.CSS_SELECTOR),
"ReadWriteOnce": ("input[value='ReadWriteOnce']", By.CSS_SELECTOR),
"test-pvc-fs": ("a[title='test-pvc-fs']", By.CSS_SELECTOR),
"test-pvc-rbd-thick": ("a[title='test-pvc-rbd-thick']", By.CSS_SELECTOR),
"resize-pending": (
"div[class ='col-xs-4 col-sm-2 col-md-2'] span",
By.CSS_SELECTOR,
),
"search_pvc": ("input[placeholder='Search by name...']", By.CSS_SELECTOR),
}
page_nav = {
"Home": ("//button[text()='Home']", By.XPATH),
"overview_page": ("Overview", By.LINK_TEXT),
"projects_page": ("Projects", By.LINK_TEXT),
"search_page": ("Search", By.LINK_TEXT),
"explore_page": ("Explore", By.LINK_TEXT),
"events_page": ("Events", By.LINK_TEXT),
"Operators": ("//button[text()='Operators']", By.XPATH),
"operatorhub_page": ("OperatorHub", By.LINK_TEXT),
"installed_operators_page": ("Installed Operators", By.LINK_TEXT),
"Storage": ("//button[text()='Storage']", By.XPATH),
"persistentvolumes_page": ("PersistentVolumes", By.LINK_TEXT),
"persistentvolumeclaims_page": ("PersistentVolumeClaims", By.LINK_TEXT),
"storageclasses_page": ("StorageClasses", By.LINK_TEXT),
"volumesnapshots_page": ("VolumeSnapshots", By.LINK_TEXT),
"volumesnapshotclasses_page": ("VolumeSnapshotClasses", By.LINK_TEXT),
"volumesnapshotcontents_page": ("VolumeSnapshotContents", By.LINK_TEXT),
"object_buckets_page": ("Object Buckets", By.LINK_TEXT),
"object_bucket_claims_page": ("Object Bucket Claims", By.LINK_TEXT),
"Monitoring": ("//button[text()='Monitoring']", By.XPATH),
"alerting_page": ("Alerting", By.LINK_TEXT),
"metrics_page": ("Metrics", By.LINK_TEXT),
"dashboards_page": ("Dashboards", By.LINK_TEXT),
"Workloads": ("//button[text()='Workloads']", By.XPATH),
"Pods": ("Pods", By.LINK_TEXT),
"quickstarts": ('a[href="/quickstart"]', By.CSS_SELECTOR),
"block_pool_link": (
'a[data-test-id="horizontal-link-Block Pools"]',
By.CSS_SELECTOR,
),
}
add_capacity = {
"ocs_operator": (
'a[data-test-operator-row="OpenShift Container Storage"]',
By.CSS_SELECTOR,
),
"storage_cluster_tab": (
'a[data-test-id="horizontal-link-Storage Cluster"]',
By.CSS_SELECTOR,
),
"kebab_storage_cluster": ('button[data-test-id="kebab-button"', By.CSS_SELECTOR),
"add_capacity_button": ('button[data-test-action="Add Capacity"]', By.CSS_SELECTOR),
"select_sc_add_capacity": (
'button[data-test="add-cap-sc-dropdown"]',
By.CSS_SELECTOR,
),
"thin_sc": ('a[id="thin-link"]', By.CSS_SELECTOR),
"gp2_sc": ('a[id="gp2-link"]', By.CSS_SELECTOR),
"managed-premium_sc": ('a[id="managed-premium-link"]', By.CSS_SELECTOR),
"confirm_add_capacity": ('button[data-test="confirm-action"', By.CSS_SELECTOR),
"filter_pods": ('input[data-test-id="item-filter"]', By.CSS_SELECTOR),
}
block_pool = {
"create_block_pool": ("Create BlockPool", By.LINK_TEXT),
"new_pool_name": (
'input[data-test="new-pool-name-textbox"]',
By.CSS_SELECTOR,
),
"first_select_replica": ('button[data-test="replica-dropdown"]', By.CSS_SELECTOR),
"second_select_replica_2": ("//button[text()='2-way Replication']", By.XPATH),
"second_select_replica_3": ("//button[text()='3-way Replication']", By.XPATH),
"conpression_checkbox": (
'input[data-test="compression-checkbox"]',
By.CSS_SELECTOR,
),
"pool_confirm_create": ('button[data-test-id="confirm-action"]', By.CSS_SELECTOR),
"actions_inside_pool": ('button[aria-label="Actions"]', By.CSS_SELECTOR),
"edit_pool_inside_pool": (
'button[data-test-action="Edit BlockPool"]',
By.CSS_SELECTOR,
),
"delete_pool_inside_pool": (
'button[data-test-action="Delete BlockPool"]',
By.CSS_SELECTOR,
),
"confirm_delete_inside_pool": ("//button[text()='Delete']", By.XPATH),
"replica_dropdown_edit": ('button[data-test="replica-dropdown"]', By.CSS_SELECTOR),
"compression_checkbox_edit": (
'input[data-test="compression-checkbox"]',
By.CSS_SELECTOR,
),
"save_pool_edit": ('button[data-test-id="confirm-action"]', By.CSS_SELECTOR),
"pool_state_inside_pool": ('span[data-test="status-text"]', By.CSS_SELECTOR),
}
storageclass = {
"create_storageclass_button": ("Create StorageClass", By.LINK_TEXT),
"input_storageclass_name": ('input[id="storage-class-name"]', By.CSS_SELECTOR),
"provisioner_dropdown": (
'button[data-test="storage-class-provisioner-dropdown"]',
By.CSS_SELECTOR,
),
"rbd_provisioner": ("openshift-storage.rbd.csi.ceph.com", By.LINK_TEXT),
"pool_dropdown": ('button[id="pool-dropdown-id"]', By.CSS_SELECTOR),
"save_storageclass": ('button[id="save-changes"]', By.CSS_SELECTOR),
"action_inside_storageclass": (
'button[data-test-id="actions-menu-button"]',
By.CSS_SELECTOR,
),
"delete_inside_storageclass": (
'button[data-test-action="Delete StorageClass"]',
By.CSS_SELECTOR,
),
"confirm_delete_inside_storageclass": ("//button[text()='Delete']", By.XPATH),
}
validation = {
"object_service_button": ("//button[text()='Object Service']", By.XPATH),
"data_resiliency_button": ("//button[text()='Data Resiliency']", By.XPATH),
"search_ocs_installed": ('input[data-test-id="item-filter"]', By.CSS_SELECTOR),
"ocs_operator_installed": (
'a[data-test-operator-row="OpenShift Container Storage"]',
By.CSS_SELECTOR,
),
"osc_subscription_tab": (
'a[data-test-id="horizontal-link-olm~Subscription"]',
By.CSS_SELECTOR,
),
"osc_all_instances_tab": (
'a[data-test-id="horizontal-link-olm~All instances"]',
By.CSS_SELECTOR,
),
"osc_storage_cluster_tab": (
'a[data-test-id="horizontal-link-Storage Cluster"]',
By.CSS_SELECTOR,
),
"osc_backing_store_tab": (
'a[data-test-id="horizontal-link-Backing Store"]',
By.CSS_SELECTOR,
),
"osc_bucket_class_tab": (
'a[data-test-id="horizontal-link-Bucket Class"]',
By.CSS_SELECTOR,
),
}
validation_4_7 = {
"object_service_tab": (
'a[data-test-id="horizontal-link-Object Service"]',
By.CSS_SELECTOR,
),
"persistent_storage_tab": (
'a[data-test-id="horizontal-link-Persistent Storage"]',
By.CSS_SELECTOR,
),
}
validation_4_8 = {
"object_service_tab": (
'a[data-test-id="horizontal-link-Object"]',
By.CSS_SELECTOR,
),
"persistent_storage_tab": (
'a[data-test-id="horizontal-link-Block and File"]',
By.CSS_SELECTOR,
),
}
locators = {
"4.8": {
"login": login,
"page": page_nav,
"deployment": {**deployment, **deployment_4_7},
"generic": generic_locators,
"ocs_operator": ocs_operator_locators,
"obc": obc,
"bucketclass": bucketclass,
"mcg_stores": mcg_stores,
"pvc": {**pvc, **pvc_4_7, **pvc_4_8},
"validation": {**validation, **validation_4_8},
"add_capacity": add_capacity,
"block_pool": block_pool,
"storageclass": storageclass,
},
"4.7": {
"login": login,
"page": page_nav,
"deployment": {**deployment, **deployment_4_7},
"pvc": {**pvc, **pvc_4_7},
"add_capacity": add_capacity,
"validation": {**validation, **validation_4_7},
},
"4.6": {
"login": login,
"page": page_nav,
"deployment": {**deployment, **deployment_4_6},
"pvc": pvc,
},
}
| 40.0125
| 88
| 0.6272
|
from selenium.webdriver.common.by import By
osd_sizes = ("512", "2048", "4096")
login = {
"ocp_page": "Overview · Red Hat OpenShift Container Platform",
"username": ("inputUsername", By.ID),
"password": ("inputPassword", By.ID),
"click_login": ("//button[text()='Log in']", By.XPATH),
"flexy_kubeadmin": ('a[title="Log in with kube:admin"]', By.CSS_SELECTOR),
}
deployment = {
"click_install_ocs": ('a[data-test-id="operator-install-btn"]', By.CSS_SELECTOR),
"choose_ocs_version": (
'a[data-test="ocs-operator-ocs-catalogsource-openshift-marketplace"]',
By.CSS_SELECTOR,
),
"search_operators": ('input[placeholder="Filter by keyword..."]', By.CSS_SELECTOR),
"operators_tab": ("//button[text()='Operators']", By.XPATH),
"operatorhub_tab": ("OperatorHub", By.LINK_TEXT),
"installed_operators_tab": ("Installed Operators", By.LINK_TEXT),
"storage_cluster_tab": (
'a[data-test-id="horizontal-link-Storage Cluster"]',
By.CSS_SELECTOR,
),
"ocs_operator_installed": (
'a[data-test-operator-row="OpenShift Container Storage"]',
By.CSS_SELECTOR,
),
"search_operator_installed": ('input[data-test-id="item-filter"]', By.CSS_SELECTOR),
"thin_sc": ('a[id="thin-link"]', By.CSS_SELECTOR),
"gp2_sc": ('a[id="gp2-link"]', By.CSS_SELECTOR),
"managed-premium_sc": ('a[id="managed-premium-link"]', By.CSS_SELECTOR),
"osd_size_dropdown": ('button[data-test-id="dropdown-button"]', By.CSS_SELECTOR),
"512": ('button[data-test-dropdown-menu="512Gi"]', By.CSS_SELECTOR),
"2048": ('button[data-test-dropdown-menu="2Ti"]', By.CSS_SELECTOR),
"4096": ('button[data-test-dropdown-menu="4Ti"]', By.CSS_SELECTOR),
"all_nodes": ('input[aria-label="Select all rows"]', By.CSS_SELECTOR),
"wide_encryption": ('//*[@id="cluster-wide-encryption"]', By.XPATH),
"class_encryption": ('//*[@id="storage-class-encryption"]', By.XPATH),
"advanced_encryption": ('//*[@id="advanced-encryption"]', By.XPATH),
"kms_service_name": ('//*[@id="kms-service-name"]', By.XPATH),
"kms_address": ('//*[@id="kms-address"]', By.XPATH),
"kms_address_port": ('//*[@id="kms-address-port"]', By.XPATH),
"kms_token": ('//*[@id="kms-token"]', By.XPATH),
"create_on_review": ("//button[text()='Create']", By.XPATH),
"search_ocs_installed": ('input[data-test-id="item-filter"]', By.CSS_SELECTOR),
"all_nodes_lso": (
'input[id="auto-detect-volume-radio-all-nodes"]',
By.CSS_SELECTOR,
),
"lv_name": ('input[id="create-lvs-volume-set-name"]', By.CSS_SELECTOR),
"sc_name": ('input[id="create-lvs-storage-class-name"]', By.CSS_SELECTOR),
"all_nodes_create_sc": ('input[id="create-lvs-radio-all-nodes"]', By.CSS_SELECTOR),
"storage_class_dropdown_lso": (
'button[id="storage-class-dropdown"]',
By.CSS_SELECTOR,
),
"localblock_sc": ('a[id="localblock-link"]', By.CSS_SELECTOR),
"choose_local_storage_version": (
'a[data-test="local-storage-operator-redhat-operators-openshift-marketplace"]',
By.CSS_SELECTOR,
),
"click_install_lso": ('a[data-test-id="operator-install-btn"]', By.CSS_SELECTOR),
"yes": ("//*[contains(text(), 'Yes')]", By.XPATH),
"next": ("//*[contains(text(), 'Next')]", By.XPATH),
}
deployment_4_6 = {
"click_install_ocs_page": ("//button[text()='Install']", By.XPATH),
"create_storage_cluster": ("//button[text()='Create Storage Cluster']", By.XPATH),
"internal_mode": ('input[value="Internal"]', By.CSS_SELECTOR),
"internal-attached_devices": (
'input[value="Internal - Attached Devices"]',
By.CSS_SELECTOR,
),
"storage_class_dropdown": (
'button[id="ceph-sc-dropdown"]',
By.CSS_SELECTOR,
),
"enable_encryption": ('//span[@class="pf-c-switch__toggle"]', By.XPATH),
"click_install_lso_page": ("//button[text()='Install']", By.XPATH),
"project_dropdown": (
'button[class="pf-c-dropdown__toggle pf-m-plain"]',
By.CSS_SELECTOR,
),
"OpenShift Container Storage": ('a[id="openshift-storage-link"]', By.CSS_SELECTOR),
"Local Storage": ('a[id="openshift-local-storage-link"]', By.CSS_SELECTOR),
}
deployment_4_7 = {
"click_install_ocs_page": ('button[data-test="install-operator"]', By.CSS_SELECTOR),
"create_storage_cluster": ('button[data-test="item-create"]', By.CSS_SELECTOR),
"internal_mode": ('input[data-test="Internal-radio-input"]', By.CSS_SELECTOR),
"internal-attached_devices": (
'input[data-test="Internal - Attached Devices-radio-input"]',
By.CSS_SELECTOR,
),
"storage_class_dropdown": (
'button[data-test="storage-class-dropdown"]',
By.CSS_SELECTOR,
),
"enable_encryption": ('input[data-test="encryption-checkbox"]', By.CSS_SELECTOR),
"click_install_lso_page": ('button[data-test="install-operator"]', By.CSS_SELECTOR),
}
generic_locators = {
"project_selector": (
'button[class="pf-c-dropdown__toggle pf-m-plain"]',
By.CSS_SELECTOR,
),
"select_openshift-storage_project": (
'a[id="openshift-storage-link"]',
By.CSS_SELECTOR,
),
"create_resource_button": ("yaml-create", By.ID),
"search_resource_field": ('input[data-test-id="item-filter"]', By.CSS_SELECTOR),
"first_dropdown_option": (
'a[data-test="dropdown-menu-item-link"]',
By.CSS_SELECTOR,
),
"actions": ('button[data-test-id="actions-menu-button"]', By.CSS_SELECTOR),
"confirm_action": ("confirm-action", By.ID),
"submit_form": ('button[type="submit"]', By.CSS_SELECTOR),
"ocs_operator": ('//h1[text()="OpenShift Container Storage"]', By.XPATH),
"kebab_button": ('button[data-test-id="kebab-button"', By.CSS_SELECTOR),
"resource_status": ('span[data-test="status-text"]', By.CSS_SELECTOR),
"check_first_row_checkbox": ('input[name="checkrow0"]', By.CSS_SELECTOR),
"remove_search_filter": ('button[aria-label="close"]', By.CSS_SELECTOR),
"delete_resource_kebab_button": ('//*[contains(text(), "Delete")]', By.XPATH),
}
ocs_operator_locators = {
"backingstore_page": (
'a[data-test-id="horizontal-link-Backing Store"]',
By.CSS_SELECTOR,
),
"namespacestore_page": (
'a[data-test-id="horizontal-link-Namespace Store"]',
By.CSS_SELECTOR,
),
"bucketclass_page": (
'a[data-test-id="horizontal-link-Bucket Class"]',
By.CSS_SELECTOR,
),
}
mcg_stores = {
"store_name": ('input[data-test*="store-name"]', By.CSS_SELECTOR),
"provider_dropdown": ('button[data-test*="store-provider"]', By.CSS_SELECTOR),
"aws_provider": ("AWS S3-link", By.ID),
"aws_region_dropdown": ("region", By.ID),
"us_east_2_region": ("us-east-2-link", By.ID),
"aws_secret_dropdown": ("secret-dropdown", By.ID),
"aws_secret_search_field": (
'input[data-test-id="dropdown-text-filter"]',
By.CSS_SELECTOR,
),
"target_bucket": ("target-bucket", By.ID),
}
bucketclass = {
"standard_type": ("Standard", By.ID),
"namespace_type": ("Namespace", By.ID),
"bucketclass_name": ("bucketclassname-input", By.ID),
"spread_policy": ('input[data-test="placement-policy-spread1"]', By.CSS_SELECTOR),
"mirror_policy": ('input[data-test="placement-policy-mirror1"]', By.CSS_SELECTOR),
"single_policy": ("Single", By.ID),
"multi_policy": ("Multi", By.ID),
"cache_policy": ("Cache", By.ID),
"nss_dropdown": ('button[data-test="nns-dropdown-toggle"]', By.CSS_SELECTOR),
"nss_option_template": ('button[data-test="{}"]', By.CSS_SELECTOR),
"bs_dropdown": ('button[data-test="nbs-dropdown-toggle"]', By.CSS_SELECTOR),
"first_bs_dropdown_option": (
'button[data-test="mybs-dropdown-item"]',
By.CSS_SELECTOR,
),
"ttl_input": ("ttl-input", By.ID),
"ttl_time_unit_dropdown": ("timetolive-input", By.ID),
"ttl_minute_time_unit_button": ("MIN-link", By.ID),
}
obc = {
"storageclass_dropdown": ("sc-dropdown", By.ID),
"storageclass_text_field": (
'input[placeholder="Select StorageClass"]',
By.CSS_SELECTOR,
),
"bucketclass_dropdown": ("bc-dropdown", By.ID),
"bucketclass_text_field": (
'input[placeholder="Select BucketClass"]',
By.CSS_SELECTOR,
),
"default_bucketclass": ("noobaa-default-bucket-class-link", By.ID),
"obc_name": ("obc-name", By.ID),
"first_obc_link": ('a[class="co-resource-item__resource-name"]', By.CSS_SELECTOR),
"delete_obc": (
'button[data-test-action="Delete Object Bucket Claim"]',
By.CSS_SELECTOR,
),
}
pvc = {
"pvc_project_selector": (
'button[class="pf-c-dropdown__toggle pf-m-plain"]',
By.CSS_SELECTOR,
),
"select_openshift-storage_project": (
'a[id="openshift-storage-link"]',
By.CSS_SELECTOR,
),
"pvc_create_button": ('button[data-test="item-create"]', By.CSS_SELECTOR),
"pvc_storage_class_selector": (
'button[data-test="storageclass-dropdown"]',
By.CSS_SELECTOR,
),
"storage_class_name": ('//*[text()="{}"]', By.XPATH),
"ocs-storagecluster-ceph-rbd": (
'a[id="ocs-storagecluster-ceph-rbd-link"]',
By.CSS_SELECTOR,
),
"ocs-storagecluster-cephfs": (
'a[id="ocs-storagecluster-cephfs-link"]',
By.CSS_SELECTOR,
),
"ocs-storagecluster-ceph-rbd-thick": (
"a[id='ocs-storagecluster-ceph-rbd-thick-link'] div[class='text-muted small']",
By.CSS_SELECTOR,
),
"pvc_name": ('input[data-test="pvc-name"]', By.CSS_SELECTOR),
"ReadWriteOnce": (
'input[data-test="Single User (RWO)-radio-input"]',
By.CSS_SELECTOR,
),
"ReadWriteMany": (
'input[data-test="Shared Access (RWX)-radio-input"]',
By.CSS_SELECTOR,
),
"ReadOnlyMany": ('input[data-test="Read Only (ROX)-radio-input"]', By.CSS_SELECTOR),
"pvc_size": ('input[data-test="pvc-size"]', By.CSS_SELECTOR),
"pvc_create": ('button[data-test="create-pvc"]', By.CSS_SELECTOR),
"pvc_actions": ('button[data-test-id="actions-menu-button"]', By.CSS_SELECTOR),
"pvc_delete": (
'button[data-test-action="Delete PersistentVolumeClaim"]',
By.CSS_SELECTOR,
),
"confirm_pvc_deletion": ('button[data-test="confirm-action"]', By.CSS_SELECTOR),
"search_pvc": ('input[data-test-id="item-filter"]', By.CSS_SELECTOR),
}
pvc_4_7 = {
"test-pvc-fs": ('a[data-test-id="test-pvc-fs"]', By.CSS_SELECTOR),
"test-pvc-rbd": ("a[title='test-pvc-rbd']", By.CSS_SELECTOR),
"Block": ("input[value='Block']", By.CSS_SELECTOR),
"Filesystem": ("input[value='Filesystem']", By.CSS_SELECTOR),
"search-project": ("input[placeholder='Select Project...']", By.CSS_SELECTOR),
"expand_pvc": ("button[data-test-action='Expand PVC']", By.CSS_SELECTOR),
"resize-value": ("//input[@name='requestSizeValue']", By.XPATH),
"expand-btn": ("#confirm-action", By.CSS_SELECTOR),
"pvc-status": (
"dd[data-test-id='pvc-status'] span[data-test='status-text']",
By.CSS_SELECTOR,
),
"test-project-link": ("//a[normalize-space()='{}']", By.XPATH),
"expected-capacity": (
"//dd[contains(text(),'{}') and @data-test='pvc-requested-capacity']",
By.XPATH,
),
"new-capacity": (
"//dd[contains(text(),'{}') and @data-test-id='pvc-capacity']",
By.XPATH,
),
}
pvc_4_8 = {
"ReadWriteMany": ("input[value='ReadWriteMany']", By.CSS_SELECTOR),
"pvc_actions": ("button[aria-label='Actions']", By.CSS_SELECTOR),
"ReadWriteOnce": ("input[value='ReadWriteOnce']", By.CSS_SELECTOR),
"test-pvc-fs": ("a[title='test-pvc-fs']", By.CSS_SELECTOR),
"test-pvc-rbd-thick": ("a[title='test-pvc-rbd-thick']", By.CSS_SELECTOR),
"resize-pending": (
"div[class ='col-xs-4 col-sm-2 col-md-2'] span",
By.CSS_SELECTOR,
),
"search_pvc": ("input[placeholder='Search by name...']", By.CSS_SELECTOR),
}
page_nav = {
"Home": ("//button[text()='Home']", By.XPATH),
"overview_page": ("Overview", By.LINK_TEXT),
"projects_page": ("Projects", By.LINK_TEXT),
"search_page": ("Search", By.LINK_TEXT),
"explore_page": ("Explore", By.LINK_TEXT),
"events_page": ("Events", By.LINK_TEXT),
"Operators": ("//button[text()='Operators']", By.XPATH),
"operatorhub_page": ("OperatorHub", By.LINK_TEXT),
"installed_operators_page": ("Installed Operators", By.LINK_TEXT),
"Storage": ("//button[text()='Storage']", By.XPATH),
"persistentvolumes_page": ("PersistentVolumes", By.LINK_TEXT),
"persistentvolumeclaims_page": ("PersistentVolumeClaims", By.LINK_TEXT),
"storageclasses_page": ("StorageClasses", By.LINK_TEXT),
"volumesnapshots_page": ("VolumeSnapshots", By.LINK_TEXT),
"volumesnapshotclasses_page": ("VolumeSnapshotClasses", By.LINK_TEXT),
"volumesnapshotcontents_page": ("VolumeSnapshotContents", By.LINK_TEXT),
"object_buckets_page": ("Object Buckets", By.LINK_TEXT),
"object_bucket_claims_page": ("Object Bucket Claims", By.LINK_TEXT),
"Monitoring": ("//button[text()='Monitoring']", By.XPATH),
"alerting_page": ("Alerting", By.LINK_TEXT),
"metrics_page": ("Metrics", By.LINK_TEXT),
"dashboards_page": ("Dashboards", By.LINK_TEXT),
"Workloads": ("//button[text()='Workloads']", By.XPATH),
"Pods": ("Pods", By.LINK_TEXT),
"quickstarts": ('a[href="/quickstart"]', By.CSS_SELECTOR),
"block_pool_link": (
'a[data-test-id="horizontal-link-Block Pools"]',
By.CSS_SELECTOR,
),
}
add_capacity = {
"ocs_operator": (
'a[data-test-operator-row="OpenShift Container Storage"]',
By.CSS_SELECTOR,
),
"storage_cluster_tab": (
'a[data-test-id="horizontal-link-Storage Cluster"]',
By.CSS_SELECTOR,
),
"kebab_storage_cluster": ('button[data-test-id="kebab-button"', By.CSS_SELECTOR),
"add_capacity_button": ('button[data-test-action="Add Capacity"]', By.CSS_SELECTOR),
"select_sc_add_capacity": (
'button[data-test="add-cap-sc-dropdown"]',
By.CSS_SELECTOR,
),
"thin_sc": ('a[id="thin-link"]', By.CSS_SELECTOR),
"gp2_sc": ('a[id="gp2-link"]', By.CSS_SELECTOR),
"managed-premium_sc": ('a[id="managed-premium-link"]', By.CSS_SELECTOR),
"confirm_add_capacity": ('button[data-test="confirm-action"', By.CSS_SELECTOR),
"filter_pods": ('input[data-test-id="item-filter"]', By.CSS_SELECTOR),
}
block_pool = {
"create_block_pool": ("Create BlockPool", By.LINK_TEXT),
"new_pool_name": (
'input[data-test="new-pool-name-textbox"]',
By.CSS_SELECTOR,
),
"first_select_replica": ('button[data-test="replica-dropdown"]', By.CSS_SELECTOR),
"second_select_replica_2": ("//button[text()='2-way Replication']", By.XPATH),
"second_select_replica_3": ("//button[text()='3-way Replication']", By.XPATH),
"conpression_checkbox": (
'input[data-test="compression-checkbox"]',
By.CSS_SELECTOR,
),
"pool_confirm_create": ('button[data-test-id="confirm-action"]', By.CSS_SELECTOR),
"actions_inside_pool": ('button[aria-label="Actions"]', By.CSS_SELECTOR),
"edit_pool_inside_pool": (
'button[data-test-action="Edit BlockPool"]',
By.CSS_SELECTOR,
),
"delete_pool_inside_pool": (
'button[data-test-action="Delete BlockPool"]',
By.CSS_SELECTOR,
),
"confirm_delete_inside_pool": ("//button[text()='Delete']", By.XPATH),
"replica_dropdown_edit": ('button[data-test="replica-dropdown"]', By.CSS_SELECTOR),
"compression_checkbox_edit": (
'input[data-test="compression-checkbox"]',
By.CSS_SELECTOR,
),
"save_pool_edit": ('button[data-test-id="confirm-action"]', By.CSS_SELECTOR),
"pool_state_inside_pool": ('span[data-test="status-text"]', By.CSS_SELECTOR),
}
storageclass = {
"create_storageclass_button": ("Create StorageClass", By.LINK_TEXT),
"input_storageclass_name": ('input[id="storage-class-name"]', By.CSS_SELECTOR),
"provisioner_dropdown": (
'button[data-test="storage-class-provisioner-dropdown"]',
By.CSS_SELECTOR,
),
"rbd_provisioner": ("openshift-storage.rbd.csi.ceph.com", By.LINK_TEXT),
"pool_dropdown": ('button[id="pool-dropdown-id"]', By.CSS_SELECTOR),
"save_storageclass": ('button[id="save-changes"]', By.CSS_SELECTOR),
"action_inside_storageclass": (
'button[data-test-id="actions-menu-button"]',
By.CSS_SELECTOR,
),
"delete_inside_storageclass": (
'button[data-test-action="Delete StorageClass"]',
By.CSS_SELECTOR,
),
"confirm_delete_inside_storageclass": ("//button[text()='Delete']", By.XPATH),
}
validation = {
"object_service_button": ("//button[text()='Object Service']", By.XPATH),
"data_resiliency_button": ("//button[text()='Data Resiliency']", By.XPATH),
"search_ocs_installed": ('input[data-test-id="item-filter"]', By.CSS_SELECTOR),
"ocs_operator_installed": (
'a[data-test-operator-row="OpenShift Container Storage"]',
By.CSS_SELECTOR,
),
"osc_subscription_tab": (
'a[data-test-id="horizontal-link-olm~Subscription"]',
By.CSS_SELECTOR,
),
"osc_all_instances_tab": (
'a[data-test-id="horizontal-link-olm~All instances"]',
By.CSS_SELECTOR,
),
"osc_storage_cluster_tab": (
'a[data-test-id="horizontal-link-Storage Cluster"]',
By.CSS_SELECTOR,
),
"osc_backing_store_tab": (
'a[data-test-id="horizontal-link-Backing Store"]',
By.CSS_SELECTOR,
),
"osc_bucket_class_tab": (
'a[data-test-id="horizontal-link-Bucket Class"]',
By.CSS_SELECTOR,
),
}
validation_4_7 = {
"object_service_tab": (
'a[data-test-id="horizontal-link-Object Service"]',
By.CSS_SELECTOR,
),
"persistent_storage_tab": (
'a[data-test-id="horizontal-link-Persistent Storage"]',
By.CSS_SELECTOR,
),
}
validation_4_8 = {
"object_service_tab": (
'a[data-test-id="horizontal-link-Object"]',
By.CSS_SELECTOR,
),
"persistent_storage_tab": (
'a[data-test-id="horizontal-link-Block and File"]',
By.CSS_SELECTOR,
),
}
locators = {
"4.8": {
"login": login,
"page": page_nav,
"deployment": {**deployment, **deployment_4_7},
"generic": generic_locators,
"ocs_operator": ocs_operator_locators,
"obc": obc,
"bucketclass": bucketclass,
"mcg_stores": mcg_stores,
"pvc": {**pvc, **pvc_4_7, **pvc_4_8},
"validation": {**validation, **validation_4_8},
"add_capacity": add_capacity,
"block_pool": block_pool,
"storageclass": storageclass,
},
"4.7": {
"login": login,
"page": page_nav,
"deployment": {**deployment, **deployment_4_7},
"pvc": {**pvc, **pvc_4_7},
"add_capacity": add_capacity,
"validation": {**validation, **validation_4_7},
},
"4.6": {
"login": login,
"page": page_nav,
"deployment": {**deployment, **deployment_4_6},
"pvc": pvc,
},
}
| true
| true
|
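A minimal usage sketch for the locators record above (not part of the original file): it assumes a running Selenium WebDriver and a reachable console URL, both placeholders, and shows how a (value, by) tuple from the version-keyed `locators` dict could be resolved and clicked.
from selenium import webdriver

def click_locator(driver, locator):
    # Each entry above is a (value, by) tuple, e.g. ("OperatorHub", By.LINK_TEXT),
    # so it is unpacked and passed to find_element in (by, value) order.
    value, by = locator
    driver.find_element(by, value).click()

driver = webdriver.Chrome()                 # assumes a local chromedriver is available
driver.get("https://console.example.com")   # placeholder console URL
ui = locators["4.8"]
click_locator(driver, ui["page"]["operatorhub_page"])
driver.quit()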
790df4e7a9c04ae8a13942dddbafbb797339cb4a
| 741
|
py
|
Python
|
src/dictionaryCode/other/siderData1.py
|
ankita094/BioIntMed
|
a76616a3a87b02f3523a77704d6b9bc42253f9a7
|
[
"MIT"
] | null | null | null |
src/dictionaryCode/other/siderData1.py
|
ankita094/BioIntMed
|
a76616a3a87b02f3523a77704d6b9bc42253f9a7
|
[
"MIT"
] | null | null | null |
src/dictionaryCode/other/siderData1.py
|
ankita094/BioIntMed
|
a76616a3a87b02f3523a77704d6b9bc42253f9a7
|
[
"MIT"
] | null | null | null |
# CODE 1 --- prepare the list of drug / side-effect relations from the SIDER database ---
# Python 3.6.5 | Anaconda, Inc.
import sys
import glob
import errno
import csv
path = '/home/16AT72P01/Excelra/SIDER1/output/adverse_effects.tsv'
files = glob.glob(path)
unique_sideeffect = set()
unique_drug = set()
unique_pair = set()
with open(path) as f1:
reader = csv.DictReader(f1, quotechar='"', delimiter='\t', quoting=csv.QUOTE_ALL, skipinitialspace=True)
print(reader)
for row in reader:
unique_drug.add(row['drug_name'])
unique_sideeffect.add(row['adverse_effect'])
val = row['drug_name']+"|"+row['adverse_effect']
unique_pair.add(val)
f1.close()
print(len(unique_drug))
print(len(unique_sideeffect))
print(len(unique_pair))
| 25.551724
| 105
| 0.735493
|
import sys
import glob
import errno
import csv
path = '/home/16AT72P01/Excelra/SIDER1/output/adverse_effects.tsv'
files = glob.glob(path)
unique_sideeffect = set()
unique_drug = set()
unique_pair = set()
with open(path) as f1:
reader = csv.DictReader(f1, quotechar='"', delimiter='\t', quoting=csv.QUOTE_ALL, skipinitialspace=True)
print(reader)
for row in reader:
unique_drug.add(row['drug_name'])
unique_sideeffect.add(row['adverse_effect'])
val = row['drug_name']+"|"+row['adverse_effect']
unique_pair.add(val)
f1.close()
print(len(unique_drug))
print(len(unique_sideeffect))
print(len(unique_pair))
| true
| true
|
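A hedged follow-up sketch for the SIDER script above (not part of the original): it writes the collected drug / side-effect pairs back out as a TSV. The output path and helper name are illustrative assumptions.
import csv

def dump_pairs(pairs, out_path="drug_side_effect_pairs.tsv"):  # out_path is illustrative
    with open(out_path, "w", newline="") as out:
        writer = csv.writer(out, delimiter="\t")
        writer.writerow(["drug_name", "adverse_effect"])
        for pair in sorted(pairs):
            drug, effect = pair.split("|", 1)  # pairs were stored as "drug|adverse_effect"
            writer.writerow([drug, effect])

# e.g. dump_pairs(unique_pair)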
790df56e8912cf9310145a1e2cea8740ba1983f7
| 2,515
|
py
|
Python
|
src/users/views/user.py
|
system-design-2/user-service
|
5bf71e15d7c0969bac55a0e53298711877b02aa1
|
[
"MIT"
] | null | null | null |
src/users/views/user.py
|
system-design-2/user-service
|
5bf71e15d7c0969bac55a0e53298711877b02aa1
|
[
"MIT"
] | null | null | null |
src/users/views/user.py
|
system-design-2/user-service
|
5bf71e15d7c0969bac55a0e53298711877b02aa1
|
[
"MIT"
] | null | null | null |
import logging
from django.utils.decorators import method_decorator
from django_filters.rest_framework import DjangoFilterBackend
from drf_yasg.utils import swagger_auto_schema
from rest_condition import Or
from rest_framework import filters, viewsets
from rest_framework.permissions import IsAdminUser, IsAuthenticated
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework_simplejwt.authentication import JWTAuthentication
from rest_framework.permissions import AllowAny, IsAuthenticated
from base.documentation import jwt_header
from users.models import Device
from users.serializers import DeviceSerializer, DeviceListSerializer
logger = logging.getLogger('user_app')
@method_decorator(name='list', decorator=swagger_auto_schema(manual_parameters=[jwt_header]))
@method_decorator(name='retrieve', decorator=swagger_auto_schema(manual_parameters=[jwt_header]))
@method_decorator(name='create', decorator=swagger_auto_schema(manual_parameters=[jwt_header]))
class DeviceViewSet(viewsets.ModelViewSet):
"""
Creating Device
"""
permission_classes = [IsAuthenticated]
authentication_classes = [JWTAuthentication]
queryset = Device.objects.all()
serializer_class = DeviceSerializer
filter_backends = [filters.SearchFilter, DjangoFilterBackend]
search_fields = ['user_id', 'user__username']
filter_fields = ['user_id', 'user__username']
http_method_names = ['get', 'post']
def get_queryset(self):
if IsAdminUser.has_permission(self, request=self.request, view=self.get_view_name()):
return super().get_queryset()
return super().get_queryset().filter(user=self.request.user)
# def get_permissions(self):
# return [permission() for permission in permission_classes]
def get_serializer_class(self):
if self.action in ['list']:
return DeviceListSerializer
return DeviceSerializer
class DeviceList(APIView):
"""
An API endpoint for Getting device list
"""
permission_classes = [AllowAny]
def get(self, request, user_id):
queryset = Device.objects.filter(user_id=user_id).select_related('user')
serializer_context = {
'request': request
}
serializer = DeviceListSerializer(queryset, context=serializer_context, many=True)
data = serializer.data
if data:
return Response(data)
return Response({'message': f"No Device data found", 'data': []})
| 37.537313
| 97
| 0.749105
|
import logging
from django.utils.decorators import method_decorator
from django_filters.rest_framework import DjangoFilterBackend
from drf_yasg.utils import swagger_auto_schema
from rest_condition import Or
from rest_framework import filters, viewsets
from rest_framework.permissions import IsAdminUser, IsAuthenticated
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework_simplejwt.authentication import JWTAuthentication
from rest_framework.permissions import AllowAny, IsAuthenticated
from base.documentation import jwt_header
from users.models import Device
from users.serializers import DeviceSerializer, DeviceListSerializer
logger = logging.getLogger('user_app')
@method_decorator(name='list', decorator=swagger_auto_schema(manual_parameters=[jwt_header]))
@method_decorator(name='retrieve', decorator=swagger_auto_schema(manual_parameters=[jwt_header]))
@method_decorator(name='create', decorator=swagger_auto_schema(manual_parameters=[jwt_header]))
class DeviceViewSet(viewsets.ModelViewSet):
permission_classes = [IsAuthenticated]
authentication_classes = [JWTAuthentication]
queryset = Device.objects.all()
serializer_class = DeviceSerializer
filter_backends = [filters.SearchFilter, DjangoFilterBackend]
search_fields = ['user_id', 'user__username']
filter_fields = ['user_id', 'user__username']
http_method_names = ['get', 'post']
def get_queryset(self):
if IsAdminUser.has_permission(self, request=self.request, view=self.get_view_name()):
return super().get_queryset()
return super().get_queryset().filter(user=self.request.user)
def get_serializer_class(self):
if self.action in ['list']:
return DeviceListSerializer
return DeviceSerializer
class DeviceList(APIView):
permission_classes = [AllowAny]
def get(self, request, user_id):
queryset = Device.objects.filter(user_id=user_id).select_related('user')
serializer_context = {
'request': request
}
serializer = DeviceListSerializer(queryset, context=serializer_context, many=True)
data = serializer.data
if data:
return Response(data)
return Response({'message': f"No Device data found", 'data': []})
| true
| true
|
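A hedged sketch of how the DeviceList view above might be exercised with DRF's test client; the URL path is an assumption, since the project's urls.py is not part of this record.
from rest_framework.test import APIClient

client = APIClient()
# Hypothetical route wired to DeviceList.as_view(); adjust to the real urls.py.
response = client.get("/api/users/1/devices/")
print(response.status_code, response.json())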
790df7fb23e384f7f4dd00412dd888030f392d8f
| 3,149
|
py
|
Python
|
tests/test_multiple_packages_with_tasks.py
|
orlevii/realm
|
e561ba09df4fbdc3bf60c8678462da4f55033894
|
[
"MIT"
] | 3
|
2021-06-17T06:27:16.000Z
|
2022-03-14T09:34:42.000Z
|
tests/test_multiple_packages_with_tasks.py
|
orlevii/realm
|
e561ba09df4fbdc3bf60c8678462da4f55033894
|
[
"MIT"
] | null | null | null |
tests/test_multiple_packages_with_tasks.py
|
orlevii/realm
|
e561ba09df4fbdc3bf60c8678462da4f55033894
|
[
"MIT"
] | null | null | null |
import unittest
from realm.cli.application import Application
from realm.cli.commands.install import InstallCommand
from realm.cli.commands.ls import LsCommand
from realm.cli.commands.task import TaskCommand
from realm.entities import Config, RealmContext
from realm.utils.child_process import ChildProcess
from tests.common import get_tests_root_dir, captured_output
REPO_DIR = get_tests_root_dir().joinpath('scenarios/multiple_packages_with_tasks')
class TestCommands(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
# Create config once
cls.cfg = Config.from_file(realm_json_file=str(REPO_DIR.joinpath('realm.json')))
def setUp(self) -> None:
# Create context every test
self.ctx = RealmContext(config=self.cfg,
projects=Application.get_projects(self.cfg))
def test_scan(self):
found = len(self.ctx.projects)
self.assertEqual(found, 1)
def test_ls(self):
cmd = LsCommand(self.ctx)
with captured_output() as (out, _):
cmd.run()
output = out.getvalue().strip()
self.assertEqual(output, 'pkg@0.1.0')
def test_task_install(self):
install_cmd = InstallCommand(self.ctx)
task_cmd = TaskCommand(self.ctx, task_name='test')
self.assertEqual(len(task_cmd.ctx.projects), 1)
with captured_output(stderr=False) as (out, _):
install_cmd.run()
task_cmd.run()
output = out.getvalue()
self.assertIn('Installing the current project: pkg', output)
self.assertIn('Poe => python -m unittest discover -s tests -v -p "test_*.py"', output)
def test_git_diff(self):
cmd = LsCommand(self.ctx, since='.')
with captured_output() as (out, _):
cmd.run()
output = out.getvalue().strip()
self.assertEqual(output, '')
def test_git_diff_with_change(self):
pkg_proj = [p for p in self.ctx.projects if p.name == 'pkg'][0]
try:
with pkg_proj.source_dir.joinpath('pyproject.toml').open('a') as f:
print('', file=f)
cmd = LsCommand(self.ctx, since='.')
with captured_output() as (out, _):
cmd.run()
output = out.getvalue().strip()
self.assertEqual(output, 'pkg@0.1.0')
finally:
ChildProcess.run(f'git checkout {pkg_proj.source_dir}')
def test_scope_filter(self):
cmd = LsCommand(self.ctx, scope=['p*'])
with captured_output() as (out, _):
cmd.run()
output = out.getvalue().strip()
self.assertEqual(output, 'pkg@0.1.0')
def test_ignore_filter(self):
cmd = LsCommand(self.ctx, ignore=['p*'])
with captured_output() as (out, _):
cmd.run()
output = out.getvalue().strip()
self.assertEqual(output, '')
def test_match_filter(self):
cmd = LsCommand(self.ctx, match=['labels.type=package'])
with captured_output() as (out, _):
cmd.run()
output = out.getvalue().strip()
self.assertEqual(output, 'pkg@0.1.0')
| 33.5
| 94
| 0.617021
|
import unittest
from realm.cli.application import Application
from realm.cli.commands.install import InstallCommand
from realm.cli.commands.ls import LsCommand
from realm.cli.commands.task import TaskCommand
from realm.entities import Config, RealmContext
from realm.utils.child_process import ChildProcess
from tests.common import get_tests_root_dir, captured_output
REPO_DIR = get_tests_root_dir().joinpath('scenarios/multiple_packages_with_tasks')
class TestCommands(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.cfg = Config.from_file(realm_json_file=str(REPO_DIR.joinpath('realm.json')))
def setUp(self) -> None:
self.ctx = RealmContext(config=self.cfg,
projects=Application.get_projects(self.cfg))
def test_scan(self):
found = len(self.ctx.projects)
self.assertEqual(found, 1)
def test_ls(self):
cmd = LsCommand(self.ctx)
with captured_output() as (out, _):
cmd.run()
output = out.getvalue().strip()
self.assertEqual(output, 'pkg@0.1.0')
def test_task_install(self):
install_cmd = InstallCommand(self.ctx)
task_cmd = TaskCommand(self.ctx, task_name='test')
self.assertEqual(len(task_cmd.ctx.projects), 1)
with captured_output(stderr=False) as (out, _):
install_cmd.run()
task_cmd.run()
output = out.getvalue()
self.assertIn('Installing the current project: pkg', output)
self.assertIn('Poe => python -m unittest discover -s tests -v -p "test_*.py"', output)
def test_git_diff(self):
cmd = LsCommand(self.ctx, since='.')
with captured_output() as (out, _):
cmd.run()
output = out.getvalue().strip()
self.assertEqual(output, '')
def test_git_diff_with_change(self):
pkg_proj = [p for p in self.ctx.projects if p.name == 'pkg'][0]
try:
with pkg_proj.source_dir.joinpath('pyproject.toml').open('a') as f:
print('', file=f)
cmd = LsCommand(self.ctx, since='.')
with captured_output() as (out, _):
cmd.run()
output = out.getvalue().strip()
self.assertEqual(output, 'pkg@0.1.0')
finally:
ChildProcess.run(f'git checkout {pkg_proj.source_dir}')
def test_scope_filter(self):
cmd = LsCommand(self.ctx, scope=['p*'])
with captured_output() as (out, _):
cmd.run()
output = out.getvalue().strip()
self.assertEqual(output, 'pkg@0.1.0')
def test_ignore_filter(self):
cmd = LsCommand(self.ctx, ignore=['p*'])
with captured_output() as (out, _):
cmd.run()
output = out.getvalue().strip()
self.assertEqual(output, '')
def test_match_filter(self):
cmd = LsCommand(self.ctx, match=['labels.type=package'])
with captured_output() as (out, _):
cmd.run()
output = out.getvalue().strip()
self.assertEqual(output, 'pkg@0.1.0')
| true
| true
|
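tests.common.captured_output is imported but not shown in this record; the sketch below is an assumption about its behaviour, inferred from how the tests call it (optionally leaving stderr untouched and yielding a pair of buffers).
import sys
from contextlib import contextmanager
from io import StringIO

@contextmanager
def captured_output(stderr=True):
    new_out, new_err = StringIO(), StringIO()
    old_out, old_err = sys.stdout, sys.stderr
    try:
        sys.stdout = new_out
        if stderr:
            sys.stderr = new_err
        yield new_out, new_err
    finally:
        # always restore the real streams, even if the test body raises
        sys.stdout, sys.stderr = old_out, old_err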
790df8504762ffa2a359c7df6676f6dc68a78386
| 409
|
py
|
Python
|
curiefense/curielogserver/app/main.py
|
fossabot/curiefense
|
6941f8aa08bcac1b0cf87c36ddb0037917a38c5a
|
[
"Apache-2.0"
] | 1
|
2020-11-15T06:27:05.000Z
|
2020-11-15T06:27:05.000Z
|
curiefense/curielogserver/app/main.py
|
fossabot/curiefense
|
6941f8aa08bcac1b0cf87c36ddb0037917a38c5a
|
[
"Apache-2.0"
] | 3
|
2022-02-24T09:58:32.000Z
|
2022-03-01T20:05:07.000Z
|
curiefense/curielogserver/app/main.py
|
xavier-rbz/curiefense
|
44200a90c515fe184e9c66ea662b2643adcbd34e
|
[
"Apache-2.0"
] | 1
|
2021-01-07T20:51:48.000Z
|
2021-01-07T20:51:48.000Z
|
from curielogserver import app, get_default_dbconfig
import os
import time
import psycopg2.pool
from psycopg2 import OperationalError
retries = 10
while retries > 0:
retries -= 1
try:
app.config['postgreSQL_pool'] = psycopg2.pool.ThreadedConnectionPool(1, 20, get_default_dbconfig())
break
except OperationalError:
if retries == 0:
raise
time.sleep(1)
| 24.058824
| 107
| 0.691932
|
from curielogserver import app, get_default_dbconfig
import os
import time
import psycopg2.pool
from psycopg2 import OperationalError
retries = 10
while retries > 0:
retries -= 1
try:
app.config['postgreSQL_pool'] = psycopg2.pool.ThreadedConnectionPool(1, 20, get_default_dbconfig())
break
except OperationalError:
if retries == 0:
raise
time.sleep(1)
| true
| true
|
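A usage sketch (not part of the original module): borrowing a connection from the pool created above and returning it afterwards. The helper name and query are placeholders.
def run_query(sql, params=None):
    pool = app.config['postgreSQL_pool']
    conn = pool.getconn()
    try:
        with conn.cursor() as cur:
            cur.execute(sql, params)
            return cur.fetchall()
    finally:
        # always hand the connection back to the pool
        pool.putconn(conn)

# e.g. rows = run_query("SELECT 1")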
790df908af3666ae813f54b97ab4e3aa3b0ef1c0
| 6,947
|
py
|
Python
|
src/data/generateData.py
|
oesst/HRTF_neural_model
|
494d29c514eaad3aee575f77d08a59a9d011a415
|
[
"MIT"
] | null | null | null |
src/data/generateData.py
|
oesst/HRTF_neural_model
|
494d29c514eaad3aee575f77d08a59a9d011a415
|
[
"MIT"
] | null | null | null |
src/data/generateData.py
|
oesst/HRTF_neural_model
|
494d29c514eaad3aee575f77d08a59a9d011a415
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
from os import listdir
from os.path import isfile, join
import numpy as np
import soundfile as sf
from scipy import io
import scipy.signal as sp
from src.features import gtgram
ROOT = Path(__file__).resolve().parents[2]
# set the path to the sound files
SOUND_FILES = ROOT / 'data/raw/sound_samples/'
# create a list of the sound files
SOUND_FILES = list(SOUND_FILES.glob('**/*.wav'))
# Define up to which frequency the data should be generated
def create_data(freq_bands=24, participant_number=19, snr=0.2, normalize=False, azimuth=12, time_window=0.1, max_freq=20000):
str_r = 'data/processed_' + str(max_freq) + 'Hz/binaural_right_0_gammatone_' + str(time_window) + '_window_{0:03d}'.format(participant_number) + '_cipic_' + str(
int(snr * 100)) + '_srn_' + str(freq_bands) + '_channels_' + str((azimuth - 12) * 10) + '_azi_' + str(normalize) + '_norm.npy'
str_l = 'data/processed_' + str(max_freq) + 'Hz/binaural_left_0_gammatone_' + str(time_window) + '_window_{0:03d}'.format(participant_number) + '_cipic_' + str(
int(snr * 100)) + '_srn_' + str(freq_bands) + '_channels_' + str((azimuth - 12) * 10) + '_azi_' + str(normalize) + '_norm.npy'
path_data_r = ROOT / str_r
path_data_l = ROOT / str_l
# check if we can load the data from a file
if path_data_r.is_file() and path_data_l.is_file():
print('Data set found. Loading from file : ' + str_r)
return np.load(path_data_r), np.load(path_data_l)
else:
print('Creating data set : ' + str_l)
# read the HRIR data
hrtf_path = (
ROOT / 'data/raw/hrtfs/hrir_{0:03d}.mat'.format(participant_number)).resolve()
hrir_mat = io.loadmat(hrtf_path.as_posix())
# get the data for the left ear
hrir_l = hrir_mat['hrir_l']
# get the data for the right ear
hrir_r = hrir_mat['hrir_r']
# use always all elevations -> 50
psd_all_i = np.zeros((len(SOUND_FILES), 50, freq_bands))
psd_all_c = np.zeros((len(SOUND_FILES), 50, freq_bands))
# temporal_means = np.zeros((hrir_elevs.shape[0],87))
for i in range(psd_all_i.shape[0]):
for i_elevs in range(psd_all_i.shape[1]):
# read the hrir for a specific location
hrir_elevs = np.squeeze(hrir_l[azimuth, i_elevs, :])
# load a sound sample
signal = sf.read(SOUND_FILES[i].as_posix())[0]
# add noise to the signal
signal_elevs = (1 - snr) * sp.lfilter(hrir_elevs, 1, signal) + \
snr * (signal + np.random.random(signal.shape[0]) * snr)
###### TAKE THE ENTIRE SIGNAL #######
# window_means = get_spectrum(signal_elevs,nperseg=welch_nperseg)
#####################################
# read the hrir for a specific location
hrir_elevs = np.squeeze(hrir_r[azimuth, i_elevs, :])
# add noise to the signal
signal_elevs_c = (1 - snr) * sp.lfilter(hrir_elevs, 1, signal) + \
snr * (signal + np.random.random(signal.shape[0]) * snr)
# Default gammatone-based spectrogram parameters
twin = time_window
thop = twin / 2
fmin = 20
fs = 44100
###### Apply Gammatone Filter Bank ##############
y = gtgram.gtgram(signal_elevs, fs, twin,
thop, freq_bands, fmin, max_freq)
y = (20 * np.log10(y + 1))
window_means = np.mean(y, axis=1)
psd_all_i[i, i_elevs, :] = window_means
y = gtgram.gtgram(signal_elevs_c, fs,
twin, thop, freq_bands, fmin, max_freq)
y = (20 * np.log10(y + 1))
window_means = np.mean(y, axis=1)
psd_all_c[i, i_elevs, :] = window_means
#################################################
np.save(path_data_r.absolute(), psd_all_c)
np.save(path_data_l.absolute(), psd_all_i)
return psd_all_c, psd_all_i
def main():
""" This script creates HRTF filtered sound samples of the sounds given in the folder SOUND_FILES.
This is done for each participant's HRTF specified in participant_numbers.
ALL ELEVATIONS (50) are taken to filter the data.
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
########################################################################
######################## Set parameters ################################
########################################################################
    normalize = False  # parameter is not considered
time_window = 0.1 # time window for spectrogram in sec
# Parameter to test
snrs = np.arange(0, 1.1, 0.1) # Signal to noise ratio
# snrs = np.array([0.2]) # Signal to noise ratio
# snrs = np.array([0.2]) # Signal to noise ratio
# freq_bandss = np.array([32, 64, 128]) # Frequency bands in resulting data
freq_bandss = np.array([128]) # Frequency bands in resulting data
# azimuths = np.arange(0, 25, 1) # which azimuths to create
azimuths = np.array([12]) # which azimuths to create
participant_numbers = np.array([1, 2, 3, 8, 9, 10, 11,
12, 15, 17, 18, 19, 20,
21, 27, 28, 33, 40, 44,
48, 50, 51, 58, 59, 60,
61, 65, 119, 124, 126,
127, 131, 133, 134, 135,
137, 147, 148, 152, 153,
154, 155, 156, 158, 162,
163, 165])
# define max frequency for gammatone filter bank
max_freqs = np.array([16000, 20000])
# participant_numbers = participant_numbers[::-1]
# snrs = snrs[::-1]
# freq_bandss = freq_bandss[::-1]
########################################################################
########################################################################
# walk over all parameter combinations
for _, participant_number in enumerate(participant_numbers):
for _, snr in enumerate(snrs):
for _, freq_bands in enumerate(freq_bandss):
for _, azimuth in enumerate(azimuths):
for _, max_freq in enumerate(max_freqs):
psd_all_c, psd_all_i = create_data(freq_bands, participant_number, snr, normalize, azimuth, time_window, max_freq=max_freq)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
main()
| 43.968354
| 165
| 0.539513
|
import click
import logging
from pathlib import Path
from os import listdir
from os.path import isfile, join
import numpy as np
import soundfile as sf
from scipy import io
import scipy.signal as sp
from src.features import gtgram
ROOT = Path(__file__).resolve().parents[2]
SOUND_FILES = ROOT / 'data/raw/sound_samples/'
SOUND_FILES = list(SOUND_FILES.glob('**/*.wav'))
def create_data(freq_bands=24, participant_number=19, snr=0.2, normalize=False, azimuth=12, time_window=0.1, max_freq=20000):
str_r = 'data/processed_' + str(max_freq) + 'Hz/binaural_right_0_gammatone_' + str(time_window) + '_window_{0:03d}'.format(participant_number) + '_cipic_' + str(
int(snr * 100)) + '_srn_' + str(freq_bands) + '_channels_' + str((azimuth - 12) * 10) + '_azi_' + str(normalize) + '_norm.npy'
str_l = 'data/processed_' + str(max_freq) + 'Hz/binaural_left_0_gammatone_' + str(time_window) + '_window_{0:03d}'.format(participant_number) + '_cipic_' + str(
int(snr * 100)) + '_srn_' + str(freq_bands) + '_channels_' + str((azimuth - 12) * 10) + '_azi_' + str(normalize) + '_norm.npy'
path_data_r = ROOT / str_r
path_data_l = ROOT / str_l
if path_data_r.is_file() and path_data_l.is_file():
print('Data set found. Loading from file : ' + str_r)
return np.load(path_data_r), np.load(path_data_l)
else:
print('Creating data set : ' + str_l)
hrtf_path = (
ROOT / 'data/raw/hrtfs/hrir_{0:03d}.mat'.format(participant_number)).resolve()
hrir_mat = io.loadmat(hrtf_path.as_posix())
hrir_l = hrir_mat['hrir_l']
hrir_r = hrir_mat['hrir_r']
psd_all_i = np.zeros((len(SOUND_FILES), 50, freq_bands))
psd_all_c = np.zeros((len(SOUND_FILES), 50, freq_bands))
for i in range(psd_all_i.shape[0]):
for i_elevs in range(psd_all_i.shape[1]):
hrir_elevs = np.squeeze(hrir_l[azimuth, i_elevs, :])
signal = sf.read(SOUND_FILES[i].as_posix())[0]
signal_elevs = (1 - snr) * sp.lfilter(hrir_elevs, 1, signal) + \
snr * (signal + np.random.random(signal.shape[0]) * snr)
| true
| true
|
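An illustrative check (not part of the original script): loading one pair of arrays written by create_data() and confirming their shape. The file names mirror the naming convention built in create_data() for participant 19, snr 0.2, 128 bands, azimuth 12 and max_freq 20000, and are otherwise an assumption.
import numpy as np

psd_right = np.load('data/processed_20000Hz/binaural_right_0_gammatone_0.1_window_019_cipic_20_srn_128_channels_0_azi_False_norm.npy')
psd_left = np.load('data/processed_20000Hz/binaural_left_0_gammatone_0.1_window_019_cipic_20_srn_128_channels_0_azi_False_norm.npy')
print(psd_right.shape, psd_left.shape)  # expected: (n_sound_files, 50 elevations, 128 bands)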
790df9f628d3cb8a6382a7969703167a2921418d
| 2,267
|
py
|
Python
|
zerver/management/commands/convert_bot_to_outgoing_webhook.py
|
lunayach/zulip
|
2d8c1f6d93499aa6395cfcb1895859569890953f
|
[
"Apache-2.0"
] | null | null | null |
zerver/management/commands/convert_bot_to_outgoing_webhook.py
|
lunayach/zulip
|
2d8c1f6d93499aa6395cfcb1895859569890953f
|
[
"Apache-2.0"
] | 1
|
2021-11-15T17:53:42.000Z
|
2021-11-15T17:53:42.000Z
|
zerver/management/commands/convert_bot_to_outgoing_webhook.py
|
lunayach/zulip
|
2d8c1f6d93499aa6395cfcb1895859569890953f
|
[
"Apache-2.0"
] | null | null | null |
from argparse import ArgumentParser
from typing import Any
from zerver.lib.management import ZulipBaseCommand
from zerver.models import Service, UserProfile
class Command(ZulipBaseCommand):
help = """Given an existing bot, converts it into an outgoing webhook bot."""
def add_arguments(self, parser: ArgumentParser) -> None:
self.add_realm_args(parser)
parser.add_argument('bot_email', metavar='<bot_email>', type=str,
help='email of bot')
parser.add_argument('service_name', metavar='<service_name>', type=str,
help='name of Service object to create')
parser.add_argument('base_url', metavar='<base_url>', type=str,
help='Endpoint URL of outgoing webhook')
# TODO: Add token and interface as arguments once OutgoingWebhookWorker
# uses these fields on the Service object.
def handle(self, *args: Any, **options: str) -> None:
bot_email = options['bot_email']
service_name = options['service_name']
base_url = options['base_url']
realm = self.get_realm(options)
if not bot_email:
print('Email of existing bot must be provided')
exit(1)
if not service_name:
print('Name for Service object must be provided')
exit(1)
if not base_url:
print('Endpoint URL of outgoing webhook must be provided')
exit(1)
# TODO: Normalize email?
bot_profile = self.get_user(email=bot_email, realm=realm)
if not bot_profile.is_bot:
print('User %s is not a bot' % (bot_email,))
exit(1)
if bot_profile.is_outgoing_webhook_bot:
print('%s is already marked as an outgoing webhook bot' % (bot_email,))
exit(1)
Service.objects.create(name=service_name,
user_profile=bot_profile,
base_url=base_url,
token='',
interface=1)
bot_profile.bot_type = UserProfile.OUTGOING_WEBHOOK_BOT
bot_profile.save()
print('Successfully converted %s into an outgoing webhook bot' % (bot_email,))
| 37.783333
| 86
| 0.599912
|
from argparse import ArgumentParser
from typing import Any
from zerver.lib.management import ZulipBaseCommand
from zerver.models import Service, UserProfile
class Command(ZulipBaseCommand):
help = """Given an existing bot, converts it into an outgoing webhook bot."""
def add_arguments(self, parser: ArgumentParser) -> None:
self.add_realm_args(parser)
parser.add_argument('bot_email', metavar='<bot_email>', type=str,
help='email of bot')
parser.add_argument('service_name', metavar='<service_name>', type=str,
help='name of Service object to create')
parser.add_argument('base_url', metavar='<base_url>', type=str,
help='Endpoint URL of outgoing webhook')
def handle(self, *args: Any, **options: str) -> None:
bot_email = options['bot_email']
service_name = options['service_name']
base_url = options['base_url']
realm = self.get_realm(options)
if not bot_email:
print('Email of existing bot must be provided')
exit(1)
if not service_name:
print('Name for Service object must be provided')
exit(1)
if not base_url:
print('Endpoint URL of outgoing webhook must be provided')
exit(1)
bot_profile = self.get_user(email=bot_email, realm=realm)
if not bot_profile.is_bot:
print('User %s is not a bot' % (bot_email,))
exit(1)
if bot_profile.is_outgoing_webhook_bot:
print('%s is already marked as an outgoing webhook bot' % (bot_email,))
exit(1)
Service.objects.create(name=service_name,
user_profile=bot_profile,
base_url=base_url,
token='',
interface=1)
bot_profile.bot_type = UserProfile.OUTGOING_WEBHOOK_BOT
bot_profile.save()
print('Successfully converted %s into an outgoing webhook bot' % (bot_email,))
| true
| true
|
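A hedged usage sketch for the management command above, invoking it programmatically with Django's call_command; the email, service name and URL are placeholders.
from django.core.management import call_command

call_command(
    'convert_bot_to_outgoing_webhook',
    'my-bot@example.com',           # bot_email (placeholder)
    'my-service',                   # service_name (placeholder)
    'https://hooks.example.com/',   # base_url (placeholder)
)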
790dfa428636cd37454a13f136024e191d7e9967
| 14,814
|
py
|
Python
|
jina/peapods/peas/__init__.py
|
anuragdw710/jina
|
b2d3577f2d5b86399f0b4a8e4529df4929dd18ff
|
[
"Apache-2.0"
] | 3
|
2021-09-02T04:55:20.000Z
|
2021-11-15T09:41:50.000Z
|
jina/peapods/peas/__init__.py
|
sheetal01761/jina
|
520fc0794fb43d96e1fc85534e9df3cf9c89c42e
|
[
"Apache-2.0"
] | null | null | null |
jina/peapods/peas/__init__.py
|
sheetal01761/jina
|
520fc0794fb43d96e1fc85534e9df3cf9c89c42e
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import multiprocessing
import os
import threading
import time
from typing import Any, Tuple, Union, Dict, Optional
from .helper import _get_event, ConditionalEvent
from ... import __stop_msg__, __ready_msg__, __default_host__
from ...enums import PeaRoleType, RuntimeBackendType, SocketType
from ...excepts import RuntimeFailToStart, RuntimeRunForeverEarlyError
from ...helper import typename
from ...logging.logger import JinaLogger
__all__ = ['BasePea']
def run(
args: 'argparse.Namespace',
name: str,
runtime_cls,
envs: Dict[str, str],
is_started: Union['multiprocessing.Event', 'threading.Event'],
is_shutdown: Union['multiprocessing.Event', 'threading.Event'],
is_ready: Union['multiprocessing.Event', 'threading.Event'],
cancel_event: Union['multiprocessing.Event', 'threading.Event'],
):
"""Method representing the :class:`BaseRuntime` activity.
This method is the target for the Pea's `thread` or `process`
.. note::
:meth:`run` is running in subprocess/thread, the exception can not be propagated to the main process.
Hence, please do not raise any exception here.
.. note::
Please note that env variables are process-specific. Subprocess inherits envs from
the main process. But Subprocess's envs do NOT affect the main process. It does NOT
mess up user local system envs.
.. warning::
        If you are using ``thread`` as backend, the envs setting will likely be overridden by others
:param args: namespace args from the Pea
:param name: name of the Pea to have proper logging
:param runtime_cls: the runtime class to instantiate
:param envs: a dictionary of environment variables to be set in the new Process
:param is_started: concurrency event to communicate runtime is properly started. Used for better logging
:param is_shutdown: concurrency event to communicate runtime is terminated
:param is_ready: concurrency event to communicate runtime is ready to receive messages
:param cancel_event: concurrency event to receive cancelling signal from the Pea. Needed by some runtimes
"""
logger = JinaLogger(name, **vars(args))
def _unset_envs():
if envs and args.runtime_backend != RuntimeBackendType.THREAD:
for k in envs.keys():
os.unsetenv(k)
def _set_envs():
if args.env:
if args.runtime_backend == RuntimeBackendType.THREAD:
logger.warning(
'environment variables should not be set when runtime="thread".'
)
else:
os.environ.update({k: str(v) for k, v in envs.items()})
try:
_set_envs()
runtime = runtime_cls(
args=args,
cancel_event=cancel_event,
)
except Exception as ex:
logger.error(
f'{ex!r} during {runtime_cls!r} initialization'
+ f'\n add "--quiet-error" to suppress the exception details'
if not args.quiet_error
else '',
exc_info=not args.quiet_error,
)
else:
is_started.set()
with runtime:
is_ready.set()
runtime.run_forever()
finally:
_unset_envs()
is_shutdown.set()
class BasePea:
"""
    :class:`BasePea` is a thread/process container of :class:`BaseRuntime`. It leverages :class:`threading.Thread`
or :class:`multiprocessing.Process` to manage the lifecycle of a :class:`BaseRuntime` object in a robust way.
A :class:`BasePea` must be equipped with a proper :class:`Runtime` class to work.
"""
def __init__(self, args: 'argparse.Namespace'):
super().__init__() #: required here to call process/thread __init__
self.args = args
self.name = self.args.name or self.__class__.__name__
self.logger = JinaLogger(self.name, **vars(self.args))
if self.args.runtime_backend == RuntimeBackendType.THREAD:
self.logger.warning(
                f'Using Thread as runtime backend is not recommended for production purposes. It is '
                f'just supposed to be used for easier debugging. Besides the performance considerations, it is '
                f'especially dangerous to mix `Executors` running in different types of `RuntimeBackends`.'
)
self._envs = {'JINA_POD_NAME': self.name, 'JINA_LOG_ID': self.args.identity}
if self.args.quiet:
self._envs['JINA_LOG_CONFIG'] = 'QUIET'
if self.args.env:
self._envs.update(self.args.env)
# arguments needed to create `runtime` and communicate with it in the `run` in the stack of the new process
# or thread. Control address from Zmqlet has some randomness and therefore we need to make sure Pea knows
# control address of runtime
self.runtime_cls = self._get_runtime_cls()
self._timeout_ctrl = self.args.timeout_ctrl
self._set_ctrl_adrr()
test_worker = {
RuntimeBackendType.THREAD: threading.Thread,
RuntimeBackendType.PROCESS: multiprocessing.Process,
}.get(getattr(args, 'runtime_backend', RuntimeBackendType.THREAD))()
self.is_ready = _get_event(test_worker)
self.is_shutdown = _get_event(test_worker)
self.cancel_event = _get_event(test_worker)
self.is_started = _get_event(test_worker)
self.ready_or_shutdown = ConditionalEvent(
getattr(args, 'runtime_backend', RuntimeBackendType.THREAD),
events_list=[self.is_ready, self.is_shutdown],
)
self.worker = {
RuntimeBackendType.THREAD: threading.Thread,
RuntimeBackendType.PROCESS: multiprocessing.Process,
}.get(getattr(args, 'runtime_backend', RuntimeBackendType.THREAD))(
target=run,
kwargs={
'args': args,
'name': self.name,
'envs': self._envs,
'is_started': self.is_started,
'is_shutdown': self.is_shutdown,
'is_ready': self.is_ready,
'cancel_event': self.cancel_event,
'runtime_cls': self.runtime_cls,
},
)
self.daemon = self.args.daemon #: required here to set process/thread daemon
def _set_ctrl_adrr(self):
"""Sets control address for different runtimes"""
self.runtime_ctrl_address = self.runtime_cls.get_control_address(
host=self.args.host,
port=self.args.port_ctrl,
docker_kwargs=getattr(self.args, 'docker_kwargs', None),
)
if not self.runtime_ctrl_address:
self.runtime_ctrl_address = f'{self.args.host}:{self.args.port_in}'
def start(self):
"""Start the Pea.
This method calls :meth:`start` in :class:`threading.Thread` or :class:`multiprocesssing.Process`.
.. #noqa: DAR201
"""
self.worker.start()
if not self.args.noblock_on_start:
self.wait_start_success()
return self
def join(self, *args, **kwargs):
"""Joins the Pea.
This method calls :meth:`join` in :class:`threading.Thread` or :class:`multiprocesssing.Process`.
:param args: extra positional arguments to pass to join
:param kwargs: extra keyword arguments to pass to join
"""
self.worker.join(*args, **kwargs)
def terminate(self):
"""Terminate the Pea.
This method calls :meth:`terminate` in :class:`threading.Thread` or :class:`multiprocesssing.Process`.
"""
if hasattr(self.worker, 'terminate'):
self.worker.terminate()
def _retry_control_message(self, command: str, num_retry: int = 3):
from ..zmq import send_ctrl_message
for retry in range(1, num_retry + 1):
self.logger.debug(f'Sending {command} command for the {retry}th time')
try:
send_ctrl_message(
self.runtime_ctrl_address,
command,
timeout=self._timeout_ctrl,
raise_exception=True,
)
break
except Exception as ex:
self.logger.warning(f'{ex!r}')
if retry == num_retry:
raise ex
def activate_runtime(self):
""" Send activate control message. """
self.runtime_cls.activate(
logger=self.logger,
socket_in_type=self.args.socket_in,
control_address=self.runtime_ctrl_address,
timeout_ctrl=self._timeout_ctrl,
)
def _cancel_runtime(self, skip_deactivate: bool = False):
"""
Send terminate control message.
:param skip_deactivate: Mark that the DEACTIVATE signal may be missed if set to True
"""
self.runtime_cls.cancel(
cancel_event=self.cancel_event,
logger=self.logger,
socket_in_type=self.args.socket_in,
control_address=self.runtime_ctrl_address,
timeout_ctrl=self._timeout_ctrl,
skip_deactivate=skip_deactivate,
)
def _wait_for_ready_or_shutdown(self, timeout: Optional[float]):
"""
Waits for the process to be ready or to know it has failed.
:param timeout: The time to wait before readiness or failure is determined
.. # noqa: DAR201
"""
return self.runtime_cls.wait_for_ready_or_shutdown(
timeout=timeout,
ready_or_shutdown_event=self.ready_or_shutdown.event,
ctrl_address=self.runtime_ctrl_address,
timeout_ctrl=self._timeout_ctrl,
shutdown_event=self.is_shutdown,
)
def wait_start_success(self):
"""Block until all peas starts successfully.
If not success, it will raise an error hoping the outer function to catch it
"""
_timeout = self.args.timeout_ready
if _timeout <= 0:
_timeout = None
else:
_timeout /= 1e3
if self._wait_for_ready_or_shutdown(_timeout):
if self.is_shutdown.is_set():
                # returned too early with shutdown already set: something failed during startup
if not self.is_started.is_set():
raise RuntimeFailToStart
else:
raise RuntimeRunForeverEarlyError
else:
self.logger.success(__ready_msg__)
else:
_timeout = _timeout or -1
self.logger.warning(
f'{self.runtime_cls!r} timeout after waiting for {self.args.timeout_ready}ms, '
f'if your executor takes time to load, you may increase --timeout-ready'
)
self.close()
raise TimeoutError(
f'{typename(self)}:{self.name} can not be initialized after {_timeout * 1e3}ms'
)
@property
def _is_dealer(self):
"""Return true if this `Pea` must act as a Dealer responding to a Router
.. # noqa: DAR201
"""
return self.args.socket_in == SocketType.DEALER_CONNECT
def close(self) -> None:
"""Close the Pea
This method makes sure that the `Process/thread` is properly finished and its resources properly released
"""
        # if the runtime does not exit within the control timeout, it is still stuck in its forever loop and must be cancelled
self.logger.debug('waiting for ready or shutdown signal from runtime')
if self.is_ready.is_set() and not self.is_shutdown.is_set():
try:
self._cancel_runtime()
if not self.is_shutdown.wait(timeout=self._timeout_ctrl):
self.terminate()
time.sleep(0.1)
raise Exception(
f'Shutdown signal was not received for {self._timeout_ctrl}'
)
except Exception as ex:
self.logger.error(
f'{ex!r} during {self.close!r}'
+ f'\n add "--quiet-error" to suppress the exception details'
if not self.args.quiet_error
else '',
exc_info=not self.args.quiet_error,
)
# if it is not daemon, block until the process/thread finish work
if not self.args.daemon:
self.join()
elif self.is_shutdown.is_set():
# here shutdown has been set already, therefore `run` will gracefully finish
pass
else:
            # sometimes we arrive at the close logic before `is_ready` is even set.
# Observed with `gateway` when Pods fail to start
self.logger.warning(
'Pea is being closed before being ready. Most likely some other Pea in the Flow or Pod '
'failed to start'
)
_timeout = self.args.timeout_ready
if _timeout <= 0:
_timeout = None
else:
_timeout /= 1e3
self.logger.debug('waiting for ready or shutdown signal from runtime')
if self._wait_for_ready_or_shutdown(_timeout):
if not self.is_shutdown.is_set():
self._cancel_runtime(skip_deactivate=True)
if not self.is_shutdown.wait(timeout=self._timeout_ctrl):
self.terminate()
time.sleep(0.1)
raise Exception(
f'Shutdown signal was not received for {self._timeout_ctrl}'
)
else:
self.logger.warning(
'Terminating process after waiting for readiness signal for graceful shutdown'
)
# Just last resource, terminate it
self.terminate()
time.sleep(0.1)
self.logger.debug(__stop_msg__)
self.logger.close()
def __enter__(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def _get_runtime_cls(self) -> Tuple[Any, bool]:
from .helper import update_runtime_cls
from ..runtimes import get_runtime
update_runtime_cls(self.args)
return get_runtime(self.args.runtime_cls)
@property
def role(self) -> 'PeaRoleType':
"""Get the role of this pea in a pod
.. #noqa: DAR201"""
return self.args.pea_role
@property
def _is_inner_pea(self) -> bool:
"""Determine whether this is a inner pea or a head/tail
.. #noqa: DAR201"""
return self.role is PeaRoleType.SINGLETON or self.role is PeaRoleType.PARALLEL
| 38.984211
| 115
| 0.605778
|
import argparse
import multiprocessing
import os
import threading
import time
from typing import Any, Tuple, Union, Dict, Optional
from .helper import _get_event, ConditionalEvent
from ... import __stop_msg__, __ready_msg__, __default_host__
from ...enums import PeaRoleType, RuntimeBackendType, SocketType
from ...excepts import RuntimeFailToStart, RuntimeRunForeverEarlyError
from ...helper import typename
from ...logging.logger import JinaLogger
__all__ = ['BasePea']
def run(
args: 'argparse.Namespace',
name: str,
runtime_cls,
envs: Dict[str, str],
is_started: Union['multiprocessing.Event', 'threading.Event'],
is_shutdown: Union['multiprocessing.Event', 'threading.Event'],
is_ready: Union['multiprocessing.Event', 'threading.Event'],
cancel_event: Union['multiprocessing.Event', 'threading.Event'],
):
logger = JinaLogger(name, **vars(args))
def _unset_envs():
if envs and args.runtime_backend != RuntimeBackendType.THREAD:
for k in envs.keys():
os.unsetenv(k)
def _set_envs():
if args.env:
if args.runtime_backend == RuntimeBackendType.THREAD:
logger.warning(
'environment variables should not be set when runtime="thread".'
)
else:
os.environ.update({k: str(v) for k, v in envs.items()})
try:
_set_envs()
runtime = runtime_cls(
args=args,
cancel_event=cancel_event,
)
except Exception as ex:
logger.error(
f'{ex!r} during {runtime_cls!r} initialization'
+ f'\n add "--quiet-error" to suppress the exception details'
if not args.quiet_error
else '',
exc_info=not args.quiet_error,
)
else:
is_started.set()
with runtime:
is_ready.set()
runtime.run_forever()
finally:
_unset_envs()
is_shutdown.set()
class BasePea:
def __init__(self, args: 'argparse.Namespace'):
super().__init__()
self.args = args
self.name = self.args.name or self.__class__.__name__
self.logger = JinaLogger(self.name, **vars(self.args))
if self.args.runtime_backend == RuntimeBackendType.THREAD:
self.logger.warning(
f' Using Thread as runtime backend is not recommended for production purposes. It is '
                f'just supposed to be used for easier debugging. Besides the performance considerations, it is '
                f'especially dangerous to mix `Executors` running in different types of `RuntimeBackends`.'
)
self._envs = {'JINA_POD_NAME': self.name, 'JINA_LOG_ID': self.args.identity}
if self.args.quiet:
self._envs['JINA_LOG_CONFIG'] = 'QUIET'
if self.args.env:
self._envs.update(self.args.env)
self.runtime_cls = self._get_runtime_cls()
self._timeout_ctrl = self.args.timeout_ctrl
self._set_ctrl_adrr()
test_worker = {
RuntimeBackendType.THREAD: threading.Thread,
RuntimeBackendType.PROCESS: multiprocessing.Process,
}.get(getattr(args, 'runtime_backend', RuntimeBackendType.THREAD))()
self.is_ready = _get_event(test_worker)
self.is_shutdown = _get_event(test_worker)
self.cancel_event = _get_event(test_worker)
self.is_started = _get_event(test_worker)
self.ready_or_shutdown = ConditionalEvent(
getattr(args, 'runtime_backend', RuntimeBackendType.THREAD),
events_list=[self.is_ready, self.is_shutdown],
)
self.worker = {
RuntimeBackendType.THREAD: threading.Thread,
RuntimeBackendType.PROCESS: multiprocessing.Process,
}.get(getattr(args, 'runtime_backend', RuntimeBackendType.THREAD))(
target=run,
kwargs={
'args': args,
'name': self.name,
'envs': self._envs,
'is_started': self.is_started,
'is_shutdown': self.is_shutdown,
'is_ready': self.is_ready,
'cancel_event': self.cancel_event,
'runtime_cls': self.runtime_cls,
},
)
self.daemon = self.args.daemon
def _set_ctrl_adrr(self):
self.runtime_ctrl_address = self.runtime_cls.get_control_address(
host=self.args.host,
port=self.args.port_ctrl,
docker_kwargs=getattr(self.args, 'docker_kwargs', None),
)
if not self.runtime_ctrl_address:
self.runtime_ctrl_address = f'{self.args.host}:{self.args.port_in}'
def start(self):
self.worker.start()
if not self.args.noblock_on_start:
self.wait_start_success()
return self
def join(self, *args, **kwargs):
self.worker.join(*args, **kwargs)
def terminate(self):
if hasattr(self.worker, 'terminate'):
self.worker.terminate()
def _retry_control_message(self, command: str, num_retry: int = 3):
from ..zmq import send_ctrl_message
for retry in range(1, num_retry + 1):
self.logger.debug(f'Sending {command} command for the {retry}th time')
try:
send_ctrl_message(
self.runtime_ctrl_address,
command,
timeout=self._timeout_ctrl,
raise_exception=True,
)
break
except Exception as ex:
self.logger.warning(f'{ex!r}')
if retry == num_retry:
raise ex
def activate_runtime(self):
self.runtime_cls.activate(
logger=self.logger,
socket_in_type=self.args.socket_in,
control_address=self.runtime_ctrl_address,
timeout_ctrl=self._timeout_ctrl,
)
def _cancel_runtime(self, skip_deactivate: bool = False):
self.runtime_cls.cancel(
cancel_event=self.cancel_event,
logger=self.logger,
socket_in_type=self.args.socket_in,
control_address=self.runtime_ctrl_address,
timeout_ctrl=self._timeout_ctrl,
skip_deactivate=skip_deactivate,
)
def _wait_for_ready_or_shutdown(self, timeout: Optional[float]):
return self.runtime_cls.wait_for_ready_or_shutdown(
timeout=timeout,
ready_or_shutdown_event=self.ready_or_shutdown.event,
ctrl_address=self.runtime_ctrl_address,
timeout_ctrl=self._timeout_ctrl,
shutdown_event=self.is_shutdown,
)
def wait_start_success(self):
_timeout = self.args.timeout_ready
if _timeout <= 0:
_timeout = None
else:
_timeout /= 1e3
if self._wait_for_ready_or_shutdown(_timeout):
if self.is_shutdown.is_set():
if not self.is_started.is_set():
raise RuntimeFailToStart
else:
raise RuntimeRunForeverEarlyError
else:
self.logger.success(__ready_msg__)
else:
_timeout = _timeout or -1
self.logger.warning(
f'{self.runtime_cls!r} timeout after waiting for {self.args.timeout_ready}ms, '
f'if your executor takes time to load, you may increase --timeout-ready'
)
self.close()
raise TimeoutError(
f'{typename(self)}:{self.name} can not be initialized after {_timeout * 1e3}ms'
)
@property
def _is_dealer(self):
return self.args.socket_in == SocketType.DEALER_CONNECT
def close(self) -> None:
self.logger.debug('waiting for ready or shutdown signal from runtime')
if self.is_ready.is_set() and not self.is_shutdown.is_set():
try:
self._cancel_runtime()
if not self.is_shutdown.wait(timeout=self._timeout_ctrl):
self.terminate()
time.sleep(0.1)
raise Exception(
f'Shutdown signal was not received for {self._timeout_ctrl}'
)
except Exception as ex:
self.logger.error(
f'{ex!r} during {self.close!r}'
+ f'\n add "--quiet-error" to suppress the exception details'
if not self.args.quiet_error
else '',
exc_info=not self.args.quiet_error,
)
if not self.args.daemon:
self.join()
elif self.is_shutdown.is_set():
pass
else:
self.logger.warning(
'Pea is being closed before being ready. Most likely some other Pea in the Flow or Pod '
'failed to start'
)
_timeout = self.args.timeout_ready
if _timeout <= 0:
_timeout = None
else:
_timeout /= 1e3
self.logger.debug('waiting for ready or shutdown signal from runtime')
if self._wait_for_ready_or_shutdown(_timeout):
if not self.is_shutdown.is_set():
self._cancel_runtime(skip_deactivate=True)
if not self.is_shutdown.wait(timeout=self._timeout_ctrl):
self.terminate()
time.sleep(0.1)
raise Exception(
f'Shutdown signal was not received for {self._timeout_ctrl}'
)
else:
self.logger.warning(
'Terminating process after waiting for readiness signal for graceful shutdown'
)
self.terminate()
time.sleep(0.1)
self.logger.debug(__stop_msg__)
self.logger.close()
def __enter__(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def _get_runtime_cls(self) -> Tuple[Any, bool]:
from .helper import update_runtime_cls
from ..runtimes import get_runtime
update_runtime_cls(self.args)
return get_runtime(self.args.runtime_cls)
@property
def role(self) -> 'PeaRoleType':
return self.args.pea_role
@property
def _is_inner_pea(self) -> bool:
return self.role is PeaRoleType.SINGLETON or self.role is PeaRoleType.PARALLEL
| true
| true
|
790dfaa3ace00ee844e14ba35f6542955066a057
| 2,534
|
py
|
Python
|
src/yt_list_downloader/input.py
|
portikCoder/yt-list-downloader
|
a96dbf605b21ea0f62e4c80d04251ee4def4c60e
|
[
"MIT"
] | null | null | null |
src/yt_list_downloader/input.py
|
portikCoder/yt-list-downloader
|
a96dbf605b21ea0f62e4c80d04251ee4def4c60e
|
[
"MIT"
] | null | null | null |
src/yt_list_downloader/input.py
|
portikCoder/yt-list-downloader
|
a96dbf605b21ea0f62e4c80d04251ee4def4c60e
|
[
"MIT"
] | null | null | null |
playlists = """https://www.youtube.com/playlist?list=PLWOxuTkHhAq70Q4ZF1EcsRaW1uHs6zygL
https://www.youtube.com/playlist?list=PLObmubSxSaRKdWB3BoyndydqDGkgbMlbC
https://www.youtube.com/playlist?list=PLxN__ARc_RfpOAdnB14rBSF6xDHDySNE4
https://www.youtube.com/playlist?list=PLxN__ARc_Rfrsee6JJkU198Fa0rxjbTgQ
https://www.youtube.com/playlist?list=PLxN__ARc_Rfr3AipUqXTqjbL990_pUBCd
https://www.youtube.com/playlist?list=PLxN__ARc_RfrYYGV0uN9G79ASYNYHN5bi
https://www.youtube.com/playlist?list=PLnyIUJZrQfakO0xkKPxceT4rOhzwd9NMU
https://www.youtube.com/playlist?list=PLnyIUJZrQfam-mBL4Wq3qncy68DfeI_Op
https://www.youtube.com/playlist?list=PLnyIUJZrQfalS8yK1K7NGWbZXiY6nnDxg
https://www.youtube.com/playlist?list=PLnyIUJZrQfaluNlLUmbGoDJUUas4T7W_C
https://www.youtube.com/playlist?list=PLjqsQycWIhFfKtcQlGgnGDQuUbUjSgqrB
https://www.youtube.com/playlist?list=PLaK1z6C61upcg-fWsHoiWtDwsKP7Ti23r
https://www.youtube.com/playlist?list=PLZ7Ye5d76T7j9l2BONR05FDyUaczK3NWP
https://www.youtube.com/playlist?list=PLZ7Ye5d76T7gzXf6-_QdT3qRplcIODcs0
https://www.youtube.com/playlist?list=PLZ7Ye5d76T7j-QdeSLenweE_E_w2gWGDd
https://www.youtube.com/playlist?list=PLcfHpRUOJGcOSIYV7UX0Fxb3N9aYiIeIX
https://www.youtube.com/playlist?list=PLcfHpRUOJGcN3QcWyDJ9D6e_BDmkmRoaX
https://www.youtube.com/playlist?list=PLcfHpRUOJGcN_q9JvR09_Wy4Yhl_9eG6r
https://www.youtube.com/playlist?list=PL3R3ezbvlgNpyAxZzQ3vEIO3R6XYzX-Oa
https://www.youtube.com/playlist?list=PLTufViCW8MklAol1CKXYFmWB-6LMS5cLS
https://www.youtube.com/playlist?list=PL2kIyfRWfPs6UC--1IqcxcPSfXsNyGFiK
https://www.youtube.com/playlist?list=PLSQCEkZEhfq9mra1sy7vI4Ra2kgPslVaX
https://www.youtube.com/playlist?list=PL1ad0PFSaS7c0k5guh90oS-7csla07Or1
https://www.youtube.com/playlist?list=PLy_Iceng2X1A8MwZ8hWC9ITqz0fYU3Mau
https://www.youtube.com/playlist?list=PLTufViCW8MknjUllPTnq5PtRlZTAJ2a-9
https://www.youtube.com/playlist?list=PLTufViCW8MknQc1Dgth6b0CyFhXWClDJs
https://www.youtube.com/playlist?list=PLTufViCW8MkkAnqcreRA78ytpxuf2r8ac
https://www.youtube.com/playlist?list=PLttdYH6hzg6fmQVkawJWxBKPRGVxvm11I
https://www.youtube.com/playlist?list=PLAmqhEyEmobA1M3t4XjlAQthTRToIYku4
https://www.youtube.com/playlist?list=PLAmqhEyEmobBAo9FR9_8JRt9ykj2k92-e
https://www.youtube.com/playlist?list=PLJJd8CUjfjZqb9tGMTfAYPfVcO-yMSiV3
https://www.youtube.com/playlist?list=PLJJd8CUjfjZqLKBBP0D_jqtG4OPYTTByR
https://www.youtube.com/playlist?list=PLAmqhEyEmobDzbAqxuYjpjDLZFnhws7no
https://www.youtube.com/playlist?list=PLgE5g9Ln1vGHGjsDr-4t-Jsw4C1mjWIQV"""
playlists = playlists.split("\n")
| 70.388889
| 87
| 0.866219
|
playlists = """https://www.youtube.com/playlist?list=PLWOxuTkHhAq70Q4ZF1EcsRaW1uHs6zygL
https://www.youtube.com/playlist?list=PLObmubSxSaRKdWB3BoyndydqDGkgbMlbC
https://www.youtube.com/playlist?list=PLxN__ARc_RfpOAdnB14rBSF6xDHDySNE4
https://www.youtube.com/playlist?list=PLxN__ARc_Rfrsee6JJkU198Fa0rxjbTgQ
https://www.youtube.com/playlist?list=PLxN__ARc_Rfr3AipUqXTqjbL990_pUBCd
https://www.youtube.com/playlist?list=PLxN__ARc_RfrYYGV0uN9G79ASYNYHN5bi
https://www.youtube.com/playlist?list=PLnyIUJZrQfakO0xkKPxceT4rOhzwd9NMU
https://www.youtube.com/playlist?list=PLnyIUJZrQfam-mBL4Wq3qncy68DfeI_Op
https://www.youtube.com/playlist?list=PLnyIUJZrQfalS8yK1K7NGWbZXiY6nnDxg
https://www.youtube.com/playlist?list=PLnyIUJZrQfaluNlLUmbGoDJUUas4T7W_C
https://www.youtube.com/playlist?list=PLjqsQycWIhFfKtcQlGgnGDQuUbUjSgqrB
https://www.youtube.com/playlist?list=PLaK1z6C61upcg-fWsHoiWtDwsKP7Ti23r
https://www.youtube.com/playlist?list=PLZ7Ye5d76T7j9l2BONR05FDyUaczK3NWP
https://www.youtube.com/playlist?list=PLZ7Ye5d76T7gzXf6-_QdT3qRplcIODcs0
https://www.youtube.com/playlist?list=PLZ7Ye5d76T7j-QdeSLenweE_E_w2gWGDd
https://www.youtube.com/playlist?list=PLcfHpRUOJGcOSIYV7UX0Fxb3N9aYiIeIX
https://www.youtube.com/playlist?list=PLcfHpRUOJGcN3QcWyDJ9D6e_BDmkmRoaX
https://www.youtube.com/playlist?list=PLcfHpRUOJGcN_q9JvR09_Wy4Yhl_9eG6r
https://www.youtube.com/playlist?list=PL3R3ezbvlgNpyAxZzQ3vEIO3R6XYzX-Oa
https://www.youtube.com/playlist?list=PLTufViCW8MklAol1CKXYFmWB-6LMS5cLS
https://www.youtube.com/playlist?list=PL2kIyfRWfPs6UC--1IqcxcPSfXsNyGFiK
https://www.youtube.com/playlist?list=PLSQCEkZEhfq9mra1sy7vI4Ra2kgPslVaX
https://www.youtube.com/playlist?list=PL1ad0PFSaS7c0k5guh90oS-7csla07Or1
https://www.youtube.com/playlist?list=PLy_Iceng2X1A8MwZ8hWC9ITqz0fYU3Mau
https://www.youtube.com/playlist?list=PLTufViCW8MknjUllPTnq5PtRlZTAJ2a-9
https://www.youtube.com/playlist?list=PLTufViCW8MknQc1Dgth6b0CyFhXWClDJs
https://www.youtube.com/playlist?list=PLTufViCW8MkkAnqcreRA78ytpxuf2r8ac
https://www.youtube.com/playlist?list=PLttdYH6hzg6fmQVkawJWxBKPRGVxvm11I
https://www.youtube.com/playlist?list=PLAmqhEyEmobA1M3t4XjlAQthTRToIYku4
https://www.youtube.com/playlist?list=PLAmqhEyEmobBAo9FR9_8JRt9ykj2k92-e
https://www.youtube.com/playlist?list=PLJJd8CUjfjZqb9tGMTfAYPfVcO-yMSiV3
https://www.youtube.com/playlist?list=PLJJd8CUjfjZqLKBBP0D_jqtG4OPYTTByR
https://www.youtube.com/playlist?list=PLAmqhEyEmobDzbAqxuYjpjDLZFnhws7no
https://www.youtube.com/playlist?list=PLgE5g9Ln1vGHGjsDr-4t-Jsw4C1mjWIQV"""
playlists = playlists.split("\n")
| true
| true
|
790dfb1b129701f5d95410bf58efedf5ed0fc8d9
| 15,042
|
py
|
Python
|
nets/block.py
|
manoil/Deep_VoiceChanger
|
5cd3d6ff2a8a9eea3b8fae1c0e6ed2d00012b771
|
[
"MIT"
] | 176
|
2019-01-03T02:18:09.000Z
|
2022-03-25T11:39:23.000Z
|
nets/block.py
|
manoil/Deep_VoiceChanger
|
5cd3d6ff2a8a9eea3b8fae1c0e6ed2d00012b771
|
[
"MIT"
] | 9
|
2019-01-04T16:20:09.000Z
|
2021-01-05T10:39:24.000Z
|
nets/block.py
|
manoil/Deep_VoiceChanger
|
5cd3d6ff2a8a9eea3b8fae1c0e6ed2d00012b771
|
[
"MIT"
] | 43
|
2018-12-02T15:04:27.000Z
|
2022-03-11T15:48:02.000Z
|
import math
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
from .sn_convolution_2d import SNConvolution2D, SNDeconvolution2D
from .sn_linear import SNLinear
def _upsample(x):
h, w = x.shape[2:]
return F.unpooling_2d(x, 2, outsize=(h * 2, w * 2))
def _downsample(x):
return F.average_pooling_2d(x, 2)
def upsample_conv(x, conv):
return conv(_upsample(x))
def _upsample_frq(x):
h, w = x.shape[2:]
return F.unpooling_2d(x, (1,2), outsize=(h, w * 2))
def _downsample_frq(x):
return F.average_pooling_2d(x, (1,2))
def upsample_conv_frq(x, conv):
return conv(_upsample_frq(x))
class ResBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, ksize=3, pad=1, activation=F.leaky_relu, mode='none', bn=False, dr=None):
super(ResBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
initializer_sc = chainer.initializers.GlorotUniform()
self.activation = activation
self.mode = _downsample if mode == 'down' else _upsample if mode == 'up' else None
self.learnable_sc = in_channels != out_channels
self.dr = dr
self.bn = bn
with self.init_scope():
self.c1 = L.Convolution2D(in_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
self.c2 = L.Convolution2D(out_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
if bn:
self.b1 = L.BatchNormalization(out_channels)
self.b2 = L.BatchNormalization(out_channels)
if self.learnable_sc:
self.c_sc = L.Convolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)
def residual(self, x):
h = x
h = self.c1(h)
if self.bn:
h = self.b1(h)
if self.activation:
h = self.activation(h)
if self.mode:
h = self.mode(h)
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.c2(h)
if self.bn:
h = self.b2(h)
if self.activation:
h = self.activation(h)
return h
def shortcut(self, x):
if self.mode:
x = self.mode(x)
if self.learnable_sc:
x = self.c_sc(x)
return x
def __call__(self, x):
return self.residual(x) + self.shortcut(x)
class ConvBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None):
super(ConvBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.bn = bn
self.dr = dr
with self.init_scope():
if mode == 'none':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn)
elif mode == 'none-7':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn)
elif mode == 'down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'up':
self.c = L.Deconvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'full-down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn)
elif mode == 'frq':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
elif mode == 'frq-down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_downsample(x))
elif mode == 'frq-up':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_upsample(x))
elif mode == 'pad':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=2, initialW=initializer, nobias=bn)
elif mode == 'trim':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=0, initialW=initializer, nobias=bn)
else:
raise Exception('mode is missing')
if bn:
self.b = L.BatchNormalization(out_channels)
def __call__(self, h):
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.c(h)
if self.bn:
h = self.b(h)
if self.activation:
h = self.activation(h)
return h
class CoPSBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, activation=F.leaky_relu, bn=True):
super(CoPSBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.bn = bn
with self.init_scope():
self.ps = L.Convolution2D(in_channels, in_channels*4, ksize=1, stride=1, initialW=initializer)
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer)
if bn:
self.b = L.BatchNormalization(out_channels)
def pixel_shuffle(self, x):
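        # Manual sub-pixel upsampling: the 1x1 conv `ps` expands the channels 4x, then the
        # (b, 4c, h, w) tensor is rearranged into (b, c, 2h, 2w) via reshape/transpose.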
out = self.ps(x)
b = out.shape[0]
c = out.shape[1]
h = out.shape[2]
w = out.shape[3]
out = F.reshape(out, (b, 2, 2, c//4, h, w))
out = F.transpose(out, (0, 3, 4, 1, 5, 2))
out = F.reshape(out, (b, c//4, h*2, w*2))
return out
def __call__(self, h):
h = self.pixel_shuffle(h)
h = self.c(h)
if self.bn:
h = self.b(h)
if self.activation:
h = self.activation(h)
return h
class SNResBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, activation=F.leaky_relu, sample='none', dr=None):
super(SNResBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
initializer_sc = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
self.sample = _downsample if sample == 'down' else _upsample if sample == 'up' else None
self.learnable_sc = in_channels != out_channels or sample == 'down' or sample == 'up'
with self.init_scope():
self.c1 = SNConvolution2D(in_channels, out_channels, ksize=3, pad=1, initialW=initializer)
self.c2 = SNConvolution2D(out_channels, out_channels, ksize=3, pad=1, initialW=initializer)
if self.learnable_sc:
self.c_sc = SNConvolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)
def residual(self, x):
h = x
h = self.activation(h)
h = self.c1(h)
if self.sample:
h = self.sample(h)
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.activation(h)
h = self.c2(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.c_sc(x)
if self.sample:
return self.sample(x)
else:
return x
else:
return x
def __call__(self, x):
return self.residual(x) + self.shortcut(x)
class SNConvBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None):
super(SNConvBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.bn = bn
self.dr = dr
with self.init_scope():
if mode == 'none':
self.c = SNConvolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn)
elif mode == 'none-7':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn)
elif mode == 'down':
self.c = SNConvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'up':
self.c = SNDeconvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'full-down':
self.c = SNConvolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn)
elif mode == 'frq':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
elif mode == 'frq-down':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_downsample(x))
elif mode == 'frq-up':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_upsample(x))
else:
raise Exception('mode is missing')
if bn:
self.b = L.BatchNormalization(out_channels)
def __call__(self, h):
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.c(h)
if self.bn:
h = self.b(h)
if self.activation:
h = self.activation(h)
return h
class SNLinearBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, activation=F.leaky_relu, dr=None):
super(SNLinearBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
if type(out_channels) is tuple:
self.out_shape = (-1,)+out_channels
else:
self.out_shape = None
with self.init_scope():
self.l = SNLinear(in_channels, np.prod(out_channels), initialW=initializer)
def __call__(self, x):
if self.dr:
x = F.dropout(x, self.dr)
x = self.l(x)
x = self.activation(x)
if self.out_shape:
x = F.reshape(x, self.out_shape)
return x
class SNMDBlock(chainer.Chain):
def __init__(self, in_channels, in_size=4, B=100, C=5, gap=True, dr=None):
super(SNMDBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.B = B
self.C = C
self.dr = dr
self.gap = gap
if gap:
in_size = 1
if type(in_size) is int:
in_size = (in_size, in_size)
with self.init_scope():
self.l = SNLinear(in_size[0] * in_size[1] * in_channels + B, 1, initialW=initializer)
self.md = SNLinear(in_size[0] * in_size[1] * in_channels, B * C, initialW=initializer)
def __call__(self, x):
if self.dr:
with chainer.using_config('train', True):
x = F.dropout(x, self.dr)
if self.gap:
x = F.sum(x, axis=(2,3))
N = x.shape[0]
        # Below code copied from https://github.com/pfnet-research/chainer-gan-lib/blob/master/minibatch_discrimination/net.py
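        # Minibatch discrimination: project each sample to B*C features, compute pairwise
        # L1 distances across the batch, and append the resulting per-sample B statistics
        # to the feature vector fed into the final linear layer.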
feature = F.reshape(F.leaky_relu(x), (N, -1))
m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
m0 = F.broadcast_to(m, (N, self.B * self.C, N))
m1 = F.transpose(m0, (2, 1, 0))
d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
h = F.concat([feature, d])
h = self.l(h)
return h
class SNL1DBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
super(SNL1DBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
self.out_ch = out_ch
with self.init_scope():
self.l = SNLinear(in_ch*width, out_ch*width, initialW=initializer)
def __call__(self, x):
if self.dr:
x = F.dropout(x, self.dr)
x = F.transpose(x, (0, 2, 1, 3))
out_shape = list(x.shape)
x = F.reshape(x, (-1, x.shape[2]*x.shape[3]))
x = self.l(x)
x = self.activation(x)
out_shape[2] = self.out_ch
x = F.reshape(x, out_shape)
x = F.transpose(x, (0, 2, 1, 3))
return x
class L1DBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
super(L1DBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
self.out_ch = out_ch
with self.init_scope():
self.l = L.Linear(in_ch*width, out_ch*width, initialW=initializer)
def __call__(self, x):
if self.dr:
x = F.dropout(x, self.dr)
x = F.transpose(x, (0, 2, 1, 3))
out_shape = list(x.shape)
x = F.reshape(x, (-1, x.shape[2]*x.shape[3]))
x = self.l(x)
x = self.activation(x)
out_shape[2] = self.out_ch
x = F.reshape(x, out_shape)
x = F.transpose(x, (0, 2, 1, 3))
return x
class CLBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, liner_out_ch=1, dr=None):
super(CLBlock, self).__init__()
self.dr = dr
if out_ch - liner_out_ch <= 0:
raise Exception('out_ch <= liner_out_ch!')
with self.init_scope():
self.c = ConvBlock(in_ch, out_ch-liner_out_ch, activation=activation)
self.l = L1DBlock(in_ch, liner_out_ch, width, activation)
def __call__(self, x):
h = x
if self.dr:
h = F.dropout(h, self.dr)
h1 = self.c(h)
h2 = self.l(h)
h = F.concat([h1,h2])
return h
class SNCLBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
super(SNCLBlock, self).__init__()
self.dr = dr
with self.init_scope():
self.c = SNConvBlock(in_ch, out_ch-1, activation=activation)
self.l = SNL1DBlock(in_ch, 1, width, activation)
def __call__(self, x):
h = x
if self.dr:
h = F.dropout(h, self.dr)
h1 = self.c(h)
h2 = self.l(h)
h = F.concat([h1,h2])
return h
| 39.899204
| 134
| 0.582103
|
import math
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
from .sn_convolution_2d import SNConvolution2D, SNDeconvolution2D
from .sn_linear import SNLinear
def _upsample(x):
h, w = x.shape[2:]
return F.unpooling_2d(x, 2, outsize=(h * 2, w * 2))
def _downsample(x):
return F.average_pooling_2d(x, 2)
def upsample_conv(x, conv):
return conv(_upsample(x))
def _upsample_frq(x):
h, w = x.shape[2:]
return F.unpooling_2d(x, (1,2), outsize=(h, w * 2))
def _downsample_frq(x):
return F.average_pooling_2d(x, (1,2))
def upsample_conv_frq(x, conv):
return conv(_upsample_frq(x))
class ResBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, ksize=3, pad=1, activation=F.leaky_relu, mode='none', bn=False, dr=None):
super(ResBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
initializer_sc = chainer.initializers.GlorotUniform()
self.activation = activation
self.mode = _downsample if mode == 'down' else _upsample if mode == 'up' else None
self.learnable_sc = in_channels != out_channels
self.dr = dr
self.bn = bn
with self.init_scope():
self.c1 = L.Convolution2D(in_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
self.c2 = L.Convolution2D(out_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
if bn:
self.b1 = L.BatchNormalization(out_channels)
self.b2 = L.BatchNormalization(out_channels)
if self.learnable_sc:
self.c_sc = L.Convolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)
def residual(self, x):
h = x
h = self.c1(h)
if self.bn:
h = self.b1(h)
if self.activation:
h = self.activation(h)
if self.mode:
h = self.mode(h)
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.c2(h)
if self.bn:
h = self.b2(h)
if self.activation:
h = self.activation(h)
return h
def shortcut(self, x):
if self.mode:
x = self.mode(x)
if self.learnable_sc:
x = self.c_sc(x)
return x
def __call__(self, x):
return self.residual(x) + self.shortcut(x)
class ConvBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None):
super(ConvBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.bn = bn
self.dr = dr
with self.init_scope():
if mode == 'none':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn)
elif mode == 'none-7':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn)
elif mode == 'down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'up':
self.c = L.Deconvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'full-down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn)
elif mode == 'frq':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
elif mode == 'frq-down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_downsample(x))
elif mode == 'frq-up':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_upsample(x))
elif mode == 'pad':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=2, initialW=initializer, nobias=bn)
elif mode == 'trim':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=0, initialW=initializer, nobias=bn)
else:
raise Exception('mode is missing')
if bn:
self.b = L.BatchNormalization(out_channels)
def __call__(self, h):
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.c(h)
if self.bn:
h = self.b(h)
if self.activation:
h = self.activation(h)
return h
class CoPSBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, activation=F.leaky_relu, bn=True):
super(CoPSBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.bn = bn
with self.init_scope():
self.ps = L.Convolution2D(in_channels, in_channels*4, ksize=1, stride=1, initialW=initializer)
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer)
if bn:
self.b = L.BatchNormalization(out_channels)
def pixel_shuffle(self, x):
out = self.ps(x)
b = out.shape[0]
c = out.shape[1]
h = out.shape[2]
w = out.shape[3]
out = F.reshape(out, (b, 2, 2, c//4, h, w))
out = F.transpose(out, (0, 3, 4, 1, 5, 2))
out = F.reshape(out, (b, c//4, h*2, w*2))
return out
def __call__(self, h):
h = self.pixel_shuffle(h)
h = self.c(h)
if self.bn:
h = self.b(h)
if self.activation:
h = self.activation(h)
return h
class SNResBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, activation=F.leaky_relu, sample='none', dr=None):
super(SNResBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
initializer_sc = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
self.sample = _downsample if sample == 'down' else _upsample if sample == 'up' else None
self.learnable_sc = in_channels != out_channels or sample == 'down' or sample == 'up'
with self.init_scope():
self.c1 = SNConvolution2D(in_channels, out_channels, ksize=3, pad=1, initialW=initializer)
self.c2 = SNConvolution2D(out_channels, out_channels, ksize=3, pad=1, initialW=initializer)
if self.learnable_sc:
self.c_sc = SNConvolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)
def residual(self, x):
h = x
h = self.activation(h)
h = self.c1(h)
if self.sample:
h = self.sample(h)
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.activation(h)
h = self.c2(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.c_sc(x)
if self.sample:
return self.sample(x)
else:
return x
else:
return x
def __call__(self, x):
return self.residual(x) + self.shortcut(x)
class SNConvBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None):
super(SNConvBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.bn = bn
self.dr = dr
with self.init_scope():
if mode == 'none':
self.c = SNConvolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn)
elif mode == 'none-7':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn)
elif mode == 'down':
self.c = SNConvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'up':
self.c = SNDeconvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'full-down':
self.c = SNConvolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn)
elif mode == 'frq':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
elif mode == 'frq-down':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_downsample(x))
elif mode == 'frq-up':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_upsample(x))
else:
raise Exception('mode is missing')
if bn:
self.b = L.BatchNormalization(out_channels)
def __call__(self, h):
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.c(h)
if self.bn:
h = self.b(h)
if self.activation:
h = self.activation(h)
return h
class SNLinearBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, activation=F.leaky_relu, dr=None):
super(SNLinearBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
if type(out_channels) is tuple:
self.out_shape = (-1,)+out_channels
else:
self.out_shape = None
with self.init_scope():
self.l = SNLinear(in_channels, np.prod(out_channels), initialW=initializer)
def __call__(self, x):
if self.dr:
x = F.dropout(x, self.dr)
x = self.l(x)
x = self.activation(x)
if self.out_shape:
x = F.reshape(x, self.out_shape)
return x
class SNMDBlock(chainer.Chain):
def __init__(self, in_channels, in_size=4, B=100, C=5, gap=True, dr=None):
super(SNMDBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.B = B
self.C = C
self.dr = dr
self.gap = gap
if gap:
in_size = 1
if type(in_size) is int:
in_size = (in_size, in_size)
with self.init_scope():
self.l = SNLinear(in_size[0] * in_size[1] * in_channels + B, 1, initialW=initializer)
self.md = SNLinear(in_size[0] * in_size[1] * in_channels, B * C, initialW=initializer)
def __call__(self, x):
if self.dr:
with chainer.using_config('train', True):
x = F.dropout(x, self.dr)
if self.gap:
x = F.sum(x, axis=(2,3))
N = x.shape[0]
feature = F.reshape(F.leaky_relu(x), (N, -1))
m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
m0 = F.broadcast_to(m, (N, self.B * self.C, N))
m1 = F.transpose(m0, (2, 1, 0))
d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
h = F.concat([feature, d])
h = self.l(h)
return h
class SNL1DBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
super(SNL1DBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
self.out_ch = out_ch
with self.init_scope():
self.l = SNLinear(in_ch*width, out_ch*width, initialW=initializer)
def __call__(self, x):
if self.dr:
x = F.dropout(x, self.dr)
x = F.transpose(x, (0, 2, 1, 3))
out_shape = list(x.shape)
x = F.reshape(x, (-1, x.shape[2]*x.shape[3]))
x = self.l(x)
x = self.activation(x)
out_shape[2] = self.out_ch
x = F.reshape(x, out_shape)
x = F.transpose(x, (0, 2, 1, 3))
return x
class L1DBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
super(L1DBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
self.out_ch = out_ch
with self.init_scope():
self.l = L.Linear(in_ch*width, out_ch*width, initialW=initializer)
def __call__(self, x):
if self.dr:
x = F.dropout(x, self.dr)
x = F.transpose(x, (0, 2, 1, 3))
out_shape = list(x.shape)
x = F.reshape(x, (-1, x.shape[2]*x.shape[3]))
x = self.l(x)
x = self.activation(x)
out_shape[2] = self.out_ch
x = F.reshape(x, out_shape)
x = F.transpose(x, (0, 2, 1, 3))
return x
class CLBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, liner_out_ch=1, dr=None):
super(CLBlock, self).__init__()
self.dr = dr
if out_ch - liner_out_ch <= 0:
raise Exception('out_ch <= liner_out_ch!')
with self.init_scope():
self.c = ConvBlock(in_ch, out_ch-liner_out_ch, activation=activation)
self.l = L1DBlock(in_ch, liner_out_ch, width, activation)
def __call__(self, x):
h = x
if self.dr:
h = F.dropout(h, self.dr)
h1 = self.c(h)
h2 = self.l(h)
h = F.concat([h1,h2])
return h
class SNCLBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
super(SNCLBlock, self).__init__()
self.dr = dr
with self.init_scope():
self.c = SNConvBlock(in_ch, out_ch-1, activation=activation)
self.l = SNL1DBlock(in_ch, 1, width, activation)
def __call__(self, x):
h = x
if self.dr:
h = F.dropout(h, self.dr)
h1 = self.c(h)
h2 = self.l(h)
h = F.concat([h1,h2])
return h
| true
| true
|
790dfbabe99181be277c7a5d49fc080057ab0ccf
| 2,583
|
py
|
Python
|
scripts/deindexify.py
|
learningequality/channel2site
|
0020c15f404c58e369bbe435950e1a38418313e8
|
[
"MIT"
] | null | null | null |
scripts/deindexify.py
|
learningequality/channel2site
|
0020c15f404c58e369bbe435950e1a38418313e8
|
[
"MIT"
] | null | null | null |
scripts/deindexify.py
|
learningequality/channel2site
|
0020c15f404c58e369bbe435950e1a38418313e8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import argparse
import bs4
import os
import re
# better indentation hack via https://stackoverflow.com/a/15513483/127114
orig_prettify = bs4.BeautifulSoup.prettify
r = re.compile(r'^(\s*)', re.MULTILINE)
def prettify(self, encoding=None, formatter="minimal", indent_width=3):
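    # Repeat each run of leading whitespace `indent_width` times, widening
    # BeautifulSoup's default 1-space indentation.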
return r.sub(r'\1' * indent_width, orig_prettify(self, encoding, formatter))
bs4.BeautifulSoup.prettify = prettify
def process_file(filepath):
"""
Rewrite links in `filepath` as follows: /some/path/index.html --> /some/path/
"""
# print('processing', filepath)
if filepath.endswith('.html'):
# 1. read
with open(filepath, 'r') as htmlfile:
page = bs4.BeautifulSoup(htmlfile.read(), 'html.parser')
# 2. rewrite links
links = page.find_all('a')
for link in links:
href = link['href']
if href.endswith('index.html'):
href = href.replace('index.html', '')
link['href'] = href
# 3. hack to rewrite subtitle links that wget doesn't handle correctly
video = page.find('video')
if video:
source = video.find('source')
main_file = source['src']
tracks = video.find_all('track')
if tracks:
for track in tracks:
# track_src = track['src']
# new_src = os.path.basename(track_src)
new_src = main_file.replace('.mp4', '.vtt')
track['src'] = new_src
# 4. write
with open(filepath, 'w') as htmlfile:
html = page.prettify()
htmlfile.write(html)
def deindexify(webroot):
"""
    Walks the directory structure starting at `webroot` and rewrites all folder links.
"""
content_folders = list(os.walk(webroot))
for rel_path, _subfolders, filenames in content_folders:
# print('processing folder ' + str(rel_path))
for filename in filenames:
filepath = os.path.join(rel_path, filename)
if filepath.endswith('_Subtitle.vtt'):
video_matching_filepath = filepath.replace('_Subtitle.vtt', '_Low_Resolution.vtt')
os.rename(filepath, video_matching_filepath)
else:
process_file(filepath)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("webroot", help="Directory where website is stored.")
args = parser.parse_args()
deindexify(args.webroot)
print('Removing index.html from folder links done.')
| 33.115385
| 98
| 0.603175
|
import argparse
import bs4
import os
import re
orig_prettify = bs4.BeautifulSoup.prettify
r = re.compile(r'^(\s*)', re.MULTILINE)
def prettify(self, encoding=None, formatter="minimal", indent_width=3):
return r.sub(r'\1' * indent_width, orig_prettify(self, encoding, formatter))
bs4.BeautifulSoup.prettify = prettify
def process_file(filepath):
if filepath.endswith('.html'):
with open(filepath, 'r') as htmlfile:
page = bs4.BeautifulSoup(htmlfile.read(), 'html.parser')
links = page.find_all('a')
for link in links:
href = link['href']
if href.endswith('index.html'):
href = href.replace('index.html', '')
link['href'] = href
video = page.find('video')
if video:
source = video.find('source')
main_file = source['src']
tracks = video.find_all('track')
if tracks:
for track in tracks:
# track_src = track['src']
# new_src = os.path.basename(track_src)
new_src = main_file.replace('.mp4', '.vtt')
track['src'] = new_src
# 4. write
with open(filepath, 'w') as htmlfile:
html = page.prettify()
htmlfile.write(html)
def deindexify(webroot):
content_folders = list(os.walk(webroot))
for rel_path, _subfolders, filenames in content_folders:
# print('processing folder ' + str(rel_path))
for filename in filenames:
filepath = os.path.join(rel_path, filename)
if filepath.endswith('_Subtitle.vtt'):
video_matching_filepath = filepath.replace('_Subtitle.vtt', '_Low_Resolution.vtt')
os.rename(filepath, video_matching_filepath)
else:
process_file(filepath)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("webroot", help="Directory where website is stored.")
args = parser.parse_args()
deindexify(args.webroot)
print('Removing index.html from folder links done.')
| true
| true
|
790dfbd4daba2cae77c456b5f7c16f383632bb75
| 1,534
|
py
|
Python
|
tests/timings.py
|
ncullen93/ANTsPy
|
a4c990dcd5b7445a45ce7b366ee018c7350e7d9f
|
[
"Apache-2.0"
] | 3
|
2018-06-07T19:11:47.000Z
|
2019-06-10T05:24:06.000Z
|
tests/timings.py
|
ncullen93/ANTsPy
|
a4c990dcd5b7445a45ce7b366ee018c7350e7d9f
|
[
"Apache-2.0"
] | null | null | null |
tests/timings.py
|
ncullen93/ANTsPy
|
a4c990dcd5b7445a45ce7b366ee018c7350e7d9f
|
[
"Apache-2.0"
] | 1
|
2019-04-04T06:18:44.000Z
|
2019-04-04T06:18:44.000Z
|
"""
Timings against numpy/itk/nibabel/etc where appropriate
"""
import os
import nibabel as nib
import itk
import ants
import time
def time_nifti_to_numpy(N_TRIALS):
"""
Times how fast a framework can read a nifti file and convert it to numpy
"""
datadir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
img_paths = []
for dtype in ['CHAR', 'DOUBLE', 'FLOAT', 'SHORT',
'UNSIGNEDCHAR', 'UNSIGNEDSHORT']:
for dim in [2,3]:
img_paths.append(os.path.join(datadir, 'image_%s_%iD.nii.gz' % (dtype, dim)))
def test_nibabel():
for img_path in img_paths:
array = nib.load(img_path).get_data()
def test_itk():
for img_path in img_paths:
array = itk.GetArrayFromImage(itk.imread(img_path))
def test_ants():
for img_path in img_paths:
array = ants.image_read(img_path).numpy()
nib_start = time.time()
for i in range(N_TRIALS):
test_nibabel()
nib_end = time.time()
print('NIBABEL TIME: %.3f seconds' % (nib_end-nib_start))
itk_start = time.time()
for i in range(N_TRIALS):
test_itk()
itk_end = time.time()
print('ITK TIME: %.3f seconds' % (itk_end-itk_start))
ants_start = time.time()
for i in range(N_TRIALS):
test_ants()
ants_end = time.time()
print('ANTS TIME: %.3f seconds' % (ants_end-ants_start))
if __name__ == '__main__':
time_nifti_to_numpy(N_TRIALS=1)
time_nifti_to_numpy(N_TRIALS=20)
| 26
| 89
| 0.625163
|
import os
import nibabel as nib
import itk
import ants
import time
def time_nifti_to_numpy(N_TRIALS):
datadir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
img_paths = []
for dtype in ['CHAR', 'DOUBLE', 'FLOAT', 'SHORT',
'UNSIGNEDCHAR', 'UNSIGNEDSHORT']:
for dim in [2,3]:
img_paths.append(os.path.join(datadir, 'image_%s_%iD.nii.gz' % (dtype, dim)))
def test_nibabel():
for img_path in img_paths:
array = nib.load(img_path).get_data()
def test_itk():
for img_path in img_paths:
array = itk.GetArrayFromImage(itk.imread(img_path))
def test_ants():
for img_path in img_paths:
array = ants.image_read(img_path).numpy()
nib_start = time.time()
for i in range(N_TRIALS):
test_nibabel()
nib_end = time.time()
print('NIBABEL TIME: %.3f seconds' % (nib_end-nib_start))
itk_start = time.time()
for i in range(N_TRIALS):
test_itk()
itk_end = time.time()
print('ITK TIME: %.3f seconds' % (itk_end-itk_start))
ants_start = time.time()
for i in range(N_TRIALS):
test_ants()
ants_end = time.time()
print('ANTS TIME: %.3f seconds' % (ants_end-ants_start))
if __name__ == '__main__':
time_nifti_to_numpy(N_TRIALS=1)
time_nifti_to_numpy(N_TRIALS=20)
| true
| true
|
790dfd3fea2c63b9ff7d941d6798ec613cfce731
| 636
|
py
|
Python
|
notes/migrations/0004_auto_20220101_1047.py
|
ArnedyNavi/studymate
|
55e6a2c6717dd478a311ea8bf839a26ca3ef2b40
|
[
"MIT"
] | 4
|
2021-12-31T17:25:00.000Z
|
2022-02-08T17:05:46.000Z
|
notes/migrations/0004_auto_20220101_1047.py
|
ArnedyNavi/studymate
|
55e6a2c6717dd478a311ea8bf839a26ca3ef2b40
|
[
"MIT"
] | null | null | null |
notes/migrations/0004_auto_20220101_1047.py
|
ArnedyNavi/studymate
|
55e6a2c6717dd478a311ea8bf839a26ca3ef2b40
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.9 on 2022-01-01 10:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notes', '0003_auto_20220101_1040'),
]
operations = [
migrations.RenameField(
model_name='notes',
old_name='category',
new_name='categories',
),
migrations.RemoveField(
model_name='notescategory',
name='count',
),
migrations.AddField(
model_name='notesrating',
name='comment',
field=models.TextField(null=True),
),
]
| 22.714286
| 47
| 0.553459
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notes', '0003_auto_20220101_1040'),
]
operations = [
migrations.RenameField(
model_name='notes',
old_name='category',
new_name='categories',
),
migrations.RemoveField(
model_name='notescategory',
name='count',
),
migrations.AddField(
model_name='notesrating',
name='comment',
field=models.TextField(null=True),
),
]
| true
| true
|
790dfd571a02aaf0e1f50352d8a32cceda3321ae
| 213
|
py
|
Python
|
cogs/utils/my_errors.py
|
Roxedus/PoengBott
|
f485021b4bc9691cb860872eb51307de3f23d6dd
|
[
"MIT"
] | null | null | null |
cogs/utils/my_errors.py
|
Roxedus/PoengBott
|
f485021b4bc9691cb860872eb51307de3f23d6dd
|
[
"MIT"
] | null | null | null |
cogs/utils/my_errors.py
|
Roxedus/PoengBott
|
f485021b4bc9691cb860872eb51307de3f23d6dd
|
[
"MIT"
] | null | null | null |
# pylint: disable=missing-class-docstring,missing-module-docstring
# Discord Packages
from discord.ext.commands.errors import CommandError
class NoDM(CommandError):
pass
class NoToken(Exception):
pass
| 17.75
| 66
| 0.784038
|
from discord.ext.commands.errors import CommandError
class NoDM(CommandError):
pass
class NoToken(Exception):
pass
| true
| true
|
790dfd5eeab3c790a341e13a086dc986dd95779e
| 2,881
|
py
|
Python
|
infer/lib/python/inferlib/capture/ant.py
|
stefb965/infer
|
4057ffadcf6371af76be94dc34d05169ca290128
|
[
"BSD-3-Clause"
] | 1
|
2016-12-19T07:33:16.000Z
|
2016-12-19T07:33:16.000Z
|
infer/lib/python/inferlib/capture/ant.py
|
stefb965/infer
|
4057ffadcf6371af76be94dc34d05169ca290128
|
[
"BSD-3-Clause"
] | null | null | null |
infer/lib/python/inferlib/capture/ant.py
|
stefb965/infer
|
4057ffadcf6371af76be94dc34d05169ca290128
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2015 - present Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import logging
from . import util
from inferlib import jwlib
MODULE_NAME = __name__
MODULE_DESCRIPTION = '''Run analysis of code built with a command like:
ant [options] [target]
Analysis examples:
infer -- ant compile'''
LANG = ['java']
def gen_instance(*args):
return AntCapture(*args)
# This creates an empty argparser for the module, which provides only
# description/usage information and no arguments.
create_argparser = util.base_argparser(MODULE_DESCRIPTION, MODULE_NAME)
class AntCapture:
def __init__(self, args, cmd):
self.args = args
util.log_java_version()
logging.info(util.run_cmd_ignore_fail(['ant', '-version']))
# TODO: make the extraction of targets smarter
self.build_cmd = ['ant', '-verbose'] + cmd[1:]
def is_interesting(self, content):
return self.is_quoted(content) or content.endswith('.java')
def is_quoted(self, argument):
quote = '\''
return len(argument) > 2 and argument[0] == quote\
and argument[-1] == quote
def remove_quotes(self, argument):
if self.is_quoted(argument):
return argument[1:-1]
else:
return argument
def get_infer_commands(self, verbose_output):
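        # Scan ant's verbose output for '[javac]' lines: each 'Compilation arguments'
        # marker starts a new argument list (flushing the previous one into an infer
        # capture command); subsequent quoted arguments and .java paths are collected.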
javac_pattern = '[javac]'
argument_start_pattern = 'Compilation arguments'
calls = []
javac_arguments = []
collect = False
for line in verbose_output:
if javac_pattern in line:
if argument_start_pattern in line:
collect = True
if javac_arguments != []:
capture = jwlib.create_infer_command(self.args,
javac_arguments)
calls.append(capture)
javac_arguments = []
if collect:
pos = line.index(javac_pattern) + len(javac_pattern)
content = line[pos:].strip()
if self.is_interesting(content):
arg = self.remove_quotes(content)
javac_arguments.append(arg)
if javac_arguments != []:
capture = jwlib.create_infer_command(self.args, javac_arguments)
calls.append(capture)
javac_arguments = []
return calls
def capture(self):
cmds = self.get_infer_commands(util.get_build_output(self.build_cmd))
clean_cmd = '%s clean' % self.build_cmd[0]
return util.run_compilation_commands(cmds, clean_cmd)
| 34.297619
| 77
| 0.609511
|
import logging
from . import util
from inferlib import jwlib
MODULE_NAME = __name__
MODULE_DESCRIPTION = '''Run analysis of code built with a command like:
ant [options] [target]
Analysis examples:
infer -- ant compile'''
LANG = ['java']
def gen_instance(*args):
return AntCapture(*args)
create_argparser = util.base_argparser(MODULE_DESCRIPTION, MODULE_NAME)
class AntCapture:
def __init__(self, args, cmd):
self.args = args
util.log_java_version()
logging.info(util.run_cmd_ignore_fail(['ant', '-version']))
self.build_cmd = ['ant', '-verbose'] + cmd[1:]
def is_interesting(self, content):
return self.is_quoted(content) or content.endswith('.java')
def is_quoted(self, argument):
quote = '\''
return len(argument) > 2 and argument[0] == quote\
and argument[-1] == quote
def remove_quotes(self, argument):
if self.is_quoted(argument):
return argument[1:-1]
else:
return argument
def get_infer_commands(self, verbose_output):
javac_pattern = '[javac]'
argument_start_pattern = 'Compilation arguments'
calls = []
javac_arguments = []
collect = False
for line in verbose_output:
if javac_pattern in line:
if argument_start_pattern in line:
collect = True
if javac_arguments != []:
capture = jwlib.create_infer_command(self.args,
javac_arguments)
calls.append(capture)
javac_arguments = []
if collect:
pos = line.index(javac_pattern) + len(javac_pattern)
content = line[pos:].strip()
if self.is_interesting(content):
arg = self.remove_quotes(content)
javac_arguments.append(arg)
if javac_arguments != []:
capture = jwlib.create_infer_command(self.args, javac_arguments)
calls.append(capture)
javac_arguments = []
return calls
def capture(self):
cmds = self.get_infer_commands(util.get_build_output(self.build_cmd))
clean_cmd = '%s clean' % self.build_cmd[0]
return util.run_compilation_commands(cmds, clean_cmd)
| true
| true
|
790dfddcd5bba69ae9183519679bf598e77bebf4
| 4,776
|
py
|
Python
|
cvss/datasets/nyuv2.py
|
etmwb/cvsegmentation
|
c283a79f4cf4e78d057f598944b1c252f6533f00
|
[
"MIT"
] | null | null | null |
cvss/datasets/nyuv2.py
|
etmwb/cvsegmentation
|
c283a79f4cf4e78d057f598944b1c252f6533f00
|
[
"MIT"
] | null | null | null |
cvss/datasets/nyuv2.py
|
etmwb/cvsegmentation
|
c283a79f4cf4e78d057f598944b1c252f6533f00
|
[
"MIT"
] | null | null | null |
import os
import sys
import numpy as np
import random
import math
from PIL import Image, ImageOps, ImageFilter
import torch
import torch.utils.data as data
import torchvision.transforms as transform
from .base import BaseDataset
class NYUv2Segmentation(BaseDataset):
BASE_DIR = 'nyuv2'
NUM_CLASS = 40
def __init__(self, root=os.path.expanduser('~/.cvss/data'), split='train',
mode=None, transform=None, target_transform=None, **kwargs):
super(NYUv2Segmentation, self).__init__(
root, split, mode, transform, target_transform, **kwargs)
# assert exists and prepare dataset automatically
root = os.path.join(root, self.BASE_DIR)
        assert os.path.exists(root), "Please set up the dataset using " + \
            "cvss/scripts/prepare_nyuv2.py"
self.images, self.masks = _get_nyuv2_pairs(root, split)
if split != 'test':
assert (len(self.images) == len(self.masks))
if len(self.images) == 0:
raise(RuntimeError("Found 0 images in subfolders of: \
" + root + "\n"))
def __getitem__(self, index):
img = Image.open(self.images[index]).convert('RGB')
if self.mode == 'test':
if self.transform is not None:
img = self.transform(img)
return img, os.path.basename(self.images[index])
mask = Image.open(self.masks[index])
# synchrosized transform
if self.mode == 'train':
img, mask = self._sync_transform(img, mask)
elif self.mode == 'val':
img, mask = self._val_sync_transform(img, mask)
else:
assert self.mode == 'testval'
mask = self._mask_transform(mask)
# general resize, normalize and toTensor
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
mask = self.target_transform(mask)
return img, mask
def _sync_transform(self, img, mask):
# random mirror
if random.random() < 0.5:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
crop_size = self.crop_size
# random scale (short edge)
w, h = img.size
min_side = min(w, h)
scale = np.random.uniform(0.5, 2.0)
if min_side * scale < 350:
scale = 350 * 1.0 / min_side
long_size = int(self.base_size*scale)
if h > w:
oh = long_size
ow = int(1.0 * w * long_size / h + 0.5)
short_size = ow
else:
ow = long_size
oh = int(1.0 * h * long_size / w + 0.5)
short_size = oh
img = img.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# pad crop
if short_size < crop_size:
padh = crop_size - oh if oh < crop_size else 0
padw = crop_size - ow if ow < crop_size else 0
img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
# random crop crop_size
w, h = img.size
x1 = random.randint(0, w - crop_size)
y1 = random.randint(0, h - crop_size)
img = img.crop((x1, y1, x1+crop_size, y1+crop_size))
mask = mask.crop((x1, y1, x1+crop_size, y1+crop_size))
# final transform
return img, self._mask_transform(mask)
def _val_sync_transform(self, img, mask):
# final transform
return img, self._mask_transform(mask)
def _mask_transform(self, mask):
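        # Labels on disk are 1-based (see pred_offset); subtracting 1 makes them
        # 0-based, so zero-valued pixels (typically unlabelled) end up as -1.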
target = np.array(mask).astype('int64') - 1
return torch.from_numpy(target)
def __len__(self):
return len(self.images)
@property
def pred_offset(self):
return 1
def _get_nyuv2_pairs(folder, split='train'):
def get_path_pairs(folder, split_file):
img_paths = []
mask_paths = []
with open(os.path.join(folder, split_file), 'r') as f:
for filename in f.readlines():
filename = filename.strip()
imgpath = os.path.join(folder, 'image', filename)
maskpath = os.path.join(folder, 'mask', filename)
if os.path.isfile(maskpath):
img_paths.append(imgpath)
mask_paths.append(maskpath)
else:
print('cannot find the mask:', maskpath)
return img_paths, mask_paths
img_paths, mask_paths = get_path_pairs(folder, split_file=split+'.txt')
return img_paths, mask_paths
| 37.904762
| 79
| 0.568677
|
import os
import sys
import numpy as np
import random
import math
from PIL import Image, ImageOps, ImageFilter
import torch
import torch.utils.data as data
import torchvision.transforms as transform
from .base import BaseDataset
class NYUv2Segmentation(BaseDataset):
BASE_DIR = 'nyuv2'
NUM_CLASS = 40
def __init__(self, root=os.path.expanduser('~/.cvss/data'), split='train',
mode=None, transform=None, target_transform=None, **kwargs):
super(NYUv2Segmentation, self).__init__(
root, split, mode, transform, target_transform, **kwargs)
root = os.path.join(root, self.BASE_DIR)
        assert os.path.exists(root), "Please set up the dataset using " + \
            "cvss/scripts/prepare_nyuv2.py"
self.images, self.masks = _get_nyuv2_pairs(root, split)
if split != 'test':
assert (len(self.images) == len(self.masks))
if len(self.images) == 0:
raise(RuntimeError("Found 0 images in subfolders of: \
" + root + "\n"))
def __getitem__(self, index):
img = Image.open(self.images[index]).convert('RGB')
if self.mode == 'test':
if self.transform is not None:
img = self.transform(img)
return img, os.path.basename(self.images[index])
mask = Image.open(self.masks[index])
if self.mode == 'train':
img, mask = self._sync_transform(img, mask)
elif self.mode == 'val':
img, mask = self._val_sync_transform(img, mask)
else:
assert self.mode == 'testval'
mask = self._mask_transform(mask)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
mask = self.target_transform(mask)
return img, mask
def _sync_transform(self, img, mask):
if random.random() < 0.5:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
crop_size = self.crop_size
w, h = img.size
min_side = min(w, h)
scale = np.random.uniform(0.5, 2.0)
if min_side * scale < 350:
scale = 350 * 1.0 / min_side
long_size = int(self.base_size*scale)
if h > w:
oh = long_size
ow = int(1.0 * w * long_size / h + 0.5)
short_size = ow
else:
ow = long_size
oh = int(1.0 * h * long_size / w + 0.5)
short_size = oh
img = img.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
if short_size < crop_size:
padh = crop_size - oh if oh < crop_size else 0
padw = crop_size - ow if ow < crop_size else 0
img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
w, h = img.size
x1 = random.randint(0, w - crop_size)
y1 = random.randint(0, h - crop_size)
img = img.crop((x1, y1, x1+crop_size, y1+crop_size))
mask = mask.crop((x1, y1, x1+crop_size, y1+crop_size))
return img, self._mask_transform(mask)
def _val_sync_transform(self, img, mask):
return img, self._mask_transform(mask)
def _mask_transform(self, mask):
target = np.array(mask).astype('int64') - 1
return torch.from_numpy(target)
def __len__(self):
return len(self.images)
@property
def pred_offset(self):
return 1
def _get_nyuv2_pairs(folder, split='train'):
def get_path_pairs(folder, split_file):
img_paths = []
mask_paths = []
with open(os.path.join(folder, split_file), 'r') as f:
for filename in f.readlines():
filename = filename.strip()
imgpath = os.path.join(folder, 'image', filename)
maskpath = os.path.join(folder, 'mask', filename)
if os.path.isfile(maskpath):
img_paths.append(imgpath)
mask_paths.append(maskpath)
else:
print('cannot find the mask:', maskpath)
return img_paths, mask_paths
img_paths, mask_paths = get_path_pairs(folder, split_file=split+'.txt')
return img_paths, mask_paths
| true
| true
|
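A brief usage sketch for the NYUv2Segmentation class in the record above (hypothetical: it assumes the dataset has been prepared under ~/.cvss/data/nyuv2, that the BaseDataset constructor accepts base_size and crop_size keyword arguments as the transforms imply, and that the normalization statistics are placeholders rather than values from the original repository):

import torch.utils.data as data
import torchvision.transforms as transform
# Placeholder normalization (ImageNet-style statistics, purely illustrative).
input_transform = transform.Compose([
    transform.ToTensor(),
    transform.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
# Build the training split and wrap it in a DataLoader.
trainset = NYUv2Segmentation(split='train', mode='train', transform=input_transform,
                             base_size=520, crop_size=480)
trainloader = data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)
for images, masks in trainloader:
    # images: (B, 3, 480, 480) float tensors; masks: (B, 480, 480) int64 labels,
    # with the original class ids shifted down by one (see _mask_transform above).
    break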
790dfde41c1193449a9e7c78b80a0145993ae772
| 1,090
|
py
|
Python
|
cloudferrylib/os/actions/detach_used_volumes.py
|
toha10/CloudFerry
|
5f844a480d3326d1fea74cca35b648c32d390fab
|
[
"Apache-2.0"
] | null | null | null |
cloudferrylib/os/actions/detach_used_volumes.py
|
toha10/CloudFerry
|
5f844a480d3326d1fea74cca35b648c32d390fab
|
[
"Apache-2.0"
] | null | null | null |
cloudferrylib/os/actions/detach_used_volumes.py
|
toha10/CloudFerry
|
5f844a480d3326d1fea74cca35b648c32d390fab
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudferrylib.base.action import action
from cloudferrylib.utils import utils as utl
class DetachVolumes(action.Action):
def run(self, storage_info={}, **kwargs):
resource_storage = self.cloud.resources[utl.STORAGE_RESOURCE]
for (vol_id, vol_info) \
in storage_info[utl.VOLUMES_TYPE].iteritems():
if 'instance' in vol_info['meta']:
if vol_info['meta']['instance']:
resource_storage.detach_volume(vol_id)
return {}
| 36.333333
| 70
| 0.707339
|
from cloudferrylib.base.action import action
from cloudferrylib.utils import utils as utl
class DetachVolumes(action.Action):
def run(self, storage_info={}, **kwargs):
resource_storage = self.cloud.resources[utl.STORAGE_RESOURCE]
for (vol_id, vol_info) \
in storage_info[utl.VOLUMES_TYPE].iteritems():
if 'instance' in vol_info['meta']:
if vol_info['meta']['instance']:
resource_storage.detach_volume(vol_id)
return {}
| true
| true
|
790e001fdd0c7d8bcf20399395d3bf6cac5629ca
| 10,657
|
py
|
Python
|
Zmax_autoscoring_controlled_train_test_split.py
|
MahdadJafarzadeh/ssccoorriinngg
|
63c726e9e7d0f6d13032415c76b8c3bb1ff2bee3
|
[
"MIT"
] | 2
|
2020-04-28T12:50:26.000Z
|
2020-05-13T08:52:42.000Z
|
Zmax_autoscoring_controlled_train_test_split.py
|
MahdadJafarzadeh/ssccoorriinngg
|
63c726e9e7d0f6d13032415c76b8c3bb1ff2bee3
|
[
"MIT"
] | null | null | null |
Zmax_autoscoring_controlled_train_test_split.py
|
MahdadJafarzadeh/ssccoorriinngg
|
63c726e9e7d0f6d13032415c76b8c3bb1ff2bee3
|
[
"MIT"
] | 1
|
2020-07-14T13:48:56.000Z
|
2020-07-14T13:48:56.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 6 22:41:54 2020
@author: mahjaf
Automatic sleep scoring implemented for Zmax headband.
"""
#%% Reading EDF section
#####===================== Importiung libraries =========================#####
import mne
import numpy as np
from numpy import loadtxt
import h5py
import time
import os
from ssccoorriinngg import ssccoorriinngg
import matplotlib.pyplot as plt
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import confusion_matrix, make_scorer, accuracy_score, precision_score, recall_score, f1_score, classification_report
import pandas as pd
import tensorflow as tf
from scipy import signal
#####==================== Defining required paths =======================#####
Main_path = "P:/3013080.01/"
subject_Id_folder = Main_path + "Autoscoring/ssccoorriinngg/"
Data_folder = Main_path + "Zmax_Data/"
Hypnogram_folder = Main_path + "somno_scorings/Rathiga/"
#####===================== Reading EDF data files=========================#####
subject_ids = loadtxt(subject_Id_folder+"Zmax/Subject_ids_excluding 22_2.txt", dtype = 'str',delimiter='\n')
#####============= create an object of ssccoorriinngg class ==============#####
Object = ssccoorriinngg(filename='', channel='', fs = 256, T = 30)
#%% Load featureset and labels
path = "P:/3013080.01/Autoscoring/features/"
filename = "Zmax_Rathiga_scorings_ch-ch2+AccFeats_190620"
subjects_dic, hyp_dic = Object.load_dictionary(path, filename)
#%% ================================Training part==============================
# Training percentage
train_size = .7
n_train = round(train_size * len(subject_ids))
#######=== Randomly shuffle subjects to choose train and test splits ===#######
subject_ids = np.random.RandomState(seed=0).permutation(subject_ids)
#######=============== Initialize train and test arrays ================#######
sample_subject = "subjectP_12_night1_scoring.csv.spisop.new - Copy"
sample_hyp = "hypP_12_night1_scoring.csv.spisop.new - Copy"
X_train = np.empty((0, np.shape(subjects_dic[sample_subject])[1]))
X_test = np.empty((0, np.shape(subjects_dic[sample_subject])[1]))
y_train = np.empty((0, np.shape(hyp_dic[sample_hyp])[1]))
y_test = np.empty((0, np.shape(hyp_dic[sample_hyp])[1]))
########======= Picking the train subjetcs and concatenate them =======########
tic = time.time()
train_subjects_list = ["P_12_night1_scoring.csv.spisop.new - Copy",
"P_13_night2_scoring.csv.spisop.new - Copy",
"P_15_night2_scoring.csv.spisop.new - Copy",
"P_16_night1_scoring.csv.spisop.new - Copy",
"P_18_night1_scoring.csv.spisop.new - Copy",
"P_20_night1_scoring.csv.spisop.new - Copy",
"P_21_night1_scoring.csv.spisop.new - Copy",
"P_23_night1_scoring.csv.spisop.new - Copy"]
for c_subj in train_subjects_list:
# train hypnogram
str_train_hyp = 'hyp' + str(c_subj)
# train featureset
str_train_feat = 'subject' + str(c_subj)
    # create template arrays for features and labels
tmp_x = subjects_dic[str_train_feat]
tmp_y = hyp_dic[str_train_hyp]
# Concatenate features and labels
X_train = np.row_stack((X_train, tmp_x))
y_train = np.row_stack((y_train, tmp_y))
del tmp_x, tmp_y
print('Training set was successfully created in : {} secs'.format(time.time()-tic))
#%% ================================Test part==============================%%#
########======== Picking the test subjetcs and concatenate them =======########
tic = time.time()
test_subjects_list = []
tst_subj_list = ["P_12_night2_scoring.csv.spisop.new - Copy",
"P_12_night3_scoring.csv.spisop.new - Copy",
"P_13_night3_scoring.csv.spisop.new - Copy",
"P_14_night3_scoring.csv.spisop.new - Copy",
"P_15_night3_scoring.csv.spisop.new - Copy",
"P_16_night3_scoring.csv.spisop.new - Copy",
"P_18_night2_scoring.csv.spisop.new - Copy",
"P_18_night3_scoring.csv.spisop.new - Copy",
"P_20_night2_scoring.csv.spisop.new - Copy",
"P_20_night3_scoring.csv.spisop.new - Copy",
"P_21_night2_scoring.csv.spisop.new - Copy",
"P_21_night3_scoring.csv.spisop.new - Copy"]
for c_subj in tst_subj_list:
# test hypnogram
str_test_hyp = 'hyp' + str(c_subj)
# test featureset
str_test_feat = 'subject' + str(c_subj)
    # create template arrays for features and labels
tmp_x = subjects_dic[str_test_feat]
tmp_y = hyp_dic[str_test_hyp]
# Concatenate features and labels
X_test = np.row_stack((X_test, tmp_x))
y_test = np.row_stack((y_test, tmp_y))
# keep the subject id
test_subjects_list.append(str_test_feat)
# remove for next iteration
del tmp_x, tmp_y, str_test_feat, str_test_hyp
print('Test set was successfully created in : {} secs'.format(time.time()-tic))
print(f'Raw train and test data were created.')
########================== Replace any probable NaN ===================########
X_train = Object.replace_NaN_with_mean(X_train)
X_test = Object.replace_NaN_with_mean(X_test)
########================== Replace any probable inf ===================########
X_train = Object.replace_inf_with_mean(X_train)
X_test = Object.replace_inf_with_mean(X_test)
########==================== Z-score of features ======================########
X_train, X_test = Object.Standardadize_features(X_train, X_test)
########========== select features only on first iteration ============########
td = 5 # Time dependence: number of epochs of memory
X_train_td = Object.add_time_dependence_backward(X_train, n_time_dependence=td,
padding_type = 'sequential')
X_test_td = Object.add_time_dependence_backward(X_test, n_time_dependence=td,
padding_type = 'sequential')
########====================== Feature Selection ======================########
y_train_td = Object.binary_to_single_column_label(y_train)
########========== select features only on first iteration ============########
# =============================================================================
# ranks, Feat_selected, selected_feats_ind = Object.FeatSelect_Boruta(X_train_td,
# y_train_td[:,0], max_iter = 50, max_depth = 7)
#
# #######===================== Save selected feats =======================#######
#
# path = "P:/3013080.01/Autoscoring/features/"
# filename = "Selected_Features_BoturaNoTimeDependency_5_Backward_Zmax_ch1-ch2+Acc_200620"
# with open(path+filename+'.pickle',"wb") as f:
# pickle.dump(selected_feats_ind, f)
# =============================================================================
########################### Load selected feats ###############################
path = "P:/3013080.01/Autoscoring/features/"
filename = "Selected_Features_BoturaAfterTD=5_Backward_Zmax_ch1-ch2+Acc_200620"
#filename = "sleep_scoring_NoArousal_8channels_selected_feats_NEW"
with open(path + filename + '.pickle', "rb") as f:
selected_feats_ind = pickle.load(f)
########=================== Apply selected features ===================########
X_train = X_train_td[:, selected_feats_ind]
X_test = X_test_td[:, selected_feats_ind]
########============== Define classifier of interest ==================########
y_pred = Object.XGB_Modelling(X_train, y_train,X_test, y_test, n_estimators = 500)
#y_pred = Object.KernelSVM_Modelling(X_train, y_train,X_test, y_test, kernel='rbf')
y_pred = Object.ANN_classifier(X_train, y_train, X_test, units_h1=600, units_h2 = 300, units_output = 5,
activation_out = 'softmax',
init = 'uniform', activation = 'relu', optimizer = 'adam',
loss = 'categorical_crossentropy', metrics=[tf.keras.metrics.Recall()],
h3_status = 'deactive', units_h3 = 50, epochs = 100, batch_size = 100)
########===== Metrics to assess the model performance on test data ====########
Acc, Recall, prec, f1_sc, kappa, mcm= Object.multi_label_confusion_matrix(y_test, y_pred)
########================= Creating subjective outputs =================########
Object.create_subjecive_results(y_true=y_test, y_pred=y_pred,
test_subjects_list = test_subjects_list,
subjects_data_dic = subjects_dic,
fname_save = "results")
########============= find number of epochs per stage =================########
Object.find_number_of_samples_per_class(y_test, including_artefact = False)
########================== Comparative hypnogram ======================########
hyp_true = Object.binary_to_single_column_label(y_test)
Object.plot_comparative_hyp(hyp_true = hyp_true, hyp_pred = y_pred, mark_REM = 'active')
########==================== Plot subjectve hypnos ====================########
Object.plot_subjective_hypno(y_true=y_test, y_pred=y_pred,
test_subjects_list=test_subjects_list,
subjects_data_dic=subjects_dic,
save_fig = False,
directory="P:/3013080.01/Autoscoring/ssccoorriinngg/")
########=================== Plot overall conf-mat =======================######
Object.plot_confusion_matrix(y_test,y_pred, target_names = ['Wake','N1','N2','SWS','REM'],
title='Confusion matrix of ssccoorriinngg algorithm',
cmap = None,
normalize=True)
########================== Plot subjective conf-mat ==================########
Object.plot_confusion_mat_subjective(y_true=y_test, y_pred=y_pred,
test_subjects_list=test_subjects_list,
subjects_data_dic=subjects_dic)
########========================== Save figure =======================#########
Object.save_figure(saving_format = '.png',
directory="P:/3013080.02/Mahdad/Github/ssccoorriinngg/",
saving_name = 'test_subject_all' + str(c_subj), dpi = 900,
full_screen = False)
| 41.956693
| 137
| 0.569016
|
confusion_matrix, make_scorer, accuracy_score, precision_score, recall_score, f1_score, classification_report
import pandas as pd
import tensorflow as tf
from scipy import signal
' + str(c_subj)
tmp_x = subjects_dic[str_test_feat]
tmp_y = hyp_dic[str_test_hyp]
X_test = np.row_stack((X_test, tmp_x))
y_test = np.row_stack((y_test, tmp_y))
test_subjects_list.append(str_test_feat)
del tmp_x, tmp_y, str_test_feat, str_test_hyp
print('Test set was successfully created in : {} secs'.format(time.time()-tic))
print(f'Raw train and test data were created.')
| true
| true
|
790e003189116c711c7868ee0796c708fa30c31e
| 692
|
py
|
Python
|
FatherSon/HelloWorld2_source_code/Listing_19-1.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | 1
|
2019-01-04T05:47:50.000Z
|
2019-01-04T05:47:50.000Z
|
FatherSon/HelloWorld2_source_code/Listing_19-1.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | null | null | null |
FatherSon/HelloWorld2_source_code/Listing_19-1.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | null | null | null |
# Listing_19-1.py
# Copyright Warren & Carter Sande, 2013
# Released under MIT license http://www.opensource.org/licenses/mit-license.php
# Version $version ----------------------------
# Trying out sounds in Pygame
import pygame
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode([640,480])
pygame.time.delay(1000) # Wait a second for the mixer to finish initializing
splat = pygame.mixer.Sound("splat.wav") # Create the Sound object
splat.play() # Play the sound
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
pygame.quit()
| 30.086957
| 82
| 0.628613
|
import pygame
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode([640,480])
pygame.time.delay(1000)
splat = pygame.mixer.Sound("splat.wav")
splat.play()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
pygame.quit()
| true
| true
|
790e00df749b9f39cf73a004cd46aaf2af4e6723
| 18,416
|
py
|
Python
|
librosa/decompose.py
|
nehz/librosa
|
0dcd53f462db124ed3f54edf2334f28738d2ecc6
|
[
"ISC"
] | 2
|
2018-10-24T09:04:54.000Z
|
2021-03-29T16:49:01.000Z
|
librosa/decompose.py
|
nehz/librosa
|
0dcd53f462db124ed3f54edf2334f28738d2ecc6
|
[
"ISC"
] | null | null | null |
librosa/decompose.py
|
nehz/librosa
|
0dcd53f462db124ed3f54edf2334f28738d2ecc6
|
[
"ISC"
] | 2
|
2022-01-28T06:40:44.000Z
|
2022-02-06T19:03:58.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Spectrogram decomposition
=========================
.. autosummary::
:toctree: generated/
decompose
hpss
nn_filter
"""
import numpy as np
import scipy.sparse
from scipy.ndimage import median_filter
import sklearn.decomposition
from . import core
from . import cache
from . import segment
from . import util
from .util.exceptions import ParameterError
__all__ = ['decompose', 'hpss', 'nn_filter']
def decompose(S, n_components=None, transformer=None, sort=False, fit=True, **kwargs):
"""Decompose a feature matrix.
Given a spectrogram `S`, produce a decomposition into `components`
and `activations` such that `S ~= components.dot(activations)`.
    By default, this is done with non-negative matrix factorization (NMF),
but any `sklearn.decomposition`-type object will work.
Parameters
----------
S : np.ndarray [shape=(n_features, n_samples), dtype=float]
The input feature matrix (e.g., magnitude spectrogram)
n_components : int > 0 [scalar] or None
number of desired components
if None, then `n_features` components are used
transformer : None or object
If None, use `sklearn.decomposition.NMF`
Otherwise, any object with a similar interface to NMF should work.
`transformer` must follow the scikit-learn convention, where
input data is `(n_samples, n_features)`.
`transformer.fit_transform()` will be run on `S.T` (not `S`),
the return value of which is stored (transposed) as `activations`
The components will be retrieved as `transformer.components_.T`
`S ~= np.dot(activations, transformer.components_).T`
or equivalently:
`S ~= np.dot(transformer.components_.T, activations.T)`
sort : bool
If `True`, components are sorted by ascending peak frequency.
.. note:: If used with `transformer`, sorting is applied to copies
of the decomposition parameters, and not to `transformer`'s
internal parameters.
fit : bool
If `True`, components are estimated from the input ``S``.
If `False`, components are assumed to be pre-computed and stored
in ``transformer``, and are not changed.
kwargs : Additional keyword arguments to the default transformer
`sklearn.decomposition.NMF`
Returns
-------
components: np.ndarray [shape=(n_features, n_components)]
matrix of components (basis elements).
activations: np.ndarray [shape=(n_components, n_samples)]
transformed matrix/activation matrix
Raises
------
ParameterError
if `fit` is False and no `transformer` object is provided.
See Also
--------
sklearn.decomposition : SciKit-Learn matrix decomposition modules
Examples
--------
Decompose a magnitude spectrogram into 32 components with NMF
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> comps, acts = librosa.decompose.decompose(S, n_components=8)
>>> comps
array([[ 1.876e-01, 5.559e-02, ..., 1.687e-01, 4.907e-02],
[ 3.148e-01, 1.719e-01, ..., 2.314e-01, 9.493e-02],
...,
[ 1.561e-07, 8.564e-08, ..., 7.167e-08, 4.997e-08],
[ 1.531e-07, 7.880e-08, ..., 5.632e-08, 4.028e-08]])
>>> acts
array([[ 4.197e-05, 8.512e-03, ..., 3.056e-05, 9.159e-06],
[ 9.568e-06, 1.718e-02, ..., 3.322e-05, 7.869e-06],
...,
[ 5.982e-05, 1.311e-02, ..., -0.000e+00, 6.323e-06],
[ 3.782e-05, 7.056e-03, ..., 3.290e-05, -0.000e+00]])
Sort components by ascending peak frequency
>>> comps, acts = librosa.decompose.decompose(S, n_components=16,
... sort=True)
Or with sparse dictionary learning
>>> import sklearn.decomposition
>>> T = sklearn.decomposition.MiniBatchDictionaryLearning(n_components=16)
>>> scomps, sacts = librosa.decompose.decompose(S, transformer=T, sort=True)
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10,8))
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(S,
... ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('Input spectrogram')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.subplot(3, 2, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(comps,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Components')
>>> plt.subplot(3, 2, 4)
>>> librosa.display.specshow(acts, x_axis='time')
>>> plt.ylabel('Components')
>>> plt.title('Activations')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 3)
>>> S_approx = comps.dot(acts)
>>> librosa.display.specshow(librosa.amplitude_to_db(S_approx,
... ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Reconstructed spectrogram')
>>> plt.tight_layout()
"""
if transformer is None:
if fit is False:
raise ParameterError('fit must be True if transformer is None')
transformer = sklearn.decomposition.NMF(n_components=n_components,
**kwargs)
if n_components is None:
n_components = S.shape[0]
if fit:
activations = transformer.fit_transform(S.T).T
else:
activations = transformer.transform(S.T).T
components = transformer.components_.T
if sort:
components, idx = util.axis_sort(components, index=True)
activations = activations[idx]
return components, activations
@cache(level=30)
def hpss(S, kernel_size=31, power=2.0, mask=False, margin=1.0):
"""Median-filtering harmonic percussive source separation (HPSS).
If `margin = 1.0`, decomposes an input spectrogram `S = H + P`
where `H` contains the harmonic components,
and `P` contains the percussive components.
If `margin > 1.0`, decomposes an input spectrogram `S = H + P + R`
where `R` contains residual components not included in `H` or `P`.
This implementation is based upon the algorithm described by [1]_ and [2]_.
.. [1] Fitzgerald, Derry.
"Harmonic/percussive separation using median filtering."
13th International Conference on Digital Audio Effects (DAFX10),
Graz, Austria, 2010.
.. [2] Driedger, Müller, Disch.
"Extending harmonic-percussive separation of audio."
15th International Society for Music Information Retrieval Conference (ISMIR 2014),
Taipei, Taiwan, 2014.
Parameters
----------
S : np.ndarray [shape=(d, n)]
input spectrogram. May be real (magnitude) or complex.
kernel_size : int or tuple (kernel_harmonic, kernel_percussive)
kernel size(s) for the median filters.
- If scalar, the same size is used for both harmonic and percussive.
- If tuple, the first value specifies the width of the
harmonic filter, and the second value specifies the width
of the percussive filter.
power : float > 0 [scalar]
Exponent for the Wiener filter when constructing soft mask matrices.
mask : bool
Return the masking matrices instead of components.
Masking matrices contain non-negative real values that
can be used to measure the assignment of energy from `S`
into harmonic or percussive components.
Components can be recovered by multiplying `S * mask_H`
or `S * mask_P`.
margin : float or tuple (margin_harmonic, margin_percussive)
margin size(s) for the masks (as described in [2]_)
- If scalar, the same size is used for both harmonic and percussive.
- If tuple, the first value specifies the margin of the
harmonic mask, and the second value specifies the margin
of the percussive mask.
Returns
-------
harmonic : np.ndarray [shape=(d, n)]
harmonic component (or mask)
percussive : np.ndarray [shape=(d, n)]
percussive component (or mask)
See Also
--------
util.softmask
Notes
-----
This function caches at level 30.
Examples
--------
Separate into harmonic and percussive
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)
>>> D = librosa.stft(y)
>>> H, P = librosa.decompose.hpss(D)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(D,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Full power spectrogram')
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(librosa.amplitude_to_db(H,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Harmonic power spectrogram')
>>> plt.subplot(3, 1, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(P,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Percussive power spectrogram')
>>> plt.tight_layout()
Or with a narrower horizontal filter
>>> H, P = librosa.decompose.hpss(D, kernel_size=(13, 31))
Just get harmonic/percussive masks, not the spectra
>>> mask_H, mask_P = librosa.decompose.hpss(D, mask=True)
>>> mask_H
array([[ 1.000e+00, 1.469e-01, ..., 2.648e-03, 2.164e-03],
[ 1.000e+00, 2.368e-01, ..., 9.413e-03, 7.703e-03],
...,
[ 8.869e-01, 5.673e-02, ..., 4.603e-02, 1.247e-05],
[ 7.068e-01, 2.194e-02, ..., 4.453e-02, 1.205e-05]], dtype=float32)
>>> mask_P
array([[ 2.858e-05, 8.531e-01, ..., 9.974e-01, 9.978e-01],
[ 1.586e-05, 7.632e-01, ..., 9.906e-01, 9.923e-01],
...,
[ 1.131e-01, 9.433e-01, ..., 9.540e-01, 1.000e+00],
[ 2.932e-01, 9.781e-01, ..., 9.555e-01, 1.000e+00]], dtype=float32)
Separate into harmonic/percussive/residual components by using a margin > 1.0
>>> H, P = librosa.decompose.hpss(D, margin=3.0)
>>> R = D - (H+P)
>>> y_harm = librosa.core.istft(H)
>>> y_perc = librosa.core.istft(P)
>>> y_resi = librosa.core.istft(R)
Get a more isolated percussive component by widening its margin
>>> H, P = librosa.decompose.hpss(D, margin=(1.0,5.0))
"""
if np.iscomplexobj(S):
S, phase = core.magphase(S)
else:
phase = 1
if np.isscalar(kernel_size):
win_harm = kernel_size
win_perc = kernel_size
else:
win_harm = kernel_size[0]
win_perc = kernel_size[1]
if np.isscalar(margin):
margin_harm = margin
margin_perc = margin
else:
margin_harm = margin[0]
margin_perc = margin[1]
# margin minimum is 1.0
if margin_harm < 1 or margin_perc < 1:
raise ParameterError("Margins must be >= 1.0. "
"A typical range is between 1 and 10.")
# Compute median filters. Pre-allocation here preserves memory layout.
harm = np.empty_like(S)
harm[:] = median_filter(S, size=(1, win_harm), mode='reflect')
perc = np.empty_like(S)
perc[:] = median_filter(S, size=(win_perc, 1), mode='reflect')
split_zeros = (margin_harm == 1 and margin_perc == 1)
mask_harm = util.softmask(harm, perc * margin_harm,
power=power,
split_zeros=split_zeros)
mask_perc = util.softmask(perc, harm * margin_perc,
power=power,
split_zeros=split_zeros)
if mask:
return mask_harm, mask_perc
return ((S * mask_harm) * phase, (S * mask_perc) * phase)
@cache(level=30)
def nn_filter(S, rec=None, aggregate=None, axis=-1, **kwargs):
'''Filtering by nearest-neighbors.
Each data point (e.g, spectrogram column) is replaced
by aggregating its nearest neighbors in feature space.
This can be useful for de-noising a spectrogram or feature matrix.
The non-local means method [1]_ can be recovered by providing a
weighted recurrence matrix as input and specifying `aggregate=np.average`.
Similarly, setting `aggregate=np.median` produces sparse de-noising
as in REPET-SIM [2]_.
.. [1] Buades, A., Coll, B., & Morel, J. M.
(2005, June). A non-local algorithm for image denoising.
In Computer Vision and Pattern Recognition, 2005.
CVPR 2005. IEEE Computer Society Conference on (Vol. 2, pp. 60-65). IEEE.
.. [2] Rafii, Z., & Pardo, B.
(2012, October). "Music/Voice Separation Using the Similarity Matrix."
International Society for Music Information Retrieval Conference, 2012.
Parameters
----------
S : np.ndarray
The input data (spectrogram) to filter
rec : (optional) scipy.sparse.spmatrix or np.ndarray
Optionally, a pre-computed nearest-neighbor matrix
as provided by `librosa.segment.recurrence_matrix`
aggregate : function
aggregation function (default: `np.mean`)
If `aggregate=np.average`, then a weighted average is
computed according to the (per-row) weights in `rec`.
For all other aggregation functions, all neighbors
are treated equally.
axis : int
The axis along which to filter (by default, columns)
kwargs
Additional keyword arguments provided to
`librosa.segment.recurrence_matrix` if `rec` is not provided
Returns
-------
S_filtered : np.ndarray
The filtered data
Raises
------
ParameterError
if `rec` is provided and its shape is incompatible with `S`.
See also
--------
decompose
hpss
librosa.segment.recurrence_matrix
Notes
-----
This function caches at level 30.
Examples
--------
De-noise a chromagram by non-local median filtering.
By default this would use euclidean distance to select neighbors,
but this can be overridden directly by setting the `metric` parameter.
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=30, duration=10)
>>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
>>> chroma_med = librosa.decompose.nn_filter(chroma,
... aggregate=np.median,
... metric='cosine')
To use non-local means, provide an affinity matrix and `aggregate=np.average`.
>>> rec = librosa.segment.recurrence_matrix(chroma, mode='affinity',
... metric='cosine', sparse=True)
>>> chroma_nlm = librosa.decompose.nn_filter(chroma, rec=rec,
... aggregate=np.average)
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10, 8))
>>> plt.subplot(5, 1, 1)
>>> librosa.display.specshow(chroma, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Unfiltered')
>>> plt.subplot(5, 1, 2)
>>> librosa.display.specshow(chroma_med, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Median-filtered')
>>> plt.subplot(5, 1, 3)
>>> librosa.display.specshow(chroma_nlm, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Non-local means')
>>> plt.subplot(5, 1, 4)
>>> librosa.display.specshow(chroma - chroma_med,
... y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Original - median')
>>> plt.subplot(5, 1, 5)
>>> librosa.display.specshow(chroma - chroma_nlm,
... y_axis='chroma', x_axis='time')
>>> plt.colorbar()
>>> plt.title('Original - NLM')
>>> plt.tight_layout()
'''
if aggregate is None:
aggregate = np.mean
if rec is None:
kwargs = dict(kwargs)
kwargs['sparse'] = True
rec = segment.recurrence_matrix(S, axis=axis, **kwargs)
elif not scipy.sparse.issparse(rec):
rec = scipy.sparse.csr_matrix(rec)
if rec.shape[0] != S.shape[axis] or rec.shape[0] != rec.shape[1]:
raise ParameterError('Invalid self-similarity matrix shape '
'rec.shape={} for S.shape={}'.format(rec.shape,
S.shape))
return __nn_filter_helper(rec.data, rec.indices, rec.indptr,
S.swapaxes(0, axis), aggregate).swapaxes(0, axis)
def __nn_filter_helper(R_data, R_indices, R_ptr, S, aggregate):
'''Nearest-neighbor filter helper function.
This is an internal function, not for use outside of the decompose module.
It applies the nearest-neighbor filter to S, assuming that the first index
corresponds to observations.
Parameters
----------
R_data, R_indices, R_ptr : np.ndarrays
The `data`, `indices`, and `indptr` of a scipy.sparse matrix
S : np.ndarray
The observation data to filter
aggregate : callable
The aggregation operator
Returns
-------
S_out : np.ndarray like S
The filtered data array
'''
s_out = np.empty_like(S)
for i in range(len(R_ptr)-1):
# Get the non-zeros out of the recurrence matrix
targets = R_indices[R_ptr[i]:R_ptr[i+1]]
if not len(targets):
s_out[i] = S[i]
continue
neighbors = np.take(S, targets, axis=0)
if aggregate is np.average:
weights = R_data[R_ptr[i]:R_ptr[i+1]]
s_out[i] = aggregate(neighbors, axis=0, weights=weights)
else:
s_out[i] = aggregate(neighbors, axis=0)
return s_out
| 32.827094
| 91
| 0.587641
|
import numpy as np
import scipy.sparse
from scipy.ndimage import median_filter
import sklearn.decomposition
from . import core
from . import cache
from . import segment
from . import util
from .util.exceptions import ParameterError
__all__ = ['decompose', 'hpss', 'nn_filter']
def decompose(S, n_components=None, transformer=None, sort=False, fit=True, **kwargs):
if transformer is None:
if fit is False:
raise ParameterError('fit must be True if transformer is None')
transformer = sklearn.decomposition.NMF(n_components=n_components,
**kwargs)
if n_components is None:
n_components = S.shape[0]
if fit:
activations = transformer.fit_transform(S.T).T
else:
activations = transformer.transform(S.T).T
components = transformer.components_.T
if sort:
components, idx = util.axis_sort(components, index=True)
activations = activations[idx]
return components, activations
@cache(level=30)
def hpss(S, kernel_size=31, power=2.0, mask=False, margin=1.0):
if np.iscomplexobj(S):
S, phase = core.magphase(S)
else:
phase = 1
if np.isscalar(kernel_size):
win_harm = kernel_size
win_perc = kernel_size
else:
win_harm = kernel_size[0]
win_perc = kernel_size[1]
if np.isscalar(margin):
margin_harm = margin
margin_perc = margin
else:
margin_harm = margin[0]
margin_perc = margin[1]
if margin_harm < 1 or margin_perc < 1:
raise ParameterError("Margins must be >= 1.0. "
"A typical range is between 1 and 10.")
harm = np.empty_like(S)
harm[:] = median_filter(S, size=(1, win_harm), mode='reflect')
perc = np.empty_like(S)
perc[:] = median_filter(S, size=(win_perc, 1), mode='reflect')
split_zeros = (margin_harm == 1 and margin_perc == 1)
mask_harm = util.softmask(harm, perc * margin_harm,
power=power,
split_zeros=split_zeros)
mask_perc = util.softmask(perc, harm * margin_perc,
power=power,
split_zeros=split_zeros)
if mask:
return mask_harm, mask_perc
return ((S * mask_harm) * phase, (S * mask_perc) * phase)
@cache(level=30)
def nn_filter(S, rec=None, aggregate=None, axis=-1, **kwargs):
if aggregate is None:
aggregate = np.mean
if rec is None:
kwargs = dict(kwargs)
kwargs['sparse'] = True
rec = segment.recurrence_matrix(S, axis=axis, **kwargs)
elif not scipy.sparse.issparse(rec):
rec = scipy.sparse.csr_matrix(rec)
if rec.shape[0] != S.shape[axis] or rec.shape[0] != rec.shape[1]:
raise ParameterError('Invalid self-similarity matrix shape '
'rec.shape={} for S.shape={}'.format(rec.shape,
S.shape))
return __nn_filter_helper(rec.data, rec.indices, rec.indptr,
S.swapaxes(0, axis), aggregate).swapaxes(0, axis)
def __nn_filter_helper(R_data, R_indices, R_ptr, S, aggregate):
s_out = np.empty_like(S)
for i in range(len(R_ptr)-1):
targets = R_indices[R_ptr[i]:R_ptr[i+1]]
if not len(targets):
s_out[i] = S[i]
continue
neighbors = np.take(S, targets, axis=0)
if aggregate is np.average:
weights = R_data[R_ptr[i]:R_ptr[i+1]]
s_out[i] = aggregate(neighbors, axis=0, weights=weights)
else:
s_out[i] = aggregate(neighbors, axis=0)
return s_out
| true
| true
|
790e0144510fac1fbcd705569fa24982f6cb97db
| 3,352
|
py
|
Python
|
xjsonrpc/server/validators/pydantic.py
|
bernhardkaindl/pjrpc
|
6e21534ee5a073315e805a911ae75cada4a81137
|
[
"Unlicense"
] | 10
|
2020-03-15T06:41:58.000Z
|
2022-03-17T08:55:53.000Z
|
xjsonrpc/server/validators/pydantic.py
|
bernhardkaindl/pjrpc
|
6e21534ee5a073315e805a911ae75cada4a81137
|
[
"Unlicense"
] | 41
|
2019-11-16T09:57:54.000Z
|
2022-03-31T17:34:13.000Z
|
xjsonrpc/server/validators/pydantic.py
|
bernhardkaindl/pjrpc
|
6e21534ee5a073315e805a911ae75cada4a81137
|
[
"Unlicense"
] | 1
|
2022-03-17T08:21:28.000Z
|
2022-03-17T08:21:28.000Z
|
import functools as ft
import inspect
from typing import Any, Callable, Dict, Iterable, List, Optional, Union
import pydantic
from . import base
class PydanticValidator(base.BaseValidator):
"""
Parameters validator based on `pydantic <https://pydantic-docs.helpmanual.io/>`_ library.
Uses python type annotations for parameters validation.
:param coerce: if ``True`` returns converted (coerced) parameters according to parameter type annotation
otherwise returns parameters as is
"""
def __init__(self, coerce: bool = True, **config_args: Any):
self._coerce = coerce
config_args.setdefault('extra', 'forbid')
# https://pydantic-docs.helpmanual.io/usage/model_config/
self._model_config = type('ModelConfig', (pydantic.BaseConfig,), config_args)
def validate_method(
self, method: Callable, params: Optional[Union[list, dict]], exclude: Iterable[str] = (), **kwargs: Any,
) -> Dict[str, Any]:
"""
Validates params against method using ``pydantic`` validator.
:param method: method to validate parameters against
:param params: parameters to be validated
:param exclude: parameter names to be excluded from validation
:returns: coerced parameters if `coerce` flag is ``True`` otherwise parameters as is
:raises: ValidationError
"""
signature = self.signature(method, exclude)
schema = self.build_validation_schema(signature)
params_model = pydantic.create_model(method.__name__, **schema, __config__=self._model_config)
bound_params = self.bind(signature, params)
try:
obj = params_model(**bound_params.arguments)
except pydantic.ValidationError as e:
raise base.ValidationError(*e.errors()) from e
return {attr: getattr(obj, attr) for attr in obj.__fields_set__} if self._coerce else bound_params.arguments
@ft.lru_cache(maxsize=None)
def build_validation_schema(self, signature: inspect.Signature) -> Dict[str, Any]:
"""
Builds pydantic model based validation schema from method signature.
:param signature: method signature to build schema for
:returns: validation schema
"""
field_definitions = {}
for param in signature.parameters.values():
if param.kind is inspect.Parameter.VAR_KEYWORD:
field_definitions[param.name] = (
Optional[Dict[str, param.annotation]] if param.annotation is not inspect.Parameter.empty else Any,
param.default if param.default is not inspect.Parameter.empty else None,
)
elif param.kind is inspect.Parameter.VAR_POSITIONAL:
field_definitions[param.name] = (
Optional[List[param.annotation]] if param.annotation is not inspect.Parameter.empty else Any,
param.default if param.default is not inspect.Parameter.empty else None,
)
else:
field_definitions[param.name] = (
param.annotation if param.annotation is not inspect.Parameter.empty else Any,
param.default if param.default is not inspect.Parameter.empty else ...,
)
return field_definitions
| 40.385542
| 118
| 0.654236
|
import functools as ft
import inspect
from typing import Any, Callable, Dict, Iterable, List, Optional, Union
import pydantic
from . import base
class PydanticValidator(base.BaseValidator):
def __init__(self, coerce: bool = True, **config_args: Any):
self._coerce = coerce
config_args.setdefault('extra', 'forbid')
self._model_config = type('ModelConfig', (pydantic.BaseConfig,), config_args)
def validate_method(
self, method: Callable, params: Optional[Union[list, dict]], exclude: Iterable[str] = (), **kwargs: Any,
) -> Dict[str, Any]:
signature = self.signature(method, exclude)
schema = self.build_validation_schema(signature)
params_model = pydantic.create_model(method.__name__, **schema, __config__=self._model_config)
bound_params = self.bind(signature, params)
try:
obj = params_model(**bound_params.arguments)
except pydantic.ValidationError as e:
raise base.ValidationError(*e.errors()) from e
return {attr: getattr(obj, attr) for attr in obj.__fields_set__} if self._coerce else bound_params.arguments
@ft.lru_cache(maxsize=None)
def build_validation_schema(self, signature: inspect.Signature) -> Dict[str, Any]:
field_definitions = {}
for param in signature.parameters.values():
if param.kind is inspect.Parameter.VAR_KEYWORD:
field_definitions[param.name] = (
Optional[Dict[str, param.annotation]] if param.annotation is not inspect.Parameter.empty else Any,
param.default if param.default is not inspect.Parameter.empty else None,
)
elif param.kind is inspect.Parameter.VAR_POSITIONAL:
field_definitions[param.name] = (
Optional[List[param.annotation]] if param.annotation is not inspect.Parameter.empty else Any,
param.default if param.default is not inspect.Parameter.empty else None,
)
else:
field_definitions[param.name] = (
param.annotation if param.annotation is not inspect.Parameter.empty else Any,
param.default if param.default is not inspect.Parameter.empty else ...,
)
return field_definitions
| true
| true
|
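A minimal usage sketch for the PydanticValidator in the record above (hypothetical: the target function and parameter values are invented, the import path simply mirrors the repository layout, and the base validator's bind() is assumed to map a params dict onto the function signature by name):

from xjsonrpc.server.validators.pydantic import PydanticValidator
validator = PydanticValidator(coerce=True)
def add(a: int, b: int = 0) -> int:
    return a + b
# Params are bound against the signature and checked by a generated pydantic model;
# with coerce=True the string '1' should come back as int 1 per the annotation,
# i.e. roughly {'a': 1, 'b': 2}.
params = validator.validate_method(add, {'a': '1', 'b': 2})
# Invalid input raises the package's ValidationError (base.ValidationError above).
try:
    validator.validate_method(add, {'a': 'not-a-number'})
except Exception as exc:
    print('validation failed:', exc)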
790e030ef31dc13e50d1a930d050458132b0c2ea
| 7,501
|
py
|
Python
|
flux_param.py
|
aelkouk/rainfall_runoff
|
7ab984c77abbef38c768fea9993b0cfecaca3e67
|
[
"MIT"
] | null | null | null |
flux_param.py
|
aelkouk/rainfall_runoff
|
7ab984c77abbef38c768fea9993b0cfecaca3e67
|
[
"MIT"
] | null | null | null |
flux_param.py
|
aelkouk/rainfall_runoff
|
7ab984c77abbef38c768fea9993b0cfecaca3e67
|
[
"MIT"
] | null | null | null |
# Purpose: Calculate hydrological fluxes in the canopy, unsaturated and saturated sub-domains
# Record of revisions:
# Date Programmer Description of change
# ======== ============= =====================
# 09-2020 A. Elkouk Original code
# ----------------------------------------------------------------------------------------------------------------------
# Parametrization for the fluxes in the vegetation canopy
# ----------------------------------------------------------------------------------------------------------------------
def calc_wetted_fraction(canopyStore, canopyStore_max, gamma):
""" Calculate the wetted fraction of the canopy
Parameters
----------
canopyStore : int or float
Canopy Interception storage [mm]
canopyStore_max : int or float
Maximum non-drainable canopy interception storage [mm]
gamma : float
Parameter to account for the non-linearity in the wetted fraction of the canopy
Returns
-------
wetFrac: float
Wetted fraction of the canopy
"""
if canopyStore < canopyStore_max:
wetFrac = (canopyStore / canopyStore_max) ** gamma
else:
wetFrac = 1.0
return wetFrac
def calc_canopy_evaporation(pet, wetFrac):
""" Calculate the evaporation from canopy interception storage
Parameters
----------
pet : int or float
Potential evapotranspiration [mm day^-1]
wetFrac : float
Wetted fraction of the canopy
Returns
-------
canopyEvap: float
Evaporation from canopy interception storage [mm day^-1]
"""
canopyEvap = pet * wetFrac
return canopyEvap
def calc_throughfall_flux(precip, canopyStore, canopyStore_max):
""" Calculate the throughfall flux from canopy interception storage
Parameters
----------
precip : int or float
Precipitation flux [mm day^-1]
canopyStore : int or float
Canopy Interception storage [mm]
canopyStore_max : int or float
Maximum non-drainable canopy interception storage [mm]
Returns
-------
throughfall : int or float
Throughfall flux [mm day^-1]
"""
if canopyStore < canopyStore_max:
throughfall = precip * (canopyStore / canopyStore_max)
else:
throughfall = precip
return throughfall
def calc_canopy_drainage_flux(canopyStore, canopyStore_max, k_can):
""" Calculate the canopy drainage flux from canopy interception storage
Parameters
----------
canopyStore : int or float
Canopy Interception storage [mm]
canopyStore_max : int or float
Maximum non-drainable canopy interception storage [mm]
k_can: float
        Canopy drainage coefficient [day^-1]
Returns
-------
canopyDrain : int or float
Canopy drainage flux [mm day^-1]
"""
if canopyStore < canopyStore_max:
canopyDrain = 0.0
else:
canopyDrain = k_can * (canopyStore - canopyStore_max)
return canopyDrain
def calc_precipitation_excess(throughfall, canopyDrain):
""" Calculate excess precipitation (the sum of throughfall and canopy drainage)
Parameters
----------
throughfall : int or float
Throughfall flux [mm day^-1]
canopyDrain : int or float
Canopy drainage flux [mm day^-1]
Returns
-------
precipExcess : int or float
Excess precipitation [mm day^-1]
"""
precipExcess = throughfall + canopyDrain
return precipExcess
# ----------------------------------------------------------------------------------------------------------------------
# Parametrization for the fluxes in the unsaturated zone
# ----------------------------------------------------------------------------------------------------------------------
def calc_saturated_fraction(unsatStore, unsatStore_max, alpha):
""" Calculate the saturated fraction of the unsaturated zone
Parameters
----------
unsatStore : int or float
Storage in the unsaturated zone [mm]
unsatStore_max : int or float
Maximum storage in the unsaturated zone [mm]
alpha : float
Parameter to account for the non-linearity in the variable source area for saturation-excess runoff
Returns
-------
satFrac: float
Saturated fraction of the unsaturated zone
"""
if unsatStore < unsatStore_max:
satFrac = 1 - (1 - (unsatStore / unsatStore_max)) ** alpha
else:
satFrac = 1
return satFrac
def calc_unsaturated_evaporation(pet, unsatStore, fieldCap, wetFrac):
""" Calculate evaporation from the unsaturated zone
Parameters
----------
pet : int or float
Potential evapotranspiration [mm day^-1]
unsatStore : int or float
Storage in the unsaturated zone [mm]
fieldCap : int or float
Field capacity [mm]
wetFrac : float
Wetted fraction of the canopy
Returns
-------
unsatEvap : float
Evaporation from the unsaturated zone [mm day^-1]
"""
if unsatStore < fieldCap:
unsatEvap = pet * (unsatStore / fieldCap) * (1 - wetFrac)
else:
unsatEvap = pet * (1 - wetFrac)
return unsatEvap
def calc_overland_flow(precipExcess, satFrac):
""" Calculate overland flow (surface runoff)
Parameters
----------
precipExcess : int or float
Excess precipitation [mm day^-1]
satFrac : float
Saturated fraction of the unsaturated zone
Returns
-------
overlandFlow : float
Overland flow (surface runoff) [mm day^-1]
"""
overlandFlow = precipExcess * satFrac
return overlandFlow
def calc_percolation_flux(unsatStore, unsatStore_max, fieldCap, k_sat, beta):
""" Calculate the percolation flux from the unsaturated to the saturated zone
Parameters
----------
unsatStore : int or float
Storage in the unsaturated zone [mm]
unsatStore_max : int or float
Maximum storage in the unsaturated zone [mm]
fieldCap : int or float
Field capacity [mm]
k_sat : int or float
Maximum percolation rate [mm day^-1]
beta : int or float
Parameter to account for percolation non-linearity
Returns
-------
percolation : int or float
Percolation flux [mm day^-1]
"""
if unsatStore < fieldCap:
percolation = 0.0
else:
percolation = k_sat * ((unsatStore - fieldCap) / (unsatStore_max - fieldCap)) ** beta
return percolation
# ----------------------------------------------------------------------------------------------------------------------
# Parametrization for the fluxes in the saturated zone
# ----------------------------------------------------------------------------------------------------------------------
def calc_baseflow(satStore, k_sz):
""" Calculate baseflow from the saturated zone
Parameters
----------
satStore : int or float
Storage in the saturated zone [mm]
k_sz : float
Runoff coefficient for the saturated zone [day^-1]
Returns
-------
baseflow : float
Baseflow from the saturated zone [mm day^-1]
"""
baseflow = satStore * k_sz
return baseflow
| 28.520913
| 121
| 0.557792
|
def calc_wetted_fraction(canopyStore, canopyStore_max, gamma):
if canopyStore < canopyStore_max:
wetFrac = (canopyStore / canopyStore_max) ** gamma
else:
wetFrac = 1.0
return wetFrac
def calc_canopy_evaporation(pet, wetFrac):
canopyEvap = pet * wetFrac
return canopyEvap
def calc_throughfall_flux(precip, canopyStore, canopyStore_max):
if canopyStore < canopyStore_max:
throughfall = precip * (canopyStore / canopyStore_max)
else:
throughfall = precip
return throughfall
def calc_canopy_drainage_flux(canopyStore, canopyStore_max, k_can):
if canopyStore < canopyStore_max:
canopyDrain = 0.0
else:
canopyDrain = k_can * (canopyStore - canopyStore_max)
return canopyDrain
def calc_precipitation_excess(throughfall, canopyDrain):
precipExcess = throughfall + canopyDrain
return precipExcess
def calc_saturated_fraction(unsatStore, unsatStore_max, alpha):
if unsatStore < unsatStore_max:
satFrac = 1 - (1 - (unsatStore / unsatStore_max)) ** alpha
else:
satFrac = 1
return satFrac
def calc_unsaturated_evaporation(pet, unsatStore, fieldCap, wetFrac):
if unsatStore < fieldCap:
unsatEvap = pet * (unsatStore / fieldCap) * (1 - wetFrac)
else:
unsatEvap = pet * (1 - wetFrac)
return unsatEvap
def calc_overland_flow(precipExcess, satFrac):
overlandFlow = precipExcess * satFrac
return overlandFlow
def calc_percolation_flux(unsatStore, unsatStore_max, fieldCap, k_sat, beta):
if unsatStore < fieldCap:
percolation = 0.0
else:
percolation = k_sat * ((unsatStore - fieldCap) / (unsatStore_max - fieldCap)) ** beta
return percolation
def calc_baseflow(satStore, k_sz):
baseflow = satStore * k_sz
return baseflow
| true
| true
|
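A small worked example chaining the canopy flux parametrizations from the record above for one daily time step (state values and parameters are hypothetical, chosen only to illustrate the call sequence and typical magnitudes):

# Illustrative states and parameters (not calibrated values).
precip = 12.0            # precipitation [mm day^-1]
pet = 4.0                # potential evapotranspiration [mm day^-1]
canopyStore = 1.5        # canopy interception storage [mm]
canopyStore_max = 2.0    # maximum non-drainable interception storage [mm]
gamma = 0.8              # wetted-fraction non-linearity parameter
k_can = 0.5              # canopy drainage coefficient [day^-1]
wetFrac = calc_wetted_fraction(canopyStore, canopyStore_max, gamma)           # (1.5/2.0)**0.8 ~ 0.79
canopyEvap = calc_canopy_evaporation(pet, wetFrac)                            # ~3.2 mm day^-1
throughfall = calc_throughfall_flux(precip, canopyStore, canopyStore_max)     # 12 * 0.75 = 9.0 mm day^-1
canopyDrain = calc_canopy_drainage_flux(canopyStore, canopyStore_max, k_can)  # 0.0, storage below maximum
precipExcess = calc_precipitation_excess(throughfall, canopyDrain)            # 9.0 mm day^-1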
790e0328786b1fd98594b1a19535e01ae6bcec3e
| 573
|
py
|
Python
|
convnet3d/layers/misc.py
|
yecharlie/convnet3d
|
0b2771eec149b196ef59b58d09eef71c9b201d40
|
[
"MIT"
] | 6
|
2020-03-12T10:28:41.000Z
|
2021-11-18T16:17:20.000Z
|
convnet3d/layers/misc.py
|
yecharlie/convnet3d
|
0b2771eec149b196ef59b58d09eef71c9b201d40
|
[
"MIT"
] | null | null | null |
convnet3d/layers/misc.py
|
yecharlie/convnet3d
|
0b2771eec149b196ef59b58d09eef71c9b201d40
|
[
"MIT"
] | 1
|
2019-08-01T02:50:05.000Z
|
2019-08-01T02:50:05.000Z
|
import keras
import keras.backend as K
class Shape(keras.layers.Layer):
def call(self, inputs):
return K.shape(inputs)
def compute_output_shape(self, input_shape):
return (len(input_shape),)
class Cast(keras.layers.Layer):
def __init__(self, dtype, **kwargs):
self.dtype = dtype
super(Cast, self).__init__(**kwargs)
def call(self, inputs):
return K.cast(inputs, self.dtype)
def get_config(self):
config = super(Cast, self).get_config()
config.update(dtype=self.dtype)
return config
| 22.92
| 48
| 0.645724
|
import keras
import keras.backend as K
class Shape(keras.layers.Layer):
def call(self, inputs):
return K.shape(inputs)
def compute_output_shape(self, input_shape):
return (len(input_shape),)
class Cast(keras.layers.Layer):
def __init__(self, dtype, **kwargs):
self.dtype = dtype
super(Cast, self).__init__(**kwargs)
def call(self, inputs):
return K.cast(inputs, self.dtype)
def get_config(self):
config = super(Cast, self).get_config()
config.update(dtype=self.dtype)
return config
| true
| true
|
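A minimal sketch of how the Cast and Shape layers in the record above might sit inside a Keras model (hypothetical architecture and shapes; it assumes only the standalone keras API already imported by the module):

import keras
inputs = keras.layers.Input(shape=(32, 32, 32, 1))        # e.g. a single-channel 3D volume
x = keras.layers.Conv3D(8, 3, padding='same')(inputs)
# Cast intermediate activations to float16 and back, e.g. to shrink a large buffer.
x = Cast('float16')(x)
x = Cast('float32')(x)
# Shape()(x) would likewise expose K.shape(x) as a tensor for custom graph logic.
out = keras.layers.GlobalAveragePooling3D()(x)
out = keras.layers.Dense(2, activation='softmax')(out)
model = keras.models.Model(inputs=inputs, outputs=out)
model.summary()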
790e03e4bd37269bbae332195207109b25e08b98
| 2,532
|
py
|
Python
|
scripts/support/split_genomes.py
|
Rfam/rfam-production
|
36f3963380da2a08e9cf73c951691c4e95738ac4
|
[
"Apache-2.0"
] | 7
|
2016-06-17T09:21:11.000Z
|
2021-10-13T20:25:06.000Z
|
support/split_genomes.py
|
mb1069/rfam-production
|
10c76e249dc22d30862b3a873fd54f390e859ad8
|
[
"Apache-2.0"
] | 82
|
2016-04-08T10:51:32.000Z
|
2022-03-11T13:49:18.000Z
|
support/split_genomes.py
|
mb1069/rfam-production
|
10c76e249dc22d30862b3a873fd54f390e859ad8
|
[
"Apache-2.0"
] | 3
|
2019-09-01T09:46:35.000Z
|
2021-11-29T08:01:58.000Z
|
import os
import sys
import shutil
import subprocess
from config import rfam_local as conf
from config import gen_config as gc
from utils import genome_search_utils as gsu
# ------------------------------------------------------------------------
def split_genome_to_chunks(updir, upid):
"""
updir:
upid:
return:
"""
# get updir location
upid_fasta = os.path.join(updir, upid + '.fa')
seq_chunks_dir = os.path.join(updir, "search_chunks")
if not os.path.exists(seq_chunks_dir):
os.mkdir(seq_chunks_dir)
        os.chmod(seq_chunks_dir, 0o777)
# check if we need to split the seq_file
if gsu.count_nucleotides_in_fasta(upid_fasta) >= gc.SPLIT_SIZE:
        # split sequence file into smaller chunks
gsu.split_seq_file(upid_fasta, gc.SPLIT_SIZE, dest_dir=seq_chunks_dir)
# now index the fasta files
seq_files = os.listdir(seq_chunks_dir)
for seq_file in seq_files:
seq_file_loc = os.path.join(seq_chunks_dir, seq_file)
cmd = "%s --index %s" % (conf.ESL_SFETCH, seq_file_loc)
subprocess.call(cmd, shell=True)
# for input consistency if the sequence file is small, copy it in the
# search_chunks directory
else:
# copy file
shutil.copyfile(upid_fasta, os.path.join(seq_chunks_dir,
upid + '.fa'))
# index file
cmd = "%s --index %s" % (conf.ESL_SFETCH, os.path.join(seq_chunks_dir,
upid + '.fa'))
subprocess.call(cmd, shell=True)
# ------------------------------------------------------------------------
if __name__ == '__main__':
project_dir = sys.argv[1]
# this can be a file of upids or a upid string UPXXXXXXXX
upid_input = sys.argv[2]
if os.path.isfile(upid_input):
fp = open(upid_input, 'r')
upids = [x.strip() for x in fp]
fp.close()
for upid in upids:
suffix = upid[-3:]
subdir_loc = os.path.join(project_dir, suffix)
updir_loc = os.path.join(subdir_loc, upid)
split_genome_to_chunks(updir_loc, upid)
else:
# get updir location and subdir
suffix = upid_input[-3:]
subdir_loc = os.path.join(project_dir, suffix)
updir_loc = os.path.join(subdir_loc, upid_input)
split_genome_to_chunks(updir_loc, upid_input)
| 31.259259
| 82
| 0.557267
|
import os
import sys
import shutil
import subprocess
from config import rfam_local as conf
from config import gen_config as gc
from utils import genome_search_utils as gsu
def split_genome_to_chunks(updir, upid):
"""
updir:
upid:
return:
"""
upid_fasta = os.path.join(updir, upid + '.fa')
seq_chunks_dir = os.path.join(updir, "search_chunks")
if not os.path.exists(seq_chunks_dir):
os.mkdir(seq_chunks_dir)
        os.chmod(seq_chunks_dir, 0o777)
if gsu.count_nucleotides_in_fasta(upid_fasta) >= gc.SPLIT_SIZE:
gsu.split_seq_file(upid_fasta, gc.SPLIT_SIZE, dest_dir=seq_chunks_dir)
seq_files = os.listdir(seq_chunks_dir)
for seq_file in seq_files:
seq_file_loc = os.path.join(seq_chunks_dir, seq_file)
cmd = "%s --index %s" % (conf.ESL_SFETCH, seq_file_loc)
subprocess.call(cmd, shell=True)
else:
shutil.copyfile(upid_fasta, os.path.join(seq_chunks_dir,
upid + '.fa'))
cmd = "%s --index %s" % (conf.ESL_SFETCH, os.path.join(seq_chunks_dir,
upid + '.fa'))
subprocess.call(cmd, shell=True)
if __name__ == '__main__':
project_dir = sys.argv[1]
upid_input = sys.argv[2]
if os.path.isfile(upid_input):
fp = open(upid_input, 'r')
upids = [x.strip() for x in fp]
fp.close()
for upid in upids:
suffix = upid[-3:]
subdir_loc = os.path.join(project_dir, suffix)
updir_loc = os.path.join(subdir_loc, upid)
split_genome_to_chunks(updir_loc, upid)
else:
suffix = upid_input[-3:]
subdir_loc = os.path.join(project_dir, suffix)
updir_loc = os.path.join(subdir_loc, upid_input)
split_genome_to_chunks(updir_loc, upid_input)
| false
| true
|
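A hedged sketch of invoking split_genome_to_chunks directly for one proteome directory (the root path and the UPID are placeholders; it assumes the <project_dir>/<upid[-3:]>/<upid>/<upid>.fa layout used in __main__ above and an esl-sfetch binary configured as conf.ESL_SFETCH):

import os
project_dir = '/hypothetical/rfam/genomes'   # placeholder project root
upid = 'UP000000212'                         # placeholder proteome id (UPXXXXXXXX)
updir = os.path.join(project_dir, upid[-3:], upid)
# Splits <updir>/<upid>.fa into chunks under <updir>/search_chunks when the genome
# exceeds gc.SPLIT_SIZE nucleotides (or just copies it when small), and indexes each
# chunk with esl-sfetch --index.
split_genome_to_chunks(updir, upid)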
790e0404674c8530ff8eaac4431731c3de59a3d6
| 1,843
|
py
|
Python
|
mslib/mscolab/_tests/test_utils.py
|
gisi90/MSS
|
a790499e54fe6e2023608701e939a921fd02dee0
|
[
"Apache-2.0"
] | null | null | null |
mslib/mscolab/_tests/test_utils.py
|
gisi90/MSS
|
a790499e54fe6e2023608701e939a921fd02dee0
|
[
"Apache-2.0"
] | null | null | null |
mslib/mscolab/_tests/test_utils.py
|
gisi90/MSS
|
a790499e54fe6e2023608701e939a921fd02dee0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
mslib.mscolab._tests.test_utils
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
tests for mscolab/utils
This file is part of mss.
:copyright: Copyright 2019 Shivashis Padhi
:copyright: Copyright 2019-2020 by the mss team, see AUTHORS.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mslib.mscolab.server import db, APP, initialize_managers
from mslib.mscolab.models import User
from mslib.mscolab.utils import get_recent_pid
from mslib.mscolab.conf import mscolab_settings
from mslib.mscolab.mscolab import handle_db_seed
class Test_Utils(object):
def setup(self):
handle_db_seed()
self.app = APP
self.app.config['SQLALCHEMY_DATABASE_URI'] = mscolab_settings.SQLALCHEMY_DB_URI
self.app.config['MSCOLAB_DATA_DIR'] = mscolab_settings.MSCOLAB_DATA_DIR
self.app.config['UPLOAD_FOLDER'] = mscolab_settings.UPLOAD_FOLDER
self.app, _, cm, fm = initialize_managers(self.app)
self.fm = fm
self.cm = cm
db.init_app(self.app)
with self.app.app_context():
self.user = User.query.filter_by(id=8).first()
def test_get_recent_pid(self):
with self.app.app_context():
p_id = get_recent_pid(self.fm, self.user)
assert p_id == 4
def teardown(self):
pass
| 33.509091
| 87
| 0.686923
|
from mslib.mscolab.server import db, APP, initialize_managers
from mslib.mscolab.models import User
from mslib.mscolab.utils import get_recent_pid
from mslib.mscolab.conf import mscolab_settings
from mslib.mscolab.mscolab import handle_db_seed
class Test_Utils(object):
def setup(self):
handle_db_seed()
self.app = APP
self.app.config['SQLALCHEMY_DATABASE_URI'] = mscolab_settings.SQLALCHEMY_DB_URI
self.app.config['MSCOLAB_DATA_DIR'] = mscolab_settings.MSCOLAB_DATA_DIR
self.app.config['UPLOAD_FOLDER'] = mscolab_settings.UPLOAD_FOLDER
self.app, _, cm, fm = initialize_managers(self.app)
self.fm = fm
self.cm = cm
db.init_app(self.app)
with self.app.app_context():
self.user = User.query.filter_by(id=8).first()
def test_get_recent_pid(self):
with self.app.app_context():
p_id = get_recent_pid(self.fm, self.user)
assert p_id == 4
def teardown(self):
pass
| true
| true
|
790e0407f77db6797cd355bcbc23b95400522672
| 1,645
|
py
|
Python
|
torch_glow/tests/nodes/conv2d_test.py
|
enricoros/glow
|
9066d4ab4f7fcfbfbbc1297a7706f7ed78dc891b
|
[
"Apache-2.0"
] | 2
|
2021-08-02T22:39:33.000Z
|
2021-11-17T11:00:17.000Z
|
torch_glow/tests/nodes/conv2d_test.py
|
a1f/glow
|
49cf6972ce0cb25cea66f9ed39d32add5eeef130
|
[
"Apache-2.0"
] | null | null | null |
torch_glow/tests/nodes/conv2d_test.py
|
a1f/glow
|
49cf6972ce0cb25cea66f9ed39d32add5eeef130
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn.functional as F
import torch_glow
from collections import namedtuple
from tests.utils import jitVsGlow
# Basic test of the PyTorch conv2d Node on Glow.
def test_conv2d_basic():
def conv2d_basic(inputs, filters):
conv = F.conv2d(inputs, filters, padding=1)
return F.relu(conv)
inputs = torch.randn(1, 4, 5, 5)
filters = torch.randn(8, 4, 3, 3)
jitVsGlow(conv2d_basic, inputs, filters)
# Test of the PyTorch conv2d Node with a provided bias tensor.
def test_conv2d_with_bias():
def conv2d_with_bias(inputs, filters, bias):
conv = F.conv2d(inputs, filters, bias)
return F.relu(conv)
inputs = torch.randn(1, 4, 5, 5)
filters = torch.randn(8, 4, 3, 3)
bias = torch.randn(8)
jitVsGlow(conv2d_with_bias, inputs, filters, bias)
# Test of the PyTorch conv2d Node sweeping through various parameters of the
# Node to test that they work correctly.
def test_conv2d_param_sweep():
hwOpts = [3, 4]
padOpts = [0, 1]
groupsOpts = [1, 2]
dilationOpts = [1, 2]
strideOpts = [1, 2]
Setting = namedtuple('Setting', ['h', 'w', 'p', 'g', 'd', 's',])
    settings = [Setting(h=h, w=w, p=p, g=g, d=d, s=s)
                for h in hwOpts for w in hwOpts
                for p in padOpts for g in groupsOpts
                for d in dilationOpts for s in strideOpts]
for setting in settings:
def conv2d_param_sweep(inputs, filters):
conv = F.conv2d(inputs, filters, padding=setting.p, groups=setting.g)
return F.relu(conv)
inputs = torch.randn(2, 4, setting.h, setting.w)
        filters = torch.randn(8, 4 // setting.g, 3, 3)
jitVsGlow(conv2d_param_sweep, inputs, filters)
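# Editor's sketch (not part of the original test): the settings list above is
# the Cartesian product of six option lists built with a nested comprehension;
# note that, as written, only padding and groups are forwarded to F.conv2d, so
# the dilation and stride options are generated but not exercised. The same
# product can be expressed with itertools.product; Setting and the option
# values are repeated here so the sketch is self-contained.
def _conv2d_sweep_settings_sketch():
    import itertools
    Setting = namedtuple("Setting", ["h", "w", "p", "g", "d", "s"])
    hwOpts, padOpts, groupsOpts = [3, 4], [0, 1], [1, 2]
    dilationOpts, strideOpts = [1, 2], [1, 2]
    # One Setting per combination, in the same order as the nested comprehension.
    return [Setting(*combo) for combo in itertools.product(
        hwOpts, hwOpts, padOpts, groupsOpts, dilationOpts, strideOpts)]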
| 29.909091
| 163
| 0.677204
|
import torch
import torch.nn.functional as F
import torch_glow
from collections import namedtuple
from tests.utils import jitVsGlow
def test_conv2d_basic():
def conv2d_basic(inputs, filters):
conv = F.conv2d(inputs, filters, padding=1)
return F.relu(conv)
inputs = torch.randn(1, 4, 5, 5)
filters = torch.randn(8, 4, 3, 3)
jitVsGlow(conv2d_basic, inputs, filters)
def test_conv2d_with_bias():
def conv2d_with_bias(inputs, filters, bias):
conv = F.conv2d(inputs, filters, bias)
return F.relu(conv)
inputs = torch.randn(1, 4, 5, 5)
filters = torch.randn(8, 4, 3, 3)
bias = torch.randn(8)
jitVsGlow(conv2d_with_bias, inputs, filters, bias)
def test_conv2d_param_sweep():
hwOpts = [3, 4]
padOpts = [0, 1]
groupsOpts = [1, 2]
dilationOpts = [1, 2]
strideOpts = [1, 2]
Setting = namedtuple('Setting', ['h', 'w', 'p', 'g', 'd', 's',])
    settings = [Setting(h=h, w=w, p=p, g=g, d=d, s=s)
                for h in hwOpts for w in hwOpts
                for p in padOpts for g in groupsOpts
                for d in dilationOpts for s in strideOpts]
for setting in settings:
def conv2d_param_sweep(inputs, filters):
conv = F.conv2d(inputs, filters, padding=setting.p, groups=setting.g)
return F.relu(conv)
inputs = torch.randn(2, 4, setting.h, setting.w)
        filters = torch.randn(8, 4 // setting.g, 3, 3)
jitVsGlow(conv2d_param_sweep, inputs, filters)
| true
| true
|
790e0584b8e5a83f684e876da3efae7fb30671d1
| 34,381
|
py
|
Python
|
corehq/apps/sms/tests/test_phone_numbers.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 1
|
2020-07-14T13:00:23.000Z
|
2020-07-14T13:00:23.000Z
|
corehq/apps/sms/tests/test_phone_numbers.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 94
|
2020-12-11T06:57:31.000Z
|
2022-03-15T10:24:06.000Z
|
corehq/apps/sms/tests/test_phone_numbers.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | null | null | null |
from datetime import datetime, timedelta
from django.test import TestCase
from mock import patch
from corehq.apps.domain.models import Domain
from corehq.apps.hqcase.utils import update_case
from corehq.apps.sms.mixin import PhoneNumberInUseException
from corehq.apps.sms.models import (
PhoneNumber,
SQLMobileBackend,
SQLMobileBackendMapping,
)
from corehq.apps.sms.tasks import (
delete_phone_numbers_for_owners,
sync_case_phone_number,
)
from corehq.apps.sms.tests.util import delete_domain_phone_numbers
from corehq.apps.users.models import CommCareUser, WebUser
from corehq.apps.users.tasks import tag_cases_as_deleted_and_remove_indices
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.tests.utils import run_with_all_backends
from corehq.form_processor.utils import is_commcarecase
from corehq.messaging.smsbackends.test.models import SQLTestSMSBackend
from corehq.util.test_utils import create_test_case
class PhoneNumberCacheClearTestCase(TestCase):
def assertNoMatch(self, phone_search, suffix_search, owner_id_search):
self.assertIsNone(PhoneNumber.get_two_way_number(phone_search))
self.assertIsNone(PhoneNumber.get_two_way_number_by_suffix(suffix_search))
self.assertEqual(PhoneNumber.by_owner_id(owner_id_search), [])
def assertPhoneNumbersEqual(self, phone1, phone2):
for field in phone1._meta.fields:
self.assertEqual(getattr(phone1, field.name), getattr(phone2, field.name))
def assertMatch(self, match, phone_search, suffix_search, owner_id_search):
lookedup = PhoneNumber.get_two_way_number(phone_search)
self.assertPhoneNumbersEqual(match, lookedup)
lookedup = PhoneNumber.get_two_way_number_by_suffix(suffix_search)
self.assertPhoneNumbersEqual(match, lookedup)
[lookedup] = PhoneNumber.by_owner_id(owner_id_search)
self.assertPhoneNumbersEqual(match, lookedup)
def _test_cache_clear(self, refresh_each_time=True):
"""
A test to make sure that the cache clearing is working as expected.
This test gets run twice using different values for refresh_each_time.
This makes sure that the mechanism used for clearing the cache works
whether you're updating a document you just saved or getting a document
fresh from the database and updating it.
"""
created = PhoneNumber(
domain='phone-number-test',
owner_doc_type='CommCareCase',
owner_id='fake-owner-id1',
phone_number='99912341234',
backend_id=None,
ivr_backend_id=None,
verified=True,
pending_verification=False,
is_two_way=True,
contact_last_modified=datetime.utcnow()
)
created.save()
self.assertNoMatch('99952345234', '52345234', 'fake-owner-id2')
self.assertMatch(created, '99912341234', '12341234', 'fake-owner-id1')
# Update Phone Number
if refresh_each_time:
created = PhoneNumber.objects.get(pk=created.pk)
created.phone_number = '99952345234'
created.save()
self.assertNoMatch('99912341234', '12341234', 'fake-owner-id2')
self.assertMatch(created, '99952345234', '52345234', 'fake-owner-id1')
# Update Owner Id
if refresh_each_time:
created = PhoneNumber.objects.get(pk=created.pk)
created.owner_id = 'fake-owner-id2'
created.save()
self.assertNoMatch('99912341234', '12341234', 'fake-owner-id1')
self.assertMatch(created, '99952345234', '52345234', 'fake-owner-id2')
created.delete()
self.assertNoMatch('99952345234', '52345234', 'fake-owner-id2')
def test_cache_clear_with_refresh(self):
self._test_cache_clear(refresh_each_time=True)
def test_cache_clear_without_refresh(self):
self._test_cache_clear(refresh_each_time=False)
class CaseContactPhoneNumberTestCase(TestCase):
def setUp(self):
self.domain = 'case-phone-number-test'
def tearDown(self):
delete_domain_phone_numbers(self.domain)
def set_case_property(self, case, property_name, value):
update_case(self.domain, case.case_id, case_properties={property_name: value})
return CaseAccessors(self.domain).get_case(case.case_id)
def get_case_phone_number(self, case):
return case.get_phone_number()
def assertPhoneNumberDetails(self, case, phone_number, sms_backend_id, ivr_backend_id,
verified, pending_verification, is_two_way, pk=None):
v = self.get_case_phone_number(case)
self.assertEqual(v.domain, case.domain)
self.assertEqual(v.owner_doc_type, case.doc_type)
self.assertEqual(v.owner_id, case.case_id)
self.assertEqual(v.phone_number, phone_number)
self.assertEqual(v.backend_id, sms_backend_id)
self.assertEqual(v.ivr_backend_id, ivr_backend_id)
self.assertEqual(v.verified, verified)
self.assertEqual(v.pending_verification, pending_verification)
self.assertEqual(v.is_two_way, is_two_way)
self.assertEqual(v.contact_last_modified, case.server_modified_on)
if pk:
self.assertEqual(v.pk, pk)
@run_with_all_backends
def test_case_phone_number_updates(self):
extra_number = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
with create_test_case(self.domain, 'participant', 'test1', drop_signals=False) as case:
self.assertIsNone(self.get_case_phone_number(case))
case = self.set_case_property(case, 'contact_phone_number', '99987658765')
self.assertPhoneNumberDetails(case, '99987658765', None, None, False, False, False)
pk = self.get_case_phone_number(case).pk
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
case = self.set_case_property(case, 'contact_phone_number_is_verified', '1')
self.assertPhoneNumberDetails(case, '99987658765', None, None, True, False, True, pk=pk)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
case = self.set_case_property(case, 'contact_phone_number', '99987698769')
self.assertPhoneNumberDetails(case, '99987698769', None, None, True, False, True)
pk = self.get_case_phone_number(case).pk
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
case = self.set_case_property(case, 'contact_backend_id', 'sms-backend')
self.assertPhoneNumberDetails(case, '99987698769', 'sms-backend', None, True, False, True, pk=pk)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
case = self.set_case_property(case, 'contact_ivr_backend_id', 'ivr-backend')
self.assertPhoneNumberDetails(case, '99987698769', 'sms-backend', 'ivr-backend', True, False, True,
pk=pk)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
# If phone entry is ahead of the case in terms of contact_last_modified, no update should happen
v = self.get_case_phone_number(case)
v.contact_last_modified += timedelta(days=1)
v.save()
with patch('corehq.apps.sms.models.PhoneNumber.save') as mock_save:
case = self.set_case_property(case, 'contact_phone_number', '99912341234')
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
mock_save.assert_not_called()
self.assertEqual(PhoneNumber.get_two_way_number('999123'), extra_number)
@run_with_all_backends
def test_close_case(self):
extra_number = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
with create_test_case(self.domain, 'participant', 'test1', drop_signals=False) as case:
case = self.set_case_property(case, 'contact_phone_number', '99987658765')
case = self.set_case_property(case, 'contact_phone_number_is_verified', '1')
self.assertIsNotNone(self.get_case_phone_number(case))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
update_case(self.domain, case.case_id, close=True)
self.assertIsNone(self.get_case_phone_number(case))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
self.assertEqual(PhoneNumber.get_two_way_number('999123'), extra_number)
@run_with_all_backends
def test_case_soft_delete(self):
extra_number = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
with create_test_case(self.domain, 'participant', 'test1', drop_signals=False) as case:
case = self.set_case_property(case, 'contact_phone_number', '99987658765')
case = self.set_case_property(case, 'contact_phone_number_is_verified', '1')
self.assertIsNotNone(self.get_case_phone_number(case))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
tag_cases_as_deleted_and_remove_indices(self.domain, [case.case_id], '123', datetime.utcnow())
self.assertIsNone(self.get_case_phone_number(case))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
self.assertEqual(PhoneNumber.get_two_way_number('999123'), extra_number)
@run_with_all_backends
def test_case_zero_phone_number(self):
extra_number = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
with create_test_case(self.domain, 'participant', 'test1', drop_signals=False) as case:
case = self.set_case_property(case, 'contact_phone_number', '99987658765')
case = self.set_case_property(case, 'contact_phone_number_is_verified', '1')
self.assertIsNotNone(self.get_case_phone_number(case))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
case = self.set_case_property(case, 'contact_phone_number', '0')
self.assertIsNone(self.get_case_phone_number(case))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
self.assertEqual(PhoneNumber.get_two_way_number('999123'), extra_number)
@run_with_all_backends
def test_invalid_phone_format(self):
extra_number = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
with create_test_case(self.domain, 'participant', 'test1', drop_signals=False) as case:
case = self.set_case_property(case, 'contact_phone_number', '99987658765')
case = self.set_case_property(case, 'contact_phone_number_is_verified', '1')
self.assertIsNotNone(self.get_case_phone_number(case))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
case = self.set_case_property(case, 'contact_phone_number', 'xyz')
self.assertIsNone(self.get_case_phone_number(case))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
self.assertEqual(PhoneNumber.get_two_way_number('999123'), extra_number)
@run_with_all_backends
def test_phone_number_already_in_use(self):
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 0)
with create_test_case(self.domain, 'participant', 'test1', drop_signals=False) as case1, \
create_test_case(self.domain, 'participant', 'test2', drop_signals=False) as case2:
case1 = self.set_case_property(case1, 'contact_phone_number', '99987658765')
case1 = self.set_case_property(case1, 'contact_phone_number_is_verified', '1')
case2 = self.set_case_property(case2, 'contact_phone_number', '99987698769')
case2 = self.set_case_property(case2, 'contact_phone_number_is_verified', '1')
self.assertIsNotNone(self.get_case_phone_number(case1))
self.assertIsNotNone(self.get_case_phone_number(case2))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
case2 = self.set_case_property(case2, 'contact_phone_number', '99987658765')
self.assertIsNotNone(self.get_case_phone_number(case1))
self.assertIsNotNone(self.get_case_phone_number(case2))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
self.assertPhoneNumberDetails(case1, '99987658765', None, None, True, False, True)
self.assertPhoneNumberDetails(case2, '99987658765', None, None, False, False, False)
@run_with_all_backends
def test_multiple_entries(self):
extra_number = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999124',
verified=False,
pending_verification=False,
is_two_way=False
)
with create_test_case(self.domain, 'participant', 'test1', drop_signals=False) as case:
case = self.set_case_property(case, 'contact_phone_number', '999124')
case = self.set_case_property(case, 'contact_phone_number_is_verified', '1')
case.create_phone_entry('999125')
self.assertEqual(PhoneNumber.objects.count(), 3)
sync_case_phone_number(case)
self.assertEqual(PhoneNumber.objects.count(), 2)
number1 = PhoneNumber.objects.get(pk=extra_number.pk)
self.assertEqual(number1.owner_id, 'X')
number2 = PhoneNumber.objects.get(owner_id=case.case_id)
self.assertTrue(number2.verified)
self.assertTrue(number2.is_two_way)
self.assertFalse(number2.pending_verification)
class SQLPhoneNumberTestCase(TestCase):
def setUp(self):
self.domain = 'sql-phone-number-test'
self.domain_obj = Domain(name=self.domain)
self.domain_obj.save()
def delete_objects(self, result):
for obj in result:
# Delete and clear cache
obj.delete()
def tearDown(self):
self.delete_objects(PhoneNumber.objects.filter(domain=self.domain))
self.delete_objects(SQLMobileBackend.objects.filter(domain=self.domain))
SQLMobileBackendMapping.objects.filter(domain=self.domain).delete()
self.domain_obj.delete()
def test_backend(self):
backend1 = SQLTestSMSBackend.objects.create(
hq_api_id=SQLTestSMSBackend.get_api_id(),
is_global=False,
domain=self.domain,
name='BACKEND1'
)
backend2 = SQLTestSMSBackend.objects.create(
hq_api_id=SQLTestSMSBackend.get_api_id(),
is_global=False,
domain=self.domain,
name='BACKEND2'
)
SQLMobileBackendMapping.set_default_domain_backend(self.domain, backend1)
number = PhoneNumber(domain=self.domain, phone_number='+999123')
self.assertEqual(number.backend, backend1)
number.backend_id = backend2.name
self.assertEqual(number.backend, backend2)
number.backend_id = ' '
self.assertEqual(number.backend, backend1)
@run_with_all_backends
def test_case_owner(self):
with create_test_case(self.domain, 'participant', 'test') as case:
number = PhoneNumber(owner_doc_type='CommCareCase', owner_id=case.case_id)
owner = number.owner
self.assertTrue(is_commcarecase(owner))
self.assertEqual(owner.case_id, case.case_id)
def test_user_owner(self):
mobile_user = CommCareUser.create(self.domain, 'abc', 'def', None, None)
number = PhoneNumber(owner_doc_type='CommCareUser', owner_id=mobile_user.get_id)
owner = number.owner
self.assertTrue(isinstance(owner, CommCareUser))
self.assertEqual(owner.get_id, mobile_user.get_id)
web_user = WebUser.create(self.domain, 'ghi', 'jkl', None, None)
number = PhoneNumber(owner_doc_type='WebUser', owner_id=web_user.get_id)
owner = number.owner
self.assertTrue(isinstance(owner, WebUser))
self.assertEqual(owner.get_id, web_user.get_id)
number = PhoneNumber(owner_doc_type='X')
self.assertIsNone(number.owner)
def test_get_two_way_number(self):
number1 = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=False,
pending_verification=False,
is_two_way=False
)
self.assertEqual(PhoneNumber.get_two_way_number('999123'), number1)
self.assertEqual(PhoneNumber.get_two_way_number('+999 123'), number1)
self.assertIsNone(PhoneNumber.get_two_way_number('999124'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999123'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999124'))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
# test cache clear on save
number1.phone_number = '999124'
number1.save()
self.assertIsNone(PhoneNumber.get_two_way_number('999123'))
self.assertEqual(PhoneNumber.get_two_way_number('999124'), number1)
self.assertIsNone(PhoneNumber.get_number_pending_verification('999123'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999124'))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
# test cache clear on delete
number1.delete()
self.assertIsNone(PhoneNumber.get_two_way_number('999123'))
self.assertIsNone(PhoneNumber.get_two_way_number('999124'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999123'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999124'))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
def test_get_number_pending_verification(self):
number1 = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=False,
pending_verification=True,
is_two_way=False
)
PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=False,
pending_verification=False,
is_two_way=False
)
self.assertIsNone(PhoneNumber.get_two_way_number('999123'))
self.assertIsNone(PhoneNumber.get_two_way_number('999124'))
self.assertEqual(PhoneNumber.get_number_pending_verification('999123'), number1)
self.assertEqual(PhoneNumber.get_number_pending_verification('+999 123'), number1)
self.assertIsNone(PhoneNumber.get_number_pending_verification('999124'))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
# test cache clear on save
number1.phone_number = '999124'
number1.save()
self.assertIsNone(PhoneNumber.get_two_way_number('999123'))
self.assertIsNone(PhoneNumber.get_two_way_number('999124'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999123'))
self.assertEqual(PhoneNumber.get_number_pending_verification('999124'), number1)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
# test promotion to two-way
number1.set_two_way()
number1.set_verified()
number1.save()
self.assertIsNone(PhoneNumber.get_two_way_number('999123'))
self.assertEqual(PhoneNumber.get_two_way_number('999124'), number1)
self.assertIsNone(PhoneNumber.get_number_pending_verification('999123'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999124'))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
# test cache clear on delete
number1.delete()
self.assertIsNone(PhoneNumber.get_two_way_number('999123'))
self.assertIsNone(PhoneNumber.get_two_way_number('999124'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999123'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999124'))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
def test_suffix_lookup(self):
number1 = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
number2 = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999223',
verified=True,
pending_verification=False,
is_two_way=True
)
self.assertEqual(PhoneNumber.get_two_way_number_by_suffix('1 23'), number1)
self.assertEqual(PhoneNumber.get_two_way_number_by_suffix('2 23'), number2)
self.assertIsNone(PhoneNumber.get_two_way_number_by_suffix('23'))
# test update
number1.phone_number = '999124'
number1.save()
number2.phone_number = '999224'
number2.save()
self.assertIsNone(PhoneNumber.get_two_way_number_by_suffix('1 23'))
self.assertIsNone(PhoneNumber.get_two_way_number_by_suffix('2 23'))
self.assertEqual(PhoneNumber.get_two_way_number_by_suffix('124'), number1)
self.assertEqual(PhoneNumber.get_two_way_number_by_suffix('224'), number2)
def test_extensive_search(self):
number = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
self.assertEqual(PhoneNumber.by_extensive_search('999123'), number)
self.assertEqual(PhoneNumber.by_extensive_search('0999123'), number)
self.assertEqual(PhoneNumber.by_extensive_search('00999123'), number)
self.assertEqual(PhoneNumber.by_extensive_search('000999123'), number)
self.assertEqual(PhoneNumber.by_extensive_search('123'), number)
self.assertIsNone(PhoneNumber.by_extensive_search('999124'))
def test_by_domain(self):
number1 = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
number2 = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999124',
verified=False,
pending_verification=False,
is_two_way=False
)
number3 = PhoneNumber.objects.create(
domain=self.domain + 'X',
owner_doc_type='X',
owner_id='X',
phone_number='999124',
verified=True,
pending_verification=False,
is_two_way=True
)
self.addCleanup(number3.delete)
self.assertEqual(
set(PhoneNumber.by_domain(self.domain)),
set([number1, number2])
)
self.assertEqual(
set(PhoneNumber.by_domain(self.domain, ids_only=True)),
set([number1.couch_id, number2.couch_id])
)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
def test_by_owner_id(self):
number = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='owner1',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
[lookup] = PhoneNumber.by_owner_id('owner1')
self.assertEqual(lookup, number)
# test cache clear
number.owner_id = 'owner2'
number.save()
self.assertEqual(PhoneNumber.by_owner_id('owner1'), [])
[lookup] = PhoneNumber.by_owner_id('owner2')
self.assertEqual(lookup, number)
number.verified = False
number.is_two_way = False
number.save()
[lookup] = PhoneNumber.by_owner_id('owner2')
self.assertFalse(lookup.verified)
self.assertFalse(lookup.is_two_way)
def create_case_contact(self, phone_number):
return create_test_case(
self.domain,
'participant',
'test',
case_properties={
'contact_phone_number': phone_number,
'contact_phone_number_is_verified': '1',
},
drop_signals=False
)
@run_with_all_backends
def test_delete_phone_numbers_for_owners(self):
with self.create_case_contact('9990001') as case1, \
self.create_case_contact('9990002') as case2, \
self.create_case_contact('9990003') as case3:
self.assertEqual(len(PhoneNumber.by_owner_id(case1.case_id)), 1)
self.assertEqual(len(PhoneNumber.by_owner_id(case2.case_id)), 1)
self.assertEqual(len(PhoneNumber.by_owner_id(case3.case_id)), 1)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 3)
delete_phone_numbers_for_owners([case2.case_id, case3.case_id])
self.assertEqual(len(PhoneNumber.by_owner_id(case1.case_id)), 1)
self.assertEqual(len(PhoneNumber.by_owner_id(case2.case_id)), 0)
self.assertEqual(len(PhoneNumber.by_owner_id(case3.case_id)), 0)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
def test_verify_uniqueness(self):
number1 = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
number2 = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=False,
pending_verification=False,
is_two_way=False
)
# Raises no exception
number1.verify_uniqueness()
# Raises PhoneNumberInUseException
with self.assertRaises(PhoneNumberInUseException):
number2.verify_uniqueness()
class TestUserPhoneNumberSync(TestCase):
def setUp(self):
self.domain = 'user-phone-number-test'
self.domain_obj = Domain(name=self.domain)
self.domain_obj.save()
self.mobile_worker1 = CommCareUser.create(self.domain, 'mobile1', 'mobile1', None, None)
self.mobile_worker2 = CommCareUser.create(self.domain, 'mobile2', 'mobile2', None, None)
def tearDown(self):
delete_domain_phone_numbers(self.domain)
self.domain_obj.delete()
def assertPhoneEntries(self, user, phone_numbers):
entries = user.get_phone_entries()
self.assertEqual(len(entries), len(phone_numbers))
self.assertEqual(set(entries.keys()), set(phone_numbers))
def testSync(self):
extra_number = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='owner1',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
user = self.mobile_worker1
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 1)
user.phone_numbers = ['9990001']
user.save()
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 2)
self.assertPhoneEntries(user, ['9990001'])
before = user.get_phone_entries()['9990001']
user.phone_numbers = ['9990001', '9990002']
user.save()
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 3)
self.assertPhoneEntries(user, ['9990001', '9990002'])
after = user.get_phone_entries()['9990001']
self.assertEqual(before.pk, after.pk)
user.phone_numbers = ['9990002']
user.save()
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 2)
self.assertPhoneEntries(user, ['9990002'])
self.assertEqual(PhoneNumber.get_two_way_number('999123'), extra_number)
def testRetire(self):
self.mobile_worker1.phone_numbers = ['9990001']
self.mobile_worker1.save()
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 1)
self.assertPhoneEntries(self.mobile_worker1, ['9990001'])
self.mobile_worker2.phone_numbers = ['9990002']
self.mobile_worker2.save()
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 2)
self.assertPhoneEntries(self.mobile_worker2, ['9990002'])
self.mobile_worker1.retire(deleted_by=None)
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 1)
self.assertPhoneEntries(self.mobile_worker2, ['9990002'])
class TestGenericContactMethods(TestCase):
def setUp(self):
self.domain = 'contact-phone-number-test'
self.domain_obj = Domain(name=self.domain)
self.domain_obj.save()
self.mobile_worker1 = CommCareUser.create(self.domain, 'mobile1', 'mobile1', None, None)
self.mobile_worker2 = CommCareUser.create(self.domain, 'mobile2', 'mobile2', None, None)
def tearDown(self):
delete_domain_phone_numbers(self.domain)
self.domain_obj.delete()
def testGetOrCreate(self):
before = self.mobile_worker1.get_or_create_phone_entry('999123')
self.assertEqual(before.owner_doc_type, 'CommCareUser')
self.assertEqual(before.owner_id, self.mobile_worker1.get_id)
self.assertEqual(before.phone_number, '999123')
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 1)
after = self.mobile_worker1.get_or_create_phone_entry('999123')
self.assertEqual(before.pk, after.pk)
self.assertEqual(after.owner_doc_type, 'CommCareUser')
self.assertEqual(after.owner_id, self.mobile_worker1.get_id)
self.assertEqual(after.phone_number, '999123')
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 1)
def testGetPhoneEntries(self):
number1 = self.mobile_worker1.get_or_create_phone_entry('999123')
number2 = self.mobile_worker1.get_or_create_phone_entry('999124')
self.mobile_worker1.get_or_create_phone_entry('999125')
number4 = self.mobile_worker2.get_or_create_phone_entry('999126')
number1.set_two_way()
number2.set_pending_verification()
number4.set_two_way()
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 4)
entries = self.mobile_worker1.get_phone_entries()
self.assertEqual(set(entries.keys()), set(['999123', '999124', '999125']))
entries = self.mobile_worker1.get_two_way_numbers()
self.assertEqual(set(entries.keys()), set(['999123']))
def testDelete(self):
self.mobile_worker1.get_or_create_phone_entry('999123')
self.mobile_worker1.get_or_create_phone_entry('999124')
self.mobile_worker1.get_or_create_phone_entry('999125')
self.mobile_worker2.get_or_create_phone_entry('999126')
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 4)
self.mobile_worker1.delete_phone_entry('999124')
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 3)
entries = self.mobile_worker1.get_phone_entries()
self.assertEqual(set(entries.keys()), set(['999123', '999125']))
entries = self.mobile_worker2.get_phone_entries()
self.assertEqual(set(entries.keys()), set(['999126']))
def testUserSyncNoChange(self):
before = self.mobile_worker1.get_or_create_phone_entry('999123')
before.set_two_way()
before.set_verified()
before.save()
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 1)
self.mobile_worker1.phone_numbers = ['999123']
self.mobile_worker1.save()
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 1)
after = self.mobile_worker1.get_phone_entries()['999123']
self.assertEqual(before.pk, after.pk)
self.assertTrue(after.is_two_way)
self.assertTrue(after.verified)
self.assertFalse(after.pending_verification)
| 41.17485
| 111
| 0.666647
|
from datetime import datetime, timedelta
from django.test import TestCase
from mock import patch
from corehq.apps.domain.models import Domain
from corehq.apps.hqcase.utils import update_case
from corehq.apps.sms.mixin import PhoneNumberInUseException
from corehq.apps.sms.models import (
PhoneNumber,
SQLMobileBackend,
SQLMobileBackendMapping,
)
from corehq.apps.sms.tasks import (
delete_phone_numbers_for_owners,
sync_case_phone_number,
)
from corehq.apps.sms.tests.util import delete_domain_phone_numbers
from corehq.apps.users.models import CommCareUser, WebUser
from corehq.apps.users.tasks import tag_cases_as_deleted_and_remove_indices
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.tests.utils import run_with_all_backends
from corehq.form_processor.utils import is_commcarecase
from corehq.messaging.smsbackends.test.models import SQLTestSMSBackend
from corehq.util.test_utils import create_test_case
class PhoneNumberCacheClearTestCase(TestCase):
def assertNoMatch(self, phone_search, suffix_search, owner_id_search):
self.assertIsNone(PhoneNumber.get_two_way_number(phone_search))
self.assertIsNone(PhoneNumber.get_two_way_number_by_suffix(suffix_search))
self.assertEqual(PhoneNumber.by_owner_id(owner_id_search), [])
def assertPhoneNumbersEqual(self, phone1, phone2):
for field in phone1._meta.fields:
self.assertEqual(getattr(phone1, field.name), getattr(phone2, field.name))
def assertMatch(self, match, phone_search, suffix_search, owner_id_search):
lookedup = PhoneNumber.get_two_way_number(phone_search)
self.assertPhoneNumbersEqual(match, lookedup)
lookedup = PhoneNumber.get_two_way_number_by_suffix(suffix_search)
self.assertPhoneNumbersEqual(match, lookedup)
[lookedup] = PhoneNumber.by_owner_id(owner_id_search)
self.assertPhoneNumbersEqual(match, lookedup)
def _test_cache_clear(self, refresh_each_time=True):
created = PhoneNumber(
domain='phone-number-test',
owner_doc_type='CommCareCase',
owner_id='fake-owner-id1',
phone_number='99912341234',
backend_id=None,
ivr_backend_id=None,
verified=True,
pending_verification=False,
is_two_way=True,
contact_last_modified=datetime.utcnow()
)
created.save()
self.assertNoMatch('99952345234', '52345234', 'fake-owner-id2')
self.assertMatch(created, '99912341234', '12341234', 'fake-owner-id1')
if refresh_each_time:
created = PhoneNumber.objects.get(pk=created.pk)
created.phone_number = '99952345234'
created.save()
self.assertNoMatch('99912341234', '12341234', 'fake-owner-id2')
self.assertMatch(created, '99952345234', '52345234', 'fake-owner-id1')
if refresh_each_time:
created = PhoneNumber.objects.get(pk=created.pk)
created.owner_id = 'fake-owner-id2'
created.save()
self.assertNoMatch('99912341234', '12341234', 'fake-owner-id1')
self.assertMatch(created, '99952345234', '52345234', 'fake-owner-id2')
created.delete()
self.assertNoMatch('99952345234', '52345234', 'fake-owner-id2')
def test_cache_clear_with_refresh(self):
self._test_cache_clear(refresh_each_time=True)
def test_cache_clear_without_refresh(self):
self._test_cache_clear(refresh_each_time=False)
class CaseContactPhoneNumberTestCase(TestCase):
def setUp(self):
self.domain = 'case-phone-number-test'
def tearDown(self):
delete_domain_phone_numbers(self.domain)
def set_case_property(self, case, property_name, value):
update_case(self.domain, case.case_id, case_properties={property_name: value})
return CaseAccessors(self.domain).get_case(case.case_id)
def get_case_phone_number(self, case):
return case.get_phone_number()
def assertPhoneNumberDetails(self, case, phone_number, sms_backend_id, ivr_backend_id,
verified, pending_verification, is_two_way, pk=None):
v = self.get_case_phone_number(case)
self.assertEqual(v.domain, case.domain)
self.assertEqual(v.owner_doc_type, case.doc_type)
self.assertEqual(v.owner_id, case.case_id)
self.assertEqual(v.phone_number, phone_number)
self.assertEqual(v.backend_id, sms_backend_id)
self.assertEqual(v.ivr_backend_id, ivr_backend_id)
self.assertEqual(v.verified, verified)
self.assertEqual(v.pending_verification, pending_verification)
self.assertEqual(v.is_two_way, is_two_way)
self.assertEqual(v.contact_last_modified, case.server_modified_on)
if pk:
self.assertEqual(v.pk, pk)
@run_with_all_backends
def test_case_phone_number_updates(self):
extra_number = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
with create_test_case(self.domain, 'participant', 'test1', drop_signals=False) as case:
self.assertIsNone(self.get_case_phone_number(case))
case = self.set_case_property(case, 'contact_phone_number', '99987658765')
self.assertPhoneNumberDetails(case, '99987658765', None, None, False, False, False)
pk = self.get_case_phone_number(case).pk
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
case = self.set_case_property(case, 'contact_phone_number_is_verified', '1')
self.assertPhoneNumberDetails(case, '99987658765', None, None, True, False, True, pk=pk)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
case = self.set_case_property(case, 'contact_phone_number', '99987698769')
self.assertPhoneNumberDetails(case, '99987698769', None, None, True, False, True)
pk = self.get_case_phone_number(case).pk
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
case = self.set_case_property(case, 'contact_backend_id', 'sms-backend')
self.assertPhoneNumberDetails(case, '99987698769', 'sms-backend', None, True, False, True, pk=pk)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
case = self.set_case_property(case, 'contact_ivr_backend_id', 'ivr-backend')
self.assertPhoneNumberDetails(case, '99987698769', 'sms-backend', 'ivr-backend', True, False, True,
pk=pk)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
v = self.get_case_phone_number(case)
v.contact_last_modified += timedelta(days=1)
v.save()
with patch('corehq.apps.sms.models.PhoneNumber.save') as mock_save:
case = self.set_case_property(case, 'contact_phone_number', '99912341234')
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
mock_save.assert_not_called()
self.assertEqual(PhoneNumber.get_two_way_number('999123'), extra_number)
@run_with_all_backends
def test_close_case(self):
extra_number = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
with create_test_case(self.domain, 'participant', 'test1', drop_signals=False) as case:
case = self.set_case_property(case, 'contact_phone_number', '99987658765')
case = self.set_case_property(case, 'contact_phone_number_is_verified', '1')
self.assertIsNotNone(self.get_case_phone_number(case))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
update_case(self.domain, case.case_id, close=True)
self.assertIsNone(self.get_case_phone_number(case))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
self.assertEqual(PhoneNumber.get_two_way_number('999123'), extra_number)
@run_with_all_backends
def test_case_soft_delete(self):
extra_number = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
with create_test_case(self.domain, 'participant', 'test1', drop_signals=False) as case:
case = self.set_case_property(case, 'contact_phone_number', '99987658765')
case = self.set_case_property(case, 'contact_phone_number_is_verified', '1')
self.assertIsNotNone(self.get_case_phone_number(case))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
tag_cases_as_deleted_and_remove_indices(self.domain, [case.case_id], '123', datetime.utcnow())
self.assertIsNone(self.get_case_phone_number(case))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
self.assertEqual(PhoneNumber.get_two_way_number('999123'), extra_number)
@run_with_all_backends
def test_case_zero_phone_number(self):
extra_number = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
with create_test_case(self.domain, 'participant', 'test1', drop_signals=False) as case:
case = self.set_case_property(case, 'contact_phone_number', '99987658765')
case = self.set_case_property(case, 'contact_phone_number_is_verified', '1')
self.assertIsNotNone(self.get_case_phone_number(case))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
case = self.set_case_property(case, 'contact_phone_number', '0')
self.assertIsNone(self.get_case_phone_number(case))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
self.assertEqual(PhoneNumber.get_two_way_number('999123'), extra_number)
@run_with_all_backends
def test_invalid_phone_format(self):
extra_number = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
with create_test_case(self.domain, 'participant', 'test1', drop_signals=False) as case:
case = self.set_case_property(case, 'contact_phone_number', '99987658765')
case = self.set_case_property(case, 'contact_phone_number_is_verified', '1')
self.assertIsNotNone(self.get_case_phone_number(case))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
case = self.set_case_property(case, 'contact_phone_number', 'xyz')
self.assertIsNone(self.get_case_phone_number(case))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
self.assertEqual(PhoneNumber.get_two_way_number('999123'), extra_number)
@run_with_all_backends
def test_phone_number_already_in_use(self):
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 0)
with create_test_case(self.domain, 'participant', 'test1', drop_signals=False) as case1, \
create_test_case(self.domain, 'participant', 'test2', drop_signals=False) as case2:
case1 = self.set_case_property(case1, 'contact_phone_number', '99987658765')
case1 = self.set_case_property(case1, 'contact_phone_number_is_verified', '1')
case2 = self.set_case_property(case2, 'contact_phone_number', '99987698769')
case2 = self.set_case_property(case2, 'contact_phone_number_is_verified', '1')
self.assertIsNotNone(self.get_case_phone_number(case1))
self.assertIsNotNone(self.get_case_phone_number(case2))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
case2 = self.set_case_property(case2, 'contact_phone_number', '99987658765')
self.assertIsNotNone(self.get_case_phone_number(case1))
self.assertIsNotNone(self.get_case_phone_number(case2))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
self.assertPhoneNumberDetails(case1, '99987658765', None, None, True, False, True)
self.assertPhoneNumberDetails(case2, '99987658765', None, None, False, False, False)
@run_with_all_backends
def test_multiple_entries(self):
extra_number = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999124',
verified=False,
pending_verification=False,
is_two_way=False
)
with create_test_case(self.domain, 'participant', 'test1', drop_signals=False) as case:
case = self.set_case_property(case, 'contact_phone_number', '999124')
case = self.set_case_property(case, 'contact_phone_number_is_verified', '1')
case.create_phone_entry('999125')
self.assertEqual(PhoneNumber.objects.count(), 3)
sync_case_phone_number(case)
self.assertEqual(PhoneNumber.objects.count(), 2)
number1 = PhoneNumber.objects.get(pk=extra_number.pk)
self.assertEqual(number1.owner_id, 'X')
number2 = PhoneNumber.objects.get(owner_id=case.case_id)
self.assertTrue(number2.verified)
self.assertTrue(number2.is_two_way)
self.assertFalse(number2.pending_verification)
class SQLPhoneNumberTestCase(TestCase):
def setUp(self):
self.domain = 'sql-phone-number-test'
self.domain_obj = Domain(name=self.domain)
self.domain_obj.save()
def delete_objects(self, result):
for obj in result:
obj.delete()
def tearDown(self):
self.delete_objects(PhoneNumber.objects.filter(domain=self.domain))
self.delete_objects(SQLMobileBackend.objects.filter(domain=self.domain))
SQLMobileBackendMapping.objects.filter(domain=self.domain).delete()
self.domain_obj.delete()
def test_backend(self):
backend1 = SQLTestSMSBackend.objects.create(
hq_api_id=SQLTestSMSBackend.get_api_id(),
is_global=False,
domain=self.domain,
name='BACKEND1'
)
backend2 = SQLTestSMSBackend.objects.create(
hq_api_id=SQLTestSMSBackend.get_api_id(),
is_global=False,
domain=self.domain,
name='BACKEND2'
)
SQLMobileBackendMapping.set_default_domain_backend(self.domain, backend1)
number = PhoneNumber(domain=self.domain, phone_number='+999123')
self.assertEqual(number.backend, backend1)
number.backend_id = backend2.name
self.assertEqual(number.backend, backend2)
number.backend_id = ' '
self.assertEqual(number.backend, backend1)
@run_with_all_backends
def test_case_owner(self):
with create_test_case(self.domain, 'participant', 'test') as case:
number = PhoneNumber(owner_doc_type='CommCareCase', owner_id=case.case_id)
owner = number.owner
self.assertTrue(is_commcarecase(owner))
self.assertEqual(owner.case_id, case.case_id)
def test_user_owner(self):
mobile_user = CommCareUser.create(self.domain, 'abc', 'def', None, None)
number = PhoneNumber(owner_doc_type='CommCareUser', owner_id=mobile_user.get_id)
owner = number.owner
self.assertTrue(isinstance(owner, CommCareUser))
self.assertEqual(owner.get_id, mobile_user.get_id)
web_user = WebUser.create(self.domain, 'ghi', 'jkl', None, None)
number = PhoneNumber(owner_doc_type='WebUser', owner_id=web_user.get_id)
owner = number.owner
self.assertTrue(isinstance(owner, WebUser))
self.assertEqual(owner.get_id, web_user.get_id)
number = PhoneNumber(owner_doc_type='X')
self.assertIsNone(number.owner)
def test_get_two_way_number(self):
number1 = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=False,
pending_verification=False,
is_two_way=False
)
self.assertEqual(PhoneNumber.get_two_way_number('999123'), number1)
self.assertEqual(PhoneNumber.get_two_way_number('+999 123'), number1)
self.assertIsNone(PhoneNumber.get_two_way_number('999124'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999123'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999124'))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
number1.phone_number = '999124'
number1.save()
self.assertIsNone(PhoneNumber.get_two_way_number('999123'))
self.assertEqual(PhoneNumber.get_two_way_number('999124'), number1)
self.assertIsNone(PhoneNumber.get_number_pending_verification('999123'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999124'))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
number1.delete()
self.assertIsNone(PhoneNumber.get_two_way_number('999123'))
self.assertIsNone(PhoneNumber.get_two_way_number('999124'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999123'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999124'))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
def test_get_number_pending_verification(self):
number1 = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=False,
pending_verification=True,
is_two_way=False
)
PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=False,
pending_verification=False,
is_two_way=False
)
self.assertIsNone(PhoneNumber.get_two_way_number('999123'))
self.assertIsNone(PhoneNumber.get_two_way_number('999124'))
self.assertEqual(PhoneNumber.get_number_pending_verification('999123'), number1)
self.assertEqual(PhoneNumber.get_number_pending_verification('+999 123'), number1)
self.assertIsNone(PhoneNumber.get_number_pending_verification('999124'))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
number1.phone_number = '999124'
number1.save()
self.assertIsNone(PhoneNumber.get_two_way_number('999123'))
self.assertIsNone(PhoneNumber.get_two_way_number('999124'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999123'))
self.assertEqual(PhoneNumber.get_number_pending_verification('999124'), number1)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
number1.set_two_way()
number1.set_verified()
number1.save()
self.assertIsNone(PhoneNumber.get_two_way_number('999123'))
self.assertEqual(PhoneNumber.get_two_way_number('999124'), number1)
self.assertIsNone(PhoneNumber.get_number_pending_verification('999123'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999124'))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
number1.delete()
self.assertIsNone(PhoneNumber.get_two_way_number('999123'))
self.assertIsNone(PhoneNumber.get_two_way_number('999124'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999123'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999124'))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
def test_suffix_lookup(self):
number1 = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
number2 = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999223',
verified=True,
pending_verification=False,
is_two_way=True
)
self.assertEqual(PhoneNumber.get_two_way_number_by_suffix('1 23'), number1)
self.assertEqual(PhoneNumber.get_two_way_number_by_suffix('2 23'), number2)
self.assertIsNone(PhoneNumber.get_two_way_number_by_suffix('23'))
number1.phone_number = '999124'
number1.save()
number2.phone_number = '999224'
number2.save()
self.assertIsNone(PhoneNumber.get_two_way_number_by_suffix('1 23'))
self.assertIsNone(PhoneNumber.get_two_way_number_by_suffix('2 23'))
self.assertEqual(PhoneNumber.get_two_way_number_by_suffix('124'), number1)
self.assertEqual(PhoneNumber.get_two_way_number_by_suffix('224'), number2)
def test_extensive_search(self):
number = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
self.assertEqual(PhoneNumber.by_extensive_search('999123'), number)
self.assertEqual(PhoneNumber.by_extensive_search('0999123'), number)
self.assertEqual(PhoneNumber.by_extensive_search('00999123'), number)
self.assertEqual(PhoneNumber.by_extensive_search('000999123'), number)
self.assertEqual(PhoneNumber.by_extensive_search('123'), number)
self.assertIsNone(PhoneNumber.by_extensive_search('999124'))
def test_by_domain(self):
number1 = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
number2 = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999124',
verified=False,
pending_verification=False,
is_two_way=False
)
number3 = PhoneNumber.objects.create(
domain=self.domain + 'X',
owner_doc_type='X',
owner_id='X',
phone_number='999124',
verified=True,
pending_verification=False,
is_two_way=True
)
self.addCleanup(number3.delete)
self.assertEqual(
set(PhoneNumber.by_domain(self.domain)),
set([number1, number2])
)
self.assertEqual(
set(PhoneNumber.by_domain(self.domain, ids_only=True)),
set([number1.couch_id, number2.couch_id])
)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
def test_by_owner_id(self):
number = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='owner1',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
[lookup] = PhoneNumber.by_owner_id('owner1')
self.assertEqual(lookup, number)
number.owner_id = 'owner2'
number.save()
self.assertEqual(PhoneNumber.by_owner_id('owner1'), [])
[lookup] = PhoneNumber.by_owner_id('owner2')
self.assertEqual(lookup, number)
number.verified = False
number.is_two_way = False
number.save()
[lookup] = PhoneNumber.by_owner_id('owner2')
self.assertFalse(lookup.verified)
self.assertFalse(lookup.is_two_way)
def create_case_contact(self, phone_number):
return create_test_case(
self.domain,
'participant',
'test',
case_properties={
'contact_phone_number': phone_number,
'contact_phone_number_is_verified': '1',
},
drop_signals=False
)
@run_with_all_backends
def test_delete_phone_numbers_for_owners(self):
with self.create_case_contact('9990001') as case1, \
self.create_case_contact('9990002') as case2, \
self.create_case_contact('9990003') as case3:
self.assertEqual(len(PhoneNumber.by_owner_id(case1.case_id)), 1)
self.assertEqual(len(PhoneNumber.by_owner_id(case2.case_id)), 1)
self.assertEqual(len(PhoneNumber.by_owner_id(case3.case_id)), 1)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 3)
delete_phone_numbers_for_owners([case2.case_id, case3.case_id])
self.assertEqual(len(PhoneNumber.by_owner_id(case1.case_id)), 1)
self.assertEqual(len(PhoneNumber.by_owner_id(case2.case_id)), 0)
self.assertEqual(len(PhoneNumber.by_owner_id(case3.case_id)), 0)
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
def test_verify_uniqueness(self):
number1 = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
number2 = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=False,
pending_verification=False,
is_two_way=False
)
number1.verify_uniqueness()
with self.assertRaises(PhoneNumberInUseException):
number2.verify_uniqueness()
class TestUserPhoneNumberSync(TestCase):
def setUp(self):
self.domain = 'user-phone-number-test'
self.domain_obj = Domain(name=self.domain)
self.domain_obj.save()
self.mobile_worker1 = CommCareUser.create(self.domain, 'mobile1', 'mobile1', None, None)
self.mobile_worker2 = CommCareUser.create(self.domain, 'mobile2', 'mobile2', None, None)
def tearDown(self):
delete_domain_phone_numbers(self.domain)
self.domain_obj.delete()
def assertPhoneEntries(self, user, phone_numbers):
entries = user.get_phone_entries()
self.assertEqual(len(entries), len(phone_numbers))
self.assertEqual(set(entries.keys()), set(phone_numbers))
def testSync(self):
extra_number = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='owner1',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
user = self.mobile_worker1
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 1)
user.phone_numbers = ['9990001']
user.save()
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 2)
self.assertPhoneEntries(user, ['9990001'])
before = user.get_phone_entries()['9990001']
user.phone_numbers = ['9990001', '9990002']
user.save()
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 3)
self.assertPhoneEntries(user, ['9990001', '9990002'])
after = user.get_phone_entries()['9990001']
self.assertEqual(before.pk, after.pk)
user.phone_numbers = ['9990002']
user.save()
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 2)
self.assertPhoneEntries(user, ['9990002'])
self.assertEqual(PhoneNumber.get_two_way_number('999123'), extra_number)
def testRetire(self):
self.mobile_worker1.phone_numbers = ['9990001']
self.mobile_worker1.save()
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 1)
self.assertPhoneEntries(self.mobile_worker1, ['9990001'])
self.mobile_worker2.phone_numbers = ['9990002']
self.mobile_worker2.save()
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 2)
self.assertPhoneEntries(self.mobile_worker2, ['9990002'])
self.mobile_worker1.retire(deleted_by=None)
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 1)
self.assertPhoneEntries(self.mobile_worker2, ['9990002'])
class TestGenericContactMethods(TestCase):
def setUp(self):
self.domain = 'contact-phone-number-test'
self.domain_obj = Domain(name=self.domain)
self.domain_obj.save()
self.mobile_worker1 = CommCareUser.create(self.domain, 'mobile1', 'mobile1', None, None)
self.mobile_worker2 = CommCareUser.create(self.domain, 'mobile2', 'mobile2', None, None)
def tearDown(self):
delete_domain_phone_numbers(self.domain)
self.domain_obj.delete()
def testGetOrCreate(self):
before = self.mobile_worker1.get_or_create_phone_entry('999123')
self.assertEqual(before.owner_doc_type, 'CommCareUser')
self.assertEqual(before.owner_id, self.mobile_worker1.get_id)
self.assertEqual(before.phone_number, '999123')
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 1)
after = self.mobile_worker1.get_or_create_phone_entry('999123')
self.assertEqual(before.pk, after.pk)
self.assertEqual(after.owner_doc_type, 'CommCareUser')
self.assertEqual(after.owner_id, self.mobile_worker1.get_id)
self.assertEqual(after.phone_number, '999123')
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 1)
def testGetPhoneEntries(self):
number1 = self.mobile_worker1.get_or_create_phone_entry('999123')
number2 = self.mobile_worker1.get_or_create_phone_entry('999124')
self.mobile_worker1.get_or_create_phone_entry('999125')
number4 = self.mobile_worker2.get_or_create_phone_entry('999126')
number1.set_two_way()
number2.set_pending_verification()
number4.set_two_way()
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 4)
entries = self.mobile_worker1.get_phone_entries()
self.assertEqual(set(entries.keys()), set(['999123', '999124', '999125']))
entries = self.mobile_worker1.get_two_way_numbers()
self.assertEqual(set(entries.keys()), set(['999123']))
def testDelete(self):
self.mobile_worker1.get_or_create_phone_entry('999123')
self.mobile_worker1.get_or_create_phone_entry('999124')
self.mobile_worker1.get_or_create_phone_entry('999125')
self.mobile_worker2.get_or_create_phone_entry('999126')
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 4)
self.mobile_worker1.delete_phone_entry('999124')
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 3)
entries = self.mobile_worker1.get_phone_entries()
self.assertEqual(set(entries.keys()), set(['999123', '999125']))
entries = self.mobile_worker2.get_phone_entries()
self.assertEqual(set(entries.keys()), set(['999126']))
def testUserSyncNoChange(self):
before = self.mobile_worker1.get_or_create_phone_entry('999123')
before.set_two_way()
before.set_verified()
before.save()
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 1)
self.mobile_worker1.phone_numbers = ['999123']
self.mobile_worker1.save()
self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 1)
after = self.mobile_worker1.get_phone_entries()['999123']
self.assertEqual(before.pk, after.pk)
self.assertTrue(after.is_two_way)
self.assertTrue(after.verified)
self.assertFalse(after.pending_verification)
| true
| true
|
790e06e38495bb0be9f9f830d21b0ae4d802b00d
| 4,386
|
py
|
Python
|
contrib/seeds/generate-seeds.py
|
binariumpay/binarium
|
f527bb1829c0ea5ebb4d713b00a2ea8353fc13fa
|
[
"MIT"
] | 24
|
2018-05-12T01:29:50.000Z
|
2021-03-05T13:02:47.000Z
|
contrib/seeds/generate-seeds.py
|
binariumpay/binarium
|
f527bb1829c0ea5ebb4d713b00a2ea8353fc13fa
|
[
"MIT"
] | 4
|
2018-09-11T16:32:39.000Z
|
2019-02-22T06:31:02.000Z
|
contrib/seeds/generate-seeds.py
|
binariumpay/binarium
|
f527bb1829c0ea5ebb4d713b00a2ea8353fc13fa
|
[
"MIT"
] | 10
|
2018-05-17T09:34:32.000Z
|
2019-10-01T13:50:31.000Z
|
#!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
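# Illustrative behaviour of parse_spec() (example addresses, not project seed data):
#   parse_spec('1.2.3.4', 9999)      -> (pchIPv4 + bytearray([1, 2, 3, 4]), 9999)
#   parse_spec('1.2.3.4:8888', 9999) -> (pchIPv4 + bytearray([1, 2, 3, 4]), 8888)
#   parse_spec('[::1]:9999', 9999)   -> (16-byte IPv6 loopback, 9999)
#   parse_spec('0x04030201', 9999)   -> same bytes as '1.2.3.4' (old little-endian form)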
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
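# For example, a nodes_main.txt entry such as "1.2.3.4:9999" (a hypothetical seed)
# would be emitted by process_nodes() as:
#   {{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0x01,0x02,0x03,0x04}, 9999}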
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BINARIUM_CHAINPARAMSSEEDS_H\n')
g.write('#define BINARIUM_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the binarium network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 9999)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 19999)
g.write('#endif // BINARIUM_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| 31.553957
| 98
| 0.582991
|
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr:
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr:
sub = [[], []]
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1):
continue
x += 1
assert(x < 2)
else:
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'):
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match:
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1:
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BINARIUM_CHAINPARAMSSEEDS_H\n')
g.write('#define BINARIUM_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the binarium network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 9999)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 19999)
g.write('#endif // BINARIUM_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| true
| true
|
790e07aec44be9d4fe5025463f80574ced0792e3
| 16,156
|
py
|
Python
|
DiffractionClassifierCombinatorial2.0.py
|
MatthewGong/DiffractionClassification
|
68be6cf3960f09388253c79bab13cbd9dc07edbb
|
[
"MIT"
] | null | null | null |
DiffractionClassifierCombinatorial2.0.py
|
MatthewGong/DiffractionClassification
|
68be6cf3960f09388253c79bab13cbd9dc07edbb
|
[
"MIT"
] | null | null | null |
DiffractionClassifierCombinatorial2.0.py
|
MatthewGong/DiffractionClassification
|
68be6cf3960f09388253c79bab13cbd9dc07edbb
|
[
"MIT"
] | null | null | null |
import ClientSide2 #custom package
import numpy as np
import argparse
import json
import os
import ClassifierFunctions2 as cf
import random
import logging
from matplotlib import pyplot as plt
from builtins import input
from Notation import SpaceGroupsDict as spgs
SpGr = spgs.spacegroups()
from itertools import combinations,chain
# Initialize essential global variables
#URL = "" #you'll need me to send you the link
FAMILIES = ["triclinic","monoclinic","orthorhombic","tetragonal",
"trigonal","hexagonal","cubic"]
DEFAULT_SESSION = os.path.join ("Sessions","session.json")
DEFAULT_USER = "user_profile.json"
SERVER_INFO = "server_gen2.json"
# list of three, one per level
prediction_per_level = [1, 1, 2]
num_peaks = [1, 5]
DEFAULT_FILTER_SETTINGS = { "max_numpeaks": 75,
"dspace_range" : [0.5,6],
"peak_threshold": 0.7,
"filter_size" : 15,
"passes" : 2
}
def build_parser():
parser = argparse.ArgumentParser()
# This will be implemented as rollout broadens
parser.add_argument('--apikey', type=str,
dest='key', help='api key to securely access service',
metavar='KEY', required=False)
parser.add_argument('--session',
dest='session', help='Keep user preferences for multirun sessions', metavar='SESSION',required=False, default=None)
parser.add_argument('--subset',
dest='subset',help='Run a small number of the possible combinations. Mostly for testing. Input the number of combos to run.', metavar='NO_OF_COMBOS',required=False, default=None)
parser.add_argument('--dataonly',
dest='data_only',help='run the classification without plotting', metavar='True/[False]',required=False, default=False)
parser.add_argument('--figuresonly',
dest='figures_only',help='Plot the figures without running data. Data must be saved previously.', metavar='True/[False]',required=False, default=False)
return parser
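# Example invocation (the session file name below is a placeholder for the user's own file):
#   python DiffractionClassifierCombinatorial2.0.py --session session.json --subset 20 --dataonly True
# Note that --dataonly/--figuresonly are stored as plain strings, so any non-empty
# value (including "False") is treated as true by the `if options.data_only:` checks below.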
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
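# As used in combination_peaks() below, the powerset is filtered with num_peaks = [1, 5],
# so only subsets of 2, 3 or 4 peaks are kept.  For example, 6 detected peaks give
# 2**6 = 64 subsets, of which C(6,2) + C(6,3) + C(6,4) = 15 + 20 + 15 = 50 are classified.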
def combination_peaks(peak_batch, chem_vec, mode, temp_name, crystal_family, user_info, URL, prediction_per_level, subset, num_peaks):
outpath = "Ready"
if not os.path.exists(outpath):
os.makedirs(outpath)
find_valid_peaks = list(powerset(peak_batch["vec"]))
find_valid_peaks = [item for item in find_valid_peaks if len(item) > num_peaks[0] and len(item) < num_peaks[1]]
print(len(find_valid_peaks),"valid peak combinations")
valid_peaks_combinations = [{"vec":proto_combo} for proto_combo in find_valid_peaks]
found = False
threshold = 0
tot_spec = 1
for p in prediction_per_level:
tot_spec *= p
guesses = {"num_pred": tot_spec}
for k in range(1,tot_spec+1):
guesses["species_"+str(k)]=[]
guesses["spec_confidence_"+str(k)]=[]
# print(guesses)
common_peaks = []
failed_combos = valid_peaks_combinations
#peak_locs,user_info,URL,fam
persistance = 0
LIMIT = 3
# print(failed_combos)
if subset >0 and subset<len(failed_combos):
failed_combos = random.sample(failed_combos, subset)
print("using ", len(failed_combos)," peak combinations")
while len(failed_combos) > 0 and persistance < LIMIT:
for combo in failed_combos:
try:
# print('---classifying---')
# print(combo)
classificated = ClientSide2.Send_For_Classification(combo, chem_vec, mode, crystal_family, user_info, URL, prediction_per_level)
print(classificated)
classificated["file_name"] = temp_name
# print('name =')
# print(temp_name)
print(os.path.join(outpath,temp_name))
cf.write_to_csv(os.path.join(outpath,temp_name) + ".csv", classificated, prediction_per_level)
print(tot_spec)
for k in range(1,tot_spec+1):
print(guesses)
guesses['species_'+str(k)].append( classificated["species_"+str(k)] )
guesses['spec_confidence_'+str(k)].append( classificated["spec_confidence_"+str(k)] )
common_peaks.append(classificated["peaks"])
# remove the classified combination
failed_combos.remove(combo)
except KeyboardInterrupt:
raise
            except Exception:
                print("An error occurred; this combination was not classified.\nIt will be retried {} more times".format(LIMIT-persistance))
persistance += 1
if len(failed_combos)>0:
print("there were {} failed combinations".format(len(failed_combos)))
print('returning')
return common_peaks, guesses
def make_figures(guesses,crystal_family,froot):
if crystal_family:
lower_gen = SpGr.edges["genus"][crystal_family][0]
upper_gen = SpGr.edges["genus"][crystal_family][1]
else:
lower_gen = SpGr.edges["genus"][FAMILIES[0]][0]
upper_gen = SpGr.edges["genus"][FAMILIES[-1]][1]
fam_range = range(SpGr.edges["species"][lower_gen][0],1+SpGr.edges["species"][upper_gen][1])
# phi = 2*np.pi/360
fig_ang = 300
phi = (2*np.pi*fig_ang/360)/(max(fam_range)-min(fam_range)+1)
thet = fig_ang/(max(fam_range)-min(fam_range)+1)
fam_axes = [1,3,16,75,143,168,195]
# fig1 = plt.figure(1,figsize=(len(fam_range),16))
fig1 = plt.figure(2,figsize=(16,8))
plt.clf()
ax1 = fig1.add_axes([0.03,0.1,.96,.8])
# ax1.set_yscale('log')
fam_color = ['k','g','b','c','m','y','k']
for k in range(len(fam_axes)-1):
ax1.axvspan(fam_axes[k]-0.5,fam_axes[k+1]-0.5,facecolor = fam_color[k], alpha=0.5)
# ax1.axvspan(fam_axes[0],fam_axes[1]-1,alpha=0.5)
ax1.axvspan(fam_axes[-1]-0.5,np.max(fam_range)-0.5,alpha=0.3)
    plt.ion()
fig2 = plt.figure(3,figsize=(8,8))
plt.clf()
    plt.ion()
ax2 = fig2.add_axes([0.1,0.1,0.8,0.8],polar=True)
ax2.set_thetamin(1)
ax2.set_rmin(0)
ax2.set_thetamax(fig_ang)
ax2.set_rlabel_position(30)
ax2.set_theta_direction(-1)
ax2.set_theta_zero_location("S",offset=-(360-fig_ang)/2)
# ax2.set_rscale('log')
prev_histograms_1 = []
prev_histograms_2 = []
plots_1 = []
plots_2 = []
# print('guesses = ')
# print(guesses)
num_pred = np.prod(prediction_per_level)
for rank in range(1,num_pred+1):
histo = np.histogram([g for g in guesses["species_{}".format(rank)]], weights = [g for g in guesses["spec_confidence_{}".format(rank)]], bins = np.arange(min(fam_range)-0.5, max(fam_range)+1.5))
histo_log = np.array([np.log10(float(h))+1 if h>0 else 0 for h in histo[0]])
# print('log_histo = ')
# print(histo_log.tolist())
if rank > 1:
plt.figure(2)
plot_1 = plt.bar(fam_range, histo[0], bottom = np.sum(np.vstack(prev_histograms_1), axis=0), align="center", width = 1.1)
plt.figure(3)
sum_hist = np.sum(np.vstack(prev_histograms_1), axis=0)
log_sum = np.array([np.log10(float(h))-1 if h>0 else -1 for h in sum_hist])
# print('log_sum = ')
# print(log_sum.tolist())
plot_2 = plt.bar([f*phi for f in fam_range], histo_log, bottom = log_sum, align="center", width = phi)
else:
plt.figure(2)
plot_1 = plt.bar(fam_range, histo[0], align="center", color='red', width = 1.1)
plt.figure(3)
plot_2 = plt.bar([f*phi for f in fam_range], histo_log, bottom = -1, align="center", color='red', width = phi)
plots_1.append(plot_1)
plots_2.append(plot_2)
plt.figure(2)
plt.yticks(rotation='vertical')
plt.xticks(fam_range,rotation='vertical')
prev_histograms_1.append(histo[0])
prev_histograms_2.append(histo[0])
# plt.figure(3)
# ax2.set_xticks(histo[1][:-1])
plt.figure(2)
# ym = ax1.get_ymax()*.9
r_max = 0
for rect in plot_1:
n_max = rect.get_height()+rect.get_y()
if n_max>r_max:
r_max = n_max
for k in range(len(FAMILIES)-1):
if k ==0:
ym_t = r_max*0.7
cent = 'left'
else:
ym_t = r_max*0.6
cent = 'center'
ax1.text((fam_axes[k+1]+fam_axes[k])/2,ym_t, FAMILIES[k], horizontalalignment=cent)
ax1.text((fam_axes[-1]+np.max(fam_range))/2,ym_t, FAMILIES[-1], horizontalalignment='center')
ax1.autoscale(enable=True, axis='x', tight=True)
ax1.tick_params(axis='x', which='major', labelsize=6)
plt.xlabel("Prediction",fontsize=10)
plt.ylabel("Counts",fontsize=10)
# plt.legend(plots,("species_1","species_2","species_3","species_4"))
leg_list = [ "species_{}".format(k+1) for k in range(num_pred) ]
plt.legend(plots_1,leg_list)
print("Results/"+froot+"_gen2.png")
plt.savefig("Results/"+froot+"_gen2.png",dpi = 300)
plt.figure(3)
# plt.xlabel("Prediction",fontsize=10,rotation='vertical')
# plt.ylabel("Counts",fontsize=10)
r_ticks = list(range(int(np.floor(ax2.get_rmin())),int(np.ceil(ax2.get_rmax())+1)))
ax2.set_rgrids(r_ticks, labels = ['10e'+str(r) for r in r_ticks])
ax2.set_thetagrids([f*thet for f in fam_axes],labels = FAMILIES)
plt.legend(plots_2,leg_list)
# plt.legend(plots,("species_1","species_2","species_3","species_4"))
# print("Results/"+froot+"_gen2_polar.png")
# plt.savefig("Results/"+froot+"_gen2_polar.png",dpi = 300)
# plt.show()
def main():
parser = build_parser()
options = parser.parse_args()
if options.subset:
subset = int(options.subset)
else:
subset = -1
print(options.session)
# opens the user specified session
if options.session:
with open(os.path.join("Sessions",options.session),'r') as f:
session = json.load(f)
# opens the default session
else:
with open(DEFAULT_SESSION,'r') as f:
session = json.load(f)
# set variables from loaded session data
# print(session)
file_path = session["file_path"]
if "output_file" in session:
output_file = session["output_file"]
else:
output_file = ''
if "output_file_root" in session:
output_file_root = session["output_file_root"]
else:
output_file_root = ''
if not (output_file or output_file_root):
raise ValueError('output_file or output_file_root must be defined in session file.')
manual_peak_selection = session["manual_peak_selection"]
known_family = session["known_family"]
chemistry = session["chemistry"]
diffraction = session["diffraction"]
print('file inputs')
print(output_file)
print(output_file_root)
mode = ""
if diffraction:
if chemistry:
mode="DiffChem"
else:
mode="DiffOnly"
else:
if chemistry:
raise ValueError('Running chemistry only predictions is currently not implemented')
else:
raise ValueError('Invalid prediction type. Either diffraction or chemistry must be enabled')
if known_family and known_family=='yes':
print('known family')
crystal_family = session["crystal_family"]
prediction_per_level[0] = 1
else:
crystal_family = None
# Load user from provided path, [IN PROGRESS]
if session["user_info"]:
with open(session["user_info"],'r') as f:
user_info = json.load(f)
else:
with open(DEFAULT_USER,'r') as f:
user_info = json.load(f)
with open(session["server_info"],'r') as f:
server_info = json.load(f)
if server_info['URL']:
url = server_info['URL']
else:
raise ValueError('you need to have the server URL provided to you')
chem_vec = cf.check_for_chemistry(session)
print(file_path)
print('---starting loop--')
# Determine if the path is a directory or a file
if os.path.isdir(file_path):
print("loading files from directory")
file_paths = []
for dirpath,dirnames,fpath in os.walk(file_path):
for path in fpath:
if not path[0] == '.':
file_paths.append(os.path.join(dirpath,path))
print("found {} files to load.".format(len(file_paths)))
else:
file_paths = [file_path]
if not os.path.exists("Results"):
os.makedirs("Results")
print(file_paths)
for f_path in file_paths:
# Load Data from specified file (DM3, TIFF, CSV etc....)
print("loading data from {}".format(f_path))
image_data,scale = ClientSide2.Load_Profile(f_path)
print("I successfully loaded the data")
# print(scale)
print(options.figures_only)
print(options.data_only)
        # defining filepaths here to facilitate loading data.
froot = os.path.splitext(os.path.basename(f_path))[0]
if output_file_root:
outfile = 'Results/'+output_file_root+froot+'.json'
outfile_2 = 'Results/'+output_file_root+froot+'_peaks.json'
else:
output_file_root='' #for the figure filenames
[outroot, ext] = os.path.splitext(output_file)
if not ext=='.json':
output_file = outroot+'.json'
output_file_2 = outroot+'_peaks.json'
outfile = 'Results/'+output_file
outfile_2 = 'Results/'+output_file_2
# optional skipping the data creation
if options.figures_only:
print('Only creating figures')
with open(outfile, 'r') as fp:
guesses = json.load(fp)
else:
if diffraction:
                peak_locs,peaks_h = ClientSide2.Find_Peaks(image_data, scale, **DEFAULT_FILTER_SETTINGS)
# Choose which peaks to classify on
if manual_peak_selection:
peak_locs = cf.choose_peaks(peak_locs,peaks_h)
#raise NotImplementedError
else:
peak_locs = []
peaks_h = []
# Script hangs when there are too many peaks.
# TODO: implement something better.
if len(peak_locs['d_spacing'])>25:
print("\n\n======================================================")
print("there are "+ str(len(peak_locs['d_spacing']))+" peaks, which is too many.")
print(f_path)
print("======================================================\n\n")
continue
common_peaks,guesses = combination_peaks(peak_locs, chem_vec, mode, froot, crystal_family, user_info, url, prediction_per_level, subset, num_peaks)
# print("--- peak_locs ---")
# print(peak_locs)
guesses["pk_d_spacing"] = peak_locs["d_spacing"].tolist()
guesses["pk_vec"] = peak_locs["vec"]
print(guesses)
# save data
with open(outfile, 'w') as fp:
json.dump(guesses, fp)
with open(outfile_2, 'w') as fp:
json.dump(common_peaks, fp)
if options.data_only:
print('skipping figures')
else:
make_figures(guesses,crystal_family,output_file_root+froot)
            # TODO: Split up this function and enable plotting on precomputed data.
# plt.show(block=False)
if __name__ == "__main__":
main()
| 36.224215
| 203
| 0.590121
|
import ClientSide2
import numpy as np
import argparse
import json
import os
import ClassifierFunctions2 as cf
import random
import logging
from matplotlib import pyplot as plt
from builtins import input
from Notation import SpaceGroupsDict as spgs
SpGr = spgs.spacegroups()
from itertools import combinations,chain
FAMILIES = ["triclinic","monoclinic","orthorhombic","tetragonal",
"trigonal","hexagonal","cubic"]
DEFAULT_SESSION = os.path.join ("Sessions","session.json")
DEFAULT_USER = "user_profile.json"
SERVER_INFO = "server_gen2.json"
# list of three, one per level
prediction_per_level = [1, 1, 2]
num_peaks = [1, 5]
DEFAULT_FILTER_SETTINGS = { "max_numpeaks": 75,
"dspace_range" : [0.5,6],
"peak_threshold": 0.7,
"filter_size" : 15,
"passes" : 2
}
def build_parser():
parser = argparse.ArgumentParser()
# This will be implemented as rollout broadens
parser.add_argument('--apikey', type=str,
dest='key', help='api key to securely access service',
metavar='KEY', required=False)
parser.add_argument('--session',
dest='session', help='Keep user preferences for multirun sessions', metavar='SESSION',required=False, default=None)
parser.add_argument('--subset',
dest='subset',help='Run a small number of the possible combinations. Mostly for testing. Input the number of combos to run.', metavar='NO_OF_COMBOS',required=False, default=None)
parser.add_argument('--dataonly',
dest='data_only',help='run the classification without plotting', metavar='True/[False]',required=False, default=False)
parser.add_argument('--figuresonly',
dest='figures_only',help='Plot the figures without running data. Data must be saved previously.', metavar='True/[False]',required=False, default=False)
return parser
def powerset(iterable):
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
def combination_peaks(peak_batch, chem_vec, mode, temp_name, crystal_family, user_info, URL, prediction_per_level, subset, num_peaks):
outpath = "Ready"
if not os.path.exists(outpath):
os.makedirs(outpath)
find_valid_peaks = list(powerset(peak_batch["vec"]))
find_valid_peaks = [item for item in find_valid_peaks if len(item) > num_peaks[0] and len(item) < num_peaks[1]]
print(len(find_valid_peaks),"valid peak combinations")
valid_peaks_combinations = [{"vec":proto_combo} for proto_combo in find_valid_peaks]
found = False
threshold = 0
tot_spec = 1
for p in prediction_per_level:
tot_spec *= p
guesses = {"num_pred": tot_spec}
for k in range(1,tot_spec+1):
guesses["species_"+str(k)]=[]
guesses["spec_confidence_"+str(k)]=[]
# print(guesses)
common_peaks = []
failed_combos = valid_peaks_combinations
#peak_locs,user_info,URL,fam
persistance = 0
LIMIT = 3
# print(failed_combos)
if subset >0 and subset<len(failed_combos):
failed_combos = random.sample(failed_combos, subset)
print("using ", len(failed_combos)," peak combinations")
while len(failed_combos) > 0 and persistance < LIMIT:
for combo in failed_combos:
try:
# print('---classifying---')
# print(combo)
classificated = ClientSide2.Send_For_Classification(combo, chem_vec, mode, crystal_family, user_info, URL, prediction_per_level)
print(classificated)
classificated["file_name"] = temp_name
# print('name =')
# print(temp_name)
print(os.path.join(outpath,temp_name))
cf.write_to_csv(os.path.join(outpath,temp_name) + ".csv", classificated, prediction_per_level)
print(tot_spec)
for k in range(1,tot_spec+1):
print(guesses)
guesses['species_'+str(k)].append( classificated["species_"+str(k)] )
guesses['spec_confidence_'+str(k)].append( classificated["spec_confidence_"+str(k)] )
common_peaks.append(classificated["peaks"])
# remove the classified combination
failed_combos.remove(combo)
except KeyboardInterrupt:
raise
            except Exception:
                print("An error occurred; this combination was not classified.\nIt will be retried {} more times".format(LIMIT-persistance))
persistance += 1
if len(failed_combos)>0:
print("there were {} failed combinations".format(len(failed_combos)))
print('returning')
return common_peaks, guesses
def make_figures(guesses,crystal_family,froot):
if crystal_family:
lower_gen = SpGr.edges["genus"][crystal_family][0]
upper_gen = SpGr.edges["genus"][crystal_family][1]
else:
lower_gen = SpGr.edges["genus"][FAMILIES[0]][0]
upper_gen = SpGr.edges["genus"][FAMILIES[-1]][1]
fam_range = range(SpGr.edges["species"][lower_gen][0],1+SpGr.edges["species"][upper_gen][1])
# phi = 2*np.pi/360
fig_ang = 300
phi = (2*np.pi*fig_ang/360)/(max(fam_range)-min(fam_range)+1)
thet = fig_ang/(max(fam_range)-min(fam_range)+1)
fam_axes = [1,3,16,75,143,168,195]
# fig1 = plt.figure(1,figsize=(len(fam_range),16))
fig1 = plt.figure(2,figsize=(16,8))
plt.clf()
ax1 = fig1.add_axes([0.03,0.1,.96,.8])
# ax1.set_yscale('log')
fam_color = ['k','g','b','c','m','y','k']
for k in range(len(fam_axes)-1):
ax1.axvspan(fam_axes[k]-0.5,fam_axes[k+1]-0.5,facecolor = fam_color[k], alpha=0.5)
# ax1.axvspan(fam_axes[0],fam_axes[1]-1,alpha=0.5)
ax1.axvspan(fam_axes[-1]-0.5,np.max(fam_range)-0.5,alpha=0.3)
    plt.ion()
fig2 = plt.figure(3,figsize=(8,8))
plt.clf()
    plt.ion()
ax2 = fig2.add_axes([0.1,0.1,0.8,0.8],polar=True)
ax2.set_thetamin(1)
ax2.set_rmin(0)
ax2.set_thetamax(fig_ang)
ax2.set_rlabel_position(30)
ax2.set_theta_direction(-1)
ax2.set_theta_zero_location("S",offset=-(360-fig_ang)/2)
# ax2.set_rscale('log')
prev_histograms_1 = []
prev_histograms_2 = []
plots_1 = []
plots_2 = []
# print('guesses = ')
# print(guesses)
num_pred = np.prod(prediction_per_level)
for rank in range(1,num_pred+1):
histo = np.histogram([g for g in guesses["species_{}".format(rank)]], weights = [g for g in guesses["spec_confidence_{}".format(rank)]], bins = np.arange(min(fam_range)-0.5, max(fam_range)+1.5))
histo_log = np.array([np.log10(float(h))+1 if h>0 else 0 for h in histo[0]])
# print('log_histo = ')
# print(histo_log.tolist())
if rank > 1:
plt.figure(2)
plot_1 = plt.bar(fam_range, histo[0], bottom = np.sum(np.vstack(prev_histograms_1), axis=0), align="center", width = 1.1)
plt.figure(3)
sum_hist = np.sum(np.vstack(prev_histograms_1), axis=0)
log_sum = np.array([np.log10(float(h))-1 if h>0 else -1 for h in sum_hist])
# print('log_sum = ')
# print(log_sum.tolist())
plot_2 = plt.bar([f*phi for f in fam_range], histo_log, bottom = log_sum, align="center", width = phi)
else:
plt.figure(2)
plot_1 = plt.bar(fam_range, histo[0], align="center", color='red', width = 1.1)
plt.figure(3)
plot_2 = plt.bar([f*phi for f in fam_range], histo_log, bottom = -1, align="center", color='red', width = phi)
plots_1.append(plot_1)
plots_2.append(plot_2)
plt.figure(2)
plt.yticks(rotation='vertical')
plt.xticks(fam_range,rotation='vertical')
prev_histograms_1.append(histo[0])
prev_histograms_2.append(histo[0])
# plt.figure(3)
# ax2.set_xticks(histo[1][:-1])
plt.figure(2)
# ym = ax1.get_ymax()*.9
r_max = 0
for rect in plot_1:
n_max = rect.get_height()+rect.get_y()
if n_max>r_max:
r_max = n_max
for k in range(len(FAMILIES)-1):
if k ==0:
ym_t = r_max*0.7
cent = 'left'
else:
ym_t = r_max*0.6
cent = 'center'
ax1.text((fam_axes[k+1]+fam_axes[k])/2,ym_t, FAMILIES[k], horizontalalignment=cent)
ax1.text((fam_axes[-1]+np.max(fam_range))/2,ym_t, FAMILIES[-1], horizontalalignment='center')
ax1.autoscale(enable=True, axis='x', tight=True)
ax1.tick_params(axis='x', which='major', labelsize=6)
plt.xlabel("Prediction",fontsize=10)
plt.ylabel("Counts",fontsize=10)
# plt.legend(plots,("species_1","species_2","species_3","species_4"))
leg_list = [ "species_{}".format(k+1) for k in range(num_pred) ]
plt.legend(plots_1,leg_list)
print("Results/"+froot+"_gen2.png")
plt.savefig("Results/"+froot+"_gen2.png",dpi = 300)
plt.figure(3)
# plt.xlabel("Prediction",fontsize=10,rotation='vertical')
# plt.ylabel("Counts",fontsize=10)
r_ticks = list(range(int(np.floor(ax2.get_rmin())),int(np.ceil(ax2.get_rmax())+1)))
ax2.set_rgrids(r_ticks, labels = ['10e'+str(r) for r in r_ticks])
ax2.set_thetagrids([f*thet for f in fam_axes],labels = FAMILIES)
plt.legend(plots_2,leg_list)
# plt.legend(plots,("species_1","species_2","species_3","species_4"))
# print("Results/"+froot+"_gen2_polar.png")
# plt.savefig("Results/"+froot+"_gen2_polar.png",dpi = 300)
# plt.show()
def main():
parser = build_parser()
options = parser.parse_args()
if options.subset:
subset = int(options.subset)
else:
subset = -1
print(options.session)
# opens the user specified session
if options.session:
with open(os.path.join("Sessions",options.session),'r') as f:
session = json.load(f)
# opens the default session
else:
with open(DEFAULT_SESSION,'r') as f:
session = json.load(f)
# set variables from loaded session data
# print(session)
file_path = session["file_path"]
if "output_file" in session:
output_file = session["output_file"]
else:
output_file = ''
if "output_file_root" in session:
output_file_root = session["output_file_root"]
else:
output_file_root = ''
if not (output_file or output_file_root):
raise ValueError('output_file or output_file_root must be defined in session file.')
manual_peak_selection = session["manual_peak_selection"]
known_family = session["known_family"]
chemistry = session["chemistry"]
diffraction = session["diffraction"]
print('file inputs')
print(output_file)
print(output_file_root)
mode = ""
if diffraction:
if chemistry:
mode="DiffChem"
else:
mode="DiffOnly"
else:
if chemistry:
raise ValueError('Running chemistry only predictions is currently not implemented')
else:
raise ValueError('Invalid prediction type. Either diffraction or chemistry must be enabled')
if known_family and known_family=='yes':
print('known family')
crystal_family = session["crystal_family"]
prediction_per_level[0] = 1
else:
crystal_family = None
# Load user from provided path, [IN PROGRESS]
if session["user_info"]:
with open(session["user_info"],'r') as f:
user_info = json.load(f)
else:
with open(DEFAULT_USER,'r') as f:
user_info = json.load(f)
with open(session["server_info"],'r') as f:
server_info = json.load(f)
if server_info['URL']:
url = server_info['URL']
else:
raise ValueError('you need to have the server URL provided to you')
chem_vec = cf.check_for_chemistry(session)
print(file_path)
print('---starting loop--')
# Determine if the path is a directory or a file
if os.path.isdir(file_path):
print("loading files from directory")
file_paths = []
for dirpath,dirnames,fpath in os.walk(file_path):
for path in fpath:
if not path[0] == '.':
file_paths.append(os.path.join(dirpath,path))
print("found {} files to load.".format(len(file_paths)))
else:
file_paths = [file_path]
if not os.path.exists("Results"):
os.makedirs("Results")
print(file_paths)
for f_path in file_paths:
# Load Data from specified file (DM3, TIFF, CSV etc....)
print("loading data from {}".format(f_path))
image_data,scale = ClientSide2.Load_Profile(f_path)
print("I successfully loaded the data")
# print(scale)
print(options.figures_only)
print(options.data_only)
        # defining filepaths here to facilitate loading data.
froot = os.path.splitext(os.path.basename(f_path))[0]
if output_file_root:
outfile = 'Results/'+output_file_root+froot+'.json'
outfile_2 = 'Results/'+output_file_root+froot+'_peaks.json'
else:
output_file_root='' #for the figure filenames
[outroot, ext] = os.path.splitext(output_file)
if not ext=='.json':
output_file = outroot+'.json'
output_file_2 = outroot+'_peaks.json'
outfile = 'Results/'+output_file
outfile_2 = 'Results/'+output_file_2
# optional skipping the data creation
if options.figures_only:
print('Only creating figures')
with open(outfile, 'r') as fp:
guesses = json.load(fp)
else:
if diffraction:
                peak_locs,peaks_h = ClientSide2.Find_Peaks(image_data, scale, **DEFAULT_FILTER_SETTINGS)
# Choose which peaks to classify on
if manual_peak_selection:
peak_locs = cf.choose_peaks(peak_locs,peaks_h)
#raise NotImplementedError
else:
peak_locs = []
peaks_h = []
# Script hangs when there are too many peaks.
# TODO: implement something better.
if len(peak_locs['d_spacing'])>25:
print("\n\n======================================================")
print("there are "+ str(len(peak_locs['d_spacing']))+" peaks, which is too many.")
print(f_path)
print("======================================================\n\n")
continue
common_peaks,guesses = combination_peaks(peak_locs, chem_vec, mode, froot, crystal_family, user_info, url, prediction_per_level, subset, num_peaks)
# print("--- peak_locs ---")
# print(peak_locs)
guesses["pk_d_spacing"] = peak_locs["d_spacing"].tolist()
guesses["pk_vec"] = peak_locs["vec"]
print(guesses)
# save data
with open(outfile, 'w') as fp:
json.dump(guesses, fp)
with open(outfile_2, 'w') as fp:
json.dump(common_peaks, fp)
if options.data_only:
print('skipping figures')
else:
make_figures(guesses,crystal_family,output_file_root+froot)
            # TODO: Split up this function and enable plotting on precomputed data.
# plt.show(block=False)
if __name__ == "__main__":
main()
| true
| true
|
790e082c63165426a7ac60263e8c58bd75754c64
| 2,006
|
py
|
Python
|
tests/test_api.py
|
prtkv/biathlonresults
|
0910322c29ad8dd7612d36e1d9914277ee48c336
|
[
"MIT"
] | 7
|
2020-01-23T09:25:24.000Z
|
2022-03-05T13:43:03.000Z
|
tests/test_api.py
|
prtkv/biathlonresults
|
0910322c29ad8dd7612d36e1d9914277ee48c336
|
[
"MIT"
] | null | null | null |
tests/test_api.py
|
prtkv/biathlonresults
|
0910322c29ad8dd7612d36e1d9914277ee48c336
|
[
"MIT"
] | 2
|
2020-08-24T18:42:39.000Z
|
2022-01-27T10:19:37.000Z
|
import biathlonresults as api
def test_cups():
res = api.cups(1819)
assert isinstance(res, list)
assert len(res) == 37
def test_cup_results():
res = api.cup_results("BT1819SWRLCP__SMTS")
assert isinstance(res, dict)
assert isinstance(res["Rows"], list)
assert res["Rows"][0]["Name"] == "BOE Johannes Thingnes"
def test_athletes():
res = api.athletes("boe", "johannes")
assert isinstance(res, dict)
assert isinstance(res["Athletes"], list)
assert "boe" in res["Athletes"][0]["FamilyName"].lower()
assert "johannes" in res["Athletes"][0]["GivenName"].lower()
def test_cisbios():
res = api.cisbios("BTNOR11605199301")
assert isinstance(res, dict)
assert res["FullName"] == "Johannes Thingnes BOE"
def test_all_results():
# Raphael Poiree
res = api.all_results("BTFRA10908197401")
assert isinstance(res, dict)
assert isinstance(res["Results"], list)
assert res["Results"][0]["SO"] == 2
assert len(res["Results"]) == 329
def test_events():
res = api.events(1819, 1)
assert isinstance(res, list)
assert len(res) == 10
assert res[0]["Level"] == 1
assert res[-1]["ShortDescription"] == "Oslo Holmenkollen"
def test_competitions():
# Pokljuka 1819
res = api.competitions("BT1819SWRLCP01")
assert isinstance(res, list)
assert len(res) == 8
assert res[-1]["ShortDescription"] == "Women 10km Pursuit"
def test_results():
# Pokljuka 1819 W PU
res = api.results("BT1819SWRLCP01SWPU")
assert isinstance(res, dict)
assert isinstance(res["Results"], list)
assert len(res["Results"]) == 60
assert res["Results"][0]["ResultOrder"] == 1
assert res["Results"][0]["Name"] == "MAKARAINEN Kaisa"
def test_stats():
# podiums men stat
res = api.stats("WCPOD_M", "WCPOD", "ATH", "M")
assert isinstance(res, dict)
assert isinstance(res["Rows"], list)
# in case someone breaks Bjoerndalen's record
assert int(res["Rows"][0]["Value"]) >= 199
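# These tests appear to query the live biathlonresults service, so results depend on the
# remote data; they can be run with a test runner such as `pytest tests/test_api.py`
# (pytest is an assumption here, based on the bare module-level test_* functions).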
| 27.479452
| 64
| 0.649053
|
import biathlonresults as api
def test_cups():
res = api.cups(1819)
assert isinstance(res, list)
assert len(res) == 37
def test_cup_results():
res = api.cup_results("BT1819SWRLCP__SMTS")
assert isinstance(res, dict)
assert isinstance(res["Rows"], list)
assert res["Rows"][0]["Name"] == "BOE Johannes Thingnes"
def test_athletes():
res = api.athletes("boe", "johannes")
assert isinstance(res, dict)
assert isinstance(res["Athletes"], list)
assert "boe" in res["Athletes"][0]["FamilyName"].lower()
assert "johannes" in res["Athletes"][0]["GivenName"].lower()
def test_cisbios():
res = api.cisbios("BTNOR11605199301")
assert isinstance(res, dict)
assert res["FullName"] == "Johannes Thingnes BOE"
def test_all_results():
res = api.all_results("BTFRA10908197401")
assert isinstance(res, dict)
assert isinstance(res["Results"], list)
assert res["Results"][0]["SO"] == 2
assert len(res["Results"]) == 329
def test_events():
res = api.events(1819, 1)
assert isinstance(res, list)
assert len(res) == 10
assert res[0]["Level"] == 1
assert res[-1]["ShortDescription"] == "Oslo Holmenkollen"
def test_competitions():
res = api.competitions("BT1819SWRLCP01")
assert isinstance(res, list)
assert len(res) == 8
assert res[-1]["ShortDescription"] == "Women 10km Pursuit"
def test_results():
res = api.results("BT1819SWRLCP01SWPU")
assert isinstance(res, dict)
assert isinstance(res["Results"], list)
assert len(res["Results"]) == 60
assert res["Results"][0]["ResultOrder"] == 1
assert res["Results"][0]["Name"] == "MAKARAINEN Kaisa"
def test_stats():
res = api.stats("WCPOD_M", "WCPOD", "ATH", "M")
assert isinstance(res, dict)
assert isinstance(res["Rows"], list)
assert int(res["Rows"][0]["Value"]) >= 199
| true
| true
|
790e088d126ecd746c50545a8644616f0f0f746e
| 9,297
|
py
|
Python
|
lspeas/analysis/stent_migration.py
|
almarklein/stentseg
|
48255fffdc2394d1dc4ce2208c9a91e1d4c35a46
|
[
"BSD-3-Clause"
] | 1
|
2020-08-28T16:34:10.000Z
|
2020-08-28T16:34:10.000Z
|
lspeas/analysis/stent_migration.py
|
almarklein/stentseg
|
48255fffdc2394d1dc4ce2208c9a91e1d4c35a46
|
[
"BSD-3-Clause"
] | null | null | null |
lspeas/analysis/stent_migration.py
|
almarklein/stentseg
|
48255fffdc2394d1dc4ce2208c9a91e1d4c35a46
|
[
"BSD-3-Clause"
] | 1
|
2021-04-25T06:59:36.000Z
|
2021-04-25T06:59:36.000Z
|
""" Measure stent migration relative to renals
Option to visualize 2 longitudinal scans
"""
import sys, os
import visvis as vv
import numpy as np
from stentseg.utils.datahandling import select_dir, loadvol, loadmodel, loadmesh
from stentseg.stentdirect.stentgraph import create_mesh
from stentseg.utils.visualization import show_ctvolume
from stentseg.utils import _utils_GUI, PointSet
from stentseg.utils.picker import pick3d
from stentseg.utils.centerline import find_centerline, points_from_mesh, smooth_centerline, dist_over_centerline
from lspeas.analysis.utils_analysis import ExcelAnalysis
from stentseg.utils.utils_graphs_pointsets import point_in_pointcloud_closest_to_p
#sys.path.insert(0, os.path.abspath('..')) # parent, 2 folders further in pythonPath
#import utils_analysis
#from utils_analysis import ExcelAnalysis
#import get_anaconda_ringparts
from lspeas.utils.get_anaconda_ringparts import _get_model_hooks,get_midpoints_peaksvalleys,identify_peaks_valleys
#todo: from outline to script:
## Initialize
# select the ssdf basedir
basedir = select_dir(r'F/LSPEAS\LSPEAS_ssdf',
r'F/LSPEAS_ssdf_backup')
basedirstl = select_dir(r'D:\Profiles\koenradesma\Dropbox\UTdrive\MedDataMimics\LSPEAS_Mimics\Tests')
# select dataset
ptcode = 'LSPEAS_003'
ctcodes = ctcode1, ctcode2 = 'discharge', '12months' # ctcode2 = None if no second code
cropname = 'ring'
modelname = 'modelavgreg'
vesselname1 = 'LSPEAS_003_D_MK Smoothed_Wrapped1.0_edit-smart 4_copy_001.stl'
# LSPEAS_003_D_MK Smoothed_Wrapped1.0_edit-smart 4_copy_noRenals 7_001
vesselname2 = 'LSPEAS_003_12M_MK Smoothed_Wrapped1.0_smart 3_copy_001.stl'
sheet_renals_obs = 'renal locations obs1'
showAxis = True # True or False
showVol = 'ISO' # MIP or ISO or 2D or None
ringpart = True # True; False
clim0 = (0,2500)
# clim0 = -550,500
isoTh = 250
meshradius = 0.7
# create class object for excel analysis
foo = ExcelAnalysis() # excel locations initialized in class
## Renal origin coordinates: input by user/read excel
# coordinates, left and right most caudal renal
# ctcode1
xrenal1, yrenal1, zrenal1 = 132.7, 89.2, 85.5
renal1 = PointSet(list((xrenal1, yrenal1, zrenal1)))
# ctcode2
if ctcode2:
xrenal2, yrenal2, zrenal2 = 171, 165.1, 39.5
renal2 = PointSet(list((xrenal2, yrenal2, zrenal2)))
# renal_left, renal_right = foo.readRenalsExcel(sheet_renals_obs, ptcode, ctcode1)
# renal1 = renal_left
## Load (dynamic) stent models, vessel, ct
# Load static CT image to add as reference
s = loadvol(basedir, ptcode, ctcode1, cropname, 'avgreg')
vol1 = s.vol
if ctcode2:
s = loadvol(basedir, ptcode, ctcode2, cropname, 'avgreg')
vol2 = s.vol
# load stent model
s2 = loadmodel(basedir, ptcode, ctcode1, cropname, modelname)
model1 = s2.model
modelmesh1 = create_mesh(model1, meshradius)
if ctcode2:
s2 = loadmodel(basedir, ptcode, ctcode2, cropname, modelname)
model2 = s2.model
modelmesh2 = create_mesh(model2, meshradius)
# Load vessel mesh (output Mimics)
vessel1 = loadmesh(basedirstl,ptcode,vesselname1) #inverts Z
if ctcode2:
vessel2 = loadmesh(basedirstl,ptcode,vesselname2) #inverts Z
# get pointset from STL
ppvessel1 = points_from_mesh(vessel1, invertZ = False) # removes duplicates
if ctcode2:
ppvessel2 = points_from_mesh(vessel2, invertZ = False) # removes duplicates
## Create centerline: input start/end
# ctcode1
c1_start1 = (153, 86, 104.5) # distal end
c1_ends = [(142, 94, 64.5)] # either single point or multiple
centerline1 = find_centerline(ppvessel1, c1_start1, c1_ends, 0.5, ndist=20, regfactor=0.2, regsteps=10)
centerline1 = smooth_centerline(centerline1, 30) # 20 iterations for stepsize 0.5 is reasonable
# ctcode2
if ctcode2:
c2_start1 = (190, 165, 60) # distal end
c2_ends = [(179, 169, 17)] # either single point or multiple
centerline2 = find_centerline(ppvessel2, c2_start1, c2_ends, 0.5, ndist=20, regfactor=0.2, regsteps=10)
centerline2 = smooth_centerline(centerline2, 30)
# scipy.ndimage.interpolation.zoom
# scipy.interpolate.interpn
## Get peak and valley points
if False:
# ===== OPTION automated detection =====
# get midpoints peaks valleys
midpoints_peaks_valleys = get_midpoints_peaksvalleys(model1)
    # from peaks valley pointcloud identify peaks and valleys
R1_left,R2_left,R1_right,R2_right,R1_ant,R2_ant,R1_post,R2_post = identify_peaks_valleys(
midpoints_peaks_valleys, model1, vol1,vis=True)
# ===== OPTION excel =====
R1 = foo.readRingExcel(ptcode, ctcode1, ring='R1')
R1_ant, R1_post, R1_left, R1_right = R1[0], R1[1], R1[2], R1[3]
##
#todo: determine the orientation of the aorta by means of the 4 hooks -> average angle
# correct the z distance with this
R2 = foo.readRingExcel(ptcode, ctcode1, ring='R2')
R2_ant, R2_post, R2_left, R2_right = R2[0], R2[1], R2[2], R2[3]
def get_stent_orientation(R1, R2):
R1, R2 = np.asarray(R1), np.asarray(R2)
R1, R2 = PointSet(R1), PointSet(R2) # turn array ndim2 into PointSet
R1_ant, R1_post, R1_left, R1_right = R1[0], R1[1], R1[2], R1[3]
R2_ant, R2_post, R2_left, R2_right = R2[0], R2[1], R2[2], R2[3]
refvector = [0,0,10] # z-axis
    angle = (R1_ant-R2_ant).angle(refvector) # order does not matter
    return angle
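# Illustrative call (R1 and R2 as read from readRingExcel above); the angle computed is
# between the vector from R2_ant to R1_ant and the z-axis reference vector [0,0,10]:
#   get_stent_orientation(R1, R2)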
## Calculate distance ring peaks and valleys to renal
# ===== in Z =====
# proximal to renal is positive; origin is proximal
z_dist_R1_ant = list(renal1.flat)[2]-R1_ant[2]
z_dist_R1_post = list(renal1.flat)[2]-R1_post[2]
z_dist_R1_left = list(renal1.flat)[2]-R1_left[2]
z_dist_R1_right = list(renal1.flat)[2]-R1_right[2]
# ===== along centerline =====
# point of centerline closest to renal
renal1_and_cl_point = point_in_pointcloud_closest_to_p(centerline1, renal1)
if ctcode2:
renal2_and_cl_point = point_in_pointcloud_closest_to_p(centerline2, renal2)
# point of centerline closest to peaks valleys
R1_left_and_cl_point = point_in_pointcloud_closest_to_p(centerline1, R1_left)
R1_right_and_cl_point = point_in_pointcloud_closest_to_p(centerline1, R1_right)
R1_ant_and_cl_point = point_in_pointcloud_closest_to_p(centerline1, R1_ant)
R1_post_and_cl_point = point_in_pointcloud_closest_to_p(centerline1, R1_post)
# calculate distance over centerline
dist_for_R1_left = dist_over_centerline(centerline1, R1_left_and_cl_point[0], renal1_and_cl_point[0])
dist_for_R1_right = dist_over_centerline(centerline1, R1_right_and_cl_point[0], renal1_and_cl_point[0])
dist_for_R1_ant = dist_over_centerline(centerline1, R1_ant_and_cl_point[0], renal1_and_cl_point[0])
dist_for_R1_post = dist_over_centerline(centerline1, R1_post_and_cl_point[0], renal1_and_cl_point[0])
# Main outcome 1: distance 2nd ring valleys to renal
# Main outcome 2: migration 2nd ring valleys from discharge to 1, 6, 12 months
## Visualize
f = vv.figure(2); vv.clf()
f.position = 0.00, 22.00, 1920.00, 1018.00
alpha = 0.5
if ctcode2:
a1 = vv.subplot(121)
else:
a1 = vv.gca()
show_ctvolume(vol1, model1, showVol=showVol, clim=clim0, isoTh=isoTh)
pick3d(vv.gca(), vol1)
model1.Draw(mc='b', mw = 10, lc='g')
vm = vv.mesh(modelmesh1)
vm.faceColor = 'g'
# m = vv.mesh(vessel1)
# m.faceColor = (1,0,0, alpha) # red
# vis vessel, centerline, renal origo, peaks valleys R1
vv.plot(ppvessel1, ms='.', ls='', mc= 'r', alpha=0.2, mw = 7, axes = a1) # vessel
vv.plot(PointSet(list(c1_start1)), ms='.', ls='', mc='g', mw=18, axes = a1) # start1
vv.plot([e[0] for e in c1_ends], [e[1] for e in c1_ends], [e[2] for e in c1_ends], ms='.', ls='', mc='b', mw=18, axes = a1) # ends
vv.plot(centerline1, ms='.', ls='', mw=8, mc='y', axes = a1)
vv.plot(renal1, ms='.', ls='', mc='m', mw=18, axes = a1)
vv.plot(renal1_and_cl_point, ms='.', ls='-', mc='m', mw=18, axes = a1)
# vv.plot(R1_left_and_cl_point, ms='.', ls='-', mc='c', mw=18, axes = a1)
# vv.plot(R1_right_and_cl_point, ms='.', ls='-', mc='c', mw=18, axes = a1)
# vv.plot(R1_ant_and_cl_point, ms='.', ls='-', mc='c', mw=18, axes = a1)
# vv.plot(R1_post_and_cl_point, ms='.', ls='-', mc='c', mw=18, axes = a1)
vv.xlabel('x (mm)');vv.ylabel('y (mm)');vv.zlabel('z (mm)')
vv.title('Analysis for model LSPEAS %s - %s' % (ptcode[7:], ctcode1))
a1.axis.axisColor= 1,1,1
a1.bgcolor= 0,0,0
a1.daspect= 1, 1, -1 # z-axis flipped
a1.axis.visible = showAxis
if ctcode2:
a2 = vv.subplot(122)
show_ctvolume(vol2, model2, showVol=showVol, clim=clim0, isoTh=isoTh)
pick3d(vv.gca(), vol2)
model2.Draw(mc='b', mw = 10, lc='g')
vm = vv.mesh(modelmesh2)
vm.faceColor = 'g'
# m = vv.mesh(vessel2)
# m.faceColor = (1,0,0, alpha) # red
# vis vessel, centerline, renal origo, peaks valleys R1
vv.plot(ppvessel2, ms='.', ls='', mc= 'r', alpha=0.2, mw = 7, axes = a2) # vessel
vv.plot(PointSet(list(c2_start1)), ms='.', ls='', mc='g', mw=18, axes = a2) # start1
vv.plot([e[0] for e in c2_ends], [e[1] for e in c2_ends], [e[2] for e in c2_ends], ms='.', ls='', mc='b', mw=18, axes = a2) # ends
vv.plot(centerline2, ms='.', ls='', mw=8, mc='y', axes = a2)
vv.plot(renal2, ms='.', ls='', mc='m', mw=18, axes = a2)
vv.plot(renal2_and_cl_point, ms='.', ls='-', mc='m', mw=18, axes = a2)
vv.xlabel('x (mm)');vv.ylabel('y (mm)');vv.zlabel('z (mm)')
vv.title('Analysis for model LSPEAS %s - %s' % (ptcode[7:], ctcode2))
a2.axis.axisColor= 1,1,1
a2.bgcolor= 0,0,0
a2.daspect= 1, 1, -1 # z-axis flipped
a2.axis.visible = showAxis
| 40.776316
| 136
| 0.713994
|
import sys, os
import visvis as vv
import numpy as np
from stentseg.utils.datahandling import select_dir, loadvol, loadmodel, loadmesh
from stentseg.stentdirect.stentgraph import create_mesh
from stentseg.utils.visualization import show_ctvolume
from stentseg.utils import _utils_GUI, PointSet
from stentseg.utils.picker import pick3d
from stentseg.utils.centerline import find_centerline, points_from_mesh, smooth_centerline, dist_over_centerline
from lspeas.analysis.utils_analysis import ExcelAnalysis
from stentseg.utils.utils_graphs_pointsets import point_in_pointcloud_closest_to_p
from lspeas.utils.get_anaconda_ringparts import _get_model_hooks,get_midpoints_peaksvalleys,identify_peaks_valleys
basedir = select_dir(r'F/LSPEAS\LSPEAS_ssdf',
r'F/LSPEAS_ssdf_backup')
basedirstl = select_dir(r'D:\Profiles\koenradesma\Dropbox\UTdrive\MedDataMimics\LSPEAS_Mimics\Tests')
ptcode = 'LSPEAS_003'
ctcodes = ctcode1, ctcode2 = 'discharge', '12months'
cropname = 'ring'
modelname = 'modelavgreg'
vesselname1 = 'LSPEAS_003_D_MK Smoothed_Wrapped1.0_edit-smart 4_copy_001.stl'
vesselname2 = 'LSPEAS_003_12M_MK Smoothed_Wrapped1.0_smart 3_copy_001.stl'
sheet_renals_obs = 'renal locations obs1'
showAxis = True
showVol = 'ISO'
ringpart = True
clim0 = (0,2500)
isoTh = 250
meshradius = 0.7
foo = ExcelAnalysis()
xrenal1, yrenal1, zrenal1 = 132.7, 89.2, 85.5
renal1 = PointSet(list((xrenal1, yrenal1, zrenal1)))
if ctcode2:
xrenal2, yrenal2, zrenal2 = 171, 165.1, 39.5
renal2 = PointSet(list((xrenal2, yrenal2, zrenal2)))
s = loadvol(basedir, ptcode, ctcode1, cropname, 'avgreg')
vol1 = s.vol
if ctcode2:
s = loadvol(basedir, ptcode, ctcode2, cropname, 'avgreg')
vol2 = s.vol
s2 = loadmodel(basedir, ptcode, ctcode1, cropname, modelname)
model1 = s2.model
modelmesh1 = create_mesh(model1, meshradius)
if ctcode2:
s2 = loadmodel(basedir, ptcode, ctcode2, cropname, modelname)
model2 = s2.model
modelmesh2 = create_mesh(model2, meshradius)
vessel1 = loadmesh(basedirstl,ptcode,vesselname1)
if ctcode2:
vessel2 = loadmesh(basedirstl,ptcode,vesselname2)
ppvessel1 = points_from_mesh(vessel1, invertZ = False)
if ctcode2:
ppvessel2 = points_from_mesh(vessel2, invertZ = False)
c1_start1 = (153, 86, 104.5)
c1_ends = [(142, 94, 64.5)]
centerline1 = find_centerline(ppvessel1, c1_start1, c1_ends, 0.5, ndist=20, regfactor=0.2, regsteps=10)
centerline1 = smooth_centerline(centerline1, 30)
if ctcode2:
c2_start1 = (190, 165, 60)
c2_ends = [(179, 169, 17)]
centerline2 = find_centerline(ppvessel2, c2_start1, c2_ends, 0.5, ndist=20, regfactor=0.2, regsteps=10)
centerline2 = smooth_centerline(centerline2, 30)
midpoints_peaks_valleys = get_midpoints_peaksvalleys(model1)
R1_left,R2_left,R1_right,R2_right,R1_ant,R2_ant,R1_post,R2_post = identify_peaks_valleys(
midpoints_peaks_valleys, model1, vol1,vis=True)
R1 = foo.readRingExcel(ptcode, ctcode1, ring='R1')
R1_ant, R1_post, R1_left, R1_right = R1[0], R1[1], R1[2], R1[3]
R2 = foo.readRingExcel(ptcode, ctcode1, ring='R2')
R2_ant, R2_post, R2_left, R2_right = R2[0], R2[1], R2[2], R2[3]
def get_stent_orientation(R1, R2):
R1, R2 = np.asarray(R1), np.asarray(R2)
R1, R2 = PointSet(R1), PointSet(R2)
R1_ant, R1_post, R1_left, R1_right = R1[0], R1[1], R1[2], R1[3]
R2_ant, R2_post, R2_left, R2_right = R2[0], R2[1], R2[2], R2[3]
refvector = [0,0,10]
    angle = (R1_ant-R2_ant).angle(refvector)
    return angle
z_dist_R1_ant = list(renal1.flat)[2]-R1_ant[2]
z_dist_R1_post = list(renal1.flat)[2]-R1_post[2]
z_dist_R1_left = list(renal1.flat)[2]-R1_left[2]
z_dist_R1_right = list(renal1.flat)[2]-R1_right[2]
renal1_and_cl_point = point_in_pointcloud_closest_to_p(centerline1, renal1)
if ctcode2:
renal2_and_cl_point = point_in_pointcloud_closest_to_p(centerline2, renal2)
R1_left_and_cl_point = point_in_pointcloud_closest_to_p(centerline1, R1_left)
R1_right_and_cl_point = point_in_pointcloud_closest_to_p(centerline1, R1_right)
R1_ant_and_cl_point = point_in_pointcloud_closest_to_p(centerline1, R1_ant)
R1_post_and_cl_point = point_in_pointcloud_closest_to_p(centerline1, R1_post)
dist_for_R1_left = dist_over_centerline(centerline1, R1_left_and_cl_point[0], renal1_and_cl_point[0])
dist_for_R1_right = dist_over_centerline(centerline1, R1_right_and_cl_point[0], renal1_and_cl_point[0])
dist_for_R1_ant = dist_over_centerline(centerline1, R1_ant_and_cl_point[0], renal1_and_cl_point[0])
dist_for_R1_post = dist_over_centerline(centerline1, R1_post_and_cl_point[0], renal1_and_cl_point[0])
f = vv.figure(2); vv.clf()
f.position = 0.00, 22.00, 1920.00, 1018.00
alpha = 0.5
if ctcode2:
a1 = vv.subplot(121)
else:
a1 = vv.gca()
show_ctvolume(vol1, model1, showVol=showVol, clim=clim0, isoTh=isoTh)
pick3d(vv.gca(), vol1)
model1.Draw(mc='b', mw = 10, lc='g')
vm = vv.mesh(modelmesh1)
vm.faceColor = 'g'
vv.plot(ppvessel1, ms='.', ls='', mc= 'r', alpha=0.2, mw = 7, axes = a1)
vv.plot(PointSet(list(c1_start1)), ms='.', ls='', mc='g', mw=18, axes = a1)
vv.plot([e[0] for e in c1_ends], [e[1] for e in c1_ends], [e[2] for e in c1_ends], ms='.', ls='', mc='b', mw=18, axes = a1)
vv.plot(centerline1, ms='.', ls='', mw=8, mc='y', axes = a1)
vv.plot(renal1, ms='.', ls='', mc='m', mw=18, axes = a1)
vv.plot(renal1_and_cl_point, ms='.', ls='-', mc='m', mw=18, axes = a1)
vv.xlabel('x (mm)');vv.ylabel('y (mm)');vv.zlabel('z (mm)')
vv.title('Analysis for model LSPEAS %s - %s' % (ptcode[7:], ctcode1))
a1.axis.axisColor= 1,1,1
a1.bgcolor= 0,0,0
a1.daspect= 1, 1, -1
a1.axis.visible = showAxis
if ctcode2:
a2 = vv.subplot(122)
show_ctvolume(vol2, model2, showVol=showVol, clim=clim0, isoTh=isoTh)
pick3d(vv.gca(), vol2)
model2.Draw(mc='b', mw = 10, lc='g')
vm = vv.mesh(modelmesh2)
vm.faceColor = 'g'
vv.plot(ppvessel2, ms='.', ls='', mc= 'r', alpha=0.2, mw = 7, axes = a2)
vv.plot(PointSet(list(c2_start1)), ms='.', ls='', mc='g', mw=18, axes = a2)
vv.plot([e[0] for e in c2_ends], [e[1] for e in c2_ends], [e[2] for e in c2_ends], ms='.', ls='', mc='b', mw=18, axes = a2)
vv.plot(centerline2, ms='.', ls='', mw=8, mc='y', axes = a2)
vv.plot(renal2, ms='.', ls='', mc='m', mw=18, axes = a2)
vv.plot(renal2_and_cl_point, ms='.', ls='-', mc='m', mw=18, axes = a2)
vv.xlabel('x (mm)');vv.ylabel('y (mm)');vv.zlabel('z (mm)')
vv.title('Analysis for model LSPEAS %s - %s' % (ptcode[7:], ctcode2))
a2.axis.axisColor= 1,1,1
a2.bgcolor= 0,0,0
a2.daspect= 1, 1, -1
a2.axis.visible = showAxis
| true
| true
|
790e08c0aa1a381aa0bc42733bf546e522788fcf
| 1,322
|
py
|
Python
|
issues/models.py
|
mariofix/svsagro
|
7eb05e59128ee8ccdab00c2945bfa74d4090b466
|
[
"MIT"
] | null | null | null |
issues/models.py
|
mariofix/svsagro
|
7eb05e59128ee8ccdab00c2945bfa74d4090b466
|
[
"MIT"
] | null | null | null |
issues/models.py
|
mariofix/svsagro
|
7eb05e59128ee8ccdab00c2945bfa74d4090b466
|
[
"MIT"
] | null | null | null |
from svs.models import Customer
from django.db import models
from django.utils import timezone
from svs.models import Customer, Machine
from core.models import CoreUser
from markdownx.models import MarkdownxField
STATUSES = (
("pending_our", "Pending - Our Side"),
("pending_their", "Pending - Their Side"),
("timeout", "More Than a Week"),
("closed", "Closed 😁"),
)
class Issue(models.Model):
created_at = models.DateTimeField(default=timezone.now)
title = models.CharField(max_length=255)
contact = models.CharField(max_length=255)
customer = models.ForeignKey(Customer, on_delete=models.CASCADE)
user = models.ForeignKey(CoreUser, on_delete=models.CASCADE)
machine = models.ForeignKey(Machine, on_delete=models.CASCADE)
description = MarkdownxField()
status = models.CharField(
        max_length=20, choices=STATUSES, null=False, default="pending_our"
)
def __str__(self) -> str:
return self.title
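# Illustrative usage (assumes existing Customer, CoreUser and Machine instances):
#   Issue.objects.create(title="Pump failure", contact="Jane Doe",
#                        customer=customer, user=user, machine=machine,
#                        description="Markdown body ...", status="pending_our")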
class IssueEntry(models.Model):
issue = models.ForeignKey(Issue, on_delete=models.CASCADE)
created_at = models.DateTimeField(default=timezone.now)
title = models.CharField(max_length=255)
description = MarkdownxField()
def __str__(self) -> str:
return self.title
class Meta:
verbose_name_plural = "Entries"
| 30.744186
| 75
| 0.715582
|
from svs.models import Customer
from django.db import models
from django.utils import timezone
from svs.models import Customer, Machine
from core.models import CoreUser
from markdownx.models import MarkdownxField
STATUSES = (
("pending_our", "Pending - Our Side"),
("pending_their", "Pending - Their Side"),
("timeout", "More Than a Week"),
("closed", "Closed 😁"),
)
class Issue(models.Model):
created_at = models.DateTimeField(default=timezone.now)
title = models.CharField(max_length=255)
contact = models.CharField(max_length=255)
customer = models.ForeignKey(Customer, on_delete=models.CASCADE)
user = models.ForeignKey(CoreUser, on_delete=models.CASCADE)
machine = models.ForeignKey(Machine, on_delete=models.CASCADE)
description = MarkdownxField()
status = models.CharField(
        max_length=20, choices=STATUSES, null=False, default="pending_our"
)
def __str__(self) -> str:
return self.title
class IssueEntry(models.Model):
issue = models.ForeignKey(Issue, on_delete=models.CASCADE)
created_at = models.DateTimeField(default=timezone.now)
title = models.CharField(max_length=255)
description = MarkdownxField()
def __str__(self) -> str:
return self.title
class Meta:
verbose_name_plural = "Entries"
| true
| true
|
790e090aa563b12ea93463b7f3475dec12766fb1
| 5,379
|
py
|
Python
|
stancode_project/baby_names/babygraphics.py
|
beomgyutxt/stanCode_project
|
6e1e09b40c2104e98c0cf97478fbec0e345be21b
|
[
"MIT"
] | null | null | null |
stancode_project/baby_names/babygraphics.py
|
beomgyutxt/stanCode_project
|
6e1e09b40c2104e98c0cf97478fbec0e345be21b
|
[
"MIT"
] | null | null | null |
stancode_project/baby_names/babygraphics.py
|
beomgyutxt/stanCode_project
|
6e1e09b40c2104e98c0cf97478fbec0e345be21b
|
[
"MIT"
] | null | null | null |
"""
SC101 Baby Names Project
Adapted from Nick Parlante's Baby Names assignment by
Jerry Liao.
Plots the historical popularity (rank per decade, 1900-2010) of
user-requested baby names as coloured trend lines on a tkinter canvas.
"""
import tkinter
import babynames
import babygraphicsgui as gui
FILENAMES = [
'data/full/baby-1900.txt', 'data/full/baby-1910.txt',
'data/full/baby-1920.txt', 'data/full/baby-1930.txt',
'data/full/baby-1940.txt', 'data/full/baby-1950.txt',
'data/full/baby-1960.txt', 'data/full/baby-1970.txt',
'data/full/baby-1980.txt', 'data/full/baby-1990.txt',
'data/full/baby-2000.txt', 'data/full/baby-2010.txt'
]
CANVAS_WIDTH = 1000
CANVAS_HEIGHT = 600
YEARS = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010]
GRAPH_MARGIN_SIZE = 20
COLORS = ['red', 'purple', 'green', 'blue']
TEXT_DX = 2
LINE_WIDTH = 2
MAX_RANK = 1000
def get_x_coordinate(width, year_index):
"""
Given the width of the canvas and the index of the current year
in the YEARS list, returns the x coordinate of the vertical
line associated with that year.
Input:
width (int): The width of the canvas
year_index (int): The index of the current year in the YEARS list
Returns:
x_coordinate (int): The x coordinate of the vertical line associated
with the specified year.
"""
space = (width - GRAPH_MARGIN_SIZE*2) / len(YEARS) # space between two lines
x_coordinate = GRAPH_MARGIN_SIZE + year_index * space # x coordinate of the vertical line
return x_coordinate
def draw_fixed_lines(canvas):
"""
Erases all existing information on the given canvas and then
draws the fixed background lines on it.
Input:
canvas (Tkinter Canvas): The canvas on which we are drawing.
Returns:
This function does not return any value.
"""
canvas.delete('all') # delete all existing lines from the canvas
# Write your code below this line
#################################
# horizontal line (upper)
canvas.create_line(GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE, CANVAS_WIDTH-GRAPH_MARGIN_SIZE,
GRAPH_MARGIN_SIZE, width=LINE_WIDTH)
# horizontal line (bottom)
canvas.create_line(GRAPH_MARGIN_SIZE, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, CANVAS_WIDTH - GRAPH_MARGIN_SIZE,
CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, width=LINE_WIDTH)
# vertical lines
for i in range(len(YEARS)):
x = get_x_coordinate(CANVAS_WIDTH, i)
canvas.create_line(x, 0, x, CANVAS_HEIGHT, width=LINE_WIDTH)
canvas.create_text(x+TEXT_DX, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, text=YEARS[i],
anchor=tkinter.NW)
def draw_names(canvas, name_data, lookup_names):
"""
Given a dict of baby name data and a list of name, plots
the historical trend of those names onto the canvas.
Input:
canvas (Tkinter Canvas): The canvas on which we are drawing.
name_data (dict): Dictionary holding baby name data
lookup_names (List[str]): A list of names whose data you want to plot
Returns:
This function does not return any value.
"""
draw_fixed_lines(canvas) # draw the fixed background grid
# Write your code below this line
#################################
# (x_previous, y_previous) represents the rank of the target name in the last year
x_previous = 0
y_previous = 0
# draw the trend lines of the target names respectively
for i in range(len(lookup_names)):
name = lookup_names[i]
        color = COLORS[i % len(COLORS)]            # use constants!!
for j in range(len(YEARS)):
year = str(YEARS[j])
# (x, y) represents the rank of the target name in the year
# x: at the year line
# y: associated with the rank of the year
x = get_x_coordinate(CANVAS_WIDTH, j)
if year in name_data[name]: # the target name is in top 1000 of the year
rank = int(name_data[name][year])
                y = GRAPH_MARGIN_SIZE + ((CANVAS_HEIGHT-GRAPH_MARGIN_SIZE*2)/MAX_RANK)*(rank-1)     # use constants
else: # the target name is not in top 1000 of the year
rank = '*'
y = CANVAS_HEIGHT - GRAPH_MARGIN_SIZE
# add trend lines
if j != 0:
canvas.create_line(x_previous, y_previous, x, y, width=LINE_WIDTH, fill=color)
# add text(name and rank) besides trend lines
canvas.create_text(x + TEXT_DX, y, text=f'{name} {rank}', anchor=tkinter.SW, fill=color)
# record
x_previous = x
y_previous = y
# main() code is provided, feel free to read through it but DO NOT MODIFY
def main():
# Load data
name_data = babynames.read_files(FILENAMES)
print(name_data)
# Create the window and the canvas
top = tkinter.Tk()
top.wm_title('Baby Names')
canvas = gui.make_gui(top, CANVAS_WIDTH, CANVAS_HEIGHT, name_data, draw_names, babynames.search_names)
# Call draw_fixed_lines() once at startup so we have the lines
# even before the user types anything.
draw_fixed_lines(canvas)
# This line starts the graphical loop that is responsible for
# processing user interactions and plotting data
top.mainloop()
if __name__ == '__main__':
main()
| 35.86
| 112
| 0.636178
|
import tkinter
import babynames
import babygraphicsgui as gui
FILENAMES = [
'data/full/baby-1900.txt', 'data/full/baby-1910.txt',
'data/full/baby-1920.txt', 'data/full/baby-1930.txt',
'data/full/baby-1940.txt', 'data/full/baby-1950.txt',
'data/full/baby-1960.txt', 'data/full/baby-1970.txt',
'data/full/baby-1980.txt', 'data/full/baby-1990.txt',
'data/full/baby-2000.txt', 'data/full/baby-2010.txt'
]
CANVAS_WIDTH = 1000
CANVAS_HEIGHT = 600
YEARS = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010]
GRAPH_MARGIN_SIZE = 20
COLORS = ['red', 'purple', 'green', 'blue']
TEXT_DX = 2
LINE_WIDTH = 2
MAX_RANK = 1000
def get_x_coordinate(width, year_index):
space = (width - GRAPH_MARGIN_SIZE*2) / len(YEARS)
x_coordinate = GRAPH_MARGIN_SIZE + year_index * space
return x_coordinate
def draw_fixed_lines(canvas):
canvas.delete('all')
    canvas.create_line(GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE, CANVAS_WIDTH-GRAPH_MARGIN_SIZE,
                       GRAPH_MARGIN_SIZE, width=LINE_WIDTH)
    canvas.create_line(GRAPH_MARGIN_SIZE, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, CANVAS_WIDTH - GRAPH_MARGIN_SIZE,
                       CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, width=LINE_WIDTH)
    for i in range(len(YEARS)):
        x = get_x_coordinate(CANVAS_WIDTH, i)
        canvas.create_line(x, 0, x, CANVAS_HEIGHT, width=LINE_WIDTH)
        canvas.create_text(x+TEXT_DX, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, text=YEARS[i],
                           anchor=tkinter.NW)
def draw_names(canvas, name_data, lookup_names):
    draw_fixed_lines(canvas)
    x_previous = 0
    y_previous = 0
    for i in range(len(lookup_names)):
        name = lookup_names[i]
        color = COLORS[i % len(COLORS)]
        for j in range(len(YEARS)):
            year = str(YEARS[j])
            x = get_x_coordinate(CANVAS_WIDTH, j)
            if year in name_data[name]:
                rank = int(name_data[name][year])
                y = GRAPH_MARGIN_SIZE + ((CANVAS_HEIGHT-GRAPH_MARGIN_SIZE*2)/MAX_RANK)*(rank-1)
else:
rank = '*'
y = CANVAS_HEIGHT - GRAPH_MARGIN_SIZE
if j != 0:
canvas.create_line(x_previous, y_previous, x, y, width=LINE_WIDTH, fill=color)
canvas.create_text(x + TEXT_DX, y, text=f'{name} {rank}', anchor=tkinter.SW, fill=color)
x_previous = x
y_previous = y
def main():
name_data = babynames.read_files(FILENAMES)
print(name_data)
top = tkinter.Tk()
top.wm_title('Baby Names')
canvas = gui.make_gui(top, CANVAS_WIDTH, CANVAS_HEIGHT, name_data, draw_names, babynames.search_names)
draw_fixed_lines(canvas)
top.mainloop()
if __name__ == '__main__':
main()
| true
| true
|
790e0a0b65973504adceb349cc0ba8538aa2567b
| 2,449
|
py
|
Python
|
nih/loader.py
|
edupooch/cxr-domain-shift
|
9e88f82e3d42f660e9f79723adb8a733d0a0e5e3
|
[
"Apache-2.0"
] | 1
|
2019-11-17T19:08:48.000Z
|
2019-11-17T19:08:48.000Z
|
nih/loader.py
|
edupooch/cxr-domain-shift
|
9e88f82e3d42f660e9f79723adb8a733d0a0e5e3
|
[
"Apache-2.0"
] | null | null | null |
nih/loader.py
|
edupooch/cxr-domain-shift
|
9e88f82e3d42f660e9f79723adb8a733d0a0e5e3
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import numpy as np
from torch.utils.data import Dataset
import os
from PIL import Image
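# Illustrative usage sketch (paths and the transform are assumptions, not part
# of this repository):
#   dataset = CXRDataset(path_to_images="images/", fold="train",
#                        transform=some_torchvision_transform)
#   image, label, name = dataset[0]  # transformed image, multi-hot label, filename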
class CXRDataset(Dataset):
def __init__(
self,
path_to_images,
fold,
transform=None,
sample=0,
finding="any",):
self.transform = transform
self.path_to_images = path_to_images
self.df = pd.read_csv("nih/nih_labels.csv")
self.df = self.df[self.df['fold'] == fold]
# can limit to sample, useful for testing
# if fold == "train" or fold =="val": sample=500
if(sample > 0 and sample < len(self.df)):
self.df = self.df.sample(sample)
if not finding == "any": # can filter for positive findings of the kind described; useful for evaluation
if finding in self.df.columns:
if len(self.df[self.df[finding] == 1]) > 0:
self.df = self.df[self.df[finding] == 1]
else:
print("No positive cases exist for "+LABEL+", returning all unfiltered cases")
else:
print("cannot filter on finding " + finding +
" as not in data - please check spelling")
self.df = self.df.set_index("Image Index")
self.PRED_LABEL = [
'No Finding',
'Atelectasis',
'Cardiomegaly',
'Effusion',
'Infiltration',
'Lung Lesion',
'Pneumonia',
'Pneumothorax',
'Consolidation',
'Edema',
'Emphysema',
'Fibrosis',
'Pleural_Thickening',
'Hernia']
RESULT_PATH = "results/"
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
image = Image.open(
os.path.join(
self.path_to_images,
self.df.index[idx]))
image = image.convert('RGB')
label = np.zeros(len(self.PRED_LABEL), dtype=int)
for i in range(0, len(self.PRED_LABEL)):
# can leave zero if zero, else make one
if(self.df[self.PRED_LABEL[i].strip()].iloc[idx].astype('int') > 0):
label[i] = self.df[self.PRED_LABEL[i].strip()
].iloc[idx].astype('int')
if self.transform:
image = self.transform(image)
return (image, label,self.df.index[idx])
| 31
| 113
| 0.518987
|
import pandas as pd
import numpy as np
from torch.utils.data import Dataset
import os
from PIL import Image
class CXRDataset(Dataset):
def __init__(
self,
path_to_images,
fold,
transform=None,
sample=0,
finding="any",):
self.transform = transform
self.path_to_images = path_to_images
self.df = pd.read_csv("nih/nih_labels.csv")
self.df = self.df[self.df['fold'] == fold]
if(sample > 0 and sample < len(self.df)):
self.df = self.df.sample(sample)
if not finding == "any":
if finding in self.df.columns:
if len(self.df[self.df[finding] == 1]) > 0:
self.df = self.df[self.df[finding] == 1]
else:
print("No positive cases exist for "+LABEL+", returning all unfiltered cases")
else:
print("cannot filter on finding " + finding +
" as not in data - please check spelling")
self.df = self.df.set_index("Image Index")
self.PRED_LABEL = [
'No Finding',
'Atelectasis',
'Cardiomegaly',
'Effusion',
'Infiltration',
'Lung Lesion',
'Pneumonia',
'Pneumothorax',
'Consolidation',
'Edema',
'Emphysema',
'Fibrosis',
'Pleural_Thickening',
'Hernia']
RESULT_PATH = "results/"
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
image = Image.open(
os.path.join(
self.path_to_images,
self.df.index[idx]))
image = image.convert('RGB')
label = np.zeros(len(self.PRED_LABEL), dtype=int)
for i in range(0, len(self.PRED_LABEL)):
if(self.df[self.PRED_LABEL[i].strip()].iloc[idx].astype('int') > 0):
label[i] = self.df[self.PRED_LABEL[i].strip()
].iloc[idx].astype('int')
if self.transform:
image = self.transform(image)
return (image, label,self.df.index[idx])
| true
| true
|
790e0a12961861e69d4eb75fa9db8e9aff53a130
| 1,099
|
py
|
Python
|
src/atcoder/dp/q/sol_4.py
|
kagemeka/competitive-programming
|
c70fe481bcd518f507b885fc9234691d8ce63171
|
[
"MIT"
] | 1
|
2021-07-11T03:20:10.000Z
|
2021-07-11T03:20:10.000Z
|
src/atcoder/dp/q/sol_4.py
|
kagemeka/competitive-programming
|
c70fe481bcd518f507b885fc9234691d8ce63171
|
[
"MIT"
] | 39
|
2021-07-10T05:21:09.000Z
|
2021-12-15T06:10:12.000Z
|
src/atcoder/dp/q/sol_4.py
|
kagemeka/competitive-programming
|
c70fe481bcd518f507b885fc9234691d8ce63171
|
[
"MIT"
] | null | null | null |
import typing
import sys
import numpy as np
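# The two helpers below form a Fenwick tree (binary indexed tree) specialised
# for prefix-maximum queries over 1-based indices:
#   set_val(a, i, x) raises the value stored at index i to at least x,
#   get_mx(a, i) returns the maximum of the stored values over indices 1..i.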
def set_val(
a: np.array,
i: int,
x: int,
) -> typing.NoReturn:
while i < a.size:
a[i] = max(a[i], x)
i += i & -i
def get_mx(
a: np.array,
i: int,
) -> int:
mx = 0
while i > 0:
mx = max(mx, a[i])
i -= i & -i
return mx
def solve(
n: int,
h: np.array,
a: np.array,
) -> typing.NoReturn:
fw = np.zeros(
n + 1,
dtype=np.int64,
)
mx = 0
for i in range(n):
v = get_mx(fw, h[i] - 1)
set_val(fw, h[i], v + a[i])
print(get_mx(fw, n))
def main() -> typing.NoReturn:
n = int(input())
h = np.array(
sys.stdin.readline()
.split(),
dtype=np.int64,
)
a = np.array(
sys.stdin.readline()
.split(),
dtype=np.int64,
)
solve(n, h, a)
OJ = 'ONLINE_JUDGE'
if sys.argv[-1] == OJ:
from numba import njit, i8
from numba.pycc import CC
cc = CC('my_module')
fn = solve
sig = (i8, i8[:], i8[:])
get_mx = njit(get_mx)
set_val = njit(set_val)
cc.export(
fn.__name__,
sig,
)(fn)
cc.compile()
exit(0)
from my_module import solve
main()
| 13.7375
| 31
| 0.535032
|
import typing
import sys
import numpy as np
def set_val(
a: np.array,
i: int,
x: int,
) -> typing.NoReturn:
while i < a.size:
a[i] = max(a[i], x)
i += i & -i
def get_mx(
a: np.array,
i: int,
) -> int:
mx = 0
while i > 0:
mx = max(mx, a[i])
i -= i & -i
return mx
def solve(
n: int,
h: np.array,
a: np.array,
) -> typing.NoReturn:
fw = np.zeros(
n + 1,
dtype=np.int64,
)
mx = 0
for i in range(n):
v = get_mx(fw, h[i] - 1)
set_val(fw, h[i], v + a[i])
print(get_mx(fw, n))
def main() -> typing.NoReturn:
n = int(input())
h = np.array(
sys.stdin.readline()
.split(),
dtype=np.int64,
)
a = np.array(
sys.stdin.readline()
.split(),
dtype=np.int64,
)
solve(n, h, a)
OJ = 'ONLINE_JUDGE'
if sys.argv[-1] == OJ:
from numba import njit, i8
from numba.pycc import CC
cc = CC('my_module')
fn = solve
sig = (i8, i8[:], i8[:])
get_mx = njit(get_mx)
set_val = njit(set_val)
cc.export(
fn.__name__,
sig,
)(fn)
cc.compile()
exit(0)
from my_module import solve
main()
| true
| true
|
790e0a5e8648b11550e56f5728f6b2ed76bfdb21
| 34
|
py
|
Python
|
convore/__init__.py
|
kennethreitz-archive/python-convore
|
fcf1917a3c07441712a089500ca710abfcf81ad6
|
[
"0BSD"
] | 2
|
2016-03-01T22:15:41.000Z
|
2016-07-17T18:10:17.000Z
|
convore/__init__.py
|
colekowalski/python-convore
|
77f4b3dfd50b710729839d0e9481a1a37b0cbd75
|
[
"0BSD"
] | null | null | null |
convore/__init__.py
|
colekowalski/python-convore
|
77f4b3dfd50b710729839d0e9481a1a37b0cbd75
|
[
"0BSD"
] | null | null | null |
from core import *
import packages
| 17
| 18
| 0.823529
|
from core import *
import packages
| true
| true
|
790e0abc1b375c170241b587ccd08f6c0ca72a1e
| 454
|
py
|
Python
|
gram/migrations/0005_auto_20190310_1523.py
|
viisualworks/instanoir
|
e1ab476b61b1cdb1abc7546ef06de51579b8f24e
|
[
"MIT"
] | null | null | null |
gram/migrations/0005_auto_20190310_1523.py
|
viisualworks/instanoir
|
e1ab476b61b1cdb1abc7546ef06de51579b8f24e
|
[
"MIT"
] | 1
|
2021-09-08T00:51:50.000Z
|
2021-09-08T00:51:50.000Z
|
gram/migrations/0005_auto_20190310_1523.py
|
viisualworks/instanoir
|
e1ab476b61b1cdb1abc7546ef06de51579b8f24e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-03-10 12:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gram', '0004_auto_20190310_1510'),
]
operations = [
migrations.AlterField(
model_name='image',
name='comment',
field=models.TextField(blank=True, null=True),
),
]
| 21.619048
| 58
| 0.614537
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gram', '0004_auto_20190310_1510'),
]
operations = [
migrations.AlterField(
model_name='image',
name='comment',
field=models.TextField(blank=True, null=True),
),
]
| true
| true
|
790e0b4a985eb831926bb97e4f587081aa98a586
| 837
|
py
|
Python
|
CtCI_custom_classes/overflow_stack.py
|
enyquist/Cracking_the_Coding_Interview
|
3247ad7b349f728c83ec5d3f452a7dce34d6d50c
|
[
"MIT"
] | null | null | null |
CtCI_custom_classes/overflow_stack.py
|
enyquist/Cracking_the_Coding_Interview
|
3247ad7b349f728c83ec5d3f452a7dce34d6d50c
|
[
"MIT"
] | null | null | null |
CtCI_custom_classes/overflow_stack.py
|
enyquist/Cracking_the_Coding_Interview
|
3247ad7b349f728c83ec5d3f452a7dce34d6d50c
|
[
"MIT"
] | null | null | null |
from CtCI_Custom_Classes.stack import Stack
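# SetOfStacks behaves like one stack backed by several fixed-capacity Stack
# instances: push() starts a new internal stack once the last one is full,
# and pop() discards an internal stack as soon as it becomes empty.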
class SetOfStacks:
def __init__(self, capacity):
self.capacity = capacity
self.stacks = []
def get_last_stack(self):
if not self.stacks:
return None
return self.stacks[-1]
def is_empty(self):
last = self.get_last_stack()
return not last or last.is_empty()
def pop(self):
last = self.get_last_stack()
if not last:
return None
v = last.pop()
if last.get_size() == 0:
del self.stacks[-1]
return v
def push(self, data):
last = self.get_last_stack()
if last and not last.is_full():
last.push(data)
else:
stack = Stack(self.capacity)
stack.push(data)
self.stacks.append(stack)
| 23.914286
| 43
| 0.551971
|
from CtCI_Custom_Classes.stack import Stack
class SetOfStacks:
def __init__(self, capacity):
self.capacity = capacity
self.stacks = []
def get_last_stack(self):
if not self.stacks:
return None
return self.stacks[-1]
def is_empty(self):
last = self.get_last_stack()
return not last or last.is_empty()
def pop(self):
last = self.get_last_stack()
if not last:
return None
v = last.pop()
if last.get_size() == 0:
del self.stacks[-1]
return v
def push(self, data):
last = self.get_last_stack()
if last and not last.is_full():
last.push(data)
else:
stack = Stack(self.capacity)
stack.push(data)
self.stacks.append(stack)
| true
| true
|
790e0ba2d5067ba9776aa3c41a6f454ad345e441
| 3,177
|
py
|
Python
|
resources/src/mythbox/mythtv/publish.py
|
bopopescu/ServerStatus
|
a883598248ad6f5273eb3be498e3b04a1fab6510
|
[
"MIT"
] | null | null | null |
resources/src/mythbox/mythtv/publish.py
|
bopopescu/ServerStatus
|
a883598248ad6f5273eb3be498e3b04a1fab6510
|
[
"MIT"
] | 1
|
2015-04-21T22:05:02.000Z
|
2015-04-22T22:27:15.000Z
|
resources/src/mythbox/mythtv/publish.py
|
GetSomeBlocks/Score_Soccer
|
a883598248ad6f5273eb3be498e3b04a1fab6510
|
[
"MIT"
] | 2
|
2015-09-29T16:31:43.000Z
|
2020-07-26T03:41:10.000Z
|
#
# MythBox for XBMC - http://mythbox.googlecode.com
# Copyright (C) 2011 analogue@yahoo.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import logging
import xbmc
from mythbox.bus import Event
from mythbox.util import run_async
from mythbox.mythtv.conn import EventConnection, inject_conn
log = logging.getLogger('mythbox.core')
class MythEventPublisher(object):
# Before recording starts:
#
# [u'BACKEND_MESSAGE', u'SYSTEM_EVENT REC_PENDING SECS 120 CARDID 7 CHANID 4282 STARTTIME 2011-05-27T20:00:00 SENDER athena', u'empty']
#
# Delete recording
#
# [u'BACKEND_MESSAGE', u'RECORDING_LIST_CHANGE DELETE 1071 2011-05-27T15:30:00', u'empty']
#
# Create/edit/delete schedule
#
# [u'BACKEND_MESSAGE', u'SCHEDULE_CHANGE', u'empty']
#
def __init__(self, *args, **kwargs):
[setattr(self, k, v) for k,v in kwargs.items() if k in ['bus', 'settings','translator','platform']]
self.closed = False
@inject_conn
def supportsSystemEvents(self):
return self.conn().platform.supportsSystemEvents()
@run_async
def startup(self):
log.debug('Starting MythEventPublisher..')
self.eventConn = EventConnection(settings=self.settings, translator=self.translator, platform=self.platform, bus=self.bus)
while not self.closed and not xbmc.abortRequested:
try:
tokens = self.eventConn.readEvent()
if len(tokens) >= 2 and not tokens[1].startswith(u'UPDATE_FILE_SIZE'):
log.debug('EVENT: %s' % tokens)
if len(tokens)>=3 and tokens[0] == 'BACKEND_MESSAGE':
if tokens[1].startswith('SYSTEM_EVENT') and 'SCHEDULER_RAN' in tokens[1]:
self.bus.publish({'id':Event.SCHEDULER_RAN})
elif tokens[1].startswith('COMMFLAG_START'):
self.bus.publish({'id':Event.COMMFLAG_START})
elif tokens[1].startswith('SCHEDULE_CHANGE'):
self.bus.publish({'id':Event.SCHEDULE_CHANGED})
except Exception, e:
log.exception(e)
log.debug('Exiting MythEventPublisher')
def shutdown(self):
self.closed = True
try:
self.eventConn.close()
except:
log.exception('On shutting down MythEventPublisher')
| 38.277108
| 144
| 0.627951
|
import logging
import xbmc
from mythbox.bus import Event
from mythbox.util import run_async
from mythbox.mythtv.conn import EventConnection, inject_conn
log = logging.getLogger('mythbox.core')
class MythEventPublisher(object):
def __init__(self, *args, **kwargs):
[setattr(self, k, v) for k,v in kwargs.items() if k in ['bus', 'settings','translator','platform']]
self.closed = False
@inject_conn
def supportsSystemEvents(self):
return self.conn().platform.supportsSystemEvents()
@run_async
def startup(self):
log.debug('Starting MythEventPublisher..')
self.eventConn = EventConnection(settings=self.settings, translator=self.translator, platform=self.platform, bus=self.bus)
while not self.closed and not xbmc.abortRequested:
try:
tokens = self.eventConn.readEvent()
if len(tokens) >= 2 and not tokens[1].startswith(u'UPDATE_FILE_SIZE'):
log.debug('EVENT: %s' % tokens)
if len(tokens)>=3 and tokens[0] == 'BACKEND_MESSAGE':
if tokens[1].startswith('SYSTEM_EVENT') and 'SCHEDULER_RAN' in tokens[1]:
self.bus.publish({'id':Event.SCHEDULER_RAN})
elif tokens[1].startswith('COMMFLAG_START'):
self.bus.publish({'id':Event.COMMFLAG_START})
elif tokens[1].startswith('SCHEDULE_CHANGE'):
self.bus.publish({'id':Event.SCHEDULE_CHANGED})
except Exception, e:
log.exception(e)
log.debug('Exiting MythEventPublisher')
def shutdown(self):
self.closed = True
try:
self.eventConn.close()
except:
log.exception('On shutting down MythEventPublisher')
| false
| true
|
790e0c7056763bda123b6710f702fca2952e4e53
| 9,151
|
py
|
Python
|
statsmodels/sandbox/regression/try_ols_anova.py
|
toobaz/statsmodels
|
5286dd713a809b0630232508bf9ad5104aae1980
|
[
"BSD-3-Clause"
] | 10
|
2016-05-18T11:46:33.000Z
|
2018-12-23T04:52:27.000Z
|
statsmodels/sandbox/regression/try_ols_anova.py
|
AnaMP/statsmodels
|
2d4aad9a14619ce0c84d4c7bca9dacd66b2be566
|
[
"BSD-3-Clause"
] | 1
|
2021-09-11T14:30:32.000Z
|
2021-09-11T14:30:32.000Z
|
statsmodels/sandbox/regression/try_ols_anova.py
|
AnaMP/statsmodels
|
2d4aad9a14619ce0c84d4c7bca9dacd66b2be566
|
[
"BSD-3-Clause"
] | 3
|
2015-04-01T08:26:54.000Z
|
2020-02-14T14:34:10.000Z
|
''' convenience functions for ANOVA type analysis with OLS
Note: statistical results of ANOVA are not checked, OLS is
checked but not whether the reported results are the ones used
in ANOVA
includes form2design for creating dummy variables
TODO:
* ...
*
'''
import numpy as np
#from scipy import stats
import statsmodels.api as sm
def data2dummy(x, returnall=False):
'''convert array of categories to dummy variables
by default drops dummy variable for last category
uses ravel, 1d only'''
x = x.ravel()
groups = np.unique(x)
if returnall:
return (x[:, None] == groups).astype(int)
else:
return (x[:, None] == groups).astype(int)[:,:-1]
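# Illustrative example (values chosen for this comment only):
#   data2dummy(np.array([0, 1, 2, 1]))                  -> indicator columns for 0 and 1
#   data2dummy(np.array([0, 1, 2, 1]), returnall=True)  -> indicator columns for 0, 1 and 2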
def data2proddummy(x):
'''creates product dummy variables from 2 columns of 2d array
drops last dummy variable, but not from each category
singular with simple dummy variable but not with constant
quickly written, no safeguards
'''
#brute force, assumes x is 2d
#replace with encoding if possible
groups = np.unique(map(tuple, x.tolist()))
#includes singularity with additive factors
return (x==groups[:,None,:]).all(-1).T.astype(int)[:,:-1]
def data2groupcont(x1,x2):
'''create dummy continuous variable
Parameters
----------
x1 : 1d array
label or group array
x2 : 1d array (float)
continuous variable
Notes
-----
useful for group specific slope coefficients in regression
'''
if x2.ndim == 1:
x2 = x2[:,None]
dummy = data2dummy(x1, returnall=True)
return dummy * x2
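# Illustrative example (values chosen for this comment only):
#   data2groupcont(np.array([0, 0, 1]), np.array([1., 2., 3.]))
#   -> [[1., 0.], [2., 0.], [0., 3.]], the continuous variable split per group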
# Result strings
#the second leaves the constant in, not with NIST regression
#but something fishy with res.ess negative in examples ?
#not checked if these are all the right ones
anova_str0 = '''
ANOVA statistics (model sum of squares excludes constant)
Source DF Sum Squares Mean Square F Value Pr > F
Model %(df_model)i %(ess)f %(mse_model)f %(fvalue)f %(f_pvalue)f
Error %(df_resid)i %(ssr)f %(mse_resid)f
CTotal %(nobs)i %(uncentered_tss)f %(mse_total)f
R squared %(rsquared)f
'''
anova_str = '''
ANOVA statistics (model sum of squares includes constant)
Source DF Sum Squares Mean Square F Value Pr > F
Model %(df_model)i %(ssmwithmean)f %(mse_model)f %(fvalue)f %(f_pvalue)f
Error %(df_resid)i %(ssr)f %(mse_resid)f
CTotal %(nobs)i %(uncentered_tss)f %(mse_total)f
R squared %(rsquared)f
'''
def anovadict(res):
'''update regression results dictionary with ANOVA specific statistics
not checked for completeness
'''
ad = {}
ad.update(res.__dict__) #dict doesn't work with cached attributes
anova_attr = ['df_model', 'df_resid', 'ess', 'ssr','uncentered_tss',
'mse_model', 'mse_resid', 'mse_total', 'fvalue', 'f_pvalue',
'rsquared']
for key in anova_attr:
ad[key] = getattr(res, key)
ad['nobs'] = res.model.nobs
ad['ssmwithmean'] = res.uncentered_tss - res.ssr
return ad
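# Illustrative usage sketch (X and y stand for any design matrix and response):
#   res = sm.OLS(y, X).fit()
#   print anova_str % anovadict(res)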
def form2design(ss, data):
'''convert string formula to data dictionary
ss : string
* I : add constant
* varname : for simple varnames data is used as is
* F:varname : create dummy variables for factor varname
* P:varname1*varname2 : create product dummy variables for
varnames
* G:varname1*varname2 : create product between factor and
continuous variable
data : dict or structured array
data set, access of variables by name as in dictionaries
Returns
-------
vars : dictionary
dictionary of variables with converted dummy variables
names : list
list of names, product (P:) and grouped continuous
variables (G:) have name by joining individual names
sorted according to input
Examples
--------
>>> xx, n = form2design('I a F:b P:c*d G:c*f', testdata)
>>> xx.keys()
['a', 'b', 'const', 'cf', 'cd']
>>> n
['const', 'a', 'b', 'cd', 'cf']
Notes
-----
with sorted dict, separate name list wouldn't be necessary
'''
vars = {}
names = []
for item in ss.split():
if item == 'I':
vars['const'] = np.ones(data.shape[0])
names.append('const')
elif not ':' in item:
vars[item] = data[item]
names.append(item)
elif item[:2] == 'F:':
v = item.split(':')[1]
vars[v] = data2dummy(data[v])
names.append(v)
elif item[:2] == 'P:':
v = item.split(':')[1].split('*')
vars[''.join(v)] = data2proddummy(np.c_[data[v[0]],data[v[1]]])
names.append(''.join(v))
elif item[:2] == 'G:':
v = item.split(':')[1].split('*')
vars[''.join(v)] = data2groupcont(data[v[0]], data[v[1]])
names.append(''.join(v))
else:
raise ValueError('unknown expression in formula')
return vars, names
def dropname(ss, li):
'''drop names from a list of strings,
    names to drop are in space delimited list
does not change original list
'''
newli = li[:]
for item in ss.split():
newli.remove(item)
return newli
if __name__ == '__main__':
# Test Example with created data
# ------------------------------
nobs = 1000
testdataint = np.random.randint(3, size=(nobs,4)).view([('a',int),('b',int),('c',int),('d',int)])
testdatacont = np.random.normal( size=(nobs,2)).view([('e',float), ('f',float)])
import numpy.lib.recfunctions
dt2 = numpy.lib.recfunctions.zip_descr((testdataint, testdatacont),flatten=True)
# concatenate structured arrays
testdata = np.empty((nobs,1), dt2)
for name in testdataint.dtype.names:
testdata[name] = testdataint[name]
for name in testdatacont.dtype.names:
testdata[name] = testdatacont[name]
#print form2design('a',testdata)
if 0: # print only when nobs is small, e.g. nobs=10
xx, n = form2design('F:a',testdata)
print xx
print form2design('P:a*b',testdata)
print data2proddummy((np.c_[testdata['a'],testdata['b']]))
xx, names = form2design('a F:b P:c*d',testdata)
#xx, names = form2design('I a F:b F:c F:d P:c*d',testdata)
xx, names = form2design('I a F:b P:c*d', testdata)
xx, names = form2design('I a F:b P:c*d G:a*e f', testdata)
X = np.column_stack([xx[nn] for nn in names])
# simple test version: all coefficients equal to one
y = X.sum(1) + 0.01*np.random.normal(size=(nobs))
rest1 = sm.OLS(y,X).fit() #results
print rest1.params
print anova_str % anovadict(rest1)
X = np.column_stack([xx[nn] for nn in dropname('ae f', names)])
# simple test version: all coefficients equal to one
y = X.sum(1) + 0.01*np.random.normal(size=(nobs))
rest1 = sm.OLS(y,X).fit()
print rest1.params
print anova_str % anovadict(rest1)
# Example: from Bruce
# -------------------
#get data and clean it
#^^^^^^^^^^^^^^^^^^^^^
# requires file 'dftest3.data' posted by Bruce
# read data set and drop rows with missing data
dt_b = np.dtype([('breed', int), ('sex', int), ('litter', int),
('pen', int), ('pig', int), ('age', float),
('bage', float), ('y', float)])
dta = np.genfromtxt('dftest3.data', dt_b,missing='.', usemask=True)
print 'missing', [dta.mask[k].sum() for k in dta.dtype.names]
m = dta.mask.view(bool)
droprows = m.reshape(-1,len(dta.dtype.names)).any(1)
# get complete data as plain structured array
# maybe doesn't work with masked arrays
dta_use_b1 = dta[~droprows,:].data
print dta_use_b1.shape
print dta_use_b1.dtype
#Example b1: variables from Bruce's glm
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# prepare data and dummy variables
xx_b1, names_b1 = form2design('I F:sex age', dta_use_b1)
# create design matrix
X_b1 = np.column_stack([xx_b1[nn] for nn in dropname('', names_b1)])
y_b1 = dta_use_b1['y']
# estimate using OLS
rest_b1 = sm.OLS(y_b1, X_b1).fit()
# print results
print rest_b1.params
print anova_str % anovadict(rest_b1)
#compare with original version only in original version
#print anova_str % anovadict(res_b0)
# Example: use all variables except pig identifier
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
allexog = ' '.join(dta.dtype.names[:-1])
#'breed sex litter pen pig age bage'
xx_b1a, names_b1a = form2design('I F:breed F:sex F:litter F:pen age bage', dta_use_b1)
X_b1a = np.column_stack([xx_b1a[nn] for nn in dropname('', names_b1a)])
y_b1a = dta_use_b1['y']
rest_b1a = sm.OLS(y_b1a, X_b1a).fit()
print rest_b1a.params
print anova_str % anovadict(rest_b1a)
for dropn in names_b1a:
print '\nResults dropping', dropn
X_b1a_ = np.column_stack([xx_b1a[nn] for nn in dropname(dropn, names_b1a)])
y_b1a_ = dta_use_b1['y']
rest_b1a_ = sm.OLS(y_b1a_, X_b1a_).fit()
#print rest_b1a_.params
print anova_str % anovadict(rest_b1a_)
| 31.774306
| 101
| 0.605289
|
''' convenience functions for ANOVA type analysis with OLS
Note: statistical results of ANOVA are not checked, OLS is
checked but not whether the reported results are the ones used
in ANOVA
includes form2design for creating dummy variables
TODO:
* ...
*
'''
import numpy as np
import statsmodels.api as sm
def data2dummy(x, returnall=False):
'''convert array of categories to dummy variables
by default drops dummy variable for last category
uses ravel, 1d only'''
x = x.ravel()
groups = np.unique(x)
if returnall:
return (x[:, None] == groups).astype(int)
else:
return (x[:, None] == groups).astype(int)[:,:-1]
def data2proddummy(x):
'''creates product dummy variables from 2 columns of 2d array
drops last dummy variable, but not from each category
singular with simple dummy variable but not with constant
quickly written, no safeguards
'''
groups = np.unique(map(tuple, x.tolist()))
return (x==groups[:,None,:]).all(-1).T.astype(int)[:,:-1]
def data2groupcont(x1,x2):
'''create dummy continuous variable
Parameters
----------
x1 : 1d array
label or group array
x2 : 1d array (float)
continuous variable
Notes
-----
useful for group specific slope coefficients in regression
'''
if x2.ndim == 1:
x2 = x2[:,None]
dummy = data2dummy(x1, returnall=True)
return dummy * x2
anova_str0 = '''
ANOVA statistics (model sum of squares excludes constant)
Source DF Sum Squares Mean Square F Value Pr > F
Model %(df_model)i %(ess)f %(mse_model)f %(fvalue)f %(f_pvalue)f
Error %(df_resid)i %(ssr)f %(mse_resid)f
CTotal %(nobs)i %(uncentered_tss)f %(mse_total)f
R squared %(rsquared)f
'''
anova_str = '''
ANOVA statistics (model sum of squares includes constant)
Source DF Sum Squares Mean Square F Value Pr > F
Model %(df_model)i %(ssmwithmean)f %(mse_model)f %(fvalue)f %(f_pvalue)f
Error %(df_resid)i %(ssr)f %(mse_resid)f
CTotal %(nobs)i %(uncentered_tss)f %(mse_total)f
R squared %(rsquared)f
'''
def anovadict(res):
'''update regression results dictionary with ANOVA specific statistics
not checked for completeness
'''
ad = {}
ad.update(res.__dict__)
anova_attr = ['df_model', 'df_resid', 'ess', 'ssr','uncentered_tss',
'mse_model', 'mse_resid', 'mse_total', 'fvalue', 'f_pvalue',
'rsquared']
for key in anova_attr:
ad[key] = getattr(res, key)
ad['nobs'] = res.model.nobs
ad['ssmwithmean'] = res.uncentered_tss - res.ssr
return ad
def form2design(ss, data):
'''convert string formula to data dictionary
ss : string
* I : add constant
* varname : for simple varnames data is used as is
* F:varname : create dummy variables for factor varname
* P:varname1*varname2 : create product dummy variables for
varnames
* G:varname1*varname2 : create product between factor and
continuous variable
data : dict or structured array
data set, access of variables by name as in dictionaries
Returns
-------
vars : dictionary
dictionary of variables with converted dummy variables
names : list
list of names, product (P:) and grouped continuous
variables (G:) have name by joining individual names
sorted according to input
Examples
--------
>>> xx, n = form2design('I a F:b P:c*d G:c*f', testdata)
>>> xx.keys()
['a', 'b', 'const', 'cf', 'cd']
>>> n
['const', 'a', 'b', 'cd', 'cf']
Notes
-----
with sorted dict, separate name list wouldn't be necessary
'''
vars = {}
names = []
for item in ss.split():
if item == 'I':
vars['const'] = np.ones(data.shape[0])
names.append('const')
elif not ':' in item:
vars[item] = data[item]
names.append(item)
elif item[:2] == 'F:':
v = item.split(':')[1]
vars[v] = data2dummy(data[v])
names.append(v)
elif item[:2] == 'P:':
v = item.split(':')[1].split('*')
vars[''.join(v)] = data2proddummy(np.c_[data[v[0]],data[v[1]]])
names.append(''.join(v))
elif item[:2] == 'G:':
v = item.split(':')[1].split('*')
vars[''.join(v)] = data2groupcont(data[v[0]], data[v[1]])
names.append(''.join(v))
else:
raise ValueError('unknown expression in formula')
return vars, names
def dropname(ss, li):
'''drop names from a list of strings,
    names to drop are in space delimited list
does not change original list
'''
newli = li[:]
for item in ss.split():
newli.remove(item)
return newli
if __name__ == '__main__':
nobs = 1000
testdataint = np.random.randint(3, size=(nobs,4)).view([('a',int),('b',int),('c',int),('d',int)])
testdatacont = np.random.normal( size=(nobs,2)).view([('e',float), ('f',float)])
import numpy.lib.recfunctions
dt2 = numpy.lib.recfunctions.zip_descr((testdataint, testdatacont),flatten=True)
testdata = np.empty((nobs,1), dt2)
for name in testdataint.dtype.names:
testdata[name] = testdataint[name]
for name in testdatacont.dtype.names:
testdata[name] = testdatacont[name]
if 0:
xx, n = form2design('F:a',testdata)
print xx
print form2design('P:a*b',testdata)
print data2proddummy((np.c_[testdata['a'],testdata['b']]))
xx, names = form2design('a F:b P:c*d',testdata)
xx, names = form2design('I a F:b P:c*d', testdata)
xx, names = form2design('I a F:b P:c*d G:a*e f', testdata)
X = np.column_stack([xx[nn] for nn in names])
y = X.sum(1) + 0.01*np.random.normal(size=(nobs))
rest1 = sm.OLS(y,X).fit()
print rest1.params
print anova_str % anovadict(rest1)
X = np.column_stack([xx[nn] for nn in dropname('ae f', names)])
y = X.sum(1) + 0.01*np.random.normal(size=(nobs))
rest1 = sm.OLS(y,X).fit()
print rest1.params
print anova_str % anovadict(rest1)
dt_b = np.dtype([('breed', int), ('sex', int), ('litter', int),
('pen', int), ('pig', int), ('age', float),
('bage', float), ('y', float)])
dta = np.genfromtxt('dftest3.data', dt_b,missing='.', usemask=True)
print 'missing', [dta.mask[k].sum() for k in dta.dtype.names]
m = dta.mask.view(bool)
droprows = m.reshape(-1,len(dta.dtype.names)).any(1)
dta_use_b1 = dta[~droprows,:].data
print dta_use_b1.shape
print dta_use_b1.dtype
#Example b1: variables from Bruce's glm
xx_b1, names_b1 = form2design('I F:sex age', dta_use_b1)
X_b1 = np.column_stack([xx_b1[nn] for nn in dropname('', names_b1)])
y_b1 = dta_use_b1['y']
rest_b1 = sm.OLS(y_b1, X_b1).fit()
print rest_b1.params
print anova_str % anovadict(rest_b1)
allexog = ' '.join(dta.dtype.names[:-1])
xx_b1a, names_b1a = form2design('I F:breed F:sex F:litter F:pen age bage', dta_use_b1)
X_b1a = np.column_stack([xx_b1a[nn] for nn in dropname('', names_b1a)])
y_b1a = dta_use_b1['y']
rest_b1a = sm.OLS(y_b1a, X_b1a).fit()
print rest_b1a.params
print anova_str % anovadict(rest_b1a)
for dropn in names_b1a:
print '\nResults dropping', dropn
X_b1a_ = np.column_stack([xx_b1a[nn] for nn in dropname(dropn, names_b1a)])
y_b1a_ = dta_use_b1['y']
rest_b1a_ = sm.OLS(y_b1a_, X_b1a_).fit()
print anova_str % anovadict(rest_b1a_)
| false
| true
|
790e0ce0cd33314e6bd660016ad994d3cc52b9e7
| 154,790
|
py
|
Python
|
cmdx.py
|
fvbehr/cmdx
|
d2203e8eee51857b6e1bb1ed175d0ff928854c47
|
[
"BSD-2-Clause"
] | null | null | null |
cmdx.py
|
fvbehr/cmdx
|
d2203e8eee51857b6e1bb1ed175d0ff928854c47
|
[
"BSD-2-Clause"
] | null | null | null |
cmdx.py
|
fvbehr/cmdx
|
d2203e8eee51857b6e1bb1ed175d0ff928854c47
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
import json
import time
import math
import types
import logging
import traceback
import operator
import collections
from functools import wraps
from maya import cmds
from maya.api import OpenMaya as om, OpenMayaAnim as oma, OpenMayaUI as omui
from maya import OpenMaya as om1, OpenMayaMPx as ompx1, OpenMayaUI as omui1
__version__ = "0.4.6"
PY3 = sys.version_info[0] == 3
# Bypass assertion error on unsupported Maya versions
IGNORE_VERSION = bool(os.getenv("CMDX_IGNORE_VERSION"))
# Output profiling information to console
# CAREFUL! This will flood your console. Use sparingly.
TIMINGS = bool(os.getenv("CMDX_TIMINGS"))
# Do not perform any caching of nodes or plugs
SAFE_MODE = bool(os.getenv("CMDX_SAFE_MODE"))
# Increase performance by not protecting against
# fatal crashes (e.g. operations on deleted nodes)
# This can be useful when you know for certain that a
# series of operations will happen in isolation, such
# as during an auto rigging build or export process.
ROGUE_MODE = not SAFE_MODE and bool(os.getenv("CMDX_ROGUE_MODE"))
# Increase performance by not bothering to free up unused memory
MEMORY_HOG_MODE = not SAFE_MODE and bool(os.getenv("CMDX_MEMORY_HOG_MODE"))
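# Illustrative example (shell syntax is an assumption, adapt to your launcher):
# enable profiling output and safe mode before starting Maya
#   export CMDX_TIMINGS=1
#   export CMDX_SAFE_MODE=1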
ENABLE_PEP8 = True
# Support undo/redo
ENABLE_UNDO = not SAFE_MODE
# Required
ENABLE_PLUG_REUSE = True
if PY3:
string_types = str,
else:
string_types = str, basestring, unicode
try:
__maya_version__ = int(cmds.about(version=True))
except (AttributeError, ValueError):
__maya_version__ = 2015 # E.g. Preview Release 95
if not IGNORE_VERSION:
assert __maya_version__ >= 2015, "Requires Maya 2015 or newer"
self = sys.modules[__name__]
self.installed = False
log = logging.getLogger("cmdx")
# Aliases - API 1.0
om1 = om1
omui1 = omui1
# Aliases - API 2.0
om = om
oma = oma
omui = omui
# Accessible via `cmdx.NodeReuseCount` etc.
Stats = self
Stats.NodeInitCount = 0
Stats.NodeReuseCount = 0
Stats.PlugReuseCount = 0
Stats.LastTiming = None
# Node reuse depends on this member
if not hasattr(om, "MObjectHandle"):
log.warning("Node reuse might not work in this version of Maya "
"(OpenMaya.MObjectHandle not found)")
TimeUnit = om.MTime.uiUnit()
# DEPRECATED
MTime = om.MTime
MDistance = om.MDistance
MAngle = om.MAngle
TimeType = om.MTime
DistanceType = om.MDistance
AngleType = om.MAngle
ExistError = type("ExistError", (RuntimeError,), {})
DoNothing = None
# Reusable objects, for performance
GlobalDagNode = om.MFnDagNode()
GlobalDependencyNode = om.MFnDependencyNode()
First = 0
Last = -1
# Animation curve interpolation, from MFnAnimCurve::TangentType
Stepped = 5
Linear = 2
Smooth = 4
history = dict()
class ModifierError(RuntimeError):
def __init__(self, history):
tasklist = list()
for task in history:
cmd, args, kwargs = task
tasklist += [
"%s(%s)" % (cmd, ", ".join(map(repr, args)))
]
message = (
"An unexpected internal failure occurred, "
"these tasks were attempted:\n- " +
"\n- ".join(tasklist)
)
self.history = history
super(ModifierError, self).__init__(message)
def withTiming(text="{func}() {time:.2f} ns"):
"""Append timing information to a function
Example:
@withTiming()
def function():
pass
"""
def timings_decorator(func):
if not TIMINGS:
# Do not wrap the function.
# This yields zero cost to runtime performance
return func
@wraps(func)
def func_wrapper(*args, **kwargs):
t0 = time.clock()
try:
return func(*args, **kwargs)
finally:
t1 = time.clock()
duration = (t1 - t0) * 10 ** 6 # microseconds
Stats.LastTiming = duration
log.debug(
text.format(func=func.__name__,
time=duration)
)
return func_wrapper
return timings_decorator
def protected(func):
"""Prevent fatal crashes from illegal access to deleted nodes"""
if ROGUE_MODE:
return func
@wraps(func)
def func_wrapper(*args, **kwargs):
if args[0]._destroyed:
raise ExistError("Cannot perform operation on deleted node")
return func(*args, **kwargs)
return func_wrapper
def add_metaclass(metaclass):
"""Add metaclass to Python 2 and 3 class
Helper decorator, from six.py
"""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
if hasattr(cls, '__qualname__'):
orig_vars['__qualname__'] = cls.__qualname__
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
class _Type(int):
"""Facilitate use of isinstance(space, _Type)"""
MFn = om.MFn
kDagNode = _Type(om.MFn.kDagNode)
kShape = _Type(om.MFn.kShape)
kTransform = _Type(om.MFn.kTransform)
kJoint = _Type(om.MFn.kJoint)
kSet = _Type(om.MFn.kSet)
class _Space(int):
"""Facilitate use of isinstance(space, _Space)"""
# Spaces
sWorld = _Space(om.MSpace.kWorld)
sObject = _Space(om.MSpace.kObject)
sTransform = _Space(om.MSpace.kTransform)
sPostTransform = _Space(om.MSpace.kPostTransform)
sPreTransform = _Space(om.MSpace.kPreTransform)
kXYZ = om.MEulerRotation.kXYZ
kYZX = om.MEulerRotation.kYZX
kZXY = om.MEulerRotation.kZXY
kXZY = om.MEulerRotation.kXZY
kYXZ = om.MEulerRotation.kYXZ
kZYX = om.MEulerRotation.kZYX
class _Unit(int):
"""A Maya unit, for unit-attributes such as Angle and Distance
Because the resulting classes are subclasses of `int`, there
is virtually no run-time performance penalty to using it as
an integer. No additional Python is called, most notably when
passing the integer class to the Maya C++ binding (which wouldn't
call our overridden methods anyway).
    The added overhead to import time is negligible.
"""
def __new__(cls, unit, enum):
self = super(_Unit, cls).__new__(cls, enum)
self._unit = unit
return self
def __call__(self, enum):
return self._unit(enum, self)
# Angular units
Degrees = _Unit(om.MAngle, om.MAngle.kDegrees)
Radians = _Unit(om.MAngle, om.MAngle.kRadians)
AngularMinutes = _Unit(om.MAngle, om.MAngle.kAngMinutes)
AngularSeconds = _Unit(om.MAngle, om.MAngle.kAngSeconds)
# Distance units
Millimeters = _Unit(om.MDistance, om.MDistance.kMillimeters)
Centimeters = _Unit(om.MDistance, om.MDistance.kCentimeters)
Meters = _Unit(om.MDistance, om.MDistance.kMeters)
Kilometers = _Unit(om.MDistance, om.MDistance.kKilometers)
Inches = _Unit(om.MDistance, om.MDistance.kInches)
Feet = _Unit(om.MDistance, om.MDistance.kFeet)
Miles = _Unit(om.MDistance, om.MDistance.kMiles)
Yards = _Unit(om.MDistance, om.MDistance.kYards)
# Time units
Milliseconds = _Unit(om.MTime, om.MTime.kMilliseconds)
Minutes = _Unit(om.MTime, om.MTime.kMinutes)
Seconds = _Unit(om.MTime, om.MTime.kSeconds)
def UiUnit():
"""Unlike other time units, this can be modified by the user at run-time"""
return _Unit(om.MTime, om.MTime.uiUnit())
_Cached = type("Cached", (object,), {}) # For isinstance(x, _Cached)
Cached = _Cached()
_data = collections.defaultdict(dict)
class Singleton(type):
"""Re-use previous instances of Node
Cost: 14 microseconds
This enables persistent state of each node, even when
a node is discovered at a later time, such as via
:func:`DagNode.parent()` or :func:`DagNode.descendents()`
Arguments:
mobject (MObject): Maya API object to wrap
exists (bool, optional): Whether or not to search for
an existing Python instance of this node
Example:
>>> nodeA = createNode("transform", name="myNode")
>>> nodeB = createNode("transform", parent=nodeA)
>>> encode("|myNode") is nodeA
True
>>> nodeB.parent() is nodeA
True
"""
_instances = {}
@withTiming()
def __call__(cls, mobject, exists=True, modifier=None):
handle = om.MObjectHandle(mobject)
hsh = handle.hashCode()
hx = "%x" % hsh
if exists and handle.isValid():
try:
node = cls._instances[hx]
assert not node._destroyed
except (KeyError, AssertionError):
pass
else:
Stats.NodeReuseCount += 1
node._removed = False
return node
# It didn't exist, let's create one
# But first, make sure we instantiate the right type
if mobject.hasFn(om.MFn.kDagNode):
sup = DagNode
elif mobject.hasFn(om.MFn.kSet):
sup = ObjectSet
elif mobject.hasFn(om.MFn.kAnimCurve):
sup = AnimCurve
else:
sup = Node
self = super(Singleton, sup).__call__(mobject, exists, modifier)
self._hashCode = hsh
self._hexStr = hx
cls._instances[hx] = self
return self
@add_metaclass(Singleton)
class Node(object):
"""A Maya dependency node
Example:
>>> _ = cmds.file(new=True, force=True)
>>> decompose = createNode("decomposeMatrix", name="decompose")
>>> str(decompose)
'decompose'
>>> alias = encode(decompose.name())
>>> decompose == alias
True
>>> transform = createNode("transform")
>>> transform["tx"] = 5
>>> transform["worldMatrix"][0] >> decompose["inputMatrix"]
>>> decompose["outputTranslate"]
(5.0, 0.0, 0.0)
"""
_Fn = om.MFnDependencyNode
# Module-level cache of previously created instances of Node
_Cache = dict()
def __eq__(self, other):
"""MObject supports this operator explicitly"""
try:
            # Better to ask forgiveness than permission
return self._mobject == other._mobject
except AttributeError:
return str(self) == str(other)
def __ne__(self, other):
try:
return self._mobject != other._mobject
except AttributeError:
return str(self) != str(other)
def __str__(self):
return self.name(namespace=True)
def __repr__(self):
return self.name(namespace=True)
def __add__(self, other):
"""Support legacy + '.attr' behavior
Example:
>>> node = createNode("transform")
>>> getAttr(node + ".tx")
0.0
>>> delete(node)
"""
return self[other.strip(".")]
def __contains__(self, other):
"""Does the attribute `other` exist?"""
return self.hasAttr(other)
def __getitem__(self, key):
"""Get plug from self
Arguments:
key (str, tuple): String lookup of attribute,
optionally pass tuple to include unit.
Example:
>>> node = createNode("transform")
>>> node["translate"] = (1, 1, 1)
>>> node["translate", Meters]
(0.01, 0.01, 0.01)
"""
unit = None
cached = False
if isinstance(key, (list, tuple)):
key, items = key[0], key[1:]
for item in items:
if isinstance(item, _Unit):
unit = item
elif isinstance(item, _Cached):
cached = True
if cached:
try:
return CachedPlug(self._state["values"][key, unit])
except KeyError:
pass
try:
plug = self.findPlug(key)
except RuntimeError:
raise ExistError("%s.%s" % (self.path(), key))
return Plug(self, plug, unit=unit, key=key, modifier=self._modifier)
def __setitem__(self, key, value):
"""Support item assignment of new attributes or values
Example:
>>> _ = cmds.file(new=True, force=True)
>>> node = createNode("transform", name="myNode")
>>> node["myAttr"] = Double(default=1.0)
>>> node["myAttr"] == 1.0
True
>>> node["rotateX", Degrees] = 1.0
>>> node["rotateX"] = Degrees(1)
>>> node["rotateX", Degrees]
1.0
>>> node["myDist"] = Distance()
>>> node["myDist"] = node["translateX"]
>>> node["myDist", Centimeters] = node["translateX", Meters]
>>> round(node["rotateX", Radians], 3)
0.017
>>> node["myDist"] = Distance()
Traceback (most recent call last):
...
ExistError: myDist
>>> node["notExist"] = 5
Traceback (most recent call last):
...
ExistError: |myNode.notExist
>>> delete(node)
"""
if isinstance(value, Plug):
value = value.read()
unit = None
if isinstance(key, (list, tuple)):
key, unit = key
# Convert value to the given unit
if isinstance(value, (list, tuple)):
value = list(unit(v) for v in value)
else:
value = unit(value)
# Create a new attribute
elif isinstance(value, (tuple, list)):
if isinstance(value[0], type):
if issubclass(value[0], _AbstractAttribute):
Attribute, kwargs = value
attr = Attribute(key, **kwargs)
try:
return self.addAttr(attr.create())
except RuntimeError:
# NOTE: I can't be sure this is the only occasion
                        # where this exception is thrown. Stay cautious.
raise ExistError(key)
try:
plug = self.findPlug(key)
except RuntimeError:
raise ExistError("%s.%s" % (self.path(), key))
plug = Plug(self, plug, unit=unit)
if not getattr(self._modifier, "isDone", True):
# Only a few attribute types are supported by a modifier
if _python_to_mod(value, plug, self._modifier._modifier):
return
else:
log.warning(
"Could not write %s via modifier, writing directly.."
% plug
)
# Else, write it immediately
plug.write(value)
def _onDestroyed(self, mobject):
self._destroyed = True
om.MMessage.removeCallbacks(self._state["callbacks"])
for callback in self.onDestroyed:
try:
callback(self)
except Exception:
traceback.print_exc()
_data.pop(self.hex, None)
def _onRemoved(self, mobject, modifier, _=None):
self._removed = True
for callback in self.onRemoved:
try:
callback()
except Exception:
traceback.print_exc()
def __delitem__(self, key):
self.deleteAttr(key)
@withTiming()
def __init__(self, mobject, exists=True, modifier=None):
"""Initialise Node
Private members:
mobject (om.MObject): Wrap this MObject
fn (om.MFnDependencyNode): The corresponding function set
modifier (om.MDagModifier, optional): Operations are
deferred to this modifier.
destroyed (bool): Has this node been destroyed by Maya?
state (dict): Optional state for performance
"""
self._mobject = mobject
self._fn = self._Fn(mobject)
self._modifier = modifier
self._destroyed = False
self._removed = False
self._hashCode = None
self._state = {
"plugs": dict(),
"values": dict(),
"callbacks": list()
}
# Callbacks
self.onDestroyed = list()
self.onRemoved = list()
Stats.NodeInitCount += 1
self._state["callbacks"] += [
# Monitor node deletion, to prevent accidental
# use of MObject past its lifetime which may
# result in a fatal crash.
om.MNodeMessage.addNodeDestroyedCallback(
mobject,
self._onDestroyed, # func
None # clientData
) if not ROGUE_MODE else 0,
om.MNodeMessage.addNodeAboutToDeleteCallback(
mobject,
self._onRemoved,
None
),
]
def plugin(self):
"""Return the user-defined class of the plug-in behind this node"""
return type(self._fn.userNode())
def instance(self):
"""Return the current plug-in instance of this node"""
return self._fn.userNode()
def object(self):
"""Return MObject of this node"""
return self._mobject
def isAlive(self):
"""The node exists somewhere in memory"""
return not self._destroyed
@property
def data(self):
"""Special handling for data stored in the instance
Normally, the initialisation of data could happen in the __init__,
but for some reason the postConstructor of a custom plug-in calls
__init__ twice for every unique hex, which causes any data added
there to be wiped out once the postConstructor is done.
"""
return _data[self.hex]
@property
def destroyed(self):
return self._destroyed
@property
def exists(self):
"""The node exists in both memory *and* scene
Example:
>>> node = createNode("joint")
>>> node.exists
True
>>> cmds.delete(str(node))
>>> node.exists
False
>>> node.destroyed
False
>>> _ = cmds.file(new=True, force=True)
>>> node.exists
False
>>> node.destroyed
True
"""
return not self._removed
@property
def removed(self):
return self._removed
@property
def hashCode(self):
"""Return MObjectHandle.hashCode of this node
        This is a guaranteed-unique integer (long in Python 2)
similar to the UUID of Maya 2016
"""
return self._hashCode
@property
def hexStr(self):
"""Return unique hashCode as hexadecimal string
Example:
>>> node = createNode("transform")
>>> node.hexStr == format(node.hashCode, "x")
True
"""
return self._hexStr
# Alias
code = hashCode
hex = hexStr
@property
def typeId(self):
"""Return the native maya.api.MTypeId of this node
Example:
>>> node = createNode("transform")
>>> node.typeId == tTransform
True
"""
return self._fn.typeId
@property
def typeName(self):
return self._fn.typeName
def isA(self, type):
"""Evaluate whether self is of `type`
Arguments:
type (int): MFn function set constant
Example:
>>> node = createNode("transform")
>>> node.isA(kTransform)
True
>>> node.isA(kShape)
False
"""
return self._mobject.hasFn(type)
def lock(self, value=True):
self._fn.isLocked = value
def isLocked(self):
return self._fn.isLocked
@property
def storable(self):
"""Whether or not to save this node with the file"""
# How is this value queried?
return None
@storable.setter
def storable(self, value):
# The original function is a double negative
self._fn.setDoNotWrite(not bool(value))
# Module-level branch; evaluated on import
@withTiming("findPlug() reuse {time:.4f} ns")
def findPlug(self, name, cached=False):
"""Cache previously found plugs, for performance
Cost: 4.9 microseconds/call
Part of the time taken in querying an attribute is the
act of finding a plug given its name as a string.
This causes a 25% reduction in time taken for repeated
attribute queries. Though keep in mind that state is stored
in the `cmdx` object which currently does not survive rediscovery.
That is, if a node is created and later discovered through a call
to `encode`, then the original and discovered nodes carry one
state each.
Additional challenges include storing the same plug for both
long and short name of said attribute, which is currently not
the case.
Arguments:
name (str): Name of plug to find
cached (bool, optional): Return cached plug, or
throw an exception. Default to False, which
means it will run Maya's findPlug() and cache
the result.
safe (bool, optional): Always find the plug through
Maya's API, defaults to False. This will not perform
any caching and is intended for use during debugging
to spot whether caching is causing trouble.
Example:
>>> node = createNode("transform")
>>> node.findPlug("translateX", cached=True)
Traceback (most recent call last):
...
KeyError: "'translateX' not cached"
>>> plug1 = node.findPlug("translateX")
>>> isinstance(plug1, om.MPlug)
True
>>> plug1 is node.findPlug("translateX")
True
>>> plug1 is node.findPlug("translateX", cached=True)
True
"""
try:
existing = self._state["plugs"][name]
Stats.PlugReuseCount += 1
return existing
except KeyError:
if cached:
raise KeyError("'%s' not cached" % name)
plug = self._fn.findPlug(name, False)
self._state["plugs"][name] = plug
return plug
def update(self, attrs):
"""Apply a series of attributes all at once
This operates similar to a Python dictionary.
Arguments:
attrs (dict): Key/value pairs of name and attribute
Examples:
>>> node = createNode("transform")
>>> node.update({"tx": 5.0, ("ry", Degrees): 30.0})
>>> node["tx"]
5.0
"""
for key, value in attrs.items():
self[key] = value
def clear(self):
"""Clear transient state
A node may cache previously queried values for performance
at the expense of memory. This method erases any cached
values, freeing up memory at the expense of performance.
Example:
>>> node = createNode("transform")
>>> node["translateX"] = 5
>>> node["translateX"]
5.0
>>> # Plug was reused
>>> node["translateX"]
5.0
>>> # Value was reused
>>> node.clear()
>>> node["translateX"]
5.0
>>> # Plug and value was recomputed
"""
self._state["plugs"].clear()
self._state["values"].clear()
@protected
def name(self, namespace=False):
"""Return the name of this node
Arguments:
namespace (bool, optional): Return with namespace,
defaults to False
Example:
>>> node = createNode("transform", name="myName")
>>> node.name()
u'myName'
"""
if namespace:
return self._fn.name()
else:
return self._fn.name().rsplit(":", 1)[-1]
def namespace(self):
"""Get namespace of node
Example:
>>> _ = cmds.file(new=True, force=True)
>>> node = createNode("transform", name="myNode")
>>> node.namespace()
u''
>>> _ = cmds.namespace(add=":A")
>>> _ = cmds.namespace(add=":A:B")
>>> node = createNode("transform", name=":A:B:myNode")
>>> node.namespace()
u'A:B'
"""
name = self._fn.name()
if ":" in name:
# Else it will return name as-is, as namespace
# E.g. Ryan_:leftHand -> Ryan_, but :leftHand -> leftHand
return name.rsplit(":", 1)[0]
return type(name)()
# Alias
def path(self):
return self.name(namespace=True)
shortestPath = path
def pop(self, key):
"""Delete an attribute
Arguments:
key (str): Name of attribute to delete
Example:
>>> node = createNode("transform")
>>> node["myAttr"] = Double()
>>> node.pop("myAttr")
>>> node.hasAttr("myAttr")
False
"""
del self[key]
def dump(self, ignore_error=True):
"""Return dictionary of all attributes
Example:
>>> import json
>>> _ = cmds.file(new=True, force=True)
>>> node = createNode("choice")
>>> dump = node.dump()
>>> isinstance(dump, dict)
True
>>> dump["choice1.caching"]
False
"""
attrs = {}
count = self._fn.attributeCount()
for index in range(count):
obj = self._fn.attribute(index)
plug = self._fn.findPlug(obj, False)
try:
value = Plug(self, plug).read()
except (RuntimeError, TypeError):
# TODO: Support more types of attributes,
# such that this doesn't need to happen.
value = None
if not ignore_error:
raise
attrs[plug.name()] = value
return attrs
def dumps(self, indent=4, sortKeys=True):
"""Return a JSON compatible dictionary of all attributes"""
return json.dumps(self.dump(), indent=indent, sort_keys=sortKeys)
def type(self):
"""Return type name
Example:
>>> node = createNode("choice")
>>> node.type()
u'choice'
"""
return self._fn.typeName
def addAttr(self, attr):
"""Add a new dynamic attribute to node
Arguments:
            attr (_AbstractAttribute or om.MObject): Attribute to add, e.g. Double("myAttr")
Example:
>>> node = createNode("transform")
>>> attr = Double("myAttr", default=5.0)
>>> node.addAttr(attr)
>>> node["myAttr"] == 5.0
True
"""
if isinstance(attr, _AbstractAttribute):
attr = attr.create()
self._fn.addAttribute(attr)
def hasAttr(self, attr):
"""Return whether or not `attr` exists
Arguments:
attr (str): Name of attribute to check
Example:
>>> node = createNode("transform")
>>> node.hasAttr("mysteryAttribute")
False
>>> node.hasAttr("translateX")
True
>>> node["myAttr"] = Double() # Dynamic attribute
>>> node.hasAttr("myAttr")
True
"""
return self._fn.hasAttribute(attr)
def deleteAttr(self, attr):
"""Delete `attr` from node
Arguments:
            attr (Plug or str): Attribute to remove
Example:
>>> node = createNode("transform")
>>> node["myAttr"] = Double()
>>> node.deleteAttr("myAttr")
>>> node.hasAttr("myAttr")
False
"""
if not isinstance(attr, Plug):
attr = self[attr]
attribute = attr._mplug.attribute()
self._fn.removeAttribute(attribute)
def connections(self, type=None, unit=None, plugs=False):
"""Yield plugs of node with a connection to any other plug
Arguments:
unit (int, optional): Return plug in this unit,
e.g. Meters or Radians
type (str, optional): Restrict output to nodes of this type,
e.g. "transform" or "mesh"
plugs (bool, optional): Return plugs, rather than nodes
Example:
>>> _ = cmds.file(new=True, force=True)
>>> a = createNode("transform", name="A")
>>> b = createNode("multDoubleLinear", name="B")
>>> a["ihi"] << b["ihi"]
>>> list(a.connections()) == [b]
True
>>> list(b.connections()) == [a]
True
>>> a.connection() == b
True
"""
for plug in self._fn.getConnections():
mobject = plug.node()
node = Node(mobject)
if not type or type == node._fn.typeName:
plug = Plug(node, plug, unit)
for connection in plug.connections(plugs=plugs):
yield connection
def connection(self, type=None, unit=None, plug=False):
"""Singular version of :func:`connections()`"""
return next(self.connections(type, unit, plug), None)
def rename(self, name):
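        """Rename this node to `name`
        Arguments:
            name (str): New name of the node
        Example:
            >>> _ = cmds.file(new=True, force=True)
            >>> node = createNode("transform", name="myName")
            >>> node.rename("myOtherName")
            >>> node.name()
            u'myOtherName'
        """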
if not getattr(self._modifier, "isDone", True):
return self._modifier.rename(self, name)
mod = om.MDGModifier()
mod.renameNode(self._mobject, name)
mod.doIt()
if ENABLE_PEP8:
is_alive = isAlive
hex_str = hexStr
hash_code = hashCode
type_id = typeId
type_name = typeName
is_a = isA
is_locked = isLocked
find_plug = findPlug
add_attr = addAttr
has_attr = hasAttr
delete_attr = deleteAttr
shortest_path = shortestPath
class DagNode(Node):
"""A Maya DAG node
The difference between this and Node is that a DagNode
can have one or more children and one parent (multiple
parents not supported).
Example:
>>> _ = cmds.file(new=True, force=True)
>>> parent = createNode("transform")
>>> child = createNode("transform", parent=parent)
>>> child.parent() == parent
True
>>> next(parent.children()) == child
True
>>> parent.child() == child
True
>>> sibling = createNode("transform", parent=parent)
>>> child.sibling() == sibling
True
>>> shape = createNode("mesh", parent=child)
>>> child.shape() == shape
True
>>> shape.parent() == child
True
"""
_Fn = om.MFnDagNode
def __str__(self):
return self.path()
def __repr__(self):
return self.path()
def __init__(self, mobject, *args, **kwargs):
super(DagNode, self).__init__(mobject, *args, **kwargs)
self._tfn = om.MFnTransform(mobject)
@protected
def path(self):
"""Return full path to node
Example:
>>> parent = createNode("transform", "myParent")
>>> child = createNode("transform", "myChild", parent=parent)
>>> child.name()
u'myChild'
>>> child.path()
u'|myParent|myChild'
"""
return self._fn.fullPathName()
@protected
def dagPath(self):
"""Return a om.MDagPath for this node
Example:
>>> _ = cmds.file(new=True, force=True)
>>> parent = createNode("transform", name="Parent")
>>> child = createNode("transform", name="Child", parent=parent)
>>> path = child.dagPath()
>>> str(path)
'Child'
>>> str(path.pop())
'Parent'
"""
return om.MDagPath.getAPathTo(self._mobject)
@protected
def shortestPath(self):
"""Return shortest unique path to node
Example:
>>> _ = cmds.file(new=True, force=True)
>>> parent = createNode("transform", name="myParent")
>>> child = createNode("transform", name="myChild", parent=parent)
>>> child.shortestPath()
u'myChild'
>>> child = createNode("transform", name="myChild")
>>> # Now `myChild` could refer to more than a single node
>>> child.shortestPath()
u'|myChild'
"""
return self._fn.partialPathName()
@property
def level(self):
"""Return the number of parents this DAG node has
Example:
>>> parent = createNode("transform")
>>> child = createNode("transform", parent=parent)
>>> child.level
1
>>> parent.level
0
"""
return self.path().count("|") - 1
@property
def boundingBox(self):
"""Return a cmdx.BoundingBox of this DAG node"""
return BoundingBox(self._fn.boundingBox)
def hide(self):
"""Set visibility to False"""
self["visibility"] = False
def show(self):
"""Set visibility to True"""
self["visibility"] = True
def addChild(self, child, index=Last):
"""Add `child` to self
Arguments:
child (Node): Child to add
index (int, optional): Physical location in hierarchy,
defaults to cmdx.Last
Example:
>>> parent = createNode("transform")
>>> child = createNode("transform")
>>> parent.addChild(child)
"""
mobject = child._mobject
self._fn.addChild(mobject, index)
def assembly(self):
"""Return the top-level parent of node
Example:
>>> parent1 = createNode("transform")
>>> parent2 = createNode("transform")
>>> child = createNode("transform", parent=parent1)
>>> grandchild = createNode("transform", parent=child)
>>> child.assembly() == parent1
True
>>> parent2.assembly() == parent2
True
"""
path = self._fn.getPath()
root = None
for level in range(path.length() - 1):
root = path.pop()
return self.__class__(root.node()) if root else self
def transform(self, space=sObject, time=None):
"""Return TransformationMatrix"""
plug = self["worldMatrix"][0] if space == sWorld else self["matrix"]
return TransformationMatrix(plug.asMatrix(time))
def mapFrom(self, other, time=None):
"""Return TransformationMatrix of `other` relative self
Example:
>>> a = createNode("transform")
>>> b = createNode("transform")
>>> a["translate"] = (0, 5, 0)
>>> b["translate"] = (0, -5, 0)
>>> delta = a.mapFrom(b)
>>> delta.translation()[1]
10.0
>>> a = createNode("transform")
>>> b = createNode("transform")
>>> a["translate"] = (0, 5, 0)
>>> b["translate"] = (0, -15, 0)
>>> delta = a.mapFrom(b)
>>> delta.translation()[1]
20.0
"""
a = self["worldMatrix"][0].asMatrix(time)
b = other["worldInverseMatrix"][0].asMatrix(time)
delta = a * b
return TransformationMatrix(delta)
def mapTo(self, other, time=None):
"""Return TransformationMatrix of self relative `other`
See :func:`mapFrom` for examples.
"""
return other.mapFrom(self, time)
# Alias
root = assembly
def parent(self, type=None):
"""Return parent of node
Arguments:
type (str, optional): Return parent, only if it matches this type
Example:
>>> parent = createNode("transform")
>>> child = createNode("transform", parent=parent)
>>> child.parent() == parent
True
>>> not child.parent(type="camera")
True
>>> parent.parent()
"""
mobject = self._fn.parent(0)
if mobject.apiType() == om.MFn.kWorld:
return
cls = self.__class__
if not type or type == self._fn.__class__(mobject).typeName:
return cls(mobject)
def children(self,
type=None,
filter=om.MFn.kTransform,
query=None,
contains=None):
"""Return children of node
All returned children are transform nodes, as specified by the
`filter` argument. For shapes, use the :func:`shapes` method.
The `contains` argument only returns transform nodes containing
a shape of the type provided.
Arguments:
type (str, optional): Return only children that match this type
filter (int, optional): Return only children with this function set
contains (str, optional): Child must have a shape of this type
query (dict, optional): Limit output to nodes with these attributes
Example:
>>> _ = cmds.file(new=True, force=True)
>>> a = createNode("transform", "a")
>>> b = createNode("transform", "b", parent=a)
>>> c = createNode("transform", "c", parent=a)
>>> d = createNode("mesh", "d", parent=c)
>>> list(a.children()) == [b, c]
True
>>> a.child() == b
True
>>> c.child(type="mesh")
>>> c.child(type="mesh", filter=None) == d
True
>>> c.child(type=("mesh", "transform"), filter=None) == d
True
>>> a.child() == b
True
>>> a.child(contains="mesh") == c
True
>>> a.child(contains="nurbsCurve") is None
True
>>> b["myAttr"] = Double(default=5)
>>> a.child(query=["myAttr"]) == b
True
>>> a.child(query=["noExist"]) is None
True
>>> a.child(query={"myAttr": 5}) == b
True
>>> a.child(query={"myAttr": 1}) is None
True
"""
# Shapes have no children
if self.isA(kShape):
return
cls = DagNode
Fn = self._fn.__class__
op = operator.eq
if isinstance(type, (tuple, list)):
op = operator.contains
other = "typeId" if isinstance(type, om.MTypeId) else "typeName"
for index in range(self._fn.childCount()):
try:
mobject = self._fn.child(index)
except RuntimeError:
# TODO: Unsure of exactly when this happens
log.warning(
"Child %d of %s not found, this is a bug" % (index, self)
)
raise
if filter is not None and not mobject.hasFn(filter):
continue
if not type or op(type, getattr(Fn(mobject), other)):
node = cls(mobject)
if not contains or node.shape(type=contains):
if query is None:
yield node
elif isinstance(query, dict):
try:
if all(node[key] == value
for key, value in query.items()):
yield node
except ExistError:
continue
else:
if all(key in node for key in query):
yield node
def child(self,
type=None,
filter=om.MFn.kTransform,
query=None,
contains=None):
return next(self.children(type, filter, query, contains), None)
def shapes(self, type=None, query=None):
return self.children(type, kShape, query)
def shape(self, type=None):
return next(self.shapes(type), None)
def siblings(self, type=None, filter=om.MFn.kTransform):
parent = self.parent()
if parent is not None:
for child in parent.children(type=type, filter=filter):
if child != self:
yield child
def sibling(self, type=None, filter=None):
return next(self.siblings(type, filter), None)
    # This branch is chosen once, when the class body is evaluated at
    # import-time rather than per call, for that extra performance boost.
if hasattr(om, "MItDag"):
def descendents(self, type=None):
"""Faster and more efficient dependency graph traversal
Requires Maya 2017+
Example:
>>> grandparent = createNode("transform")
>>> parent = createNode("transform", parent=grandparent)
>>> child = createNode("transform", parent=parent)
>>> mesh = createNode("mesh", parent=child)
>>> it = grandparent.descendents(type=tMesh)
>>> next(it) == mesh
True
>>> next(it)
Traceback (most recent call last):
...
StopIteration
"""
type = type or om.MFn.kInvalid
typeName = None
# Support filtering by typeName
if isinstance(type, string_types):
typeName = type
type = om.MFn.kInvalid
it = om.MItDag(om.MItDag.kDepthFirst, om.MFn.kInvalid)
it.reset(
self._mobject,
om.MItDag.kDepthFirst,
om.MIteratorType.kMObject
)
it.next() # Skip self
while not it.isDone():
mobj = it.currentItem()
node = DagNode(mobj)
if typeName is None:
if not type or type == node._fn.typeId:
yield node
else:
if not typeName or typeName == node._fn.typeName:
yield node
it.next()
else:
def descendents(self, type=None):
"""Recursive, depth-first search; compliant with MItDag of 2017+
Example:
>>> grandparent = createNode("transform")
>>> parent = createNode("transform", parent=grandparent)
>>> child = createNode("transform", parent=parent)
>>> mesh = createNode("mesh", parent=child)
>>> it = grandparent.descendents(type=tMesh)
>>> next(it) == mesh
True
>>> next(it)
Traceback (most recent call last):
...
StopIteration
"""
def _descendents(node, children=None):
children = children or list()
children.append(node)
for child in node.children(filter=None):
_descendents(child, children)
return children
# Support filtering by typeName
typeName = None
            if isinstance(type, string_types):
typeName = type
type = om.MFn.kInvalid
descendents = _descendents(self)[1:] # Skip self
for child in descendents:
if typeName is None:
if not type or type == child._fn.typeId:
yield child
else:
if not typeName or typeName == child._fn.typeName:
yield child
def descendent(self, type=om.MFn.kInvalid):
"""Singular version of :func:`descendents()`
A recursive, depth-first search.
.. code-block:: python
a
|
b---d
| |
c e
Example:
>>> _ = cmds.file(new=True, force=True)
>>> a = createNode("transform", "a")
>>> b = createNode("transform", "b", parent=a)
>>> c = createNode("transform", "c", parent=b)
>>> d = createNode("transform", "d", parent=b)
>>> e = createNode("transform", "e", parent=d)
>>> a.descendent() == a.child()
True
>>> list(a.descendents()) == [b, c, d, e]
True
>>> f = createNode("mesh", "f", parent=e)
>>> list(a.descendents(type="mesh")) == [f]
True
"""
return next(self.descendents(type), None)
def duplicate(self):
"""Return a duplicate of self"""
return self.__class__(self._fn.duplicate())
def clone(self, name=None, parent=None, worldspace=False):
"""Return a clone of self
A "clone" assignes the .outMesh attribute of a mesh node
to the `.inMesh` of the resulting clone.
Supports:
- mesh
Arguments:
name (str, optional): Name of newly created clone
parent (DagNode, optional): Parent to newly cloned node
worldspace (bool, optional): Translate output to worldspace
"""
if self.isA(kShape) and self.typeName == "mesh":
            assert parent is not None, "mesh cloning requires parent argument"
            name = name or parent.name() + "Clone"
with DagModifier() as mod:
mesh = mod.createNode("mesh", name, parent)
mesh["inMesh"] << self["outMesh"]
return mesh
else:
raise TypeError("Unsupported clone target: %s" % self)
def isLimited(self, typ):
return self._tfn.isLimited(typ)
def limitValue(self, typ):
return self._tfn.limitValue(typ)
def enableLimit(self, typ, state):
return self._tfn.enableLimit(typ, state)
def setLimit(self, typ, value):
return self._tfn.setLimit(typ, value)
if ENABLE_PEP8:
shortest_path = shortestPath
add_child = addChild
dag_path = dagPath
map_from = mapFrom
map_to = mapTo
is_limited = isLimited
limit_value = limitValue
set_limit = setLimit
enable_limit = enableLimit
bounding_box = boundingBox
# MFnTransform Limit Types
kRotateMaxX = 13
kRotateMaxY = 15
kRotateMaxZ = 17
kRotateMinX = 12
kRotateMinY = 14
kRotateMinZ = 16
kScaleMaxX = 1
kScaleMaxY = 3
kScaleMaxZ = 5
kScaleMinX = 0
kScaleMinY = 2
kScaleMinZ = 4
kShearMaxXY = 7
kShearMaxXZ = 9
kShearMaxYZ = 11
kShearMinXY = 6
kShearMinXZ = 8
kShearMinYZ = 10
kTranslateMaxX = 19
kTranslateMaxY = 21
kTranslateMaxZ = 23
kTranslateMinX = 18
kTranslateMinY = 20
kTranslateMinZ = 22
class ObjectSet(Node):
"""Support set-type operations on Maya sets
Caveats
        1. MFnSet was introduced in Maya 2016; this class backports
            that behaviour to Maya 2015 SP3
        2. Adding a DAG node as a DG node persists its function set,
            such that when you query it, it'll return the name rather
            than the path.
            Therefore, when adding a node to an object set, it's important
            that it is added as either a DAG or a DG node, depending on
            what it is.
This class manages this automatically.
"""
@protected
def shortestPath(self):
return self.name(namespace=True)
def __iter__(self):
for member in self.members():
yield member
def add(self, member):
"""Add single `member` to set
Arguments:
member (cmdx.Node): Node to add
"""
return self.update([member])
def remove(self, members):
mobj = _encode1(self.name(namespace=True))
selectionList = om1.MSelectionList()
if not isinstance(members, (tuple, list)):
selectionList.add(members.path())
else:
for member in members:
selectionList.add(member.path())
fn = om1.MFnSet(mobj)
fn.removeMembers(selectionList)
def update(self, members):
"""Add several `members` to set
Arguments:
members (list): Series of cmdx.Node instances
"""
cmds.sets(list(map(str, members)), forceElement=self.path())
def clear(self):
"""Remove all members from set"""
mobj = _encode1(self.name(namespace=True))
fn = om1.MFnSet(mobj)
fn.clear()
def sort(self, key=lambda o: (o.typeName, o.path())):
"""Sort members of set by `key`
Arguments:
key (lambda): See built-in `sorted(key)` for reference
"""
members = sorted(
self.members(),
key=key
)
self.clear()
self.update(members)
def descendent(self, type=None):
"""Return the first descendent"""
return next(self.descendents(type), None)
def descendents(self, type=None):
"""Return hierarchy of objects in set"""
for member in self.members(type=type):
yield member
try:
for child in member.descendents(type=type):
yield child
except AttributeError:
continue
def flatten(self, type=None):
"""Return members, converting nested object sets into its members
Example:
>>> from maya import cmds
>>> _ = cmds.file(new=True, force=True)
>>> a = cmds.createNode("transform", name="a")
>>> b = cmds.createNode("transform", name="b")
>>> c = cmds.createNode("transform", name="c")
>>> cmds.select(a)
>>> gc = cmds.sets([a], name="grandchild")
>>> cc = cmds.sets([gc, b], name="child")
>>> parent = cmds.sets([cc, c], name="parent")
>>> mainset = encode(parent)
>>> sorted(mainset.flatten(), key=lambda n: n.name())
[|a, |b, |c]
"""
members = set()
def recurse(objset):
for member in objset:
if member.isA(om.MFn.kSet):
recurse(member)
elif type is not None:
if type == member.typeName:
members.add(member)
else:
members.add(member)
recurse(self)
return list(members)
def member(self, type=None):
"""Return the first member"""
return next(self.members(type), None)
def members(self, type=None):
op = operator.eq
other = "typeId"
if isinstance(type, string_types):
other = "typeName"
if isinstance(type, (tuple, list)):
op = operator.contains
for node in cmds.sets(self.name(namespace=True), query=True) or []:
node = encode(node)
if not type or op(type, getattr(node._fn, other)):
yield node
class AnimCurve(Node):
if __maya_version__ >= 2016:
def __init__(self, mobj, exists=True, modifier=None):
super(AnimCurve, self).__init__(mobj, exists, modifier)
self._fna = oma.MFnAnimCurve(mobj)
def key(self, time, value, interpolation=Linear):
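            """Set `value` at `time`, reusing any existing key at that exact time
            Arguments:
                time (float): Time at which to set the key, in the UI unit
                value (float): Value to key
                interpolation (int, optional): Tangent type, defaults to Linear
            """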
time = om.MTime(time, om.MTime.uiUnit())
index = self._fna.find(time)
            if index is not None:  # A key already exists at this exact time
self._fna.setValue(index, value)
else:
self._fna.addKey(time, value, interpolation, interpolation)
def keys(self, times, values, interpolation=Linear):
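            """Set multiple keyframes in one call
            Arguments:
                times (list): Times at which to set a key
                values (list): Values to key, one per time
                interpolation (int, optional): Tangent type, defaults to Linear
            """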
times = map(lambda t: om.MTime(t, TimeUnit), times)
try:
self._fna.addKeys(times, values)
except RuntimeError:
                # The errors provided by Maya aren't very descriptive;
                # help a brother out by looking for common problems.
if not times:
log.error("No times were provided: %s" % str(times))
if not values:
log.error("No values were provided: %s" % str(values))
if len(values) != len(times):
log.error(
"Count mismatch; len(times)=%d, len(values)=%d" % (
len(times), len(values)
)
)
raise
class Plug(object):
def __abs__(self):
"""Return absolute value of plug
Example:
>>> node = createNode("transform")
>>> node["tx"] = -10
>>> abs(node["tx"])
10.0
"""
return abs(self.read())
def __bool__(self):
"""if plug:
Example:
>>> node = createNode("transform")
>>> node["tx"] = 10
>>> if node["tx"]:
... True
...
True
"""
return bool(self.read())
# Python 3
__nonzero__ = __bool__
def __float__(self):
"""Return plug as floating point value
Example:
>>> node = createNode("transform")
>>> float(node["visibility"])
1.0
"""
return float(self.read())
def __int__(self):
"""Return plug as int
Example:
>>> node = createNode("transform")
>>> int(node["visibility"])
1
"""
return int(self.read())
def __eq__(self, other):
"""Compare plug to `other`
Example:
>>> node = createNode("transform")
>>> node["visibility"] == True
True
>>> node["visibility"] == node["nodeState"]
False
>>> node["visibility"] != node["nodeState"]
True
"""
if isinstance(other, Plug):
other = other.read()
return self.read() == other
def __ne__(self, other):
if isinstance(other, Plug):
other = other.read()
return self.read() != other
def __neg__(self):
"""Negate unary operator
Example:
>>> node = createNode("transform")
>>> node["visibility"] = 1
>>> -node["visibility"]
-1
"""
return -self.read()
def __div__(self, other):
"""Python 2.x division
Example:
>>> node = createNode("transform")
>>> node["tx"] = 5
>>> node["ty"] = 2
>>> node["tx"] / node["ty"]
2.5
"""
if isinstance(other, Plug):
other = other.read()
return self.read() / other
def __truediv__(self, other):
"""Float division, e.g. self / other"""
if isinstance(other, Plug):
other = other.read()
return self.read() / other
def __add__(self, other):
"""Support legacy add string to plug
Note:
Adding to short name is faster, e.g. node["t"] + "x",
than adding to longName, e.g. node["translate"] + "X"
Example:
>>> node = createNode("transform")
>>> node["tx"] = 5
>>> node["translate"] + "X"
5.0
>>> node["t"] + "x"
5.0
>>> try:
... node["t"] + node["r"]
... except TypeError:
... error = True
...
>>> error
True
"""
        if isinstance(other, string_types):
try:
# E.g. node["t"] + "x"
return self._node[self.name() + other]
except ExistError:
# E.g. node["translate"] + "X"
return self._node[self.name(long=True) + other]
raise TypeError(
"unsupported operand type(s) for +: 'Plug' and '%s'"
% type(other)
)
def __iadd__(self, other):
"""Support += operator, for .append()
Example:
>>> node = createNode("transform")
>>> node["myArray"] = Double(array=True)
>>> node["myArray"].append(1.0)
>>> node["myArray"].extend([2.0, 3.0])
>>> node["myArray"] += 5.1
>>> node["myArray"] += [1.1, 2.3, 999.0]
>>> node["myArray"][0]
1.0
>>> node["myArray"][6]
999.0
>>> node["myArray"][-1]
999.0
"""
if isinstance(other, (tuple, list)):
for entry in other:
self.append(entry)
else:
self.append(other)
return self
def __str__(self):
"""Return value as str
Example:
>>> node = createNode("transform")
>>> str(node["tx"])
'0.0'
"""
return str(self.read())
def __repr__(self):
return str(self.read())
def __rshift__(self, other):
"""Support connecting attributes via A >> B"""
self.connect(other)
def __lshift__(self, other):
"""Support connecting attributes via A << B"""
other.connect(self)
def __floordiv__(self, other):
"""Disconnect attribute via A // B
Example:
>>> nodeA = createNode("transform")
>>> nodeB = createNode("transform")
>>> nodeA["tx"] >> nodeB["tx"]
>>> nodeA["tx"] = 5
>>> nodeB["tx"] == 5
True
>>> nodeA["tx"] // nodeB["tx"]
>>> nodeA["tx"] = 0
>>> nodeB["tx"] == 5
True
"""
self.disconnect(other)
def __iter__(self):
"""Iterate over value as a tuple
Example:
>>> node = createNode("transform")
>>> node["translate"] = (0, 1, 2)
>>> for index, axis in enumerate(node["translate"]):
... assert axis == float(index)
... assert isinstance(axis, Plug)
...
>>> a = createNode("transform")
>>> a["myArray"] = Message(array=True)
>>> b = createNode("transform")
>>> c = createNode("transform")
>>> a["myArray"][0] << b["message"]
>>> a["myArray"][1] << c["message"]
>>> a["myArray"][0] in list(a["myArray"])
True
>>> a["myArray"][1] in list(a["myArray"])
True
>>> for single in node["visibility"]:
... print(single)
...
True
>>> node = createNode("wtAddMatrix")
>>> node["wtMatrix"][0]["weightIn"] = 1.0
"""
if self._mplug.isArray:
# getExisting... returns indices currently in use, which is
# important if the given array is *sparse*. That is, if
# indexes 5, 7 and 8 are used. If we simply call
# `evaluateNumElements` then it'll return a single number
# we could use to `range()` from, but that would only work
# if the indices were contiguous.
for index in self._mplug.getExistingArrayAttributeIndices():
yield self[index]
elif self._mplug.isCompound:
for index in range(self._mplug.numChildren()):
yield self[index]
else:
values = self.read()
# Facilitate single-value attributes
values = values if isinstance(values, (tuple, list)) else [values]
for value in values:
yield value
def __getitem__(self, index):
"""Read from child of array or compound plug
Arguments:
            index (int): Logical index of plug (not to be confused with its physical index)
Example:
>>> _ = cmds.file(new=True, force=True)
>>> node = createNode("transform", name="mynode")
>>> node["translate"][0].read()
0.0
>>> node["visibility"][0]
Traceback (most recent call last):
...
TypeError: |mynode.visibility does not support indexing
>>> node["translate"][2] = 5.1
>>> node["translate"][2].read()
5.1
"""
cls = self.__class__
if isinstance(index, int):
# Support backwards-indexing
if index < 0:
index = self.count() - abs(index)
if self._mplug.isArray:
item = self._mplug.elementByLogicalIndex(index)
return cls(self._node, item, self._unit)
elif self._mplug.isCompound:
item = self._mplug.child(index)
return cls(self._node, item, self._unit)
else:
raise TypeError(
"%s does not support indexing" % self.path()
)
elif isinstance(index, string_types):
            # Compound attributes have no equivalent
            # to "MFnDependencyNode.findPlug()" and must
            # be searched by hand.
if self._mplug.isCompound:
for child in range(self._mplug.numChildren()):
child = self._mplug.child(child)
_, name = child.name().rsplit(".", 1)
if index == name:
return cls(self._node, child)
else:
raise TypeError("'%s' is not a compound attribute"
% self.path())
raise ExistError("'%s' was not found" % index)
def __setitem__(self, index, value):
"""Write to child of array or compound plug
Example:
>>> node = createNode("transform")
>>> node["translate"][0] = 5
>>> node["tx"]
5.0
"""
self[index].write(value)
def __init__(self, node, mplug, unit=None, key=None, modifier=None):
"""A Maya plug
Arguments:
node (Node): Parent Node of plug
mplug (maya.api.OpenMaya.MPlug): Internal Maya plug
unit (int, optional): Unit with which to read plug
"""
assert isinstance(node, Node), "%s is not a Node" % node
self._node = node
self._mplug = mplug
self._unit = unit
self._cached = None
self._key = key
self._modifier = modifier
def plug(self):
return self._mplug
@property
def isArray(self):
return self._mplug.isArray
@property
def isCompound(self):
return self._mplug.isCompound
def append(self, value):
"""Add `value` to end of self, which is an array
Arguments:
value (object): If value, create a new entry and append it.
If cmdx.Plug, create a new entry and connect it.
Example:
>>> _ = cmds.file(new=True, force=True)
>>> node = createNode("transform", name="appendTest")
>>> node["myArray"] = Double(array=True)
>>> node["myArray"].append(1.0)
>>> node["notArray"] = Double()
>>> node["notArray"].append(2.0)
Traceback (most recent call last):
...
TypeError: "|appendTest.notArray" was not an array attribute
"""
if not self._mplug.isArray:
raise TypeError("\"%s\" was not an array attribute" % self.path())
index = self.count()
if isinstance(value, Plug):
self[index] << value
else:
self[index].write(value)
def extend(self, values):
"""Append multiple values to the end of an array
Arguments:
values (tuple): If values, create a new entry and append it.
If cmdx.Plug's, create a new entry and connect it.
Example:
>>> node = createNode("transform")
>>> node["myArray"] = Double(array=True)
>>> node["myArray"].extend([1.0, 2.0, 3.0])
>>> node["myArray"][0]
1.0
>>> node["myArray"][-1]
3.0
"""
for value in values:
self.append(value)
def count(self):
return self._mplug.evaluateNumElements()
def asDouble(self, time=None):
"""Return plug as double (Python float)
Example:
>>> node = createNode("transform")
>>> node["translateX"] = 5.0
>>> node["translateX"].asDouble()
5.0
"""
if time is not None:
return self._mplug.asDouble(DGContext(time=time))
return self._mplug.asDouble()
def asMatrix(self, time=None):
"""Return plug as MatrixType
Example:
>>> node1 = createNode("transform")
>>> node2 = createNode("transform", parent=node1)
>>> node1["translate"] = (0, 5, 0)
>>> node2["translate"] = (0, 5, 0)
>>> plug1 = node1["matrix"]
>>> plug2 = node2["worldMatrix"][0]
>>> mat1 = plug1.asMatrix()
>>> mat2 = plug2.asMatrix()
>>> mat = mat1 * mat2
>>> tm = TransformationMatrix(mat)
>>> list(tm.translation())
[0.0, 15.0, 0.0]
"""
if time is not None:
context = DGContext(time=time)
obj = self._mplug.asMObject(context)
else:
obj = self._mplug.asMObject()
return om.MFnMatrixData(obj).matrix()
def asTransformationMatrix(self, time=None):
"""Return plug as TransformationMatrix
Example:
>>> node = createNode("transform")
>>> node["translateY"] = 12
>>> node["rotate"] = 1
>>> tm = node["matrix"].asTm()
>>> map(round, tm.rotation())
[1.0, 1.0, 1.0]
>>> list(tm.translation())
[0.0, 12.0, 0.0]
"""
return TransformationMatrix(self.asMatrix(time))
# Alias
asTm = asTransformationMatrix
def asEulerRotation(self, order=kXYZ, time=None):
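        """Return plug as an om.MEulerRotation, with rotation order `order`"""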
value = self.read(time=time)
return om.MEulerRotation(value, order)
def asQuaternion(self, time=None):
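        """Return plug, read as Euler angles, converted to a Quaternion"""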
value = self.read(time=time)
        return Euler(value).asQuaternion()
def asVector(self, time=None):
assert self.isArray or self.isCompound, "'%s' not an array" % self
return Vector(self.read(time=time))
@property
def connected(self):
"""Return whether or not this attribute is connected (to anything)"""
return self.connection() is not None
@property
def locked(self):
return self._mplug.isLocked
@locked.setter
def locked(self, value):
"""Lock attribute"""
elements = (
self
if self.isArray or self.isCompound
else [self]
)
        # Use setAttr in place of MPlug.isLocked, as the latter
        # isn't persisted with the scene on save if the attribute is dynamic.
for el in elements:
cmds.setAttr(el.path(), lock=value)
def lock(self):
self.locked = True
def unlock(self):
self.locked = False
@property
def channelBox(self):
"""Is the attribute visible in the Channel Box?"""
if self.isArray or self.isCompound:
return all(
plug._mplug.isChannelBox
for plug in self
)
else:
return self._mplug.isChannelBox
@channelBox.setter
def channelBox(self, value):
elements = (
self
if self.isArray or self.isCompound
else [self]
)
        # Use setAttr in place of MPlug.isChannelBox, as the latter
        # isn't persisted with the scene on save if the attribute is dynamic.
for el in elements:
cmds.setAttr(el.path(), keyable=value, channelBox=value)
@property
def keyable(self):
"""Is the attribute keyable?"""
if self.isArray or self.isCompound:
return all(
plug._mplug.isKeyable
for plug in self
)
else:
return self._mplug.isKeyable
@keyable.setter
def keyable(self, value):
elements = (
self
if self.isArray or self.isCompound
else [self]
)
        # Use setAttr in place of MPlug.isKeyable, as the latter
        # isn't persisted with the scene on save if the attribute is dynamic.
for el in elements:
cmds.setAttr(el.path(), keyable=value)
@property
def hidden(self):
return om.MFnAttribute(self._mplug.attribute()).hidden
@hidden.setter
def hidden(self, value):
        pass  # Note: writing `hidden` is currently not implemented
def hide(self):
"""Hide attribute from channel box
Note: An attribute cannot be hidden from the channel box
and keyable at the same time. Therefore, this method
also makes the attribute non-keyable.
Supports array and compound attributes too.
"""
self.keyable = False
self.channelBox = False
def lockAndHide(self):
self.lock()
self.hide()
@property
def default(self):
"""Return default value of plug"""
return _plug_to_default(self._mplug)
def reset(self):
"""Restore plug to default value"""
if self.writable:
self.write(self.default)
else:
raise TypeError(
"Cannot reset non-writable attribute '%s'" % self.path()
)
@property
def writable(self):
"""Can the user write to this attribute?
Convenience for combined call to `plug.connected`
and `plug.locked`.
Example:
>> if node["translateX"].writable:
.. node["translateX"] = 5
"""
return not any([self.connected, self.locked])
def show(self):
"""Show attribute in channel box
Note: An attribute can be both visible in the channel box
and non-keyable, therefore, unlike :func:`hide()`, this
method does not alter the keyable state of the attribute.
"""
self.channelBox = True
def type(self):
"""Retrieve API type of plug as string
Example:
>>> node = createNode("transform")
>>> node["translate"].type()
'kAttribute3Double'
>>> node["translateX"].type()
'kDoubleLinearAttribute'
"""
return self._mplug.attribute().apiTypeStr
def path(self):
return "%s.%s" % (
self._node.path(), self._mplug.partialName(
includeNodeName=False,
useLongNames=True,
useFullAttributePath=True
)
)
def name(self, long=False):
return self._mplug.partialName(
includeNodeName=False,
useLongNames=long,
useFullAttributePath=True
)
def read(self, unit=None, time=None):
"""Read attribute value
Arguments:
unit (int, optional): Unit with which to read plug
time (float, optional): Time at which to read plug
Example:
>>> node = createNode("transform")
>>> node["ty"] = 100.0
>>> node["ty"].read()
100.0
>>> node["ty"].read(unit=Meters)
1.0
"""
unit = unit if unit is not None else self._unit
context = None if time is None else DGContext(time=time)
try:
value = _plug_to_python(
self._mplug,
unit=unit,
context=context
)
# Store cached value
self._node._state["values"][self._key, unit] = value
return value
except RuntimeError:
raise
except TypeError:
# Expected errors
log.error("'%s': failed to read attribute" % self.path())
raise
def write(self, value):
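        """Write `value` to this plug, deferring to any pending modifier
        Arguments:
            value (object): Value to write, e.g. a float, string or tuple
        """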
if not getattr(self._modifier, "isDone", True):
return self._modifier.setAttr(self, value)
try:
_python_to_plug(value, self)
self._cached = value
except RuntimeError:
raise
except TypeError:
log.error("'%s': failed to write attribute" % self.path())
raise
def connect(self, other, force=True):
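        """Connect this plug to the input of `other`
        Arguments:
            other (Plug): Destination plug
            force (bool, optional): Break any existing input connection
                to `other` first, defaults to True
        """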
if not getattr(self._modifier, "isDone", True):
return self._modifier.connect(self, other, force)
mod = om.MDGModifier()
if force:
# Disconnect any plug connected to `other`
for plug in other._mplug.connectedTo(True, False):
mod.disconnect(plug, other._mplug)
mod.connect(self._mplug, other._mplug)
mod.doIt()
def disconnect(self, other=None, source=True, destination=True):
"""Disconnect self from `other`
Arguments:
other (Plug, optional): If none is provided, disconnect everything
Example:
>>> node1 = createNode("transform")
>>> node2 = createNode("transform")
>>> node2["tx"].connection() is None
True
>>> node2["ty"].connection() is None
True
>>>
>>> node2["tx"] << node1["tx"]
>>> node2["ty"] << node1["ty"]
>>> node2["ty"].connection() is None
False
>>> node2["tx"].connection() is None
False
>>>
>>> node2["tx"].disconnect(node1["tx"])
>>> node2["ty"].disconnect()
>>> node2["tx"].connection() is None
True
>>> node2["ty"].connection() is None
True
"""
other = getattr(other, "_mplug", None)
if not getattr(self._modifier, "isDone", True):
mod = self._modifier
mod.disconnect(self._mplug, other, source, destination)
# Don't do it, leave that to the parent context
else:
mod = DGModifier()
mod.disconnect(self._mplug, other, source, destination)
mod.doIt()
def connections(self,
type=None,
source=True,
destination=True,
plugs=False,
unit=None):
"""Yield plugs connected to self
Arguments:
type (int, optional): Only return nodes of this type
source (bool, optional): Return source plugs,
default is True
destination (bool, optional): Return destination plugs,
default is True
plugs (bool, optional): Return connected plugs instead of nodes
unit (int, optional): Return plug in this unit, e.g. Meters
Example:
>>> _ = cmds.file(new=True, force=True)
>>> a = createNode("transform", name="A")
>>> b = createNode("multDoubleLinear", name="B")
>>> a["ihi"] << b["ihi"]
>>> a["ihi"].connection() == b
True
>>> b["ihi"].connection() == a
True
>>> a["ihi"]
2
"""
op = operator.eq
other = "typeId"
if isinstance(type, string_types):
other = "typeName"
if isinstance(type, (tuple, list)):
op = operator.contains
for plug in self._mplug.connectedTo(source, destination):
mobject = plug.node()
node = Node(mobject)
if not type or op(type, getattr(node._fn, other)):
yield Plug(node, plug, unit) if plugs else node
def connection(self,
type=None,
source=True,
destination=True,
plug=False,
unit=None):
"""Return first connection from :func:`connections()`"""
return next(self.connections(type=type,
source=source,
destination=destination,
plugs=plug,
unit=unit), None)
def source(self, unit=None):
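        """Return the plug connected as input to this plug, or None"""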
cls = self.__class__
plug = self._mplug.source()
node = Node(plug.node())
if not plug.isNull:
return cls(node, plug, unit)
def node(self):
return self._node
if ENABLE_PEP8:
as_double = asDouble
as_matrix = asMatrix
as_transformation_matrix = asTransformationMatrix
as_euler_rotation = asEulerRotation
as_quaternion = asQuaternion
as_vector = asVector
channel_box = channelBox
lock_and_hide = lockAndHide
class TransformationMatrix(om.MTransformationMatrix):
"""A more readable version of Maya's MTransformationMatrix
Added:
- Takes tuples/lists in place of MVector and other native types
- Support for multiplication
- Support for getting individual axes
- Support for direct access to the quaternion
Arguments:
matrix (Matrix, TransformationMatrix, optional): Original constructor
translate (tuple, Vector, optional): Initial translate value
rotate (tuple, Vector, optional): Initial rotate value
scale (tuple, Vector, optional): Initial scale value
"""
def __init__(self, matrix=None, translate=None, rotate=None, scale=None):
# It doesn't like being handed `None`
args = [matrix] if matrix is not None else []
super(TransformationMatrix, self).__init__(*args)
if translate is not None:
self.setTranslation(translate)
if rotate is not None:
self.setRotation(rotate)
if scale is not None:
self.setScale(scale)
def __mul__(self, other):
if isinstance(other, (tuple, list)):
other = Vector(*other)
if isinstance(other, om.MVector):
p = self.translation()
q = self.quaternion()
return p + q * other
elif isinstance(other, om.MMatrix):
return type(self)(self.asMatrix() * other)
elif isinstance(other, om.MTransformationMatrix):
return type(self)(self.asMatrix() * other.asMatrix())
else:
raise TypeError(
"unsupported operand type(s) for *: '%s' and '%s'"
% (type(self).__name__, type(other).__name__)
)
@property
def xAxis(self):
return self.quaternion() * Vector(1, 0, 0)
@property
def yAxis(self):
return self.quaternion() * Vector(0, 1, 0)
@property
def zAxis(self):
return self.quaternion() * Vector(0, 0, 1)
def translateBy(self, vec, space=None):
space = space or sTransform
if isinstance(vec, (tuple, list)):
vec = Vector(vec)
return super(TransformationMatrix, self).translateBy(vec, space)
def rotateBy(self, rot, space=None):
"""Handle arguments conveniently
- Allow for optional `space` argument
- Automatically convert tuple to Vector
Arguments:
rot (Vector, Quaternion): Rotation to add
"""
space = space or sTransform
if isinstance(rot, (tuple, list)):
rot = Vector(rot)
if isinstance(rot, om.MVector):
rot = EulerRotation(rot)
return super(TransformationMatrix, self).rotateBy(rot, space)
def quaternion(self):
"""Return transformation matrix as a Quaternion"""
return Quaternion(self.rotation(asQuaternion=True))
def rotatePivot(self, space=None):
"""This method does not typically support optional arguments"""
space = space or sTransform
return super(TransformationMatrix, self).rotatePivot(space)
def translation(self, space=None):
"""This method does not typically support optional arguments"""
space = space or sTransform
return super(TransformationMatrix, self).translation(space)
def setTranslation(self, trans, space=None):
if isinstance(trans, Plug):
trans = trans.as_vector()
if isinstance(trans, (tuple, list)):
trans = Vector(*trans)
space = space or sTransform
return super(TransformationMatrix, self).setTranslation(trans, space)
def scaleBy(self, space=None):
"""This method does not typically support optional arguments"""
space = space or sTransform
return Vector(super(TransformationMatrix, self).scale(space))
def setScale(self, seq, space=None):
"""This method does not typically support optional arguments"""
if isinstance(seq, Plug):
seq = seq.as_vector()
if isinstance(seq, (tuple, list)):
seq = Vector(*seq)
space = space or sTransform
return super(TransformationMatrix, self).setScale(seq, space)
def rotation(self, asQuaternion=False):
return super(TransformationMatrix, self).rotation(asQuaternion)
def setRotation(self, rot):
"""Interpret three values as an euler rotation"""
if isinstance(rot, Plug):
rot = rot.as_vector()
if isinstance(rot, (tuple, list)):
try:
rot = Vector(rot)
except ValueError:
traceback.print_exc()
raise ValueError(
"I tried automatically converting your "
"tuple to a Vector, but couldn't.."
)
if isinstance(rot, Vector):
rot = EulerRotation(rot)
return super(TransformationMatrix, self).setRotation(rot)
def asMatrix(self):
return MatrixType(super(TransformationMatrix, self).asMatrix())
def asMatrixInverse(self):
return MatrixType(super(TransformationMatrix, self).asMatrixInverse())
# A more intuitive alternative
translate = translateBy
rotate = rotateBy
scale = scaleBy
if ENABLE_PEP8:
x_axis = xAxis
y_axis = yAxis
z_axis = zAxis
translate_by = translateBy
rotate_by = rotateBy
set_translation = setTranslation
set_rotation = setRotation
set_scale = setScale
as_matrix = asMatrix
as_matrix_inverse = asMatrixInverse
class MatrixType(om.MMatrix):
def __call__(self, *item):
"""Native API 2.0 MMatrix does not support indexing
API 1.0 however *does*, except only for elements
and not rows. Screw both of those, indexing isn't hard.
Arguments:
item (int, tuple): 1 integer for row, 2 for element
Identity/default matrix:
[[1.0, 0.0, 0.0, 0.0]]
[[0.0, 1.0, 0.0, 0.0]]
[[0.0, 0.0, 1.0, 0.0]]
[[0.0, 0.0, 0.0, 1.0]]
Example:
>>> m = MatrixType()
>>> m(0, 0)
1.0
>>> m(0, 1)
0.0
>>> m(1, 1)
1.0
>>> m(2, 1)
0.0
>>> m(3, 3)
1.0
>>>
>>> m(0)
(1.0, 0.0, 0.0, 0.0)
"""
if len(item) == 1:
return self.row(*item)
elif len(item) == 2:
return self.element(*item)
else:
raise ValueError(
"Must provide either 1 or 2 coordinates, "
"for row and element respectively"
)
def __mul__(self, other):
return type(self)(super(MatrixType, self).__mul__(other))
def __div__(self, other):
return type(self)(super(MatrixType, self).__div__(other))
def inverse(self):
return type(self)(super(MatrixType, self).inverse())
def row(self, index):
values = tuple(self)
return (
values[index * 4 + 0],
values[index * 4 + 1],
values[index * 4 + 2],
values[index * 4 + 3]
)
def element(self, row, col):
values = tuple(self)
return values[row * 4 + col % 4]
# Alias
Transformation = TransformationMatrix
Tm = TransformationMatrix
Mat = MatrixType
Mat4 = MatrixType
Matrix4 = MatrixType
class Vector(om.MVector):
"""Maya's MVector
Example:
>>> vec = Vector(1, 0, 0)
>>> vec * Vector(0, 1, 0) # Dot product
0.0
>>> vec ^ Vector(0, 1, 0) # Cross product
maya.api.OpenMaya.MVector(0, 0, 1)
"""
def __add__(self, value):
if isinstance(value, (int, float)):
return type(self)(
self.x + value,
self.y + value,
self.z + value,
)
return super(Vector, self).__add__(value)
def __iadd__(self, value):
if isinstance(value, (int, float)):
return type(self)(
self.x + value,
self.y + value,
self.z + value,
)
return super(Vector, self).__iadd__(value)
# Alias; a Vector can only ever hold 3 values,
# and yet that isn't explicit in its name.
Vector3 = Vector
class Point(om.MPoint):
"""Maya's MPoint"""
class BoundingBox(om.MBoundingBox):
"""Maya's MBoundingBox"""
def volume(self):
return self.width * self.height * self.depth
class Quaternion(om.MQuaternion):
"""Maya's MQuaternion
Example:
>>> q = Quaternion(0, 0, 0, 1)
>>> v = Vector(1, 2, 3)
>>> isinstance(q * v, Vector)
True
"""
def __mul__(self, other):
if isinstance(other, (tuple, list)):
other = Vector(*other)
if isinstance(other, om.MVector):
return Vector(other.rotateBy(self))
else:
return super(Quaternion, self).__mul__(other)
def lengthSquared(self):
return (
self.x * self.x +
self.y * self.y +
self.z * self.z +
self.w * self.w
)
def length(self):
return math.sqrt(self.lengthSquared())
def isNormalised(self, tol=0.0001):
return abs(self.length() - 1.0) < tol
# Alias
Quat = Quaternion
def twistSwingToQuaternion(ts):
"""Convert twist/swing1/swing2 rotation in a Vector into a quaternion
Arguments:
ts (Vector): Twist, swing1 and swing2
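    Example:
        >>> quat = twistSwingToQuaternion(Vector(0, 0, 0))
        >>> quat.isNormalised()
        True
        >>> quat.w
        1.0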
"""
t = tan(ts.x * 0.25)
s1 = tan(ts.y * 0.25)
s2 = tan(ts.z * 0.25)
b = 2.0 / (1.0 + s1 * s1 + s2 * s2)
c = 2.0 / (1.0 + t * t)
quat = Quaternion()
quat.w = (b - 1.0) * (c - 1.0)
quat.x = -t * (b - 1.0) * c
quat.y = -b * (c * t * s1 + (c - 1.0) * s2)
quat.z = -b * (c * t * s2 - (c - 1.0) * s1)
assert quat.isNormalised()
return quat
class EulerRotation(om.MEulerRotation):
def asQuaternion(self):
return super(EulerRotation, self).asQuaternion()
if ENABLE_PEP8:
as_quaternion = asQuaternion
# Alias
Euler = EulerRotation
def NurbsCurveData(points, degree=1, form=om1.MFnNurbsCurve.kOpen):
"""Tuple of points to MObject suitable for nurbsCurve-typed data
Arguments:
points (tuple): (x, y, z) tuples per point
degree (int, optional): Defaults to 1 for linear
form (int, optional): Defaults to MFnNurbsCurve.kOpen,
also available kClosed
Example:
Create a new nurbs curve like this.
>>> data = NurbsCurveData(
... points=(
... (0, 0, 0),
... (0, 1, 0),
... (0, 2, 0),
... ))
...
>>> parent = createNode("transform")
>>> shape = createNode("nurbsCurve", parent=parent)
>>> shape["cached"] = data
"""
degree = min(3, max(1, degree))
cvs = om1.MPointArray()
curveFn = om1.MFnNurbsCurve()
data = om1.MFnNurbsCurveData()
mobj = data.create()
for point in points:
cvs.append(om1.MPoint(*point))
curveFn.createWithEditPoints(cvs,
degree,
form,
False,
False,
True,
mobj)
return mobj
class CachedPlug(Plug):
"""Returned in place of an actual plug"""
def __init__(self, value):
self._value = value
def read(self):
return self._value
def _plug_to_default(plug):
"""Find default value from plug, regardless of attribute type"""
if plug.isArray:
raise TypeError("Array plugs are unsupported")
if plug.isCompound:
raise TypeError("Compound plugs are unsupported")
attr = plug.attribute()
type = attr.apiType()
if type == om.MFn.kTypedAttribute:
return om.MFnTypedAttribute(attr).default
elif type in (om.MFn.kDoubleLinearAttribute,
om.MFn.kFloatLinearAttribute,
om.MFn.kDoubleAngleAttribute,
om.MFn.kFloatAngleAttribute):
return om.MFnUnitAttribute(attr).default
elif type == om.MFn.kNumericAttribute:
return om.MFnNumericAttribute(attr).default
elif type == om.MFn.kEnumAttribute:
return om.MFnEnumAttribute(attr).default
else:
raise TypeError("Attribute type '%s' unsupported" % type)
def _plug_to_python(plug, unit=None, context=None):
"""Convert native `plug` to Python type
Arguments:
plug (om.MPlug): Native Maya plug
unit (int, optional): Return value in this unit, e.g. Meters
context (om.MDGContext, optional): Return value in this context
"""
assert not plug.isNull, "'%s' was null" % plug
kwargs = dict()
if context is not None:
kwargs["context"] = context
# Multi attributes
# _____
# | |
# | ||
# | ||
# |_____||
# |_____|
#
if plug.isArray and plug.isCompound:
# E.g. locator["worldPosition"]
return _plug_to_python(
plug.elementByLogicalIndex(0), unit, context
)
elif plug.isArray:
# E.g. transform["worldMatrix"][0]
# E.g. locator["worldPosition"][0]
return tuple(
_plug_to_python(
plug.elementByLogicalIndex(index),
unit,
context
)
for index in range(plug.evaluateNumElements())
)
elif plug.isCompound:
return tuple(
_plug_to_python(plug.child(index), unit, context)
for index in range(plug.numChildren())
)
# Simple attributes
# _____
# | |
# | |
# | |
# |_____|
#
attr = plug.attribute()
type = attr.apiType()
if type == om.MFn.kTypedAttribute:
innerType = om.MFnTypedAttribute(attr).attrType()
if innerType == om.MFnData.kAny:
# E.g. choice["input"][0]
return None
elif innerType == om.MFnData.kMatrix:
# E.g. transform["worldMatrix"][0]
if plug.isArray:
plug = plug.elementByLogicalIndex(0)
return tuple(
om.MFnMatrixData(plug.asMObject(**kwargs)).matrix()
)
elif innerType == om.MFnData.kString:
return plug.asString(**kwargs)
elif innerType == om.MFnData.kNurbsCurve:
return om.MFnNurbsCurveData(plug.asMObject(**kwargs))
elif innerType == om.MFnData.kComponentList:
return None
elif innerType == om.MFnData.kInvalid:
# E.g. time1.timewarpIn_Hidden
# Unsure of why some attributes are invalid
return None
else:
log.debug("Unsupported kTypedAttribute: %s" % innerType)
return None
elif type == om.MFn.kMatrixAttribute:
return tuple(om.MFnMatrixData(plug.asMObject(**kwargs)).matrix())
elif type == om.MFnData.kDoubleArray:
raise TypeError("%s: kDoubleArray is not supported" % plug)
elif type in (om.MFn.kDoubleLinearAttribute,
om.MFn.kFloatLinearAttribute):
if unit is None:
return plug.asMDistance(**kwargs).asUnits(Centimeters)
elif unit == Millimeters:
return plug.asMDistance(**kwargs).asMillimeters()
elif unit == Centimeters:
return plug.asMDistance(**kwargs).asCentimeters()
elif unit == Meters:
return plug.asMDistance(**kwargs).asMeters()
elif unit == Kilometers:
return plug.asMDistance(**kwargs).asKilometers()
elif unit == Inches:
return plug.asMDistance(**kwargs).asInches()
elif unit == Feet:
return plug.asMDistance(**kwargs).asFeet()
elif unit == Miles:
return plug.asMDistance(**kwargs).asMiles()
elif unit == Yards:
return plug.asMDistance(**kwargs).asYards()
else:
raise TypeError("Unsupported unit '%d'" % unit)
elif type in (om.MFn.kDoubleAngleAttribute,
om.MFn.kFloatAngleAttribute):
if unit is None:
return plug.asMAngle(**kwargs).asUnits(Radians)
elif unit == Degrees:
return plug.asMAngle(**kwargs).asDegrees()
elif unit == Radians:
return plug.asMAngle(**kwargs).asRadians()
elif unit == AngularSeconds:
return plug.asMAngle(**kwargs).asAngSeconds()
elif unit == AngularMinutes:
return plug.asMAngle(**kwargs).asAngMinutes()
else:
raise TypeError("Unsupported unit '%d'" % unit)
# Number
elif type == om.MFn.kNumericAttribute:
innerType = om.MFnNumericAttribute(attr).numericType()
if innerType == om.MFnNumericData.kBoolean:
return plug.asBool(**kwargs)
elif innerType in (om.MFnNumericData.kShort,
om.MFnNumericData.kInt,
om.MFnNumericData.kLong,
om.MFnNumericData.kByte):
return plug.asInt(**kwargs)
elif innerType in (om.MFnNumericData.kFloat,
om.MFnNumericData.kDouble,
om.MFnNumericData.kAddr):
return plug.asDouble(**kwargs)
else:
raise TypeError("Unsupported numeric type: %s"
% innerType)
# Enum
elif type == om.MFn.kEnumAttribute:
return plug.asShort(**kwargs)
elif type == om.MFn.kMessageAttribute:
# In order to comply with `if plug:`
return True
elif type == om.MFn.kTimeAttribute:
if unit:
return plug.asMTime(**kwargs).asUnits(unit)
else:
return plug.asMTime(**kwargs).value
elif type == om.MFn.kInvalid:
raise TypeError("%s was invalid" % plug.name())
else:
raise TypeError("Unsupported type '%s'" % type)
def _python_to_plug(value, plug):
"""Pass value of `value` to `plug`
Arguments:
value (any): Instance of Python or Maya type
plug (Plug): Target plug to which value is applied
"""
# Compound values
if isinstance(value, (tuple, list)):
if plug.type() == "kMatrixAttribute":
assert len(value) == 16, "Value didn't appear to be a valid matrix"
return _python_to_plug(Matrix4(value), plug)
for index, value in enumerate(value):
# Tuple values are assumed flat:
# e.g. (0, 0, 0, 0)
# Nested values are not supported:
# e.g. ((0, 0), (0, 0))
# Those can sometimes appear in e.g. matrices
if isinstance(value, (tuple, list)):
raise TypeError(
"Unsupported nested Python type: %s"
% value.__class__
)
_python_to_plug(value, plug[index])
# Native Maya types
elif isinstance(value, om1.MObject):
node = _encode1(plug._node.path())
shapeFn = om1.MFnDagNode(node)
plug = shapeFn.findPlug(plug.name())
plug.setMObject(value)
elif isinstance(value, om.MEulerRotation):
for index, value in enumerate(value):
value = om.MAngle(value, om.MAngle.kRadians)
_python_to_plug(value, plug[index])
elif isinstance(value, om.MAngle):
plug._mplug.setMAngle(value)
elif isinstance(value, om.MDistance):
plug._mplug.setMDistance(value)
elif isinstance(value, om.MTime):
plug._mplug.setMTime(value)
elif isinstance(value, om.MQuaternion):
_python_to_plug(value.asEulerRotation(), plug)
elif isinstance(value, om.MVector):
for index, value in enumerate(value):
_python_to_plug(value, plug[index])
elif isinstance(value, om.MPoint):
for index, value in enumerate(value):
_python_to_plug(value, plug[index])
elif isinstance(value, om.MMatrix):
matrixData = om.MFnMatrixData()
matobj = matrixData.create(value)
plug._mplug.setMObject(matobj)
elif plug._mplug.isCompound:
count = plug._mplug.numChildren()
return _python_to_plug([value] * count, plug)
# Native Python types
elif isinstance(value, string_types):
plug._mplug.setString(value)
elif isinstance(value, int):
plug._mplug.setInt(value)
elif isinstance(value, float):
plug._mplug.setDouble(value)
elif isinstance(value, bool):
plug._mplug.setBool(value)
else:
raise TypeError("Unsupported Python type '%s'" % value.__class__)
def _python_to_mod(value, plug, mod):
"""Convert `value` into a suitable equivalent for om.MDGModifier
Arguments:
value (object): Value of any type to write into modifier
plug (Plug): Plug within which to write value
mod (om.MDGModifier): Modifier to use for writing it
"""
mplug = plug._mplug
if isinstance(value, (tuple, list)):
for index, value in enumerate(value):
# Tuple values are assumed flat:
# e.g. (0, 0, 0, 0)
# Nested values are not supported:
# e.g. ((0, 0), (0, 0))
# Those can sometimes appear in e.g. matrices
if isinstance(value, (tuple, list)):
raise TypeError(
"Unsupported nested Python type: %s"
% value.__class__
)
_python_to_mod(value, plug[index], mod)
elif isinstance(value, om.MVector):
for index, value in enumerate(value):
_python_to_mod(value, plug[index], mod)
elif isinstance(value, string_types):
mod.newPlugValueString(mplug, value)
elif isinstance(value, int):
mod.newPlugValueInt(mplug, value)
elif isinstance(value, float):
mod.newPlugValueFloat(mplug, value)
elif isinstance(value, bool):
mod.newPlugValueBool(mplug, value)
elif isinstance(value, om.MAngle):
mod.newPlugValueMAngle(mplug, value)
elif isinstance(value, om.MDistance):
mod.newPlugValueMDistance(mplug, value)
elif isinstance(value, om.MTime):
mod.newPlugValueMTime(mplug, value)
elif isinstance(value, om.MEulerRotation):
for index, value in enumerate(value):
value = om.MAngle(value, om.MAngle.kRadians)
_python_to_mod(value, plug[index], mod)
else:
log.warning(
"Unsupported plug type for modifier: %s" % type(value)
)
return False
return True
def encode(path):
"""Convert relative or absolute `path` to cmdx Node
Fastest conversion from absolute path to Node
Arguments:
path (str): Absolute or relative path to DAG or DG node
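    Example:
        >>> _ = cmds.file(new=True, force=True)
        >>> node = createNode("transform", name="myTransform")
        >>> encode("|myTransform") == node
        True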
"""
assert isinstance(path, string_types), "%s was not string" % path
selectionList = om.MSelectionList()
try:
selectionList.add(path)
except RuntimeError:
raise ExistError("'%s' does not exist" % path)
mobj = selectionList.getDependNode(0)
return Node(mobj)
def fromHash(code, default=None):
"""Get existing node from MObjectHandle.hashCode()"""
try:
return Singleton._instances["%x" % code]
except KeyError:
return default
def fromHex(hex, default=None, safe=True):
"""Get existing node from Node.hex"""
    node = Singleton._instances.get(hex, default)
    if not safe or (node and node.exists):
        return node
    # In safe mode, a cached node that no longer exists falls back to `default`
    return default
def toHash(mobj):
"""Cache the given `mobj` and return its hashCode
    This enables pre-caching of one or more nodes in situations where
    you intend to access them later, at a more performance-critical moment.
Ignores nodes that have already been cached.
"""
node = Node(mobj)
return node.hashCode
def toHex(mobj):
"""Cache the given `mobj` and return its hex value
See :func:`toHash` for docstring.
"""
node = Node(mobj)
return node.hex
def asHash(mobj):
"""Return a given hashCode for `mobj`, without caching it
This can be helpful in case you wish to synchronise `cmdx`
with a third-party library or tool and wish to guarantee
that an identical algorithm is used.
"""
handle = om.MObjectHandle(mobj)
return handle.hashCode()
def asHex(mobj):
"""Return a given hex string for `mobj`, without caching it
See docstring for :func:`asHash` for details
"""
return "%x" % asHash(mobj)
if ENABLE_PEP8:
from_hash = fromHash
from_hex = fromHex
to_hash = toHash
to_hex = toHex
as_hash = asHash
as_hex = asHex
# Helpful for euler rotations
degrees = math.degrees
radians = math.radians
sin = math.sin
cos = math.cos
tan = math.tan
pi = math.pi
def meters(cm):
"""Centimeters (Maya's default unit) to Meters
Example:
>>> meters(100)
1.0
"""
return cm * 0.01
def clear():
"""Remove all reused nodes"""
Singleton._instances.clear()
def _encode1(path):
"""Convert `path` to Maya API 1.0 MObject
Arguments:
path (str): Absolute or relative path to DAG or DG node
Raises:
ExistError on `path` not existing
"""
selectionList = om1.MSelectionList()
try:
selectionList.add(path)
except RuntimeError:
raise ExistError("'%s' does not exist" % path)
mobject = om1.MObject()
selectionList.getDependNode(0, mobject)
return mobject
def _encodedagpath1(path):
"""Convert `path` to Maya API 1.0 MObject
Arguments:
path (str): Absolute or relative path to DAG or DG node
Raises:
ExistError on `path` not existing
"""
selectionList = om1.MSelectionList()
try:
selectionList.add(path)
except RuntimeError:
raise ExistError("'%s' does not exist" % path)
dagpath = om1.MDagPath()
selectionList.getDagPath(0, dagpath)
return dagpath
def decode(node):
"""Convert cmdx Node to shortest unique path
This is the same as `node.shortestPath()`
To get an absolute path, use `node.path()`
"""
try:
return node.shortestPath()
except AttributeError:
return node.name(namespace=True)
def record_history(func):
@wraps(func)
def decorator(self, *args, **kwargs):
_kwargs = kwargs.copy()
_args = list(args)
# Don't store actual objects,
# to facilitate garbage collection.
for index, arg in enumerate(args):
if isinstance(arg, (Node, Plug)):
_args[index] = arg.path()
else:
_args[index] = repr(arg)
for key, value in kwargs.items():
if isinstance(value, (Node, Plug)):
_kwargs[key] = value.path()
else:
_kwargs[key] = repr(value)
self._history.append((func.__name__, _args, _kwargs))
return func(self, *args, **kwargs)
return decorator
class _BaseModifier(object):
"""Interactively edit an existing scenegraph with support for undo/redo
Arguments:
undoable (bool, optional): Put undoIt on the undo queue
interesting (bool, optional): New nodes should appear
in the channelbox
debug (bool, optional): Include additional debug data,
at the expense of performance
atomic (bool, optional): Automatically rollback changes on failure
template (str, optional): Automatically name new nodes using
this template
"""
Type = om.MDGModifier
def __enter__(self):
self.isContext = True
return self
def __exit__(self, exc_type, exc_value, tb):
# Support calling `doIt` during a context,
# without polluting the undo queue.
if self.isContext and self._opts["undoable"]:
commit(self._modifier.undoIt, self._modifier.doIt)
self.doIt()
def __init__(self,
undoable=True,
interesting=True,
debug=True,
atomic=True,
template=None):
super(_BaseModifier, self).__init__()
self.isDone = False
self.isContext = False
self._modifier = self.Type()
self._history = list()
self._index = 1
self._opts = {
"undoable": undoable,
"interesting": interesting,
"debug": debug,
"atomic": atomic,
"template": template,
}
def doIt(self):
if (not self.isContext) and self._opts["undoable"]:
commit(self._modifier.undoIt, self._modifier.doIt)
try:
self._modifier.doIt()
except RuntimeError:
# Rollback changes
if self._opts["atomic"]:
self.undoIt()
raise ModifierError(self._history)
self.isDone = True
def undoIt(self):
self._modifier.undoIt()
@record_history
def createNode(self, type, name=None):
try:
mobj = self._modifier.createNode(type)
except TypeError:
raise TypeError("'%s' is not a valid node type" % type)
template = self._opts["template"]
if name or template:
name = (template or "{name}").format(
name=name or "",
type=type,
index=self._index,
)
self._modifier.renameNode(mobj, name)
node = Node(mobj, exists=False, modifier=self)
if not self._opts["interesting"]:
plug = node["isHistoricallyInteresting"]
_python_to_mod(False, plug, self._modifier)
self._index += 1
return node
@record_history
def deleteNode(self, node):
return self._modifier.deleteNode(node._mobject)
delete = deleteNode
@record_history
def renameNode(self, node, name):
return self._modifier.renameNode(node._mobject, name)
rename = renameNode
@record_history
def setAttr(self, plug, value):
if isinstance(value, Plug):
value = value.read()
        if isinstance(plug, om.MPlug):
            # Wrap raw MPlugs so that _python_to_mod below receives a cmdx Plug
            plug = Plug(plug.node(), plug)
_python_to_mod(value, plug, self._modifier)
def resetAttr(self, plug):
self.setAttr(plug, plug.default)
@record_history
def connect(self, src, dst, force=True):
if isinstance(src, Plug):
src = src._mplug
if isinstance(dst, Plug):
dst = dst._mplug
if force:
# Disconnect any plug connected to `other`
for plug in dst.connectedTo(True, False):
self.disconnect(plug, dst)
self._modifier.connect(src, dst)
@record_history
def disconnect(self, a, b=None, source=True, destination=True):
"""Disconnect `a` from `b`
Arguments:
a (Plug): Starting point of a connection
b (Plug, optional): End point of a connection, defaults to all
source (bool, optional): Disconnect b, if it is a source
            destination (bool, optional): Disconnect b, if it is a destination
Normally, Maya only performs a disconnect if the
        connection is incoming; only the source-to-destination order is accepted.
disconnect(A, B) => OK
__________ _________
| | | |
| nodeA o---->o nodeB |
|__________| |_________|
disconnect(B, A) => NO
__________ _________
| | | |
| nodeA o---->o nodeB |
|__________| |_________|
"""
if isinstance(a, Plug):
a = a._mplug
if isinstance(b, Plug):
b = b._mplug
if b is None:
# Disconnect any plug connected to `other`
if source:
for plug in a.connectedTo(True, False):
self._modifier.disconnect(plug, a)
if destination:
for plug in a.connectedTo(False, True):
self._modifier.disconnect(a, plug)
else:
if source:
self._modifier.disconnect(a, b)
if destination:
self._modifier.disconnect(b, a)
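    # Minimal usage sketch; with no second argument every incoming and/or
    # outgoing connection of the plug is removed (names are illustrative):
    #
    #   with DGModifier() as mod:
    #       mod.disconnect(node["inputMatrix"])                # both directions
    #       mod.disconnect(node["message"], destination=False) # incoming only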
if ENABLE_PEP8:
do_it = doIt
undo_it = undoIt
create_node = createNode
delete_node = deleteNode
rename_node = renameNode
set_attr = setAttr
reset_attr = resetAttr
class DGModifier(_BaseModifier):
"""Modifier for DG nodes"""
Type = om.MDGModifier
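# Minimal usage sketch for DGModifier, including the `template` option
# described in _BaseModifier above (names and template are illustrative):
#
#   with DGModifier(template="rig_{name}") as mod:
#       mult = mod.createNode("multMatrix", name="offset")
#       mod.setAttr(mult["isHistoricallyInteresting"], False)
#
#   # mult.name() == "rig_offset"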
class DagModifier(_BaseModifier):
"""Modifier for DAG nodes
Example:
>>> with DagModifier() as mod:
... node1 = mod.createNode("transform")
... node2 = mod.createNode("transform", parent=node1)
... mod.setAttr(node1["translate"], (1, 2, 3))
... mod.connect(node1 + ".translate", node2 + ".translate")
...
>>> getAttr(node1 + ".translateX")
1.0
>>> node2["translate"][0]
1.0
>>> node2["translate"][1]
2.0
>>> with DagModifier() as mod:
... node1 = mod.createNode("transform")
... node2 = mod.createNode("transform", parent=node1)
... node1["translate"] = (5, 6, 7)
... node1["translate"] >> node2["translate"]
...
>>> node2["translate"][0]
5.0
>>> node2["translate"][1]
6.0
Example, without context manager:
>>> mod = DagModifier()
>>> parent = mod.createNode("transform")
>>> shape = mod.createNode("transform", parent=parent)
>>> mod.connect(parent["tz"], shape["tz"])
>>> mod.setAttr(parent["sx"], 2.0)
>>> parent["tx"] >> shape["ty"]
>>> parent["tx"] = 5.1
>>> round(shape["ty"], 1) # Not yet created nor connected
0.0
>>> mod.doIt()
>>> round(shape["ty"], 1)
5.1
>>> round(parent["sx"])
2.0
Duplicate names are resolved, even though nodes haven't yet been created:
>>> _ = cmds.file(new=True, force=True)
>>> with DagModifier() as mod:
... node = mod.createNode("transform", name="NotUnique")
... node1 = mod.createNode("transform", name="NotUnique")
... node2 = mod.createNode("transform", name="NotUnique")
...
>>> node.name() == "NotUnique"
True
>>> node1.name() == "NotUnique1"
True
>>> node2.name() == "NotUnique2"
True
Deletion works too
>>> _ = cmds.file(new=True, force=True)
>>> mod = DagModifier()
>>> parent = mod.createNode("transform", name="myParent")
>>> child = mod.createNode("transform", name="myChild", parent=parent)
>>> mod.doIt()
>>> "myParent" in cmds.ls()
True
>>> "myChild" in cmds.ls()
True
>>> parent.child().name()
u'myChild'
>>> mod = DagModifier()
>>> _ = mod.delete(child)
>>> mod.doIt()
>>> parent.child() is None
True
>>> "myChild" in cmds.ls()
False
"""
Type = om.MDagModifier
@record_history
def createNode(self, type, name=None, parent=None):
parent = parent._mobject if parent else om.MObject.kNullObj
try:
mobj = self._modifier.createNode(type, parent)
except TypeError:
raise TypeError("'%s' is not a valid node type" % type)
template = self._opts["template"]
if name or template:
name = (template or "{name}").format(
name=name or "",
type=type,
index=self._index,
)
self._modifier.renameNode(mobj, name)
return DagNode(mobj, exists=False, modifier=self)
@record_history
def parent(self, node, parent=None):
parent = parent._mobject if parent is not None else None
self._modifier.reparentNode(node._mobject, parent)
if ENABLE_PEP8:
create_node = createNode
class DGContext(om.MDGContext):
def __init__(self, time=None):
"""Context for evaluating the Maya DG
Extension of MDGContext to also accept time as a float. In Maya 2018
and above DGContext can also be used as a context manager.
Arguments:
time (float, om.MTime, optional): Time at which to evaluate context
"""
if time is not None:
if isinstance(time, (int, float)):
time = om.MTime(time, om.MTime.uiUnit())
super(DGContext, self).__init__(time)
else:
super(DGContext, self).__init__()
self._previousContext = None
def __enter__(self):
if __maya_version__ >= 2018:
self._previousContext = self.makeCurrent()
return self
else:
cmds.error(
"'%s' does not support context manager functionality for Maya 2017 "
"and below" % self.__class__.__name__
)
def __exit__(self, exc_type, exc_value, tb):
if self._previousContext:
self._previousContext.makeCurrent()
# Alias
Context = DGContext
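# Minimal usage sketch for DGContext; the context-manager form requires
# Maya 2018+ as noted above (node name is illustrative):
#
#   node = encode("|pCube1")
#   with DGContext(time=10):                     # frame 10, in UI units
#       value = node["translateX"].read()
#
#   # Or, without a context manager:
#   value = getAttr(node + ".translateX", time=10)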
def ls(*args, **kwargs):
return map(encode, cmds.ls(*args, **kwargs))
def selection(*args, **kwargs):
return map(encode, cmds.ls(*args, selection=True, **kwargs))
def createNode(type, name=None, parent=None):
"""Create a new node
This function forms the basic building block
with which to create new nodes in Maya.
.. note:: Missing arguments `shared` and `skipSelect`
.. tip:: For additional performance, `type` may be given as an MTypeId
Arguments:
type (str): Type name of new node, e.g. "transform"
name (str, optional): Sets the name of the newly-created node
parent (Node, optional): Specifies the parent in the DAG under which
the new node belongs
Example:
>>> node = createNode("transform") # Type as string
>>> node = createNode(tTransform) # Type as ID
"""
try:
with DagModifier() as mod:
node = mod.createNode(type, name=name, parent=parent)
except TypeError:
with DGModifier() as mod:
node = mod.createNode(type, name=name)
return node
def getAttr(attr, type=None, time=None):
"""Read `attr`
Arguments:
attr (Plug): Attribute as a cmdx.Plug
type (str, optional): Unused
time (float, optional): Time at which to evaluate the attribute
Example:
>>> node = createNode("transform")
>>> getAttr(node + ".translateX")
0.0
"""
return attr.read(time=time)
def setAttr(attr, value, type=None):
"""Write `value` to `attr`
Arguments:
attr (Plug): Existing attribute to edit
value (any): Value to write
type (int, optional): Unused
Example:
>>> node = createNode("transform")
>>> setAttr(node + ".translateX", 5.0)
"""
attr.write(value)
def addAttr(node,
longName,
attributeType,
shortName=None,
enumName=None,
defaultValue=None):
"""Add new attribute to `node`
Arguments:
node (Node): Add attribute to this node
longName (str): Name of resulting attribute
attributeType (str): Type of attribute, e.g. `string`
shortName (str, optional): Alternate name of attribute
enumName (str, optional): Options for an enum attribute
defaultValue (any, optional): Default value of attribute
Example:
>>> node = createNode("transform")
>>> addAttr(node, "myString", attributeType="string")
>>> addAttr(node, "myDouble", attributeType=Double)
"""
at = attributeType
if isinstance(at, type) and issubclass(at, _AbstractAttribute):
Attribute = attributeType
else:
# Support legacy maya.cmds interface
Attribute = {
"double": Double,
"double3": Double3,
"string": String,
"long": Long,
"bool": Boolean,
"enume": Enum,
}[attributeType]
kwargs = {
"default": defaultValue
}
if enumName:
kwargs["fields"] = enumName.split(":")
attribute = Attribute(longName, **kwargs)
node.addAttr(attribute)
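# Minimal usage sketch for the legacy-style addAttr signature above
# (attribute names and fields are illustrative):
#
#   node = createNode("transform")
#   addAttr(node, "myEnum", attributeType="enum", enumName="Off:On")
#   addAttr(node, "myFloat", attributeType=Double, defaultValue=0.5)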
def listRelatives(node,
type=None,
children=False,
allDescendents=False,
parent=False,
shapes=False):
"""List relatives of `node`
Arguments:
node (DagNode): Node to enquire about
type (int, optional): Only return nodes of this type
children (bool, optional): Return children of `node`
parent (bool, optional): Return parent of `node`
shapes (bool, optional): Return only children that are shapes
allDescendents (bool, optional): Return descendents of `node`
fullPath (bool, optional): Unused; nodes are always exact
path (bool, optional): Unused; nodes are always exact
Example:
>>> parent = createNode("transform")
>>> child = createNode("transform", parent=parent)
>>> listRelatives(child, parent=True) == [parent]
True
"""
if not isinstance(node, DagNode):
return None
elif allDescendents:
return list(node.descendents(type=type))
elif shapes:
return list(node.shapes(type=type))
elif parent:
return [node.parent(type=type)]
elif children:
return list(node.children(type=type))
def listConnections(attr):
"""List connections of `attr`
Arguments:
attr (Plug or Node):
Example:
>>> node1 = createNode("transform")
>>> node2 = createNode("mesh", parent=node1)
>>> node1["v"] >> node2["v"]
>>> listConnections(node1) == [node2]
True
>>> listConnections(node1 + ".v") == [node2]
True
>>> listConnections(node1["v"]) == [node2]
True
>>> listConnections(node2) == [node1]
True
"""
return list(node for node in attr.connections())
def connectAttr(src, dst):
"""Connect `src` to `dst`
Arguments:
src (Plug): Source plug
dst (Plug): Destination plug
Example:
>>> src = createNode("transform")
>>> dst = createNode("transform")
>>> connectAttr(src + ".rotateX", dst + ".scaleY")
"""
src.connect(dst)
def delete(*nodes):
with DGModifier() as mod:
for node in nodes:
mod.delete(node)
def rename(node, name):
with DGModifier() as mod:
mod.rename(node, name)
def parent(children, parent, relative=True, absolute=False):
assert isinstance(parent, DagNode), "parent must be DagNode"
if not isinstance(children, (tuple, list)):
children = [children]
for child in children:
assert isinstance(child, DagNode), "child must be DagNode"
parent.addChild(child)
def objExists(obj):
if isinstance(obj, (Node, Plug)):
obj = obj.path()
try:
om.MSelectionList().add(obj)
except RuntimeError:
return False
else:
return True
# PEP08
sl = selection
create_node = createNode
get_attr = getAttr
set_attr = setAttr
add_attr = addAttr
list_relatives = listRelatives
list_connections = listConnections
connect_attr = connectAttr
obj_exists = objExists
# Speciality functions
kOpen = om1.MFnNurbsCurve.kOpen
kClosed = om1.MFnNurbsCurve.kClosed
kPeriodic = om1.MFnNurbsCurve.kPeriodic
def editCurve(parent, points, degree=1, form=kOpen):
assert isinstance(parent, DagNode), (
"parent must be of type cmdx.DagNode"
)
degree = min(3, max(1, degree))
cvs = om1.MPointArray()
curveFn = om1.MFnNurbsCurve()
for point in points:
cvs.append(om1.MPoint(*point))
mobj = curveFn.createWithEditPoints(cvs,
degree,
form,
False,
False,
True,
_encode1(parent.path()))
mod = om1.MDagModifier()
mod.renameNode(mobj, parent.name(namespace=True) + "Shape")
mod.doIt()
def undo():
mod.deleteNode(mobj)
mod.doIt()
def redo():
mod.undoIt()
commit(undo, redo)
shapeFn = om1.MFnDagNode(mobj)
return encode(shapeFn.fullPathName())
def curve(parent, points, degree=1, form=kOpen):
"""Create a NURBS curve from a series of points
Arguments:
parent (DagNode): Parent to resulting shape node
points (list): One tuples per point, with 3 floats each
degree (int, optional): Degree of curve, 1 is linear
form (int, optional): Whether to close the curve or not
Example:
>>> parent = createNode("transform")
>>> shape = curve(parent, [
... (0, 0, 0),
... (0, 1, 0),
... (0, 2, 0),
... ])
...
"""
assert isinstance(parent, DagNode), (
"parent must be of type cmdx.DagNode"
)
assert parent._modifier is None or parent._modifier.isDone, (
"curve() currently doesn't work with a modifier"
)
# Superimpose end knots
# startpoints = [points[0]] * (degree - 1)
# endpoints = [points[-1]] * (degree - 1)
# points = startpoints + list(points) + endpoints
degree = min(3, max(1, degree))
cvs = om1.MPointArray()
knots = om1.MDoubleArray()
curveFn = om1.MFnNurbsCurve()
knotcount = len(points) - degree + 2 * degree - 1
for point in points:
cvs.append(om1.MPoint(*point))
for index in range(knotcount):
knots.append(index)
mobj = curveFn.create(cvs,
knots,
degree,
form,
False,
True,
_encode1(parent.path()))
mod = om1.MDagModifier()
mod.renameNode(mobj, parent.name(namespace=True) + "Shape")
mod.doIt()
def undo():
mod.deleteNode(mobj)
mod.doIt()
def redo():
mod.undoIt()
commit(undo, redo)
shapeFn = om1.MFnDagNode(mobj)
return encode(shapeFn.fullPathName())
def lookAt(origin, center, up=None):
"""Build a (left-handed) look-at matrix
See glm::glc::matrix_transform::lookAt for reference
+ Z (up)
/
/
(origin) o------ + X (center)
\
+ Y
Arguments:
origin (Vector): Starting position
center (Vector): Point towards this
up (Vector, optional): Up facing this way, defaults to Y-up
Example:
>>> mat = lookAt(
... (0, 0, 0), # Relative the origin..
... (1, 0, 0), # X-axis points towards global X
... (0, 1, 0) # Z-axis points towards global Y
... )
>>> tm = Tm(mat)
>>> int(degrees(tm.rotation().x))
-90
"""
if isinstance(origin, (tuple, list)):
origin = Vector(origin)
if isinstance(center, (tuple, list)):
center = Vector(center)
if up is not None and isinstance(up, (tuple, list)):
up = Vector(up)
up = up or Vector(0, 1, 0)
x = (center - origin).normalize()
y = ((center - origin) ^ (center - up)).normalize()
z = x ^ y
return MatrixType((
x[0], x[1], x[2], 0,
y[0], y[1], y[2], 0,
z[0], z[1], z[2], 0,
        0, 0, 0, 1
))
if ENABLE_PEP8:
look_at = lookAt
def first(iterator, default=None):
"""Return first member of an `iterator`
Example:
>>> def it():
... yield 1
... yield 2
... yield 3
...
>>> first(it())
1
"""
return next(iterator, default)
def last(iterator, default=None):
"""Return last member of an `iterator`
Example:
>>> def it():
... yield 1
... yield 2
... yield 3
...
>>> last(it())
3
"""
last = default
for member in iterator:
last = member
return last
# --------------------------------------------------------
#
# Attribute Types
#
# --------------------------------------------------------
class _AbstractAttribute(dict):
Fn = None
Type = None
Default = None
Readable = True
Writable = True
Cached = True # Cache in datablock?
Storable = True # Write value to file?
Hidden = False # Display in Attribute Editor?
Array = False
Connectable = True
Keyable = True
ChannelBox = False
AffectsAppearance = False
AffectsWorldSpace = False
Help = ""
def __eq__(self, other):
try:
# Support Attribute -> Attribute comparison
return self["name"] == other["name"]
except AttributeError:
# Support Attribute -> string comparison
return self["name"] == other
def __ne__(self, other):
try:
return self["name"] != other["name"]
except AttributeError:
return self["name"] != other
def __hash__(self):
"""Support storing in set()"""
return hash(self["name"])
def __repr__(self):
"""Avoid repr depicting the full contents of this dict"""
return self["name"]
def __new__(cls, *args, **kwargs):
"""Support for using name of assignment
Example:
node["thisName"] = cmdx.Double()
In this example, the attribute isn't given a `name`
Instead, the name is inferred from where it is assigned.
"""
if not args:
return cls, kwargs
return super(_AbstractAttribute, cls).__new__(cls, *args, **kwargs)
def __init__(self,
name,
default=None,
label=None,
writable=None,
readable=None,
cached=None,
storable=None,
keyable=None,
hidden=None,
min=None,
max=None,
channelBox=None,
affectsAppearance=None,
affectsWorldSpace=None,
array=False,
connectable=True,
help=None):
args = locals().copy()
args.pop("self")
self["name"] = args.pop("name")
self["label"] = args.pop("label")
self["default"] = args.pop("default")
# Exclusive to numeric attributes
self["min"] = args.pop("min")
self["max"] = args.pop("max")
# Filled in on creation
self["mobject"] = None
# MyName -> myName
self["shortName"] = self["name"][0].lower() + self["name"][1:]
for key, value in args.items():
default = getattr(self, key[0].upper() + key[1:])
self[key] = value if value is not None else default
def default(self, cls=None):
"""Return one of three available values
Resolution order:
1. Argument
2. Node default (from cls.defaults)
3. Attribute default
"""
if self["default"] is not None:
return self["default"]
if cls is not None:
return cls.defaults.get(self["name"], self.Default)
return self.Default
def type(self):
return self.Type
def create(self, cls=None):
args = [
arg
for arg in (self["name"],
self["shortName"],
self.type())
if arg is not None
]
default = self.default(cls)
if default:
if isinstance(default, (list, tuple)):
args += default
else:
args += [default]
self["mobject"] = self.Fn.create(*args)
# 3 μs
self.Fn.storable = self["storable"]
self.Fn.readable = self["readable"]
self.Fn.writable = self["writable"]
self.Fn.connectable = self["connectable"]
self.Fn.hidden = self["hidden"]
self.Fn.cached = self["cached"]
self.Fn.keyable = self["keyable"]
self.Fn.channelBox = self["channelBox"]
self.Fn.affectsAppearance = self["affectsAppearance"]
self.Fn.affectsWorldSpace = self["affectsWorldSpace"]
self.Fn.array = self["array"]
if self["min"] is not None:
self.Fn.setMin(self["min"])
if self["max"] is not None:
self.Fn.setMax(self["max"])
if self["label"] is not None:
self.Fn.setNiceNameOverride(self["label"])
return self["mobject"]
def read(self, data):
pass
class Enum(_AbstractAttribute):
Fn = om.MFnEnumAttribute()
Type = None
Default = 0
Keyable = True
def __init__(self, name, fields=None, default=0, label=None, **kwargs):
super(Enum, self).__init__(name, default, label, **kwargs)
self.update({
"fields": fields or (name,),
})
def create(self, cls=None):
attr = super(Enum, self).create(cls)
for index, field in enumerate(self["fields"]):
self.Fn.addField(field, index)
return attr
def read(self, data):
return data.inputValue(self["mobject"]).asShort()
class Divider(Enum):
"""Visual divider in channel box"""
def __init__(self, label, **kwargs):
kwargs.pop("name", None)
kwargs.pop("fields", None)
kwargs.pop("label", None)
super(Divider, self).__init__(label, fields=(label,), label=" ", **kwargs)
class String(_AbstractAttribute):
Fn = om.MFnTypedAttribute()
Type = om.MFnData.kString
Default = ""
def default(self, cls=None):
default = str(super(String, self).default(cls))
return om.MFnStringData().create(default)
def read(self, data):
return data.inputValue(self["mobject"]).asString()
class Message(_AbstractAttribute):
Fn = om.MFnMessageAttribute()
Type = None
Default = None
Storable = False
class Matrix(_AbstractAttribute):
Fn = om.MFnMatrixAttribute()
Default = (0.0,) * 4 * 4 # Identity matrix
Array = False
Readable = True
Keyable = False
Hidden = False
def default(self, cls=None):
return None
def read(self, data):
return data.inputValue(self["mobject"]).asMatrix()
class Long(_AbstractAttribute):
Fn = om.MFnNumericAttribute()
Type = om.MFnNumericData.kLong
Default = 0
def read(self, data):
return data.inputValue(self["mobject"]).asLong()
class Double(_AbstractAttribute):
Fn = om.MFnNumericAttribute()
Type = om.MFnNumericData.kDouble
Default = 0.0
def read(self, data):
return data.inputValue(self["mobject"]).asDouble()
class Double3(_AbstractAttribute):
Fn = om.MFnNumericAttribute()
Type = None
Default = (0.0,) * 3
def default(self, cls=None):
if self["default"] is not None:
default = self["default"]
# Support single-value default
if not isinstance(default, (tuple, list)):
default = (default,) * 3
elif cls is not None:
default = cls.defaults.get(self["name"], self.Default)
else:
default = self.Default
children = list()
for index, child in enumerate("XYZ"):
attribute = self.Fn.create(self["name"] + child,
self["shortName"] + child,
om.MFnNumericData.kDouble,
default[index])
children.append(attribute)
return children
def read(self, data):
return data.inputValue(self["mobject"]).asDouble3()
class Boolean(_AbstractAttribute):
Fn = om.MFnNumericAttribute()
Type = om.MFnNumericData.kBoolean
Default = True
def read(self, data):
return data.inputValue(self["mobject"]).asBool()
class AbstractUnit(_AbstractAttribute):
Fn = om.MFnUnitAttribute()
Default = 0.0
Min = None
Max = None
SoftMin = None
SoftMax = None
class Angle(AbstractUnit):
def default(self, cls=None):
default = super(Angle, self).default(cls)
# When no unit was explicitly passed, assume degrees
if not isinstance(default, om.MAngle):
default = om.MAngle(default, om.MAngle.kDegrees)
return default
class Time(AbstractUnit):
def default(self, cls=None):
default = super(Time, self).default(cls)
# When no unit was explicitly passed, assume seconds
if not isinstance(default, om.MTime):
default = om.MTime(default, om.MTime.kSeconds)
return default
class Distance(AbstractUnit):
def default(self, cls=None):
default = super(Distance, self).default(cls)
# When no unit was explicitly passed, assume centimeters
if not isinstance(default, om.MDistance):
default = om.MDistance(default, om.MDistance.kCentimeters)
return default
class Compound(_AbstractAttribute):
Fn = om.MFnCompoundAttribute()
Multi = None
def __init__(self, name, children=None, **kwargs):
if not children and self.Multi:
default = kwargs.pop("default", None)
children, Type = self.Multi
children = tuple(
Type(name + child, default=default[index], **kwargs)
if default else Type(name + child, **kwargs)
for index, child in enumerate(children)
)
self["children"] = children
else:
self["children"] = children
super(Compound, self).__init__(name, **kwargs)
def default(self, cls=None):
        # Compound itself has no defaults, only its children do
pass
def create(self, cls=None):
mobj = super(Compound, self).create(cls)
default = super(Compound, self).default(cls)
for index, child in enumerate(self["children"]):
# Forward attributes from parent to child
for attr in ("storable",
"readable",
"writable",
"hidden",
"channelBox",
"keyable",
"array"):
child[attr] = self[attr]
if child["default"] is None and default is not None:
child["default"] = default[index]
self.Fn.addChild(child.create(cls))
return mobj
def read(self, handle):
"""Read from MDataHandle"""
output = list()
for child in self["children"]:
child_handle = handle.child(child["mobject"])
output.append(child.read(child_handle))
return tuple(output)
class Double2(Compound):
Multi = ("XY", Double)
class Double4(Compound):
Multi = ("XYZW", Double)
class Angle2(Compound):
Multi = ("XY", Angle)
class Angle3(Compound):
Multi = ("XYZ", Angle)
class Distance2(Compound):
Multi = ("XY", Distance)
class Distance3(Compound):
Multi = ("XYZ", Distance)
class Distance4(Compound):
Multi = ("XYZW", Distance)
# Convenience aliases, for when it isn't clear e.g. `Matrix()`
# is referring to an attribute rather than the datatype.
EnumAttribute = Enum
DividerAttribute = Divider
StringAttribute = String
MessageAttribute = Message
MatrixAttribute = Matrix
LongAttribute = Long
DoubleAttribute = Double
Double3Attribute = Double3
BooleanAttribute = Boolean
AbstractUnitAttribute = AbstractUnit
AngleAttribute = Angle
TimeAttribute = Time
DistanceAttribute = Distance
CompoundAttribute = Compound
Double2Attribute = Double2
Double4Attribute = Double4
Angle2Attribute = Angle2
Angle3Attribute = Angle3
Distance2Attribute = Distance2
Distance3Attribute = Distance3
Distance4Attribute = Distance4
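# Minimal usage sketch; attributes can also be added via item assignment,
# as described in _AbstractAttribute.__new__ above (names are illustrative):
#
#   node = createNode("transform")
#   node["myDouble"] = Double(default=2.0)
#   node["myChoice"] = Enum(fields=["Off", "On"])
#   node["myDouble"].read()   # -> 2.0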
# --------------------------------------------------------
#
# Undo/Redo Support
#
# NOTE: Localised version of apiundo.py 0.2.0
# https://github.com/mottosso/apiundo
#
# In Maya, history is maintained by "commands". Each command is an instance of
# MPxCommand that encapsulates a series of API calls coupled with their
# equivalent undo/redo API calls. For example, the `createNode` command
# is presumably coupled with `cmds.delete`, `setAttr` is presumably
# coupled with another `setAttr` with the previous values passed in.
#
# Thus, creating a custom command involves subclassing MPxCommand and
# implementing coupling your do, undo and redo into one neat package.
#
# cmdx however doesn't fit into this framework.
#
# With cmdx, you call upon API calls directly. There is little to no
# correlation between each of your calls, which is great for performance
# but not so great for conforming to the undo/redo framework set forth
# by Autodesk.
#
# To work around this, without losing out on performance or functionality,
# a generic command is created, capable of hosting arbitrary API calls
# and storing them in the Undo/Redo framework.
#
# >>> node = cmdx.createNode("transform")
# >>> cmdx.commit(lambda: cmdx.delete(node))
#
# Now when you go to undo, the `lambda` is called. It is then up to you
# the developer to ensure that what is being undone actually relates
# to what you wanted to have undone. For example, it is perfectly
# possible to add an unrelated call to history.
#
# >>> node = cmdx.createNode("transform")
# >>> cmdx.commit(lambda: cmdx.setAttr(node + "translateX", 5))
#
# The result would be setting an attribute to `5` when attempting to undo.
#
# --------------------------------------------------------
# Support for multiple co-existing versions of apiundo.
# NOTE: This is important for vendoring, as otherwise a vendored apiundo
# could register e.g. cmds.apiUndo() first, causing a newer version
# to inadvertently use this older command (or worse yet, throwing an
# error when trying to register it again).
command = "_cmdxApiUndo_%s" % __version__.replace(".", "_")
# This module is both a Python module and Maya plug-in.
# Data is shared amongst the two through this "module"
name = "_cmdxShared_"
if name not in sys.modules:
sys.modules[name] = types.ModuleType(name)
shared = sys.modules[name]
shared.undo = None
shared.redo = None
shared.undos = {}
shared.redos = {}
def commit(undo, redo=lambda: None):
"""Commit `undo` and `redo` to history
Arguments:
undo (func): Call this function on next undo
        redo (func, optional): Like `undo`, but for redo
"""
if not ENABLE_UNDO:
return
if not hasattr(cmds, command):
install()
# Precautionary measure.
# If this doesn't pass, odds are we've got a race condition.
# NOTE: This assumes calls to `commit` can only be done
# from a single thread, which should already be the case
# given that Maya's API is not threadsafe.
try:
assert shared.redo is None
assert shared.undo is None
except AssertionError:
log.debug("%s has a problem with undo" % __name__)
# Temporarily store the functions at shared-level,
# they are later picked up by the command once called.
shared.undo = "%x" % id(undo)
shared.redo = "%x" % id(redo)
shared.undos[shared.undo] = undo
shared.redos[shared.redo] = redo
# Let Maya know that something is undoable
getattr(cmds, command)()
def install():
"""Load this shared as a plug-in
Call this prior to using the shared
"""
if ENABLE_UNDO:
cmds.loadPlugin(__file__, quiet=True)
self.installed = True
def uninstall():
if ENABLE_UNDO:
# Plug-in may exist in undo queue and
# therefore cannot be unloaded until flushed.
cmds.flushUndo()
# Discard shared module
shared.undo = None
shared.redo = None
shared.undos.clear()
shared.redos.clear()
sys.modules.pop(name, None)
cmds.unloadPlugin(os.path.basename(__file__))
self.installed = False
def maya_useNewAPI():
pass
class _apiUndo(om.MPxCommand):
def doIt(self, args):
self.undo = shared.undo
self.redo = shared.redo
# Facilitate the above precautionary measure
shared.undo = None
shared.redo = None
def undoIt(self):
shared.undos[self.undo]()
def redoIt(self):
shared.redos[self.redo]()
def isUndoable(self):
# Without this, the above undoIt and redoIt will not be called
return True
def initializePlugin(plugin):
om.MFnPlugin(plugin).registerCommand(
command,
_apiUndo
)
def uninitializePlugin(plugin):
om.MFnPlugin(plugin).deregisterCommand(command)
# --------------------------------------------------------
#
# Commonly Used Node Types
#
# Creating a new node using a pre-defined Type ID is 10% faster
# than doing it using a string, but keeping all (~800) around
# has a negative impact on maintainability and readability of
# the project, so a balance is struck where only the most
# performance sensitive types are included here.
#
# Developers: See cmdt.py for a list of all available types and their IDs
#
# --------------------------------------------------------
tAddDoubleLinear = om.MTypeId(0x4441444c)
tAddMatrix = om.MTypeId(0x44414d58)
tAngleBetween = om.MTypeId(0x4e414254)
tBlendShape = om.MTypeId(0x46424c53)
tMultMatrix = om.MTypeId(0x444d544d)
tAngleDimension = om.MTypeId(0x4147444e)
tBezierCurve = om.MTypeId(0x42435256)
tCamera = om.MTypeId(0x4443414d)
tChoice = om.MTypeId(0x43484345)
tChooser = om.MTypeId(0x43484f4f)
tCondition = om.MTypeId(0x52434e44)
tMesh = om.MTypeId(0x444d5348)
tNurbsCurve = om.MTypeId(0x4e435256)
tNurbsSurface = om.MTypeId(0x4e535246)
tJoint = om.MTypeId(0x4a4f494e)
tTransform = om.MTypeId(0x5846524d)
tTransformGeometry = om.MTypeId(0x5447454f)
tWtAddMatrix = om.MTypeId(0x4457414d)
# --------------------------------------------------------
#
# Plug-ins
#
# --------------------------------------------------------
InstalledPlugins = dict()
TypeId = om.MTypeId
# Get your unique ID from Autodesk, the below
# should not be trusted for production.
StartId = int(os.getenv("CMDX_BASETYPEID", "0x12b9c0"), 0)
class MetaNode(type):
def __init__(cls, *args, **kwargs):
assert isinstance(cls.name, str)
assert isinstance(cls.defaults, dict)
assert isinstance(cls.attributes, list)
assert isinstance(cls.version, tuple)
if isinstance(cls.typeid, (int, float)):
cls.typeid = TypeId(cls.typeid)
# Support Divider plug-in, without name for readability.
# E.g. Divider("_", "Label") -> Divider("Label")
index = 1
for attribute in cls.attributes:
if isinstance(attribute, Divider):
attribute["name"] = "_" * index
attribute["shortName"] = "_" * index
index += 1
# Ensure no duplicates
assert len(set(cls.attributes)) == len(cls.attributes), (
"One or more attributes in '%s' was found more than once"
% cls.__name__
)
attributes = {attr["name"]: attr for attr in cls.attributes}
def findAttribute(self, name):
return attributes.get(name)
def findMObject(self, name):
return attributes.get(name)["mobject"]
def findPlug(self, node, name):
try:
mobj = attributes.get(name)["mobject"]
return om.MPlug(node, mobj)
except KeyError:
return None
cls.findAttribute = findAttribute
cls.findMObject = findMObject
cls.findPlug = findPlug
cls.find_attribute = findAttribute
cls.find_mobject = findMObject
cls.find_plug = findPlug
cls.log = logging.getLogger(cls.__name__)
return super(MetaNode, cls).__init__(*args, **kwargs)
@add_metaclass(MetaNode)
class DgNode(om.MPxNode):
"""Abstract baseclass for a Maya DG node
Attributes:
name (str): Name used in e.g. cmds.createNode
id (int): Unique ID from Autodesk (see Ids above)
version (tuple, optional): Optional version number for plug-in node
attributes (tuple, optional): Attributes of node
defaults (dict, optional): Dictionary of default values
"""
typeid = TypeId(StartId)
name = "defaultNode"
version = (0, 0)
attributes = list()
affects = list()
ranges = dict()
defaults = {}
@classmethod
def postInitialize(cls):
pass
@add_metaclass(MetaNode)
class SurfaceShape(om.MPxSurfaceShape):
"""Abstract baseclass for a Maya shape
Attributes:
name (str): Name used in e.g. cmds.createNode
id (int): Unique ID from Autodesk (see Ids above)
version (tuple, optional): Optional version number for plug-in node
attributes (tuple, optional): Attributes of node
defaults (dict, optional): Dictionary of default values
"""
typeid = TypeId(StartId)
classification = "drawdb/geometry/custom"
name = "defaultNode"
version = (0, 0)
attributes = list()
affects = list()
ranges = dict()
defaults = {}
@classmethod
def postInitialize(cls):
pass
@classmethod
def uiCreator(cls):
pass
@add_metaclass(MetaNode)
class SurfaceShapeUI(omui.MPxSurfaceShapeUI):
"""Abstract baseclass for a Maya shape
Attributes:
name (str): Name used in e.g. cmds.createNode
id (int): Unique ID from Autodesk (see Ids above)
version (tuple, optional): Optional version number for plug-in node
attributes (tuple, optional): Attributes of node
defaults (dict, optional): Dictionary of default values
"""
typeid = TypeId(StartId)
classification = "drawdb/geometry/custom"
name = "defaultNode"
version = (0, 0)
attributes = list()
affects = list()
ranges = dict()
defaults = {}
@classmethod
def postInitialize(cls):
pass
@add_metaclass(MetaNode)
class LocatorNode(omui.MPxLocatorNode):
"""Abstract baseclass for a Maya locator
Attributes:
name (str): Name used in e.g. cmds.createNode
id (int): Unique ID from Autodesk (see Ids above)
version (tuple, optional): Optional version number for plug-in node
attributes (tuple, optional): Attributes of node
defaults (dict, optional): Dictionary of default values
"""
name = "defaultNode"
typeid = TypeId(StartId)
classification = "drawdb/geometry/custom"
version = (0, 0)
attributes = list()
affects = list()
ranges = dict()
defaults = {}
@classmethod
def postInitialize(cls):
pass
def initialize2(Plugin):
def _nodeInit():
nameToAttr = {}
for attr in Plugin.attributes:
mattr = attr.create(Plugin)
Plugin.addAttribute(mattr)
nameToAttr[attr["name"]] = mattr
for src, dst in Plugin.affects:
log.debug("'%s' affects '%s'" % (src, dst))
Plugin.attributeAffects(nameToAttr[src], nameToAttr[dst])
def _nodeCreator():
return Plugin()
def initializePlugin(obj):
version = ".".join(map(str, Plugin.version))
plugin = om.MFnPlugin(obj, "Cmdx", version, "Any")
try:
if issubclass(Plugin, LocatorNode):
plugin.registerNode(Plugin.name,
Plugin.typeid,
_nodeCreator,
_nodeInit,
om.MPxNode.kLocatorNode,
Plugin.classification)
elif issubclass(Plugin, DgNode):
plugin.registerNode(Plugin.name,
Plugin.typeid,
_nodeCreator,
_nodeInit)
elif issubclass(Plugin, SurfaceShape):
plugin.registerShape(Plugin.name,
Plugin.typeid,
_nodeCreator,
_nodeInit,
Plugin.uiCreator,
Plugin.classification)
else:
raise TypeError("Unsupported subclass: '%s'" % Plugin)
except Exception:
raise
else:
# Maintain reference to original class
InstalledPlugins[Plugin.name] = Plugin
Plugin.postInitialize()
return initializePlugin
def uninitialize2(Plugin):
def uninitializePlugin(obj):
om.MFnPlugin(obj).deregisterNode(Plugin.typeid)
return uninitializePlugin
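# Minimal sketch of a plug-in node built from the classes above; the name,
# attributes and type ID are illustrative, and a production plug-in needs
# its own unique ID from Autodesk (see the StartId note above):
#
#   class MyNode(DgNode):
#       name = "myNode"
#       typeid = TypeId(StartId + 1)
#       attributes = [
#           Double("inputValue"),
#           Double("outputValue", writable=False),
#       ]
#       affects = [("inputValue", "outputValue")]
#
#       def compute(self, plug, data):
#           value = data.inputValue(self.findMObject("inputValue")).asDouble()
#           data.outputValue(self.findMObject("outputValue")).setDouble(value * 2)
#           data.setClean(plug)
#
#   initializePlugin = initialize2(MyNode)
#   uninitializePlugin = uninitialize2(MyNode)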
# Plugins written with Maya Python API 1.0
class MPxManipContainer1(ompx1.MPxManipContainer):
name = "defaultManip"
version = (0, 0)
ownerid = om1.MTypeId(StartId)
typeid = om1.MTypeId(StartId)
def initializeManipulator1(Manipulator):
def _manipulatorCreator():
return ompx1.asMPxPtr(Manipulator())
def _manipulatorInit():
ompx1.MPxManipContainer.addToManipConnectTable(Manipulator.ownerid)
ompx1.MPxManipContainer.initialize()
def initializePlugin(obj):
version = ".".join(map(str, Manipulator.version))
plugin = ompx1.MFnPlugin(obj, "Cmdx", version, "Any")
# NOTE(marcus): The name *must* end with Manip
# See https://download.autodesk.com/us/maya/2011help
# /API/class_m_px_manip_container.html
# #e95527ff30ae53c8ae0419a1abde8b0c
        assert Manipulator.name.endswith("Manip"), (
            "Manipulator '%s' must have the name of a plug-in, "
            "and end with 'Manip'" % Manipulator.name
        )
plugin.registerNode(
Manipulator.name,
Manipulator.typeid,
_manipulatorCreator,
_manipulatorInit,
ompx1.MPxNode.kManipContainer
)
return initializePlugin
def uninitializeManipulator1(Manipulator):
def uninitializePlugin(obj):
ompx1.MFnPlugin(obj).deregisterNode(Manipulator.typeid)
return uninitializePlugin
def findPlugin(name):
"""Find the original class of a plug-in by `name`"""
try:
return InstalledPlugins[name]
except KeyError:
raise ExistError("'%s' is not a recognised plug-in" % name)
# --------------------------
#
# Callback Manager
#
# --------------------------
class Callback(object):
"""A Maya callback"""
log = logging.getLogger("cmdx.Callback")
def __init__(self, name, installer, args, api=2, help="", parent=None):
self._id = None
self._args = args
self._name = name
self._installer = installer
self._help = help
# Callbacks are all uninstalled using the same function
# relative either API 1.0 or 2.0
self._uninstaller = {
1: om1.MMessage.removeCallback,
2: om.MMessage.removeCallback
}[api]
def __del__(self):
self.deactivate()
def name(self):
return self._name
def help(self):
return self._help
def is_active(self):
return self._id is not None
def activate(self):
self.log.debug("Activating callback '%s'.." % self._name)
if self.is_active():
self.log.debug("%s already active, ignoring" % self._name)
return
self._id = self._installer(*self._args)
def deactivate(self):
self.log.debug("Deactivating callback '%s'.." % self._name)
if self.is_active():
self._uninstaller(self._id)
self._id = None
class CallbackGroup(list):
"""Multiple callbacks rolled into one"""
def __init__(self, name, callbacks, parent=None):
self._name = name
self[:] = callbacks
def name(self):
return self._name
def add(self, name, installer, args, api=2):
"""Convenience method for .append(Callback())"""
callback = Callback(name, installer, args, api)
self.append(callback)
    def activate(self):
        # CallbackGroup subclasses list; its callbacks are stored in `self`
        for callback in self:
            callback.activate()
    def deactivate(self):
        for callback in self:
            callback.deactivate()
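# Minimal usage sketch for Callback; the event name and handler are
# illustrative:
#
#   def _on_time_changed(clientData=None):
#       log.debug("time changed")
#
#   cb = Callback(
#       name="timeChanged",
#       installer=om.MEventMessage.addEventCallback,
#       args=("timeChanged", _on_time_changed),
#   )
#   cb.activate()
#   cb.deactivate()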
# ----------------------
#
# Cache Manager
#
# ----------------------
class Cache(object):
def __init__(self):
self._values = {}
def clear(self, node=None):
pass
def read(self, node, attr, time):
pass
def transform(self, node):
pass
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import json
import time
import math
import types
import logging
import traceback
import operator
import collections
from functools import wraps
from maya import cmds
from maya.api import OpenMaya as om, OpenMayaAnim as oma, OpenMayaUI as omui
from maya import OpenMaya as om1, OpenMayaMPx as ompx1, OpenMayaUI as omui1
__version__ = "0.4.6"
PY3 = sys.version_info[0] == 3
IGNORE_VERSION = bool(os.getenv("CMDX_IGNORE_VERSION"))
TIMINGS = bool(os.getenv("CMDX_TIMINGS"))
SAFE_MODE = bool(os.getenv("CMDX_SAFE_MODE"))
ROGUE_MODE = not SAFE_MODE and bool(os.getenv("CMDX_ROGUE_MODE"))
MEMORY_HOG_MODE = not SAFE_MODE and bool(os.getenv("CMDX_MEMORY_HOG_MODE"))
ENABLE_PEP8 = True
ENABLE_UNDO = not SAFE_MODE
ENABLE_PLUG_REUSE = True
if PY3:
string_types = str,
else:
string_types = str, basestring, unicode
try:
__maya_version__ = int(cmds.about(version=True))
except (AttributeError, ValueError):
__maya_version__ = 2015
if not IGNORE_VERSION:
assert __maya_version__ >= 2015, "Requires Maya 2015 or newer"
self = sys.modules[__name__]
self.installed = False
log = logging.getLogger("cmdx")
om1 = om1
omui1 = omui1
om = om
oma = oma
omui = omui
Stats = self
Stats.NodeInitCount = 0
Stats.NodeReuseCount = 0
Stats.PlugReuseCount = 0
Stats.LastTiming = None
if not hasattr(om, "MObjectHandle"):
log.warning("Node reuse might not work in this version of Maya "
"(OpenMaya.MObjectHandle not found)")
TimeUnit = om.MTime.uiUnit()
MTime = om.MTime
MDistance = om.MDistance
MAngle = om.MAngle
TimeType = om.MTime
DistanceType = om.MDistance
AngleType = om.MAngle
ExistError = type("ExistError", (RuntimeError,), {})
DoNothing = None
GlobalDagNode = om.MFnDagNode()
GlobalDependencyNode = om.MFnDependencyNode()
First = 0
Last = -1
Stepped = 5
Linear = 2
Smooth = 4
history = dict()
class ModifierError(RuntimeError):
def __init__(self, history):
tasklist = list()
for task in history:
cmd, args, kwargs = task
tasklist += [
"%s(%s)" % (cmd, ", ".join(map(repr, args)))
]
message = (
"An unexpected internal failure occurred, "
"these tasks were attempted:\n- " +
"\n- ".join(tasklist)
)
self.history = history
super(ModifierError, self).__init__(message)
def withTiming(text="{func}() {time:.2f} ns"):
def timings_decorator(func):
if not TIMINGS:
return func
@wraps(func)
def func_wrapper(*args, **kwargs):
t0 = time.clock()
try:
return func(*args, **kwargs)
finally:
t1 = time.clock()
duration = (t1 - t0) * 10 ** 6
Stats.LastTiming = duration
log.debug(
text.format(func=func.__name__,
time=duration)
)
return func_wrapper
return timings_decorator
def protected(func):
if ROGUE_MODE:
return func
@wraps(func)
def func_wrapper(*args, **kwargs):
if args[0]._destroyed:
raise ExistError("Cannot perform operation on deleted node")
return func(*args, **kwargs)
return func_wrapper
def add_metaclass(metaclass):
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
if hasattr(cls, '__qualname__'):
orig_vars['__qualname__'] = cls.__qualname__
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
class _Type(int):
MFn = om.MFn
kDagNode = _Type(om.MFn.kDagNode)
kShape = _Type(om.MFn.kShape)
kTransform = _Type(om.MFn.kTransform)
kJoint = _Type(om.MFn.kJoint)
kSet = _Type(om.MFn.kSet)
class _Space(int):
sWorld = _Space(om.MSpace.kWorld)
sObject = _Space(om.MSpace.kObject)
sTransform = _Space(om.MSpace.kTransform)
sPostTransform = _Space(om.MSpace.kPostTransform)
sPreTransform = _Space(om.MSpace.kPreTransform)
kXYZ = om.MEulerRotation.kXYZ
kYZX = om.MEulerRotation.kYZX
kZXY = om.MEulerRotation.kZXY
kXZY = om.MEulerRotation.kXZY
kYXZ = om.MEulerRotation.kYXZ
kZYX = om.MEulerRotation.kZYX
class _Unit(int):
def __new__(cls, unit, enum):
self = super(_Unit, cls).__new__(cls, enum)
self._unit = unit
return self
def __call__(self, enum):
return self._unit(enum, self)
Degrees = _Unit(om.MAngle, om.MAngle.kDegrees)
Radians = _Unit(om.MAngle, om.MAngle.kRadians)
AngularMinutes = _Unit(om.MAngle, om.MAngle.kAngMinutes)
AngularSeconds = _Unit(om.MAngle, om.MAngle.kAngSeconds)
Millimeters = _Unit(om.MDistance, om.MDistance.kMillimeters)
Centimeters = _Unit(om.MDistance, om.MDistance.kCentimeters)
Meters = _Unit(om.MDistance, om.MDistance.kMeters)
Kilometers = _Unit(om.MDistance, om.MDistance.kKilometers)
Inches = _Unit(om.MDistance, om.MDistance.kInches)
Feet = _Unit(om.MDistance, om.MDistance.kFeet)
Miles = _Unit(om.MDistance, om.MDistance.kMiles)
Yards = _Unit(om.MDistance, om.MDistance.kYards)
Milliseconds = _Unit(om.MTime, om.MTime.kMilliseconds)
Minutes = _Unit(om.MTime, om.MTime.kMinutes)
Seconds = _Unit(om.MTime, om.MTime.kSeconds)
def UiUnit():
return _Unit(om.MTime, om.MTime.uiUnit())
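# Minimal usage sketch; a unit can be passed alongside the plug name when
# reading or writing, per Node.__getitem__/__setitem__ below (names are
# illustrative):
#
#   node = createNode("transform")
#   node["rotateX", Degrees] = 90.0     # write in degrees
#   node["rotateX", Radians].read()     # read back in radians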
_Cached = type("Cached", (object,), {})
Cached = _Cached()
_data = collections.defaultdict(dict)
class Singleton(type):
_instances = {}
@withTiming()
def __call__(cls, mobject, exists=True, modifier=None):
handle = om.MObjectHandle(mobject)
hsh = handle.hashCode()
hx = "%x" % hsh
if exists and handle.isValid():
try:
node = cls._instances[hx]
assert not node._destroyed
except (KeyError, AssertionError):
pass
else:
Stats.NodeReuseCount += 1
node._removed = False
return node
if mobject.hasFn(om.MFn.kDagNode):
sup = DagNode
elif mobject.hasFn(om.MFn.kSet):
sup = ObjectSet
elif mobject.hasFn(om.MFn.kAnimCurve):
sup = AnimCurve
else:
sup = Node
self = super(Singleton, sup).__call__(mobject, exists, modifier)
self._hashCode = hsh
self._hexStr = hx
cls._instances[hx] = self
return self
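# Minimal sketch of the node reuse the Singleton metaclass provides
# ("persp" ships with default Maya scenes):
#
#   a = encode("persp")
#   b = encode("persp")
#   assert a is b
#   Stats.NodeReuseCount   # increments on the second call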
@add_metaclass(Singleton)
class Node(object):
_Fn = om.MFnDependencyNode
_Cache = dict()
def __eq__(self, other):
try:
return self._mobject == other._mobject
except AttributeError:
return str(self) == str(other)
def __ne__(self, other):
try:
return self._mobject != other._mobject
except AttributeError:
return str(self) != str(other)
def __str__(self):
return self.name(namespace=True)
def __repr__(self):
return self.name(namespace=True)
def __add__(self, other):
return self[other.strip(".")]
def __contains__(self, other):
return self.hasAttr(other)
def __getitem__(self, key):
unit = None
cached = False
if isinstance(key, (list, tuple)):
key, items = key[0], key[1:]
for item in items:
if isinstance(item, _Unit):
unit = item
elif isinstance(item, _Cached):
cached = True
if cached:
try:
return CachedPlug(self._state["values"][key, unit])
except KeyError:
pass
try:
plug = self.findPlug(key)
except RuntimeError:
raise ExistError("%s.%s" % (self.path(), key))
return Plug(self, plug, unit=unit, key=key, modifier=self._modifier)
def __setitem__(self, key, value):
if isinstance(value, Plug):
value = value.read()
unit = None
if isinstance(key, (list, tuple)):
key, unit = key
if isinstance(value, (list, tuple)):
value = list(unit(v) for v in value)
else:
value = unit(value)
elif isinstance(value, (tuple, list)):
if isinstance(value[0], type):
if issubclass(value[0], _AbstractAttribute):
Attribute, kwargs = value
attr = Attribute(key, **kwargs)
try:
return self.addAttr(attr.create())
except RuntimeError:
                        # NOTE: This may not be the only place where this exception is thrown; stay cautious.
raise ExistError(key)
try:
plug = self.findPlug(key)
except RuntimeError:
raise ExistError("%s.%s" % (self.path(), key))
plug = Plug(self, plug, unit=unit)
if not getattr(self._modifier, "isDone", True):
# Only a few attribute types are supported by a modifier
if _python_to_mod(value, plug, self._modifier._modifier):
return
else:
log.warning(
"Could not write %s via modifier, writing directly.."
% plug
)
# Else, write it immediately
plug.write(value)
def _onDestroyed(self, mobject):
self._destroyed = True
om.MMessage.removeCallbacks(self._state["callbacks"])
for callback in self.onDestroyed:
try:
callback(self)
except Exception:
traceback.print_exc()
_data.pop(self.hex, None)
def _onRemoved(self, mobject, modifier, _=None):
self._removed = True
for callback in self.onRemoved:
try:
callback()
except Exception:
traceback.print_exc()
def __delitem__(self, key):
self.deleteAttr(key)
@withTiming()
def __init__(self, mobject, exists=True, modifier=None):
self._mobject = mobject
self._fn = self._Fn(mobject)
self._modifier = modifier
self._destroyed = False
self._removed = False
self._hashCode = None
self._state = {
"plugs": dict(),
"values": dict(),
"callbacks": list()
}
# Callbacks
self.onDestroyed = list()
self.onRemoved = list()
Stats.NodeInitCount += 1
self._state["callbacks"] += [
# Monitor node deletion, to prevent accidental
# use of MObject past its lifetime which may
# result in a fatal crash.
om.MNodeMessage.addNodeDestroyedCallback(
mobject,
self._onDestroyed, # func
None # clientData
) if not ROGUE_MODE else 0,
om.MNodeMessage.addNodeAboutToDeleteCallback(
mobject,
self._onRemoved,
None
),
]
def plugin(self):
return type(self._fn.userNode())
def instance(self):
return self._fn.userNode()
def object(self):
return self._mobject
def isAlive(self):
return not self._destroyed
@property
def data(self):
return _data[self.hex]
@property
def destroyed(self):
return self._destroyed
@property
def exists(self):
return not self._removed
@property
def removed(self):
return self._removed
@property
def hashCode(self):
return self._hashCode
@property
def hexStr(self):
return self._hexStr
# Alias
code = hashCode
hex = hexStr
@property
def typeId(self):
return self._fn.typeId
@property
def typeName(self):
return self._fn.typeName
def isA(self, type):
return self._mobject.hasFn(type)
def lock(self, value=True):
self._fn.isLocked = value
def isLocked(self):
return self._fn.isLocked
@property
def storable(self):
# How is this value queried?
return None
@storable.setter
def storable(self, value):
# The original function is a double negative
self._fn.setDoNotWrite(not bool(value))
# Module-level branch; evaluated on import
@withTiming("findPlug() reuse {time:.4f} ns")
def findPlug(self, name, cached=False):
try:
existing = self._state["plugs"][name]
Stats.PlugReuseCount += 1
return existing
except KeyError:
if cached:
raise KeyError("'%s' not cached" % name)
plug = self._fn.findPlug(name, False)
self._state["plugs"][name] = plug
return plug
def update(self, attrs):
for key, value in attrs.items():
self[key] = value
def clear(self):
self._state["plugs"].clear()
self._state["values"].clear()
@protected
def name(self, namespace=False):
if namespace:
return self._fn.name()
else:
return self._fn.name().rsplit(":", 1)[-1]
def namespace(self):
name = self._fn.name()
if ":" in name:
# Else it will return name as-is, as namespace
# E.g. Ryan_:leftHand -> Ryan_, but :leftHand -> leftHand
return name.rsplit(":", 1)[0]
return type(name)()
# Alias
def path(self):
return self.name(namespace=True)
shortestPath = path
def pop(self, key):
del self[key]
def dump(self, ignore_error=True):
attrs = {}
count = self._fn.attributeCount()
for index in range(count):
obj = self._fn.attribute(index)
plug = self._fn.findPlug(obj, False)
try:
value = Plug(self, plug).read()
except (RuntimeError, TypeError):
# TODO: Support more types of attributes,
# such that this doesn't need to happen.
value = None
if not ignore_error:
raise
attrs[plug.name()] = value
return attrs
def dumps(self, indent=4, sortKeys=True):
return json.dumps(self.dump(), indent=indent, sort_keys=sortKeys)
def type(self):
return self._fn.typeName
def addAttr(self, attr):
if isinstance(attr, _AbstractAttribute):
attr = attr.create()
self._fn.addAttribute(attr)
def hasAttr(self, attr):
return self._fn.hasAttribute(attr)
def deleteAttr(self, attr):
if not isinstance(attr, Plug):
attr = self[attr]
attribute = attr._mplug.attribute()
self._fn.removeAttribute(attribute)
def connections(self, type=None, unit=None, plugs=False):
for plug in self._fn.getConnections():
mobject = plug.node()
node = Node(mobject)
if not type or type == node._fn.typeName:
plug = Plug(node, plug, unit)
for connection in plug.connections(plugs=plugs):
yield connection
def connection(self, type=None, unit=None, plug=False):
return next(self.connections(type, unit, plug), None)
def rename(self, name):
if not getattr(self._modifier, "isDone", True):
return self._modifier.rename(self, name)
mod = om.MDGModifier()
mod.renameNode(self._mobject, name)
mod.doIt()
if ENABLE_PEP8:
is_alive = isAlive
hex_str = hexStr
hash_code = hashCode
type_id = typeId
type_name = typeName
is_a = isA
is_locked = isLocked
find_plug = findPlug
add_attr = addAttr
has_attr = hasAttr
delete_attr = deleteAttr
shortest_path = shortestPath
class DagNode(Node):
_Fn = om.MFnDagNode
def __str__(self):
return self.path()
def __repr__(self):
return self.path()
def __init__(self, mobject, *args, **kwargs):
super(DagNode, self).__init__(mobject, *args, **kwargs)
self._tfn = om.MFnTransform(mobject)
@protected
def path(self):
return self._fn.fullPathName()
@protected
def dagPath(self):
return om.MDagPath.getAPathTo(self._mobject)
@protected
def shortestPath(self):
return self._fn.partialPathName()
@property
def level(self):
return self.path().count("|") - 1
@property
def boundingBox(self):
return BoundingBox(self._fn.boundingBox)
def hide(self):
self["visibility"] = False
def show(self):
self["visibility"] = True
def addChild(self, child, index=Last):
mobject = child._mobject
self._fn.addChild(mobject, index)
def assembly(self):
path = self._fn.getPath()
root = None
for level in range(path.length() - 1):
root = path.pop()
return self.__class__(root.node()) if root else self
def transform(self, space=sObject, time=None):
plug = self["worldMatrix"][0] if space == sWorld else self["matrix"]
return TransformationMatrix(plug.asMatrix(time))
def mapFrom(self, other, time=None):
a = self["worldMatrix"][0].asMatrix(time)
b = other["worldInverseMatrix"][0].asMatrix(time)
delta = a * b
return TransformationMatrix(delta)
def mapTo(self, other, time=None):
return other.mapFrom(self, time)
root = assembly
def parent(self, type=None):
mobject = self._fn.parent(0)
if mobject.apiType() == om.MFn.kWorld:
return
cls = self.__class__
if not type or type == self._fn.__class__(mobject).typeName:
return cls(mobject)
def children(self,
type=None,
filter=om.MFn.kTransform,
query=None,
contains=None):
if self.isA(kShape):
return
cls = DagNode
Fn = self._fn.__class__
op = operator.eq
if isinstance(type, (tuple, list)):
op = operator.contains
other = "typeId" if isinstance(type, om.MTypeId) else "typeName"
for index in range(self._fn.childCount()):
try:
mobject = self._fn.child(index)
except RuntimeError:
log.warning(
"Child %d of %s not found, this is a bug" % (index, self)
)
raise
if filter is not None and not mobject.hasFn(filter):
continue
if not type or op(type, getattr(Fn(mobject), other)):
node = cls(mobject)
if not contains or node.shape(type=contains):
if query is None:
yield node
elif isinstance(query, dict):
try:
if all(node[key] == value
for key, value in query.items()):
yield node
except ExistError:
continue
else:
if all(key in node for key in query):
yield node
def child(self,
type=None,
filter=om.MFn.kTransform,
query=None,
contains=None):
return next(self.children(type, filter, query, contains), None)
def shapes(self, type=None, query=None):
return self.children(type, kShape, query)
def shape(self, type=None):
return next(self.shapes(type), None)
def siblings(self, type=None, filter=om.MFn.kTransform):
parent = self.parent()
if parent is not None:
for child in parent.children(type=type, filter=filter):
if child != self:
yield child
def sibling(self, type=None, filter=None):
return next(self.siblings(type, filter), None)
    # Module-level branch, evaluated on import rather than at run-time, for that extra performance boost.
if hasattr(om, "MItDag"):
def descendents(self, type=None):
type = type or om.MFn.kInvalid
typeName = None
# Support filtering by typeName
if isinstance(type, string_types):
typeName = type
type = om.MFn.kInvalid
it = om.MItDag(om.MItDag.kDepthFirst, om.MFn.kInvalid)
it.reset(
self._mobject,
om.MItDag.kDepthFirst,
om.MIteratorType.kMObject
)
it.next() # Skip self
while not it.isDone():
mobj = it.currentItem()
node = DagNode(mobj)
if typeName is None:
if not type or type == node._fn.typeId:
yield node
else:
if not typeName or typeName == node._fn.typeName:
yield node
it.next()
else:
def descendents(self, type=None):
"""Recursive, depth-first search; compliant with MItDag of 2017+
Example:
>>> grandparent = createNode("transform")
>>> parent = createNode("transform", parent=grandparent)
>>> child = createNode("transform", parent=parent)
>>> mesh = createNode("mesh", parent=child)
>>> it = grandparent.descendents(type=tMesh)
>>> next(it) == mesh
True
>>> next(it)
Traceback (most recent call last):
...
StopIteration
"""
def _descendents(node, children=None):
children = children or list()
children.append(node)
for child in node.children(filter=None):
_descendents(child, children)
return children
# Support filtering by typeName
typeName = None
if isinstance(type, str):
typeName = type
type = om.MFn.kInvalid
descendents = _descendents(self)[1:] # Skip self
for child in descendents:
if typeName is None:
if not type or type == child._fn.typeId:
yield child
else:
if not typeName or typeName == child._fn.typeName:
yield child
def descendent(self, type=om.MFn.kInvalid):
return next(self.descendents(type), None)
def duplicate(self):
return self.__class__(self._fn.duplicate())
def clone(self, name=None, parent=None, worldspace=False):
if self.isA(kShape) and self.typeName == "mesh":
assert parent is not None, "mesh cloning requires parent argument"
            name = name or parent.name() + "Clone"
with DagModifier() as mod:
mesh = mod.createNode("mesh", name, parent)
mesh["inMesh"] << self["outMesh"]
return mesh
else:
raise TypeError("Unsupported clone target: %s" % self)
def isLimited(self, typ):
return self._tfn.isLimited(typ)
def limitValue(self, typ):
return self._tfn.limitValue(typ)
def enableLimit(self, typ, state):
return self._tfn.enableLimit(typ, state)
def setLimit(self, typ, value):
return self._tfn.setLimit(typ, value)
if ENABLE_PEP8:
shortest_path = shortestPath
add_child = addChild
dag_path = dagPath
map_from = mapFrom
map_to = mapTo
is_limited = isLimited
limit_value = limitValue
set_limit = setLimit
enable_limit = enableLimit
bounding_box = boundingBox
# MFnTransform Limit Types
kRotateMaxX = 13
kRotateMaxY = 15
kRotateMaxZ = 17
kRotateMinX = 12
kRotateMinY = 14
kRotateMinZ = 16
kScaleMaxX = 1
kScaleMaxY = 3
kScaleMaxZ = 5
kScaleMinX = 0
kScaleMinY = 2
kScaleMinZ = 4
kShearMaxXY = 7
kShearMaxXZ = 9
kShearMaxYZ = 11
kShearMinXY = 6
kShearMinXZ = 8
kShearMinYZ = 10
kTranslateMaxX = 19
kTranslateMaxY = 21
kTranslateMaxZ = 23
kTranslateMinX = 18
kTranslateMinY = 20
kTranslateMinZ = 22
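# ObjectSet wraps Maya objectSet nodes with a container-like interface;
# e.g. (example names):
#
#   objset = encode("mySelectionSet")
#   objset.add(some_node)
#   for member in objset:
#       print(member)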
class ObjectSet(Node):
@protected
def shortestPath(self):
return self.name(namespace=True)
def __iter__(self):
for member in self.members():
yield member
def add(self, member):
return self.update([member])
def remove(self, members):
mobj = _encode1(self.name(namespace=True))
selectionList = om1.MSelectionList()
if not isinstance(members, (tuple, list)):
selectionList.add(members.path())
else:
for member in members:
selectionList.add(member.path())
fn = om1.MFnSet(mobj)
fn.removeMembers(selectionList)
def update(self, members):
cmds.sets(list(map(str, members)), forceElement=self.path())
def clear(self):
mobj = _encode1(self.name(namespace=True))
fn = om1.MFnSet(mobj)
fn.clear()
def sort(self, key=lambda o: (o.typeName, o.path())):
members = sorted(
self.members(),
key=key
)
self.clear()
self.update(members)
def descendent(self, type=None):
return next(self.descendents(type), None)
def descendents(self, type=None):
for member in self.members(type=type):
yield member
try:
for child in member.descendents(type=type):
yield child
except AttributeError:
continue
def flatten(self, type=None):
members = set()
def recurse(objset):
for member in objset:
if member.isA(om.MFn.kSet):
recurse(member)
elif type is not None:
if type == member.typeName:
members.add(member)
else:
members.add(member)
recurse(self)
return list(members)
def member(self, type=None):
return next(self.members(type), None)
def members(self, type=None):
op = operator.eq
other = "typeId"
if isinstance(type, string_types):
other = "typeName"
if isinstance(type, (tuple, list)):
op = operator.contains
for node in cmds.sets(self.name(namespace=True), query=True) or []:
node = encode(node)
if not type or op(type, getattr(node._fn, other)):
yield node
class AnimCurve(Node):
if __maya_version__ >= 2016:
def __init__(self, mobj, exists=True, modifier=None):
super(AnimCurve, self).__init__(mobj, exists, modifier)
self._fna = oma.MFnAnimCurve(mobj)
def key(self, time, value, interpolation=Linear):
time = om.MTime(time, om.MTime.uiUnit())
index = self._fna.find(time)
if index:
self._fna.setValue(index, value)
else:
self._fna.addKey(time, value, interpolation, interpolation)
def keys(self, times, values, interpolation=Linear):
times = map(lambda t: om.MTime(t, TimeUnit), times)
try:
self._fna.addKeys(times, values)
except RuntimeError:
                # The errors provided by Maya aren't very descriptive,
                # so log some additional details before re-raising
if not times:
log.error("No times were provided: %s" % str(times))
if not values:
log.error("No values were provided: %s" % str(values))
if len(values) != len(times):
log.error(
"Count mismatch; len(times)=%d, len(values)=%d" % (
len(times), len(values)
)
)
raise
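# Plug wraps om.MPlug and handles reading, writing and connecting
# attributes, including unit conversion and array/compound access;
# e.g. (example attribute access):
#
#   node["translateX"] = 5.0
#   node["translateX"] >> other["translateY"]  # connect
#   value = node["translateX"].read()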
class Plug(object):
def __abs__(self):
return abs(self.read())
def __bool__(self):
return bool(self.read())
__nonzero__ = __bool__
def __float__(self):
return float(self.read())
def __int__(self):
return int(self.read())
def __eq__(self, other):
if isinstance(other, Plug):
other = other.read()
return self.read() == other
def __ne__(self, other):
if isinstance(other, Plug):
other = other.read()
return self.read() != other
def __neg__(self):
return -self.read()
def __div__(self, other):
if isinstance(other, Plug):
other = other.read()
return self.read() / other
def __truediv__(self, other):
if isinstance(other, Plug):
other = other.read()
return self.read() / other
def __add__(self, other):
if isinstance(other, str):
try:
return self._node[self.name() + other]
except ExistError:
return self._node[self.name(long=True) + other]
raise TypeError(
"unsupported operand type(s) for +: 'Plug' and '%s'"
% type(other)
)
def __iadd__(self, other):
if isinstance(other, (tuple, list)):
for entry in other:
self.append(entry)
else:
self.append(other)
return self
def __str__(self):
return str(self.read())
def __repr__(self):
return str(self.read())
def __rshift__(self, other):
self.connect(other)
def __lshift__(self, other):
other.connect(self)
def __floordiv__(self, other):
self.disconnect(other)
def __iter__(self):
if self._mplug.isArray:
            # We could iterate over `range(count)` here, but that would
            # only work if the logical indices were contiguous.
for index in self._mplug.getExistingArrayAttributeIndices():
yield self[index]
elif self._mplug.isCompound:
for index in range(self._mplug.numChildren()):
yield self[index]
else:
values = self.read()
# Facilitate single-value attributes
values = values if isinstance(values, (tuple, list)) else [values]
for value in values:
yield value
def __getitem__(self, index):
cls = self.__class__
if isinstance(index, int):
# Support backwards-indexing
if index < 0:
index = self.count() - abs(index)
if self._mplug.isArray:
item = self._mplug.elementByLogicalIndex(index)
return cls(self._node, item, self._unit)
elif self._mplug.isCompound:
item = self._mplug.child(index)
return cls(self._node, item, self._unit)
else:
raise TypeError(
"%s does not support indexing" % self.path()
)
elif isinstance(index, string_types):
# Compound attributes have no equivalent
# to "MDependencyNode.findPlug()" and must
# be searched by hand.
if self._mplug.isCompound:
for child in range(self._mplug.numChildren()):
child = self._mplug.child(child)
_, name = child.name().rsplit(".", 1)
if index == name:
return cls(self._node, child)
else:
raise TypeError("'%s' is not a compound attribute"
% self.path())
raise ExistError("'%s' was not found" % index)
def __setitem__(self, index, value):
self[index].write(value)
def __init__(self, node, mplug, unit=None, key=None, modifier=None):
assert isinstance(node, Node), "%s is not a Node" % node
self._node = node
self._mplug = mplug
self._unit = unit
self._cached = None
self._key = key
self._modifier = modifier
def plug(self):
return self._mplug
@property
def isArray(self):
return self._mplug.isArray
@property
def isCompound(self):
return self._mplug.isCompound
def append(self, value):
if not self._mplug.isArray:
raise TypeError("\"%s\" was not an array attribute" % self.path())
index = self.count()
if isinstance(value, Plug):
self[index] << value
else:
self[index].write(value)
def extend(self, values):
for value in values:
self.append(value)
def count(self):
return self._mplug.evaluateNumElements()
def asDouble(self, time=None):
if time is not None:
return self._mplug.asDouble(DGContext(time=time))
return self._mplug.asDouble()
def asMatrix(self, time=None):
if time is not None:
context = DGContext(time=time)
obj = self._mplug.asMObject(context)
else:
obj = self._mplug.asMObject()
return om.MFnMatrixData(obj).matrix()
def asTransformationMatrix(self, time=None):
return TransformationMatrix(self.asMatrix(time))
# Alias
asTm = asTransformationMatrix
def asEulerRotation(self, order=kXYZ, time=None):
value = self.read(time=time)
return om.MEulerRotation(value, order)
def asQuaternion(self, time=None):
value = self.read(time=time)
        return Euler(value).asQuaternion()
def asVector(self, time=None):
assert self.isArray or self.isCompound, "'%s' not an array" % self
return Vector(self.read(time=time))
@property
def connected(self):
return self.connection() is not None
@property
def locked(self):
return self._mplug.isLocked
@locked.setter
def locked(self, value):
elements = (
self
if self.isArray or self.isCompound
else [self]
)
        # Use setAttr in place of MPlug.isLocked = True, as that
        # doesn't persist the scene on save if the attribute is dynamic.
for el in elements:
cmds.setAttr(el.path(), lock=value)
def lock(self):
self.locked = True
def unlock(self):
self.locked = False
@property
def channelBox(self):
if self.isArray or self.isCompound:
return all(
plug._mplug.isChannelBox
for plug in self
)
else:
return self._mplug.isChannelBox
@channelBox.setter
def channelBox(self, value):
elements = (
self
if self.isArray or self.isCompound
else [self]
)
for el in elements:
cmds.setAttr(el.path(), keyable=value, channelBox=value)
@property
def keyable(self):
if self.isArray or self.isCompound:
return all(
plug._mplug.isKeyable
for plug in self
)
else:
return self._mplug.isKeyable
@keyable.setter
def keyable(self, value):
elements = (
self
if self.isArray or self.isCompound
else [self]
)
# Use setAttr in place of MPlug.isKeyable = False, as that
# doesn't persist the scene on save if the attribute is dynamic.
for el in elements:
cmds.setAttr(el.path(), keyable=value)
@property
def hidden(self):
return om.MFnAttribute(self._mplug.attribute()).hidden
@hidden.setter
def hidden(self, value):
pass
def hide(self):
self.keyable = False
self.channelBox = False
def lockAndHide(self):
self.lock()
self.hide()
@property
def default(self):
return _plug_to_default(self._mplug)
def reset(self):
if self.writable:
self.write(self.default)
else:
raise TypeError(
"Cannot reset non-writable attribute '%s'" % self.path()
)
@property
def writable(self):
return not any([self.connected, self.locked])
def show(self):
self.channelBox = True
def type(self):
return self._mplug.attribute().apiTypeStr
def path(self):
return "%s.%s" % (
self._node.path(), self._mplug.partialName(
includeNodeName=False,
useLongNames=True,
useFullAttributePath=True
)
)
def name(self, long=False):
return self._mplug.partialName(
includeNodeName=False,
useLongNames=long,
useFullAttributePath=True
)
def read(self, unit=None, time=None):
unit = unit if unit is not None else self._unit
context = None if time is None else DGContext(time=time)
try:
value = _plug_to_python(
self._mplug,
unit=unit,
context=context
)
self._node._state["values"][self._key, unit] = value
return value
except RuntimeError:
raise
except TypeError:
log.error("'%s': failed to read attribute" % self.path())
raise
def write(self, value):
if not getattr(self._modifier, "isDone", True):
return self._modifier.setAttr(self, value)
try:
_python_to_plug(value, self)
self._cached = value
except RuntimeError:
raise
except TypeError:
log.error("'%s': failed to write attribute" % self.path())
raise
def connect(self, other, force=True):
if not getattr(self._modifier, "isDone", True):
return self._modifier.connect(self, other, force)
mod = om.MDGModifier()
if force:
for plug in other._mplug.connectedTo(True, False):
mod.disconnect(plug, other._mplug)
mod.connect(self._mplug, other._mplug)
mod.doIt()
def disconnect(self, other=None, source=True, destination=True):
other = getattr(other, "_mplug", None)
if not getattr(self._modifier, "isDone", True):
mod = self._modifier
mod.disconnect(self._mplug, other, source, destination)
else:
mod = DGModifier()
mod.disconnect(self._mplug, other, source, destination)
mod.doIt()
def connections(self,
type=None,
source=True,
destination=True,
plugs=False,
unit=None):
op = operator.eq
other = "typeId"
if isinstance(type, string_types):
other = "typeName"
if isinstance(type, (tuple, list)):
op = operator.contains
for plug in self._mplug.connectedTo(source, destination):
mobject = plug.node()
node = Node(mobject)
if not type or op(type, getattr(node._fn, other)):
yield Plug(node, plug, unit) if plugs else node
def connection(self,
type=None,
source=True,
destination=True,
plug=False,
unit=None):
return next(self.connections(type=type,
source=source,
destination=destination,
plugs=plug,
unit=unit), None)
def source(self, unit=None):
cls = self.__class__
plug = self._mplug.source()
node = Node(plug.node())
if not plug.isNull:
return cls(node, plug, unit)
def node(self):
return self._node
if ENABLE_PEP8:
as_double = asDouble
as_matrix = asMatrix
as_transformation_matrix = asTransformationMatrix
as_euler_rotation = asEulerRotation
as_quaternion = asQuaternion
as_vector = asVector
channel_box = channelBox
lock_and_hide = lockAndHide
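# TransformationMatrix extends om.MTransformationMatrix with friendlier
# defaults (object space unless told otherwise) and conversion helpers;
# e.g. (example usage):
#
#   tm = node["worldMatrix"][0].asTm()
#   pos = tm.translation()
#   other["translate"] = pos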
class TransformationMatrix(om.MTransformationMatrix):
def __init__(self, matrix=None, translate=None, rotate=None, scale=None):
# It doesn't like being handed `None`
args = [matrix] if matrix is not None else []
super(TransformationMatrix, self).__init__(*args)
if translate is not None:
self.setTranslation(translate)
if rotate is not None:
self.setRotation(rotate)
if scale is not None:
self.setScale(scale)
def __mul__(self, other):
if isinstance(other, (tuple, list)):
other = Vector(*other)
if isinstance(other, om.MVector):
p = self.translation()
q = self.quaternion()
return p + q * other
elif isinstance(other, om.MMatrix):
return type(self)(self.asMatrix() * other)
elif isinstance(other, om.MTransformationMatrix):
return type(self)(self.asMatrix() * other.asMatrix())
else:
raise TypeError(
"unsupported operand type(s) for *: '%s' and '%s'"
% (type(self).__name__, type(other).__name__)
)
@property
def xAxis(self):
return self.quaternion() * Vector(1, 0, 0)
@property
def yAxis(self):
return self.quaternion() * Vector(0, 1, 0)
@property
def zAxis(self):
return self.quaternion() * Vector(0, 0, 1)
def translateBy(self, vec, space=None):
space = space or sTransform
if isinstance(vec, (tuple, list)):
vec = Vector(vec)
return super(TransformationMatrix, self).translateBy(vec, space)
def rotateBy(self, rot, space=None):
space = space or sTransform
if isinstance(rot, (tuple, list)):
rot = Vector(rot)
if isinstance(rot, om.MVector):
rot = EulerRotation(rot)
return super(TransformationMatrix, self).rotateBy(rot, space)
def quaternion(self):
return Quaternion(self.rotation(asQuaternion=True))
def rotatePivot(self, space=None):
space = space or sTransform
return super(TransformationMatrix, self).rotatePivot(space)
def translation(self, space=None):
space = space or sTransform
return super(TransformationMatrix, self).translation(space)
def setTranslation(self, trans, space=None):
if isinstance(trans, Plug):
trans = trans.as_vector()
if isinstance(trans, (tuple, list)):
trans = Vector(*trans)
space = space or sTransform
return super(TransformationMatrix, self).setTranslation(trans, space)
def scaleBy(self, space=None):
space = space or sTransform
return Vector(super(TransformationMatrix, self).scale(space))
def setScale(self, seq, space=None):
if isinstance(seq, Plug):
seq = seq.as_vector()
if isinstance(seq, (tuple, list)):
seq = Vector(*seq)
space = space or sTransform
return super(TransformationMatrix, self).setScale(seq, space)
def rotation(self, asQuaternion=False):
return super(TransformationMatrix, self).rotation(asQuaternion)
def setRotation(self, rot):
if isinstance(rot, Plug):
rot = rot.as_vector()
if isinstance(rot, (tuple, list)):
try:
rot = Vector(rot)
except ValueError:
traceback.print_exc()
raise ValueError(
"I tried automatically converting your "
"tuple to a Vector, but couldn't.."
)
if isinstance(rot, Vector):
rot = EulerRotation(rot)
return super(TransformationMatrix, self).setRotation(rot)
def asMatrix(self):
return MatrixType(super(TransformationMatrix, self).asMatrix())
def asMatrixInverse(self):
return MatrixType(super(TransformationMatrix, self).asMatrixInverse())
# A more intuitive alternative
translate = translateBy
rotate = rotateBy
scale = scaleBy
if ENABLE_PEP8:
x_axis = xAxis
y_axis = yAxis
z_axis = zAxis
translate_by = translateBy
rotate_by = rotateBy
set_translation = setTranslation
set_rotation = setRotation
set_scale = setScale
as_matrix = asMatrix
as_matrix_inverse = asMatrixInverse
class MatrixType(om.MMatrix):
def __call__(self, *item):
if len(item) == 1:
return self.row(*item)
elif len(item) == 2:
return self.element(*item)
else:
raise ValueError(
"Must provide either 1 or 2 coordinates, "
"for row and element respectively"
)
def __mul__(self, other):
return type(self)(super(MatrixType, self).__mul__(other))
def __div__(self, other):
return type(self)(super(MatrixType, self).__div__(other))
def inverse(self):
return type(self)(super(MatrixType, self).inverse())
def row(self, index):
values = tuple(self)
return (
values[index * 4 + 0],
values[index * 4 + 1],
values[index * 4 + 2],
values[index * 4 + 3]
)
def element(self, row, col):
values = tuple(self)
return values[row * 4 + col % 4]
# Alias
Transformation = TransformationMatrix
Tm = TransformationMatrix
Mat = MatrixType
Mat4 = MatrixType
Matrix4 = MatrixType
class Vector(om.MVector):
def __add__(self, value):
if isinstance(value, (int, float)):
return type(self)(
self.x + value,
self.y + value,
self.z + value,
)
return super(Vector, self).__add__(value)
def __iadd__(self, value):
if isinstance(value, (int, float)):
return type(self)(
self.x + value,
self.y + value,
self.z + value,
)
return super(Vector, self).__iadd__(value)
# Alias, it can't take anything other than values
Vector3 = Vector
class Point(om.MPoint):
class BoundingBox(om.MBoundingBox):
def volume(self):
return self.width * self.height * self.depth
class Quaternion(om.MQuaternion):
def __mul__(self, other):
if isinstance(other, (tuple, list)):
other = Vector(*other)
if isinstance(other, om.MVector):
return Vector(other.rotateBy(self))
else:
return super(Quaternion, self).__mul__(other)
def lengthSquared(self):
return (
self.x * self.x +
self.y * self.y +
self.z * self.z +
self.w * self.w
)
def length(self):
return math.sqrt(self.lengthSquared())
def isNormalised(self, tol=0.0001):
return abs(self.length() - 1.0) < tol
# Alias
Quat = Quaternion
def twistSwingToQuaternion(ts):
t = tan(ts.x * 0.25)
s1 = tan(ts.y * 0.25)
s2 = tan(ts.z * 0.25)
b = 2.0 / (1.0 + s1 * s1 + s2 * s2)
c = 2.0 / (1.0 + t * t)
quat = Quaternion()
quat.w = (b - 1.0) * (c - 1.0)
quat.x = -t * (b - 1.0) * c
quat.y = -b * (c * t * s1 + (c - 1.0) * s2)
quat.z = -b * (c * t * s2 - (c - 1.0) * s1)
assert quat.isNormalised()
return quat
class EulerRotation(om.MEulerRotation):
def asQuaternion(self):
return super(EulerRotation, self).asQuaternion()
if ENABLE_PEP8:
as_quaternion = asQuaternion
# Alias
Euler = EulerRotation
def NurbsCurveData(points, degree=1, form=om1.MFnNurbsCurve.kOpen):
degree = min(3, max(1, degree))
cvs = om1.MPointArray()
curveFn = om1.MFnNurbsCurve()
data = om1.MFnNurbsCurveData()
mobj = data.create()
for point in points:
cvs.append(om1.MPoint(*point))
curveFn.createWithEditPoints(cvs,
degree,
form,
False,
False,
True,
mobj)
return mobj
class CachedPlug(Plug):
def __init__(self, value):
self._value = value
def read(self):
return self._value
def _plug_to_default(plug):
if plug.isArray:
raise TypeError("Array plugs are unsupported")
if plug.isCompound:
raise TypeError("Compound plugs are unsupported")
attr = plug.attribute()
type = attr.apiType()
if type == om.MFn.kTypedAttribute:
return om.MFnTypedAttribute(attr).default
elif type in (om.MFn.kDoubleLinearAttribute,
om.MFn.kFloatLinearAttribute,
om.MFn.kDoubleAngleAttribute,
om.MFn.kFloatAngleAttribute):
return om.MFnUnitAttribute(attr).default
elif type == om.MFn.kNumericAttribute:
return om.MFnNumericAttribute(attr).default
elif type == om.MFn.kEnumAttribute:
return om.MFnEnumAttribute(attr).default
else:
raise TypeError("Attribute type '%s' unsupported" % type)
def _plug_to_python(plug, unit=None, context=None):
assert not plug.isNull, "'%s' was null" % plug
kwargs = dict()
if context is not None:
kwargs["context"] = context
# Multi attributes
# _____
# | |
# | ||
# | ||
# |_____||
# |_____|
#
if plug.isArray and plug.isCompound:
# E.g. locator["worldPosition"]
return _plug_to_python(
plug.elementByLogicalIndex(0), unit, context
)
elif plug.isArray:
# E.g. transform["worldMatrix"][0]
# E.g. locator["worldPosition"][0]
return tuple(
_plug_to_python(
plug.elementByLogicalIndex(index),
unit,
context
)
for index in range(plug.evaluateNumElements())
)
elif plug.isCompound:
return tuple(
_plug_to_python(plug.child(index), unit, context)
for index in range(plug.numChildren())
)
# Simple attributes
# _____
# | |
# | |
# | |
# |_____|
#
attr = plug.attribute()
type = attr.apiType()
if type == om.MFn.kTypedAttribute:
innerType = om.MFnTypedAttribute(attr).attrType()
if innerType == om.MFnData.kAny:
# E.g. choice["input"][0]
return None
elif innerType == om.MFnData.kMatrix:
# E.g. transform["worldMatrix"][0]
if plug.isArray:
plug = plug.elementByLogicalIndex(0)
return tuple(
om.MFnMatrixData(plug.asMObject(**kwargs)).matrix()
)
elif innerType == om.MFnData.kString:
return plug.asString(**kwargs)
elif innerType == om.MFnData.kNurbsCurve:
return om.MFnNurbsCurveData(plug.asMObject(**kwargs))
elif innerType == om.MFnData.kComponentList:
return None
elif innerType == om.MFnData.kInvalid:
# E.g. time1.timewarpIn_Hidden
# Unsure of why some attributes are invalid
return None
else:
log.debug("Unsupported kTypedAttribute: %s" % innerType)
return None
elif type == om.MFn.kMatrixAttribute:
return tuple(om.MFnMatrixData(plug.asMObject(**kwargs)).matrix())
elif type == om.MFnData.kDoubleArray:
raise TypeError("%s: kDoubleArray is not supported" % plug)
elif type in (om.MFn.kDoubleLinearAttribute,
om.MFn.kFloatLinearAttribute):
if unit is None:
return plug.asMDistance(**kwargs).asUnits(Centimeters)
elif unit == Millimeters:
return plug.asMDistance(**kwargs).asMillimeters()
elif unit == Centimeters:
return plug.asMDistance(**kwargs).asCentimeters()
elif unit == Meters:
return plug.asMDistance(**kwargs).asMeters()
elif unit == Kilometers:
return plug.asMDistance(**kwargs).asKilometers()
elif unit == Inches:
return plug.asMDistance(**kwargs).asInches()
elif unit == Feet:
return plug.asMDistance(**kwargs).asFeet()
elif unit == Miles:
return plug.asMDistance(**kwargs).asMiles()
elif unit == Yards:
return plug.asMDistance(**kwargs).asYards()
else:
raise TypeError("Unsupported unit '%d'" % unit)
elif type in (om.MFn.kDoubleAngleAttribute,
om.MFn.kFloatAngleAttribute):
if unit is None:
return plug.asMAngle(**kwargs).asUnits(Radians)
elif unit == Degrees:
return plug.asMAngle(**kwargs).asDegrees()
elif unit == Radians:
return plug.asMAngle(**kwargs).asRadians()
elif unit == AngularSeconds:
return plug.asMAngle(**kwargs).asAngSeconds()
elif unit == AngularMinutes:
return plug.asMAngle(**kwargs).asAngMinutes()
else:
raise TypeError("Unsupported unit '%d'" % unit)
# Number
elif type == om.MFn.kNumericAttribute:
innerType = om.MFnNumericAttribute(attr).numericType()
if innerType == om.MFnNumericData.kBoolean:
return plug.asBool(**kwargs)
elif innerType in (om.MFnNumericData.kShort,
om.MFnNumericData.kInt,
om.MFnNumericData.kLong,
om.MFnNumericData.kByte):
return plug.asInt(**kwargs)
elif innerType in (om.MFnNumericData.kFloat,
om.MFnNumericData.kDouble,
om.MFnNumericData.kAddr):
return plug.asDouble(**kwargs)
else:
raise TypeError("Unsupported numeric type: %s"
% innerType)
# Enum
elif type == om.MFn.kEnumAttribute:
return plug.asShort(**kwargs)
elif type == om.MFn.kMessageAttribute:
# In order to comply with `if plug:`
return True
elif type == om.MFn.kTimeAttribute:
if unit:
return plug.asMTime(**kwargs).asUnits(unit)
else:
return plug.asMTime(**kwargs).value
elif type == om.MFn.kInvalid:
raise TypeError("%s was invalid" % plug.name())
else:
raise TypeError("Unsupported type '%s'" % type)
def _python_to_plug(value, plug):
# Compound values
if isinstance(value, (tuple, list)):
if plug.type() == "kMatrixAttribute":
assert len(value) == 16, "Value didn't appear to be a valid matrix"
return _python_to_plug(Matrix4(value), plug)
for index, value in enumerate(value):
if isinstance(value, (tuple, list)):
raise TypeError(
"Unsupported nested Python type: %s"
% value.__class__
)
_python_to_plug(value, plug[index])
elif isinstance(value, om1.MObject):
node = _encode1(plug._node.path())
shapeFn = om1.MFnDagNode(node)
plug = shapeFn.findPlug(plug.name())
plug.setMObject(value)
elif isinstance(value, om.MEulerRotation):
for index, value in enumerate(value):
value = om.MAngle(value, om.MAngle.kRadians)
_python_to_plug(value, plug[index])
elif isinstance(value, om.MAngle):
plug._mplug.setMAngle(value)
elif isinstance(value, om.MDistance):
plug._mplug.setMDistance(value)
elif isinstance(value, om.MTime):
plug._mplug.setMTime(value)
elif isinstance(value, om.MQuaternion):
_python_to_plug(value.asEulerRotation(), plug)
elif isinstance(value, om.MVector):
for index, value in enumerate(value):
_python_to_plug(value, plug[index])
elif isinstance(value, om.MPoint):
for index, value in enumerate(value):
_python_to_plug(value, plug[index])
elif isinstance(value, om.MMatrix):
matrixData = om.MFnMatrixData()
matobj = matrixData.create(value)
plug._mplug.setMObject(matobj)
elif plug._mplug.isCompound:
count = plug._mplug.numChildren()
return _python_to_plug([value] * count, plug)
elif isinstance(value, string_types):
plug._mplug.setString(value)
elif isinstance(value, int):
plug._mplug.setInt(value)
elif isinstance(value, float):
plug._mplug.setDouble(value)
elif isinstance(value, bool):
plug._mplug.setBool(value)
else:
raise TypeError("Unsupported Python type '%s'" % value.__class__)
def _python_to_mod(value, plug, mod):
mplug = plug._mplug
if isinstance(value, (tuple, list)):
for index, value in enumerate(value):
if isinstance(value, (tuple, list)):
raise TypeError(
"Unsupported nested Python type: %s"
% value.__class__
)
_python_to_mod(value, plug[index], mod)
elif isinstance(value, om.MVector):
for index, value in enumerate(value):
_python_to_mod(value, plug[index], mod)
elif isinstance(value, string_types):
mod.newPlugValueString(mplug, value)
elif isinstance(value, int):
mod.newPlugValueInt(mplug, value)
elif isinstance(value, float):
mod.newPlugValueFloat(mplug, value)
elif isinstance(value, bool):
mod.newPlugValueBool(mplug, value)
elif isinstance(value, om.MAngle):
mod.newPlugValueMAngle(mplug, value)
elif isinstance(value, om.MDistance):
mod.newPlugValueMDistance(mplug, value)
elif isinstance(value, om.MTime):
mod.newPlugValueMTime(mplug, value)
elif isinstance(value, om.MEulerRotation):
for index, value in enumerate(value):
value = om.MAngle(value, om.MAngle.kRadians)
_python_to_mod(value, plug[index], mod)
else:
log.warning(
"Unsupported plug type for modifier: %s" % type(value)
)
return False
return True
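# encode/decode translate between string paths and cmdx instances;
# e.g. (example node name):
#
#   node = encode("|pCube1")
#   path = decode(node)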
def encode(path):
assert isinstance(path, string_types), "%s was not string" % path
selectionList = om.MSelectionList()
try:
selectionList.add(path)
except RuntimeError:
raise ExistError("'%s' does not exist" % path)
mobj = selectionList.getDependNode(0)
return Node(mobj)
def fromHash(code, default=None):
try:
return Singleton._instances["%x" % code]
except KeyError:
return default
def fromHex(hex, default=None, safe=True):
node = Singleton._instances.get(hex, default)
if safe and node and node.exists:
return node
else:
return node
def toHash(mobj):
node = Node(mobj)
return node.hashCode
def toHex(mobj):
node = Node(mobj)
return node.hex
def asHash(mobj):
handle = om.MObjectHandle(mobj)
return handle.hashCode()
def asHex(mobj):
return "%x" % asHash(mobj)
if ENABLE_PEP8:
from_hash = fromHash
from_hex = fromHex
to_hash = toHash
to_hex = toHex
as_hash = asHash
as_hex = asHex
degrees = math.degrees
radians = math.radians
sin = math.sin
cos = math.cos
tan = math.tan
pi = math.pi
def meters(cm):
return cm * 0.01
def clear():
Singleton._instances.clear()
def _encode1(path):
selectionList = om1.MSelectionList()
try:
selectionList.add(path)
except RuntimeError:
raise ExistError("'%s' does not exist" % path)
mobject = om1.MObject()
selectionList.getDependNode(0, mobject)
return mobject
def _encodedagpath1(path):
selectionList = om1.MSelectionList()
try:
selectionList.add(path)
except RuntimeError:
raise ExistError("'%s' does not exist" % path)
dagpath = om1.MDagPath()
selectionList.getDagPath(0, dagpath)
return dagpath
def decode(node):
try:
return node.shortestPath()
except AttributeError:
return node.name(namespace=True)
def record_history(func):
@wraps(func)
def decorator(self, *args, **kwargs):
_kwargs = kwargs.copy()
_args = list(args)
        # Store string representations rather than the Node and Plug
        # instances themselves, both for readable history and
        # to facilitate garbage collection.
for index, arg in enumerate(args):
if isinstance(arg, (Node, Plug)):
_args[index] = arg.path()
else:
_args[index] = repr(arg)
for key, value in kwargs.items():
if isinstance(value, (Node, Plug)):
_kwargs[key] = value.path()
else:
_kwargs[key] = repr(value)
self._history.append((func.__name__, _args, _kwargs))
return func(self, *args, **kwargs)
return decorator
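# The modifier classes batch createNode/connect/setAttr calls through
# om.MDGModifier/MDagModifier and register them with the undo queue;
# e.g. (example usage):
#
#   with DagModifier() as mod:
#       parent = mod.createNode("transform", name="parent")
#       child = mod.createNode("transform", name="child", parent=parent)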
class _BaseModifier(object):
Type = om.MDGModifier
def __enter__(self):
self.isContext = True
return self
def __exit__(self, exc_type, exc_value, tb):
# Support calling `doIt` during a context,
# without polluting the undo queue.
if self.isContext and self._opts["undoable"]:
commit(self._modifier.undoIt, self._modifier.doIt)
self.doIt()
def __init__(self,
undoable=True,
interesting=True,
debug=True,
atomic=True,
template=None):
super(_BaseModifier, self).__init__()
self.isDone = False
self.isContext = False
self._modifier = self.Type()
self._history = list()
self._index = 1
self._opts = {
"undoable": undoable,
"interesting": interesting,
"debug": debug,
"atomic": atomic,
"template": template,
}
def doIt(self):
if (not self.isContext) and self._opts["undoable"]:
commit(self._modifier.undoIt, self._modifier.doIt)
try:
self._modifier.doIt()
except RuntimeError:
# Rollback changes
if self._opts["atomic"]:
self.undoIt()
raise ModifierError(self._history)
self.isDone = True
def undoIt(self):
self._modifier.undoIt()
@record_history
def createNode(self, type, name=None):
try:
mobj = self._modifier.createNode(type)
except TypeError:
raise TypeError("'%s' is not a valid node type" % type)
template = self._opts["template"]
if name or template:
name = (template or "{name}").format(
name=name or "",
type=type,
index=self._index,
)
self._modifier.renameNode(mobj, name)
node = Node(mobj, exists=False, modifier=self)
if not self._opts["interesting"]:
plug = node["isHistoricallyInteresting"]
_python_to_mod(False, plug, self._modifier)
self._index += 1
return node
@record_history
def deleteNode(self, node):
return self._modifier.deleteNode(node._mobject)
delete = deleteNode
@record_history
def renameNode(self, node, name):
return self._modifier.renameNode(node._mobject, name)
rename = renameNode
@record_history
def setAttr(self, plug, value):
if isinstance(value, Plug):
value = value.read()
if isinstance(plug, om.MPlug):
value = Plug(plug.node(), plug).read()
_python_to_mod(value, plug, self._modifier)
def resetAttr(self, plug):
self.setAttr(plug, plug.default)
@record_history
def connect(self, src, dst, force=True):
if isinstance(src, Plug):
src = src._mplug
if isinstance(dst, Plug):
dst = dst._mplug
if force:
# Disconnect any plug connected to `other`
for plug in dst.connectedTo(True, False):
self.disconnect(plug, dst)
self._modifier.connect(src, dst)
@record_history
def disconnect(self, a, b=None, source=True, destination=True):
if isinstance(a, Plug):
a = a._mplug
if isinstance(b, Plug):
b = b._mplug
if b is None:
# Disconnect any plug connected to `other`
if source:
for plug in a.connectedTo(True, False):
self._modifier.disconnect(plug, a)
if destination:
for plug in a.connectedTo(False, True):
self._modifier.disconnect(a, plug)
else:
if source:
self._modifier.disconnect(a, b)
if destination:
self._modifier.disconnect(b, a)
if ENABLE_PEP8:
do_it = doIt
undo_it = undoIt
create_node = createNode
delete_node = deleteNode
rename_node = renameNode
set_attr = setAttr
reset_attr = resetAttr
class DGModifier(_BaseModifier):
Type = om.MDGModifier
class DagModifier(_BaseModifier):
Type = om.MDagModifier
@record_history
def createNode(self, type, name=None, parent=None):
parent = parent._mobject if parent else om.MObject.kNullObj
try:
mobj = self._modifier.createNode(type, parent)
except TypeError:
raise TypeError("'%s' is not a valid node type" % type)
template = self._opts["template"]
if name or template:
name = (template or "{name}").format(
name=name or "",
type=type,
index=self._index,
)
self._modifier.renameNode(mobj, name)
return DagNode(mobj, exists=False, modifier=self)
@record_history
def parent(self, node, parent=None):
parent = parent._mobject if parent is not None else None
self._modifier.reparentNode(node._mobject, parent)
if ENABLE_PEP8:
create_node = createNode
class DGContext(om.MDGContext):
def __init__(self, time=None):
if time is not None:
if isinstance(time, (int, float)):
time = om.MTime(time, om.MTime.uiUnit())
super(DGContext, self).__init__(time)
else:
super(DGContext, self).__init__()
self._previousContext = None
def __enter__(self):
if __maya_version__ >= 2018:
self._previousContext = self.makeCurrent()
return self
else:
cmds.error(
"'%s' does not support context manager functionality for Maya 2017 "
"and below" % self.__class__.__name__
)
def __exit__(self, exc_type, exc_value, tb):
if self._previousContext:
self._previousContext.makeCurrent()
# Alias
Context = DGContext
def ls(*args, **kwargs):
return map(encode, cmds.ls(*args, **kwargs))
def selection(*args, **kwargs):
return map(encode, cmds.ls(*args, selection=True, **kwargs))
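# The functions below mirror a subset of maya.cmds, but accept and
# return cmdx types instead of strings.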
def createNode(type, name=None, parent=None):
try:
with DagModifier() as mod:
node = mod.createNode(type, name=name, parent=parent)
except TypeError:
with DGModifier() as mod:
node = mod.createNode(type, name=name)
return node
def getAttr(attr, type=None, time=None):
return attr.read(time=time)
def setAttr(attr, value, type=None):
attr.write(value)
def addAttr(node,
longName,
attributeType,
shortName=None,
enumName=None,
defaultValue=None):
at = attributeType
if isinstance(at, type) and issubclass(at, _AbstractAttribute):
Attribute = attributeType
else:
# Support legacy maya.cmds interface
Attribute = {
"double": Double,
"double3": Double3,
"string": String,
"long": Long,
"bool": Boolean,
"enume": Enum,
}[attributeType]
kwargs = {
"default": defaultValue
}
if enumName:
kwargs["fields"] = enumName.split(":")
attribute = Attribute(longName, **kwargs)
node.addAttr(attribute)
def listRelatives(node,
type=None,
children=False,
allDescendents=False,
parent=False,
shapes=False):
if not isinstance(node, DagNode):
return None
elif allDescendents:
return list(node.descendents(type=type))
elif shapes:
return list(node.shapes(type=type))
elif parent:
return [node.parent(type=type)]
elif children:
return list(node.children(type=type))
def listConnections(attr):
return list(node for node in attr.connections())
def connectAttr(src, dst):
src.connect(dst)
def delete(*nodes):
with DGModifier() as mod:
for node in nodes:
mod.delete(node)
def rename(node, name):
with DGModifier() as mod:
mod.rename(node, name)
def parent(children, parent, relative=True, absolute=False):
assert isinstance(parent, DagNode), "parent must be DagNode"
if not isinstance(children, (tuple, list)):
children = [children]
for child in children:
assert isinstance(child, DagNode), "child must be DagNode"
parent.addChild(child)
def objExists(obj):
if isinstance(obj, (Node, Plug)):
obj = obj.path()
try:
om.MSelectionList().add(obj)
except RuntimeError:
return False
else:
return True
# PEP08
sl = selection
create_node = createNode
get_attr = getAttr
set_attr = setAttr
add_attr = addAttr
list_relatives = listRelatives
list_connections = listConnections
connect_attr = connectAttr
obj_exists = objExists
# Speciality functions
kOpen = om1.MFnNurbsCurve.kOpen
kClosed = om1.MFnNurbsCurve.kClosed
kPeriodic = om1.MFnNurbsCurve.kPeriodic
def editCurve(parent, points, degree=1, form=kOpen):
assert isinstance(parent, DagNode), (
"parent must be of type cmdx.DagNode"
)
degree = min(3, max(1, degree))
cvs = om1.MPointArray()
curveFn = om1.MFnNurbsCurve()
for point in points:
cvs.append(om1.MPoint(*point))
mobj = curveFn.createWithEditPoints(cvs,
degree,
form,
False,
False,
True,
_encode1(parent.path()))
mod = om1.MDagModifier()
mod.renameNode(mobj, parent.name(namespace=True) + "Shape")
mod.doIt()
def undo():
mod.deleteNode(mobj)
mod.doIt()
def redo():
mod.undoIt()
commit(undo, redo)
shapeFn = om1.MFnDagNode(mobj)
return encode(shapeFn.fullPathName())
def curve(parent, points, degree=1, form=kOpen):
assert isinstance(parent, DagNode), (
"parent must be of type cmdx.DagNode"
)
assert parent._modifier is None or parent._modifier.isDone, (
"curve() currently doesn't work with a modifier"
)
degree = min(3, max(1, degree))
cvs = om1.MPointArray()
knots = om1.MDoubleArray()
curveFn = om1.MFnNurbsCurve()
knotcount = len(points) - degree + 2 * degree - 1
for point in points:
cvs.append(om1.MPoint(*point))
for index in range(knotcount):
knots.append(index)
mobj = curveFn.create(cvs,
knots,
degree,
form,
False,
True,
_encode1(parent.path()))
mod = om1.MDagModifier()
mod.renameNode(mobj, parent.name(namespace=True) + "Shape")
mod.doIt()
def undo():
mod.deleteNode(mobj)
mod.doIt()
def redo():
mod.undoIt()
commit(undo, redo)
shapeFn = om1.MFnDagNode(mobj)
return encode(shapeFn.fullPathName())
def lookAt(origin, center, up=None):
if isinstance(origin, (tuple, list)):
origin = Vector(origin)
if isinstance(center, (tuple, list)):
center = Vector(center)
if up is not None and isinstance(up, (tuple, list)):
up = Vector(up)
up = up or Vector(0, 1, 0)
x = (center - origin).normalize()
y = ((center - origin) ^ (center - up)).normalize()
z = x ^ y
return MatrixType((
x[0], x[1], x[2], 0,
y[0], y[1], y[2], 0,
z[0], z[1], z[2], 0,
        0, 0, 0, 1
))
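# lookAt builds an orientation matrix whose X axis points from
# `origin` towards `center`; e.g. (example values):
#
#   mat = lookAt((0, 0, 0), (1, 0, 0), up=(0, 1, 0))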
if ENABLE_PEP8:
look_at = lookAt
def first(iterator, default=None):
return next(iterator, default)
def last(iterator, default=None):
last = default
for member in iterator:
last = member
return last
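# The attribute classes below describe plug-in attributes declaratively;
# an instance knows how to create its MObject via the appropriate MFn*
# function set. They are used both by `addAttr` above and by the plug-in
# base classes further down; e.g. (example attribute name):
#
#   node.addAttr(Double("myValue", default=1.0))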
class _AbstractAttribute(dict):
Fn = None
Type = None
Default = None
Readable = True
Writable = True
Cached = True
Storable = True
Hidden = False
Array = False
Connectable = True
Keyable = True
ChannelBox = False
AffectsAppearance = False
AffectsWorldSpace = False
Help = ""
def __eq__(self, other):
try:
return self["name"] == other["name"]
except AttributeError:
return self["name"] == other
def __ne__(self, other):
try:
return self["name"] != other["name"]
except AttributeError:
return self["name"] != other
def __hash__(self):
return hash(self["name"])
def __repr__(self):
return self["name"]
def __new__(cls, *args, **kwargs):
if not args:
return cls, kwargs
return super(_AbstractAttribute, cls).__new__(cls, *args, **kwargs)
def __init__(self,
name,
default=None,
label=None,
writable=None,
readable=None,
cached=None,
storable=None,
keyable=None,
hidden=None,
min=None,
max=None,
channelBox=None,
affectsAppearance=None,
affectsWorldSpace=None,
array=False,
connectable=True,
help=None):
args = locals().copy()
args.pop("self")
self["name"] = args.pop("name")
self["label"] = args.pop("label")
self["default"] = args.pop("default")
self["min"] = args.pop("min")
self["max"] = args.pop("max")
self["mobject"] = None
self["shortName"] = self["name"][0].lower() + self["name"][1:]
for key, value in args.items():
default = getattr(self, key[0].upper() + key[1:])
self[key] = value if value is not None else default
def default(self, cls=None):
if self["default"] is not None:
return self["default"]
if cls is not None:
return cls.defaults.get(self["name"], self.Default)
return self.Default
def type(self):
return self.Type
def create(self, cls=None):
args = [
arg
for arg in (self["name"],
self["shortName"],
self.type())
if arg is not None
]
default = self.default(cls)
if default:
if isinstance(default, (list, tuple)):
args += default
else:
args += [default]
self["mobject"] = self.Fn.create(*args)
self.Fn.storable = self["storable"]
self.Fn.readable = self["readable"]
self.Fn.writable = self["writable"]
self.Fn.connectable = self["connectable"]
self.Fn.hidden = self["hidden"]
self.Fn.cached = self["cached"]
self.Fn.keyable = self["keyable"]
self.Fn.channelBox = self["channelBox"]
self.Fn.affectsAppearance = self["affectsAppearance"]
self.Fn.affectsWorldSpace = self["affectsWorldSpace"]
self.Fn.array = self["array"]
if self["min"] is not None:
self.Fn.setMin(self["min"])
if self["max"] is not None:
self.Fn.setMax(self["max"])
if self["label"] is not None:
self.Fn.setNiceNameOverride(self["label"])
return self["mobject"]
def read(self, data):
pass
class Enum(_AbstractAttribute):
Fn = om.MFnEnumAttribute()
Type = None
Default = 0
Keyable = True
def __init__(self, name, fields=None, default=0, label=None, **kwargs):
super(Enum, self).__init__(name, default, label, **kwargs)
self.update({
"fields": fields or (name,),
})
def create(self, cls=None):
attr = super(Enum, self).create(cls)
for index, field in enumerate(self["fields"]):
self.Fn.addField(field, index)
return attr
def read(self, data):
return data.inputValue(self["mobject"]).asShort()
class Divider(Enum):
def __init__(self, label, **kwargs):
kwargs.pop("name", None)
kwargs.pop("fields", None)
kwargs.pop("label", None)
super(Divider, self).__init__(label, fields=(label,), label=" ", **kwargs)
class String(_AbstractAttribute):
Fn = om.MFnTypedAttribute()
Type = om.MFnData.kString
Default = ""
def default(self, cls=None):
default = str(super(String, self).default(cls))
return om.MFnStringData().create(default)
def read(self, data):
return data.inputValue(self["mobject"]).asString()
class Message(_AbstractAttribute):
Fn = om.MFnMessageAttribute()
Type = None
Default = None
Storable = False
class Matrix(_AbstractAttribute):
Fn = om.MFnMatrixAttribute()
Default = (0.0,) * 4 * 4
Array = False
Readable = True
Keyable = False
Hidden = False
def default(self, cls=None):
return None
def read(self, data):
return data.inputValue(self["mobject"]).asMatrix()
class Long(_AbstractAttribute):
Fn = om.MFnNumericAttribute()
Type = om.MFnNumericData.kLong
Default = 0
def read(self, data):
return data.inputValue(self["mobject"]).asLong()
class Double(_AbstractAttribute):
Fn = om.MFnNumericAttribute()
Type = om.MFnNumericData.kDouble
Default = 0.0
def read(self, data):
return data.inputValue(self["mobject"]).asDouble()
class Double3(_AbstractAttribute):
Fn = om.MFnNumericAttribute()
Type = None
Default = (0.0,) * 3
def default(self, cls=None):
if self["default"] is not None:
default = self["default"]
if not isinstance(default, (tuple, list)):
default = (default,) * 3
elif cls is not None:
default = cls.defaults.get(self["name"], self.Default)
else:
default = self.Default
children = list()
for index, child in enumerate("XYZ"):
attribute = self.Fn.create(self["name"] + child,
self["shortName"] + child,
om.MFnNumericData.kDouble,
default[index])
children.append(attribute)
return children
def read(self, data):
return data.inputValue(self["mobject"]).asDouble3()
class Boolean(_AbstractAttribute):
Fn = om.MFnNumericAttribute()
Type = om.MFnNumericData.kBoolean
Default = True
def read(self, data):
return data.inputValue(self["mobject"]).asBool()
class AbstractUnit(_AbstractAttribute):
Fn = om.MFnUnitAttribute()
Default = 0.0
Min = None
Max = None
SoftMin = None
SoftMax = None
class Angle(AbstractUnit):
def default(self, cls=None):
default = super(Angle, self).default(cls)
if not isinstance(default, om.MAngle):
default = om.MAngle(default, om.MAngle.kDegrees)
return default
class Time(AbstractUnit):
def default(self, cls=None):
default = super(Time, self).default(cls)
if not isinstance(default, om.MTime):
default = om.MTime(default, om.MTime.kSeconds)
return default
class Distance(AbstractUnit):
def default(self, cls=None):
default = super(Distance, self).default(cls)
if not isinstance(default, om.MDistance):
default = om.MDistance(default, om.MDistance.kCentimeters)
return default
class Compound(_AbstractAttribute):
Fn = om.MFnCompoundAttribute()
Multi = None
def __init__(self, name, children=None, **kwargs):
if not children and self.Multi:
default = kwargs.pop("default", None)
children, Type = self.Multi
children = tuple(
Type(name + child, default=default[index], **kwargs)
if default else Type(name + child, **kwargs)
for index, child in enumerate(children)
)
self["children"] = children
else:
self["children"] = children
super(Compound, self).__init__(name, **kwargs)
def default(self, cls=None):
pass
def create(self, cls=None):
mobj = super(Compound, self).create(cls)
default = super(Compound, self).default(cls)
for index, child in enumerate(self["children"]):
# Forward attributes from parent to child
for attr in ("storable",
"readable",
"writable",
"hidden",
"channelBox",
"keyable",
"array"):
child[attr] = self[attr]
if child["default"] is None and default is not None:
child["default"] = default[index]
self.Fn.addChild(child.create(cls))
return mobj
def read(self, handle):
output = list()
for child in self["children"]:
child_handle = handle.child(child["mobject"])
output.append(child.read(child_handle))
return tuple(output)
class Double2(Compound):
Multi = ("XY", Double)
class Double4(Compound):
Multi = ("XYZW", Double)
class Angle2(Compound):
Multi = ("XY", Angle)
class Angle3(Compound):
Multi = ("XYZ", Angle)
class Distance2(Compound):
Multi = ("XY", Distance)
class Distance3(Compound):
Multi = ("XYZ", Distance)
class Distance4(Compound):
Multi = ("XYZW", Distance)
# Convenience aliases, for when it isn't clear e.g. `Matrix()`
EnumAttribute = Enum
DividerAttribute = Divider
StringAttribute = String
MessageAttribute = Message
MatrixAttribute = Matrix
LongAttribute = Long
DoubleAttribute = Double
Double3Attribute = Double3
BooleanAttribute = Boolean
AbstractUnitAttribute = AbstractUnit
AngleAttribute = Angle
TimeAttribute = Time
DistanceAttribute = Distance
CompoundAttribute = Compound
Double2Attribute = Double2
Double4Attribute = Double4
Angle2Attribute = Angle2
Angle3Attribute = Angle3
Distance2Attribute = Distance2
Distance3Attribute = Distance3
Distance4Attribute = Distance4
#
# With cmdx, you call upon API calls directly. There is little to no
# correlation between each of your calls, which is great for performance
# but not so great for conforming to the undo/redo framework set forth
# by Autodesk.
#
# To work around this, without losing out on performance or functionality,
# a generic command is created, capable of hosting arbitrary API calls
# and storing them in the Undo/Redo framework.
#
# >>> node = cmdx.createNode("transform")
# >>> cmdx.commit(lambda: cmdx.delete(node))
#
# Now when you go to undo, the `lambda` is called. It is then up to you
# the developer to ensure that what is being undone actually relates
# to what you wanted to have undone. For example, it is perfectly
# possible to add an unrelated call to history.
#
# >>> node = cmdx.createNode("transform")
# >>> cmdx.commit(lambda: cmdx.setAttr(node + "translateX", 5))
#
# The result would be setting an attribute to `5` when attempting to undo.
#
# --------------------------------------------------------
# Support for multiple co-existing versions of apiundo.
# NOTE: This is important for vendoring, as otherwise a vendored apiundo
# could register e.g. cmds.apiUndo() first, causing a newer version
# to inadvertently use this older command (or worse yet, throwing an
# error when trying to register it again).
command = "_cmdxApiUndo_%s" % __version__.replace(".", "_")
# This module is both a Python module and Maya plug-in.
# Data is shared amongst the two through this "module"
name = "_cmdxShared_"
if name not in sys.modules:
sys.modules[name] = types.ModuleType(name)
shared = sys.modules[name]
shared.undo = None
shared.redo = None
shared.undos = {}
shared.redos = {}
def commit(undo, redo=lambda: None):
if not ENABLE_UNDO:
return
if not hasattr(cmds, command):
install()
# Precautionary measure.
# If this doesn't pass, odds are we've got a race condition.
# NOTE: This assumes calls to `commit` can only be done
# from a single thread, which should already be the case
# given that Maya's API is not threadsafe.
try:
assert shared.redo is None
assert shared.undo is None
except AssertionError:
log.debug("%s has a problem with undo" % __name__)
shared.undo = "%x" % id(undo)
shared.redo = "%x" % id(redo)
shared.undos[shared.undo] = undo
shared.redos[shared.redo] = redo
getattr(cmds, command)()
def install():
if ENABLE_UNDO:
cmds.loadPlugin(__file__, quiet=True)
self.installed = True
def uninstall():
if ENABLE_UNDO:
cmds.flushUndo()
shared.undo = None
shared.redo = None
shared.undos.clear()
shared.redos.clear()
sys.modules.pop(name, None)
cmds.unloadPlugin(os.path.basename(__file__))
self.installed = False
def maya_useNewAPI():
pass
class _apiUndo(om.MPxCommand):
def doIt(self, args):
self.undo = shared.undo
self.redo = shared.redo
shared.undo = None
shared.redo = None
def undoIt(self):
shared.undos[self.undo]()
def redoIt(self):
shared.redos[self.redo]()
def isUndoable(self):
return True
def initializePlugin(plugin):
om.MFnPlugin(plugin).registerCommand(
command,
_apiUndo
)
def uninitializePlugin(plugin):
om.MFnPlugin(plugin).deregisterCommand(command)
tAddDoubleLinear = om.MTypeId(0x4441444c)
tAddMatrix = om.MTypeId(0x44414d58)
tAngleBetween = om.MTypeId(0x4e414254)
tBlendShape = om.MTypeId(0x46424c53)
tMultMatrix = om.MTypeId(0x444d544d)
tAngleDimension = om.MTypeId(0x4147444e)
tBezierCurve = om.MTypeId(0x42435256)
tCamera = om.MTypeId(0x4443414d)
tChoice = om.MTypeId(0x43484345)
tChooser = om.MTypeId(0x43484f4f)
tCondition = om.MTypeId(0x52434e44)
tMesh = om.MTypeId(0x444d5348)
tNurbsCurve = om.MTypeId(0x4e435256)
tNurbsSurface = om.MTypeId(0x4e535246)
tJoint = om.MTypeId(0x4a4f494e)
tTransform = om.MTypeId(0x5846524d)
tTransformGeometry = om.MTypeId(0x5447454f)
tWtAddMatrix = om.MTypeId(0x4457414d)
InstalledPlugins = dict()
TypeId = om.MTypeId
StartId = int(os.getenv("CMDX_BASETYPEID", "0x12b9c0"), 0)
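# Plug-in authoring support: subclass DgNode/SurfaceShape/LocatorNode,
# declare `attributes` and `affects`, then register via initialize2();
# e.g. (example plug-in skeleton):
#
#   class MyNode(DgNode):
#       name = "myNode"
#       typeid = TypeId(StartId + 1)
#       attributes = [Double("input"), Double("output")]
#       affects = [("input", "output")]
#
#   initializePlugin = initialize2(MyNode)
#   uninitializePlugin = uninitialize2(MyNode)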
class MetaNode(type):
def __init__(cls, *args, **kwargs):
assert isinstance(cls.name, str)
assert isinstance(cls.defaults, dict)
assert isinstance(cls.attributes, list)
assert isinstance(cls.version, tuple)
if isinstance(cls.typeid, (int, float)):
cls.typeid = TypeId(cls.typeid)
index = 1
for attribute in cls.attributes:
if isinstance(attribute, Divider):
attribute["name"] = "_" * index
attribute["shortName"] = "_" * index
index += 1
assert len(set(cls.attributes)) == len(cls.attributes), (
"One or more attributes in '%s' was found more than once"
% cls.__name__
)
attributes = {attr["name"]: attr for attr in cls.attributes}
def findAttribute(self, name):
return attributes.get(name)
def findMObject(self, name):
return attributes.get(name)["mobject"]
def findPlug(self, node, name):
try:
mobj = attributes.get(name)["mobject"]
return om.MPlug(node, mobj)
except KeyError:
return None
cls.findAttribute = findAttribute
cls.findMObject = findMObject
cls.findPlug = findPlug
cls.find_attribute = findAttribute
cls.find_mobject = findMObject
cls.find_plug = findPlug
cls.log = logging.getLogger(cls.__name__)
return super(MetaNode, cls).__init__(*args, **kwargs)
@add_metaclass(MetaNode)
class DgNode(om.MPxNode):
typeid = TypeId(StartId)
name = "defaultNode"
version = (0, 0)
attributes = list()
affects = list()
ranges = dict()
defaults = {}
@classmethod
def postInitialize(cls):
pass
@add_metaclass(MetaNode)
class SurfaceShape(om.MPxSurfaceShape):
typeid = TypeId(StartId)
classification = "drawdb/geometry/custom"
name = "defaultNode"
version = (0, 0)
attributes = list()
affects = list()
ranges = dict()
defaults = {}
@classmethod
def postInitialize(cls):
pass
@classmethod
def uiCreator(cls):
pass
@add_metaclass(MetaNode)
class SurfaceShapeUI(omui.MPxSurfaceShapeUI):
typeid = TypeId(StartId)
classification = "drawdb/geometry/custom"
name = "defaultNode"
version = (0, 0)
attributes = list()
affects = list()
ranges = dict()
defaults = {}
@classmethod
def postInitialize(cls):
pass
@add_metaclass(MetaNode)
class LocatorNode(omui.MPxLocatorNode):
name = "defaultNode"
typeid = TypeId(StartId)
classification = "drawdb/geometry/custom"
version = (0, 0)
attributes = list()
affects = list()
ranges = dict()
defaults = {}
@classmethod
def postInitialize(cls):
pass
def initialize2(Plugin):
def _nodeInit():
nameToAttr = {}
for attr in Plugin.attributes:
mattr = attr.create(Plugin)
Plugin.addAttribute(mattr)
nameToAttr[attr["name"]] = mattr
for src, dst in Plugin.affects:
log.debug("'%s' affects '%s'" % (src, dst))
Plugin.attributeAffects(nameToAttr[src], nameToAttr[dst])
def _nodeCreator():
return Plugin()
def initializePlugin(obj):
version = ".".join(map(str, Plugin.version))
plugin = om.MFnPlugin(obj, "Cmdx", version, "Any")
try:
if issubclass(Plugin, LocatorNode):
plugin.registerNode(Plugin.name,
Plugin.typeid,
_nodeCreator,
_nodeInit,
om.MPxNode.kLocatorNode,
Plugin.classification)
elif issubclass(Plugin, DgNode):
plugin.registerNode(Plugin.name,
Plugin.typeid,
_nodeCreator,
_nodeInit)
elif issubclass(Plugin, SurfaceShape):
plugin.registerShape(Plugin.name,
Plugin.typeid,
_nodeCreator,
_nodeInit,
Plugin.uiCreator,
Plugin.classification)
else:
raise TypeError("Unsupported subclass: '%s'" % Plugin)
except Exception:
raise
else:
InstalledPlugins[Plugin.name] = Plugin
Plugin.postInitialize()
return initializePlugin
def uninitialize2(Plugin):
def uninitializePlugin(obj):
om.MFnPlugin(obj).deregisterNode(Plugin.typeid)
return uninitializePlugin
class MPxManipContainer1(ompx1.MPxManipContainer):
name = "defaultManip"
version = (0, 0)
ownerid = om1.MTypeId(StartId)
typeid = om1.MTypeId(StartId)
def initializeManipulator1(Manipulator):
def _manipulatorCreator():
return ompx1.asMPxPtr(Manipulator())
def _manipulatorInit():
ompx1.MPxManipContainer.addToManipConnectTable(Manipulator.ownerid)
ompx1.MPxManipContainer.initialize()
def initializePlugin(obj):
version = ".".join(map(str, Manipulator.version))
plugin = ompx1.MFnPlugin(obj, "Cmdx", version, "Any")
endswith("Manip"), (
"Manipulator '%s' must have the name of a plug-in, "
"and end with 'Manip'"
)
plugin.registerNode(
Manipulator.name,
Manipulator.typeid,
_manipulatorCreator,
_manipulatorInit,
ompx1.MPxNode.kManipContainer
)
return initializePlugin
def uninitializeManipulator1(Manipulator):
def uninitializePlugin(obj):
ompx1.MFnPlugin(obj).deregisterNode(Manipulator.typeid)
return uninitializePlugin
def findPlugin(name):
try:
return InstalledPlugins[name]
except KeyError:
raise ExistError("'%s' is not a recognised plug-in" % name)
class Callback(object):
log = logging.getLogger("cmdx.Callback")
def __init__(self, name, installer, args, api=2, help="", parent=None):
self._id = None
self._args = args
self._name = name
self._installer = installer
self._help = help
self._uninstaller = {
1: om1.MMessage.removeCallback,
2: om.MMessage.removeCallback
}[api]
def __del__(self):
self.deactivate()
def name(self):
return self._name
def help(self):
return self._help
def is_active(self):
return self._id is not None
def activate(self):
self.log.debug("Activating callback '%s'.." % self._name)
if self.is_active():
self.log.debug("%s already active, ignoring" % self._name)
return
self._id = self._installer(*self._args)
def deactivate(self):
self.log.debug("Deactivating callback '%s'.." % self._name)
if self.is_active():
self._uninstaller(self._id)
self._id = None
class CallbackGroup(list):
def __init__(self, name, callbacks, parent=None):
self._name = name
self[:] = callbacks
def name(self):
return self._name
def add(self, name, installer, args, api=2):
callback = Callback(name, installer, args, api)
self.append(callback)
def activate(self):
        for callback in self:
callback.activate()
def deactivate(self):
for callback in self:
callback.deactivate()
class Cache(object):
def __init__(self):
self._values = {}
def clear(self, node=None):
pass
def read(self, node, attr, time):
pass
def transform(self, node):
pass
| true
| true
|
790e0ce2f08f084c7a447e3f2a97c3bf8f907996
| 1,020
|
py
|
Python
|
py/parse_sort_objects.py
|
rckmnt/How-Big-Is-A-Thing-
|
48d723c8410e5014e234a979cabcf77a05b18393
|
[
"MIT"
] | null | null | null |
py/parse_sort_objects.py
|
rckmnt/How-Big-Is-A-Thing-
|
48d723c8410e5014e234a979cabcf77a05b18393
|
[
"MIT"
] | null | null | null |
py/parse_sort_objects.py
|
rckmnt/How-Big-Is-A-Thing-
|
48d723c8410e5014e234a979cabcf77a05b18393
|
[
"MIT"
] | null | null | null |
# parse list of objects
import csv
file = "basic_objects.txt"
objects = []
with open(file) as f:
for line in f:
if line[0:2] == '//' or line.strip() == '': # skip comments and empty lines
pass
else:
obj = line.rstrip() # strip Newlines
obj = obj.capitalize()
o = [o.capitalize() for o in obj.split(' ')] # Capitalize every word
obj = ' '.join(o)
objects.append(obj)
nice_objects = sorted(set(objects))
print(nice_objects[0:10], "............", nice_objects[-10:-1])
print(len(nice_objects))
def print_dupes(objs): # test for dupes
last = None
for n in nice_objects:
if n == last:
print(n)
last = n
# Write out txt list
# with open("cleaned_basic_objects.txt", 'wb') as csvfile:
# writer = csv.writer(csvfile)
# writer.writerows(n.split(',') for n in nice_objects) # idk why you need the split
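# A minimal sketch of the export step outlined in the comments above; the output
# filename is an assumption, and csv is unnecessary for a one-column list.
if __name__ == '__main__':
    with open("cleaned_basic_objects.txt", 'w') as out:
        out.write('\n'.join(nice_objects) + '\n')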
| 30
| 91
| 0.523529
|
import csv
file = "basic_objects.txt"
objects = []
with open(file) as f:
for line in f:
if line[0:2] == '//' or line.strip() == '':
pass
else:
obj = line.rstrip()
obj = obj.capitalize()
o = [o.capitalize() for o in obj.split(' ')]
obj = ' '.join(o)
objects.append(obj)
nice_objects = sorted(set(objects))
print(nice_objects[0:10], "............", nice_objects[-10:-1])
print(len(nice_objects))
def print_dupes(objs):
last = None
for n in nice_objects:
if n == last:
print(n)
last = n
| true
| true
|
790e0cfb98cd34babc58558cceb42f5a63791c4b
| 840
|
py
|
Python
|
Python learnings/Django projects/learning_users/basic_app/models.py
|
warpalatino/public
|
f04ce183799bcdd2fb8dc376d41d286314c19460
|
[
"MIT"
] | 1
|
2021-01-04T10:37:16.000Z
|
2021-01-04T10:37:16.000Z
|
Python learnings/Django projects/learning_users/basic_app/models.py
|
warpalatino/public
|
f04ce183799bcdd2fb8dc376d41d286314c19460
|
[
"MIT"
] | null | null | null |
Python learnings/Django projects/learning_users/basic_app/models.py
|
warpalatino/public
|
f04ce183799bcdd2fb8dc376d41d286314c19460
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
# Which data the user already has:
# SuperUserInformation
# User: Jose
# Email: training@pieriandata.com
# Password: testpassword
# Create your models here.
class UserProfileInfo(models.Model):
# Create relationship (don't inherit from User!)
user = models.OneToOneField(User, on_delete=models.CASCADE)
# Add any additional attributes to the user you want
portfolio_site = models.URLField(blank=True)
# Requires Pillow (pip install pillow). blank=True so users do not have to
# upload a profile picture if they do not want to.
profile_pic = models.ImageField(upload_to='basic_app/profile_pics',blank=True)
def __str__(self):
# Built-in attribute of django.contrib.auth.models.User !
return self.user.username
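# Illustrative only (not part of the tutorial models): how a profile is typically
# linked to an existing User instance. The default portfolio URL is made up.
def create_profile_for(user, site="https://example.com"):
    """Minimal sketch: create and save a UserProfileInfo tied to an existing User."""
    profile = UserProfileInfo(user=user, portfolio_site=site)
    profile.save()
    return profile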
| 33.6
| 92
| 0.717857
|
from django.db import models
from django.contrib.auth.models import User
class UserProfileInfo(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
# Add any additional attributes to the user you want
portfolio_site = models.URLField(blank=True)
# Requires Pillow (pip install pillow). blank=True so users do not have to
# upload a profile picture if they do not want to.
profile_pic = models.ImageField(upload_to='basic_app/profile_pics',blank=True)
def __str__(self):
# Built-in attribute of django.contrib.auth.models.User !
return self.user.username
| true
| true
|
790e0d697f3464551928b67195f4c0d850b6829c
| 7,240
|
py
|
Python
|
mxfusion/components/distributions/gp/kernels/static.py
|
JeremiasKnoblauch/MXFusion
|
af6223e9636b055d029d136dd7ae023b210b4560
|
[
"Apache-2.0"
] | 2
|
2019-05-31T09:50:47.000Z
|
2021-03-06T09:38:47.000Z
|
mxfusion/components/distributions/gp/kernels/static.py
|
JeremiasKnoblauch/MXFusion
|
af6223e9636b055d029d136dd7ae023b210b4560
|
[
"Apache-2.0"
] | null | null | null |
mxfusion/components/distributions/gp/kernels/static.py
|
JeremiasKnoblauch/MXFusion
|
af6223e9636b055d029d136dd7ae023b210b4560
|
[
"Apache-2.0"
] | 1
|
2019-05-30T09:39:46.000Z
|
2019-05-30T09:39:46.000Z
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
from .kernel import NativeKernel
from ....variables import Variable
from ....variables import PositiveTransformation
from .....util.customop import broadcast_to_w_samples
class Bias(NativeKernel):
"""
Bias kernel, which produces a constant value for every entries of the covariance matrix.
.. math::
k(x,y) = \\sigma^2
:param input_dim: the number of dimensions of the kernel. (The total number of active dimensions).
:type input_dim: int
:param variance: the initial value for the variance parameter.
:type variance: float or MXNet NDArray
:param name: the name of the kernel. The name is used to access kernel parameters.
:type name: str
:param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation.
(default: None, taking all the dimensions).
:type active_dims: [int] or None
:param dtype: the data type for floating point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
"""
broadcastable = True
def __init__(self, input_dim, variance=1., name='bias', active_dims=None,
dtype=None, ctx=None):
super(Bias, self).__init__(input_dim=input_dim, name=name,
active_dims=active_dims, dtype=dtype,
ctx=ctx)
if not isinstance(variance, Variable):
variance = Variable(shape=(1,),
transformation=PositiveTransformation(),
initial_value=variance)
self.variance = variance
def _compute_K(self, F, X, variance, X2=None):
"""
The internal interface for the actual covariance matrix computation.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param X2: (optional) the second set of arguments to the kernel. If X2 is None,
this computes a square covariance matrix of X. In other words, X2 is internally treated as X.
:type X2: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter.
:type variance: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
"""
if X2 is None:
X2 = X
return broadcast_to_w_samples(F, variance, X.shape[:-1] +
(X2.shape[-2],))
def _compute_Kdiag(self, F, X, variance):
"""
The internal interface for the actual computation for the diagonal.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter.
:type variance: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
"""
return broadcast_to_w_samples(F, variance, X.shape[:-1])
class White(NativeKernel):
"""
White kernel, which produces a constant value for the diagonal of the covariance matrix.
.. math::
K = \\sigma^2 I
:param input_dim: the number of dimensions of the kernel. (The total number of active dimensions).
:type input_dim: int
:param variance: the initial value for the variance parameter.
:type variance: float or MXNet NDArray
:param name: the name of the kernel. The name is used to access kernel parameters.
:type name: str
:param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation.
(default: None, taking all the dimensions).
:type active_dims: [int] or None
:param dtype: the data type for floating point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
"""
broadcastable = True
def __init__(self, input_dim, variance=1., name='white', active_dims=None,
dtype=None, ctx=None):
super(White, self).__init__(input_dim=input_dim, name=name,
active_dims=active_dims, dtype=dtype,
ctx=ctx)
if not isinstance(variance, Variable):
variance = Variable(shape=(1,),
transformation=PositiveTransformation(),
initial_value=variance)
self.variance = variance
def _compute_K(self, F, X, variance, X2=None):
"""
The internal interface for the actual covariance matrix computation.
:param F: MXNet computation type <mx.sym, mx.nd>
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square
covariance matrix of X. In other words, X2 is internally treated as X.
:type X2: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter.
:type variance: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
"""
if X2 is None:
Imat = F.eye(N=X.shape[-2:-1][0],
ctx=self.ctx,
dtype=self.dtype)
Imat = broadcast_to_w_samples(F, Imat, X.shape[:-1] +
X.shape[-2:-1], False)
return Imat * broadcast_to_w_samples(F, variance, X.shape[:-1] +
X.shape[-2:-1])
else:
return F.zeros(shape=X.shape[:-1] + X2.shape[-2:-1], ctx=self.ctx,
dtype=self.dtype)
def _compute_Kdiag(self, F, X, variance):
"""
The internal interface for the actual computation for the diagonal of the covariance matrix.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter.
:type variance: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
"""
return broadcast_to_w_samples(F, variance, X.shape[:-1])
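# Illustrative only (not part of the library source): constructing the two static
# kernels documented above. The parameter values are arbitrary examples.
def _example_static_kernels():
    """Minimal sketch, assuming the surrounding module imports cleanly."""
    bias = Bias(input_dim=2, variance=1.)
    noise = White(input_dim=2, variance=0.1)
    return bias, noise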
| 43.878788
| 110
| 0.618785
|
from .kernel import NativeKernel
from ....variables import Variable
from ....variables import PositiveTransformation
from .....util.customop import broadcast_to_w_samples
class Bias(NativeKernel):
broadcastable = True
def __init__(self, input_dim, variance=1., name='bias', active_dims=None,
dtype=None, ctx=None):
super(Bias, self).__init__(input_dim=input_dim, name=name,
active_dims=active_dims, dtype=dtype,
ctx=ctx)
if not isinstance(variance, Variable):
variance = Variable(shape=(1,),
transformation=PositiveTransformation(),
initial_value=variance)
self.variance = variance
def _compute_K(self, F, X, variance, X2=None):
if X2 is None:
X2 = X
return broadcast_to_w_samples(F, variance, X.shape[:-1] +
(X2.shape[-2],))
def _compute_Kdiag(self, F, X, variance):
return broadcast_to_w_samples(F, variance, X.shape[:-1])
class White(NativeKernel):
broadcastable = True
def __init__(self, input_dim, variance=1., name='white', active_dims=None,
dtype=None, ctx=None):
super(White, self).__init__(input_dim=input_dim, name=name,
active_dims=active_dims, dtype=dtype,
ctx=ctx)
if not isinstance(variance, Variable):
variance = Variable(shape=(1,),
transformation=PositiveTransformation(),
initial_value=variance)
self.variance = variance
def _compute_K(self, F, X, variance, X2=None):
if X2 is None:
Imat = F.eye(N=X.shape[-2:-1][0],
ctx=self.ctx,
dtype=self.dtype)
Imat = broadcast_to_w_samples(F, Imat, X.shape[:-1] +
X.shape[-2:-1], False)
return Imat * broadcast_to_w_samples(F, variance, X.shape[:-1] +
X.shape[-2:-1])
else:
return F.zeros(shape=X.shape[:-1] + X2.shape[-2:-1], ctx=self.ctx,
dtype=self.dtype)
def _compute_Kdiag(self, F, X, variance):
return broadcast_to_w_samples(F, variance, X.shape[:-1])
| true
| true
|
790e0d94f216908823fa654ecbd8eaa6320f382c
| 54
|
py
|
Python
|
Crypto-babyFibo/ans/secret.py
|
scnu-sloth/hsctf-2021-freshmen
|
251d3e9d13e0d430eb5b76775acde519648b401f
|
[
"MIT"
] | null | null | null |
Crypto-babyFibo/ans/secret.py
|
scnu-sloth/hsctf-2021-freshmen
|
251d3e9d13e0d430eb5b76775acde519648b401f
|
[
"MIT"
] | null | null | null |
Crypto-babyFibo/ans/secret.py
|
scnu-sloth/hsctf-2021-freshmen
|
251d3e9d13e0d430eb5b76775acde519648b401f
|
[
"MIT"
] | 1
|
2021-11-26T14:35:18.000Z
|
2021-11-26T14:35:18.000Z
|
flag = b'HSCTF{1d9cb42f-3302-46f3-a3a7-0ca30d631cc9}'
| 27
| 53
| 0.777778
|
flag = b'HSCTF{1d9cb42f-3302-46f3-a3a7-0ca30d631cc9}'
| true
| true
|
790e0dd71afd4d196424463192f46a938c992ed8
| 9,694
|
py
|
Python
|
baseline/fast_rcnn/trainer.py
|
ITMO-NSS-team/LightObjRecEnsembler
|
1375400f0a681aefdd3ab484e828257fd7aed318
|
[
"BSD-3-Clause"
] | 4
|
2021-07-01T14:04:47.000Z
|
2022-03-05T08:31:40.000Z
|
baseline/fast_rcnn/trainer.py
|
ITMO-NSS-team/LightObjRecEnsembler
|
1375400f0a681aefdd3ab484e828257fd7aed318
|
[
"BSD-3-Clause"
] | 3
|
2021-09-10T14:13:42.000Z
|
2021-10-05T11:35:07.000Z
|
baseline/fast_rcnn/trainer.py
|
ITMO-NSS-team/LightObjRecEnsembler
|
1375400f0a681aefdd3ab484e828257fd7aed318
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
import os
from collections import namedtuple
import time
from torch.nn import functional as F
from baseline.fast_rcnn.model.utils.creator_tool import AnchorTargetCreator, ProposalTargetCreator
from torch import nn
import torch as t
from baseline.fast_rcnn.utils import array_tool as at
from baseline.fast_rcnn.utils.vis_tool import Visualizer
from baseline.fast_rcnn.utils.config import opt
from torchnet.meter import ConfusionMeter, AverageValueMeter
LossTuple = namedtuple('LossTuple',
['rpn_loc_loss',
'rpn_cls_loss',
'roi_loc_loss',
'roi_cls_loss',
'total_loss'
])
class FasterRCNNTrainer(nn.Module):
"""wrapper for conveniently training. return losses
The losses include:
* :obj:`rpn_loc_loss`: The localization loss for \
Region Proposal Network (RPN).
* :obj:`rpn_cls_loss`: The classification loss for RPN.
* :obj:`roi_loc_loss`: The localization loss for the head module.
* :obj:`roi_cls_loss`: The classification loss for the head module.
* :obj:`total_loss`: The sum of 4 loss above.
Args:
faster_rcnn (model.FasterRCNN):
A Faster R-CNN model that is going to be trained.
"""
def __init__(self, faster_rcnn):
super(FasterRCNNTrainer, self).__init__()
self.faster_rcnn = faster_rcnn
self.rpn_sigma = opt.rpn_sigma
self.roi_sigma = opt.roi_sigma
# target creators create gt_bbox, gt_label, etc. as training targets.
self.anchor_target_creator = AnchorTargetCreator()
self.proposal_target_creator = ProposalTargetCreator()
self.loc_normalize_mean = faster_rcnn.loc_normalize_mean
self.loc_normalize_std = faster_rcnn.loc_normalize_std
self.optimizer = self.faster_rcnn.get_optimizer()
# visdom wrapper
self.vis = Visualizer(env=opt.env)
# indicators for training status
self.rpn_cm = ConfusionMeter(2)
self.roi_cm = ConfusionMeter(21)
self.meters = {k: AverageValueMeter() for k in LossTuple._fields} # average loss
def forward(self, imgs, bboxes, labels, scale):
"""Forward Faster R-CNN and calculate losses.
Here are notations used.
* :math:`N` is the batch size.
* :math:`R` is the number of bounding boxes per image.
Currently, only :math:`N=1` is supported.
Args:
imgs (~torch.autograd.Variable): A variable with a batch of images.
bboxes (~torch.autograd.Variable): A batch of bounding boxes.
Its shape is :math:`(N, R, 4)`.
labels (~torch.autograd.Variable): A batch of labels.
Its shape is :math:`(N, R)`. The background is excluded from
the definition, which means that the range of the value
is :math:`[0, L - 1]`. :math:`L` is the number of foreground
classes.
scale (float): Amount of scaling applied to
the raw image during preprocessing.
Returns:
namedtuple of 5 losses
"""
n = bboxes.shape[0]
if n != 1:
raise ValueError('Currently only batch size 1 is supported.')
_, _, H, W = imgs.shape
img_size = (H, W)
features = self.faster_rcnn.extractor(imgs)
rpn_locs, rpn_scores, rois, roi_indices, anchor = \
self.faster_rcnn.rpn(features, img_size, scale)
# Since batch size is one, convert variables to singular form
bbox = bboxes[0]
label = labels[0]
rpn_score = rpn_scores[0]
rpn_loc = rpn_locs[0]
roi = rois
# Sample RoIs and forward
# it's fine to break the computation graph of rois,
# consider them as constant input
sample_roi, gt_roi_loc, gt_roi_label = self.proposal_target_creator(
roi,
at.tonumpy(bbox),
at.tonumpy(label),
self.loc_normalize_mean,
self.loc_normalize_std)
# NOTE: all zeros because only batch size 1 is supported for now
sample_roi_index = t.zeros(len(sample_roi))
roi_cls_loc, roi_score = self.faster_rcnn.head(
features,
sample_roi,
sample_roi_index)
# ------------------ RPN losses -------------------#
gt_rpn_loc, gt_rpn_label = self.anchor_target_creator(
at.tonumpy(bbox),
anchor,
img_size)
gt_rpn_label = at.totensor(gt_rpn_label).long()
gt_rpn_loc = at.totensor(gt_rpn_loc)
rpn_loc_loss = _fast_rcnn_loc_loss(
rpn_loc,
gt_rpn_loc,
gt_rpn_label.data,
self.rpn_sigma)
# NOTE: default value of ignore_index is -100 ...
rpn_cls_loss = F.cross_entropy(rpn_score, gt_rpn_label.cuda(), ignore_index=-1)
_gt_rpn_label = gt_rpn_label[gt_rpn_label > -1]
_rpn_score = at.tonumpy(rpn_score)[at.tonumpy(gt_rpn_label) > -1]
self.rpn_cm.add(at.totensor(_rpn_score, False), _gt_rpn_label.data.long())
# ------------------ ROI losses (fast rcnn loss) -------------------#
n_sample = roi_cls_loc.shape[0]
roi_cls_loc = roi_cls_loc.view(n_sample, -1, 4)
roi_loc = roi_cls_loc[t.arange(0, n_sample).long().cuda(), \
at.totensor(gt_roi_label).long()]
gt_roi_label = at.totensor(gt_roi_label).long()
gt_roi_loc = at.totensor(gt_roi_loc)
roi_loc_loss = _fast_rcnn_loc_loss(
roi_loc.contiguous(),
gt_roi_loc,
gt_roi_label.data,
self.roi_sigma)
roi_cls_loss = nn.CrossEntropyLoss()(roi_score, gt_roi_label.cuda())
self.roi_cm.add(at.totensor(roi_score, False), gt_roi_label.data.long())
losses = [rpn_loc_loss, rpn_cls_loss, roi_loc_loss, roi_cls_loss]
losses = losses + [sum(losses)]
return LossTuple(*losses)
def train_step(self, imgs, bboxes, labels, scale):
self.optimizer.zero_grad()
losses = self.forward(imgs, bboxes, labels, scale)
losses.total_loss.backward()
self.optimizer.step()
self.update_meters(losses)
return losses
def save(self, save_optimizer=False, save_path=None, **kwargs):
"""serialize models include optimizer and other info
return path where the model-file is stored.
Args:
save_optimizer (bool): whether save optimizer.state_dict().
save_path (string): where to save the model; if None, save_path
is generated using a time string and info from kwargs.
Returns:
save_path(str): the path to save models.
"""
save_dict = dict()
save_dict['model'] = self.faster_rcnn.state_dict()
save_dict['config'] = opt._state_dict()
save_dict['other_info'] = kwargs
save_dict['vis_info'] = self.vis.state_dict()
if save_optimizer:
save_dict['optimizer'] = self.optimizer.state_dict()
if save_path is None:
timestr = time.strftime('%m%d%H%M')
save_path = 'checkpoints/fasterrcnn_%s' % timestr
for k_, v_ in kwargs.items():
save_path += '_%s' % v_
save_dir = os.path.dirname(save_path)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
t.save(save_dict, save_path)
self.vis.save([self.vis.env])
return save_path
def load(self, path, load_optimizer=True, parse_opt=False, cpu_flag: bool = True):
if cpu_flag:
state_dict = t.load(path,
map_location=t.device('cpu'))
else:
state_dict = t.load(path)
if 'model' in state_dict:
self.faster_rcnn.load_state_dict(state_dict['model'])
else: # legacy way, for backward compatibility
self.faster_rcnn.load_state_dict(state_dict)
return self
if parse_opt:
opt._parse(state_dict['config'])
if 'optimizer' in state_dict and load_optimizer:
self.optimizer.load_state_dict(state_dict['optimizer'])
return self
def update_meters(self, losses):
loss_d = {k: at.scalar(v) for k, v in losses._asdict().items()}
for key, meter in self.meters.items():
meter.add(loss_d[key])
def reset_meters(self):
for key, meter in self.meters.items():
meter.reset()
self.roi_cm.reset()
self.rpn_cm.reset()
def get_meter_data(self):
return {k: v.value()[0] for k, v in self.meters.items()}
def _smooth_l1_loss(x, t, in_weight, sigma):
sigma2 = sigma ** 2
diff = in_weight * (x - t)
abs_diff = diff.abs()
flag = (abs_diff.data < (1. / sigma2)).float()
y = (flag * (sigma2 / 2.) * (diff ** 2) +
(1 - flag) * (abs_diff - 0.5 / sigma2))
return y.sum()
def _fast_rcnn_loc_loss(pred_loc, gt_loc, gt_label, sigma):
in_weight = t.zeros(gt_loc.shape).cuda()
# Localization loss is calculated only for positive rois.
# NOTE: unlike the original implementation,
# we don't need inside_weight and outside_weight; they can be computed from gt_label
in_weight[(gt_label > 0).view(-1, 1).expand_as(in_weight).cuda()] = 1
loc_loss = _smooth_l1_loss(pred_loc, gt_loc, in_weight.detach(), sigma)
# Normalize by total number of negative and positive rois.
loc_loss /= ((gt_label >= 0).sum().float()) # ignore gt_label==-1 for rpn_loss
return loc_loss
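# Illustrative only (not part of this module): how FasterRCNNTrainer is typically
# driven. `faster_rcnn` and `dataloader` are assumed to exist elsewhere, and the
# dataloader is assumed to yield (img, bbox, label, scale) tuples with batch size 1.
def _example_training_loop(faster_rcnn, dataloader):
    trainer = FasterRCNNTrainer(faster_rcnn).cuda()
    for img, bbox, label, scale in dataloader:
        trainer.train_step(img.cuda().float(), bbox.cuda(), label.cuda(),
                           at.scalar(scale))
    return trainer.get_meter_data()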
| 36.859316
| 98
| 0.611925
|
from __future__ import absolute_import
import os
from collections import namedtuple
import time
from torch.nn import functional as F
from baseline.fast_rcnn.model.utils.creator_tool import AnchorTargetCreator, ProposalTargetCreator
from torch import nn
import torch as t
from baseline.fast_rcnn.utils import array_tool as at
from baseline.fast_rcnn.utils.vis_tool import Visualizer
from baseline.fast_rcnn.utils.config import opt
from torchnet.meter import ConfusionMeter, AverageValueMeter
LossTuple = namedtuple('LossTuple',
['rpn_loc_loss',
'rpn_cls_loss',
'roi_loc_loss',
'roi_cls_loss',
'total_loss'
])
class FasterRCNNTrainer(nn.Module):
def __init__(self, faster_rcnn):
super(FasterRCNNTrainer, self).__init__()
self.faster_rcnn = faster_rcnn
self.rpn_sigma = opt.rpn_sigma
self.roi_sigma = opt.roi_sigma
self.anchor_target_creator = AnchorTargetCreator()
self.proposal_target_creator = ProposalTargetCreator()
self.loc_normalize_mean = faster_rcnn.loc_normalize_mean
self.loc_normalize_std = faster_rcnn.loc_normalize_std
self.optimizer = self.faster_rcnn.get_optimizer()
self.vis = Visualizer(env=opt.env)
self.rpn_cm = ConfusionMeter(2)
self.roi_cm = ConfusionMeter(21)
self.meters = {k: AverageValueMeter() for k in LossTuple._fields}
def forward(self, imgs, bboxes, labels, scale):
n = bboxes.shape[0]
if n != 1:
raise ValueError('Currently only batch size 1 is supported.')
_, _, H, W = imgs.shape
img_size = (H, W)
features = self.faster_rcnn.extractor(imgs)
rpn_locs, rpn_scores, rois, roi_indices, anchor = \
self.faster_rcnn.rpn(features, img_size, scale)
bbox = bboxes[0]
label = labels[0]
rpn_score = rpn_scores[0]
rpn_loc = rpn_locs[0]
roi = rois
# consider them as constant input
sample_roi, gt_roi_loc, gt_roi_label = self.proposal_target_creator(
roi,
at.tonumpy(bbox),
at.tonumpy(label),
self.loc_normalize_mean,
self.loc_normalize_std)
# NOTE: all zeros because only batch size 1 is supported for now
sample_roi_index = t.zeros(len(sample_roi))
roi_cls_loc, roi_score = self.faster_rcnn.head(
features,
sample_roi,
sample_roi_index)
gt_rpn_loc, gt_rpn_label = self.anchor_target_creator(
at.tonumpy(bbox),
anchor,
img_size)
gt_rpn_label = at.totensor(gt_rpn_label).long()
gt_rpn_loc = at.totensor(gt_rpn_loc)
rpn_loc_loss = _fast_rcnn_loc_loss(
rpn_loc,
gt_rpn_loc,
gt_rpn_label.data,
self.rpn_sigma)
rpn_cls_loss = F.cross_entropy(rpn_score, gt_rpn_label.cuda(), ignore_index=-1)
_gt_rpn_label = gt_rpn_label[gt_rpn_label > -1]
_rpn_score = at.tonumpy(rpn_score)[at.tonumpy(gt_rpn_label) > -1]
self.rpn_cm.add(at.totensor(_rpn_score, False), _gt_rpn_label.data.long())
n_sample = roi_cls_loc.shape[0]
roi_cls_loc = roi_cls_loc.view(n_sample, -1, 4)
roi_loc = roi_cls_loc[t.arange(0, n_sample).long().cuda(), \
at.totensor(gt_roi_label).long()]
gt_roi_label = at.totensor(gt_roi_label).long()
gt_roi_loc = at.totensor(gt_roi_loc)
roi_loc_loss = _fast_rcnn_loc_loss(
roi_loc.contiguous(),
gt_roi_loc,
gt_roi_label.data,
self.roi_sigma)
roi_cls_loss = nn.CrossEntropyLoss()(roi_score, gt_roi_label.cuda())
self.roi_cm.add(at.totensor(roi_score, False), gt_roi_label.data.long())
losses = [rpn_loc_loss, rpn_cls_loss, roi_loc_loss, roi_cls_loss]
losses = losses + [sum(losses)]
return LossTuple(*losses)
def train_step(self, imgs, bboxes, labels, scale):
self.optimizer.zero_grad()
losses = self.forward(imgs, bboxes, labels, scale)
losses.total_loss.backward()
self.optimizer.step()
self.update_meters(losses)
return losses
def save(self, save_optimizer=False, save_path=None, **kwargs):
save_dict = dict()
save_dict['model'] = self.faster_rcnn.state_dict()
save_dict['config'] = opt._state_dict()
save_dict['other_info'] = kwargs
save_dict['vis_info'] = self.vis.state_dict()
if save_optimizer:
save_dict['optimizer'] = self.optimizer.state_dict()
if save_path is None:
timestr = time.strftime('%m%d%H%M')
save_path = 'checkpoints/fasterrcnn_%s' % timestr
for k_, v_ in kwargs.items():
save_path += '_%s' % v_
save_dir = os.path.dirname(save_path)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
t.save(save_dict, save_path)
self.vis.save([self.vis.env])
return save_path
def load(self, path, load_optimizer=True, parse_opt=False, cpu_flag: bool = True):
if cpu_flag:
state_dict = t.load(path,
map_location=t.device('cpu'))
else:
state_dict = t.load(path)
if 'model' in state_dict:
self.faster_rcnn.load_state_dict(state_dict['model'])
else:
self.faster_rcnn.load_state_dict(state_dict)
return self
if parse_opt:
opt._parse(state_dict['config'])
if 'optimizer' in state_dict and load_optimizer:
self.optimizer.load_state_dict(state_dict['optimizer'])
return self
def update_meters(self, losses):
loss_d = {k: at.scalar(v) for k, v in losses._asdict().items()}
for key, meter in self.meters.items():
meter.add(loss_d[key])
def reset_meters(self):
for key, meter in self.meters.items():
meter.reset()
self.roi_cm.reset()
self.rpn_cm.reset()
def get_meter_data(self):
return {k: v.value()[0] for k, v in self.meters.items()}
def _smooth_l1_loss(x, t, in_weight, sigma):
sigma2 = sigma ** 2
diff = in_weight * (x - t)
abs_diff = diff.abs()
flag = (abs_diff.data < (1. / sigma2)).float()
y = (flag * (sigma2 / 2.) * (diff ** 2) +
(1 - flag) * (abs_diff - 0.5 / sigma2))
return y.sum()
def _fast_rcnn_loc_loss(pred_loc, gt_loc, gt_label, sigma):
in_weight = t.zeros(gt_loc.shape).cuda()
in_weight[(gt_label > 0).view(-1, 1).expand_as(in_weight).cuda()] = 1
loc_loss = _smooth_l1_loss(pred_loc, gt_loc, in_weight.detach(), sigma)
# Normalize by total number of negative and positive rois.
loc_loss /= ((gt_label >= 0).sum().float()) # ignore gt_label==-1 for rpn_loss
return loc_loss
| true
| true
|
790e10b0d40c66a21974d20243262ab18d5737d9
| 2,865
|
py
|
Python
|
caixa/profile/tagged.py
|
wstlabs/caixa
|
7ce02640598a4261202196089a6bd1df8a9da344
|
[
"Apache-2.0"
] | null | null | null |
caixa/profile/tagged.py
|
wstlabs/caixa
|
7ce02640598a4261202196089a6bd1df8a9da344
|
[
"Apache-2.0"
] | null | null | null |
caixa/profile/tagged.py
|
wstlabs/caixa
|
7ce02640598a4261202196089a6bd1df8a9da344
|
[
"Apache-2.0"
] | null | null | null |
from collections import defaultdict
from typing import Dict, Tuple, Iterator, Callable, Any, Optional
from dataclasses import dataclass
"""
Provides the `TaggedProfiler` class for tag-based profiling of record streams.
TODO: Better description needed.
"""
@dataclass
class TaggedProfilerRecordStatus:
offset: int
tag: str
key: str
val: Any
r: Optional[dict]
@dataclass
class TaggedProfilerSummary:
total: int
histo: dict
index: Optional[dict]
cache: Optional[dict]
def describe(self) -> Iterator[str]:
yield f"histo = {self.histo}"
if self.index is None:
yield f"index = {self.index}"
else:
yield f"index with {len(self.index)} items:"
for label, nums in self.index.items():
yield f"label = '{label}, size = {len(nums)}:"
if self.cache is not None:
for n in nums:
yield f"cache[{n}] = {self.cache[n]}"
class TaggedProfiler:
"""A useful tag-based profiler class which we'll describe when we have more time."""
def __init__(self, tagmap: Dict[str,Callable]):
self.tagmap = tagmap
def eval_dict(self, r: dict) -> Iterator[Tuple[str,str,str]]:
for (tag, f) in self.tagmap.items():
for (k, v) in r.items():
if f(v):
yield (tag, k, v)
def evaluate(self, recs: Iterator[dict], deep: bool = False) -> Iterator[TaggedProfilerRecordStatus]:
for (i, r) in enumerate(recs):
for (tag, k, v) in self.eval_dict(r):
yield TaggedProfilerRecordStatus(i, tag, k, v, r if deep else None)
def profile(self, recs: Iterator[dict], index: bool = False, deep: bool = False) -> TaggedProfilerSummary:
"""Provides the most useful summary counts you'll likely want from the incoming record sequence.
Optional :index and :deep flags allow us to return special indexing and cachinc structs which we'll describe later."""
# We use underscores for all "recording" structures.
# Non-underscore names for input variables and flags.
labels = list(self.tagmap.keys())
temp_cache: Dict[int,Any] = {}
temp_index: Dict[str,Any] = {k:defaultdict(int) for k in labels}
for status in self.evaluate(recs, deep):
temp_cache[status.offset] = status.r if deep else 1
temp_index[status.tag][status.offset] += 1
_total = len(temp_cache)
_histo: Dict[str,int] = {k:len(v) for (k,v) in temp_index.items()}
_index: Optional[Dict[str,list]] = None
_cache: Optional[Dict[int,Any]] = None
if temp_index:
_index = {k:list(v.keys()) for k,v in temp_index.items()}
if deep:
_cache = temp_cache
return TaggedProfilerSummary(_total, _histo, _index, _cache)
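# Illustrative only (not part of this module): a minimal TaggedProfiler run.
# The tag names, predicates and records below are made up for the example.
def _example_profile():
    profiler = TaggedProfiler({
        "empty": lambda v: v == "",
        "negative": lambda v: isinstance(v, int) and v < 0,
    })
    records = [{"name": "", "score": -3}, {"name": "ok", "score": 7}]
    summary = profiler.profile(records, deep=True)
    return list(summary.describe())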
| 37.697368
| 126
| 0.614311
|
from collections import defaultdict
from typing import Dict, Tuple, Iterator, Callable, Any, Optional
from dataclasses import dataclass
@dataclass
class TaggedProfilerRecordStatus:
offset: int
tag: str
key: str
val: Any
r: Optional[dict]
@dataclass
class TaggedProfilerSummary:
total: int
histo: dict
index: Optional[dict]
cache: Optional[dict]
def describe(self) -> Iterator[str]:
yield f"histo = {self.histo}"
if self.index is None:
yield f"index = {self.index}"
else:
yield f"index with {len(self.index)} items:"
for label, nums in self.index.items():
yield f"label = '{label}, size = {len(nums)}:"
if self.cache is not None:
for n in nums:
yield f"cache[{n}] = {self.cache[n]}"
class TaggedProfiler:
def __init__(self, tagmap: Dict[str,Callable]):
self.tagmap = tagmap
def eval_dict(self, r: dict) -> Iterator[Tuple[str,str,str]]:
for (tag, f) in self.tagmap.items():
for (k, v) in r.items():
if f(v):
yield (tag, k, v)
def evaluate(self, recs: Iterator[dict], deep: bool = False) -> Iterator[TaggedProfilerRecordStatus]:
for (i, r) in enumerate(recs):
for (tag, k, v) in self.eval_dict(r):
yield TaggedProfilerRecordStatus(i, tag, k, v, r if deep else None)
def profile(self, recs: Iterator[dict], index: bool = False, deep: bool = False) -> TaggedProfilerSummary:
# We use underscores for all "recording" structures.
# Non-underscore names for input variables and flags.
labels = list(self.tagmap.keys())
temp_cache: Dict[int,Any] = {}
temp_index: Dict[str,Any] = {k:defaultdict(int) for k in labels}
for status in self.evaluate(recs, deep):
temp_cache[status.offset] = status.r if deep else 1
temp_index[status.tag][status.offset] += 1
_total = len(temp_cache)
_histo: Dict[str,int] = {k:len(v) for (k,v) in temp_index.items()}
_index: Optional[Dict[str,list]] = None
_cache: Optional[Dict[int,Any]] = None
if temp_index:
_index = {k:list(v.keys()) for k,v in temp_index.items()}
if deep:
_cache = temp_cache
return TaggedProfilerSummary(_total, _histo, _index, _cache)
| true
| true
|
790e112c634a67bcfd0bab2618f347216d424567
| 46,580
|
py
|
Python
|
lib-python/modified-2.5.2/pickle.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | 1
|
2019-05-27T00:58:46.000Z
|
2019-05-27T00:58:46.000Z
|
lib-python/modified-2.5.2/pickle.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | null | null | null |
lib-python/modified-2.5.2/pickle.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | null | null | null |
"""Create portable serialized representations of Python objects.
See module cPickle for a (much) faster implementation.
See module copy_reg for a mechanism for registering custom picklers.
See module pickletools source for extensive comments.
Classes:
Pickler
Unpickler
Functions:
dump(object, file)
dumps(object) -> string
load(file) -> object
loads(string) -> object
Misc variables:
__version__
format_version
compatible_formats
"""
__version__ = "$Revision: 38432 $" # Code version
from types import *
from copy_reg import dispatch_table
from copy_reg import _extension_registry, _inverted_registry, _extension_cache
import marshal
import sys
import struct
__all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler",
"Unpickler", "dump", "dumps", "load", "loads"]
# These are purely informational; no code uses these.
format_version = "2.0" # File format version we write
compatible_formats = ["1.0", # Original protocol 0
"1.1", # Protocol 0 with INST added
"1.2", # Original protocol 1
"1.3", # Protocol 1 with BINFLOAT added
"2.0", # Protocol 2
] # Old format versions we can read
# Keep in synch with cPickle. This is the highest protocol number we
# know how to read.
HIGHEST_PROTOCOL = 2
# Why use struct.pack() for pickling but marshal.loads() for
# unpickling? struct.pack() is 40% faster than marshal.dumps(), but
# marshal.loads() is twice as fast as struct.unpack()!
mloads = marshal.loads
class PickleError(Exception):
"""A common base class for the other pickling exceptions."""
pass
class PicklingError(PickleError):
"""This exception is raised when an unpicklable object is passed to the
dump() method.
"""
pass
class UnpicklingError(PickleError):
"""This exception is raised when there is a problem unpickling an object,
such as a security violation.
Note that other exceptions may also be raised during unpickling, including
(but not necessarily limited to) AttributeError, EOFError, ImportError,
and IndexError.
"""
pass
# An instance of _Stop is raised by Unpickler.load_stop() in response to
# the STOP opcode, passing the object that is the result of unpickling.
class _Stop(Exception):
def __init__(self, value):
self.value = value
# Jython has PyStringMap; it's a dict subclass with string keys
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
# UnicodeType may or may not be exported (normally imported from types)
try:
UnicodeType
except NameError:
UnicodeType = None
# Pickle opcodes. See pickletools.py for extensive docs. The listing
# here is in kind-of alphabetical order of 1-character pickle code.
# pickletools groups them by purpose.
MARK = '(' # push special markobject on stack
STOP = '.' # every pickle ends with STOP
POP = '0' # discard topmost stack item
POP_MARK = '1' # discard stack top through topmost markobject
DUP = '2' # duplicate top stack item
FLOAT = 'F' # push float object; decimal string argument
INT = 'I' # push integer or bool; decimal string argument
BININT = 'J' # push four-byte signed int
BININT1 = 'K' # push 1-byte unsigned int
LONG = 'L' # push long; decimal string argument
BININT2 = 'M' # push 2-byte unsigned int
NONE = 'N' # push None
PERSID = 'P' # push persistent object; id is taken from string arg
BINPERSID = 'Q' # " " " ; " " " " stack
REDUCE = 'R' # apply callable to argtuple, both on stack
STRING = 'S' # push string; NL-terminated string argument
BINSTRING = 'T' # push string; counted binary string argument
SHORT_BINSTRING = 'U' # " " ; " " " " < 256 bytes
UNICODE = 'V' # push Unicode string; raw-unicode-escaped'd argument
BINUNICODE = 'X' # " " " ; counted UTF-8 string argument
APPEND = 'a' # append stack top to list below it
BUILD = 'b' # call __setstate__ or __dict__.update()
GLOBAL = 'c' # push self.find_class(modname, name); 2 string args
DICT = 'd' # build a dict from stack items
EMPTY_DICT = '}' # push empty dict
APPENDS = 'e' # extend list on stack by topmost stack slice
GET = 'g' # push item from memo on stack; index is string arg
BINGET = 'h' # " " " " " " ; " " 1-byte arg
INST = 'i' # build & push class instance
LONG_BINGET = 'j' # push item from memo on stack; index is 4-byte arg
LIST = 'l' # build list from topmost stack items
EMPTY_LIST = ']' # push empty list
OBJ = 'o' # build & push class instance
PUT = 'p' # store stack top in memo; index is string arg
BINPUT = 'q' # " " " " " ; " " 1-byte arg
LONG_BINPUT = 'r' # " " " " " ; " " 4-byte arg
SETITEM = 's' # add key+value pair to dict
TUPLE = 't' # build tuple from topmost stack items
EMPTY_TUPLE = ')' # push empty tuple
SETITEMS = 'u' # modify dict by adding topmost key+value pairs
BINFLOAT = 'G' # push float; arg is 8-byte float encoding
TRUE = 'I01\n' # not an opcode; see INT docs in pickletools.py
FALSE = 'I00\n' # not an opcode; see INT docs in pickletools.py
# Protocol 2
PROTO = '\x80' # identify pickle protocol
NEWOBJ = '\x81' # build object by applying cls.__new__ to argtuple
EXT1 = '\x82' # push object from extension registry; 1-byte index
EXT2 = '\x83' # ditto, but 2-byte index
EXT4 = '\x84' # ditto, but 4-byte index
TUPLE1 = '\x85' # build 1-tuple from stack top
TUPLE2 = '\x86' # build 2-tuple from two topmost stack items
TUPLE3 = '\x87' # build 3-tuple from three topmost stack items
NEWTRUE = '\x88' # push True
NEWFALSE = '\x89' # push False
LONG1 = '\x8a' # push long from < 256 bytes
LONG4 = '\x8b' # push really big long
_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3]
__all__.extend([x for x in dir() if x[0].isalpha() and x == x.upper()])
del x
# Pickling machinery
class Pickler:
def __init__(self, file, protocol=None):
"""This takes a file-like object for writing a pickle data stream.
The optional protocol argument tells the pickler to use the
given protocol; supported protocols are 0, 1, 2. The default
protocol is 0, to be backwards compatible. (Protocol 0 is the
only protocol that can be written to a file opened in text
mode and read back successfully. When using a protocol higher
than 0, make sure the file is opened in binary mode, both when
pickling and unpickling.)
Protocol 1 is more efficient than protocol 0; protocol 2 is
more efficient than protocol 1.
Specifying a negative protocol version selects the highest
protocol version supported. The higher the protocol used, the
more recent the version of Python needed to read the pickle
produced.
The file parameter must have a write() method that accepts a single
string argument. It can thus be an open file object, a StringIO
object, or any other custom object that meets this interface.
"""
if protocol is None:
protocol = 0
if protocol < 0:
protocol = HIGHEST_PROTOCOL
elif not 0 <= protocol <= HIGHEST_PROTOCOL:
raise ValueError("pickle protocol must be <= %d" % HIGHEST_PROTOCOL)
self.write = file.write
self.memo = {}
self.proto = int(protocol)
self.bin = protocol >= 1
self.fast = 0
def _pickle_moduledict(self, obj):
try:
modict = self.module_dict_ids
except AttributeError:
modict = {}
from sys import modules
for mod in modules.values():
if isinstance(mod, ModuleType):
try:
modict[id(mod.__dict__)] = mod
except KeyboardInterrupt:
raise
except: # obscure: the above can fail for
# arbitrary reasons, because of the py lib
pass
self.module_dict_ids = modict
thisid = id(obj)
try:
themodule = modict[thisid]
except KeyError:
return None
from __builtin__ import getattr
return getattr, (themodule, '__dict__')
def clear_memo(self):
"""Clears the pickler's "memo".
The memo is the data structure that remembers which objects the
pickler has already seen, so that shared or recursive objects are
pickled by reference and not by value. This method is useful when
re-using picklers.
"""
self.memo.clear()
def dump(self, obj):
"""Write a pickled representation of obj to the open file."""
if self.proto >= 2:
self.write(PROTO + chr(self.proto))
self.save(obj)
self.write(STOP)
def memoize(self, obj):
"""Store an object in the memo."""
# The Pickler memo is a dictionary mapping object ids to 2-tuples
# that contain the Unpickler memo key and the object being memoized.
# The memo key is written to the pickle and will become
# the key in the Unpickler's memo. The object is stored in the
# Pickler memo so that transient objects are kept alive during
# pickling.
# The use of the Unpickler memo length as the memo key is just a
# convention. The only requirement is that the memo values be unique.
# But there appears no advantage to any other scheme, and this
# scheme allows the Unpickler memo to be implemented as a plain (but
# growable) array, indexed by memo key.
if self.fast:
return
assert id(obj) not in self.memo
memo_len = len(self.memo)
self.write(self.put(memo_len))
self.memo[id(obj)] = memo_len, obj
# Return a PUT (BINPUT, LONG_BINPUT) opcode string, with argument i.
def put(self, i, pack=struct.pack):
if self.bin:
if i < 256:
return BINPUT + chr(i)
else:
return LONG_BINPUT + pack("<i", i)
return PUT + repr(i) + '\n'
# Return a GET (BINGET, LONG_BINGET) opcode string, with argument i.
def get(self, i, pack=struct.pack):
if self.bin:
if i < 256:
return BINGET + chr(i)
else:
return LONG_BINGET + pack("<i", i)
return GET + repr(i) + '\n'
def save(self, obj):
# Check for persistent id (defined by a subclass)
pid = self.persistent_id(obj)
if pid:
self.save_pers(pid)
return
# Check the memo
x = self.memo.get(id(obj))
if x:
self.write(self.get(x[0]))
return
# Check the type dispatch table
t = type(obj)
f = self.dispatch.get(t)
if f:
f(self, obj) # Call unbound method with explicit self
return
# Check for a class with a custom metaclass; treat as regular class
try:
issc = issubclass(t, TypeType)
except TypeError: # t is not a class (old Boost; see SF #502085)
issc = 0
if issc:
self.save_global(obj)
return
# Check copy_reg.dispatch_table
reduce = dispatch_table.get(t)
if reduce:
rv = reduce(obj)
else:
# Check for a __reduce_ex__ method, fall back to __reduce__
reduce = getattr(obj, "__reduce_ex__", None)
if reduce:
rv = reduce(self.proto)
else:
reduce = getattr(obj, "__reduce__", None)
if reduce:
rv = reduce()
else:
raise PicklingError("Can't pickle %r object: %r" %
(t.__name__, obj))
# Check for string returned by reduce(), meaning "save as global"
if type(rv) is StringType:
self.save_global(obj, rv)
return
# Assert that reduce() returned a tuple
if type(rv) is not TupleType:
raise PicklingError("%s must return string or tuple" % reduce)
# Assert that it returned an appropriately sized tuple
l = len(rv)
if not (2 <= l <= 5):
raise PicklingError("Tuple returned by %s must have "
"two to five elements" % reduce)
# Save the reduce() output and finally memoize the object
self.save_reduce(obj=obj, *rv)
def persistent_id(self, obj):
# This exists so a subclass can override it
return None
def save_pers(self, pid):
# Save a persistent id reference
if self.bin:
self.save(pid)
self.write(BINPERSID)
else:
self.write(PERSID + str(pid) + '\n')
def save_reduce(self, func, args, state=None,
listitems=None, dictitems=None, obj=None):
# This API is called by some subclasses
# Assert that args is a tuple or None
if not isinstance(args, TupleType):
raise PicklingError("args from reduce() should be a tuple")
# Assert that func is callable
if not callable(func):
raise PicklingError("func from reduce should be callable")
save = self.save
write = self.write
# Protocol 2 special case: if func's name is __newobj__, use NEWOBJ
if self.proto >= 2 and getattr(func, "__name__", "") == "__newobj__":
# A __reduce__ implementation can direct protocol 2 to
# use the more efficient NEWOBJ opcode, while still
# allowing protocol 0 and 1 to work normally. For this to
# work, the function returned by __reduce__ should be
# called __newobj__, and its first argument should be a
# new-style class. The implementation for __newobj__
# should be as follows, although pickle has no way to
# verify this:
#
# def __newobj__(cls, *args):
# return cls.__new__(cls, *args)
#
# Protocols 0 and 1 will pickle a reference to __newobj__,
# while protocol 2 (and above) will pickle a reference to
# cls, the remaining args tuple, and the NEWOBJ code,
# which calls cls.__new__(cls, *args) at unpickling time
# (see load_newobj below). If __reduce__ returns a
# three-tuple, the state from the third tuple item will be
# pickled regardless of the protocol, calling __setstate__
# at unpickling time (see load_build below).
#
# Note that no standard __newobj__ implementation exists;
# you have to provide your own. This is to enforce
# compatibility with Python 2.2 (pickles written using
# protocol 0 or 1 in Python 2.3 should be unpicklable by
# Python 2.2).
cls = args[0]
if not hasattr(cls, "__new__"):
raise PicklingError(
"args[0] from __newobj__ args has no __new__")
if obj is not None and cls is not obj.__class__:
raise PicklingError(
"args[0] from __newobj__ args has the wrong class")
args = args[1:]
save(cls)
save(args)
write(NEWOBJ)
else:
save(func)
save(args)
write(REDUCE)
if obj is not None:
self.memoize(obj)
# More new special cases (that work with older protocols as
# well): when __reduce__ returns a tuple with 4 or 5 items,
# the 4th and 5th item should be iterators that provide list
# items and dict items (as (key, value) tuples), or None.
if listitems is not None:
self._batch_appends(listitems)
if dictitems is not None:
self._batch_setitems(dictitems)
if state is not None:
save(state)
write(BUILD)
# Methods below this point are dispatched through the dispatch table
dispatch = {}
def save_none(self, obj):
self.write(NONE)
dispatch[NoneType] = save_none
def save_bool(self, obj):
if self.proto >= 2:
self.write(obj and NEWTRUE or NEWFALSE)
else:
self.write(obj and TRUE or FALSE)
dispatch[bool] = save_bool
def save_int(self, obj, pack=struct.pack):
if self.bin:
# If the int is small enough to fit in a signed 4-byte 2's-comp
# format, we can store it more efficiently than the general
# case.
# First one- and two-byte unsigned ints:
if obj >= 0:
if obj <= 0xff:
self.write(BININT1 + chr(obj))
return
if obj <= 0xffff:
self.write("%c%c%c" % (BININT2, obj&0xff, obj>>8))
return
# Next check for 4-byte signed ints:
high_bits = obj >> 31 # note that Python shift sign-extends
if high_bits == 0 or high_bits == -1:
# All high bits are copies of bit 2**31, so the value
# fits in a 4-byte signed int.
self.write(BININT + pack("<i", obj))
return
# Text pickle, or int too big to fit in signed 4-byte format.
self.write(INT + repr(obj) + '\n')
dispatch[IntType] = save_int
def save_long(self, obj, pack=struct.pack):
if self.proto >= 2:
bytes = encode_long(obj)
n = len(bytes)
if n < 256:
self.write(LONG1 + chr(n) + bytes)
else:
self.write(LONG4 + pack("<i", n) + bytes)
return
self.write(LONG + repr(obj) + '\n')
dispatch[LongType] = save_long
def save_float(self, obj, pack=struct.pack):
if self.bin:
self.write(BINFLOAT + pack('>d', obj))
else:
self.write(FLOAT + repr(obj) + '\n')
dispatch[FloatType] = save_float
def save_string(self, obj, pack=struct.pack):
if self.bin:
n = len(obj)
if n < 256:
self.write(SHORT_BINSTRING + chr(n) + obj)
else:
self.write(BINSTRING + pack("<i", n) + obj)
else:
self.write(STRING + repr(obj) + '\n')
self.memoize(obj)
dispatch[StringType] = save_string
def save_unicode(self, obj, pack=struct.pack):
if self.bin:
encoding = obj.encode('utf-8')
n = len(encoding)
self.write(BINUNICODE + pack("<i", n) + encoding)
else:
obj = obj.replace("\\", "\\u005c")
obj = obj.replace("\n", "\\u000a")
self.write(UNICODE + obj.encode('raw-unicode-escape') + '\n')
self.memoize(obj)
dispatch[UnicodeType] = save_unicode
if StringType == UnicodeType:
# This is true for Jython
def save_string(self, obj, pack=struct.pack):
unicode = obj.isunicode()
if self.bin:
if unicode:
obj = obj.encode("utf-8")
l = len(obj)
if l < 256 and not unicode:
self.write(SHORT_BINSTRING + chr(l) + obj)
else:
s = pack("<i", l)
if unicode:
self.write(BINUNICODE + s + obj)
else:
self.write(BINSTRING + s + obj)
else:
if unicode:
obj = obj.replace("\\", "\\u005c")
obj = obj.replace("\n", "\\u000a")
obj = obj.encode('raw-unicode-escape')
self.write(UNICODE + obj + '\n')
else:
self.write(STRING + repr(obj) + '\n')
self.memoize(obj)
dispatch[StringType] = save_string
def save_tuple(self, obj):
write = self.write
proto = self.proto
n = len(obj)
if n == 0:
if proto:
write(EMPTY_TUPLE)
else:
write(MARK + TUPLE)
return
save = self.save
memo = self.memo
if n <= 3 and proto >= 2:
for element in obj:
save(element)
# Subtle. Same as in the big comment below.
if id(obj) in memo:
get = self.get(memo[id(obj)][0])
write(POP * n + get)
else:
write(_tuplesize2code[n])
self.memoize(obj)
return
# proto 0 or proto 1 and tuple isn't empty, or proto > 1 and tuple
# has more than 3 elements.
write(MARK)
for element in obj:
save(element)
if id(obj) in memo:
# Subtle. d was not in memo when we entered save_tuple(), so
# the process of saving the tuple's elements must have saved
# the tuple itself: the tuple is recursive. The proper action
# now is to throw away everything we put on the stack, and
# simply GET the tuple (it's already constructed). This check
# could have been done in the "for element" loop instead, but
# recursive tuples are a rare thing.
get = self.get(memo[id(obj)][0])
if proto:
write(POP_MARK + get)
else: # proto 0 -- POP_MARK not available
write(POP * (n+1) + get)
return
# No recursion.
self.write(TUPLE)
self.memoize(obj)
dispatch[TupleType] = save_tuple
# save_empty_tuple() isn't used by anything in Python 2.3. However, I
# found a Pickler subclass in Zope3 that calls it, so it's not harmless
# to remove it.
def save_empty_tuple(self, obj):
self.write(EMPTY_TUPLE)
def save_list(self, obj):
write = self.write
if self.bin:
write(EMPTY_LIST)
else: # proto 0 -- can't use EMPTY_LIST
write(MARK + LIST)
self.memoize(obj)
self._batch_appends(iter(obj))
dispatch[ListType] = save_list
# Keep in synch with cPickle's BATCHSIZE. Nothing will break if it gets
# out of synch, though.
_BATCHSIZE = 1000
def _batch_appends(self, items):
# Helper to batch up APPENDS sequences
save = self.save
write = self.write
if not self.bin:
for x in items:
save(x)
write(APPEND)
return
r = xrange(self._BATCHSIZE)
while items is not None:
tmp = []
for i in r:
try:
x = items.next()
tmp.append(x)
except StopIteration:
items = None
break
n = len(tmp)
if n > 1:
write(MARK)
for x in tmp:
save(x)
write(APPENDS)
elif n:
save(tmp[0])
write(APPEND)
# else tmp is empty, and we're done
def save_dict(self, obj):
## Stackless addition BEGIN
modict_saver = self._pickle_moduledict(obj)
if modict_saver is not None:
return self.save_reduce(*modict_saver)
## Stackless addition END
write = self.write
if self.bin:
write(EMPTY_DICT)
else: # proto 0 -- can't use EMPTY_DICT
write(MARK + DICT)
self.memoize(obj)
self._batch_setitems(obj.iteritems())
dispatch[DictionaryType] = save_dict
if not PyStringMap is None:
dispatch[PyStringMap] = save_dict
def _batch_setitems(self, items):
# Helper to batch up SETITEMS sequences; proto >= 1 only
save = self.save
write = self.write
if not self.bin:
for k, v in items:
save(k)
save(v)
write(SETITEM)
return
r = xrange(self._BATCHSIZE)
while items is not None:
tmp = []
for i in r:
try:
tmp.append(items.next())
except StopIteration:
items = None
break
n = len(tmp)
if n > 1:
write(MARK)
for k, v in tmp:
save(k)
save(v)
write(SETITEMS)
elif n:
k, v = tmp[0]
save(k)
save(v)
write(SETITEM)
# else tmp is empty, and we're done
def save_inst(self, obj):
cls = obj.__class__
memo = self.memo
write = self.write
save = self.save
if hasattr(obj, '__getinitargs__'):
args = obj.__getinitargs__()
len(args) # XXX Assert it's a sequence
_keep_alive(args, memo)
else:
args = ()
write(MARK)
if self.bin:
save(cls)
for arg in args:
save(arg)
write(OBJ)
else:
for arg in args:
save(arg)
write(INST + cls.__module__ + '\n' + cls.__name__ + '\n')
self.memoize(obj)
try:
getstate = obj.__getstate__
except AttributeError:
stuff = obj.__dict__
else:
stuff = getstate()
_keep_alive(stuff, memo)
save(stuff)
write(BUILD)
dispatch[InstanceType] = save_inst
def save_global(self, obj, name=None, pack=struct.pack):
write = self.write
memo = self.memo
if name is None:
name = obj.__name__
module = getattr(obj, "__module__", None)
if module is None:
module = whichmodule(obj, name)
try:
__import__(module)
mod = sys.modules[module]
klass = getattr(mod, name)
except (ImportError, KeyError, AttributeError):
raise PicklingError(
"Can't pickle %r: it's not found as %s.%s" %
(obj, module, name))
else:
if klass is not obj:
raise PicklingError(
"Can't pickle %r: it's not the same object as %s.%s" %
(obj, module, name))
if self.proto >= 2:
code = _extension_registry.get((module, name))
if code:
assert code > 0
if code <= 0xff:
write(EXT1 + chr(code))
elif code <= 0xffff:
write("%c%c%c" % (EXT2, code&0xff, code>>8))
else:
write(EXT4 + pack("<i", code))
return
write(GLOBAL + module + '\n' + name + '\n')
self.memoize(obj)
def save_function(self, obj):
try:
return self.save_global(obj)
except PicklingError, e:
pass
# Check copy_reg.dispatch_table
reduce = dispatch_table.get(type(obj))
if reduce:
rv = reduce(obj)
else:
# Check for a __reduce_ex__ method, fall back to __reduce__
reduce = getattr(obj, "__reduce_ex__", None)
if reduce:
rv = reduce(self.proto)
else:
reduce = getattr(obj, "__reduce__", None)
if reduce:
rv = reduce()
else:
raise e
return self.save_reduce(obj=obj, *rv)
dispatch[ClassType] = save_global
dispatch[FunctionType] = save_function
dispatch[BuiltinFunctionType] = save_global
dispatch[TypeType] = save_global
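# Illustrative only (not part of the original module): persistent_id() above is a
# hook meant to be overridden by a Pickler subclass, e.g. to pickle references to
# external objects by key instead of by value (the attribute below is hypothetical):
#
#   class DBPickler(Pickler):
#       def persistent_id(self, obj):
#           return getattr(obj, "db_key", None)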
# Pickling helpers
def _keep_alive(x, memo):
"""Keeps a reference to the object x in the memo.
Because we remember objects by their id, we have
to assure that possibly temporary objects are kept
alive by referencing them.
We store a reference at the id of the memo, which should
normally not be used unless someone tries to deepcopy
the memo itself...
"""
try:
memo[id(memo)].append(x)
except KeyError:
# aha, this is the first one :-)
memo[id(memo)]=[x]
# A cache for whichmodule(), mapping a function object to the name of
# the module in which the function was found.
classmap = {} # called classmap for backwards compatibility
def whichmodule(func, funcname):
"""Figure out the module in which a function occurs.
Search sys.modules for the module.
Cache in classmap.
Return a module name.
If the function cannot be found, return "__main__".
"""
# Python functions should always get an __module__ from their globals.
mod = getattr(func, "__module__", None)
if mod is not None:
return mod
if func in classmap:
return classmap[func]
for name, module in sys.modules.items():
if module is None:
continue # skip dummy package entries
if name != '__main__' and getattr(module, funcname, None) is func:
break
else:
name = '__main__'
classmap[func] = name
return name
# Unpickling machinery
class Unpickler:
def __init__(self, file):
"""This takes a file-like object for reading a pickle data stream.
The protocol version of the pickle is detected automatically, so no
proto argument is needed.
The file-like object must have two methods, a read() method that
takes an integer argument, and a readline() method that requires no
arguments. Both methods should return a string. Thus file-like
object can be a file object opened for reading, a StringIO object,
or any other custom object that meets this interface.
"""
self.readline = file.readline
self.read = file.read
self.memo = {}
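# Illustrative comment only: the module-level load()/loads() helpers defined
# later in this file wrap this class, e.g. Unpickler(open(path, 'rb')).load().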
def load(self):
"""Read a pickled object representation from the open file.
Return the reconstituted object hierarchy specified in the file.
"""
self.mark = object() # any new unique object
self.stack = []
self.append = self.stack.append
read = self.read
dispatch = self.dispatch
try:
while 1:
key = read(1)
dispatch[key](self)
except _Stop, stopinst:
return stopinst.value
# Return largest index k such that self.stack[k] is self.mark.
# If the stack doesn't contain a mark, eventually raises IndexError.
# This could be sped by maintaining another stack, of indices at which
# the mark appears. For that matter, the latter stack would suffice,
# and we wouldn't need to push mark objects on self.stack at all.
# Doing so is probably a good thing, though, since if the pickle is
# corrupt (or hostile) we may get a clue from finding self.mark embedded
# in unpickled objects.
def marker(self):
stack = self.stack
mark = self.mark
k = len(stack)-1
while stack[k] is not mark: k = k-1
return k
dispatch = {}
def load_eof(self):
raise EOFError
dispatch[''] = load_eof
def load_proto(self):
proto = ord(self.read(1))
if not 0 <= proto <= 2:
raise ValueError, "unsupported pickle protocol: %d" % proto
dispatch[PROTO] = load_proto
def load_persid(self):
pid = self.readline()[:-1]
self.append(self.persistent_load(pid))
dispatch[PERSID] = load_persid
def load_binpersid(self):
pid = self.stack.pop()
self.append(self.persistent_load(pid))
dispatch[BINPERSID] = load_binpersid
def load_none(self):
self.append(None)
dispatch[NONE] = load_none
def load_false(self):
self.append(False)
dispatch[NEWFALSE] = load_false
def load_true(self):
self.append(True)
dispatch[NEWTRUE] = load_true
def load_int(self):
data = self.readline()
if data == FALSE[1:]:
val = False
elif data == TRUE[1:]:
val = True
else:
try:
val = int(data)
except ValueError:
val = long(data)
self.append(val)
dispatch[INT] = load_int
def load_binint(self):
self.append(mloads('i' + self.read(4)))
dispatch[BININT] = load_binint
def load_binint1(self):
self.append(ord(self.read(1)))
dispatch[BININT1] = load_binint1
def load_binint2(self):
self.append(mloads('i' + self.read(2) + '\000\000'))
dispatch[BININT2] = load_binint2
def load_long(self):
self.append(long(self.readline()[:-1], 0))
dispatch[LONG] = load_long
def load_long1(self):
n = ord(self.read(1))
bytes = self.read(n)
self.append(decode_long(bytes))
dispatch[LONG1] = load_long1
def load_long4(self):
n = mloads('i' + self.read(4))
bytes = self.read(n)
self.append(decode_long(bytes))
dispatch[LONG4] = load_long4
def load_float(self):
self.append(float(self.readline()[:-1]))
dispatch[FLOAT] = load_float
def load_binfloat(self, unpack=struct.unpack):
self.append(unpack('>d', self.read(8))[0])
dispatch[BINFLOAT] = load_binfloat
def load_string(self):
rep = self.readline()[:-1]
for q in "\"'": # double or single quote
if rep.startswith(q):
if not rep.endswith(q):
raise ValueError, "insecure string pickle"
rep = rep[len(q):-len(q)]
break
else:
raise ValueError, "insecure string pickle"
self.append(rep.decode("string-escape"))
dispatch[STRING] = load_string
def load_binstring(self):
len = mloads('i' + self.read(4))
self.append(self.read(len))
dispatch[BINSTRING] = load_binstring
def load_unicode(self):
self.append(unicode(self.readline()[:-1],'raw-unicode-escape'))
dispatch[UNICODE] = load_unicode
def load_binunicode(self):
len = mloads('i' + self.read(4))
self.append(unicode(self.read(len),'utf-8'))
dispatch[BINUNICODE] = load_binunicode
def load_short_binstring(self):
len = ord(self.read(1))
self.append(self.read(len))
dispatch[SHORT_BINSTRING] = load_short_binstring
def load_tuple(self):
k = self.marker()
self.stack[k:] = [tuple(self.stack[k+1:])]
dispatch[TUPLE] = load_tuple
def load_empty_tuple(self):
self.stack.append(())
dispatch[EMPTY_TUPLE] = load_empty_tuple
def load_tuple1(self):
self.stack[-1] = (self.stack[-1],)
dispatch[TUPLE1] = load_tuple1
def load_tuple2(self):
self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
dispatch[TUPLE2] = load_tuple2
def load_tuple3(self):
self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
dispatch[TUPLE3] = load_tuple3
def load_empty_list(self):
self.stack.append([])
dispatch[EMPTY_LIST] = load_empty_list
def load_empty_dictionary(self):
self.stack.append({})
dispatch[EMPTY_DICT] = load_empty_dictionary
def load_list(self):
k = self.marker()
self.stack[k:] = [self.stack[k+1:]]
dispatch[LIST] = load_list
def load_dict(self):
k = self.marker()
d = {}
items = self.stack[k+1:]
for i in range(0, len(items), 2):
key = items[i]
value = items[i+1]
d[key] = value
self.stack[k:] = [d]
dispatch[DICT] = load_dict
# INST and OBJ differ only in how they get a class object. It's not
# only sensible to do the rest in a common routine, the two routines
# previously diverged and grew different bugs.
# klass is the class to instantiate, and k points to the topmost mark
# object, following which are the arguments for klass.__init__.
def _instantiate(self, klass, k):
args = tuple(self.stack[k+1:])
del self.stack[k:]
instantiated = 0
if (not args and
type(klass) is ClassType and
not hasattr(klass, "__getinitargs__")):
try:
value = _EmptyClass()
value.__class__ = klass
instantiated = 1
except RuntimeError:
# In restricted execution, assignment to inst.__class__ is
# prohibited
pass
if not instantiated:
try:
value = klass(*args)
except TypeError, err:
raise TypeError, "in constructor for %s: %s" % (
klass.__name__, str(err)), sys.exc_info()[2]
self.append(value)
def load_inst(self):
module = self.readline()[:-1]
name = self.readline()[:-1]
klass = self.find_class(module, name)
self._instantiate(klass, self.marker())
dispatch[INST] = load_inst
def load_obj(self):
# Stack is ... markobject classobject arg1 arg2 ...
k = self.marker()
klass = self.stack.pop(k+1)
self._instantiate(klass, k)
dispatch[OBJ] = load_obj
def load_newobj(self):
args = self.stack.pop()
cls = self.stack[-1]
obj = cls.__new__(cls, *args)
self.stack[-1] = obj
dispatch[NEWOBJ] = load_newobj
def load_global(self):
module = self.readline()[:-1]
name = self.readline()[:-1]
klass = self.find_class(module, name)
self.append(klass)
dispatch[GLOBAL] = load_global
def load_ext1(self):
code = ord(self.read(1))
self.get_extension(code)
dispatch[EXT1] = load_ext1
def load_ext2(self):
code = mloads('i' + self.read(2) + '\000\000')
self.get_extension(code)
dispatch[EXT2] = load_ext2
def load_ext4(self):
code = mloads('i' + self.read(4))
self.get_extension(code)
dispatch[EXT4] = load_ext4
def get_extension(self, code):
nil = []
obj = _extension_cache.get(code, nil)
if obj is not nil:
self.append(obj)
return
key = _inverted_registry.get(code)
if not key:
raise ValueError("unregistered extension code %d" % code)
obj = self.find_class(*key)
_extension_cache[code] = obj
self.append(obj)
def find_class(self, module, name):
# Subclasses may override this
__import__(module)
mod = sys.modules[module]
klass = getattr(mod, name)
return klass
def load_reduce(self):
stack = self.stack
args = stack.pop()
func = stack[-1]
value = func(*args)
stack[-1] = value
dispatch[REDUCE] = load_reduce
def load_pop(self):
del self.stack[-1]
dispatch[POP] = load_pop
def load_pop_mark(self):
k = self.marker()
del self.stack[k:]
dispatch[POP_MARK] = load_pop_mark
def load_dup(self):
self.append(self.stack[-1])
dispatch[DUP] = load_dup
def load_get(self):
self.append(self.memo[self.readline()[:-1]])
dispatch[GET] = load_get
def load_binget(self):
i = ord(self.read(1))
self.append(self.memo[repr(i)])
dispatch[BINGET] = load_binget
def load_long_binget(self):
i = mloads('i' + self.read(4))
self.append(self.memo[repr(i)])
dispatch[LONG_BINGET] = load_long_binget
def load_put(self):
self.memo[self.readline()[:-1]] = self.stack[-1]
dispatch[PUT] = load_put
def load_binput(self):
i = ord(self.read(1))
self.memo[repr(i)] = self.stack[-1]
dispatch[BINPUT] = load_binput
def load_long_binput(self):
i = mloads('i' + self.read(4))
self.memo[repr(i)] = self.stack[-1]
dispatch[LONG_BINPUT] = load_long_binput
def load_append(self):
stack = self.stack
value = stack.pop()
list = stack[-1]
list.append(value)
dispatch[APPEND] = load_append
def load_appends(self):
stack = self.stack
mark = self.marker()
list = stack[mark - 1]
list.extend(stack[mark + 1:])
del stack[mark:]
dispatch[APPENDS] = load_appends
def load_setitem(self):
stack = self.stack
value = stack.pop()
key = stack.pop()
dict = stack[-1]
dict[key] = value
dispatch[SETITEM] = load_setitem
def load_setitems(self):
stack = self.stack
mark = self.marker()
dict = stack[mark - 1]
for i in range(mark + 1, len(stack), 2):
dict[stack[i]] = stack[i + 1]
del stack[mark:]
dispatch[SETITEMS] = load_setitems
def load_build(self):
stack = self.stack
state = stack.pop()
inst = stack[-1]
setstate = getattr(inst, "__setstate__", None)
if setstate:
setstate(state)
return
slotstate = None
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
if state:
try:
inst.__dict__.update(state)
except RuntimeError:
# XXX In restricted execution, the instance's __dict__
# is not accessible. Use the old way of unpickling
# the instance variables. This is a semantic
# difference when unpickling in restricted
# vs. unrestricted modes.
# Note, however, that cPickle has never tried to do the
# .update() business, and always uses
# PyObject_SetItem(inst.__dict__, key, value) in a
# loop over state.items().
for k, v in state.items():
setattr(inst, k, v)
if slotstate:
for k, v in slotstate.items():
setattr(inst, k, v)
dispatch[BUILD] = load_build
def load_mark(self):
self.append(self.mark)
dispatch[MARK] = load_mark
def load_stop(self):
value = self.stack.pop()
raise _Stop(value)
dispatch[STOP] = load_stop
# Helper class for load_inst/load_obj
class _EmptyClass:
pass
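# A minimal sketch, not part of the original module: find_class() is the
# hook meant for overriding in Unpickler subclasses, for example to restrict
# which globals a pickle is allowed to load. The whitelist below is an
# arbitrary example chosen for illustration.
class _RestrictedUnpickler(Unpickler):
    _allowed_modules = ("__builtin__", "copy_reg")
    def find_class(self, module, name):
        if module not in self._allowed_modules:
            raise UnpicklingError("global '%s.%s' is not allowed" %
                                  (module, name))
        return Unpickler.find_class(self, module, name)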
# Encode/decode longs in linear time.
import binascii as _binascii
def encode_long(x):
r"""Encode a long to a two's complement little-endian binary string.
Note that 0L is a special case, returning an empty string, to save a
byte in the LONG1 pickling context.
>>> encode_long(0L)
''
>>> encode_long(255L)
'\xff\x00'
>>> encode_long(32767L)
'\xff\x7f'
>>> encode_long(-256L)
'\x00\xff'
>>> encode_long(-32768L)
'\x00\x80'
>>> encode_long(-128L)
'\x80'
>>> encode_long(127L)
'\x7f'
>>>
"""
if x == 0:
return ''
if x > 0:
ashex = hex(x)
assert ashex.startswith("0x")
njunkchars = 2 + ashex.endswith('L')
nibbles = len(ashex) - njunkchars
if nibbles & 1:
# need an even # of nibbles for unhexlify
ashex = "0x0" + ashex[2:]
elif int(ashex[2], 16) >= 8:
# "looks negative", so need a byte of sign bits
ashex = "0x00" + ashex[2:]
else:
# Build the 256's-complement: (1L << nbytes) + x. The trick is
# to find the number of bytes in linear time (although that should
# really be a constant-time task).
ashex = hex(-x)
assert ashex.startswith("0x")
njunkchars = 2 + ashex.endswith('L')
nibbles = len(ashex) - njunkchars
if nibbles & 1:
# Extend to a full byte.
nibbles += 1
nbits = nibbles * 4
x += 1L << nbits
assert x > 0
ashex = hex(x)
njunkchars = 2 + ashex.endswith('L')
newnibbles = len(ashex) - njunkchars
if newnibbles < nibbles:
ashex = "0x" + "0" * (nibbles - newnibbles) + ashex[2:]
if int(ashex[2], 16) < 8:
# "looks positive", so need a byte of sign bits
ashex = "0xff" + ashex[2:]
if ashex.endswith('L'):
ashex = ashex[2:-1]
else:
ashex = ashex[2:]
assert len(ashex) & 1 == 0, (x, ashex)
binary = _binascii.unhexlify(ashex)
return binary[::-1]
def decode_long(data):
r"""Decode a long from a two's complement little-endian binary string.
>>> decode_long('')
0L
>>> decode_long("\xff\x00")
255L
>>> decode_long("\xff\x7f")
32767L
>>> decode_long("\x00\xff")
-256L
>>> decode_long("\x00\x80")
-32768L
>>> decode_long("\x80")
-128L
>>> decode_long("\x7f")
127L
"""
nbytes = len(data)
if nbytes == 0:
return 0L
ashex = _binascii.hexlify(data[::-1])
n = long(ashex, 16) # quadratic time before Python 2.3; linear now
if data[-1] >= '\x80':
n -= 1L << (nbytes * 8)
return n
# Shorthands
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def dump(obj, file, protocol=None):
Pickler(file, protocol).dump(obj)
def dumps(obj, protocol=None):
file = StringIO()
Pickler(file, protocol).dump(obj)
return file.getvalue()
def load(file):
return Unpickler(file).load()
def loads(str):
file = StringIO(str)
return Unpickler(file).load()
# Doctest
def _test():
import doctest
return doctest.testmod()
if __name__ == "__main__":
_test()
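# A minimal usage sketch, not part of the original module: a protocol-2
# round trip through the dumps()/loads() shorthands defined above. The
# sample data is arbitrary.
def _example_roundtrip():
    data = {'answer': 42, 'items': [1, 2, 3]}
    blob = dumps(data, 2)       # pickle the object to a string
    restored = loads(blob)      # rebuild an equal object from that string
    assert restored == data
    return restored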
| 32.437326
| 80
| 0.557793
|
"""Create portable serialized representations of Python objects.
See module cPickle for a (much) faster implementation.
See module copy_reg for a mechanism for registering custom picklers.
See module pickletools source for extensive comments.
Classes:
Pickler
Unpickler
Functions:
dump(object, file)
dumps(object) -> string
load(file) -> object
loads(string) -> object
Misc variables:
__version__
format_version
compatible_formats
"""
__version__ = "$Revision: 38432 $"
from types import *
from copy_reg import dispatch_table
from copy_reg import _extension_registry, _inverted_registry, _extension_cache
import marshal
import sys
import struct
__all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler",
"Unpickler", "dump", "dumps", "load", "loads"]
format_version = "2.0"
compatible_formats = ["1.0",
"1.1",
"1.2",
"1.3",
"2.0",
]
HIGHEST_PROTOCOL = 2
mloads = marshal.loads
class PickleError(Exception):
"""A common base class for the other pickling exceptions."""
pass
class PicklingError(PickleError):
"""This exception is raised when an unpicklable object is passed to the
dump() method.
"""
pass
class UnpicklingError(PickleError):
"""This exception is raised when there is a problem unpickling an object,
such as a security violation.
Note that other exceptions may also be raised during unpickling, including
(but not necessarily limited to) AttributeError, EOFError, ImportError,
and IndexError.
"""
pass
class _Stop(Exception):
def __init__(self, value):
self.value = value
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
# UnicodeType may or may not be exported (normally imported from types)
try:
UnicodeType
except NameError:
UnicodeType = None
# Pickle opcodes. See pickletools.py for extensive docs. The listing
# here is in kind-of alphabetical order of 1-character pickle code.
# pickletools groups them by purpose.
MARK = '(' # push special markobject on stack
STOP = '.' # every pickle ends with STOP
POP = '0' # discard topmost stack item
POP_MARK = '1' # discard stack top through topmost markobject
DUP = '2' # duplicate top stack item
FLOAT = 'F' # push float object; decimal string argument
INT = 'I' # push integer or bool; decimal string argument
BININT = 'J' # push four-byte signed int
BININT1 = 'K' # push 1-byte unsigned int
LONG = 'L' # push long; decimal string argument
BININT2 = 'M' # push 2-byte unsigned int
NONE = 'N' # push None
PERSID = 'P' # push persistent object; id is taken from string arg
BINPERSID = 'Q' # " " " ; " " " " stack
REDUCE = 'R' # apply callable to argtuple, both on stack
STRING = 'S' # push string; NL-terminated string argument
BINSTRING = 'T' # push string; counted binary string argument
SHORT_BINSTRING = 'U' # " " ; " " " " < 256 bytes
UNICODE = 'V' # push Unicode string; raw-unicode-escaped'd argument
BINUNICODE = 'X' # " " " ; counted UTF-8 string argument
APPEND = 'a'
BUILD = 'b'
GLOBAL = 'c'
DICT = 'd'
EMPTY_DICT = '}'
APPENDS = 'e'
GET = 'g'
BINGET = 'h'
INST = 'i'
LONG_BINGET = 'j'
LIST = 'l'
EMPTY_LIST = ']'
OBJ = 'o'
PUT = 'p'
BINPUT = 'q'
LONG_BINPUT = 'r' # " " " " " ; " " 4-byte arg
SETITEM = 's'
TUPLE = 't'
EMPTY_TUPLE = ')'
SETITEMS = 'u'
BINFLOAT = 'G'
TRUE = 'I01\n'
FALSE = 'I00\n'
PROTO = '\x80'
NEWOBJ = '\x81'
EXT1 = '\x82'
EXT2 = '\x83'
EXT4 = '\x84'
TUPLE1 = '\x85'
TUPLE2 = '\x86'
TUPLE3 = '\x87'
NEWTRUE = '\x88'
NEWFALSE = '\x89'
LONG1 = '\x8a'
LONG4 = '\x8b'
_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3]
__all__.extend([x for x in dir() if x[0].isalpha() and x == x.upper()])
del x
class Pickler:
def __init__(self, file, protocol=None):
"""This takes a file-like object for writing a pickle data stream.
The optional protocol argument tells the pickler to use the
given protocol; supported protocols are 0, 1, 2. The default
protocol is 0, to be backwards compatible. (Protocol 0 is the
only protocol that can be written to a file opened in text
mode and read back successfully. When using a protocol higher
than 0, make sure the file is opened in binary mode, both when
pickling and unpickling.)
Protocol 1 is more efficient than protocol 0; protocol 2 is
more efficient than protocol 1.
Specifying a negative protocol version selects the highest
protocol version supported. The higher the protocol used, the
more recent the version of Python needed to read the pickle
produced.
The file parameter must have a write() method that accepts a single
string argument. It can thus be an open file object, a StringIO
object, or any other custom object that meets this interface.
"""
if protocol is None:
protocol = 0
if protocol < 0:
protocol = HIGHEST_PROTOCOL
elif not 0 <= protocol <= HIGHEST_PROTOCOL:
raise ValueError("pickle protocol must be <= %d" % HIGHEST_PROTOCOL)
self.write = file.write
self.memo = {}
self.proto = int(protocol)
self.bin = protocol >= 1
self.fast = 0
def _pickle_moduledict(self, obj):
try:
modict = self.module_dict_ids
except AttributeError:
modict = {}
from sys import modules
for mod in modules.values():
if isinstance(mod, ModuleType):
try:
modict[id(mod.__dict__)] = mod
except KeyboardInterrupt:
raise
except:
pass
self.module_dict_ids = modict
thisid = id(obj)
try:
themodule = modict[thisid]
except KeyError:
return None
from __builtin__ import getattr
return getattr, (themodule, '__dict__')
def clear_memo(self):
"""Clears the pickler's "memo".
The memo is the data structure that remembers which objects the
pickler has already seen, so that shared or recursive objects are
pickled by reference and not by value. This method is useful when
re-using picklers.
"""
self.memo.clear()
def dump(self, obj):
"""Write a pickled representation of obj to the open file."""
if self.proto >= 2:
self.write(PROTO + chr(self.proto))
self.save(obj)
self.write(STOP)
def memoize(self, obj):
"""Store an object in the memo."""
# The Pickler memo is a dictionary mapping object ids to 2-tuples
# that contain the Unpickler memo key and the object being memoized.
# The memo key is written to the pickle and will become
# the key in the Unpickler's memo. The object is stored in the
# Pickler memo so that transient objects are kept alive during
# pickling.
if self.fast:
return
assert id(obj) not in self.memo
memo_len = len(self.memo)
self.write(self.put(memo_len))
self.memo[id(obj)] = memo_len, obj
def put(self, i, pack=struct.pack):
if self.bin:
if i < 256:
return BINPUT + chr(i)
else:
return LONG_BINPUT + pack("<i", i)
return PUT + repr(i) + '\n'
def get(self, i, pack=struct.pack):
if self.bin:
if i < 256:
return BINGET + chr(i)
else:
return LONG_BINGET + pack("<i", i)
return GET + repr(i) + '\n'
def save(self, obj):
pid = self.persistent_id(obj)
if pid:
self.save_pers(pid)
return
x = self.memo.get(id(obj))
if x:
self.write(self.get(x[0]))
return
t = type(obj)
f = self.dispatch.get(t)
if f:
f(self, obj)
return
try:
issc = issubclass(t, TypeType)
except TypeError: issc = 0
if issc:
self.save_global(obj)
return
reduce = dispatch_table.get(t)
if reduce:
rv = reduce(obj)
else:
reduce = getattr(obj, "__reduce_ex__", None)
if reduce:
rv = reduce(self.proto)
else:
reduce = getattr(obj, "__reduce__", None)
if reduce:
rv = reduce()
else:
raise PicklingError("Can't pickle %r object: %r" %
(t.__name__, obj))
# Check for string returned by reduce(), meaning "save as global"
if type(rv) is StringType:
self.save_global(obj, rv)
return
# Assert that reduce() returned a tuple
if type(rv) is not TupleType:
raise PicklingError("%s must return string or tuple" % reduce)
# Assert that it returned an appropriately sized tuple
l = len(rv)
if not (2 <= l <= 5):
raise PicklingError("Tuple returned by %s must have "
"two to five elements" % reduce)
# Save the reduce() output and finally memoize the object
self.save_reduce(obj=obj, *rv)
def persistent_id(self, obj):
# This exists so a subclass can override it
return None
def save_pers(self, pid):
# Save a persistent id reference
if self.bin:
self.save(pid)
self.write(BINPERSID)
else:
self.write(PERSID + str(pid) + '\n')
def save_reduce(self, func, args, state=None,
listitems=None, dictitems=None, obj=None):
# This API is called by some subclasses
# Assert that args is a tuple or None
if not isinstance(args, TupleType):
raise PicklingError("args from reduce() should be a tuple")
# Assert that func is callable
if not callable(func):
raise PicklingError("func from reduce should be callable")
save = self.save
write = self.write
# Protocol 2 special case: if func's name is __newobj__, use NEWOBJ
if self.proto >= 2 and getattr(func, "__name__", "") == "__newobj__":
cls = args[0]
if not hasattr(cls, "__new__"):
raise PicklingError(
"args[0] from __newobj__ args has no __new__")
if obj is not None and cls is not obj.__class__:
raise PicklingError(
"args[0] from __newobj__ args has the wrong class")
args = args[1:]
save(cls)
save(args)
write(NEWOBJ)
else:
save(func)
save(args)
write(REDUCE)
if obj is not None:
self.memoize(obj)
if listitems is not None:
self._batch_appends(listitems)
if dictitems is not None:
self._batch_setitems(dictitems)
if state is not None:
save(state)
write(BUILD)
dispatch = {}
def save_none(self, obj):
self.write(NONE)
dispatch[NoneType] = save_none
def save_bool(self, obj):
if self.proto >= 2:
self.write(obj and NEWTRUE or NEWFALSE)
else:
self.write(obj and TRUE or FALSE)
dispatch[bool] = save_bool
def save_int(self, obj, pack=struct.pack):
if self.bin:
# If the int is small enough to fit in a signed 4-byte 2's-complement
# format, we can store it more efficiently than the general
# case.
# First one- and two-byte unsigned ints:
if obj >= 0:
if obj <= 0xff:
self.write(BININT1 + chr(obj))
return
if obj <= 0xffff:
self.write("%c%c%c" % (BININT2, obj&0xff, obj>>8))
return
# Next check for 4-byte signed ints:
high_bits = obj >> 31 # note that Python shift sign-extends
if high_bits == 0 or high_bits == -1:
# All high bits are copies of bit 2**31, so the value
# fits in a 4-byte signed int.
self.write(BININT + pack("<i", obj))
return
# Text pickle, or int too big to fit in signed 4-byte format.
self.write(INT + repr(obj) + '\n')
dispatch[IntType] = save_int
def save_long(self, obj, pack=struct.pack):
if self.proto >= 2:
bytes = encode_long(obj)
n = len(bytes)
if n < 256:
self.write(LONG1 + chr(n) + bytes)
else:
self.write(LONG4 + pack("<i", n) + bytes)
return
self.write(LONG + repr(obj) + '\n')
dispatch[LongType] = save_long
def save_float(self, obj, pack=struct.pack):
if self.bin:
self.write(BINFLOAT + pack('>d', obj))
else:
self.write(FLOAT + repr(obj) + '\n')
dispatch[FloatType] = save_float
def save_string(self, obj, pack=struct.pack):
if self.bin:
n = len(obj)
if n < 256:
self.write(SHORT_BINSTRING + chr(n) + obj)
else:
self.write(BINSTRING + pack("<i", n) + obj)
else:
self.write(STRING + repr(obj) + '\n')
self.memoize(obj)
dispatch[StringType] = save_string
def save_unicode(self, obj, pack=struct.pack):
if self.bin:
encoding = obj.encode('utf-8')
n = len(encoding)
self.write(BINUNICODE + pack("<i", n) + encoding)
else:
obj = obj.replace("\\", "\\u005c")
obj = obj.replace("\n", "\\u000a")
self.write(UNICODE + obj.encode('raw-unicode-escape') + '\n')
self.memoize(obj)
dispatch[UnicodeType] = save_unicode
if StringType == UnicodeType:
# This is true for Jython
def save_string(self, obj, pack=struct.pack):
unicode = obj.isunicode()
if self.bin:
if unicode:
obj = obj.encode("utf-8")
l = len(obj)
if l < 256 and not unicode:
self.write(SHORT_BINSTRING + chr(l) + obj)
else:
s = pack("<i", l)
if unicode:
self.write(BINUNICODE + s + obj)
else:
self.write(BINSTRING + s + obj)
else:
if unicode:
obj = obj.replace("\\", "\\u005c")
obj = obj.replace("\n", "\\u000a")
obj = obj.encode('raw-unicode-escape')
self.write(UNICODE + obj + '\n')
else:
self.write(STRING + repr(obj) + '\n')
self.memoize(obj)
dispatch[StringType] = save_string
def save_tuple(self, obj):
write = self.write
proto = self.proto
n = len(obj)
if n == 0:
if proto:
write(EMPTY_TUPLE)
else:
write(MARK + TUPLE)
return
save = self.save
memo = self.memo
if n <= 3 and proto >= 2:
for element in obj:
save(element)
# Subtle. Same as in the big comment below.
if id(obj) in memo:
get = self.get(memo[id(obj)][0])
write(POP * n + get)
else:
write(_tuplesize2code[n])
self.memoize(obj)
return
# proto 0 or proto 1 and tuple isn't empty, or proto > 1 and tuple
# has more than 3 elements.
write(MARK)
for element in obj:
save(element)
if id(obj) in memo:
# Subtle. The process of saving the tuple's elements must have saved
# the tuple itself: the tuple is recursive. The proper action
# now is to throw away everything we put on the stack, and
# simply GET the tuple (it's already constructed). This check
# could have been done in the "for element" loop instead, but
# recursive tuples are a rare thing.
get = self.get(memo[id(obj)][0])
if proto:
write(POP_MARK + get)
else:
write(POP * (n+1) + get)
return
self.write(TUPLE)
self.memoize(obj)
dispatch[TupleType] = save_tuple
# save_empty_tuple() isn't used by anything in Python 2.3. However, I
# found a Pickler subclass in Zope3 that calls it, so it's not harmless
# to remove it.
def save_empty_tuple(self, obj):
self.write(EMPTY_TUPLE)
def save_list(self, obj):
write = self.write
if self.bin:
write(EMPTY_LIST)
else:
write(MARK + LIST)
self.memoize(obj)
self._batch_appends(iter(obj))
dispatch[ListType] = save_list
# Keep in synch with cPickle's BATCHSIZE. Nothing will break if it gets
# out of synch with cPickle's.
_BATCHSIZE = 1000
def _batch_appends(self, items):
save = self.save
write = self.write
if not self.bin:
for x in items:
save(x)
write(APPEND)
return
r = xrange(self._BATCHSIZE)
while items is not None:
tmp = []
for i in r:
try:
x = items.next()
tmp.append(x)
except StopIteration:
items = None
break
n = len(tmp)
if n > 1:
write(MARK)
for x in tmp:
save(x)
write(APPENDS)
elif n:
save(tmp[0])
write(APPEND)
def save_dict(self, obj):
## Stackless addition BEGIN
modict_saver = self._pickle_moduledict(obj)
if modict_saver is not None:
return self.save_reduce(*modict_saver)
## Stackless addition END
write = self.write
if self.bin:
write(EMPTY_DICT)
else: # proto 0 -- can't use EMPTY_DICT
write(MARK + DICT)
self.memoize(obj)
self._batch_setitems(obj.iteritems())
dispatch[DictionaryType] = save_dict
if not PyStringMap is None:
dispatch[PyStringMap] = save_dict
def _batch_setitems(self, items):
save = self.save
write = self.write
if not self.bin:
for k, v in items:
save(k)
save(v)
write(SETITEM)
return
r = xrange(self._BATCHSIZE)
while items is not None:
tmp = []
for i in r:
try:
tmp.append(items.next())
except StopIteration:
items = None
break
n = len(tmp)
if n > 1:
write(MARK)
for k, v in tmp:
save(k)
save(v)
write(SETITEMS)
elif n:
k, v = tmp[0]
save(k)
save(v)
write(SETITEM)
def save_inst(self, obj):
cls = obj.__class__
memo = self.memo
write = self.write
save = self.save
if hasattr(obj, '__getinitargs__'):
args = obj.__getinitargs__()
len(args) # XXX Assert it's a sequence
_keep_alive(args, memo)
else:
args = ()
write(MARK)
if self.bin:
save(cls)
for arg in args:
save(arg)
write(OBJ)
else:
for arg in args:
save(arg)
write(INST + cls.__module__ + '\n' + cls.__name__ + '\n')
self.memoize(obj)
try:
getstate = obj.__getstate__
except AttributeError:
stuff = obj.__dict__
else:
stuff = getstate()
_keep_alive(stuff, memo)
save(stuff)
write(BUILD)
dispatch[InstanceType] = save_inst
def save_global(self, obj, name=None, pack=struct.pack):
write = self.write
memo = self.memo
if name is None:
name = obj.__name__
module = getattr(obj, "__module__", None)
if module is None:
module = whichmodule(obj, name)
try:
__import__(module)
mod = sys.modules[module]
klass = getattr(mod, name)
except (ImportError, KeyError, AttributeError):
raise PicklingError(
"Can't pickle %r: it's not found as %s.%s" %
(obj, module, name))
else:
if klass is not obj:
raise PicklingError(
"Can't pickle %r: it's not the same object as %s.%s" %
(obj, module, name))
if self.proto >= 2:
code = _extension_registry.get((module, name))
if code:
assert code > 0
if code <= 0xff:
write(EXT1 + chr(code))
elif code <= 0xffff:
write("%c%c%c" % (EXT2, code&0xff, code>>8))
else:
write(EXT4 + pack("<i", code))
return
write(GLOBAL + module + '\n' + name + '\n')
self.memoize(obj)
def save_function(self, obj):
try:
return self.save_global(obj)
except PicklingError, e:
pass
reduce = dispatch_table.get(type(obj))
if reduce:
rv = reduce(obj)
else:
reduce = getattr(obj, "__reduce_ex__", None)
if reduce:
rv = reduce(self.proto)
else:
reduce = getattr(obj, "__reduce__", None)
if reduce:
rv = reduce()
else:
raise e
return self.save_reduce(obj=obj, *rv)
dispatch[ClassType] = save_global
dispatch[FunctionType] = save_function
dispatch[BuiltinFunctionType] = save_global
dispatch[TypeType] = save_global
def _keep_alive(x, memo):
"""Keeps a reference to the object x in the memo.
Because we remember objects by their id, we have
to assure that possibly temporary objects are kept
alive by referencing them.
We store a reference at the id of the memo, which should
normally not be used unless someone tries to deepcopy
the memo itself...
"""
try:
memo[id(memo)].append(x)
except KeyError:
memo[id(memo)]=[x]
classmap = {}
def whichmodule(func, funcname):
"""Figure out the module in which a function occurs.
Search sys.modules for the module.
Cache in classmap.
Return a module name.
If the function cannot be found, return "__main__".
"""
mod = getattr(func, "__module__", None)
if mod is not None:
return mod
if func in classmap:
return classmap[func]
for name, module in sys.modules.items():
if module is None:
continue
if name != '__main__' and getattr(module, funcname, None) is func:
break
else:
name = '__main__'
classmap[func] = name
return name
class Unpickler:
def __init__(self, file):
"""This takes a file-like object for reading a pickle data stream.
The protocol version of the pickle is detected automatically, so no
proto argument is needed.
The file-like object must have two methods, a read() method that
takes an integer argument, and a readline() method that requires no
arguments. Both methods should return a string. Thus file-like
object can be a file object opened for reading, a StringIO object,
or any other custom object that meets this interface.
"""
self.readline = file.readline
self.read = file.read
self.memo = {}
def load(self):
"""Read a pickled object representation from the open file.
Return the reconstituted object hierarchy specified in the file.
"""
self.mark = object()
self.stack = []
self.append = self.stack.append
read = self.read
dispatch = self.dispatch
try:
while 1:
key = read(1)
dispatch[key](self)
except _Stop, stopinst:
return stopinst.value
# Return largest index k such that self.stack[k] is self.mark.
# If the stack doesn't contain a mark, eventually raises IndexError.
# This could be sped by maintaining another stack, of indices at which
# the mark appears. For that matter, the latter stack would suffice,
# and we wouldn't need to push mark objects on self.stack at all.
def marker(self):
stack = self.stack
mark = self.mark
k = len(stack)-1
while stack[k] is not mark: k = k-1
return k
dispatch = {}
def load_eof(self):
raise EOFError
dispatch[''] = load_eof
def load_proto(self):
proto = ord(self.read(1))
if not 0 <= proto <= 2:
raise ValueError, "unsupported pickle protocol: %d" % proto
dispatch[PROTO] = load_proto
def load_persid(self):
pid = self.readline()[:-1]
self.append(self.persistent_load(pid))
dispatch[PERSID] = load_persid
def load_binpersid(self):
pid = self.stack.pop()
self.append(self.persistent_load(pid))
dispatch[BINPERSID] = load_binpersid
def load_none(self):
self.append(None)
dispatch[NONE] = load_none
def load_false(self):
self.append(False)
dispatch[NEWFALSE] = load_false
def load_true(self):
self.append(True)
dispatch[NEWTRUE] = load_true
def load_int(self):
data = self.readline()
if data == FALSE[1:]:
val = False
elif data == TRUE[1:]:
val = True
else:
try:
val = int(data)
except ValueError:
val = long(data)
self.append(val)
dispatch[INT] = load_int
def load_binint(self):
self.append(mloads('i' + self.read(4)))
dispatch[BININT] = load_binint
def load_binint1(self):
self.append(ord(self.read(1)))
dispatch[BININT1] = load_binint1
def load_binint2(self):
self.append(mloads('i' + self.read(2) + '\000\000'))
dispatch[BININT2] = load_binint2
def load_long(self):
self.append(long(self.readline()[:-1], 0))
dispatch[LONG] = load_long
def load_long1(self):
n = ord(self.read(1))
bytes = self.read(n)
self.append(decode_long(bytes))
dispatch[LONG1] = load_long1
def load_long4(self):
n = mloads('i' + self.read(4))
bytes = self.read(n)
self.append(decode_long(bytes))
dispatch[LONG4] = load_long4
def load_float(self):
self.append(float(self.readline()[:-1]))
dispatch[FLOAT] = load_float
def load_binfloat(self, unpack=struct.unpack):
self.append(unpack('>d', self.read(8))[0])
dispatch[BINFLOAT] = load_binfloat
def load_string(self):
rep = self.readline()[:-1]
for q in "\"'": # double or single quote
if rep.startswith(q):
if not rep.endswith(q):
raise ValueError, "insecure string pickle"
rep = rep[len(q):-len(q)]
break
else:
raise ValueError, "insecure string pickle"
self.append(rep.decode("string-escape"))
dispatch[STRING] = load_string
def load_binstring(self):
len = mloads('i' + self.read(4))
self.append(self.read(len))
dispatch[BINSTRING] = load_binstring
def load_unicode(self):
self.append(unicode(self.readline()[:-1],'raw-unicode-escape'))
dispatch[UNICODE] = load_unicode
def load_binunicode(self):
len = mloads('i' + self.read(4))
self.append(unicode(self.read(len),'utf-8'))
dispatch[BINUNICODE] = load_binunicode
def load_short_binstring(self):
len = ord(self.read(1))
self.append(self.read(len))
dispatch[SHORT_BINSTRING] = load_short_binstring
def load_tuple(self):
k = self.marker()
self.stack[k:] = [tuple(self.stack[k+1:])]
dispatch[TUPLE] = load_tuple
def load_empty_tuple(self):
self.stack.append(())
dispatch[EMPTY_TUPLE] = load_empty_tuple
def load_tuple1(self):
self.stack[-1] = (self.stack[-1],)
dispatch[TUPLE1] = load_tuple1
def load_tuple2(self):
self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
dispatch[TUPLE2] = load_tuple2
def load_tuple3(self):
self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
dispatch[TUPLE3] = load_tuple3
def load_empty_list(self):
self.stack.append([])
dispatch[EMPTY_LIST] = load_empty_list
def load_empty_dictionary(self):
self.stack.append({})
dispatch[EMPTY_DICT] = load_empty_dictionary
def load_list(self):
k = self.marker()
self.stack[k:] = [self.stack[k+1:]]
dispatch[LIST] = load_list
def load_dict(self):
k = self.marker()
d = {}
items = self.stack[k+1:]
for i in range(0, len(items), 2):
key = items[i]
value = items[i+1]
d[key] = value
self.stack[k:] = [d]
dispatch[DICT] = load_dict
# INST and OBJ differ only in how they get a class object. It's not
# only sensible to do the rest in a common routine, the two routines
# previously diverged and grew different bugs.
# klass is the class to instantiate, and k points to the topmost mark
# object, following which are the arguments for klass.__init__.
def _instantiate(self, klass, k):
args = tuple(self.stack[k+1:])
del self.stack[k:]
instantiated = 0
if (not args and
type(klass) is ClassType and
not hasattr(klass, "__getinitargs__")):
try:
value = _EmptyClass()
value.__class__ = klass
instantiated = 1
except RuntimeError:
# In restricted execution, assignment to inst.__class__ is
# prohibited
pass
if not instantiated:
try:
value = klass(*args)
except TypeError, err:
raise TypeError, "in constructor for %s: %s" % (
klass.__name__, str(err)), sys.exc_info()[2]
self.append(value)
def load_inst(self):
module = self.readline()[:-1]
name = self.readline()[:-1]
klass = self.find_class(module, name)
self._instantiate(klass, self.marker())
dispatch[INST] = load_inst
def load_obj(self):
# Stack is ... markobject classobject arg1 arg2 ...
k = self.marker()
klass = self.stack.pop(k+1)
self._instantiate(klass, k)
dispatch[OBJ] = load_obj
def load_newobj(self):
args = self.stack.pop()
cls = self.stack[-1]
obj = cls.__new__(cls, *args)
self.stack[-1] = obj
dispatch[NEWOBJ] = load_newobj
def load_global(self):
module = self.readline()[:-1]
name = self.readline()[:-1]
klass = self.find_class(module, name)
self.append(klass)
dispatch[GLOBAL] = load_global
def load_ext1(self):
code = ord(self.read(1))
self.get_extension(code)
dispatch[EXT1] = load_ext1
def load_ext2(self):
code = mloads('i' + self.read(2) + '\000\000')
self.get_extension(code)
dispatch[EXT2] = load_ext2
def load_ext4(self):
code = mloads('i' + self.read(4))
self.get_extension(code)
dispatch[EXT4] = load_ext4
def get_extension(self, code):
nil = []
obj = _extension_cache.get(code, nil)
if obj is not nil:
self.append(obj)
return
key = _inverted_registry.get(code)
if not key:
raise ValueError("unregistered extension code %d" % code)
obj = self.find_class(*key)
_extension_cache[code] = obj
self.append(obj)
def find_class(self, module, name):
# Subclasses may override this
__import__(module)
mod = sys.modules[module]
klass = getattr(mod, name)
return klass
def load_reduce(self):
stack = self.stack
args = stack.pop()
func = stack[-1]
value = func(*args)
stack[-1] = value
dispatch[REDUCE] = load_reduce
def load_pop(self):
del self.stack[-1]
dispatch[POP] = load_pop
def load_pop_mark(self):
k = self.marker()
del self.stack[k:]
dispatch[POP_MARK] = load_pop_mark
def load_dup(self):
self.append(self.stack[-1])
dispatch[DUP] = load_dup
def load_get(self):
self.append(self.memo[self.readline()[:-1]])
dispatch[GET] = load_get
def load_binget(self):
i = ord(self.read(1))
self.append(self.memo[repr(i)])
dispatch[BINGET] = load_binget
def load_long_binget(self):
i = mloads('i' + self.read(4))
self.append(self.memo[repr(i)])
dispatch[LONG_BINGET] = load_long_binget
def load_put(self):
self.memo[self.readline()[:-1]] = self.stack[-1]
dispatch[PUT] = load_put
def load_binput(self):
i = ord(self.read(1))
self.memo[repr(i)] = self.stack[-1]
dispatch[BINPUT] = load_binput
def load_long_binput(self):
i = mloads('i' + self.read(4))
self.memo[repr(i)] = self.stack[-1]
dispatch[LONG_BINPUT] = load_long_binput
def load_append(self):
stack = self.stack
value = stack.pop()
list = stack[-1]
list.append(value)
dispatch[APPEND] = load_append
def load_appends(self):
stack = self.stack
mark = self.marker()
list = stack[mark - 1]
list.extend(stack[mark + 1:])
del stack[mark:]
dispatch[APPENDS] = load_appends
def load_setitem(self):
stack = self.stack
value = stack.pop()
key = stack.pop()
dict = stack[-1]
dict[key] = value
dispatch[SETITEM] = load_setitem
def load_setitems(self):
stack = self.stack
mark = self.marker()
dict = stack[mark - 1]
for i in range(mark + 1, len(stack), 2):
dict[stack[i]] = stack[i + 1]
del stack[mark:]
dispatch[SETITEMS] = load_setitems
def load_build(self):
stack = self.stack
state = stack.pop()
inst = stack[-1]
setstate = getattr(inst, "__setstate__", None)
if setstate:
setstate(state)
return
slotstate = None
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
if state:
try:
inst.__dict__.update(state)
except RuntimeError:
# XXX In restricted execution, the instance's __dict__
# is not accessible. Use the old way of unpickling
# the instance variables. This is a semantic
# difference when unpickling in restricted
# vs. unrestricted modes.
# Note, however, that cPickle has never tried to do the
# .update() business, and always uses
# PyObject_SetItem(inst.__dict__, key, value) in a
# loop over state.items().
for k, v in state.items():
setattr(inst, k, v)
if slotstate:
for k, v in slotstate.items():
setattr(inst, k, v)
dispatch[BUILD] = load_build
def load_mark(self):
self.append(self.mark)
dispatch[MARK] = load_mark
def load_stop(self):
value = self.stack.pop()
raise _Stop(value)
dispatch[STOP] = load_stop
# Helper class for load_inst/load_obj
class _EmptyClass:
pass
# Encode/decode longs in linear time.
import binascii as _binascii
def encode_long(x):
r"""Encode a long to a two's complement little-endian binary string.
Note that 0L is a special case, returning an empty string, to save a
byte in the LONG1 pickling context.
>>> encode_long(0L)
''
>>> encode_long(255L)
'\xff\x00'
>>> encode_long(32767L)
'\xff\x7f'
>>> encode_long(-256L)
'\x00\xff'
>>> encode_long(-32768L)
'\x00\x80'
>>> encode_long(-128L)
'\x80'
>>> encode_long(127L)
'\x7f'
>>>
"""
if x == 0:
return ''
if x > 0:
ashex = hex(x)
assert ashex.startswith("0x")
njunkchars = 2 + ashex.endswith('L')
nibbles = len(ashex) - njunkchars
if nibbles & 1:
# need an even # of nibbles for unhexlify
ashex = "0x0" + ashex[2:]
elif int(ashex[2], 16) >= 8:
# "looks negative", so need a byte of sign bits
ashex = "0x00" + ashex[2:]
else:
# Build the 256's-complement: (1L << nbytes) + x. The trick is
# to find the number of bytes in linear time (although that should
# really be a constant-time task).
ashex = hex(-x)
assert ashex.startswith("0x")
njunkchars = 2 + ashex.endswith('L')
nibbles = len(ashex) - njunkchars
if nibbles & 1:
# Extend to a full byte.
nibbles += 1
nbits = nibbles * 4
x += 1L << nbits
assert x > 0
ashex = hex(x)
njunkchars = 2 + ashex.endswith('L')
newnibbles = len(ashex) - njunkchars
if newnibbles < nibbles:
ashex = "0x" + "0" * (nibbles - newnibbles) + ashex[2:]
if int(ashex[2], 16) < 8:
# "looks positive", so need a byte of sign bits
ashex = "0xff" + ashex[2:]
if ashex.endswith('L'):
ashex = ashex[2:-1]
else:
ashex = ashex[2:]
assert len(ashex) & 1 == 0, (x, ashex)
binary = _binascii.unhexlify(ashex)
return binary[::-1]
def decode_long(data):
r"""Decode a long from a two's complement little-endian binary string.
>>> decode_long('')
0L
>>> decode_long("\xff\x00")
255L
>>> decode_long("\xff\x7f")
32767L
>>> decode_long("\x00\xff")
-256L
>>> decode_long("\x00\x80")
-32768L
>>> decode_long("\x80")
-128L
>>> decode_long("\x7f")
127L
"""
nbytes = len(data)
if nbytes == 0:
return 0L
ashex = _binascii.hexlify(data[::-1])
n = long(ashex, 16) # quadratic time before Python 2.3; linear now
if data[-1] >= '\x80':
n -= 1L << (nbytes * 8)
return n
# Shorthands
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def dump(obj, file, protocol=None):
Pickler(file, protocol).dump(obj)
def dumps(obj, protocol=None):
file = StringIO()
Pickler(file, protocol).dump(obj)
return file.getvalue()
def load(file):
return Unpickler(file).load()
def loads(str):
file = StringIO(str)
return Unpickler(file).load()
# Doctest
def _test():
import doctest
return doctest.testmod()
if __name__ == "__main__":
_test()
| false
| true
|
790e1310d068ec75c2fa665636158e6e3a5e3abd
| 5,886
|
py
|
Python
|
Modules/Gekokujo_vanilla_enhanced/Code/Module_system/modmerger_header.py
|
roalyr/gekokujo_vanilla_enhanced
|
84d8cc1033be98357ac139fafbc1c10851274019
|
[
"MIT"
] | 1
|
2021-01-17T06:21:36.000Z
|
2021-01-17T06:21:36.000Z
|
Modules/Gekokujo_vanilla_enhanced/Code/Module_system/modmerger_header.py
|
roalyr/gekokujo_vanilla_enhanced
|
84d8cc1033be98357ac139fafbc1c10851274019
|
[
"MIT"
] | 2
|
2021-01-17T12:57:37.000Z
|
2021-02-08T02:16:45.000Z
|
Modules/Gekokujo_vanilla_enhanced/Code/Module_system/modmerger_header.py
|
roalyr/gekokujo_vanilla_enhanced
|
84d8cc1033be98357ac139fafbc1c10851274019
|
[
"MIT"
] | null | null | null |
# modmerger framework
# by sphere
modmerger_version = 201
# Note: the following is from Warband 1.127 module system.
from modmerger_options import *
# list of current module components
# not in use atm
mod_components = [
"animations",
"constants",
"dialogs",
"factions",
"game_menus",
"info",
"info_pages",
"items",
"map_icons",
"meshes",
"mission_templates",
"music",
"particle_systems",
"parties",
"party_templates",
"postfx",
"presentations",
"quests",
"scenes",
"scene_props",
"scripts",
"simple_triggers",
"skills",
"skins",
"sounds",
"strings",
"tableau_materials",
"triggers",
"troops",
"variables",
]
# these are components that do not need to be branded
mod_components0=[
"info",
]
# These are the components requiring full import of symbols. Currently only "constants"
mod_components1=[
"constants",
]
# these are components which passes in variable with same name as the component name itself
mod_components2=[
"animations",
"dialogs",
"game_menus",
"info_pages",
"items",
"map_icons",
"meshes",
"particle_systems",
"parties",
"party_templates",
"presentations",
"quests",
"scenes",
"scene_props",
"scripts",
"simple_triggers",
"skills",
"skins",
"sounds",
"strings",
"triggers",
"troops",
]
# This is a list of components, each with a list of the important global variables defined in it
mod_components3={
#"info": ["export_dir"], # export_dir
"variables" : ["reserved_variables"] , # reserved_variables
"music": ["tracks"], # tracks
"tableau_materials" : ["tableaus"] , # tableaus
"postfx" : ["postfx_params"], # postfx_params
"factions" :["factions","default_kingdom_relations"],
"mission_templates": [
"mission_templates",
"multiplayer_server_check_belfry_movement",
"multiplayer_server_spawn_bots",
"multiplayer_server_manage_bots",
"multiplayer_server_check_polls",
"multiplayer_server_check_end_map",
"multiplayer_once_at_the_first_frame",
"multiplayer_battle_window_opened",
"common_battle_mission_start",
"common_battle_tab_press",
"common_battle_init_banner",
"common_arena_fight_tab_press",
"common_custom_battle_tab_press",
"custom_battle_check_victory_condition",
"custom_battle_check_defeat_condition",
"common_battle_victory_display",
"common_siege_question_answered",
"common_custom_battle_question_answered",
"common_custom_siege_init",
"common_siege_init",
"common_music_situation_update",
"common_siege_ai_trigger_init",
"common_siege_ai_trigger_init_2",
"common_siege_ai_trigger_init_after_2_secs",
"common_siege_defender_reinforcement_check",
"common_siege_defender_reinforcement_archer_reposition",
"common_siege_attacker_reinforcement_check",
"common_siege_attacker_do_not_stall",
"common_battle_check_friendly_kills",
"common_battle_check_victory_condition",
"common_battle_victory_display",
"common_siege_refill_ammo",
"common_siege_check_defeat_condition",
"common_battle_order_panel",
"common_battle_order_panel_tick",
"common_battle_inventory",
"common_inventory_not_available",
"common_siege_init_ai_and_belfry",
"common_siege_move_belfry",
"common_siege_rotate_belfry",
"common_siege_assign_men_to_belfry",
"tournament_triggers",
],
}
# fix for mb vanilla
if module_sys_info["version"] <= 1011:
mod_components.remove("info_pages")
mod_components.remove("postfx")
mod_components3["mission_templates"] = [ #1011 version
"mission_templates",
"common_battle_mission_start",
"common_battle_tab_press",
"common_arena_fight_tab_press",
"common_custom_battle_tab_press",
"common_battle_victory_display",
"common_siege_question_answered",
"common_custom_battle_question_answered",
"common_custom_siege_init",
"common_siege_init",
"common_music_situation_update",
"common_siege_ai_trigger_init",
"common_siege_ai_trigger_init_2",
"common_siege_ai_trigger_init_after_2_secs",
"common_siege_defender_reinforcement_check",
"common_siege_defender_reinforcement_archer_reposition",
"common_siege_attacker_reinforcement_check",
"common_siege_attacker_do_not_stall",
"common_battle_check_friendly_kills",
"common_battle_check_victory_condition",
"common_battle_victory_display",
"common_siege_refill_ammo",
"common_siege_check_defeat_condition",
"common_battle_order_panel",
"common_battle_order_panel_tick",
"common_battle_inventory",
"common_inventory_not_available",
"common_siege_init_ai_and_belfry",
"common_siege_move_belfry",
"common_siege_rotate_belfry",
"common_siege_assign_men_to_belfry",
]
# Gets the component type as a bitmask: 1 if found in mod_components1, 2 if found in
# mod_components2, 4 if a key of mod_components3. Components found in none of them return 0.
def get_component_type(component_name):
comp_type = 0
try:
mod_components1.index(component_name)
comp_type |= 1
except ValueError:
pass
try:
mod_components2.index(component_name)
comp_type |= 2
except ValueError:
pass
try:
mod_components3[component_name]
comp_type |= 4
except KeyError:
pass
return comp_type
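# A minimal check, not part of the original script: the return value is a
# bitmask over the three lists/dicts above. With the definitions as given,
# "constants" only matches mod_components1, "scripts" only mod_components2,
# "music" only mod_components3, and "info" matches none of them.
def _component_type_demo():
    assert get_component_type("constants") == 1
    assert get_component_type("scripts") == 2
    assert get_component_type("music") == 4
    assert get_component_type("info") == 0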
| 29.577889
| 136
| 0.663099
|
modmerger_version = 201
from modmerger_options import *
mod_components = [
"animations",
"constants",
"dialogs",
"factions",
"game_menus",
"info",
"info_pages",
"items",
"map_icons",
"meshes",
"mission_templates",
"music",
"particle_systems",
"parties",
"party_templates",
"postfx",
"presentations",
"quests",
"scenes",
"scene_props",
"scripts",
"simple_triggers",
"skills",
"skins",
"sounds",
"strings",
"tableau_materials",
"triggers",
"troops",
"variables",
]
mod_components0=[
"info",
]
mod_components1=[
"constants",
]
mod_components2=[
"animations",
"dialogs",
"game_menus",
"info_pages",
"items",
"map_icons",
"meshes",
"particle_systems",
"parties",
"party_templates",
"presentations",
"quests",
"scenes",
"scene_props",
"scripts",
"simple_triggers",
"skills",
"skins",
"sounds",
"strings",
"triggers",
"troops",
]
mod_components3={
es" : ["reserved_variables"] ,
"music": ["tracks"],
"tableau_materials" : ["tableaus"] ,
"postfx" : ["postfx_params"],
"factions" :["factions","default_kingdom_relations"],
"mission_templates": [
"mission_templates",
"multiplayer_server_check_belfry_movement",
"multiplayer_server_spawn_bots",
"multiplayer_server_manage_bots",
"multiplayer_server_check_polls",
"multiplayer_server_check_end_map",
"multiplayer_once_at_the_first_frame",
"multiplayer_battle_window_opened",
"common_battle_mission_start",
"common_battle_tab_press",
"common_battle_init_banner",
"common_arena_fight_tab_press",
"common_custom_battle_tab_press",
"custom_battle_check_victory_condition",
"custom_battle_check_defeat_condition",
"common_battle_victory_display",
"common_siege_question_answered",
"common_custom_battle_question_answered",
"common_custom_siege_init",
"common_siege_init",
"common_music_situation_update",
"common_siege_ai_trigger_init",
"common_siege_ai_trigger_init_2",
"common_siege_ai_trigger_init_after_2_secs",
"common_siege_defender_reinforcement_check",
"common_siege_defender_reinforcement_archer_reposition",
"common_siege_attacker_reinforcement_check",
"common_siege_attacker_do_not_stall",
"common_battle_check_friendly_kills",
"common_battle_check_victory_condition",
"common_battle_victory_display",
"common_siege_refill_ammo",
"common_siege_check_defeat_condition",
"common_battle_order_panel",
"common_battle_order_panel_tick",
"common_battle_inventory",
"common_inventory_not_available",
"common_siege_init_ai_and_belfry",
"common_siege_move_belfry",
"common_siege_rotate_belfry",
"common_siege_assign_men_to_belfry",
"tournament_triggers",
],
}
if module_sys_info["version"] <= 1011:
mod_components.remove("info_pages")
mod_components.remove("postfx")
mod_components3["mission_templates"] = [
"mission_templates",
"common_battle_mission_start",
"common_battle_tab_press",
"common_arena_fight_tab_press",
"common_custom_battle_tab_press",
"common_battle_victory_display",
"common_siege_question_answered",
"common_custom_battle_question_answered",
"common_custom_siege_init",
"common_siege_init",
"common_music_situation_update",
"common_siege_ai_trigger_init",
"common_siege_ai_trigger_init_2",
"common_siege_ai_trigger_init_after_2_secs",
"common_siege_defender_reinforcement_check",
"common_siege_defender_reinforcement_archer_reposition",
"common_siege_attacker_reinforcement_check",
"common_siege_attacker_do_not_stall",
"common_battle_check_friendly_kills",
"common_battle_check_victory_condition",
"common_battle_victory_display",
"common_siege_refill_ammo",
"common_siege_check_defeat_condition",
"common_battle_order_panel",
"common_battle_order_panel_tick",
"common_battle_inventory",
"common_inventory_not_available",
"common_siege_init_ai_and_belfry",
"common_siege_move_belfry",
"common_siege_rotate_belfry",
"common_siege_assign_men_to_belfry",
]
def get_component_type(component_name):
comp_type = 0
try:
mod_components1.index(component_name)
comp_type |= 1
except ValueError:
pass
try:
mod_components2.index(component_name)
comp_type |= 2
except ValueError:
pass
try:
mod_components3[component_name]
comp_type |= 4
except KeyError:
pass
return comp_type
| true
| true
|
790e132404ed50d08b1244917c18bfcaf513277d
| 568
|
py
|
Python
|
wetterdienst/util/parameter.py
|
meteoDaniel/wetterdienst
|
106a2fa9f887983281a6886c15bb3a845850dfb7
|
[
"MIT"
] | 3
|
2020-06-19T09:21:07.000Z
|
2020-06-30T22:12:42.000Z
|
wetterdienst/util/parameter.py
|
meteoDaniel/wetterdienst
|
106a2fa9f887983281a6886c15bb3a845850dfb7
|
[
"MIT"
] | 27
|
2020-06-17T23:10:37.000Z
|
2020-07-01T22:05:17.000Z
|
wetterdienst/util/parameter.py
|
meteoDaniel/wetterdienst
|
106a2fa9f887983281a6886c15bb3a845850dfb7
|
[
"MIT"
] | 1
|
2020-06-22T22:37:45.000Z
|
2020-06-22T22:37:45.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
class _GetAttrMeta(type):
# https://stackoverflow.com/questions/33727217/subscriptable-objects-in-class
def __getitem__(cls, x):
return getattr(cls, x)
def __iter__(cls):
"""Getting subclasses which usually represent resolutions"""
for attr in vars(cls):
if not attr.startswith("_"):
yield cls[attr]
class DatasetTreeCore(metaclass=_GetAttrMeta):
pass
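# A minimal sketch, not part of the original module: subclasses inherit the
# metaclass, so class attributes become reachable by subscription and by
# iteration. The resolution names below are invented for illustration.
class _DemoResolution(DatasetTreeCore):
    DAILY = "daily"
    HOURLY = "hourly"
assert _DemoResolution["DAILY"] == "daily"
assert sorted(_DemoResolution) == ["daily", "hourly"]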
| 31.555556
| 81
| 0.672535
|
class _GetAttrMeta(type):
def __getitem__(cls, x):
return getattr(cls, x)
def __iter__(cls):
for attr in vars(cls):
if not attr.startswith("_"):
yield cls[attr]
class DatasetTreeCore(metaclass=_GetAttrMeta):
pass
| true
| true
|
790e136eafc8c8f7d463b00694cbbbb6e567b49b
| 1,383
|
py
|
Python
|
src/util/load_sentence.py
|
lychyzclc/High-throughput-relation-extraction-algorithm
|
93530ddcb78df3f1b1b7fda34821fa307d095c74
|
[
"MIT"
] | 1
|
2021-01-04T03:15:50.000Z
|
2021-01-04T03:15:50.000Z
|
src/util/load_sentence.py
|
lychyzclc/High-throughput-relation-extraction-algorithm
|
93530ddcb78df3f1b1b7fda34821fa307d095c74
|
[
"MIT"
] | null | null | null |
src/util/load_sentence.py
|
lychyzclc/High-throughput-relation-extraction-algorithm
|
93530ddcb78df3f1b1b7fda34821fa307d095c74
|
[
"MIT"
] | null | null | null |
#!/bin/python
"""
This is a class for loading input sentences
"""
class SentenceAttr:
def __init__(self, attr_list):
self.article_id = attr_list[1]
self.title = attr_list[2]
self.sentence = attr_list[3]
self.article_structure = attr_list[4]
self.place = attr_list[5]
def __str__(self):
return "Article Id: " + self.article_id + "\n" + "Title: " + self.title + "\n"\
+"Sentence: " + self.sentence + "\n" +\
"Article Structure: " + self.article_structure + "\n" + "Place: " + self.place + "\n"
class LoadSentences:
def __init__(self, filepath, num):
self.filepath = filepath
self.num = num
"""对导入的文本做简单清洗"""
def Process(self, line):
line = line.replace('\n', '')
line_list = line.split("|")
return SentenceAttr(line_list)
"""逐行读取文件并返回迭代器"""
def Reader(self):
f = open(self.filepath)
line = f.readline()
count = 0
while line:
if count == self.num:
break
yield self.Process(line)
line = f.readline()
count += 1
f.close()
def test():
sentences_path = "../0_output.txt0.txt"
sentences = LoadSentences(sentences_path, 5).Reader()
for each in sentences:
print(each)
if __name__ == "__main__":
test()
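# A minimal sketch, not part of the original module: Process() expects a
# "|"-delimited line with at least six fields, of which fields 1-5 become
# the article id, title, sentence, article structure and place. The sample
# line and path below are invented.
def _sample_sentence():
    line = "0|A123|Example Title|An example sentence.|Body|2\n"
    return LoadSentences("unused_path.txt", 1).Process(line)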
| 24.263158
| 102
| 0.54953
|
class SentenceAttr:
def __init__(self, attr_list):
self.article_id = attr_list[1]
self.title = attr_list[2]
self.sentence = attr_list[3]
self.article_structure = attr_list[4]
self.place = attr_list[5]
def __str__(self):
return "Article Id: " + self.article_id + "\n" + "Title: " + self.title + "\n"\
+"Sentence: " + self.sentence + "\n" +\
"Article Structure: " + self.article_structure + "\n" + "Place: " + self.place + "\n"
class LoadSentences:
def __init__(self, filepath, num):
self.filepath = filepath
self.num = num
def Process(self, line):
line = line.replace('\n', '')
line_list = line.split("|")
return SentenceAttr(line_list)
def Reader(self):
f = open(self.filepath)
line = f.readline()
count = 0
while line:
if count == self.num:
break
yield self.Process(line)
line = f.readline()
count += 1
f.close()
def test():
sentences_path = "../0_output.txt0.txt"
sentences = LoadSentences(sentences_path, 5).Reader()
for each in sentences:
print(each)
if __name__ == "__main__":
test()
| true
| true
|
790e1487aaf6274b3e5915514fb45a95f0156ce1
| 3,349
|
py
|
Python
|
micromamba/tests/test_constructor.py
|
wulmer/mamba
|
5961d76afdd8b0f070bf0f2da396ef25289c965c
|
[
"BSD-3-Clause"
] | 2,262
|
2020-09-08T07:46:35.000Z
|
2022-03-31T21:11:35.000Z
|
micromamba/tests/test_constructor.py
|
wulmer/mamba
|
5961d76afdd8b0f070bf0f2da396ef25289c965c
|
[
"BSD-3-Clause"
] | 841
|
2020-09-07T15:22:43.000Z
|
2022-03-31T18:18:43.000Z
|
micromamba/tests/test_constructor.py
|
wulmer/mamba
|
5961d76afdd8b0f070bf0f2da396ef25289c965c
|
[
"BSD-3-Clause"
] | 132
|
2020-09-10T03:05:45.000Z
|
2022-03-29T12:32:47.000Z
|
import glob
import json
import os
import shutil
import subprocess
from .helpers import *
def constructor(*args, default_channel=True, no_rc=True, no_dry_run=False):
umamba = get_umamba()
cmd = [umamba, "constructor"] + [arg for arg in args if arg]
try:
res = subprocess.check_output(cmd)
if "--json" in args:
try:
j = json.loads(res)
return j
except json.decoder.JSONDecodeError as e:
print(f"Error when loading JSON output from {res}")
raise (e)
print(f"Error when executing '{' '.join(cmd)}'")
return res.decode()
except subprocess.CalledProcessError as e:
print(f"Error when executing '{' '.join(cmd)}'")
raise (e)
class TestInstall:
current_root_prefix = os.environ["MAMBA_ROOT_PREFIX"]
current_prefix = os.environ["CONDA_PREFIX"]
cache = os.path.join(current_root_prefix, "pkgs")
env_name = random_string()
root_prefix = os.path.expanduser(os.path.join("~", "tmproot" + random_string()))
prefix = os.path.join(root_prefix, "envs", env_name)
new_cache = os.path.join(root_prefix, "pkgs")
@classmethod
def setup_class(cls):
os.environ["MAMBA_ROOT_PREFIX"] = TestInstall.root_prefix
os.environ["CONDA_PREFIX"] = TestInstall.prefix
# speed-up the tests
os.environ["CONDA_PKGS_DIRS"] = TestInstall.new_cache
os.makedirs(TestInstall.new_cache, exist_ok=True)
root_pkgs = glob.glob(
os.path.join(TestInstall.current_root_prefix, "pkgs", "x*.tar.bz2")
)
urls = []
for pkg in root_pkgs:
shutil.copy(pkg, TestInstall.new_cache)
urls.append(
"http://testurl.com/conda-forge/linux-64/"
+ os.path.basename(pkg)
+ "#123412341234"
)
cls.pkgs = [os.path.basename(pkg) for pkg in root_pkgs]
with open(os.path.join(TestInstall.new_cache, "urls"), "w") as furls:
furls.write("\n".join(urls))
@classmethod
def teardown_class(cls):
os.environ["MAMBA_ROOT_PREFIX"] = TestInstall.current_root_prefix
os.environ["CONDA_PREFIX"] = TestInstall.current_prefix
shutil.rmtree(TestInstall.root_prefix)
@classmethod
def teardown(cls):
os.environ["MAMBA_ROOT_PREFIX"] = TestInstall.root_prefix
os.environ["CONDA_PREFIX"] = TestInstall.prefix
def test_extract_pkgs(self):
constructor("--prefix", TestInstall.root_prefix, "--extract-conda-pkgs")
for pkg in self.pkgs:
extracted_pkg = os.path.join(
TestInstall.root_prefix, "pkgs", pkg.rsplit(".tar.bz2")[0]
)
with open(
os.path.join(extracted_pkg, "info", "repodata_record.json")
) as rr:
repodata_record = json.load(rr)
with open(os.path.join(extracted_pkg, "info", "index.json")) as ri:
index = json.load(ri)
assert repodata_record["fn"] == pkg
assert repodata_record["md5"] == "123412341234"
assert (
repodata_record["url"]
== "http://testurl.com/conda-forge/linux-64/" + pkg
)
assert repodata_record["depends"] == index["depends"]
| 34.173469
| 84
| 0.59779
|
import glob
import json
import os
import shutil
import subprocess
from .helpers import *
def constructor(*args, default_channel=True, no_rc=True, no_dry_run=False):
umamba = get_umamba()
cmd = [umamba, "constructor"] + [arg for arg in args if arg]
try:
res = subprocess.check_output(cmd)
if "--json" in args:
try:
j = json.loads(res)
return j
except json.decoder.JSONDecodeError as e:
print(f"Error when loading JSON output from {res}")
raise (e)
print(f"Error when executing '{' '.join(cmd)}'")
return res.decode()
except subprocess.CalledProcessError as e:
print(f"Error when executing '{' '.join(cmd)}'")
raise (e)
class TestInstall:
current_root_prefix = os.environ["MAMBA_ROOT_PREFIX"]
current_prefix = os.environ["CONDA_PREFIX"]
cache = os.path.join(current_root_prefix, "pkgs")
env_name = random_string()
root_prefix = os.path.expanduser(os.path.join("~", "tmproot" + random_string()))
prefix = os.path.join(root_prefix, "envs", env_name)
new_cache = os.path.join(root_prefix, "pkgs")
@classmethod
def setup_class(cls):
os.environ["MAMBA_ROOT_PREFIX"] = TestInstall.root_prefix
os.environ["CONDA_PREFIX"] = TestInstall.prefix
os.environ["CONDA_PKGS_DIRS"] = TestInstall.new_cache
os.makedirs(TestInstall.new_cache, exist_ok=True)
root_pkgs = glob.glob(
os.path.join(TestInstall.current_root_prefix, "pkgs", "x*.tar.bz2")
)
urls = []
for pkg in root_pkgs:
shutil.copy(pkg, TestInstall.new_cache)
urls.append(
"http://testurl.com/conda-forge/linux-64/"
+ os.path.basename(pkg)
+ "#123412341234"
)
cls.pkgs = [os.path.basename(pkg) for pkg in root_pkgs]
with open(os.path.join(TestInstall.new_cache, "urls"), "w") as furls:
furls.write("\n".join(urls))
@classmethod
def teardown_class(cls):
os.environ["MAMBA_ROOT_PREFIX"] = TestInstall.current_root_prefix
os.environ["CONDA_PREFIX"] = TestInstall.current_prefix
shutil.rmtree(TestInstall.root_prefix)
@classmethod
def teardown(cls):
os.environ["MAMBA_ROOT_PREFIX"] = TestInstall.root_prefix
os.environ["CONDA_PREFIX"] = TestInstall.prefix
def test_extract_pkgs(self):
constructor("--prefix", TestInstall.root_prefix, "--extract-conda-pkgs")
for pkg in self.pkgs:
extracted_pkg = os.path.join(
TestInstall.root_prefix, "pkgs", pkg.rsplit(".tar.bz2")[0]
)
with open(
os.path.join(extracted_pkg, "info", "repodata_record.json")
) as rr:
repodata_record = json.load(rr)
with open(os.path.join(extracted_pkg, "info", "index.json")) as ri:
index = json.load(ri)
assert repodata_record["fn"] == pkg
assert repodata_record["md5"] == "123412341234"
assert (
repodata_record["url"]
== "http://testurl.com/conda-forge/linux-64/" + pkg
)
assert repodata_record["depends"] == index["depends"]
| true
| true
|
790e1536aa8ba9f68a72fdec36fac8cfb0962a8d
| 330
|
py
|
Python
|
test/src/lib/idol/py_mar/all/target/optional_method.py
|
lyric-com/idol
|
285005e9ddaa92b2284b7e9c28cd12f1e34746ec
|
[
"MIT"
] | null | null | null |
test/src/lib/idol/py_mar/all/target/optional_method.py
|
lyric-com/idol
|
285005e9ddaa92b2284b7e9c28cd12f1e34746ec
|
[
"MIT"
] | 2
|
2020-03-24T18:03:10.000Z
|
2020-03-31T10:41:56.000Z
|
test/src/lib/idol/py_mar/all/target/optional_method.py
|
lyric-com/idol
|
285005e9ddaa92b2284b7e9c28cd12f1e34746ec
|
[
"MIT"
] | null | null | null |
# This file was scaffold by idol_mar, but it will not be overwritten, so feel free to edit.
# This file will be regenerated if you delete it.
from ...codegen.all.target.optional_method import (
AllTargetOptionalMethodSchema as OptionalMethodSchemaCodegen,
)
class OptionalMethodSchema(OptionalMethodSchemaCodegen):
pass
| 33
| 91
| 0.79697
|
from ...codegen.all.target.optional_method import (
AllTargetOptionalMethodSchema as OptionalMethodSchemaCodegen,
)
class OptionalMethodSchema(OptionalMethodSchemaCodegen):
pass
| true
| true
|
790e15665d118e607ee6fb42f3600960688f5338
| 219
|
py
|
Python
|
src/python/WMCore/WMBS/Oracle/JobGroup/Exists.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 21
|
2015-11-19T16:18:45.000Z
|
2021-12-02T18:20:39.000Z
|
src/python/WMCore/WMBS/Oracle/JobGroup/Exists.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 5,671
|
2015-01-06T14:38:52.000Z
|
2022-03-31T22:11:14.000Z
|
src/python/WMCore/WMBS/Oracle/JobGroup/Exists.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 67
|
2015-01-21T15:55:38.000Z
|
2022-02-03T19:53:13.000Z
|
#!/usr/bin/env python
"""
_Exists_
Oracle implementation of JobGroup.Exists
"""
__all__ = []
from WMCore.WMBS.MySQL.JobGroup.Exists import Exists as ExistsJobGroupMySQL
class Exists(ExistsJobGroupMySQL):
pass
| 13.6875
| 75
| 0.757991
|
__all__ = []
from WMCore.WMBS.MySQL.JobGroup.Exists import Exists as ExistsJobGroupMySQL
class Exists(ExistsJobGroupMySQL):
pass
| true
| true
|
790e156e6fcdfcc6193771a8f0462b9a233f5e2e
| 509
|
py
|
Python
|
extra_tests/snippets/encoding.py
|
mainsail-org/RustPython
|
5d2d87c24f1ff7201fcc8d4fcffadb0ec12dc127
|
[
"CC-BY-4.0",
"MIT"
] | 11,058
|
2018-05-29T07:40:06.000Z
|
2022-03-31T11:38:42.000Z
|
extra_tests/snippets/encoding.py
|
mainsail-org/RustPython
|
5d2d87c24f1ff7201fcc8d4fcffadb0ec12dc127
|
[
"CC-BY-4.0",
"MIT"
] | 2,105
|
2018-06-01T10:07:16.000Z
|
2022-03-31T14:56:42.000Z
|
extra_tests/snippets/encoding.py
|
mainsail-org/RustPython
|
5d2d87c24f1ff7201fcc8d4fcffadb0ec12dc127
|
[
"CC-BY-4.0",
"MIT"
] | 914
|
2018-07-27T09:36:14.000Z
|
2022-03-31T19:56:34.000Z
|
from testutils import assert_raises
try:
b" \xff".decode("ascii")
except UnicodeDecodeError as e:
assert e.start == 3
assert e.end == 4
else:
assert False, "should have thrown UnicodeDecodeError"
assert_raises(UnicodeEncodeError, "¿como estás?".encode, "ascii")
def round_trip(s, encoding="utf-8"):
encoded = s.encode(encoding)
decoded = encoded.decode(encoding)
assert s == decoded
round_trip("👺♦ 𝐚Şđƒ ☆☝")
round_trip("☢🐣 ᖇ𝓤𝕊тⓟ𝕐𝕥卄σ𝔫 ♬👣")
round_trip("💀👌 ק𝔂tℍⓞ𝓷 3 🔥👤")
| 24.238095
| 66
| 0.667976
|
from testutils import assert_raises
try:
b" \xff".decode("ascii")
except UnicodeDecodeError as e:
assert e.start == 3
assert e.end == 4
else:
assert False, "should have thrown UnicodeDecodeError"
assert_raises(UnicodeEncodeError, "¿como estás?".encode, "ascii")
def round_trip(s, encoding="utf-8"):
encoded = s.encode(encoding)
decoded = encoded.decode(encoding)
assert s == decoded
round_trip("👺♦ 𝐚Şđƒ ☆☝")
round_trip("☢🐣 ᖇ𝓤𝕊тⓟ𝕐𝕥卄σ𝔫 ♬👣")
round_trip("💀👌 ק𝔂tℍⓞ𝓷 3 🔥👤")
| true
| true
|
790e17e8c0da2027ad0cb511c168c353056583aa
| 4,163
|
py
|
Python
|
pywikibot/titletranslate.py
|
jkjkjkjkjk/pywikibot-core
|
f3748c95ea694083ae00534973d0d1dd018a5b43
|
[
"MIT"
] | 2
|
2017-06-19T16:48:34.000Z
|
2017-07-07T14:15:28.000Z
|
pywikibot/titletranslate.py
|
jkjkjkjkjk/pywikibot-core
|
f3748c95ea694083ae00534973d0d1dd018a5b43
|
[
"MIT"
] | 11
|
2018-12-07T18:20:05.000Z
|
2022-03-11T23:12:42.000Z
|
pywikibot/titletranslate.py
|
jkjkjkjkjk/pywikibot-core
|
f3748c95ea694083ae00534973d0d1dd018a5b43
|
[
"MIT"
] | 3
|
2018-12-09T10:18:35.000Z
|
2020-09-12T13:50:14.000Z
|
# -*- coding: utf-8 -*-
"""Title translate module."""
#
# (C) Rob W.W. Hooft, 2003
# (C) Yuri Astrakhan, 2005
# (C) Pywikibot team, 2003-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id: 790e17e8c0da2027ad0cb511c168c353056583aa $'
#
import re
import pywikibot
import pywikibot.date as date
from pywikibot import config
from pywikibot.tools import deprecated_args
@deprecated_args(family=None)
def translate(page=None, hints=None, auto=True, removebrackets=False,
site=None):
"""
Return a list of links to pages on other sites based on hints.
Entries for single page titles list those pages. Page titles for entries
such as "all:" or "xyz:" or "20:" are first built from the page title of
'page' and then listed. When 'removebrackets' is True, a trailing pair of
brackets and the text between them is removed from the page title.
If 'auto' is true, known year and date page titles are autotranslated
to all known target languages and inserted into the list.
"""
result = set()
assert page or site
if site is None and page:
site = page.site
if hints:
for h in hints:
if ':' not in h:
# argument given as -hint:xy where xy is a language code
codes = h
newname = ''
else:
codes, newname = h.split(':', 1)
if newname == '':
# if given as -hint:xy or -hint:xy:, assume that there should
# be a page in language xy with the same title as the page
# we're currently working on ...
if page is None:
continue
newname = page.title(withNamespace=False)
# ... unless we do want brackets
if removebrackets:
newname = re.sub(re.compile(r"\W*?\(.*?\)\W*?",
re.UNICODE), u" ", newname)
try:
number = int(codes)
codes = site.family.languages_by_size[:number]
except ValueError:
if codes == 'all':
codes = site.family.languages_by_size
elif codes in site.family.language_groups:
codes = site.family.language_groups[codes]
else:
codes = codes.split(',')
for newcode in codes:
if newcode in site.languages():
if newcode != site.code:
ns = page.namespace() if page else 0
x = pywikibot.Link(newname,
site.getSite(code=newcode),
defaultNamespace=ns)
result.add(x)
else:
if config.verbose_output:
pywikibot.output(u"Ignoring unknown language code %s"
% newcode)
# Autotranslate dates into all other languages, the rest will come from
# existing interwiki links.
if auto and page:
# search inside all dictionaries for this link
sitelang = page.site.code
dictName, value = date.getAutoFormat(sitelang, page.title())
if dictName:
if True:
pywikibot.output(
u'TitleTranslate: %s was recognized as %s with value %d'
% (page.title(), dictName, value))
for entryLang, entry in date.formats[dictName].items():
if entryLang not in site.languages():
continue
if entryLang != sitelang:
if True:
newname = entry(value)
x = pywikibot.Link(
newname,
pywikibot.Site(code=entryLang,
fam=site.family))
result.add(x)
return list(result)
| 37.504505
| 77
| 0.515974
|
from __future__ import absolute_import, unicode_literals
__version__ = '$Id: 790e17e8c0da2027ad0cb511c168c353056583aa $'
import re
import pywikibot
import pywikibot.date as date
from pywikibot import config
from pywikibot.tools import deprecated_args
@deprecated_args(family=None)
def translate(page=None, hints=None, auto=True, removebrackets=False,
site=None):
result = set()
assert page or site
if site is None and page:
site = page.site
if hints:
for h in hints:
if ':' not in h:
codes = h
newname = ''
else:
codes, newname = h.split(':', 1)
if newname == '':
if page is None:
continue
newname = page.title(withNamespace=False)
# ... unless we do want brackets
if removebrackets:
newname = re.sub(re.compile(r"\W*?\(.*?\)\W*?",
re.UNICODE), u" ", newname)
try:
number = int(codes)
codes = site.family.languages_by_size[:number]
except ValueError:
if codes == 'all':
codes = site.family.languages_by_size
elif codes in site.family.language_groups:
codes = site.family.language_groups[codes]
else:
codes = codes.split(',')
for newcode in codes:
if newcode in site.languages():
if newcode != site.code:
ns = page.namespace() if page else 0
x = pywikibot.Link(newname,
site.getSite(code=newcode),
defaultNamespace=ns)
result.add(x)
else:
if config.verbose_output:
pywikibot.output(u"Ignoring unknown language code %s"
% newcode)
# Autotranslate dates into all other languages, the rest will come from
# existing interwiki links.
if auto and page:
# search inside all dictionaries for this link
sitelang = page.site.code
dictName, value = date.getAutoFormat(sitelang, page.title())
if dictName:
if True:
pywikibot.output(
u'TitleTranslate: %s was recognized as %s with value %d'
% (page.title(), dictName, value))
for entryLang, entry in date.formats[dictName].items():
if entryLang not in site.languages():
continue
if entryLang != sitelang:
if True:
newname = entry(value)
x = pywikibot.Link(
newname,
pywikibot.Site(code=entryLang,
fam=site.family))
result.add(x)
return list(result)
| true
| true
|
790e184447ef77e263091f70152537bcdf6ca866
| 546
|
py
|
Python
|
manage.py
|
sendsent/djorgification
|
b8a5c61acde0dc78a1f9f4e913cb735a3495c70d
|
[
"MIT"
] | null | null | null |
manage.py
|
sendsent/djorgification
|
b8a5c61acde0dc78a1f9f4e913cb735a3495c70d
|
[
"MIT"
] | null | null | null |
manage.py
|
sendsent/djorgification
|
b8a5c61acde0dc78a1f9f4e913cb735a3495c70d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djorgification.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 34.125
| 78
| 0.690476
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djorgification.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| true
| true
|
790e188c3fc31d15b1293bb631f8f155e990503d
| 6,365
|
py
|
Python
|
kubernetes-the-hard-way/system/collections/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py
|
jkroepke/homelab
|
ffdd849e39b52972870f5552e734fd74cb1254a1
|
[
"Apache-2.0"
] | 5
|
2020-12-16T21:42:09.000Z
|
2022-03-28T16:04:32.000Z
|
kubernetes-the-hard-way/system/collections/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py
|
jkroepke/kubernetes-the-hard-way
|
70fd096a04addec0777744c9731a4e3fbdc40c8f
|
[
"Apache-2.0"
] | null | null | null |
kubernetes-the-hard-way/system/collections/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py
|
jkroepke/kubernetes-the-hard-way
|
70fd096a04addec0777744c9731a4e3fbdc40c8f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: utm_proxy_location
author:
- Johannes Brunswicker (@MatrixCrawler)
short_description: create, update or destroy reverse_proxy location entry in Sophos UTM
description:
- Create, update or destroy a reverse_proxy location entry in SOPHOS UTM.
- This module needs to have the REST Ability of the UTM to be activated.
options:
name:
description:
- The name of the object. Will be used to identify the entry
required: true
access_control:
description:
- whether to activate the access control for the location
type: str
default: '0'
choices:
- '0'
- '1'
allowed_networks:
description:
- A list of allowed networks
type: list
default: REF_NetworkAny
auth_profile:
description:
- The reference name of the auth profile
backend:
description:
- A list of backends that are connected with this location declaration
default: []
be_path:
description:
- The path of the backend
comment:
description:
- The optional comment string
denied_networks:
description:
- A list of denied network references
default: []
hot_standby:
description:
- Activate hot standby mode
type: bool
default: False
path:
description:
- The path of the location
default: "/"
status:
description:
- Whether the location is active or not
type: bool
default: True
stickysession_id:
description:
- The stickysession id
default: ROUTEID
stickysession_status:
description:
- Enable the stickysession
type: bool
default: False
websocket_passthrough:
description:
- Enable the websocket passthrough
type: bool
default: False
extends_documentation_fragment:
- community.general.utm
'''
EXAMPLES = """
- name: Create UTM proxy_location
utm_proxy_backend:
utm_host: sophos.host.name
utm_token: abcdefghijklmno1234
name: TestLocationEntry
backend: REF_OBJECT_STRING
state: present
- name: Remove UTM proxy_location
utm_proxy_backend:
utm_host: sophos.host.name
utm_token: abcdefghijklmno1234
name: TestLocationEntry
state: absent
"""
RETURN = """
result:
description: The utm object that was created
returned: success
type: complex
contains:
_ref:
description: The reference name of the object
type: str
_locked:
description: Whether or not the object is currently locked
type: bool
_type:
description: The type of the object
type: str
name:
description: The name of the object
type: str
access_control:
description: Whether to use access control state
type: str
allowed_networks:
description: List of allowed network reference names
type: list
auth_profile:
description: The auth profile reference name
type: str
backend:
description: The backend reference name
type: str
be_path:
description: The backend path
type: str
comment:
description: The comment string
type: str
denied_networks:
description: The list of the denied network names
type: list
hot_standby:
      description: Use hot standby
type: bool
path:
description: Path name
type: str
status:
description: Whether the object is active or not
type: bool
stickysession_id:
description: The identifier of the stickysession
type: str
stickysession_status:
description: Whether to use stickysession or not
type: bool
websocket_passthrough:
description: Whether websocket passthrough will be used or not
type: bool
"""
from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
from ansible.module_utils._text import to_native
def main():
endpoint = "reverse_proxy/location"
key_to_check_for_changes = ["access_control", "allowed_networks", "auth_profile", "backend", "be_path", "comment",
"denied_networks", "hot_standby", "path", "status", "stickysession_id",
"stickysession_status", "websocket_passthrough"]
module = UTMModule(
argument_spec=dict(
name=dict(type='str', required=True),
access_control=dict(type='str', required=False, default="0", choices=['0', '1']),
allowed_networks=dict(type='list', elements='str', required=False, default=['REF_NetworkAny']),
auth_profile=dict(type='str', required=False, default=""),
backend=dict(type='list', elements='str', required=False, default=[]),
be_path=dict(type='str', required=False, default=""),
comment=dict(type='str', required=False, default=""),
denied_networks=dict(type='list', elements='str', required=False, default=[]),
hot_standby=dict(type='bool', required=False, default=False),
path=dict(type='str', required=False, default="/"),
status=dict(type='bool', required=False, default=True),
stickysession_id=dict(type='str', required=False, default='ROUTEID'),
stickysession_status=dict(type='bool', required=False, default=False),
websocket_passthrough=dict(type='bool', required=False, default=False),
)
)
try:
UTM(module, endpoint, key_to_check_for_changes).execute()
except Exception as e:
module.fail_json(msg=to_native(e))
if __name__ == '__main__':
main()
| 31.20098
| 118
| 0.614925
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: utm_proxy_location
author:
- Johannes Brunswicker (@MatrixCrawler)
short_description: create, update or destroy reverse_proxy location entry in Sophos UTM
description:
- Create, update or destroy a reverse_proxy location entry in SOPHOS UTM.
- This module needs to have the REST Ability of the UTM to be activated.
options:
name:
description:
- The name of the object. Will be used to identify the entry
required: true
access_control:
description:
- whether to activate the access control for the location
type: str
default: '0'
choices:
- '0'
- '1'
allowed_networks:
description:
- A list of allowed networks
type: list
default: REF_NetworkAny
auth_profile:
description:
- The reference name of the auth profile
backend:
description:
- A list of backends that are connected with this location declaration
default: []
be_path:
description:
- The path of the backend
comment:
description:
- The optional comment string
denied_networks:
description:
- A list of denied network references
default: []
hot_standby:
description:
- Activate hot standby mode
type: bool
default: False
path:
description:
- The path of the location
default: "/"
status:
description:
- Whether the location is active or not
type: bool
default: True
stickysession_id:
description:
- The stickysession id
default: ROUTEID
stickysession_status:
description:
- Enable the stickysession
type: bool
default: False
websocket_passthrough:
description:
- Enable the websocket passthrough
type: bool
default: False
extends_documentation_fragment:
- community.general.utm
'''
EXAMPLES = """
- name: Create UTM proxy_location
utm_proxy_backend:
utm_host: sophos.host.name
utm_token: abcdefghijklmno1234
name: TestLocationEntry
backend: REF_OBJECT_STRING
state: present
- name: Remove UTM proxy_location
utm_proxy_backend:
utm_host: sophos.host.name
utm_token: abcdefghijklmno1234
name: TestLocationEntry
state: absent
"""
RETURN = """
result:
description: The utm object that was created
returned: success
type: complex
contains:
_ref:
description: The reference name of the object
type: str
_locked:
description: Whether or not the object is currently locked
type: bool
_type:
description: The type of the object
type: str
name:
description: The name of the object
type: str
access_control:
description: Whether to use access control state
type: str
allowed_networks:
description: List of allowed network reference names
type: list
auth_profile:
description: The auth profile reference name
type: str
backend:
description: The backend reference name
type: str
be_path:
description: The backend path
type: str
comment:
description: The comment string
type: str
denied_networks:
description: The list of the denied network names
type: list
hot_standby:
      description: Use hot standby
type: bool
path:
description: Path name
type: str
status:
description: Whether the object is active or not
type: bool
stickysession_id:
description: The identifier of the stickysession
type: str
stickysession_status:
description: Whether to use stickysession or not
type: bool
websocket_passthrough:
description: Whether websocket passthrough will be used or not
type: bool
"""
from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
from ansible.module_utils._text import to_native
def main():
endpoint = "reverse_proxy/location"
key_to_check_for_changes = ["access_control", "allowed_networks", "auth_profile", "backend", "be_path", "comment",
"denied_networks", "hot_standby", "path", "status", "stickysession_id",
"stickysession_status", "websocket_passthrough"]
module = UTMModule(
argument_spec=dict(
name=dict(type='str', required=True),
access_control=dict(type='str', required=False, default="0", choices=['0', '1']),
allowed_networks=dict(type='list', elements='str', required=False, default=['REF_NetworkAny']),
auth_profile=dict(type='str', required=False, default=""),
backend=dict(type='list', elements='str', required=False, default=[]),
be_path=dict(type='str', required=False, default=""),
comment=dict(type='str', required=False, default=""),
denied_networks=dict(type='list', elements='str', required=False, default=[]),
hot_standby=dict(type='bool', required=False, default=False),
path=dict(type='str', required=False, default="/"),
status=dict(type='bool', required=False, default=True),
stickysession_id=dict(type='str', required=False, default='ROUTEID'),
stickysession_status=dict(type='bool', required=False, default=False),
websocket_passthrough=dict(type='bool', required=False, default=False),
)
)
try:
UTM(module, endpoint, key_to_check_for_changes).execute()
except Exception as e:
module.fail_json(msg=to_native(e))
if __name__ == '__main__':
main()
| true
| true
|
790e18ac2c2f48915e12579beb0e6dcb4b5321c2
| 961
|
py
|
Python
|
test/test_settings.py
|
carlio/setoptconf-tmp
|
959733fa2babe3ae0afe3f8826977fdcf7b8c09a
|
[
"MIT"
] | null | null | null |
test/test_settings.py
|
carlio/setoptconf-tmp
|
959733fa2babe3ae0afe3f8826977fdcf7b8c09a
|
[
"MIT"
] | null | null | null |
test/test_settings.py
|
carlio/setoptconf-tmp
|
959733fa2babe3ae0afe3f8826977fdcf7b8c09a
|
[
"MIT"
] | null | null | null |
from decimal import Decimal
import setoptconf as soc
GOOD_NAMES = ("foo", "foo_bar", "foo123", "foo_bar_baz")
BAD_NAMES = ("_foo", "1foo", "FOO", "foo_", "foo__bar", "foo-bar")
def test_name():
for name in GOOD_NAMES:
yield check_good_name, name
for name in BAD_NAMES:
yield check_bad_name, name
def check_good_name(name):
setting = soc.StringSetting(name)
def check_bad_name(name):
try:
setting = soc.StringSetting(name)
except soc.NamingError:
pass
else:
assert False, "Invalid name allowed: %s" % name
def test_list_setting():
setting = soc.ListSetting("foo", soc.String)
assert setting.name == "foo"
setting.value = ["bar", "baz"]
assert setting.value == ["bar", "baz"]
def test_choice_setting():
setting = soc.ChoiceSetting("foo", ["bar", "baz"], soc.String)
assert setting.name == "foo"
setting.value = "baz"
assert setting.value == "baz"
| 19.612245
| 66
| 0.636837
|
from decimal import Decimal
import setoptconf as soc
GOOD_NAMES = ("foo", "foo_bar", "foo123", "foo_bar_baz")
BAD_NAMES = ("_foo", "1foo", "FOO", "foo_", "foo__bar", "foo-bar")
def test_name():
for name in GOOD_NAMES:
yield check_good_name, name
for name in BAD_NAMES:
yield check_bad_name, name
def check_good_name(name):
setting = soc.StringSetting(name)
def check_bad_name(name):
try:
setting = soc.StringSetting(name)
except soc.NamingError:
pass
else:
assert False, "Invalid name allowed: %s" % name
def test_list_setting():
setting = soc.ListSetting("foo", soc.String)
assert setting.name == "foo"
setting.value = ["bar", "baz"]
assert setting.value == ["bar", "baz"]
def test_choice_setting():
setting = soc.ChoiceSetting("foo", ["bar", "baz"], soc.String)
assert setting.name == "foo"
setting.value = "baz"
assert setting.value == "baz"
| true
| true
|
790e19a196cbe8574a7a57830504d6e3674e0e25
| 793
|
py
|
Python
|
venv/Scripts/f2py.py
|
jeremycward/ipp-core
|
c3dbebaf997b045da8385cb3dfab46820e40afda
|
[
"MIT"
] | 1
|
2019-12-04T15:38:54.000Z
|
2019-12-04T15:38:54.000Z
|
venv/Scripts/f2py.py
|
jeremycward/ipp-core
|
c3dbebaf997b045da8385cb3dfab46820e40afda
|
[
"MIT"
] | null | null | null |
venv/Scripts/f2py.py
|
jeremycward/ipp-core
|
c3dbebaf997b045da8385cb3dfab46820e40afda
|
[
"MIT"
] | 1
|
2019-12-04T15:42:57.000Z
|
2019-12-04T15:42:57.000Z
|
#!c:\users\jerem\dev\ipp-core\venv\scripts\python.exe
# See http://cens.ioc.ee/projects/f2py2e/
from __future__ import division, print_function
import os
import sys
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
try:
i = sys.argv.index("--" + mode)
del sys.argv[i]
break
except ValueError:
pass
os.environ["NO_SCIPY_IMPORT"] = "f2py"
if mode == "g3-numpy":
sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
sys.exit(1)
elif mode == "2e-numeric":
from f2py2e import main
elif mode == "2e-numarray":
sys.argv.append("-DNUMARRAY")
from f2py2e import main
elif mode == "2e-numpy":
from numpy.f2py import main
else:
sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
sys.exit(1)
main()
| 27.344828
| 67
| 0.641866
|
from __future__ import division, print_function
import os
import sys
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
try:
i = sys.argv.index("--" + mode)
del sys.argv[i]
break
except ValueError:
pass
os.environ["NO_SCIPY_IMPORT"] = "f2py"
if mode == "g3-numpy":
sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
sys.exit(1)
elif mode == "2e-numeric":
from f2py2e import main
elif mode == "2e-numarray":
sys.argv.append("-DNUMARRAY")
from f2py2e import main
elif mode == "2e-numpy":
from numpy.f2py import main
else:
sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
sys.exit(1)
main()
| true
| true
|
790e19bbff4454a6b0438be6595349c610dc7d0d
| 3,008
|
py
|
Python
|
testsuite/ui/views/admin/product/active_docs.py
|
dlaso99/3scale-tests
|
b31a3b3596af6d632b393e383c0417ea56bd95ca
|
[
"Apache-2.0"
] | 5
|
2021-11-04T14:09:24.000Z
|
2021-12-23T13:48:36.000Z
|
testsuite/ui/views/admin/product/active_docs.py
|
dlaso99/3scale-tests
|
b31a3b3596af6d632b393e383c0417ea56bd95ca
|
[
"Apache-2.0"
] | 41
|
2021-11-03T14:27:21.000Z
|
2022-03-29T14:46:16.000Z
|
testsuite/ui/views/admin/product/active_docs.py
|
dlaso99/3scale-tests
|
b31a3b3596af6d632b393e383c0417ea56bd95ca
|
[
"Apache-2.0"
] | 12
|
2021-11-03T17:28:31.000Z
|
2021-11-30T12:28:25.000Z
|
"""View representations of Product Active docs pages"""
from widgetastic_patternfly4 import PatternflyTable
from widgetastic.widget import View, Text
from testsuite.ui.views.admin.product import BaseProductView
from testsuite.ui.widgets.buttons import ThreescaleDeleteButton, ThreescaleEditButton
from testsuite.ui.widgets import ActiveDocV2Section, ActiveDocV3Section
from testsuite.ui.navigation import step
class ActiveDocsView(BaseProductView):
"""View representation of Active Docs list page"""
path_pattern = '/apiconfig/services/{product_id}/api_docs'
active_docs_table = PatternflyTable(locator="//*[@id='content']/table")
@step("ActiveDocsDetailView")
def detail(self, active_doc):
"""Navigate to active doc detail/preview page"""
self.active_docs_table.row(name=active_doc["name"]).name.click()
def prerequisite(self):
return BaseProductView
@property
def is_displayed(self):
return BaseProductView.is_displayed.fget(self) and self.active_docs_table.is_displayed and \
self.path in self.browser.url
class ActiveDocsDetailView(BaseProductView):
"""View representation of Active Docs Detail page"""
path_pattern = '/apiconfig/services/{product_id}/api_docs/{active_doc_id}/preview'
delete_btn = ThreescaleDeleteButton()
edit_btn = ThreescaleEditButton()
def __init__(self, parent, product, active_doc):
super().__init__(parent, product, active_doc_id=active_doc.entity_id)
@View.nested
# pylint: disable=invalid-name
class oas2(View):
"""OAS version 2 section"""
expand_operations_link = Text(locator="//*[contains(@class, 'expandResource')]")
collapse_operations_link = Text(locator="//*[contains(@class, 'collapseResource')]")
active_docs_section = ActiveDocV2Section()
def make_request(self, endpoint):
"""
Make request on preview page
:param endpoint: string of endpoint which should be tried
:return:
"""
self.expand_operations_link.click()
self.active_docs_section.try_it_out(endpoint)
@View.nested
# pylint: disable=invalid-name
class oas3(View):
"""OAS version 3 section"""
active_docs_section = ActiveDocV3Section()
server = Text("//label[@for='servers']/select/option")
def make_request(self, method, path, key):
"""
Make request on preview page
            :param path: string, e.g. /post, /get
            :param method: string, e.g. GET, POST
            :param key: string, name of application
:return:
"""
self.active_docs_section.try_it_out(method, path, key)
def prerequisite(self):
return ActiveDocsView
@property
def is_displayed(self):
return BaseProductView.is_displayed.fget(self) and self.edit_btn.is_displayed and \
self.delete_btn.is_displayed and self.path in self.browser.url
| 37.6
| 100
| 0.683178
|
from widgetastic_patternfly4 import PatternflyTable
from widgetastic.widget import View, Text
from testsuite.ui.views.admin.product import BaseProductView
from testsuite.ui.widgets.buttons import ThreescaleDeleteButton, ThreescaleEditButton
from testsuite.ui.widgets import ActiveDocV2Section, ActiveDocV3Section
from testsuite.ui.navigation import step
class ActiveDocsView(BaseProductView):
path_pattern = '/apiconfig/services/{product_id}/api_docs'
active_docs_table = PatternflyTable(locator="//*[@id='content']/table")
@step("ActiveDocsDetailView")
def detail(self, active_doc):
self.active_docs_table.row(name=active_doc["name"]).name.click()
def prerequisite(self):
return BaseProductView
@property
def is_displayed(self):
return BaseProductView.is_displayed.fget(self) and self.active_docs_table.is_displayed and \
self.path in self.browser.url
class ActiveDocsDetailView(BaseProductView):
path_pattern = '/apiconfig/services/{product_id}/api_docs/{active_doc_id}/preview'
delete_btn = ThreescaleDeleteButton()
edit_btn = ThreescaleEditButton()
def __init__(self, parent, product, active_doc):
super().__init__(parent, product, active_doc_id=active_doc.entity_id)
@View.nested
class oas2(View):
expand_operations_link = Text(locator="//*[contains(@class, 'expandResource')]")
collapse_operations_link = Text(locator="//*[contains(@class, 'collapseResource')]")
active_docs_section = ActiveDocV2Section()
def make_request(self, endpoint):
self.expand_operations_link.click()
self.active_docs_section.try_it_out(endpoint)
@View.nested
class oas3(View):
active_docs_section = ActiveDocV3Section()
server = Text("//label[@for='servers']/select/option")
def make_request(self, method, path, key):
self.active_docs_section.try_it_out(method, path, key)
def prerequisite(self):
return ActiveDocsView
@property
def is_displayed(self):
return BaseProductView.is_displayed.fget(self) and self.edit_btn.is_displayed and \
self.delete_btn.is_displayed and self.path in self.browser.url
| true
| true
|
790e1ba9e0cfe7cddaa642de2929f900f3df40f2
| 2,954
|
py
|
Python
|
hypernets/tests/tabular/tb_cuml/drift_detection_test.py
|
lyhue1991/Hypernets
|
d726bd297869eacb0cba84376fbac30206bbb60a
|
[
"Apache-2.0"
] | 3
|
2022-03-25T23:27:44.000Z
|
2022-03-27T01:32:28.000Z
|
hypernets/tests/tabular/tb_cuml/drift_detection_test.py
|
lyhue1991/Hypernets
|
d726bd297869eacb0cba84376fbac30206bbb60a
|
[
"Apache-2.0"
] | null | null | null |
hypernets/tests/tabular/tb_cuml/drift_detection_test.py
|
lyhue1991/Hypernets
|
d726bd297869eacb0cba84376fbac30206bbb60a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
"""
"""
import pandas as pd
from pandas.util import hash_pandas_object
from hypernets.tabular.datasets.dsutils import load_bank
from . import if_cuml_ready, is_cuml_installed
if is_cuml_installed:
import cudf
from hypernets.tabular.cuml_ex import CumlToolBox
dd_selector = CumlToolBox.feature_selector_with_drift_detection
@if_cuml_ready
class Test_drift_detection:
def test_shift_score(self):
df = load_bank().head(1000)
df = cudf.from_pandas(df)
selector = dd_selector()
scores = selector._covariate_shift_score(df[:700], df[700:])
print('_covariate_shift_score', scores)
assert scores['id'] >=0.95
def test_feature_selection(self):
df = load_bank()
df = cudf.from_pandas(df)
y = df.pop('y')
p = int(df.shape[0] * 0.8)
X_train = df[:p]
X_test = df[p:]
# = train_test_split(df, train_size=0.7, random_state=9527)
selector = dd_selector(remove_shift_variable=False,
auc_threshold=0.55,
min_features=15,
remove_size=0.2)
remain_features, history, scores = selector.select(X_train, X_test, copy_data=True)
assert len(remain_features) == 15
selector = dd_selector(remove_shift_variable=True,
auc_threshold=0.55,
min_features=15,
remove_size=0.2)
remain_features, history, scores = selector.select(X_train, X_test, copy_data=True)
assert len(remain_features) == 16
def test_drift_detector_split(self):
df = cudf.from_pandas(load_bank())
y = df.pop('y')
X_train, X_test = CumlToolBox.train_test_split(df.copy(), train_size=0.7, shuffle=True, random_state=9527)
dd = dd_selector().get_detector()
dd.fit(X_train, X_test)
assert len(dd.feature_names_) == 17
assert len(dd.feature_importances_) == 17
assert dd.auc_
assert len(dd.estimator_) == 5
proba = dd.predict_proba(df)
assert proba.shape[0] == df.shape[0]
df = cudf.from_pandas(load_bank())
y = df.pop('y')
p = int(df.shape[0] * 0.2)
X_train, X_test, y_train, y_test = dd.train_test_split(df.copy(), y, test_size=0.2)
assert X_train.shape == (df.shape[0] - p, df.shape[1])
assert y_train.shape == (df.shape[0] - p,)
assert X_test.shape == (p, df.shape[1])
assert y_test.shape == (p,)
df['y'] = y
X_train['y'] = y_train
X_test['y'] = y_test
df, X_train, X_test = CumlToolBox.to_local(df, X_train, X_test)
df_split = pd.concat([X_train, X_test])
df_hash = hash_pandas_object(df).sort_values()
splitted_hash = hash_pandas_object(df_split).sort_values()
assert (df_hash == splitted_hash).all()
| 35.590361
| 114
| 0.603927
|
import pandas as pd
from pandas.util import hash_pandas_object
from hypernets.tabular.datasets.dsutils import load_bank
from . import if_cuml_ready, is_cuml_installed
if is_cuml_installed:
import cudf
from hypernets.tabular.cuml_ex import CumlToolBox
dd_selector = CumlToolBox.feature_selector_with_drift_detection
@if_cuml_ready
class Test_drift_detection:
def test_shift_score(self):
df = load_bank().head(1000)
df = cudf.from_pandas(df)
selector = dd_selector()
scores = selector._covariate_shift_score(df[:700], df[700:])
print('_covariate_shift_score', scores)
assert scores['id'] >=0.95
def test_feature_selection(self):
df = load_bank()
df = cudf.from_pandas(df)
y = df.pop('y')
p = int(df.shape[0] * 0.8)
X_train = df[:p]
X_test = df[p:]
selector = dd_selector(remove_shift_variable=False,
auc_threshold=0.55,
min_features=15,
remove_size=0.2)
remain_features, history, scores = selector.select(X_train, X_test, copy_data=True)
assert len(remain_features) == 15
selector = dd_selector(remove_shift_variable=True,
auc_threshold=0.55,
min_features=15,
remove_size=0.2)
remain_features, history, scores = selector.select(X_train, X_test, copy_data=True)
assert len(remain_features) == 16
def test_drift_detector_split(self):
df = cudf.from_pandas(load_bank())
y = df.pop('y')
X_train, X_test = CumlToolBox.train_test_split(df.copy(), train_size=0.7, shuffle=True, random_state=9527)
dd = dd_selector().get_detector()
dd.fit(X_train, X_test)
assert len(dd.feature_names_) == 17
assert len(dd.feature_importances_) == 17
assert dd.auc_
assert len(dd.estimator_) == 5
proba = dd.predict_proba(df)
assert proba.shape[0] == df.shape[0]
df = cudf.from_pandas(load_bank())
y = df.pop('y')
p = int(df.shape[0] * 0.2)
X_train, X_test, y_train, y_test = dd.train_test_split(df.copy(), y, test_size=0.2)
assert X_train.shape == (df.shape[0] - p, df.shape[1])
assert y_train.shape == (df.shape[0] - p,)
assert X_test.shape == (p, df.shape[1])
assert y_test.shape == (p,)
df['y'] = y
X_train['y'] = y_train
X_test['y'] = y_test
df, X_train, X_test = CumlToolBox.to_local(df, X_train, X_test)
df_split = pd.concat([X_train, X_test])
df_hash = hash_pandas_object(df).sort_values()
splitted_hash = hash_pandas_object(df_split).sort_values()
assert (df_hash == splitted_hash).all()
| true
| true
|
790e1cd9944d798b9c162f3585b8ab6c63580cc5
| 11,096
|
py
|
Python
|
byol_pytorch/byol_pytorch.py
|
mariodoebler/byol-pytorch
|
4c1b6d27d86e0a9a39ecef6f6888038355943cd0
|
[
"MIT"
] | null | null | null |
byol_pytorch/byol_pytorch.py
|
mariodoebler/byol-pytorch
|
4c1b6d27d86e0a9a39ecef6f6888038355943cd0
|
[
"MIT"
] | null | null | null |
byol_pytorch/byol_pytorch.py
|
mariodoebler/byol-pytorch
|
4c1b6d27d86e0a9a39ecef6f6888038355943cd0
|
[
"MIT"
] | null | null | null |
import copy
from functools import wraps
import numpy as np
import wandb
import torchvision
import torch
import torch.nn.functional as F
from kornia import enhance, filters
from torchvision.transforms import RandomApply, RandomChoice
from atariari.methods.utils import EarlyStopping
from torch import nn
from torch.utils.data import BatchSampler, RandomSampler
def default(val, def_val):
return def_val if val is None else val
def flatten(t):
return t.reshape(t.shape[0], -1)
def singleton(cache_key):
def inner_fn(fn):
@wraps(fn)
def wrapper(self, *args, **kwargs):
instance = getattr(self, cache_key)
if instance is not None:
return instance
instance = fn(self, *args, **kwargs)
setattr(self, cache_key, instance)
return instance
return wrapper
return inner_fn
def get_module_device(module):
return next(module.parameters()).device
def set_requires_grad(model, val):
for p in model.parameters():
p.requires_grad = val
# loss fn
def loss_fn(x, y):
x = F.normalize(x, dim=-1, p=2)
y = F.normalize(y, dim=-1, p=2)
return 2 - 2 * (x * y).sum(dim=-1)
# augmentation utils
# class RandomApply(nn.Module):
# def __init__(self, fn, p):
# super().__init__()
# self.fn = fn
# self.p = p
# def forward(self, x):
# if random.random() > self.p:
# return x
# return self.fn(x)
# exponential moving average
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if old is None:
return new
return old * self.beta + (1 - self.beta) * new
def update_moving_average(ema_updater, ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = ema_updater.update_average(old_weight, up_weight)
# MLP class for projector and predictor
class MLP(nn.Module):
def __init__(self, dim, projection_size, hidden_size=4096):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_size),
nn.BatchNorm1d(hidden_size),
nn.ReLU(inplace=True),
nn.Linear(hidden_size, projection_size)
)
def forward(self, x):
return self.net(x)
# a wrapper class for the base neural network
# will manage the interception of the hidden layer output
# and pipe it into the projecter and predictor nets
class NetWrapper(nn.Module):
def __init__(self, net, projection_size, projection_hidden_size, layer=-2):
super().__init__()
self.net = net
self.layer = layer # final avg-pooling layer
self.projector = None
self.projection_size = projection_size
self.projection_hidden_size = projection_hidden_size
self.hidden = None
self.hook_registered = False
def _find_layer(self):
if type(self.layer) == str:
modules = dict([*self.net.named_modules()])
return modules.get(self.layer, None)
elif type(self.layer) == int:
children = [*self.net.children()]
return children[self.layer]
return None
def _hook(self, _, __, output):
self.hidden = flatten(output)
def _register_hook(self):
layer = self._find_layer()
assert layer is not None, f'hidden layer ({self.layer}) not found'
handle = layer.register_forward_hook(self._hook)
self.hook_registered = True
@singleton('projector')
def _get_projector(self, hidden):
_, dim = hidden.shape
projector = MLP(dim, self.projection_size, self.projection_hidden_size)
return projector.to(hidden)
def get_representation(self, x):
if self.layer == -1:
return self.net(x)
if not self.hook_registered:
self._register_hook()
_ = self.net(x)
hidden = self.hidden
self.hidden = None
assert hidden is not None, f'hidden layer {self.layer} never emitted an output'
return hidden
def forward(self, x):
representation = self.get_representation(x)
projector = self._get_projector(representation)
projection = projector(representation)
return projection
# main class
class BYOL(nn.Module):
def __init__(self, net, image_size, grayscale=True, num_frame_stack=1, batch_size=64, hidden_layer=-2, projection_size=256, projection_hidden_size=4096, augment_fn=None, augment_fn2=None, moving_average_decay=0.99, wandb=None, patience=15):
super().__init__()
# default SimCLR augmentation
#####
# IMPORTANT for kornia: parameters are often float!! e.g. 1. vs 1
# DEFAULT_AUG = nn.Sequential(
# RandomApply(augs.ColorJitter(0.8, 0.8, 0.8, 0.2), p=0.8),
# augs.RandomHorizontalFlip(),
# RandomApply(filters.GaussianBlur2d((3, 3), (1.5, 1.5)), p=0.1),
# input tensor: float + normalized range [0,1]
# augs.RandomResizedCrop(
# size=(image_size, image_size), scale=(0.84, 1.), ratio=(1.,1.), p=1.0)
# augs.Normalize(mean=torch.tensor(
# [0.485, 0.456, 0.406]), std=torch.tensor([0.229, 0.224, 0.225]))
# )
kernel_size = (9, 9) # has to be ODD
kernel_std = np.random.uniform(low=0.1, high=2.0)
kernel_std = (kernel_std,)*2
aug_transform = torchvision.transforms.Compose([
RandomChoice(
[enhance.AdjustBrightness(0.4),
enhance.AdjustBrightness(0.3),
enhance.AdjustBrightness(0.2),
enhance.AdjustBrightness(0.1),
enhance.AdjustBrightness(0.0)]
),
RandomChoice(
[enhance.AdjustContrast(1.0),
enhance.AdjustContrast(0.9),
enhance.AdjustContrast(0.8),
enhance.AdjustContrast(0.7),
enhance.AdjustContrast(0.6)]
),
RandomApply([filters.GaussianBlur2d(
kernel_size, kernel_std)], p=0.5)
# RandomChoice(
# [enhance.AdjustContrast(1.0),
# enhance.AdjustContrast(1.0),
# enhance.AdjustContrast(1.0),
# filters.GaussianBlur2d((1, 1), (1, 1)),
# filters.GaussianBlur2d((3, 3), (1.5, 1.5))]
# )
])
self.augment1 = default(augment_fn, aug_transform)
self.augment2 = default(augment_fn2, self.augment1)
self.online_encoder = NetWrapper(
net, projection_size, projection_hidden_size, layer=hidden_layer)
self.target_encoder = None
self.target_ema_updater = EMA(moving_average_decay)
self.online_predictor = MLP(
projection_size, projection_size, projection_hidden_size)
self.batch_size = batch_size
# get device of network and make wrapper same device
self.device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu')
print(f"Device is {self.device.type}")
self.to(self.device)
self.wandb = wandb
self.early_stopper = EarlyStopping(
patience=patience, verbose=False, wandb=self.wandb, name="encoder-byol")
if self.wandb:
wandb.watch(self.online_encoder, self.target_encoder,
self.online_predictor)
# send a mock image tensor to instantiate singleton parameters
assert grayscale
nr_channels = num_frame_stack
self.forward(torch.rand(batch_size, nr_channels,
210, 160, device=self.device))
self.opt = torch.optim.Adam(self.parameters(), lr=3e-4)
print(
f"Finished Initialization of BYOL with model {self.online_encoder.net.__class__.__name__}")
@singleton('target_encoder')
def _get_target_encoder(self):
target_encoder = copy.deepcopy(self.online_encoder)
set_requires_grad(target_encoder, False)
return target_encoder
def reset_moving_average(self):
del self.target_encoder
self.target_encoder = None
def update_moving_average(self):
assert self.target_encoder is not None, 'target encoder has not been created yet'
update_moving_average(self.target_ema_updater,
self.target_encoder, self.online_encoder)
def forward(self, x):
image_one, image_two = self.augment1(x), self.augment2(x)
online_proj_one = self.online_encoder(image_one)
online_proj_two = self.online_encoder(image_two)
online_pred_one = self.online_predictor(online_proj_one)
online_pred_two = self.online_predictor(online_proj_two)
with torch.no_grad():
target_encoder = self._get_target_encoder()
target_proj_one = target_encoder(image_one)
target_proj_two = target_encoder(image_two)
loss_one = loss_fn(online_pred_one, target_proj_two.detach())
loss_two = loss_fn(online_pred_two, target_proj_one.detach())
loss = loss_one + loss_two
return loss.mean()
def logResults(self, epoch_idx, epoch_loss, prefix=""):
print(f"{prefix} Epoch: {epoch_idx}, Loss: {epoch_loss}")
if self.wandb:
self.wandb.log({prefix + '_loss': epoch_loss},
step=epoch_idx, commit=False)
def doOneEpoch(self, nr_epoch, episodes):
mode = "train" if self.training else "val"
data_generator = generate_batch(episodes, self.batch_size, self.device)
for steps, batch in enumerate(data_generator):
print(f"batch nr {steps} for mode {mode}")
loss = self(batch)
self.opt.zero_grad()
loss.backward()
self.opt.step()
self.update_moving_average() # update moving average of target encoder
self.logResults(nr_epoch, loss / steps, prefix=mode)
if mode == "val":
self.early_stopper(-loss / steps, self.online_encoder)
def generate_batch(episodes, batch_size, device):
total_steps = sum([len(e) for e in episodes])
print('Total Steps: {}'.format(total_steps))
# Episode sampler
# Sample `num_samples` episodes then batchify them with `self.batch_size` episodes per batch
sampler = BatchSampler(RandomSampler(range(len(episodes)),
replacement=True, num_samples=total_steps),
batch_size, drop_last=True)
for nr, indices in enumerate(sampler):
x = []
episodes_batch = [episodes[i] for i in indices]
# print(f"indices in sampler nr {nr} are {*indices,}")
for e in episodes_batch:
t = np.random.randint(0, len(e))
x.append(e[t])
yield torch.stack(x).float().to(device) / 255. # SCALING!!!!
| 34.246914
| 244
| 0.620944
|
import copy
from functools import wraps
import numpy as np
import wandb
import torchvision
import torch
import torch.nn.functional as F
from kornia import enhance, filters
from torchvision.transforms import RandomApply, RandomChoice
from atariari.methods.utils import EarlyStopping
from torch import nn
from torch.utils.data import BatchSampler, RandomSampler
def default(val, def_val):
return def_val if val is None else val
def flatten(t):
return t.reshape(t.shape[0], -1)
def singleton(cache_key):
def inner_fn(fn):
@wraps(fn)
def wrapper(self, *args, **kwargs):
instance = getattr(self, cache_key)
if instance is not None:
return instance
instance = fn(self, *args, **kwargs)
setattr(self, cache_key, instance)
return instance
return wrapper
return inner_fn
def get_module_device(module):
return next(module.parameters()).device
def set_requires_grad(model, val):
for p in model.parameters():
p.requires_grad = val
def loss_fn(x, y):
x = F.normalize(x, dim=-1, p=2)
y = F.normalize(y, dim=-1, p=2)
return 2 - 2 * (x * y).sum(dim=-1)
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if old is None:
return new
return old * self.beta + (1 - self.beta) * new
def update_moving_average(ema_updater, ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = ema_updater.update_average(old_weight, up_weight)
class MLP(nn.Module):
def __init__(self, dim, projection_size, hidden_size=4096):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_size),
nn.BatchNorm1d(hidden_size),
nn.ReLU(inplace=True),
nn.Linear(hidden_size, projection_size)
)
def forward(self, x):
return self.net(x)
class NetWrapper(nn.Module):
def __init__(self, net, projection_size, projection_hidden_size, layer=-2):
super().__init__()
self.net = net
self.layer = layer
self.projector = None
self.projection_size = projection_size
self.projection_hidden_size = projection_hidden_size
self.hidden = None
self.hook_registered = False
def _find_layer(self):
if type(self.layer) == str:
modules = dict([*self.net.named_modules()])
return modules.get(self.layer, None)
elif type(self.layer) == int:
children = [*self.net.children()]
return children[self.layer]
return None
def _hook(self, _, __, output):
self.hidden = flatten(output)
def _register_hook(self):
layer = self._find_layer()
assert layer is not None, f'hidden layer ({self.layer}) not found'
handle = layer.register_forward_hook(self._hook)
self.hook_registered = True
@singleton('projector')
def _get_projector(self, hidden):
_, dim = hidden.shape
projector = MLP(dim, self.projection_size, self.projection_hidden_size)
return projector.to(hidden)
def get_representation(self, x):
if self.layer == -1:
return self.net(x)
if not self.hook_registered:
self._register_hook()
_ = self.net(x)
hidden = self.hidden
self.hidden = None
assert hidden is not None, f'hidden layer {self.layer} never emitted an output'
return hidden
def forward(self, x):
representation = self.get_representation(x)
projector = self._get_projector(representation)
projection = projector(representation)
return projection
class BYOL(nn.Module):
def __init__(self, net, image_size, grayscale=True, num_frame_stack=1, batch_size=64, hidden_layer=-2, projection_size=256, projection_hidden_size=4096, augment_fn=None, augment_fn2=None, moving_average_decay=0.99, wandb=None, patience=15):
super().__init__()
kernel_size = (9, 9)
kernel_std = np.random.uniform(low=0.1, high=2.0)
kernel_std = (kernel_std,)*2
aug_transform = torchvision.transforms.Compose([
RandomChoice(
[enhance.AdjustBrightness(0.4),
enhance.AdjustBrightness(0.3),
enhance.AdjustBrightness(0.2),
enhance.AdjustBrightness(0.1),
enhance.AdjustBrightness(0.0)]
),
RandomChoice(
[enhance.AdjustContrast(1.0),
enhance.AdjustContrast(0.9),
enhance.AdjustContrast(0.8),
enhance.AdjustContrast(0.7),
enhance.AdjustContrast(0.6)]
),
RandomApply([filters.GaussianBlur2d(
kernel_size, kernel_std)], p=0.5)
])
self.augment1 = default(augment_fn, aug_transform)
self.augment2 = default(augment_fn2, self.augment1)
self.online_encoder = NetWrapper(
net, projection_size, projection_hidden_size, layer=hidden_layer)
self.target_encoder = None
self.target_ema_updater = EMA(moving_average_decay)
self.online_predictor = MLP(
projection_size, projection_size, projection_hidden_size)
self.batch_size = batch_size
self.device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu')
print(f"Device is {self.device.type}")
self.to(self.device)
self.wandb = wandb
self.early_stopper = EarlyStopping(
patience=patience, verbose=False, wandb=self.wandb, name="encoder-byol")
if self.wandb:
wandb.watch(self.online_encoder, self.target_encoder,
self.online_predictor)
assert grayscale
nr_channels = num_frame_stack
self.forward(torch.rand(batch_size, nr_channels,
210, 160, device=self.device))
self.opt = torch.optim.Adam(self.parameters(), lr=3e-4)
print(
f"Finished Initialization of BYOL with model {self.online_encoder.net.__class__.__name__}")
@singleton('target_encoder')
def _get_target_encoder(self):
target_encoder = copy.deepcopy(self.online_encoder)
set_requires_grad(target_encoder, False)
return target_encoder
def reset_moving_average(self):
del self.target_encoder
self.target_encoder = None
def update_moving_average(self):
assert self.target_encoder is not None, 'target encoder has not been created yet'
update_moving_average(self.target_ema_updater,
self.target_encoder, self.online_encoder)
def forward(self, x):
image_one, image_two = self.augment1(x), self.augment2(x)
online_proj_one = self.online_encoder(image_one)
online_proj_two = self.online_encoder(image_two)
online_pred_one = self.online_predictor(online_proj_one)
online_pred_two = self.online_predictor(online_proj_two)
with torch.no_grad():
target_encoder = self._get_target_encoder()
target_proj_one = target_encoder(image_one)
target_proj_two = target_encoder(image_two)
loss_one = loss_fn(online_pred_one, target_proj_two.detach())
loss_two = loss_fn(online_pred_two, target_proj_one.detach())
loss = loss_one + loss_two
return loss.mean()
def logResults(self, epoch_idx, epoch_loss, prefix=""):
print(f"{prefix} Epoch: {epoch_idx}, Loss: {epoch_loss}")
if self.wandb:
self.wandb.log({prefix + '_loss': epoch_loss},
step=epoch_idx, commit=False)
def doOneEpoch(self, nr_epoch, episodes):
mode = "train" if self.training else "val"
data_generator = generate_batch(episodes, self.batch_size, self.device)
for steps, batch in enumerate(data_generator):
print(f"batch nr {steps} for mode {mode}")
loss = self(batch)
self.opt.zero_grad()
loss.backward()
self.opt.step()
self.update_moving_average()
self.logResults(nr_epoch, loss / steps, prefix=mode)
if mode == "val":
self.early_stopper(-loss / steps, self.online_encoder)
def generate_batch(episodes, batch_size, device):
total_steps = sum([len(e) for e in episodes])
print('Total Steps: {}'.format(total_steps))
sampler = BatchSampler(RandomSampler(range(len(episodes)),
replacement=True, num_samples=total_steps),
batch_size, drop_last=True)
for nr, indices in enumerate(sampler):
x = []
episodes_batch = [episodes[i] for i in indices]
for e in episodes_batch:
t = np.random.randint(0, len(e))
x.append(e[t])
yield torch.stack(x).float().to(device) / 255.
| true
| true
|
790e204c1dd4df66b9a6f2ed01c7e67e72752f5d
| 19,098
|
py
|
Python
|
tests/components/unifi/test_config_flow.py
|
sneakythr0ws/core
|
048f36c77ec488eee058df93efe76929054204ca
|
[
"Apache-2.0"
] | 1
|
2021-02-04T15:08:04.000Z
|
2021-02-04T15:08:04.000Z
|
tests/components/unifi/test_config_flow.py
|
sneakythr0ws/core
|
048f36c77ec488eee058df93efe76929054204ca
|
[
"Apache-2.0"
] | 32
|
2021-02-19T07:21:28.000Z
|
2022-03-31T06:06:39.000Z
|
tests/components/unifi/test_config_flow.py
|
sneakythr0ws/core
|
048f36c77ec488eee058df93efe76929054204ca
|
[
"Apache-2.0"
] | null | null | null |
"""Test UniFi config flow."""
from unittest.mock import patch
import aiounifi
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.unifi.const import (
CONF_ALLOW_BANDWIDTH_SENSORS,
CONF_ALLOW_UPTIME_SENSORS,
CONF_BLOCK_CLIENT,
CONF_CONTROLLER,
CONF_DETECTION_TIME,
CONF_DPI_RESTRICTIONS,
CONF_IGNORE_WIRED_BUG,
CONF_POE_CLIENTS,
CONF_SITE_ID,
CONF_SSID_FILTER,
CONF_TRACK_CLIENTS,
CONF_TRACK_DEVICES,
CONF_TRACK_WIRED_CLIENTS,
DOMAIN as UNIFI_DOMAIN,
)
from homeassistant.config_entries import SOURCE_REAUTH, SOURCE_USER
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
CONF_VERIFY_SSL,
CONTENT_TYPE_JSON,
)
from .test_controller import setup_unifi_integration
from tests.common import MockConfigEntry
CLIENTS = [{"mac": "00:00:00:00:00:01"}]
DEVICES = [
{
"board_rev": 21,
"device_id": "mock-id",
"ip": "10.0.1.1",
"last_seen": 0,
"mac": "00:00:00:00:01:01",
"model": "U7PG2",
"name": "access_point",
"state": 1,
"type": "uap",
"version": "4.0.80.10875",
"wlan_overrides": [
{
"name": "SSID 3",
"radio": "na",
"radio_name": "wifi1",
"wlan_id": "012345678910111213141516",
},
{
"name": "",
"radio": "na",
"radio_name": "wifi1",
"wlan_id": "012345678910111213141516",
},
{
"radio": "na",
"radio_name": "wifi1",
"wlan_id": "012345678910111213141516",
},
],
}
]
WLANS = [
{"name": "SSID 1"},
{"name": "SSID 2", "name_combine_enabled": False, "name_combine_suffix": "_IOT"},
]
DPI_GROUPS = [
{
"_id": "5ba29dd8e3c58f026e9d7c4a",
"name": "Default",
"site_id": "5ba29dd4e3c58f026e9d7c38",
},
]
async def test_flow_works(hass, aioclient_mock, mock_discovery):
"""Test config flow."""
mock_discovery.return_value = "1"
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["data_schema"]({CONF_USERNAME: "", CONF_PASSWORD: ""}) == {
CONF_HOST: "unifi",
CONF_USERNAME: "",
CONF_PASSWORD: "",
CONF_PORT: 443,
CONF_VERIFY_SSL: False,
}
aioclient_mock.get("https://1.2.3.4:1234", status=302)
aioclient_mock.post(
"https://1.2.3.4:1234/api/login",
json={"data": "login successful", "meta": {"rc": "ok"}},
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"https://1.2.3.4:1234/api/self/sites",
json={
"data": [{"desc": "Site name", "name": "site_id", "role": "admin"}],
"meta": {"rc": "ok"},
},
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "Site name"
assert result["data"] == {
CONF_CONTROLLER: {
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_SITE_ID: "site_id",
CONF_VERIFY_SSL: True,
}
}
async def test_flow_works_multiple_sites(hass, aioclient_mock):
"""Test config flow works when finding multiple sites."""
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
aioclient_mock.get("https://1.2.3.4:1234", status=302)
aioclient_mock.post(
"https://1.2.3.4:1234/api/login",
json={"data": "login successful", "meta": {"rc": "ok"}},
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"https://1.2.3.4:1234/api/self/sites",
json={
"data": [
{"name": "default", "role": "admin", "desc": "site name"},
{"name": "site2", "role": "admin", "desc": "site2 name"},
],
"meta": {"rc": "ok"},
},
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "site"
assert result["data_schema"]({"site": "default"})
assert result["data_schema"]({"site": "site2"})
async def test_flow_raise_already_configured(hass, aioclient_mock):
"""Test config flow aborts since a connected config entry already exists."""
await setup_unifi_integration(hass)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
aioclient_mock.get("https://1.2.3.4:1234", status=302)
aioclient_mock.post(
"https://1.2.3.4:1234/api/login",
json={"data": "login successful", "meta": {"rc": "ok"}},
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"https://1.2.3.4:1234/api/self/sites",
json={
"data": [{"desc": "Site name", "name": "site_id", "role": "admin"}],
"meta": {"rc": "ok"},
},
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_flow_aborts_configuration_updated(hass, aioclient_mock):
"""Test config flow aborts since a connected config entry already exists."""
entry = MockConfigEntry(
domain=UNIFI_DOMAIN, data={"controller": {"host": "1.2.3.4", "site": "office"}}
)
entry.add_to_hass(hass)
entry = MockConfigEntry(
domain=UNIFI_DOMAIN, data={"controller": {"host": "1.2.3.4", "site": "site_id"}}
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
aioclient_mock.get("https://1.2.3.4:1234", status=302)
aioclient_mock.post(
"https://1.2.3.4:1234/api/login",
json={"data": "login successful", "meta": {"rc": "ok"}},
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"https://1.2.3.4:1234/api/self/sites",
json={
"data": [{"desc": "Site name", "name": "site_id", "role": "admin"}],
"meta": {"rc": "ok"},
},
headers={"content-type": CONTENT_TYPE_JSON},
)
with patch("homeassistant.components.unifi.async_setup_entry"):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "configuration_updated"
async def test_flow_fails_user_credentials_faulty(hass, aioclient_mock):
"""Test config flow."""
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
aioclient_mock.get("https://1.2.3.4:1234", status=302)
with patch("aiounifi.Controller.login", side_effect=aiounifi.errors.Unauthorized):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "faulty_credentials"}
async def test_flow_fails_controller_unavailable(hass, aioclient_mock):
"""Test config flow."""
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
aioclient_mock.get("https://1.2.3.4:1234", status=302)
with patch("aiounifi.Controller.login", side_effect=aiounifi.errors.RequestError):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "service_unavailable"}
async def test_reauth_flow_update_configuration(hass, aioclient_mock):
"""Verify reauth flow can update controller configuration."""
controller = await setup_unifi_integration(hass)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN,
context={"source": SOURCE_REAUTH},
data=controller.config_entry,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == SOURCE_USER
aioclient_mock.get("https://1.2.3.4:1234", status=302)
aioclient_mock.post(
"https://1.2.3.4:1234/api/login",
json={"data": "login successful", "meta": {"rc": "ok"}},
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"https://1.2.3.4:1234/api/self/sites",
json={
"data": [{"desc": "Site name", "name": "site_id", "role": "admin"}],
"meta": {"rc": "ok"},
},
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "new_name",
CONF_PASSWORD: "new_pass",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "reauth_successful"
assert controller.host == "1.2.3.4"
assert controller.config_entry.data[CONF_CONTROLLER][CONF_USERNAME] == "new_name"
assert controller.config_entry.data[CONF_CONTROLLER][CONF_PASSWORD] == "new_pass"
async def test_advanced_option_flow(hass):
"""Test advanced config flow options."""
controller = await setup_unifi_integration(
hass,
clients_response=CLIENTS,
devices_response=DEVICES,
wlans_response=WLANS,
dpigroup_response=DPI_GROUPS,
dpiapp_response=[],
)
result = await hass.config_entries.options.async_init(
controller.config_entry.entry_id, context={"show_advanced_options": True}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "device_tracker"
assert set(
result["data_schema"].schema[CONF_SSID_FILTER].options.keys()
).intersection(("SSID 1", "SSID 2", "SSID 2_IOT", "SSID 3"))
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_TRACK_CLIENTS: False,
CONF_TRACK_WIRED_CLIENTS: False,
CONF_TRACK_DEVICES: False,
CONF_SSID_FILTER: ["SSID 1", "SSID 2_IOT", "SSID 3"],
CONF_DETECTION_TIME: 100,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "client_control"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]],
CONF_POE_CLIENTS: False,
CONF_DPI_RESTRICTIONS: False,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "statistics_sensors"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_ALLOW_BANDWIDTH_SENSORS: True,
CONF_ALLOW_UPTIME_SENSORS: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_TRACK_CLIENTS: False,
CONF_TRACK_WIRED_CLIENTS: False,
CONF_TRACK_DEVICES: False,
CONF_SSID_FILTER: ["SSID 1", "SSID 2_IOT", "SSID 3"],
CONF_DETECTION_TIME: 100,
CONF_IGNORE_WIRED_BUG: False,
CONF_POE_CLIENTS: False,
CONF_DPI_RESTRICTIONS: False,
CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]],
CONF_ALLOW_BANDWIDTH_SENSORS: True,
CONF_ALLOW_UPTIME_SENSORS: True,
}
async def test_simple_option_flow(hass):
"""Test simple config flow options."""
controller = await setup_unifi_integration(
hass,
clients_response=CLIENTS,
wlans_response=WLANS,
dpigroup_response=DPI_GROUPS,
dpiapp_response=[],
)
result = await hass.config_entries.options.async_init(
controller.config_entry.entry_id, context={"show_advanced_options": False}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "simple_options"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_TRACK_CLIENTS: False,
CONF_TRACK_DEVICES: False,
CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]],
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_TRACK_CLIENTS: False,
CONF_TRACK_DEVICES: False,
CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]],
}
async def test_form_ssdp(hass):
"""Test we get the form with ssdp source."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
"friendlyName": "UniFi Dream Machine",
"modelDescription": "UniFi Dream Machine Pro",
"ssdp_location": "http://192.168.208.1:41417/rootDesc.xml",
"serialNumber": "e0:63:da:20:14:a9",
},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
context = next(
flow["context"]
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
assert context["title_placeholders"] == {
"host": "192.168.208.1",
"site": "default",
}
async def test_form_ssdp_aborts_if_host_already_exists(hass):
"""Test we abort if the host is already configured."""
await setup.async_setup_component(hass, "persistent_notification", {})
entry = MockConfigEntry(
domain=UNIFI_DOMAIN,
data={"controller": {"host": "192.168.208.1", "site": "site_id"}},
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
"friendlyName": "UniFi Dream Machine",
"modelDescription": "UniFi Dream Machine Pro",
"ssdp_location": "http://192.168.208.1:41417/rootDesc.xml",
"serialNumber": "e0:63:da:20:14:a9",
},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_form_ssdp_aborts_if_serial_already_exists(hass):
"""Test we abort if the serial is already configured."""
await setup.async_setup_component(hass, "persistent_notification", {})
entry = MockConfigEntry(
domain=UNIFI_DOMAIN,
data={"controller": {"host": "1.2.3.4", "site": "site_id"}},
unique_id="e0:63:da:20:14:a9",
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
"friendlyName": "UniFi Dream Machine",
"modelDescription": "UniFi Dream Machine Pro",
"ssdp_location": "http://192.168.208.1:41417/rootDesc.xml",
"serialNumber": "e0:63:da:20:14:a9",
},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_form_ssdp_gets_form_with_ignored_entry(hass):
"""Test we can still setup if there is an ignored entry."""
await setup.async_setup_component(hass, "persistent_notification", {})
entry = MockConfigEntry(
domain=UNIFI_DOMAIN,
data={"not_controller_key": None},
source=config_entries.SOURCE_IGNORE,
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
"friendlyName": "UniFi Dream Machine New",
"modelDescription": "UniFi Dream Machine Pro",
"ssdp_location": "http://1.2.3.4:41417/rootDesc.xml",
"serialNumber": "e0:63:da:20:14:a9",
},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
context = next(
flow["context"]
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
assert context["title_placeholders"] == {
"host": "1.2.3.4",
"site": "default",
}
| 31.936455
| 88
| 0.60221
|
from unittest.mock import patch
import aiounifi
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.unifi.const import (
CONF_ALLOW_BANDWIDTH_SENSORS,
CONF_ALLOW_UPTIME_SENSORS,
CONF_BLOCK_CLIENT,
CONF_CONTROLLER,
CONF_DETECTION_TIME,
CONF_DPI_RESTRICTIONS,
CONF_IGNORE_WIRED_BUG,
CONF_POE_CLIENTS,
CONF_SITE_ID,
CONF_SSID_FILTER,
CONF_TRACK_CLIENTS,
CONF_TRACK_DEVICES,
CONF_TRACK_WIRED_CLIENTS,
DOMAIN as UNIFI_DOMAIN,
)
from homeassistant.config_entries import SOURCE_REAUTH, SOURCE_USER
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
CONF_VERIFY_SSL,
CONTENT_TYPE_JSON,
)
from .test_controller import setup_unifi_integration
from tests.common import MockConfigEntry
CLIENTS = [{"mac": "00:00:00:00:00:01"}]
DEVICES = [
{
"board_rev": 21,
"device_id": "mock-id",
"ip": "10.0.1.1",
"last_seen": 0,
"mac": "00:00:00:00:01:01",
"model": "U7PG2",
"name": "access_point",
"state": 1,
"type": "uap",
"version": "4.0.80.10875",
"wlan_overrides": [
{
"name": "SSID 3",
"radio": "na",
"radio_name": "wifi1",
"wlan_id": "012345678910111213141516",
},
{
"name": "",
"radio": "na",
"radio_name": "wifi1",
"wlan_id": "012345678910111213141516",
},
{
"radio": "na",
"radio_name": "wifi1",
"wlan_id": "012345678910111213141516",
},
],
}
]
WLANS = [
{"name": "SSID 1"},
{"name": "SSID 2", "name_combine_enabled": False, "name_combine_suffix": "_IOT"},
]
DPI_GROUPS = [
{
"_id": "5ba29dd8e3c58f026e9d7c4a",
"name": "Default",
"site_id": "5ba29dd4e3c58f026e9d7c38",
},
]
async def test_flow_works(hass, aioclient_mock, mock_discovery):
mock_discovery.return_value = "1"
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["data_schema"]({CONF_USERNAME: "", CONF_PASSWORD: ""}) == {
CONF_HOST: "unifi",
CONF_USERNAME: "",
CONF_PASSWORD: "",
CONF_PORT: 443,
CONF_VERIFY_SSL: False,
}
aioclient_mock.get("https://1.2.3.4:1234", status=302)
aioclient_mock.post(
"https://1.2.3.4:1234/api/login",
json={"data": "login successful", "meta": {"rc": "ok"}},
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"https://1.2.3.4:1234/api/self/sites",
json={
"data": [{"desc": "Site name", "name": "site_id", "role": "admin"}],
"meta": {"rc": "ok"},
},
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "Site name"
assert result["data"] == {
CONF_CONTROLLER: {
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_SITE_ID: "site_id",
CONF_VERIFY_SSL: True,
}
}
async def test_flow_works_multiple_sites(hass, aioclient_mock):
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
aioclient_mock.get("https://1.2.3.4:1234", status=302)
aioclient_mock.post(
"https://1.2.3.4:1234/api/login",
json={"data": "login successful", "meta": {"rc": "ok"}},
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"https://1.2.3.4:1234/api/self/sites",
json={
"data": [
{"name": "default", "role": "admin", "desc": "site name"},
{"name": "site2", "role": "admin", "desc": "site2 name"},
],
"meta": {"rc": "ok"},
},
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "site"
assert result["data_schema"]({"site": "default"})
assert result["data_schema"]({"site": "site2"})
async def test_flow_raise_already_configured(hass, aioclient_mock):
await setup_unifi_integration(hass)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
aioclient_mock.get("https://1.2.3.4:1234", status=302)
aioclient_mock.post(
"https://1.2.3.4:1234/api/login",
json={"data": "login successful", "meta": {"rc": "ok"}},
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"https://1.2.3.4:1234/api/self/sites",
json={
"data": [{"desc": "Site name", "name": "site_id", "role": "admin"}],
"meta": {"rc": "ok"},
},
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_flow_aborts_configuration_updated(hass, aioclient_mock):
entry = MockConfigEntry(
domain=UNIFI_DOMAIN, data={"controller": {"host": "1.2.3.4", "site": "office"}}
)
entry.add_to_hass(hass)
entry = MockConfigEntry(
domain=UNIFI_DOMAIN, data={"controller": {"host": "1.2.3.4", "site": "site_id"}}
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
aioclient_mock.get("https://1.2.3.4:1234", status=302)
aioclient_mock.post(
"https://1.2.3.4:1234/api/login",
json={"data": "login successful", "meta": {"rc": "ok"}},
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"https://1.2.3.4:1234/api/self/sites",
json={
"data": [{"desc": "Site name", "name": "site_id", "role": "admin"}],
"meta": {"rc": "ok"},
},
headers={"content-type": CONTENT_TYPE_JSON},
)
with patch("homeassistant.components.unifi.async_setup_entry"):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "configuration_updated"
async def test_flow_fails_user_credentials_faulty(hass, aioclient_mock):
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
aioclient_mock.get("https://1.2.3.4:1234", status=302)
with patch("aiounifi.Controller.login", side_effect=aiounifi.errors.Unauthorized):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "faulty_credentials"}
async def test_flow_fails_controller_unavailable(hass, aioclient_mock):
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
aioclient_mock.get("https://1.2.3.4:1234", status=302)
with patch("aiounifi.Controller.login", side_effect=aiounifi.errors.RequestError):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "service_unavailable"}
async def test_reauth_flow_update_configuration(hass, aioclient_mock):
controller = await setup_unifi_integration(hass)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN,
context={"source": SOURCE_REAUTH},
data=controller.config_entry,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == SOURCE_USER
aioclient_mock.get("https://1.2.3.4:1234", status=302)
aioclient_mock.post(
"https://1.2.3.4:1234/api/login",
json={"data": "login successful", "meta": {"rc": "ok"}},
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"https://1.2.3.4:1234/api/self/sites",
json={
"data": [{"desc": "Site name", "name": "site_id", "role": "admin"}],
"meta": {"rc": "ok"},
},
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "new_name",
CONF_PASSWORD: "new_pass",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "reauth_successful"
assert controller.host == "1.2.3.4"
assert controller.config_entry.data[CONF_CONTROLLER][CONF_USERNAME] == "new_name"
assert controller.config_entry.data[CONF_CONTROLLER][CONF_PASSWORD] == "new_pass"
async def test_advanced_option_flow(hass):
controller = await setup_unifi_integration(
hass,
clients_response=CLIENTS,
devices_response=DEVICES,
wlans_response=WLANS,
dpigroup_response=DPI_GROUPS,
dpiapp_response=[],
)
result = await hass.config_entries.options.async_init(
controller.config_entry.entry_id, context={"show_advanced_options": True}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "device_tracker"
assert set(
result["data_schema"].schema[CONF_SSID_FILTER].options.keys()
).intersection(("SSID 1", "SSID 2", "SSID 2_IOT", "SSID 3"))
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_TRACK_CLIENTS: False,
CONF_TRACK_WIRED_CLIENTS: False,
CONF_TRACK_DEVICES: False,
CONF_SSID_FILTER: ["SSID 1", "SSID 2_IOT", "SSID 3"],
CONF_DETECTION_TIME: 100,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "client_control"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]],
CONF_POE_CLIENTS: False,
CONF_DPI_RESTRICTIONS: False,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "statistics_sensors"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_ALLOW_BANDWIDTH_SENSORS: True,
CONF_ALLOW_UPTIME_SENSORS: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_TRACK_CLIENTS: False,
CONF_TRACK_WIRED_CLIENTS: False,
CONF_TRACK_DEVICES: False,
CONF_SSID_FILTER: ["SSID 1", "SSID 2_IOT", "SSID 3"],
CONF_DETECTION_TIME: 100,
CONF_IGNORE_WIRED_BUG: False,
CONF_POE_CLIENTS: False,
CONF_DPI_RESTRICTIONS: False,
CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]],
CONF_ALLOW_BANDWIDTH_SENSORS: True,
CONF_ALLOW_UPTIME_SENSORS: True,
}
async def test_simple_option_flow(hass):
controller = await setup_unifi_integration(
hass,
clients_response=CLIENTS,
wlans_response=WLANS,
dpigroup_response=DPI_GROUPS,
dpiapp_response=[],
)
result = await hass.config_entries.options.async_init(
controller.config_entry.entry_id, context={"show_advanced_options": False}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "simple_options"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_TRACK_CLIENTS: False,
CONF_TRACK_DEVICES: False,
CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]],
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_TRACK_CLIENTS: False,
CONF_TRACK_DEVICES: False,
CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]],
}
async def test_form_ssdp(hass):
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
"friendlyName": "UniFi Dream Machine",
"modelDescription": "UniFi Dream Machine Pro",
"ssdp_location": "http://192.168.208.1:41417/rootDesc.xml",
"serialNumber": "e0:63:da:20:14:a9",
},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
context = next(
flow["context"]
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
assert context["title_placeholders"] == {
"host": "192.168.208.1",
"site": "default",
}
async def test_form_ssdp_aborts_if_host_already_exists(hass):
await setup.async_setup_component(hass, "persistent_notification", {})
entry = MockConfigEntry(
domain=UNIFI_DOMAIN,
data={"controller": {"host": "192.168.208.1", "site": "site_id"}},
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
"friendlyName": "UniFi Dream Machine",
"modelDescription": "UniFi Dream Machine Pro",
"ssdp_location": "http://192.168.208.1:41417/rootDesc.xml",
"serialNumber": "e0:63:da:20:14:a9",
},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_form_ssdp_aborts_if_serial_already_exists(hass):
await setup.async_setup_component(hass, "persistent_notification", {})
entry = MockConfigEntry(
domain=UNIFI_DOMAIN,
data={"controller": {"host": "1.2.3.4", "site": "site_id"}},
unique_id="e0:63:da:20:14:a9",
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
"friendlyName": "UniFi Dream Machine",
"modelDescription": "UniFi Dream Machine Pro",
"ssdp_location": "http://192.168.208.1:41417/rootDesc.xml",
"serialNumber": "e0:63:da:20:14:a9",
},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_form_ssdp_gets_form_with_ignored_entry(hass):
await setup.async_setup_component(hass, "persistent_notification", {})
entry = MockConfigEntry(
domain=UNIFI_DOMAIN,
data={"not_controller_key": None},
source=config_entries.SOURCE_IGNORE,
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
"friendlyName": "UniFi Dream Machine New",
"modelDescription": "UniFi Dream Machine Pro",
"ssdp_location": "http://1.2.3.4:41417/rootDesc.xml",
"serialNumber": "e0:63:da:20:14:a9",
},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
context = next(
flow["context"]
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
assert context["title_placeholders"] == {
"host": "1.2.3.4",
"site": "default",
}
| true
| true
|
790e2088846b5164ace41ef7c8aefc6f52e063c5
| 4,244
|
py
|
Python
|
activity/womail/dailyTask.py
|
haygcao/UnicomDailyTask
|
08bb6c7de320a9355dfff066ebff9bf72d7619b4
|
[
"MIT"
] | 148
|
2021-09-13T03:20:28.000Z
|
2022-03-30T02:45:44.000Z
|
activity/womail/dailyTask.py
|
haygcao/UnicomDailyTask
|
08bb6c7de320a9355dfff066ebff9bf72d7619b4
|
[
"MIT"
] | 91
|
2021-09-13T03:20:05.000Z
|
2022-03-31T16:57:17.000Z
|
activity/womail/dailyTask.py
|
haygcao/UnicomDailyTask
|
08bb6c7de320a9355dfff066ebff9bf72d7619b4
|
[
"MIT"
] | 88
|
2021-09-14T09:33:42.000Z
|
2022-03-30T14:31:37.000Z
|
# -*- coding: utf8 -*-
import json
from activity.womail.womail import WoMail
class DailySign(WoMail):
def __init__(self, mobile, openId):
super(DailySign, self).__init__(mobile, openId)
self.session.headers.update({
# 'Origin': 'https://nyan.mail.wo.cn',
'Referer': 'https://nyan.mail.wo.cn/cn/sign/wap/index.html',
'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; MI 8 SE Build/OPM1.171019.019; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/78.0.3904.62 XWEB/2797 MMWEBSDK/20210501 Mobile Safari/537.36 MMWEBID/107 MicroMessenger/8.0.6.1900(0x28000635) Process/toolsmp WeChat/arm64 Weixin NetType/4G Language/zh_CN ABI/arm64',
'X-Requested-With': 'com.tencent.mm' # XMLHttpRequest
})
self.message = ''
def login(self):
url = f'https://nyan.mail.wo.cn/cn/sign/index/index?mobile={self.mobile}&userName=&openId={self.openId}'
self.session.get(url=url)
print(self.session.cookies.get_dict())
def index(self):
url = 'https://nyan.mail.wo.cn/cn/sign/wap/index.html'
self.session.get(url=url)
def userInfo(self):
url = f'https://nyan.mail.wo.cn/cn/sign/index/userinfo.do?rand={self.randomNum}'
resp = self.session.post(url=url)
data = resp.json()
try:
print(json.dumps(data, indent=4, ensure_ascii=False))
return str(data['result']['lastDay']), str(data['result']['keepSign'])
except:
print(resp.text)
def isLogin(self):
url = f'https://nyan.mail.wo.cn/cn/sign/user/isLoginMail.do?rand={self.randomNum}'
resp = self.session.post(url=url)
print(resp.text)
def check(self):
url = f'https://nyan.mail.wo.cn/cn/sign/user/checkin.do?rand={self.randomNum}'
resp = self.session.post(url=url)
print(resp.text)
def prizeDetail(self):
url = f'https://nyan.mail.wo.cn/cn/sign/user/prizes.do?rand={self.randomNum}'
resp = self.session.post(url=url)
data = resp.json()
if len(data['result']) > 3:
data['result'] = data['result'][:3]
print(json.dumps(data, indent=4, ensure_ascii=False))
def doTask(self, task_name):
url = f'https://nyan.mail.wo.cn/cn/sign/user/doTask.do?rand={self.randomNum}'
data = {
'taskName': task_name
}
resp = self.session.post(url=url, data=data)
print(resp.text)
def overTask(self):
url = f'https://nyan.mail.wo.cn/cn/sign/user/overtask.do?rand={self.randomNum}'
data = {
'taskLevel': '2'
}
resp = self.session.post(url=url, data=data)
data = resp.json()
print(json.dumps(data, indent=4, ensure_ascii=False))
result = [item['taskName'] for item in data['result']]
# data = {
# 'taskLevel': '1'
# }
return result
def run(self):
if int(self.now_date.replace('-', '')) > 20220228:
return
try:
self.login()
self.index()
result = self.overTask()
for task_name in ["loginmail", "clubactivity", "club"]: # , "download"
if task_name in result:
continue
self.doTask(task_name)
self.flushTime(1)
else:
print("积分签到任务已完成")
lastDay, keepSign = self.userInfo()
if keepSign == '21':
                print('跳过21天之后的打卡')  # i.e. "skip check-ins after day 21"
                self.message = '每日签到: 跳过21天之后的打卡'  # i.e. "daily sign-in: skipped after day 21"
self.recordLog(self.message)
return
else:
if self.now_date.replace('-', '') == lastDay:
print("今日已打卡")
return
else:
self.check()
self.prizeDetail()
lastDay, _ = self.userInfo()
if self.now_date.replace('-', '') == lastDay:
                self.message = '每日签到: 已签到'  # i.e. "daily sign-in: signed in"
else:
                self.message = '每日签到: 未签到'  # i.e. "daily sign-in: not signed in"
self.recordLog(self.message)
except Exception as e:
print(e)
if __name__ == "__main__":
pass
| 36.273504
| 340
| 0.545476
|
import json
from activity.womail.womail import WoMail
class DailySign(WoMail):
def __init__(self, mobile, openId):
super(DailySign, self).__init__(mobile, openId)
self.session.headers.update({
'Referer': 'https://nyan.mail.wo.cn/cn/sign/wap/index.html',
'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; MI 8 SE Build/OPM1.171019.019; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/78.0.3904.62 XWEB/2797 MMWEBSDK/20210501 Mobile Safari/537.36 MMWEBID/107 MicroMessenger/8.0.6.1900(0x28000635) Process/toolsmp WeChat/arm64 Weixin NetType/4G Language/zh_CN ABI/arm64',
'X-Requested-With': 'com.tencent.mm'
})
self.message = ''
def login(self):
url = f'https://nyan.mail.wo.cn/cn/sign/index/index?mobile={self.mobile}&userName=&openId={self.openId}'
self.session.get(url=url)
print(self.session.cookies.get_dict())
def index(self):
url = 'https://nyan.mail.wo.cn/cn/sign/wap/index.html'
self.session.get(url=url)
def userInfo(self):
url = f'https://nyan.mail.wo.cn/cn/sign/index/userinfo.do?rand={self.randomNum}'
resp = self.session.post(url=url)
data = resp.json()
try:
print(json.dumps(data, indent=4, ensure_ascii=False))
return str(data['result']['lastDay']), str(data['result']['keepSign'])
except:
print(resp.text)
def isLogin(self):
url = f'https://nyan.mail.wo.cn/cn/sign/user/isLoginMail.do?rand={self.randomNum}'
resp = self.session.post(url=url)
print(resp.text)
def check(self):
url = f'https://nyan.mail.wo.cn/cn/sign/user/checkin.do?rand={self.randomNum}'
resp = self.session.post(url=url)
print(resp.text)
def prizeDetail(self):
url = f'https://nyan.mail.wo.cn/cn/sign/user/prizes.do?rand={self.randomNum}'
resp = self.session.post(url=url)
data = resp.json()
if len(data['result']) > 3:
data['result'] = data['result'][:3]
print(json.dumps(data, indent=4, ensure_ascii=False))
def doTask(self, task_name):
url = f'https://nyan.mail.wo.cn/cn/sign/user/doTask.do?rand={self.randomNum}'
data = {
'taskName': task_name
}
resp = self.session.post(url=url, data=data)
print(resp.text)
def overTask(self):
url = f'https://nyan.mail.wo.cn/cn/sign/user/overtask.do?rand={self.randomNum}'
data = {
'taskLevel': '2'
}
resp = self.session.post(url=url, data=data)
data = resp.json()
print(json.dumps(data, indent=4, ensure_ascii=False))
result = [item['taskName'] for item in data['result']]
return result
def run(self):
if int(self.now_date.replace('-', '')) > 20220228:
return
try:
self.login()
self.index()
result = self.overTask()
for task_name in ["loginmail", "clubactivity", "club"]:
if task_name in result:
continue
self.doTask(task_name)
self.flushTime(1)
else:
print("积分签到任务已完成")
lastDay, keepSign = self.userInfo()
if keepSign == '21':
print('跳过21天之后的打卡')
self.message = '每日签到: 跳过21天之后的打卡'
self.recordLog(self.message)
return
else:
if self.now_date.replace('-', '') == lastDay:
print("今日已打卡")
return
else:
self.check()
self.prizeDetail()
lastDay, _ = self.userInfo()
if self.now_date.replace('-', '') == lastDay:
self.message = '每日签到: 已签到'
else:
self.message = '每日签到: 未签到'
self.recordLog(self.message)
except Exception as e:
print(e)
if __name__ == "__main__":
pass
| true
| true
|
790e213f39a5e51fd0b29b215b73aaff94186dbc
| 4,135
|
py
|
Python
|
prepare_datasets_DRIVE.py
|
Hacker-007/E2
|
efb829da84734abfc6ac10e1ea20b5dcfd99c7f1
|
[
"MIT"
] | null | null | null |
prepare_datasets_DRIVE.py
|
Hacker-007/E2
|
efb829da84734abfc6ac10e1ea20b5dcfd99c7f1
|
[
"MIT"
] | null | null | null |
prepare_datasets_DRIVE.py
|
Hacker-007/E2
|
efb829da84734abfc6ac10e1ea20b5dcfd99c7f1
|
[
"MIT"
] | null | null | null |
#==========================================================
#
#  This prepares the hdf5 datasets of the DRIVE database
#
#============================================================
import os
import h5py
import numpy as np
from PIL import Image
#content/add2/E2/DRIVE_datasets_training_testing
def write_hdf5(arr,outfile):
with h5py.File(outfile,"w") as f:
f.create_dataset("image", data=arr, dtype=arr.dtype)
#------------Path of the images --------------------------------------------------------------
#train
original_imgs_train = "/content/add2/E2/training/images/"
groundTruth_imgs_train = "/content/add2/E2/training/1st_manual/"
borderMasks_imgs_train = "/content/add2/E2/training/mask/"
#test
original_imgs_test = "/content/add2/E2//test/images/"
groundTruth_imgs_test = "/content/add2/E2/test/1st_manual/"
borderMasks_imgs_test = "content/add2/E2/test/mask/"
#---------------------------------------------------------------------------------------------
Nimgs = 20
channels = 3
height = 584
width = 565
dataset_path = "/content/add2/E2/DRIVE_datasets_training_testing/"
def get_datasets(imgs_dir,groundTruth_dir,borderMasks_dir,train_test="null"):
imgs = np.empty((Nimgs,height,width,channels))
groundTruth = np.empty((Nimgs,height,width))
border_masks = np.empty((Nimgs,height,width))
for path, subdirs, files in os.walk(imgs_dir): #list all files, directories in the path
for i in range(len(files)):
#original
print ("original image: " +files[i])
img = Image.open(imgs_dir+files[i])
imgs[i] = np.asarray(img)
#corresponding ground truth
groundTruth_name = files[i][0:2] + "_manual1.gif"
print ("ground truth name: " + groundTruth_name)
g_truth = Image.open(groundTruth_dir + groundTruth_name)
groundTruth[i] = np.asarray(g_truth)
#corresponding border masks
border_masks_name = ""
if train_test=="train":
border_masks_name = files[i][0:2] + "_training_mask.gif"
elif train_test=="test":
border_masks_name = files[i][0:2] + "_test_mask.gif"
else:
print ("specify if train or test!!")
exit()
print ("border masks name: " + border_masks_name)
b_mask = Image.open(borderMasks_dir + border_masks_name)
border_masks[i] = np.asarray(b_mask)
print ("imgs max: " +str(np.max(imgs)))
print ("imgs min: " +str(np.min(imgs)))
assert(np.max(groundTruth)==255 and np.max(border_masks)==255)
assert(np.min(groundTruth)==0 and np.min(border_masks)==0)
print ("ground truth and border masks are correctly withih pixel value range 0-255 (black-white)")
#reshaping for my standard tensors
imgs = np.transpose(imgs,(0,3,1,2))
assert(imgs.shape == (Nimgs,channels,height,width))
groundTruth = np.reshape(groundTruth,(Nimgs,1,height,width))
border_masks = np.reshape(border_masks,(Nimgs,1,height,width))
assert(groundTruth.shape == (Nimgs,1,height,width))
assert(border_masks.shape == (Nimgs,1,height,width))
return imgs, groundTruth, border_masks
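# For reference (these follow from the constants above): get_datasets returns
# images of shape (20, 3, 584, 565) and ground truth / border masks of shape
# (20, 1, 584, 565).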
if not os.path.exists(dataset_path):
os.makedirs(dataset_path)
#getting the training datasets
imgs_train, groundTruth_train, border_masks_train = get_datasets(original_imgs_train,groundTruth_imgs_train,borderMasks_imgs_train,"train")
print ("saving train datasets")
write_hdf5(imgs_train, dataset_path + "DRIVE_dataset_imgs_train.hdf5")
write_hdf5(groundTruth_train, dataset_path + "DRIVE_dataset_groundTruth_train.hdf5")
write_hdf5(border_masks_train,dataset_path + "DRIVE_dataset_borderMasks_train.hdf5")
#getting the testing datasets
imgs_test, groundTruth_test, border_masks_test = get_datasets(original_imgs_test,groundTruth_imgs_test,borderMasks_imgs_test,"test")
print ("saving test datasets")
write_hdf5(imgs_test,dataset_path + "DRIVE_dataset_imgs_test.hdf5")
write_hdf5(groundTruth_test, dataset_path + "DRIVE_dataset_groundTruth_test.hdf5")
write_hdf5(border_masks_test,dataset_path + "DRIVE_dataset_borderMasks_test.hdf5")
| 44.462366
| 139
| 0.662636
|
import os
import h5py
import numpy as np
from PIL import Image
def write_hdf5(arr,outfile):
with h5py.File(outfile,"w") as f:
f.create_dataset("image", data=arr, dtype=arr.dtype)
original_imgs_train = "/content/add2/E2/training/images/"
groundTruth_imgs_train = "/content/add2/E2/training/1st_manual/"
borderMasks_imgs_train = "/content/add2/E2/training/mask/"
original_imgs_test = "/content/add2/E2//test/images/"
groundTruth_imgs_test = "/content/add2/E2/test/1st_manual/"
borderMasks_imgs_test = "content/add2/E2/test/mask/"
Nimgs = 20
channels = 3
height = 584
width = 565
dataset_path = "/content/add2/E2/DRIVE_datasets_training_testing/"
def get_datasets(imgs_dir,groundTruth_dir,borderMasks_dir,train_test="null"):
imgs = np.empty((Nimgs,height,width,channels))
groundTruth = np.empty((Nimgs,height,width))
border_masks = np.empty((Nimgs,height,width))
for path, subdirs, files in os.walk(imgs_dir):
for i in range(len(files)):
print ("original image: " +files[i])
img = Image.open(imgs_dir+files[i])
imgs[i] = np.asarray(img)
groundTruth_name = files[i][0:2] + "_manual1.gif"
print ("ground truth name: " + groundTruth_name)
g_truth = Image.open(groundTruth_dir + groundTruth_name)
groundTruth[i] = np.asarray(g_truth)
border_masks_name = ""
if train_test=="train":
border_masks_name = files[i][0:2] + "_training_mask.gif"
elif train_test=="test":
border_masks_name = files[i][0:2] + "_test_mask.gif"
else:
print ("specify if train or test!!")
exit()
print ("border masks name: " + border_masks_name)
b_mask = Image.open(borderMasks_dir + border_masks_name)
border_masks[i] = np.asarray(b_mask)
print ("imgs max: " +str(np.max(imgs)))
print ("imgs min: " +str(np.min(imgs)))
assert(np.max(groundTruth)==255 and np.max(border_masks)==255)
assert(np.min(groundTruth)==0 and np.min(border_masks)==0)
print ("ground truth and border masks are correctly withih pixel value range 0-255 (black-white)")
imgs = np.transpose(imgs,(0,3,1,2))
assert(imgs.shape == (Nimgs,channels,height,width))
groundTruth = np.reshape(groundTruth,(Nimgs,1,height,width))
border_masks = np.reshape(border_masks,(Nimgs,1,height,width))
assert(groundTruth.shape == (Nimgs,1,height,width))
assert(border_masks.shape == (Nimgs,1,height,width))
return imgs, groundTruth, border_masks
if not os.path.exists(dataset_path):
os.makedirs(dataset_path)
imgs_train, groundTruth_train, border_masks_train = get_datasets(original_imgs_train,groundTruth_imgs_train,borderMasks_imgs_train,"train")
print ("saving train datasets")
write_hdf5(imgs_train, dataset_path + "DRIVE_dataset_imgs_train.hdf5")
write_hdf5(groundTruth_train, dataset_path + "DRIVE_dataset_groundTruth_train.hdf5")
write_hdf5(border_masks_train,dataset_path + "DRIVE_dataset_borderMasks_train.hdf5")
imgs_test, groundTruth_test, border_masks_test = get_datasets(original_imgs_test,groundTruth_imgs_test,borderMasks_imgs_test,"test")
print ("saving test datasets")
write_hdf5(imgs_test,dataset_path + "DRIVE_dataset_imgs_test.hdf5")
write_hdf5(groundTruth_test, dataset_path + "DRIVE_dataset_groundTruth_test.hdf5")
write_hdf5(border_masks_test,dataset_path + "DRIVE_dataset_borderMasks_test.hdf5")
| true
| true
|
790e22238d6b930f0cab30d29405c0884e850539
| 1,659
|
py
|
Python
|
src/Simple_Fraud_Detection/solution/01_fill_fraud_db_with_nodes.py
|
LFeret/masterseminar
|
9ec038f7a5b1dc9725ef0c460b147ee26dd6ab2b
|
[
"MIT"
] | null | null | null |
src/Simple_Fraud_Detection/solution/01_fill_fraud_db_with_nodes.py
|
LFeret/masterseminar
|
9ec038f7a5b1dc9725ef0c460b147ee26dd6ab2b
|
[
"MIT"
] | null | null | null |
src/Simple_Fraud_Detection/solution/01_fill_fraud_db_with_nodes.py
|
LFeret/masterseminar
|
9ec038f7a5b1dc9725ef0c460b147ee26dd6ab2b
|
[
"MIT"
] | null | null | null |
import os
import sys
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
)
from src.DbHelper import DbHelper
persons = [
'Lucy',
'Franz',
'Susanne',
'Jonathan',
'Max',
'Stephan',
'Julian',
'Frederike',
'Amy',
'Miriam',
'Jonas',
'Anna',
'Sebastian'
]
addresses = [ f'Musterstraße {i}' for i in range(1,11)]
accounts = [ f'Bank Account {i}' for i in range(1, 14)]
phones = [f'Phone Number {i}' for i in range(1,12)]
creditcards = [f'Credit Card Number {i}' for i in range(1,14)]
socialsecuritynumbers = [f'SSN {i}' for i in range(1,10)]
nodes = {
'Person':('name', persons),
'Address':('address', addresses),
'BankAccount':('account', accounts),
'CreditCard':('number', creditcards),
'SSN':('ssn', socialsecuritynumbers)
}
if __name__ == "__main__":
# See https://neo4j.com/developer/aura-connect-driver/ for Aura specific connection URL.
scheme = "neo4j" # Connecting to Aura, use the "neo4j+s" URI scheme
host_name = "localhost"
port = 7687 # Bolt Port https://neo4j.com/docs/operations-manual/current/configuration/ports/ | .NET | Java | JavaScript | Go | Python
url = f"{scheme}://{host_name}:{port}"
user = 'neo4j'
password = 'neo4j'
db_helper = DbHelper(url, user, password)
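    # Each loop iteration below issues one CREATE per property value, e.g.
    # CREATE (node:Person {name: "Lucy" }) RETURN node.name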
for Label, values in nodes.items():
PropertyKey = values[0]
for PropertyValue in values[1]:
db_helper.run_query(
'CREATE (node:' + Label + ' {' + PropertyKey + ': "' + PropertyValue + '" }) RETURN node.' + PropertyKey
)
db_helper.close()
| 28.118644
| 139
| 0.614225
|
import os
import sys
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
)
from src.DbHelper import DbHelper
persons = [
'Lucy',
'Franz',
'Susanne',
'Jonathan',
'Max',
'Stephan',
'Julian',
'Frederike',
'Amy',
'Miriam',
'Jonas',
'Anna',
'Sebastian'
]
addresses = [ f'Musterstraße {i}' for i in range(1,11)]
accounts = [ f'Bank Account {i}' for i in range(1, 14)]
phones = [f'Phone Number {i}' for i in range(1,12)]
creditcards = [f'Credit Card Number {i}' for i in range(1,14)]
socialsecuritynumbers = [f'SSN {i}' for i in range(1,10)]
nodes = {
'Person':('name', persons),
'Address':('address', addresses),
'BankAccount':('account', accounts),
'CreditCard':('number', creditcards),
'SSN':('ssn', socialsecuritynumbers)
}
if __name__ == "__main__":
scheme = "neo4j"
host_name = "localhost"
port = 7687
url = f"{scheme}://{host_name}:{port}"
user = 'neo4j'
password = 'neo4j'
db_helper = DbHelper(url, user, password)
for Label, values in nodes.items():
PropertyKey = values[0]
for PropertyValue in values[1]:
db_helper.run_query(
'CREATE (node:' + Label + ' {' + PropertyKey + ': "' + PropertyValue + '" }) RETURN node.' + PropertyKey
)
db_helper.close()
| true
| true
|
790e239bb26417d30d76967fc7b1f92fb0be852f
| 10,953
|
py
|
Python
|
theano/sandbox/neighbourhoods.py
|
jych/Theano
|
d7d722faa96aac95c19f460bf60e8e8654ff58df
|
[
"BSD-3-Clause"
] | null | null | null |
theano/sandbox/neighbourhoods.py
|
jych/Theano
|
d7d722faa96aac95c19f460bf60e8e8654ff58df
|
[
"BSD-3-Clause"
] | null | null | null |
theano/sandbox/neighbourhoods.py
|
jych/Theano
|
d7d722faa96aac95c19f460bf60e8e8654ff58df
|
[
"BSD-3-Clause"
] | null | null | null |
"""WARNING: This code is not recommanded. It is not finished, it is
slower then the version in sandbox/neighbours.py, and it do not work
on the GPU.
We only keep this version here as it is a little bit more generic, so
it cover more cases. But thoses cases aren't needed frequently, so you
probably don't want to use this version, go see neighbours.py!!!!!!!
"""
import numpy
from six.moves import xrange
import six.moves.builtins as builtins
import theano
from theano import gof, Op
class NeighbourhoodsFromImages(Op):
__props__ = ("n_dims_before", "dims_neighbourhoods", "strides",
"ignore_border", "inverse")
def __init__(self, n_dims_before, dims_neighbourhoods,
strides=None, ignore_border=False, inverse=False):
"""
This extracts neighbourhoods from "images", but in a
dimension-generic manner.
In the 2D case, this is similar to downsampling, but instead of reducing
a group of 2x2 pixels (for example) to a single new pixel in the output,
you place those 4 pixels in a row.
For example, say you have this 2x4 image::
[ [ 0.5, 0.6, 0.7, 0.8 ],
[ 0.1, 0.2, 0.3, 0.4 ] ]
and you want to extract 2x2 neighbourhoods. This op would then produce::
[ [ [ 0.5, 0.6, 0.1, 0.2 ] ], # the first 2x2 group of pixels
[ [ 0.7, 0.8, 0.3, 0.4 ] ] ] # the second one
so think of a 2D downsampling where each pixel of the resulting array
is replaced by an array containing the (flattened) pixels of the
corresponding neighbourhood.
If you provide a stack of 2D image, or multiple stacks, each image
will be treated independently, and the first dimensions of the array
will be preserved as such.
This also makes sense in the 1D or 3D case. Below I'll still be calling
those "images", by analogy.
In the 1D case, you're
extracting subsequences from the original sequence. In the 3D case,
you're extracting cuboids. If you ever find a 4D use, tell me! It
should be possible, anyhow.
Parameters
----------
n_dims_before : int
Number of dimensions preceding the "images".
dims_neighbourhoods : tuple of ints
Exact shape of windows to be extracted (e.g. (2,2) in the case above).
n_dims_before + len(dims_neighbourhoods) should be equal to the
number of dimensions in the input given to the op.
strides : tuple of int
Number of elements to skip when moving to the next neighbourhood,
for each dimension of dims_neighbourhoods. There can be overlap
between neighbourhoods, or gaps.
ignore_border : bool
If the dimensions of the neighbourhoods don't exactly divide the
dimensions of the "images", you can either fill the last
neighbourhood with zeros (False) or drop it entirely (True).
inverse : bool
You shouldn't have to use this. Only used by child class
ImagesFromNeighbourhoods which simply reverses the assignment.
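        Examples
        --------
        An illustrative construction (added for clarity, not part of the
        original docstring): the 2x2 case above, with no leading dimensions
        and default strides, corresponds to
        ``NeighbourhoodsFromImages(0, (2, 2))``; apply the op to a tensor
        variable holding the image to obtain the flattened neighbourhoods.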
"""
self.n_dims_before = n_dims_before
self.dims_neighbourhoods = dims_neighbourhoods
if strides is not None:
self.strides = strides
else:
self.strides = dims_neighbourhoods
self.ignore_border = ignore_border
self.inverse = inverse
self.code_string, self.code = self.make_py_code()
def __str__(self):
return '%s{%s,%s,%s,%s}' % (self.__class__.__name__,
self.n_dims_before,
self.dims_neighbourhoods,
self.strides,
self.ignore_border)
def out_shape(self, input_shape):
dims = list(input_shape[:self.n_dims_before])
num_strides = [0 for i in xrange(len(self.strides))]
neigh_flattened_dim = 1
for i, ds in enumerate(self.dims_neighbourhoods):
cur_stride = self.strides[i]
input_dim = input_shape[i + self.n_dims_before]
target_dim = input_dim // cur_stride
if not self.ignore_border and (input_dim % cur_stride) != 0:
target_dim += 1
num_strides[i] = target_dim
dims.append(target_dim)
neigh_flattened_dim *= ds
dims.append(neigh_flattened_dim)
return dims, num_strides
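    # Illustration (not in the original source): with n_dims_before=1,
    # dims_neighbourhoods=(2, 2), strides=(2, 2) and input_shape=(5, 6, 8),
    # this returns dims=[5, 3, 4, 4] and num_strides=[3, 4].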
# for inverse mode
# "output" here actually referes to the Op's input shape (but it's inverse mode)
def in_shape(self, output_shape):
out_dims = list(output_shape[:self.n_dims_before])
num_strides = []
# in the inverse case we don't worry about borders:
# they either have been filled with zeros, or have been cropped
for i, ds in enumerate(self.dims_neighbourhoods):
# the number of strides performed by NeighFromImg is
# directly given by this shape
num_strides.append(output_shape[self.n_dims_before + i])
# our Op's output image must be at least this wide
at_least_width = num_strides[i] * self.strides[i]
# ... which gives us this number of neighbourhoods
num_neigh = at_least_width // ds
if at_least_width % ds != 0:
num_neigh += 1
# making the final Op's output dimension this wide
out_dims.append(num_neigh * ds)
return out_dims, num_strides
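    # Illustration (not in the original source): reversing the case above,
    # output_shape=(5, 3, 4, 4) with the same op settings yields
    # out_dims=[5, 6, 8] and num_strides=[3, 4].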
def make_node(self, x):
x = theano.tensor.as_tensor_variable(x)
if self.inverse:
# +1 in the inverse case
if x.type.ndim != (self.n_dims_before +
len(self.dims_neighbourhoods) + 1):
raise TypeError()
else:
if x.type.ndim != (self.n_dims_before +
len(self.dims_neighbourhoods)):
raise TypeError()
return gof.Apply(self, [x], [x.type()])
def perform(self, node, inp, out):
x, = inp
z, = out
if self.inverse:
# +1 in the inverse case
if len(x.shape) != (self.n_dims_before +
len(self.dims_neighbourhoods) + 1):
raise ValueError("Images passed as input don't match the "
"dimensions passed when this (inversed) "
"Apply node was created")
prod = 1
for dim in self.dims_neighbourhoods:
prod *= dim
if x.shape[-1] != prod:
raise ValueError("Last dimension of neighbourhoods (%s) is not"
" the product of the neighbourhoods dimensions"
" (%s)" % (str(x.shape[-1]), str(prod)))
else:
if len(x.shape) != (self.n_dims_before +
len(self.dims_neighbourhoods)):
raise ValueError("Images passed as input don't match the "
"dimensions passed when this Apply node "
"was created")
if self.inverse:
input_shape, num_strides = self.in_shape(x.shape)
out_shape, dummy = self.out_shape(input_shape)
else:
input_shape = x.shape
out_shape, num_strides = self.out_shape(input_shape)
if z[0] is None:
if self.inverse:
z[0] = numpy.zeros(input_shape)
else:
z[0] = numpy.zeros(out_shape)
z[0] = theano._asarray(z[0], dtype=x.dtype)
exec(self.code)
def make_py_code(self):
code = self._py_outerloops()
for i in xrange(len(self.strides)):
code += self._py_innerloop(i)
code += self._py_assignment()
return code, builtins.compile(code, '<string>', 'exec')
def _py_outerloops(self):
code_before = ""
for dim_idx in xrange(self.n_dims_before):
code_before += ('\t' * (dim_idx)) + \
"for outer_idx_%d in xrange(input_shape[%d]):\n" % \
(dim_idx, dim_idx)
return code_before
def _py_innerloop(self, inner_dim_no):
base_indent = ('\t' * (self.n_dims_before + inner_dim_no * 2))
code_before = base_indent + \
"for stride_idx_%d in xrange(num_strides[%d]):\n" % \
(inner_dim_no, inner_dim_no)
base_indent += '\t'
code_before += base_indent + \
"dim_%d_offset = stride_idx_%d * self.strides[%d]\n" %\
(inner_dim_no, inner_dim_no, inner_dim_no)
code_before += base_indent + \
"max_neigh_idx_%d = input_shape[%d] - dim_%d_offset\n" % \
(inner_dim_no, self.n_dims_before + inner_dim_no, inner_dim_no)
code_before += base_indent + \
("for neigh_idx_%d in xrange(min(max_neigh_idx_%d,"
" self.dims_neighbourhoods[%d])):\n") %\
(inner_dim_no, inner_dim_no, inner_dim_no)
return code_before
def _py_flattened_idx(self):
return "+".join(["neigh_strides[%d]*neigh_idx_%d" % (i, i)
for i in xrange(len(self.strides))])
def _py_assignment(self):
input_idx = "".join(["outer_idx_%d," % (i,)
for i in xrange(self.n_dims_before)])
input_idx += "".join(["dim_%d_offset+neigh_idx_%d," %
(i, i) for i in xrange(len(self.strides))])
out_idx = "".join(
["outer_idx_%d," % (i,) for i in xrange(self.n_dims_before)] +
["stride_idx_%d," % (i,) for i in xrange(len(self.strides))])
out_idx += self._py_flattened_idx()
# return_val = '\t' * (self.n_dims_before + len(self.strides)*2)
# return_val += "print "+input_idx+"'\\n',"+out_idx+"\n"
return_val = '\t' * (self.n_dims_before + len(self.strides) * 2)
if self.inverse:
# remember z and x are inversed:
# z is the Op's output, but has input_shape
# x is the Op's input, but has out_shape
return_val += "z[0][%s] = x[%s]\n" % (input_idx, out_idx)
else:
return_val += "z[0][%s] = x[%s]\n" % (out_idx, input_idx)
return return_val
class ImagesFromNeighbourhoods(NeighbourhoodsFromImages):
def __init__(self, n_dims_before, dims_neighbourhoods,
strides=None, ignore_border=False):
NeighbourhoodsFromImages.__init__(self, n_dims_before,
dims_neighbourhoods,
strides=strides,
ignore_border=ignore_border,
inverse=True)
# and that's all there is to it
| 40.869403
| 84
| 0.570711
|
import numpy
from six.moves import xrange
import six.moves.builtins as builtins
import theano
from theano import gof, Op
class NeighbourhoodsFromImages(Op):
__props__ = ("n_dims_before", "dims_neighbourhoods", "strides",
"ignore_border", "inverse")
def __init__(self, n_dims_before, dims_neighbourhoods,
strides=None, ignore_border=False, inverse=False):
self.n_dims_before = n_dims_before
self.dims_neighbourhoods = dims_neighbourhoods
if strides is not None:
self.strides = strides
else:
self.strides = dims_neighbourhoods
self.ignore_border = ignore_border
self.inverse = inverse
self.code_string, self.code = self.make_py_code()
def __str__(self):
return '%s{%s,%s,%s,%s}' % (self.__class__.__name__,
self.n_dims_before,
self.dims_neighbourhoods,
self.strides,
self.ignore_border)
def out_shape(self, input_shape):
dims = list(input_shape[:self.n_dims_before])
num_strides = [0 for i in xrange(len(self.strides))]
neigh_flattened_dim = 1
for i, ds in enumerate(self.dims_neighbourhoods):
cur_stride = self.strides[i]
input_dim = input_shape[i + self.n_dims_before]
target_dim = input_dim // cur_stride
if not self.ignore_border and (input_dim % cur_stride) != 0:
target_dim += 1
num_strides[i] = target_dim
dims.append(target_dim)
neigh_flattened_dim *= ds
dims.append(neigh_flattened_dim)
return dims, num_strides
def in_shape(self, output_shape):
out_dims = list(output_shape[:self.n_dims_before])
num_strides = []
# they either have been filled with zeros, or have been cropped
for i, ds in enumerate(self.dims_neighbourhoods):
# the number of strides performed by NeighFromImg is
# directly given by this shape
num_strides.append(output_shape[self.n_dims_before + i])
# our Op's output image must be at least this wide
at_least_width = num_strides[i] * self.strides[i]
num_neigh = at_least_width // ds
if at_least_width % ds != 0:
num_neigh += 1
out_dims.append(num_neigh * ds)
return out_dims, num_strides
def make_node(self, x):
x = theano.tensor.as_tensor_variable(x)
if self.inverse:
# +1 in the inverse case
if x.type.ndim != (self.n_dims_before +
len(self.dims_neighbourhoods) + 1):
raise TypeError()
else:
if x.type.ndim != (self.n_dims_before +
len(self.dims_neighbourhoods)):
raise TypeError()
return gof.Apply(self, [x], [x.type()])
def perform(self, node, inp, out):
x, = inp
z, = out
if self.inverse:
# +1 in the inverse case
if len(x.shape) != (self.n_dims_before +
len(self.dims_neighbourhoods) + 1):
raise ValueError("Images passed as input don't match the "
"dimensions passed when this (inversed) "
"Apply node was created")
prod = 1
for dim in self.dims_neighbourhoods:
prod *= dim
if x.shape[-1] != prod:
raise ValueError("Last dimension of neighbourhoods (%s) is not"
" the product of the neighbourhoods dimensions"
" (%s)" % (str(x.shape[-1]), str(prod)))
else:
if len(x.shape) != (self.n_dims_before +
len(self.dims_neighbourhoods)):
raise ValueError("Images passed as input don't match the "
"dimensions passed when this Apply node "
"was created")
if self.inverse:
input_shape, num_strides = self.in_shape(x.shape)
out_shape, dummy = self.out_shape(input_shape)
else:
input_shape = x.shape
out_shape, num_strides = self.out_shape(input_shape)
if z[0] is None:
if self.inverse:
z[0] = numpy.zeros(input_shape)
else:
z[0] = numpy.zeros(out_shape)
z[0] = theano._asarray(z[0], dtype=x.dtype)
exec(self.code)
def make_py_code(self):
code = self._py_outerloops()
for i in xrange(len(self.strides)):
code += self._py_innerloop(i)
code += self._py_assignment()
return code, builtins.compile(code, '<string>', 'exec')
def _py_outerloops(self):
code_before = ""
for dim_idx in xrange(self.n_dims_before):
code_before += ('\t' * (dim_idx)) + \
"for outer_idx_%d in xrange(input_shape[%d]):\n" % \
(dim_idx, dim_idx)
return code_before
def _py_innerloop(self, inner_dim_no):
base_indent = ('\t' * (self.n_dims_before + inner_dim_no * 2))
code_before = base_indent + \
"for stride_idx_%d in xrange(num_strides[%d]):\n" % \
(inner_dim_no, inner_dim_no)
base_indent += '\t'
code_before += base_indent + \
"dim_%d_offset = stride_idx_%d * self.strides[%d]\n" %\
(inner_dim_no, inner_dim_no, inner_dim_no)
code_before += base_indent + \
"max_neigh_idx_%d = input_shape[%d] - dim_%d_offset\n" % \
(inner_dim_no, self.n_dims_before + inner_dim_no, inner_dim_no)
code_before += base_indent + \
("for neigh_idx_%d in xrange(min(max_neigh_idx_%d,"
" self.dims_neighbourhoods[%d])):\n") %\
(inner_dim_no, inner_dim_no, inner_dim_no)
return code_before
def _py_flattened_idx(self):
return "+".join(["neigh_strides[%d]*neigh_idx_%d" % (i, i)
for i in xrange(len(self.strides))])
def _py_assignment(self):
input_idx = "".join(["outer_idx_%d," % (i,)
for i in xrange(self.n_dims_before)])
input_idx += "".join(["dim_%d_offset+neigh_idx_%d," %
(i, i) for i in xrange(len(self.strides))])
out_idx = "".join(
["outer_idx_%d," % (i,) for i in xrange(self.n_dims_before)] +
["stride_idx_%d," % (i,) for i in xrange(len(self.strides))])
out_idx += self._py_flattened_idx()
# return_val = '\t' * (self.n_dims_before + len(self.strides)*2)
# return_val += "print "+input_idx+"'\\n',"+out_idx+"\n"
return_val = '\t' * (self.n_dims_before + len(self.strides) * 2)
if self.inverse:
# remember z and x are inversed:
# z is the Op's output, but has input_shape
return_val += "z[0][%s] = x[%s]\n" % (input_idx, out_idx)
else:
return_val += "z[0][%s] = x[%s]\n" % (out_idx, input_idx)
return return_val
class ImagesFromNeighbourhoods(NeighbourhoodsFromImages):
def __init__(self, n_dims_before, dims_neighbourhoods,
strides=None, ignore_border=False):
NeighbourhoodsFromImages.__init__(self, n_dims_before,
dims_neighbourhoods,
strides=strides,
ignore_border=ignore_border,
inverse=True)
# and that's all there is to it
| true
| true
|
790e23d8e2490e23b11407aae9aee8c5fd2d1e05
| 69,292
|
py
|
Python
|
verticapy/learn/tsa.py
|
afard/VerticaPy
|
ecbee0027a208ba53b31438e5b2f4577af95a07e
|
[
"Apache-2.0"
] | 52
|
2020-06-29T12:31:14.000Z
|
2022-03-31T20:24:23.000Z
|
verticapy/learn/tsa.py
|
afard/VerticaPy
|
ecbee0027a208ba53b31438e5b2f4577af95a07e
|
[
"Apache-2.0"
] | 175
|
2020-07-13T18:16:28.000Z
|
2022-03-31T14:01:45.000Z
|
verticapy/learn/tsa.py
|
afard/VerticaPy
|
ecbee0027a208ba53b31438e5b2f4577af95a07e
|
[
"Apache-2.0"
] | 21
|
2020-07-07T22:53:10.000Z
|
2022-03-04T11:30:48.000Z
|
# (c) Copyright [2018-2021] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# |_ |~) _ _| _ /~\ _ |.
# |_)\/ |_)(_|(_|| \_/|_|(_|||
# /
# ____________ ______
# / __ `\ / /
# | \/ / / /
# |______ / / /
# |____/ / /
# _____________ / /
# \ / / /
# \ / / /
# \_______/ / /
# ______ / /
# \ / / /
# \ / / /
# \/ / /
# / /
# / /
# \ /
# \ /
# \/
# _
# \ / _ __|_. _ _ |_)
# \/ (/_| | |(_(_|| \/
# /
# VerticaPy is a Python library with scikit-like functionality for conducting
# data science projects on data stored in Vertica, taking advantage of Vertica's
# speed and built-in analytics and machine learning features. It supports the
# entire data science life cycle, uses a ‘pipeline’ mechanism to sequentialize
# data transformation operations, and offers beautiful graphical options.
#
# VerticaPy aims to solve all of these problems. The idea is simple: instead
# of moving data around for processing, VerticaPy brings the logic to the data.
#
#
# Modules
#
# Standard Python Modules
import math, warnings
from typing import Union
# VerticaPy Modules
from verticapy.learn.vmodel import *
from verticapy.learn.linear_model import LinearRegression
from verticapy import vDataFrame
from verticapy.plot import gen_colors
from verticapy.learn.tools import *
# Other Python Modules
from dateutil.parser import parse
import matplotlib.pyplot as plt
# ---#
class SARIMAX(Regressor):
"""
---------------------------------------------------------------------------
[Beta Version]
Creates an SARIMAX object using the Vertica Linear Regression algorithm on
the data.
Parameters
----------
name: str
Name of the model. The model will be stored in the DB.
cursor: DBcursor, optional
Vertica database cursor.
p: int, optional
Order of the AR (Auto-Regressive) part.
d: int, optional
Order of the I (Integrated) part.
q: int, optional
Order of the MA (Moving-Average) part.
P: int, optional
Order of the seasonal AR (Auto-Regressive) part.
D: int, optional
Order of the seasonal I (Integrated) part.
Q: int, optional
Order of the seasonal MA (Moving-Average) part.
s: int, optional
Span of the seasonality.
tol: float, optional
Determines whether the algorithm has reached the specified accuracy result.
max_iter: int, optional
Determines the maximum number of iterations the algorithm performs before
achieving the specified accuracy result.
solver: str, optional
The optimizer method to use to train the model.
Newton : Newton Method
BFGS : Broyden Fletcher Goldfarb Shanno
max_pik: int, optional
Number of inverse MA coefficient used to approximate the MA.
papprox_ma: int, optional
Order p of the AR(p) process used to approximate the MA coefficients.
"""
def __init__(
self,
name: str,
cursor=None,
p: int = 0,
d: int = 0,
q: int = 0,
P: int = 0,
D: int = 0,
Q: int = 0,
s: int = 0,
tol: float = 1e-4,
max_iter: int = 1000,
solver: str = "Newton",
max_pik: int = 100,
papprox_ma: int = 200,
):
check_types([("name", name, [str],)])
self.type, self.name = "SARIMAX", name
self.set_params(
{
"p": p,
"d": d,
"q": q,
"P": P,
"D": D,
"Q": Q,
"s": s,
"tol": tol,
"max_iter": max_iter,
"solver": solver,
"max_pik": max_pik,
"papprox_ma": papprox_ma,
}
)
if self.parameters["s"] == 0:
assert (
self.parameters["D"] == 0
and self.parameters["P"] == 0
and self.parameters["Q"] == 0
), ParameterError(
"In case of non-seasonality (s = 0), all the parameters P, D or Q must be equal to 0."
)
else:
assert (
self.parameters["D"] > 0
or self.parameters["P"] > 0
or self.parameters["Q"] > 0
), ParameterError(
"In case of seasonality (s > 0), at least one of the parameters P, D or Q must be strictly greater than 0."
)
cursor = check_cursor(cursor)[0]
self.cursor = cursor
version(cursor=cursor, condition=[8, 0, 0])
# ---#
def deploySQL(self):
"""
---------------------------------------------------------------------------
Returns the SQL code needed to deploy the model.
Returns
-------
str
the SQL code needed to deploy the model.
"""
sql = self.deploy_predict_
if (self.parameters["d"] > 0) or (
self.parameters["D"] > 0 and self.parameters["s"] > 0
):
for i in range(0, self.parameters["d"] + 1):
for k in range(
0, max((self.parameters["D"] + 1) * min(1, self.parameters["s"]), 1)
):
if (k, i) != (0, 0):
comb_i_d = (
math.factorial(self.parameters["d"])
/ math.factorial(self.parameters["d"] - i)
/ math.factorial(i)
)
comb_k_D = (
math.factorial(self.parameters["D"])
/ math.factorial(self.parameters["D"] - k)
/ math.factorial(k)
)
sql += " + {} * LAG(VerticaPy_y_copy, {}) OVER (ORDER BY [VerticaPy_ts])".format(
(-1) ** (i + k + 1) * comb_i_d * comb_k_D,
i + self.parameters["s"] * k,
)
return sql
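    # Worked example of the double loop above (a sketch, not from the original
    # source): with d=2 and s=0 the only (i, k) pairs are (1, 0) and (2, 0),
    # whose signed coefficients (-1)**(i+k+1) * C(d, i) * C(D, k) are +2 and
    # -1, so the deployed SQL rebuilds the level series as
    #
    #     prediction + 2 * LAG(VerticaPy_y_copy, 1) - 1 * LAG(VerticaPy_y_copy, 2)
    #
    # i.e. it undoes the second-order differencing applied during fit().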
# ---#
def fpredict(self, L: list):
"""
---------------------------------------------------------------------------
Computes the prediction.
Parameters
----------
L: list
        List containing the data. It must be a two-dimensional list with one
        row per date, ordered by the ts column. In each row, the first element
        is the response value and the following elements are the exogenous
        variables, in the order used during fit().
Returns
-------
float
the prediction.
"""
def sub_arp(L: list):
L_final = []
for i in range(len(L)):
result = L[-i]
for i in range(len(self.coef_.values["coefficient"])):
elem = self.coef_.values["predictor"][i]
if elem.lower() == "intercept":
result -= self.coef_.values["coefficient"][i]
elif elem.lower()[0:2] == "ar":
nb = int(elem[2:])
try:
result -= self.coef_.values["coefficient"][i] * L[-nb]
except:
result = None
L_final = [result] + L_final
return L_final
def fepsilon(L: list):
if self.parameters["p"] > 0 or self.parameters["P"] > 0:
L_tmp = sub_arp(L)
else:
L_tmp = L
try:
result = L_tmp[-1] - self.ma_avg_
for i in range(1, self.parameters["max_pik"]):
result -= self.ma_piq_.values["coefficient"][i] * (
L_tmp[-i] - self.ma_avg_
)
return result
except:
return 0
if (
self.parameters["p"] == 0
and self.parameters["q"] == 0
and self.parameters["d"] == 0
and self.parameters["s"] == 0
and not (self.exogenous)
):
return self.ma_avg_
try:
yt = [elem[0] for elem in L]
yt_copy = [elem[0] for elem in L]
yt.reverse()
if self.parameters["d"] > 0:
for i in range(self.parameters["d"]):
yt = [yt[i - 1] - yt[i] for i in range(1, len(yt))]
if self.parameters["D"] > 0 and self.parameters["s"] > 0:
for i in range(self.parameters["D"]):
yt = [
yt[i - self.parameters["s"]] - yt[i]
for i in range(self.parameters["s"], len(yt))
]
yt.reverse()
result, j = 0, 1
for i in range(len(self.coef_.values["coefficient"])):
elem = self.coef_.values["predictor"][i]
if elem.lower() == "intercept":
result += self.coef_.values["coefficient"][i]
elif elem.lower()[0:2] == "ar":
nb = int(elem[2:])
result += self.coef_.values["coefficient"][i] * yt[-nb]
elif elem.lower()[0:2] == "ma":
nb = int(elem[2:])
result += self.coef_.values["coefficient"][i] * fepsilon(
yt[: -nb - 1]
)
else:
result += self.coef_.values["coefficient"][i] * L[-1][j]
j += 1
for i in range(0, self.parameters["d"] + 1):
for k in range(
0, max((self.parameters["D"] + 1) * min(1, self.parameters["s"]), 1)
):
if (k, i) != (0, 0):
comb_i_d = (
math.factorial(self.parameters["d"])
/ math.factorial(self.parameters["d"] - i)
/ math.factorial(i)
)
comb_k_D = (
math.factorial(self.parameters["D"])
/ math.factorial(self.parameters["D"] - k)
/ math.factorial(k)
)
result += (
(-1) ** (i + k + 1)
* comb_i_d
* comb_k_D
* yt_copy[-(i + self.parameters["s"] * k)]
)
return result
except:
return None
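    # fpredict() replays the fitted model in pure Python: it re-applies the
    # d/D differencing, adds the AR, MA (residuals rebuilt through the
    # pi-weights stored in ma_piq_) and exogenous terms, then undoes the
    # differencing with the same signed binomial coefficients as deploySQL().
    # plot() relies on it for the recursive multi-step "dynamic" forecast.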
# ---#
def fit(
self,
input_relation: Union[vDataFrame, str],
y: str,
ts: str,
X: list = [],
test_relation: Union[vDataFrame, str] = "",
):
"""
---------------------------------------------------------------------------
Trains the model.
Parameters
----------
input_relation: str/vDataFrame
Training relation.
y: str
Response column.
ts: str
vcolumn used to order the data.
X: list, optional
exogenous columns used to fit the model.
test_relation: str/vDataFrame, optional
Relation used to test the model.
Returns
-------
object
model
"""
check_types(
[
("input_relation", input_relation, [str, vDataFrame],),
("y", y, [str],),
("test_relation", test_relation, [str, vDataFrame],),
("ts", ts, [str],),
]
)
self.cursor = check_cursor(self.cursor, input_relation, True)[0]
# Initialization
does_model_exist(name=self.name, cursor=self.cursor, raise_error=True)
self.input_relation = (
input_relation
if isinstance(input_relation, str)
else input_relation.__genSQL__()
)
if isinstance(test_relation, vDataFrame):
self.test_relation = test_relation.__genSQL__()
elif test_relation:
self.test_relation = test_relation
else:
self.test_relation = self.input_relation
self.y, self.ts, self.deploy_predict_ = str_column(y), str_column(ts), ""
self.coef_ = tablesample({"predictor": [], "coefficient": []})
self.ma_avg_, self.ma_piq_ = None, None
X, schema = [str_column(elem) for elem in X], schema_relation(self.name)[0]
self.X, self.exogenous = [], X
relation = (
"(SELECT *, [VerticaPy_y] AS VerticaPy_y_copy FROM {}) VERTICAPY_SUBTABLE "
)
model = LinearRegression(
name=self.name,
solver=self.parameters["solver"],
max_iter=self.parameters["max_iter"],
tol=self.parameters["tol"],
)
if (
self.parameters["p"] == 0
and self.parameters["q"] == 0
and self.parameters["d"] == 0
and self.parameters["s"] == 0
and not (self.exogenous)
):
query = "SELECT AVG({}) FROM {}".format(self.y, self.input_relation)
self.ma_avg_ = self.cursor.execute(query).fetchone()[0]
self.deploy_predict_ = str(self.ma_avg_)
# I(d)
if self.parameters["d"] > 0:
for i in range(self.parameters["d"]):
relation = "(SELECT [VerticaPy_y] - LAG([VerticaPy_y], 1) OVER (ORDER BY [VerticaPy_ts]) AS [VerticaPy_y], VerticaPy_y_copy[VerticaPy_key_columns] FROM {}) VERTICAPY_SUBTABLE".format(
relation
)
if self.parameters["D"] > 0 and self.parameters["s"] > 0:
for i in range(self.parameters["D"]):
relation = "(SELECT [VerticaPy_y] - LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS [VerticaPy_y], VerticaPy_y_copy[VerticaPy_key_columns] FROM {}) VERTICAPY_SUBTABLE".format(
self.parameters["s"], relation
)
def drop_temp_elem(self, schema):
try:
with warnings.catch_warnings(record=True) as w:
drop(
"{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}".format(
schema, get_session(self.cursor)
),
cursor=self.cursor,
method="view",
)
except:
pass
# AR(p)
if self.parameters["p"] > 0 or self.parameters["P"] > 0:
columns = [
"LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS AR{}".format(
i, i
)
for i in range(1, self.parameters["p"] + 1)
]
AR = ["AR{}".format(i) for i in range(1, self.parameters["p"] + 1)]
if self.parameters["s"] > 0:
for i in range(1, self.parameters["P"] + 1):
if (i * self.parameters["s"]) not in (
range(1, self.parameters["p"] + 1)
):
columns += [
"LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS AR{}".format(
i * self.parameters["s"], i * self.parameters["s"]
)
]
AR += ["AR{}".format(i * self.parameters["s"])]
relation = "(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE".format(
", ".join(columns), relation
)
drop_temp_elem(self, schema)
query = "CREATE VIEW {}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{} AS SELECT * FROM {}".format(
schema,
get_session(self.cursor),
relation.format(self.input_relation)
.replace("[VerticaPy_ts]", self.ts)
.replace("[VerticaPy_y]", self.y)
.replace("[VerticaPy_key_columns]", ", " + ", ".join([self.ts] + X)),
)
try:
self.cursor.execute(query)
self.X += AR + X
model.fit(
input_relation="{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}".format(
schema, get_session(self.cursor)
),
X=self.X,
y=self.y,
)
except:
drop_temp_elem(self, schema)
raise
drop_temp_elem(self, schema)
self.coef_.values["predictor"] = model.coef_.values["predictor"]
self.coef_.values["coefficient"] = model.coef_.values["coefficient"]
alphaq = model.coef_.values["coefficient"]
model.drop()
epsilon_final = (
"[VerticaPy_y] - "
+ str(alphaq[0])
+ " - "
+ " - ".join(
[
str(alphaq[i])
+ " * "
+ "LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts])".format(
i
)
for i in range(1, self.parameters["p"] + 1)
]
)
)
self.deploy_predict_ = (
str(alphaq[0])
+ " + "
+ " + ".join(
[
str(alphaq[i])
+ " * "
+ "LAG(VerticaPy_y_copy, {}) OVER (ORDER BY [VerticaPy_ts])".format(
i
)
for i in range(1, self.parameters["p"] + 1)
]
)
)
if self.parameters["s"] > 0 and self.parameters["P"] > 0:
epsilon_final += " - " + " - ".join(
[
str(alphaq[i])
+ " * "
+ "LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts])".format(
i * self.parameters["s"]
)
for i in range(
self.parameters["p"] + 1,
self.parameters["p"]
+ (self.parameters["P"] if self.parameters["s"] > 0 else 0)
+ 1,
)
]
)
self.deploy_predict_ += " + " + " + ".join(
[
str(alphaq[i])
+ " * "
+ "LAG(VerticaPy_y_copy, {}) OVER (ORDER BY [VerticaPy_ts])".format(
i * self.parameters["s"]
)
for i in range(
self.parameters["p"] + 1,
self.parameters["p"]
+ (self.parameters["P"] if self.parameters["s"] > 0 else 0)
+ 1,
)
]
)
for idx, elem in enumerate(X):
epsilon_final += " - {} * [X{}]".format(
alphaq[
idx
+ self.parameters["p"]
+ (self.parameters["P"] if self.parameters["s"] > 0 else 0)
+ 1
],
idx,
)
self.deploy_predict_ += " + {} * [X{}]".format(
alphaq[
idx
+ self.parameters["p"]
+ (self.parameters["P"] if self.parameters["s"] > 0 else 0)
+ 1
],
idx,
)
relation = "(SELECT {} AS [VerticaPy_y], {}, VerticaPy_y_copy[VerticaPy_key_columns] FROM {}) VERTICAPY_SUBTABLE".format(
epsilon_final, ", ".join(AR), relation
)
# MA(q)
if self.parameters["q"] > 0 or (
self.parameters["Q"] > 0 and self.parameters["s"] > 0
):
transform_relation = relation.replace("[VerticaPy_y]", y).replace(
"[VerticaPy_ts]", ts
)
transform_relation = transform_relation.replace(
"[VerticaPy_key_columns]", ", " + ", ".join(X + [ts])
)
for idx, elem in enumerate(X):
transform_relation = transform_relation.replace(
"[X{}]".format(idx), elem
)
query = "SELECT COUNT(*), AVG({}) FROM {}".format(
self.y, transform_relation.format(self.input_relation)
)
result = self.cursor.execute(query).fetchone()
self.ma_avg_ = result[1]
n = result[0]
n = max(
max(
min(max(n ** (1.0 / 3.0), 8), self.parameters["papprox_ma"]),
self.parameters["q"],
),
self.parameters["Q"] * self.parameters["s"] + 1,
)
n = int(n)
columns = [
"LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS ARq{}".format(
i, i
)
for i in range(1, n)
]
ARq = ["ARq{}".format(i) for i in range(1, n)]
tmp_relation = "(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE".format(
", ".join(columns), relation
)
for idx, elem in enumerate(X):
tmp_relation = tmp_relation.replace("[X{}]".format(idx), elem)
drop_temp_elem(self, schema)
query = "CREATE VIEW {}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{} AS SELECT * FROM {}".format(
schema,
get_session(self.cursor),
tmp_relation.format(self.input_relation)
.replace("[VerticaPy_ts]", self.ts)
.replace("[VerticaPy_y]", self.y)
.replace("[VerticaPy_key_columns]", ", " + ", ".join([self.ts] + X)),
)
try:
self.cursor.execute(query)
model.fit(
input_relation="{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}".format(
schema, get_session(self.cursor)
),
X=ARq,
y=self.y,
)
except:
drop_temp_elem(self, schema)
raise
drop_temp_elem(self, schema)
if not (self.coef_.values["predictor"]):
self.coef_.values["predictor"] += ["Intercept"]
self.coef_.values["coefficient"] += [self.ma_avg_]
self.deploy_predict_ = str(self.ma_avg_)
alphaq = model.coef_.values["coefficient"][1:]
model.drop()
thetaq, piq = [], [-1] + []
for j in range(0, len(alphaq)):
thetaq += [
sum([alphaq[j - i - 1] * thetaq[i] for i in range(0, j)])
+ alphaq[j]
]
for j in range(self.parameters["q"]):
self.coef_.values["predictor"] += ["ma{}".format(j + 1)]
self.coef_.values["coefficient"] += [thetaq[j]]
self.deploy_predict_ += " + {} * MA{}".format(thetaq[j], j + 1)
if self.parameters["s"] > 0:
for j in range(1, self.parameters["Q"] + 1):
self.coef_.values["predictor"] += [
"ma{}".format(self.parameters["s"] * j)
]
self.coef_.values["coefficient"] += [
thetaq[self.parameters["s"] * j - 1]
]
self.deploy_predict_ += " + {} * MA{}".format(
thetaq[self.parameters["s"] * j - 1], self.parameters["s"] * j
)
for j in range(0, self.parameters["max_pik"]):
piq_tmp = 0
for i in range(0, self.parameters["q"]):
if j - i > 0:
piq_tmp -= thetaq[i] * piq[j - i]
elif j - i == 0:
piq_tmp -= thetaq[i]
piq = piq + [piq_tmp]
self.ma_piq_ = tablesample({"coefficient": piq})
epsilon = (
"[VerticaPy_y] - "
+ str(self.ma_avg_)
+ " - "
+ " - ".join(
[
str((piq[i]))
+ " * "
+ "LAG([VerticaPy_y] - {}, {}) OVER (ORDER BY [VerticaPy_ts])".format(
self.ma_avg_, i
)
for i in range(1, self.parameters["max_pik"])
]
)
)
epsilon += " AS MA0"
relation = "(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE".format(
epsilon, relation
)
columns = [
"LAG(MA0, {}) OVER (ORDER BY [VerticaPy_ts]) AS MA{}".format(i, i)
for i in range(1, self.parameters["q"] + 1)
]
MA = ["MA{}".format(i) for i in range(1, self.parameters["q"] + 1)]
if self.parameters["s"] > 0:
columns += [
"LAG(MA0, {}) OVER (ORDER BY [VerticaPy_ts]) AS MA{}".format(
i * self.parameters["s"], i * self.parameters["s"]
)
for i in range(1, self.parameters["Q"] + 1)
]
MA += [
"MA{}".format(i * self.parameters["s"])
for i in range(1, self.parameters["Q"] + 1)
]
relation = "(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE".format(
", ".join(columns), relation
)
self.X += MA
transform_relation = relation.replace("[VerticaPy_y]", y).replace(
"[VerticaPy_ts]", ts
)
transform_relation = transform_relation.replace(
"[VerticaPy_key_columns]", ", " + ", ".join(X + [ts])
)
for idx, elem in enumerate(X):
transform_relation = transform_relation.replace(
"[X{}]".format(idx), elem
)
self.transform_relation = relation
model_save = {
"type": "SARIMAX",
"input_relation": self.input_relation,
"test_relation": self.test_relation,
"transform_relation": self.transform_relation,
"deploy_predict": self.deploy_predict_,
"ma_avg": self.ma_avg_,
"ma_piq": self.ma_piq_.values if (self.ma_piq_) else None,
"X": self.X,
"y": self.y,
"ts": self.ts,
"exogenous": self.exogenous,
"coef": self.coef_.values,
"p": self.parameters["p"],
"d": self.parameters["d"],
"q": self.parameters["q"],
"P": self.parameters["P"],
"D": self.parameters["D"],
"Q": self.parameters["Q"],
"s": self.parameters["s"],
"tol": self.parameters["tol"],
"max_iter": self.parameters["max_iter"],
"solver": self.parameters["solver"],
"max_pik": self.parameters["max_pik"],
"papprox_ma": self.parameters["papprox_ma"],
}
insert_verticapy_schema(
model_name=self.name,
model_type="SARIMAX",
model_save=model_save,
cursor=self.cursor,
)
return self
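    # The training above proceeds in three SQL stages, each stored as a nested
    # sub-select in self.transform_relation:
    #   1. I(d) / seasonal I(D): difference the response with LAG().
    #   2. AR(p) / seasonal AR(P): add lagged columns and fit a Vertica
    #      LinearRegression on them through a temporary view.
    #   3. MA(q) / seasonal MA(Q): approximate the residual process with a long
    #      AR regression, convert its coefficients into theta (MA) and pi
    #      (invertibility) weights, and add the residual columns MA1..MAq.
    # The temporary view VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_<session>
    # is dropped whether the fit succeeds or fails.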
# ---#
def plot(
self,
vdf: vDataFrame = None,
y: str = "",
ts: str = "",
X: list = [],
dynamic: bool = False,
one_step: bool = True,
observed: bool = True,
confidence: bool = True,
nlead: int = 10,
nlast: int = 0,
limit: int = 1000,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the SARIMAX model.
Parameters
----------
vdf: vDataFrame, optional
Object to use to run the prediction.
y: str, optional
Response column.
ts: str, optional
vcolumn used to order the data.
X: list, optional
exogenous vcolumns.
dynamic: bool, optional
If set to True, the dynamic forecast will be drawn.
one_step: bool, optional
If set to True, the one step ahead forecast will be drawn.
observed: bool, optional
If set to True, the observation will be drawn.
confidence: bool, optional
If set to True, the confidence ranges will be drawn.
nlead: int, optional
Number of predictions computed by the dynamic forecast after
the last ts date.
nlast: int, optional
The dynamic forecast will start nlast values before the last
ts date.
limit: int, optional
Maximum number of past elements to use.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
"""
if not (vdf):
vdf = vdf_from_relation(relation=self.input_relation, cursor=self.cursor)
check_types(
[
("limit", limit, [int, float],),
("nlead", nlead, [int, float],),
("dynamic", dynamic, [bool],),
("observed", observed, [bool],),
("one_step", one_step, [bool],),
("confidence", confidence, [bool],),
("vdf", vdf, [vDataFrame],),
],
)
delta_limit, limit = (
limit,
max(
max(
limit,
self.parameters["p"] + 1 + nlast,
self.parameters["P"] * self.parameters["s"] + 1 + nlast,
),
200,
),
)
delta_limit = max(limit - delta_limit - nlast, 0)
assert dynamic or one_step or observed, ParameterError(
"No option selected.\n You should set either dynamic, one_step or observed to True."
)
assert nlead + nlast > 0 or not (dynamic), ParameterError(
"Dynamic Plots are only possible if either parameter 'nlead' is greater than 0 or parameter 'nlast' is greater than 0, and parameter 'dynamic' is set to True."
)
if dynamic:
assert not (self.exogenous), Exception(
"Dynamic Plots are only possible for SARIMA models (no exegenous variables), not SARIMAX."
)
if not (y):
y = self.y
if not (ts):
ts = self.ts
if not (X):
X = self.exogenous
result = self.predict(
vdf=vdf, y=y, ts=ts, X=X, nlead=0, name="_verticapy_prediction_"
)
error_eps = 1.96 * math.sqrt(self.score(method="mse"))
print_info = verticapy.options["print_info"]
verticapy.options["print_info"] = False
try:
result = (
result.select([ts, y, "_verticapy_prediction_"])
.dropna()
.sort([ts])
.tail(limit)
.values
)
except:
verticapy.options["print_info"] = print_info
raise
verticapy.options["print_info"] = print_info
columns = [elem for elem in result]
if isinstance(result[columns[0]][0], str):
result[columns[0]] = [parse(elem) for elem in result[columns[0]]]
true_value = [result[columns[0]], result[columns[1]]]
one_step_ahead = [result[columns[0]], result[columns[2]]]
lower_osa, upper_osa = (
[
float(elem) - error_eps if elem != None else None
for elem in one_step_ahead[1]
],
[
float(elem) + error_eps if elem != None else None
for elem in one_step_ahead[1]
],
)
if dynamic:
deltat = result[columns[0]][-1] - result[columns[0]][-2]
lead_time_list = []
if nlast > 0:
lead_list = [[elem] for elem in result[columns[1]][:-nlast]]
else:
lead_list = [[elem] for elem in result[columns[1]]]
for i in range(nlast):
lead_list += [[self.fpredict(lead_list)]]
lead_time_list += [result[columns[0]][i - nlast]]
if lead_time_list:
start_time = lead_time_list[-1]
else:
start_time = result[columns[0]][-1]
for i in range(nlead):
lead_list += [[self.fpredict(lead_list)]]
lead_time_list += [start_time + (i + 1) * deltat]
dynamic_forecast = (
[result[columns[0]][-nlast - 1]] + lead_time_list,
[result[columns[1]][-nlast - 1]]
+ [elem[0] for elem in lead_list[-nlast - nlead :]],
)
lower_d, upper_d = [], []
for i in range(len(dynamic_forecast[1])):
if (
self.parameters["s"] > 0
and self.parameters["p"] == 0
and self.parameters["d"] == 0
and self.parameters["q"] == 0
):
delta_error = error_eps * math.sqrt(
int(i / self.parameters["s"]) + 1
)
else:
delta_error = error_eps * math.sqrt(i + 1)
lower_d += [float(dynamic_forecast[1][i]) - delta_error]
upper_d += [float(dynamic_forecast[1][i]) + delta_error]
else:
lower_d, upper_d, dynamic_forecast = [], [], ([], [])
alpha = 0.3
if not (ax):
fig, ax = plt.subplots()
if isnotebook():
fig.set_size_inches(10, 6)
ax.grid()
colors = gen_colors()
param1 = {
"color": colors[2],
"linewidth": 2,
}
param2 = {
"color": colors[3],
"linewidth": 2,
"linestyle": ":",
}
param3 = {
"color": colors[0],
"linewidth": 2,
"linestyle": "dashed",
}
if dynamic:
ax.fill_between(
dynamic_forecast[0],
1.02
* float(min(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
1.02
* float(max(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
alpha=0.04,
color=updated_dict(param3, style_kwds, 2)["color"],
)
if confidence:
ax.fill_between(
dynamic_forecast[0], lower_d, upper_d, alpha=0.08, color="#555555"
)
ax.plot(dynamic_forecast[0], lower_d, alpha=0.08, color="#000000")
ax.plot(dynamic_forecast[0], upper_d, alpha=0.08, color="#000000")
ax.plot(
dynamic_forecast[0],
dynamic_forecast[1],
label="Dynamic Forecast",
**updated_dict(param3, style_kwds, 2),
)
if one_step:
if confidence:
ax.fill_between(
one_step_ahead[0][delta_limit:],
lower_osa[delta_limit:],
upper_osa[delta_limit:],
alpha=0.04,
color="#555555",
)
ax.plot(
one_step_ahead[0][delta_limit:],
lower_osa[delta_limit:],
alpha=0.04,
color="#000000",
)
ax.plot(
one_step_ahead[0][delta_limit:],
upper_osa[delta_limit:],
alpha=0.04,
color="#000000",
)
ax.plot(
one_step_ahead[0][delta_limit:],
one_step_ahead[1][delta_limit:],
label="One-step ahead Forecast",
**updated_dict(param2, style_kwds, 1),
)
if observed:
ax.plot(
true_value[0][delta_limit:],
true_value[1][delta_limit:],
label="Observed",
**updated_dict(param1, style_kwds, 0),
)
ax.set_title(
"SARIMAX({},{},{})({},{},{})_{}".format(
self.parameters["p"],
self.parameters["d"],
self.parameters["q"],
self.parameters["P"],
self.parameters["D"],
self.parameters["Q"],
self.parameters["s"],
)
)
ax.set_xlabel(ts)
ax.legend(loc="center left", bbox_to_anchor=[1, 0.5])
ax.set_ylim(
1.02 * float(min(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
1.02 * float(max(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
)
for tick in ax.get_xticklabels():
tick.set_rotation(90)
return ax
# ---#
def predict(
self,
vdf: vDataFrame,
y: str = "",
ts: str = "",
X: list = [],
nlead: int = 0,
name: str = "",
):
"""
---------------------------------------------------------------------------
Predicts using the input relation.
Parameters
----------
vdf: vDataFrame
Object to use to run the prediction.
y: str, optional
Response column.
ts: str, optional
vcolumn used to order the data.
X: list, optional
exogenous vcolumns.
nlead: int, optional
Number of records to predict after the last ts date.
name: str, optional
Name of the added vcolumn. If empty, a name will be generated.
Returns
-------
vDataFrame
object including the prediction.
"""
check_types(
[
("name", name, [str],),
("y", y, [str],),
("ts", ts, [str],),
("X", X, [list],),
("nlead", nlead, [int, float],),
("vdf", vdf, [vDataFrame],),
],
)
if not (y):
y = self.y
if not (ts):
ts = self.ts
if not (X):
X = self.exogenous
columns_check([y, ts], vdf)
y, ts = vdf_columns_names([y, ts], vdf)
name = (
"{}_".format(self.type) + "".join(ch for ch in self.name if ch.isalnum())
if not (name)
else name
)
key_columns = ", " + ", ".join(vdf.get_columns(exclude_columns=[y]))
transform_relation = self.transform_relation.replace(
"[VerticaPy_y]", y
).replace("[VerticaPy_ts]", ts)
transform_relation = transform_relation.replace(
"[VerticaPy_key_columns]", key_columns
)
predictSQL = self.deploySQL().replace("[VerticaPy_y]", y).replace(
"[VerticaPy_ts]", ts
) + " AS {}".format(name)
for idx, elem in enumerate(X):
transform_relation = transform_relation.replace("[X{}]".format(idx), elem)
predictSQL = predictSQL.replace("[X{}]".format(idx), elem)
columns = (
vdf.get_columns(exclude_columns=[y])
+ [predictSQL]
+ ["VerticaPy_y_copy AS {}".format(y)]
)
relation = vdf.__genSQL__()
for i in range(nlead):
query = "SELECT ({} - LAG({}, 1) OVER (ORDER BY {}))::VARCHAR FROM {} ORDER BY {} DESC LIMIT 1".format(
ts, ts, ts, relation, ts
)
deltat = vdf._VERTICAPY_VARIABLES_["cursor"].execute(query).fetchone()[0]
query = "SELECT (MAX({}) + '{}'::interval)::VARCHAR FROM {}".format(
ts, deltat, relation
)
next_t = vdf._VERTICAPY_VARIABLES_["cursor"].execute(query).fetchone()[0]
if i == 0:
first_t = next_t
new_line = "SELECT '{}'::TIMESTAMP AS {}, {}".format(
next_t,
ts,
", ".join(
[
"NULL AS {}".format(column)
for column in vdf.get_columns(exclude_columns=[ts])
]
),
)
relation_tmp = "(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE".format(
", ".join([ts] + vdf.get_columns(exclude_columns=[ts])),
relation,
new_line,
)
query = "SELECT {} FROM {} ORDER BY {} DESC LIMIT 1".format(
self.deploySQL()
.replace("[VerticaPy_y]", y)
.replace("[VerticaPy_ts]", ts),
transform_relation.format(relation_tmp),
ts,
)
prediction = (
vdf._VERTICAPY_VARIABLES_["cursor"].execute(query).fetchone()[0]
)
columns_tmp = vdf.get_columns(exclude_columns=[ts, y])
new_line = "SELECT '{}'::TIMESTAMP AS {}, {} AS {} {}".format(
next_t,
ts,
prediction,
y,
(", " if (columns_tmp) else "")
+ ", ".join(["NULL AS {}".format(column) for column in columns_tmp]),
)
relation = "(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE".format(
", ".join([ts, y] + vdf.get_columns(exclude_columns=[ts, y])),
relation,
new_line,
)
final_relation = "(SELECT {} FROM {}) VERTICAPY_SUBTABLE".format(
", ".join(columns), transform_relation.format(relation)
)
result = vdf_from_relation(final_relation, "SARIMAX", self.cursor,)
if nlead > 0:
result[y].apply(
"CASE WHEN {} >= '{}' THEN NULL ELSE {} END".format(ts, first_t, "{}")
)
return result
# ---#
class VAR(Regressor):
"""
---------------------------------------------------------------------------
[Beta Version]
Creates a VAR object using the Vertica Linear Regression algorithm on the
data.
Parameters
----------
name: str
Name of the model. The model will be stored in the DB.
cursor: DBcursor, optional
Vertica database cursor.
p: int, optional
Order of the AR (Auto-Regressive) part.
tol: float, optional
Determines whether the algorithm has reached the specified accuracy result.
max_iter: int, optional
Determines the maximum number of iterations the algorithm performs before
achieving the specified accuracy result.
solver: str, optional
The optimizer method to use to train the model.
Newton : Newton Method
BFGS : Broyden Fletcher Goldfarb Shanno
"""
def __init__(
self,
name: str,
cursor=None,
p: int = 1,
tol: float = 1e-4,
max_iter: int = 1000,
solver: str = "Newton",
):
check_types([("name", name, [str],)])
self.type, self.name = "VAR", name
assert p > 0, ParameterError(
"Parameter 'p' must be greater than 0 to build a VAR model."
)
self.set_params(
{"p": p, "tol": tol, "max_iter": max_iter, "solver": solver,}
)
cursor = check_cursor(cursor)[0]
self.cursor = cursor
version(cursor=cursor, condition=[8, 0, 0])
# ---#
def deploySQL(self):
"""
---------------------------------------------------------------------------
Returns the SQL code needed to deploy the model.
Returns
-------
str
the SQL code needed to deploy the model.
"""
sql = []
for idx, coefs in enumerate(self.coef_):
coefs_tmp = coefs.values["coefficient"]
predictors_tmp = coefs.values["predictor"]
sql += [
str(coefs_tmp[0])
+ " + "
+ " + ".join(
[
str(coefs_tmp[i]) + " * " + str(predictors_tmp[i])
for i in range(1, len(coefs_tmp))
]
)
]
return sql
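    # Sketch of the returned value (the numbers are illustrative assumptions):
    # with X = [gdp, inflation] and p = 1, deploySQL() returns one expression
    # per response column, e.g.
    #
    #     ["0.12 + 0.85 * AR0_1 + 0.03 * AR1_1",
    #      "0.40 + 0.10 * AR0_1 + 0.70 * AR1_1"]
    #
    # where AR<i>_<j> denotes the j-th lag of the i-th column of X.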
# ---#
def features_importance(
self, X_idx: int = 0, ax=None, show: bool = True, **style_kwds,
):
"""
---------------------------------------------------------------------------
Computes the model's features importance.
Parameters
----------
X_idx: int/str, optional
Index of the main vector vcolumn used to draw the features importance.
It can also be the name of a predictor vcolumn.
ax: Matplotlib axes object, optional
The axes to plot on.
show: bool
If set to True, draw the features importance.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
"""
check_types([("X_idx", X_idx, [int, float, str],), ("show", show, [bool],),],)
if isinstance(X_idx, str):
X_idx = str_column(X_idx).lower()
for idx, elem in enumerate(self.X):
if str_column(elem).lower() == X_idx:
X_idx = idx
break
assert (
isinstance(X_idx, (float, int)) and len(self.X) > X_idx >= 0
), ParameterError(
"The index of the vcolumn to draw 'X_idx' must be between 0 and {}. It can also be the name of a predictor vcolumn.".format(
len(self.X)
)
)
relation = self.transform_relation.replace("[VerticaPy_ts]", self.ts).format(
self.test_relation
)
for idx, elem in enumerate(self.X):
relation = relation.replace("[X{}]".format(idx), elem)
min_max = (
vdf_from_relation(relation=self.input_relation, cursor=self.cursor)
.agg(func=["min", "max"], columns=self.X)
.transpose()
)
coefficient = self.coef_[X_idx].values
coeff_importances = {}
coeff_sign = {}
for idx, coef in enumerate(coefficient["predictor"]):
if idx > 0:
predictor = int(coef.split("_")[0].replace("ar", ""))
predictor = str_column(self.X[predictor])
minimum, maximum = min_max[predictor]
val = coefficient["coefficient"][idx]
coeff_importances[coef] = abs(val) * (maximum - minimum)
coeff_sign[coef] = 1 if val >= 0 else -1
total = sum([coeff_importances[elem] for elem in coeff_importances])
for elem in coeff_importances:
coeff_importances[elem] = 100 * coeff_importances[elem] / total
if show:
plot_importance(
coeff_importances, coeff_sign, print_legend=True, ax=ax, **style_kwds,
)
importances = {"index": ["importance", "sign"]}
for elem in coeff_importances:
importances[elem] = [coeff_importances[elem], coeff_sign[elem]]
return tablesample(values=importances).transpose()
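    # The importance computed above is a range-scaled coefficient: for each
    # lagged predictor, abs(coefficient) * (max - min) of the underlying
    # column, normalized so that all importances sum to 100; the sign row
    # records the direction of the effect.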
# ---#
def fit(
self,
input_relation: Union[vDataFrame, str],
X: list,
ts: str,
test_relation: Union[vDataFrame, str] = "",
):
"""
---------------------------------------------------------------------------
Trains the model.
Parameters
----------
input_relation: str/vDataFrame
Training relation.
X: list
List of the response columns.
ts: str
vcolumn used to order the data.
test_relation: str/vDataFrame, optional
Relation used to test the model.
Returns
-------
object
self
"""
check_types(
[
("input_relation", input_relation, [str, vDataFrame],),
("X", X, [list],),
("ts", ts, [str],),
("test_relation", test_relation, [str, vDataFrame],),
]
)
self.cursor = check_cursor(self.cursor, input_relation, True)[0]
# Initialization
does_model_exist(name=self.name, cursor=self.cursor, raise_error=True)
self.input_relation = (
input_relation
if isinstance(input_relation, str)
else input_relation.__genSQL__()
)
if isinstance(test_relation, vDataFrame):
self.test_relation = test_relation.__genSQL__()
elif test_relation:
self.test_relation = test_relation
else:
self.test_relation = self.input_relation
self.ts, self.deploy_predict_ = str_column(ts), []
self.X, schema = [str_column(elem) for elem in X], schema_relation(self.name)[0]
model = LinearRegression(
name=self.name,
solver=self.parameters["solver"],
max_iter=self.parameters["max_iter"],
tol=self.parameters["tol"],
)
# AR(p)
columns, AR = [], []
for idx, elem in enumerate(self.X):
for i in range(1, self.parameters["p"] + 1):
columns += [
"LAG([X{}], {}) OVER (ORDER BY [VerticaPy_ts]) AS AR{}_{}".format(
idx, i, idx, i
)
]
AR += ["AR{}_{}".format(idx, i)]
self.transform_relation = "(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE".format(
", ".join(columns), "{}"
)
relation = self.transform_relation.replace("[VerticaPy_ts]", self.ts).format(
self.input_relation
)
for idx, elem in enumerate(self.X):
relation = relation.replace("[X{}]".format(idx), elem)
def drop_temp_elem(self, schema):
try:
with warnings.catch_warnings(record=True) as w:
drop(
"{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}".format(
schema, get_session(self.cursor)
),
cursor=self.cursor,
method="view",
)
except:
pass
drop_temp_elem(self, schema)
try:
query = "CREATE VIEW {}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{} AS SELECT * FROM {}".format(
schema, get_session(self.cursor), relation
)
self.cursor.execute(query)
self.coef_ = []
for elem in X:
model.fit(
input_relation="{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}".format(
schema, get_session(self.cursor)
),
X=AR,
y=elem,
)
self.coef_ += [model.coef_]
model.drop()
except:
drop_temp_elem(self, schema)
raise
drop_temp_elem(self, schema)
model_save = {
"type": "VAR",
"input_relation": self.input_relation,
"test_relation": self.test_relation,
"transform_relation": self.transform_relation,
"deploy_predict": self.deploy_predict_,
"X": self.X,
"ts": self.ts,
"p": self.parameters["p"],
"tol": self.parameters["tol"],
"max_iter": self.parameters["max_iter"],
"solver": self.parameters["solver"],
}
for idx, elem in enumerate(self.coef_):
model_save["coef_{}".format(idx)] = elem.values
insert_verticapy_schema(
model_name=self.name,
model_type="VAR",
model_save=model_save,
cursor=self.cursor,
)
return self
# ---#
def fpredict(self, L: list):
"""
---------------------------------------------------------------------------
Computes the prediction.
Parameters
----------
L: list
        List containing the data. It must be a two-dimensional list with one
        row per date, ordered by the ts column. Each row contains the values
        of the response columns, in the same order as the X list used during
        fit().
Returns
-------
float
the prediction.
"""
try:
result = []
result_tmp = 0
for i in range(len(self.X)):
result_tmp = 0
for j in range(len(self.coef_[i].values["coefficient"])):
elem = self.coef_[i].values["predictor"][j]
if elem.lower() == "intercept":
result_tmp += self.coef_[i].values["coefficient"][j]
else:
ni, nj = elem[2:].split("_")
ni, nj = int(ni), int(nj)
result_tmp += (
self.coef_[i].values["coefficient"][j] * L[-nj][ni]
)
result += [result_tmp]
return result
except:
return None
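    # Note on the parsing above: each predictor created by fit() is named
    # "AR<i>_<j>", i being the index of the response column and j the lag, so
    # the contribution read from L is coefficient * L[-j][i]. fpredict()
    # therefore returns the next row of forecasts, one value per column of X.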
# ---#
def plot(
self,
vdf: vDataFrame = None,
X: list = [],
ts: str = "",
X_idx: int = 0,
dynamic: bool = False,
one_step: bool = True,
observed: bool = True,
confidence: bool = True,
nlead: int = 10,
nlast: int = 0,
limit: int = 1000,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the VAR model.
Parameters
----------
vdf: vDataFrame
Object to use to run the prediction.
X: list, optional
List of the response columns.
ts: str, optional
vcolumn used to order the data.
X_idx: int, optional
Index of the main vector vcolumn to draw. It can also be the name of a
predictor vcolumn.
dynamic: bool, optional
If set to True, the dynamic forecast will be drawn.
one_step: bool, optional
If set to True, the one step ahead forecast will be drawn.
observed: bool, optional
If set to True, the observation will be drawn.
confidence: bool, optional
If set to True, the confidence ranges will be drawn.
nlead: int, optional
Number of predictions computed by the dynamic forecast after
the last ts date.
nlast: int, optional
The dynamic forecast will start nlast values before the last
ts date.
limit: int, optional
Maximum number of past elements to use.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
"""
if not (vdf):
vdf = vdf_from_relation(relation=self.input_relation, cursor=self.cursor)
check_types(
[
("limit", limit, [int, float],),
("nlead", nlead, [int, float],),
("X_idx", X_idx, [int, float, str],),
("dynamic", dynamic, [bool],),
("observed", observed, [bool],),
("one_step", one_step, [bool],),
("confidence", confidence, [bool],),
("vdf", vdf, [vDataFrame],),
],
)
delta_limit, limit = (
limit,
max(max(limit, self.parameters["p"] + 1 + nlast), 200),
)
delta_limit = max(limit - delta_limit - nlast, 0)
if not (ts):
ts = self.ts
if not (X):
X = self.X
assert dynamic or one_step or observed, ParameterError(
"No option selected.\n You should set either dynamic, one_step or observed to True."
)
assert nlead + nlast > 0 or not (dynamic), ParameterError(
"Dynamic Plots are only possible if either parameter 'nlead' is greater than 0 or parameter 'nlast' is greater than 0, and parameter 'dynamic' is set to True."
)
if isinstance(X_idx, str):
X_idx = str_column(X_idx).lower()
for idx, elem in enumerate(X):
if str_column(elem).lower() == X_idx:
X_idx = idx
break
assert (
isinstance(X_idx, (float, int)) and len(self.X) > X_idx >= 0
), ParameterError(
"The index of the vcolumn to draw 'X_idx' must be between 0 and {}. It can also be the name of a predictor vcolumn.".format(
len(self.X)
)
)
result_all = self.predict(
vdf=vdf,
X=X,
ts=ts,
nlead=0,
name=[
"_verticapy_prediction_{}_".format(idx) for idx in range(len(self.X))
],
)
y, prediction = X[X_idx], "_verticapy_prediction_{}_".format(X_idx)
error_eps = 1.96 * math.sqrt(self.score(method="mse").values["mse"][X_idx])
print_info = verticapy.options["print_info"]
verticapy.options["print_info"] = False
try:
result = (
result_all.select([ts, y, prediction])
.dropna()
.sort([ts])
.tail(limit)
.values
)
except:
verticapy.options["print_info"] = print_info
raise
verticapy.options["print_info"] = print_info
columns = [elem for elem in result]
if isinstance(result[columns[0]][0], str):
result[columns[0]] = [parse(elem) for elem in result[columns[0]]]
true_value = [result[columns[0]], result[columns[1]]]
one_step_ahead = [result[columns[0]], result[columns[2]]]
lower_osa, upper_osa = (
[
float(elem) - error_eps if elem != None else None
for elem in one_step_ahead[1]
],
[
float(elem) + error_eps if elem != None else None
for elem in one_step_ahead[1]
],
)
if dynamic:
print_info = verticapy.options["print_info"]
verticapy.options["print_info"] = False
try:
result = (
result_all.select([ts] + X).dropna().sort([ts]).tail(limit).values
)
except:
verticapy.options["print_info"] = print_info
raise
verticapy.options["print_info"] = print_info
columns = [elem for elem in result]
if isinstance(result[columns[0]][0], str):
result[columns[0]] = [parse(elem) for elem in result[columns[0]]]
deltat = result[columns[0]][-1] - result[columns[0]][-2]
lead_time_list, lead_list = [], []
if nlast > 0:
for i in range(len(result[columns[0]][:-nlast])):
lead_list += [[result[elem][i] for elem in columns[1:]]]
else:
for i in range(len(result[columns[0]])):
lead_list += [[result[elem][i] for elem in columns[1:]]]
for i in range(nlast):
lead_list += [self.fpredict(lead_list)]
lead_time_list += [result[columns[0]][i - nlast]]
if lead_time_list:
start_time = lead_time_list[-1]
else:
start_time = result[columns[0]][-1]
for i in range(nlead):
lead_list += [self.fpredict(lead_list)]
lead_time_list += [start_time + (i + 1) * deltat]
dynamic_forecast = (
[result[columns[0]][-nlast - 1]] + lead_time_list,
[result[columns[1 + X_idx]][-nlast - 1]]
+ [elem[X_idx] for elem in lead_list[-nlast - nlead :]],
)
lower_d, upper_d = [], []
for i in range(len(dynamic_forecast[1])):
delta_error = error_eps * math.sqrt(i + 1)
lower_d += [float(dynamic_forecast[1][i]) - delta_error]
upper_d += [float(dynamic_forecast[1][i]) + delta_error]
else:
lower_d, upper_d, dynamic_forecast = [], [], ([], [])
alpha = 0.3
if not (ax):
fig, ax = plt.subplots()
if isnotebook():
fig.set_size_inches(10, 6)
ax.grid()
colors = gen_colors()
param1 = {
"color": colors[2],
"linewidth": 2,
}
param2 = {
"color": colors[3],
"linewidth": 2,
"linestyle": ":",
}
param3 = {
"color": colors[0],
"linewidth": 2,
"linestyle": "dashed",
}
if dynamic:
ax.fill_between(
dynamic_forecast[0],
1.02
* float(min(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
1.02
* float(max(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
alpha=0.04,
color=updated_dict(param3, style_kwds, 2)["color"],
)
if confidence:
ax.fill_between(
dynamic_forecast[0], lower_d, upper_d, alpha=0.08, color="#555555"
)
ax.plot(dynamic_forecast[0], lower_d, alpha=0.08, color="#000000")
ax.plot(dynamic_forecast[0], upper_d, alpha=0.08, color="#000000")
ax.plot(
dynamic_forecast[0],
dynamic_forecast[1],
label="Dynamic Forecast",
**updated_dict(param3, style_kwds, 2),
)
if one_step:
if confidence:
ax.fill_between(
one_step_ahead[0][delta_limit:],
lower_osa[delta_limit:],
upper_osa[delta_limit:],
alpha=0.04,
color="#555555",
)
ax.plot(
one_step_ahead[0][delta_limit:],
lower_osa[delta_limit:],
alpha=0.04,
color="#000000",
)
ax.plot(
one_step_ahead[0][delta_limit:],
upper_osa[delta_limit:],
alpha=0.04,
color="#000000",
)
ax.plot(
one_step_ahead[0][delta_limit:],
one_step_ahead[1][delta_limit:],
label="One-step ahead Forecast",
**updated_dict(param2, style_kwds, 1),
)
if observed:
ax.plot(
true_value[0][delta_limit:],
true_value[1][delta_limit:],
label="Observed",
**updated_dict(param1, style_kwds, 0),
)
ax.set_title("VAR({}) [{}]".format(self.parameters["p"], y))
ax.set_xlabel(ts)
ax.legend(loc="center left", bbox_to_anchor=[1, 0.5])
ax.set_ylim(
1.02 * float(min(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
1.02 * float(max(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
)
for tick in ax.get_xticklabels():
tick.set_rotation(90)
return ax
# ---#
def predict(
self,
vdf: vDataFrame,
X: list = [],
ts: str = "",
nlead: int = 0,
name: list = [],
):
"""
---------------------------------------------------------------------------
Predicts using the input relation.
Parameters
----------
vdf: vDataFrame
Object to use to run the prediction.
X: list, optional
List of the response columns.
ts: str, optional
vcolumn used to order the data.
nlead: int, optional
Number of records to predict after the last ts date.
name: list, optional
Names of the added vcolumns. If empty, names will be generated.
Returns
-------
vDataFrame
object including the prediction.
"""
check_types(
[
("name", name, [list],),
("ts", ts, [str],),
("nlead", nlead, [int, float],),
("X", X, [list],),
("vdf", vdf, [vDataFrame],),
],
)
if not (ts):
ts = self.ts
if not (X):
X = self.X
columns_check(X + [ts], vdf)
X = vdf_columns_names(X, vdf)
ts = vdf_columns_names([ts], vdf)[0]
all_pred, names = [], []
transform_relation = self.transform_relation.replace("[VerticaPy_ts]", self.ts)
for idx, elem in enumerate(X):
name_tmp = (
"{}_".format(self.type) + "".join(ch for ch in elem if ch.isalnum())
if len(name) != len(X)
else name[idx]
)
all_pred += ["{} AS {}".format(self.deploySQL()[idx], name_tmp)]
transform_relation = transform_relation.replace("[X{}]".format(idx), elem)
columns = vdf.get_columns() + all_pred
relation = vdf.__genSQL__()
for i in range(nlead):
query = "SELECT ({} - LAG({}, 1) OVER (ORDER BY {}))::VARCHAR FROM {} ORDER BY {} DESC LIMIT 1".format(
ts, ts, ts, relation, ts
)
deltat = vdf._VERTICAPY_VARIABLES_["cursor"].execute(query).fetchone()[0]
query = "SELECT (MAX({}) + '{}'::interval)::VARCHAR FROM {}".format(
ts, deltat, relation
)
next_t = vdf._VERTICAPY_VARIABLES_["cursor"].execute(query).fetchone()[0]
if i == 0:
first_t = next_t
new_line = "SELECT '{}'::TIMESTAMP AS {}, {}".format(
next_t,
ts,
", ".join(
[
"NULL AS {}".format(column)
for column in vdf.get_columns(exclude_columns=[ts])
]
),
)
relation_tmp = "(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE".format(
", ".join([ts] + vdf.get_columns(exclude_columns=[ts])),
relation,
new_line,
)
query = "SELECT {} FROM {} ORDER BY {} DESC LIMIT 1".format(
", ".join(self.deploySQL()), transform_relation.format(relation_tmp), ts
)
prediction = vdf._VERTICAPY_VARIABLES_["cursor"].execute(query).fetchone()
for idx, elem in enumerate(X):
prediction[idx] = "{} AS {}".format(prediction[idx], elem)
columns_tmp = vdf.get_columns(exclude_columns=[ts] + X)
new_line = "SELECT '{}'::TIMESTAMP AS {}, {} {}".format(
next_t,
ts,
", ".join(prediction),
(", " if (columns_tmp) else "")
+ ", ".join(["NULL AS {}".format(column) for column in columns_tmp]),
)
relation = "(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE".format(
", ".join([ts] + X + vdf.get_columns(exclude_columns=[ts] + X)),
relation,
new_line,
)
final_relation = "(SELECT {} FROM {}) VERTICAPY_SUBTABLE".format(
", ".join(columns), transform_relation.format(relation)
)
result = vdf_from_relation(final_relation, "VAR", self.cursor,)
if nlead > 0:
for elem in X:
result[elem].apply(
"CASE WHEN {} >= '{}' THEN NULL ELSE {} END".format(
ts, first_t, "{}"
)
)
return result
| 37.536295
| 200
| 0.462391
|
import math, warnings
from typing import Union
from verticapy.learn.vmodel import *
from verticapy.learn.linear_model import LinearRegression
from verticapy import vDataFrame
from verticapy.plot import gen_colors
from verticapy.learn.tools import *
from dateutil.parser import parse
import matplotlib.pyplot as plt
class SARIMAX(Regressor):
def __init__(
self,
name: str,
cursor=None,
p: int = 0,
d: int = 0,
q: int = 0,
P: int = 0,
D: int = 0,
Q: int = 0,
s: int = 0,
tol: float = 1e-4,
max_iter: int = 1000,
solver: str = "Newton",
max_pik: int = 100,
papprox_ma: int = 200,
):
check_types([("name", name, [str],)])
self.type, self.name = "SARIMAX", name
self.set_params(
{
"p": p,
"d": d,
"q": q,
"P": P,
"D": D,
"Q": Q,
"s": s,
"tol": tol,
"max_iter": max_iter,
"solver": solver,
"max_pik": max_pik,
"papprox_ma": papprox_ma,
}
)
if self.parameters["s"] == 0:
assert (
self.parameters["D"] == 0
and self.parameters["P"] == 0
and self.parameters["Q"] == 0
), ParameterError(
"In case of non-seasonality (s = 0), all the parameters P, D or Q must be equal to 0."
)
else:
assert (
self.parameters["D"] > 0
or self.parameters["P"] > 0
or self.parameters["Q"] > 0
), ParameterError(
"In case of seasonality (s > 0), at least one of the parameters P, D or Q must be strictly greater than 0."
)
cursor = check_cursor(cursor)[0]
self.cursor = cursor
version(cursor=cursor, condition=[8, 0, 0])
def deploySQL(self):
sql = self.deploy_predict_
if (self.parameters["d"] > 0) or (
self.parameters["D"] > 0 and self.parameters["s"] > 0
):
for i in range(0, self.parameters["d"] + 1):
for k in range(
0, max((self.parameters["D"] + 1) * min(1, self.parameters["s"]), 1)
):
if (k, i) != (0, 0):
comb_i_d = (
math.factorial(self.parameters["d"])
/ math.factorial(self.parameters["d"] - i)
/ math.factorial(i)
)
comb_k_D = (
math.factorial(self.parameters["D"])
/ math.factorial(self.parameters["D"] - k)
/ math.factorial(k)
)
sql += " + {} * LAG(VerticaPy_y_copy, {}) OVER (ORDER BY [VerticaPy_ts])".format(
(-1) ** (i + k + 1) * comb_i_d * comb_k_D,
i + self.parameters["s"] * k,
)
return sql
def fpredict(self, L: list):
def sub_arp(L: list):
L_final = []
for i in range(len(L)):
result = L[-i]
for i in range(len(self.coef_.values["coefficient"])):
elem = self.coef_.values["predictor"][i]
if elem.lower() == "intercept":
result -= self.coef_.values["coefficient"][i]
elif elem.lower()[0:2] == "ar":
nb = int(elem[2:])
try:
result -= self.coef_.values["coefficient"][i] * L[-nb]
except:
result = None
L_final = [result] + L_final
return L_final
def fepsilon(L: list):
if self.parameters["p"] > 0 or self.parameters["P"] > 0:
L_tmp = sub_arp(L)
else:
L_tmp = L
try:
result = L_tmp[-1] - self.ma_avg_
for i in range(1, self.parameters["max_pik"]):
result -= self.ma_piq_.values["coefficient"][i] * (
L_tmp[-i] - self.ma_avg_
)
return result
except:
return 0
if (
self.parameters["p"] == 0
and self.parameters["q"] == 0
and self.parameters["d"] == 0
and self.parameters["s"] == 0
and not (self.exogenous)
):
return self.ma_avg_
try:
yt = [elem[0] for elem in L]
yt_copy = [elem[0] for elem in L]
yt.reverse()
if self.parameters["d"] > 0:
for i in range(self.parameters["d"]):
yt = [yt[i - 1] - yt[i] for i in range(1, len(yt))]
if self.parameters["D"] > 0 and self.parameters["s"] > 0:
for i in range(self.parameters["D"]):
yt = [
yt[i - self.parameters["s"]] - yt[i]
for i in range(self.parameters["s"], len(yt))
]
yt.reverse()
result, j = 0, 1
for i in range(len(self.coef_.values["coefficient"])):
elem = self.coef_.values["predictor"][i]
if elem.lower() == "intercept":
result += self.coef_.values["coefficient"][i]
elif elem.lower()[0:2] == "ar":
nb = int(elem[2:])
result += self.coef_.values["coefficient"][i] * yt[-nb]
elif elem.lower()[0:2] == "ma":
nb = int(elem[2:])
result += self.coef_.values["coefficient"][i] * fepsilon(
yt[: -nb - 1]
)
else:
result += self.coef_.values["coefficient"][i] * L[-1][j]
j += 1
for i in range(0, self.parameters["d"] + 1):
for k in range(
0, max((self.parameters["D"] + 1) * min(1, self.parameters["s"]), 1)
):
if (k, i) != (0, 0):
comb_i_d = (
math.factorial(self.parameters["d"])
/ math.factorial(self.parameters["d"] - i)
/ math.factorial(i)
)
comb_k_D = (
math.factorial(self.parameters["D"])
/ math.factorial(self.parameters["D"] - k)
/ math.factorial(k)
)
result += (
(-1) ** (i + k + 1)
* comb_i_d
* comb_k_D
* yt_copy[-(i + self.parameters["s"] * k)]
)
return result
except:
return None
def fit(
self,
input_relation: Union[vDataFrame, str],
y: str,
ts: str,
X: list = [],
test_relation: Union[vDataFrame, str] = "",
):
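        # Trains the model with successive in-database linear regressions: the AR
        # terms are fitted first, then the MA terms are approximated from a long
        # autoregression on the residuals (at most 'papprox_ma' lags).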
check_types(
[
("input_relation", input_relation, [str, vDataFrame],),
("y", y, [str],),
("test_relation", test_relation, [str, vDataFrame],),
("ts", ts, [str],),
]
)
self.cursor = check_cursor(self.cursor, input_relation, True)[0]
does_model_exist(name=self.name, cursor=self.cursor, raise_error=True)
self.input_relation = (
input_relation
if isinstance(input_relation, str)
else input_relation.__genSQL__()
)
if isinstance(test_relation, vDataFrame):
self.test_relation = test_relation.__genSQL__()
elif test_relation:
self.test_relation = test_relation
else:
self.test_relation = self.input_relation
self.y, self.ts, self.deploy_predict_ = str_column(y), str_column(ts), ""
self.coef_ = tablesample({"predictor": [], "coefficient": []})
self.ma_avg_, self.ma_piq_ = None, None
X, schema = [str_column(elem) for elem in X], schema_relation(self.name)[0]
self.X, self.exogenous = [], X
relation = (
"(SELECT *, [VerticaPy_y] AS VerticaPy_y_copy FROM {}) VERTICAPY_SUBTABLE "
)
model = LinearRegression(
name=self.name,
solver=self.parameters["solver"],
max_iter=self.parameters["max_iter"],
tol=self.parameters["tol"],
)
if (
self.parameters["p"] == 0
and self.parameters["q"] == 0
and self.parameters["d"] == 0
and self.parameters["s"] == 0
and not (self.exogenous)
):
query = "SELECT AVG({}) FROM {}".format(self.y, self.input_relation)
self.ma_avg_ = self.cursor.execute(query).fetchone()[0]
self.deploy_predict_ = str(self.ma_avg_)
if self.parameters["d"] > 0:
for i in range(self.parameters["d"]):
relation = "(SELECT [VerticaPy_y] - LAG([VerticaPy_y], 1) OVER (ORDER BY [VerticaPy_ts]) AS [VerticaPy_y], VerticaPy_y_copy[VerticaPy_key_columns] FROM {}) VERTICAPY_SUBTABLE".format(
relation
)
if self.parameters["D"] > 0 and self.parameters["s"] > 0:
for i in range(self.parameters["D"]):
relation = "(SELECT [VerticaPy_y] - LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS [VerticaPy_y], VerticaPy_y_copy[VerticaPy_key_columns] FROM {}) VERTICAPY_SUBTABLE".format(
self.parameters["s"], relation
)
def drop_temp_elem(self, schema):
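            # Silently drops the temporary view used for the in-database regression,
            # ignoring any error if it does not exist.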
try:
with warnings.catch_warnings(record=True) as w:
drop(
"{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}".format(
schema, get_session(self.cursor)
),
cursor=self.cursor,
method="view",
)
except:
pass
if self.parameters["p"] > 0 or self.parameters["P"] > 0:
columns = [
"LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS AR{}".format(
i, i
)
for i in range(1, self.parameters["p"] + 1)
]
AR = ["AR{}".format(i) for i in range(1, self.parameters["p"] + 1)]
if self.parameters["s"] > 0:
for i in range(1, self.parameters["P"] + 1):
if (i * self.parameters["s"]) not in (
range(1, self.parameters["p"] + 1)
):
columns += [
"LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS AR{}".format(
i * self.parameters["s"], i * self.parameters["s"]
)
]
AR += ["AR{}".format(i * self.parameters["s"])]
relation = "(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE".format(
", ".join(columns), relation
)
drop_temp_elem(self, schema)
query = "CREATE VIEW {}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{} AS SELECT * FROM {}".format(
schema,
get_session(self.cursor),
relation.format(self.input_relation)
.replace("[VerticaPy_ts]", self.ts)
.replace("[VerticaPy_y]", self.y)
.replace("[VerticaPy_key_columns]", ", " + ", ".join([self.ts] + X)),
)
try:
self.cursor.execute(query)
self.X += AR + X
model.fit(
input_relation="{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}".format(
schema, get_session(self.cursor)
),
X=self.X,
y=self.y,
)
except:
drop_temp_elem(self, schema)
raise
drop_temp_elem(self, schema)
self.coef_.values["predictor"] = model.coef_.values["predictor"]
self.coef_.values["coefficient"] = model.coef_.values["coefficient"]
alphaq = model.coef_.values["coefficient"]
model.drop()
epsilon_final = (
"[VerticaPy_y] - "
+ str(alphaq[0])
+ " - "
+ " - ".join(
[
str(alphaq[i])
+ " * "
+ "LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts])".format(
i
)
for i in range(1, self.parameters["p"] + 1)
]
)
)
self.deploy_predict_ = (
str(alphaq[0])
+ " + "
+ " + ".join(
[
str(alphaq[i])
+ " * "
+ "LAG(VerticaPy_y_copy, {}) OVER (ORDER BY [VerticaPy_ts])".format(
i
)
for i in range(1, self.parameters["p"] + 1)
]
)
)
if self.parameters["s"] > 0 and self.parameters["P"] > 0:
epsilon_final += " - " + " - ".join(
[
str(alphaq[i])
+ " * "
+ "LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts])".format(
i * self.parameters["s"]
)
for i in range(
self.parameters["p"] + 1,
self.parameters["p"]
+ (self.parameters["P"] if self.parameters["s"] > 0 else 0)
+ 1,
)
]
)
self.deploy_predict_ += " + " + " + ".join(
[
str(alphaq[i])
+ " * "
+ "LAG(VerticaPy_y_copy, {}) OVER (ORDER BY [VerticaPy_ts])".format(
i * self.parameters["s"]
)
for i in range(
self.parameters["p"] + 1,
self.parameters["p"]
+ (self.parameters["P"] if self.parameters["s"] > 0 else 0)
+ 1,
)
]
)
for idx, elem in enumerate(X):
epsilon_final += " - {} * [X{}]".format(
alphaq[
idx
+ self.parameters["p"]
+ (self.parameters["P"] if self.parameters["s"] > 0 else 0)
+ 1
],
idx,
)
self.deploy_predict_ += " + {} * [X{}]".format(
alphaq[
idx
+ self.parameters["p"]
+ (self.parameters["P"] if self.parameters["s"] > 0 else 0)
+ 1
],
idx,
)
relation = "(SELECT {} AS [VerticaPy_y], {}, VerticaPy_y_copy[VerticaPy_key_columns] FROM {}) VERTICAPY_SUBTABLE".format(
epsilon_final, ", ".join(AR), relation
)
if self.parameters["q"] > 0 or (
self.parameters["Q"] > 0 and self.parameters["s"] > 0
):
transform_relation = relation.replace("[VerticaPy_y]", y).replace(
"[VerticaPy_ts]", ts
)
transform_relation = transform_relation.replace(
"[VerticaPy_key_columns]", ", " + ", ".join(X + [ts])
)
for idx, elem in enumerate(X):
transform_relation = transform_relation.replace(
"[X{}]".format(idx), elem
)
query = "SELECT COUNT(*), AVG({}) FROM {}".format(
self.y, transform_relation.format(self.input_relation)
)
result = self.cursor.execute(query).fetchone()
self.ma_avg_ = result[1]
n = result[0]
n = max(
max(
min(max(n ** (1.0 / 3.0), 8), self.parameters["papprox_ma"]),
self.parameters["q"],
),
self.parameters["Q"] * self.parameters["s"] + 1,
)
n = int(n)
columns = [
"LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS ARq{}".format(
i, i
)
for i in range(1, n)
]
ARq = ["ARq{}".format(i) for i in range(1, n)]
tmp_relation = "(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE".format(
", ".join(columns), relation
)
for idx, elem in enumerate(X):
tmp_relation = tmp_relation.replace("[X{}]".format(idx), elem)
drop_temp_elem(self, schema)
query = "CREATE VIEW {}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{} AS SELECT * FROM {}".format(
schema,
get_session(self.cursor),
tmp_relation.format(self.input_relation)
.replace("[VerticaPy_ts]", self.ts)
.replace("[VerticaPy_y]", self.y)
.replace("[VerticaPy_key_columns]", ", " + ", ".join([self.ts] + X)),
)
try:
self.cursor.execute(query)
model.fit(
input_relation="{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}".format(
schema, get_session(self.cursor)
),
X=ARq,
y=self.y,
)
except:
drop_temp_elem(self, schema)
raise
drop_temp_elem(self, schema)
if not (self.coef_.values["predictor"]):
self.coef_.values["predictor"] += ["Intercept"]
self.coef_.values["coefficient"] += [self.ma_avg_]
self.deploy_predict_ = str(self.ma_avg_)
alphaq = model.coef_.values["coefficient"][1:]
model.drop()
            thetaq, piq = [], [-1]
for j in range(0, len(alphaq)):
thetaq += [
sum([alphaq[j - i - 1] * thetaq[i] for i in range(0, j)])
+ alphaq[j]
]
for j in range(self.parameters["q"]):
self.coef_.values["predictor"] += ["ma{}".format(j + 1)]
self.coef_.values["coefficient"] += [thetaq[j]]
self.deploy_predict_ += " + {} * MA{}".format(thetaq[j], j + 1)
if self.parameters["s"] > 0:
for j in range(1, self.parameters["Q"] + 1):
self.coef_.values["predictor"] += [
"ma{}".format(self.parameters["s"] * j)
]
self.coef_.values["coefficient"] += [
thetaq[self.parameters["s"] * j - 1]
]
self.deploy_predict_ += " + {} * MA{}".format(
thetaq[self.parameters["s"] * j - 1], self.parameters["s"] * j
)
for j in range(0, self.parameters["max_pik"]):
piq_tmp = 0
for i in range(0, self.parameters["q"]):
if j - i > 0:
piq_tmp -= thetaq[i] * piq[j - i]
elif j - i == 0:
piq_tmp -= thetaq[i]
piq = piq + [piq_tmp]
self.ma_piq_ = tablesample({"coefficient": piq})
epsilon = (
"[VerticaPy_y] - "
+ str(self.ma_avg_)
+ " - "
+ " - ".join(
[
str((piq[i]))
+ " * "
+ "LAG([VerticaPy_y] - {}, {}) OVER (ORDER BY [VerticaPy_ts])".format(
self.ma_avg_, i
)
for i in range(1, self.parameters["max_pik"])
]
)
)
epsilon += " AS MA0"
relation = "(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE".format(
epsilon, relation
)
columns = [
"LAG(MA0, {}) OVER (ORDER BY [VerticaPy_ts]) AS MA{}".format(i, i)
for i in range(1, self.parameters["q"] + 1)
]
MA = ["MA{}".format(i) for i in range(1, self.parameters["q"] + 1)]
if self.parameters["s"] > 0:
columns += [
"LAG(MA0, {}) OVER (ORDER BY [VerticaPy_ts]) AS MA{}".format(
i * self.parameters["s"], i * self.parameters["s"]
)
for i in range(1, self.parameters["Q"] + 1)
]
MA += [
"MA{}".format(i * self.parameters["s"])
for i in range(1, self.parameters["Q"] + 1)
]
relation = "(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE".format(
", ".join(columns), relation
)
self.X += MA
transform_relation = relation.replace("[VerticaPy_y]", y).replace(
"[VerticaPy_ts]", ts
)
transform_relation = transform_relation.replace(
"[VerticaPy_key_columns]", ", " + ", ".join(X + [ts])
)
for idx, elem in enumerate(X):
transform_relation = transform_relation.replace(
"[X{}]".format(idx), elem
)
self.transform_relation = relation
model_save = {
"type": "SARIMAX",
"input_relation": self.input_relation,
"test_relation": self.test_relation,
"transform_relation": self.transform_relation,
"deploy_predict": self.deploy_predict_,
"ma_avg": self.ma_avg_,
"ma_piq": self.ma_piq_.values if (self.ma_piq_) else None,
"X": self.X,
"y": self.y,
"ts": self.ts,
"exogenous": self.exogenous,
"coef": self.coef_.values,
"p": self.parameters["p"],
"d": self.parameters["d"],
"q": self.parameters["q"],
"P": self.parameters["P"],
"D": self.parameters["D"],
"Q": self.parameters["Q"],
"s": self.parameters["s"],
"tol": self.parameters["tol"],
"max_iter": self.parameters["max_iter"],
"solver": self.parameters["solver"],
"max_pik": self.parameters["max_pik"],
"papprox_ma": self.parameters["papprox_ma"],
}
insert_verticapy_schema(
model_name=self.name,
model_type="SARIMAX",
model_save=model_save,
cursor=self.cursor,
)
return self
def plot(
self,
vdf: vDataFrame = None,
y: str = "",
ts: str = "",
X: list = [],
dynamic: bool = False,
one_step: bool = True,
observed: bool = True,
confidence: bool = True,
nlead: int = 10,
nlast: int = 0,
limit: int = 1000,
ax=None,
**style_kwds,
):
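        # Draws the observed series, the one-step-ahead forecast and, optionally,
        # a dynamic (recursive multi-step) forecast with approximate 95% bands.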
if not (vdf):
vdf = vdf_from_relation(relation=self.input_relation, cursor=self.cursor)
check_types(
[
("limit", limit, [int, float],),
("nlead", nlead, [int, float],),
("dynamic", dynamic, [bool],),
("observed", observed, [bool],),
("one_step", one_step, [bool],),
("confidence", confidence, [bool],),
("vdf", vdf, [vDataFrame],),
],
)
delta_limit, limit = (
limit,
max(
max(
limit,
self.parameters["p"] + 1 + nlast,
self.parameters["P"] * self.parameters["s"] + 1 + nlast,
),
200,
),
)
delta_limit = max(limit - delta_limit - nlast, 0)
assert dynamic or one_step or observed, ParameterError(
"No option selected.\n You should set either dynamic, one_step or observed to True."
)
assert nlead + nlast > 0 or not (dynamic), ParameterError(
"Dynamic Plots are only possible if either parameter 'nlead' is greater than 0 or parameter 'nlast' is greater than 0, and parameter 'dynamic' is set to True."
)
if dynamic:
assert not (self.exogenous), Exception(
"Dynamic Plots are only possible for SARIMA models (no exegenous variables), not SARIMAX."
)
if not (y):
y = self.y
if not (ts):
ts = self.ts
if not (X):
X = self.exogenous
result = self.predict(
vdf=vdf, y=y, ts=ts, X=X, nlead=0, name="_verticapy_prediction_"
)
error_eps = 1.96 * math.sqrt(self.score(method="mse"))
print_info = verticapy.options["print_info"]
verticapy.options["print_info"] = False
try:
result = (
result.select([ts, y, "_verticapy_prediction_"])
.dropna()
.sort([ts])
.tail(limit)
.values
)
except:
verticapy.options["print_info"] = print_info
raise
verticapy.options["print_info"] = print_info
columns = [elem for elem in result]
if isinstance(result[columns[0]][0], str):
result[columns[0]] = [parse(elem) for elem in result[columns[0]]]
true_value = [result[columns[0]], result[columns[1]]]
one_step_ahead = [result[columns[0]], result[columns[2]]]
lower_osa, upper_osa = (
[
float(elem) - error_eps if elem != None else None
for elem in one_step_ahead[1]
],
[
float(elem) + error_eps if elem != None else None
for elem in one_step_ahead[1]
],
)
if dynamic:
deltat = result[columns[0]][-1] - result[columns[0]][-2]
lead_time_list = []
if nlast > 0:
lead_list = [[elem] for elem in result[columns[1]][:-nlast]]
else:
lead_list = [[elem] for elem in result[columns[1]]]
for i in range(nlast):
lead_list += [[self.fpredict(lead_list)]]
lead_time_list += [result[columns[0]][i - nlast]]
if lead_time_list:
start_time = lead_time_list[-1]
else:
start_time = result[columns[0]][-1]
for i in range(nlead):
lead_list += [[self.fpredict(lead_list)]]
lead_time_list += [start_time + (i + 1) * deltat]
dynamic_forecast = (
[result[columns[0]][-nlast - 1]] + lead_time_list,
[result[columns[1]][-nlast - 1]]
+ [elem[0] for elem in lead_list[-nlast - nlead :]],
)
lower_d, upper_d = [], []
for i in range(len(dynamic_forecast[1])):
if (
self.parameters["s"] > 0
and self.parameters["p"] == 0
and self.parameters["d"] == 0
and self.parameters["q"] == 0
):
delta_error = error_eps * math.sqrt(
int(i / self.parameters["s"]) + 1
)
else:
delta_error = error_eps * math.sqrt(i + 1)
lower_d += [float(dynamic_forecast[1][i]) - delta_error]
upper_d += [float(dynamic_forecast[1][i]) + delta_error]
else:
lower_d, upper_d, dynamic_forecast = [], [], ([], [])
alpha = 0.3
if not (ax):
fig, ax = plt.subplots()
if isnotebook():
fig.set_size_inches(10, 6)
ax.grid()
colors = gen_colors()
param1 = {
"color": colors[2],
"linewidth": 2,
}
param2 = {
"color": colors[3],
"linewidth": 2,
"linestyle": ":",
}
param3 = {
"color": colors[0],
"linewidth": 2,
"linestyle": "dashed",
}
if dynamic:
ax.fill_between(
dynamic_forecast[0],
1.02
* float(min(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
1.02
* float(max(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
alpha=0.04,
color=updated_dict(param3, style_kwds, 2)["color"],
)
if confidence:
ax.fill_between(
dynamic_forecast[0], lower_d, upper_d, alpha=0.08, color="#555555"
)
ax.plot(dynamic_forecast[0], lower_d, alpha=0.08, color="#000000")
ax.plot(dynamic_forecast[0], upper_d, alpha=0.08, color="#000000")
ax.plot(
dynamic_forecast[0],
dynamic_forecast[1],
label="Dynamic Forecast",
**updated_dict(param3, style_kwds, 2),
)
if one_step:
if confidence:
ax.fill_between(
one_step_ahead[0][delta_limit:],
lower_osa[delta_limit:],
upper_osa[delta_limit:],
alpha=0.04,
color="#555555",
)
ax.plot(
one_step_ahead[0][delta_limit:],
lower_osa[delta_limit:],
alpha=0.04,
color="#000000",
)
ax.plot(
one_step_ahead[0][delta_limit:],
upper_osa[delta_limit:],
alpha=0.04,
color="#000000",
)
ax.plot(
one_step_ahead[0][delta_limit:],
one_step_ahead[1][delta_limit:],
label="One-step ahead Forecast",
**updated_dict(param2, style_kwds, 1),
)
if observed:
ax.plot(
true_value[0][delta_limit:],
true_value[1][delta_limit:],
label="Observed",
**updated_dict(param1, style_kwds, 0),
)
ax.set_title(
"SARIMAX({},{},{})({},{},{})_{}".format(
self.parameters["p"],
self.parameters["d"],
self.parameters["q"],
self.parameters["P"],
self.parameters["D"],
self.parameters["Q"],
self.parameters["s"],
)
)
ax.set_xlabel(ts)
ax.legend(loc="center left", bbox_to_anchor=[1, 0.5])
ax.set_ylim(
1.02 * float(min(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
1.02 * float(max(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
)
for tick in ax.get_xticklabels():
tick.set_rotation(90)
return ax
def predict(
self,
vdf: vDataFrame,
y: str = "",
ts: str = "",
X: list = [],
nlead: int = 0,
name: str = "",
):
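        # Adds the model prediction as a new vcolumn of 'vdf'; when nlead > 0,
        # future timestamps are appended and forecast one step at a time.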
check_types(
[
("name", name, [str],),
("y", y, [str],),
("ts", ts, [str],),
("X", X, [list],),
("nlead", nlead, [int, float],),
("vdf", vdf, [vDataFrame],),
],
)
if not (y):
y = self.y
if not (ts):
ts = self.ts
if not (X):
X = self.exogenous
columns_check([y, ts], vdf)
y, ts = vdf_columns_names([y, ts], vdf)
name = (
"{}_".format(self.type) + "".join(ch for ch in self.name if ch.isalnum())
if not (name)
else name
)
key_columns = ", " + ", ".join(vdf.get_columns(exclude_columns=[y]))
transform_relation = self.transform_relation.replace(
"[VerticaPy_y]", y
).replace("[VerticaPy_ts]", ts)
transform_relation = transform_relation.replace(
"[VerticaPy_key_columns]", key_columns
)
predictSQL = self.deploySQL().replace("[VerticaPy_y]", y).replace(
"[VerticaPy_ts]", ts
) + " AS {}".format(name)
for idx, elem in enumerate(X):
transform_relation = transform_relation.replace("[X{}]".format(idx), elem)
predictSQL = predictSQL.replace("[X{}]".format(idx), elem)
columns = (
vdf.get_columns(exclude_columns=[y])
+ [predictSQL]
+ ["VerticaPy_y_copy AS {}".format(y)]
)
relation = vdf.__genSQL__()
for i in range(nlead):
query = "SELECT ({} - LAG({}, 1) OVER (ORDER BY {}))::VARCHAR FROM {} ORDER BY {} DESC LIMIT 1".format(
ts, ts, ts, relation, ts
)
deltat = vdf._VERTICAPY_VARIABLES_["cursor"].execute(query).fetchone()[0]
query = "SELECT (MAX({}) + '{}'::interval)::VARCHAR FROM {}".format(
ts, deltat, relation
)
next_t = vdf._VERTICAPY_VARIABLES_["cursor"].execute(query).fetchone()[0]
if i == 0:
first_t = next_t
new_line = "SELECT '{}'::TIMESTAMP AS {}, {}".format(
next_t,
ts,
", ".join(
[
"NULL AS {}".format(column)
for column in vdf.get_columns(exclude_columns=[ts])
]
),
)
relation_tmp = "(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE".format(
", ".join([ts] + vdf.get_columns(exclude_columns=[ts])),
relation,
new_line,
)
query = "SELECT {} FROM {} ORDER BY {} DESC LIMIT 1".format(
self.deploySQL()
.replace("[VerticaPy_y]", y)
.replace("[VerticaPy_ts]", ts),
transform_relation.format(relation_tmp),
ts,
)
prediction = (
vdf._VERTICAPY_VARIABLES_["cursor"].execute(query).fetchone()[0]
)
columns_tmp = vdf.get_columns(exclude_columns=[ts, y])
new_line = "SELECT '{}'::TIMESTAMP AS {}, {} AS {} {}".format(
next_t,
ts,
prediction,
y,
(", " if (columns_tmp) else "")
+ ", ".join(["NULL AS {}".format(column) for column in columns_tmp]),
)
relation = "(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE".format(
", ".join([ts, y] + vdf.get_columns(exclude_columns=[ts, y])),
relation,
new_line,
)
final_relation = "(SELECT {} FROM {}) VERTICAPY_SUBTABLE".format(
", ".join(columns), transform_relation.format(relation)
)
result = vdf_from_relation(final_relation, "SARIMAX", self.cursor,)
if nlead > 0:
result[y].apply(
"CASE WHEN {} >= '{}' THEN NULL ELSE {} END".format(ts, first_t, "{}")
)
return result
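# Minimal usage sketch for SARIMAX (illustrative only: the cursor `cur`, the table
# "public.sales_ts" and its columns "dt"/"sales" are assumptions, not part of this module):
#
#     model = SARIMAX("public.sarimax_sales", cursor=cur, p=1, d=1, q=1, P=1, D=1, Q=1, s=12)
#     model.fit("public.sales_ts", y="sales", ts="dt")
#     model.plot(nlead=12)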
class VAR(Regressor):
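    # Vertica-based Vector AutoRegression of order p: each column of X is regressed
    # in-database on the p lags of every column.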
def __init__(
self,
name: str,
cursor=None,
p: int = 1,
tol: float = 1e-4,
max_iter: int = 1000,
solver: str = "Newton",
):
check_types([("name", name, [str],)])
self.type, self.name = "VAR", name
assert p > 0, ParameterError(
"Parameter 'p' must be greater than 0 to build a VAR model."
)
self.set_params(
{"p": p, "tol": tol, "max_iter": max_iter, "solver": solver,}
)
cursor = check_cursor(cursor)[0]
self.cursor = cursor
version(cursor=cursor, condition=[8, 0, 0])
def deploySQL(self):
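        # Returns one SQL prediction expression per response column, in the same
        # order as self.X.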
sql = []
for idx, coefs in enumerate(self.coef_):
coefs_tmp = coefs.values["coefficient"]
predictors_tmp = coefs.values["predictor"]
sql += [
str(coefs_tmp[0])
+ " + "
+ " + ".join(
[
str(coefs_tmp[i]) + " * " + str(predictors_tmp[i])
for i in range(1, len(coefs_tmp))
]
)
]
return sql
def features_importance(
self, X_idx: int = 0, ax=None, show: bool = True, **style_kwds,
):
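        # Importance of each lagged predictor for the regression on X[X_idx]:
        # |coefficient| scaled by the predictor's range, normalized to percentages.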
check_types([("X_idx", X_idx, [int, float, str],), ("show", show, [bool],),],)
if isinstance(X_idx, str):
X_idx = str_column(X_idx).lower()
for idx, elem in enumerate(self.X):
if str_column(elem).lower() == X_idx:
X_idx = idx
break
assert (
isinstance(X_idx, (float, int)) and len(self.X) > X_idx >= 0
), ParameterError(
"The index of the vcolumn to draw 'X_idx' must be between 0 and {}. It can also be the name of a predictor vcolumn.".format(
len(self.X)
)
)
relation = self.transform_relation.replace("[VerticaPy_ts]", self.ts).format(
self.test_relation
)
for idx, elem in enumerate(self.X):
relation = relation.replace("[X{}]".format(idx), elem)
min_max = (
vdf_from_relation(relation=self.input_relation, cursor=self.cursor)
.agg(func=["min", "max"], columns=self.X)
.transpose()
)
coefficient = self.coef_[X_idx].values
coeff_importances = {}
coeff_sign = {}
for idx, coef in enumerate(coefficient["predictor"]):
if idx > 0:
predictor = int(coef.split("_")[0].replace("ar", ""))
predictor = str_column(self.X[predictor])
minimum, maximum = min_max[predictor]
val = coefficient["coefficient"][idx]
coeff_importances[coef] = abs(val) * (maximum - minimum)
coeff_sign[coef] = 1 if val >= 0 else -1
total = sum([coeff_importances[elem] for elem in coeff_importances])
for elem in coeff_importances:
coeff_importances[elem] = 100 * coeff_importances[elem] / total
if show:
plot_importance(
coeff_importances, coeff_sign, print_legend=True, ax=ax, **style_kwds,
)
importances = {"index": ["importance", "sign"]}
for elem in coeff_importances:
importances[elem] = [coeff_importances[elem], coeff_sign[elem]]
return tablesample(values=importances).transpose()
def fit(
self,
input_relation: Union[vDataFrame, str],
X: list,
ts: str,
test_relation: Union[vDataFrame, str] = "",
):
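        # Builds a temporary view holding the AR{i}_{j} lag columns, then fits one
        # in-database linear regression per column of X.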
check_types(
[
("input_relation", input_relation, [str, vDataFrame],),
("X", X, [list],),
("ts", ts, [str],),
("test_relation", test_relation, [str, vDataFrame],),
]
)
self.cursor = check_cursor(self.cursor, input_relation, True)[0]
does_model_exist(name=self.name, cursor=self.cursor, raise_error=True)
self.input_relation = (
input_relation
if isinstance(input_relation, str)
else input_relation.__genSQL__()
)
if isinstance(test_relation, vDataFrame):
self.test_relation = test_relation.__genSQL__()
elif test_relation:
self.test_relation = test_relation
else:
self.test_relation = self.input_relation
self.ts, self.deploy_predict_ = str_column(ts), []
self.X, schema = [str_column(elem) for elem in X], schema_relation(self.name)[0]
model = LinearRegression(
name=self.name,
solver=self.parameters["solver"],
max_iter=self.parameters["max_iter"],
tol=self.parameters["tol"],
)
columns, AR = [], []
for idx, elem in enumerate(self.X):
for i in range(1, self.parameters["p"] + 1):
columns += [
"LAG([X{}], {}) OVER (ORDER BY [VerticaPy_ts]) AS AR{}_{}".format(
idx, i, idx, i
)
]
AR += ["AR{}_{}".format(idx, i)]
self.transform_relation = "(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE".format(
", ".join(columns), "{}"
)
relation = self.transform_relation.replace("[VerticaPy_ts]", self.ts).format(
self.input_relation
)
for idx, elem in enumerate(self.X):
relation = relation.replace("[X{}]".format(idx), elem)
def drop_temp_elem(self, schema):
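            # Silently drops the temporary view used for the in-database regression,
            # ignoring any error if it does not exist.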
try:
with warnings.catch_warnings(record=True) as w:
drop(
"{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}".format(
schema, get_session(self.cursor)
),
cursor=self.cursor,
method="view",
)
except:
pass
drop_temp_elem(self, schema)
try:
query = "CREATE VIEW {}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{} AS SELECT * FROM {}".format(
schema, get_session(self.cursor), relation
)
self.cursor.execute(query)
self.coef_ = []
for elem in X:
model.fit(
input_relation="{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}".format(
schema, get_session(self.cursor)
),
X=AR,
y=elem,
)
self.coef_ += [model.coef_]
model.drop()
except:
drop_temp_elem(self, schema)
raise
drop_temp_elem(self, schema)
model_save = {
"type": "VAR",
"input_relation": self.input_relation,
"test_relation": self.test_relation,
"transform_relation": self.transform_relation,
"deploy_predict": self.deploy_predict_,
"X": self.X,
"ts": self.ts,
"p": self.parameters["p"],
"tol": self.parameters["tol"],
"max_iter": self.parameters["max_iter"],
"solver": self.parameters["solver"],
}
for idx, elem in enumerate(self.coef_):
model_save["coef_{}".format(idx)] = elem.values
insert_verticapy_schema(
model_name=self.name,
model_type="VAR",
model_save=model_save,
cursor=self.cursor,
)
return self
def fpredict(self, L: list):
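        # Python-side one-step prediction for every column of X. L is the list of
        # past rows (one value per column), newest last; returns None on failure.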
try:
result = []
result_tmp = 0
for i in range(len(self.X)):
result_tmp = 0
for j in range(len(self.coef_[i].values["coefficient"])):
elem = self.coef_[i].values["predictor"][j]
if elem.lower() == "intercept":
result_tmp += self.coef_[i].values["coefficient"][j]
else:
ni, nj = elem[2:].split("_")
ni, nj = int(ni), int(nj)
result_tmp += (
self.coef_[i].values["coefficient"][j] * L[-nj][ni]
)
result += [result_tmp]
return result
except:
return None
def plot(
self,
vdf: vDataFrame = None,
X: list = [],
ts: str = "",
X_idx: int = 0,
dynamic: bool = False,
one_step: bool = True,
observed: bool = True,
confidence: bool = True,
nlead: int = 10,
nlast: int = 0,
limit: int = 1000,
ax=None,
**style_kwds,
):
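        # Same plotting logic as SARIMAX.plot, applied to the single response
        # column selected by 'X_idx'.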
if not (vdf):
vdf = vdf_from_relation(relation=self.input_relation, cursor=self.cursor)
check_types(
[
("limit", limit, [int, float],),
("nlead", nlead, [int, float],),
("X_idx", X_idx, [int, float, str],),
("dynamic", dynamic, [bool],),
("observed", observed, [bool],),
("one_step", one_step, [bool],),
("confidence", confidence, [bool],),
("vdf", vdf, [vDataFrame],),
],
)
delta_limit, limit = (
limit,
max(max(limit, self.parameters["p"] + 1 + nlast), 200),
)
delta_limit = max(limit - delta_limit - nlast, 0)
if not (ts):
ts = self.ts
if not (X):
X = self.X
assert dynamic or one_step or observed, ParameterError(
"No option selected.\n You should set either dynamic, one_step or observed to True."
)
assert nlead + nlast > 0 or not (dynamic), ParameterError(
"Dynamic Plots are only possible if either parameter 'nlead' is greater than 0 or parameter 'nlast' is greater than 0, and parameter 'dynamic' is set to True."
)
if isinstance(X_idx, str):
X_idx = str_column(X_idx).lower()
for idx, elem in enumerate(X):
if str_column(elem).lower() == X_idx:
X_idx = idx
break
assert (
isinstance(X_idx, (float, int)) and len(self.X) > X_idx >= 0
), ParameterError(
"The index of the vcolumn to draw 'X_idx' must be between 0 and {}. It can also be the name of a predictor vcolumn.".format(
len(self.X)
)
)
result_all = self.predict(
vdf=vdf,
X=X,
ts=ts,
nlead=0,
name=[
"_verticapy_prediction_{}_".format(idx) for idx in range(len(self.X))
],
)
y, prediction = X[X_idx], "_verticapy_prediction_{}_".format(X_idx)
error_eps = 1.96 * math.sqrt(self.score(method="mse").values["mse"][X_idx])
print_info = verticapy.options["print_info"]
verticapy.options["print_info"] = False
try:
result = (
result_all.select([ts, y, prediction])
.dropna()
.sort([ts])
.tail(limit)
.values
)
except:
verticapy.options["print_info"] = print_info
raise
verticapy.options["print_info"] = print_info
columns = [elem for elem in result]
if isinstance(result[columns[0]][0], str):
result[columns[0]] = [parse(elem) for elem in result[columns[0]]]
true_value = [result[columns[0]], result[columns[1]]]
one_step_ahead = [result[columns[0]], result[columns[2]]]
lower_osa, upper_osa = (
[
float(elem) - error_eps if elem != None else None
for elem in one_step_ahead[1]
],
[
float(elem) + error_eps if elem != None else None
for elem in one_step_ahead[1]
],
)
if dynamic:
print_info = verticapy.options["print_info"]
verticapy.options["print_info"] = False
try:
result = (
result_all.select([ts] + X).dropna().sort([ts]).tail(limit).values
)
except:
verticapy.options["print_info"] = print_info
raise
verticapy.options["print_info"] = print_info
columns = [elem for elem in result]
if isinstance(result[columns[0]][0], str):
result[columns[0]] = [parse(elem) for elem in result[columns[0]]]
deltat = result[columns[0]][-1] - result[columns[0]][-2]
lead_time_list, lead_list = [], []
if nlast > 0:
for i in range(len(result[columns[0]][:-nlast])):
lead_list += [[result[elem][i] for elem in columns[1:]]]
else:
for i in range(len(result[columns[0]])):
lead_list += [[result[elem][i] for elem in columns[1:]]]
for i in range(nlast):
lead_list += [self.fpredict(lead_list)]
lead_time_list += [result[columns[0]][i - nlast]]
if lead_time_list:
start_time = lead_time_list[-1]
else:
start_time = result[columns[0]][-1]
for i in range(nlead):
lead_list += [self.fpredict(lead_list)]
lead_time_list += [start_time + (i + 1) * deltat]
dynamic_forecast = (
[result[columns[0]][-nlast - 1]] + lead_time_list,
[result[columns[1 + X_idx]][-nlast - 1]]
+ [elem[X_idx] for elem in lead_list[-nlast - nlead :]],
)
lower_d, upper_d = [], []
for i in range(len(dynamic_forecast[1])):
delta_error = error_eps * math.sqrt(i + 1)
lower_d += [float(dynamic_forecast[1][i]) - delta_error]
upper_d += [float(dynamic_forecast[1][i]) + delta_error]
else:
lower_d, upper_d, dynamic_forecast = [], [], ([], [])
alpha = 0.3
if not (ax):
fig, ax = plt.subplots()
if isnotebook():
fig.set_size_inches(10, 6)
ax.grid()
colors = gen_colors()
param1 = {
"color": colors[2],
"linewidth": 2,
}
param2 = {
"color": colors[3],
"linewidth": 2,
"linestyle": ":",
}
param3 = {
"color": colors[0],
"linewidth": 2,
"linestyle": "dashed",
}
if dynamic:
ax.fill_between(
dynamic_forecast[0],
1.02
* float(min(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
1.02
* float(max(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
alpha=0.04,
color=updated_dict(param3, style_kwds, 2)["color"],
)
if confidence:
ax.fill_between(
dynamic_forecast[0], lower_d, upper_d, alpha=0.08, color="#555555"
)
ax.plot(dynamic_forecast[0], lower_d, alpha=0.08, color="#000000")
ax.plot(dynamic_forecast[0], upper_d, alpha=0.08, color="#000000")
ax.plot(
dynamic_forecast[0],
dynamic_forecast[1],
label="Dynamic Forecast",
**updated_dict(param3, style_kwds, 2),
)
if one_step:
if confidence:
ax.fill_between(
one_step_ahead[0][delta_limit:],
lower_osa[delta_limit:],
upper_osa[delta_limit:],
alpha=0.04,
color="#555555",
)
ax.plot(
one_step_ahead[0][delta_limit:],
lower_osa[delta_limit:],
alpha=0.04,
color="#000000",
)
ax.plot(
one_step_ahead[0][delta_limit:],
upper_osa[delta_limit:],
alpha=0.04,
color="#000000",
)
ax.plot(
one_step_ahead[0][delta_limit:],
one_step_ahead[1][delta_limit:],
label="One-step ahead Forecast",
**updated_dict(param2, style_kwds, 1),
)
if observed:
ax.plot(
true_value[0][delta_limit:],
true_value[1][delta_limit:],
label="Observed",
**updated_dict(param1, style_kwds, 0),
)
ax.set_title("VAR({}) [{}]".format(self.parameters["p"], y))
ax.set_xlabel(ts)
ax.legend(loc="center left", bbox_to_anchor=[1, 0.5])
ax.set_ylim(
1.02 * float(min(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
1.02 * float(max(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
)
for tick in ax.get_xticklabels():
tick.set_rotation(90)
return ax
def predict(
self,
vdf: vDataFrame,
X: list = [],
ts: str = "",
nlead: int = 0,
name: list = [],
):
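        # Adds one prediction vcolumn per column of X; when nlead > 0 the forecast
        # is extended recursively past the last available timestamp.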
check_types(
[
("name", name, [list],),
("ts", ts, [str],),
("nlead", nlead, [int, float],),
("X", X, [list],),
("vdf", vdf, [vDataFrame],),
],
)
if not (ts):
ts = self.ts
if not (X):
X = self.X
columns_check(X + [ts], vdf)
X = vdf_columns_names(X, vdf)
ts = vdf_columns_names([ts], vdf)[0]
all_pred, names = [], []
transform_relation = self.transform_relation.replace("[VerticaPy_ts]", self.ts)
for idx, elem in enumerate(X):
name_tmp = (
"{}_".format(self.type) + "".join(ch for ch in elem if ch.isalnum())
if len(name) != len(X)
else name[idx]
)
all_pred += ["{} AS {}".format(self.deploySQL()[idx], name_tmp)]
transform_relation = transform_relation.replace("[X{}]".format(idx), elem)
columns = vdf.get_columns() + all_pred
relation = vdf.__genSQL__()
for i in range(nlead):
query = "SELECT ({} - LAG({}, 1) OVER (ORDER BY {}))::VARCHAR FROM {} ORDER BY {} DESC LIMIT 1".format(
ts, ts, ts, relation, ts
)
deltat = vdf._VERTICAPY_VARIABLES_["cursor"].execute(query).fetchone()[0]
query = "SELECT (MAX({}) + '{}'::interval)::VARCHAR FROM {}".format(
ts, deltat, relation
)
next_t = vdf._VERTICAPY_VARIABLES_["cursor"].execute(query).fetchone()[0]
if i == 0:
first_t = next_t
new_line = "SELECT '{}'::TIMESTAMP AS {}, {}".format(
next_t,
ts,
", ".join(
[
"NULL AS {}".format(column)
for column in vdf.get_columns(exclude_columns=[ts])
]
),
)
relation_tmp = "(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE".format(
", ".join([ts] + vdf.get_columns(exclude_columns=[ts])),
relation,
new_line,
)
query = "SELECT {} FROM {} ORDER BY {} DESC LIMIT 1".format(
", ".join(self.deploySQL()), transform_relation.format(relation_tmp), ts
)
prediction = vdf._VERTICAPY_VARIABLES_["cursor"].execute(query).fetchone()
for idx, elem in enumerate(X):
prediction[idx] = "{} AS {}".format(prediction[idx], elem)
columns_tmp = vdf.get_columns(exclude_columns=[ts] + X)
new_line = "SELECT '{}'::TIMESTAMP AS {}, {} {}".format(
next_t,
ts,
", ".join(prediction),
(", " if (columns_tmp) else "")
+ ", ".join(["NULL AS {}".format(column) for column in columns_tmp]),
)
relation = "(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE".format(
", ".join([ts] + X + vdf.get_columns(exclude_columns=[ts] + X)),
relation,
new_line,
)
final_relation = "(SELECT {} FROM {}) VERTICAPY_SUBTABLE".format(
", ".join(columns), transform_relation.format(relation)
)
result = vdf_from_relation(final_relation, "VAR", self.cursor,)
if nlead > 0:
for elem in X:
result[elem].apply(
"CASE WHEN {} >= '{}' THEN NULL ELSE {} END".format(
ts, first_t, "{}"
)
)
return result
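# Minimal usage sketch for VAR (illustrative only: the cursor `cur`, the table
# "public.macro_ts" and its columns "dt"/"gdp"/"cpi" are assumptions, not part of this module):
#
#     model = VAR("public.var_macro", cursor=cur, p=2)
#     model.fit("public.macro_ts", X=["gdp", "cpi"], ts="dt")
#     model.plot(X_idx="gdp", nlead=8)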
| true
| true
|
790e24a58f0f165be07f8d3ab876fe28026896ef
| 2,501
|
py
|
Python
|
tensorflow_io/hadoop/python/ops/hadoop_dataset_ops.py
|
jiachengxu/io
|
0ef0f21193d7a48c50f8cddeaa1f0fb3056040ea
|
[
"Apache-2.0"
] | 1
|
2019-10-10T06:11:23.000Z
|
2019-10-10T06:11:23.000Z
|
tensorflow_io/hadoop/python/ops/hadoop_dataset_ops.py
|
jiachengxu/io
|
0ef0f21193d7a48c50f8cddeaa1f0fb3056040ea
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_io/hadoop/python/ops/hadoop_dataset_ops.py
|
jiachengxu/io
|
0ef0f21193d7a48c50f8cddeaa1f0fb3056040ea
|
[
"Apache-2.0"
] | 1
|
2019-10-10T06:11:24.000Z
|
2019-10-10T06:11:24.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SequenceFile Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow import dtypes
from tensorflow.compat.v1 import data
from tensorflow_io.core.python.ops import _load_library
hadoop_ops = _load_library('_hadoop_ops.so')
class SequenceFileDataset(data.Dataset):
"""A Sequence File Dataset that reads the sequence file."""
def __init__(self, filenames):
"""Create a `SequenceFileDataset`.
    `SequenceFileDataset` allows a user to read data from a Hadoop sequence
    file, which consists of a sequence of (key, value) pairs. At the moment,
    `org.apache.hadoop.io.Text` is the only supported serialization type,
    and there is no compression support.
For example:
```python
dataset = SequenceFileDataset("/foo/bar.seq")
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
# Prints the (key, value) pairs inside a hadoop sequence file.
while True:
try:
print(sess.run(next_element))
except tf.errors.OutOfRangeError:
break
```
Args:
filenames: A `tf.string` tensor containing one or more filenames.
"""
self._filenames = tf.convert_to_tensor(
filenames, dtype=dtypes.string, name="filenames")
super(SequenceFileDataset, self).__init__()
def _inputs(self):
return []
def _as_variant_tensor(self):
return hadoop_ops.sequence_file_dataset(
self._filenames, (dtypes.string, dtypes.string))
@property
def output_classes(self):
return tf.Tensor, tf.Tensor
@property
def output_shapes(self):
return (tf.TensorShape([]), tf.TensorShape([]))
@property
def output_types(self):
return dtypes.string, dtypes.string
| 32.480519
| 80
| 0.708517
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow import dtypes
from tensorflow.compat.v1 import data
from tensorflow_io.core.python.ops import _load_library
hadoop_ops = _load_library('_hadoop_ops.so')
class SequenceFileDataset(data.Dataset):
def __init__(self, filenames):
self._filenames = tf.convert_to_tensor(
filenames, dtype=dtypes.string, name="filenames")
super(SequenceFileDataset, self).__init__()
def _inputs(self):
return []
def _as_variant_tensor(self):
return hadoop_ops.sequence_file_dataset(
self._filenames, (dtypes.string, dtypes.string))
@property
def output_classes(self):
return tf.Tensor, tf.Tensor
@property
def output_shapes(self):
return (tf.TensorShape([]), tf.TensorShape([]))
@property
def output_types(self):
return dtypes.string, dtypes.string
| true
| true
|
790e257dcd48872f99f2a5b30ace865b4698aae7
| 2,177
|
py
|
Python
|
kchart/charts/views.py
|
pmrowla/kchart
|
23c2003281614ce6d2de38ca977c53ce631507e4
|
[
"MIT"
] | 2
|
2016-09-02T05:59:16.000Z
|
2021-05-08T01:21:49.000Z
|
kchart/charts/views.py
|
pmrowla/kchart
|
23c2003281614ce6d2de38ca977c53ce631507e4
|
[
"MIT"
] | null | null | null |
kchart/charts/views.py
|
pmrowla/kchart
|
23c2003281614ce6d2de38ca977c53ce631507e4
|
[
"MIT"
] | 1
|
2016-09-02T05:59:16.000Z
|
2016-09-02T05:59:16.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from datetime import datetime
from django.contrib import messages
from django.db.models import Count
from django.views.generic import (
DetailView,
TemplateView,
)
from .models import (
AggregateHourlySongChart,
HourlySongChart,
HourlySongChartEntry,
Song,
)
from .utils import KR_TZ
class HourlySongChartView(DetailView):
template_name = 'charts/hourlysongchart_detail.html'
def _get_hour(self, msg=False):
chart_date = self.request.GET.get('date', None)
if chart_date:
try:
hour = self.request.GET.get('hour', '00')
return KR_TZ.localize(datetime.strptime('{}{}'.format(chart_date, hour), '%Y%m%d%H'))
except ValueError:
if msg:
messages.error(self.request, 'Invalid date/hour parameters.')
return AggregateHourlySongChart.objects.latest('hour').hour.astimezone(KR_TZ)
def get_context_data(self, **kwargs):
context = super(HourlySongChartView, self).get_context_data(**kwargs)
context['hour'] = self._get_hour()
return context
def get_object(self):
hour = self._get_hour(msg=True)
return AggregateHourlySongChart.get_cached_chart(hour)
class StatsView(TemplateView):
template_name = 'charts/stats.html'
def get_context_data(self, **kwargs):
context = super(StatsView, self).get_context_data(**kwargs)
for slug in ['melon', 'genie', 'bugs', 'mnet']:
context['{}_earliest'.format(slug)] = HourlySongChart.objects.filter(
chart__service__slug=slug).earliest('hour').hour
context['song_count'] = HourlySongChartEntry.objects.aggregate(
song_count=Count('song', distinct=True))['song_count']
context['artist_count'] = HourlySongChartEntry.objects.aggregate(
artist_count=Count('song__artists', distinct=True))['artist_count']
context['album_count'] = HourlySongChartEntry.objects.aggregate(
album_count=Count('song__album', distinct=True))['album_count']
return context
| 34.555556
| 101
| 0.66881
|
from __future__ import absolute_import, unicode_literals
from datetime import datetime
from django.contrib import messages
from django.db.models import Count
from django.views.generic import (
DetailView,
TemplateView,
)
from .models import (
AggregateHourlySongChart,
HourlySongChart,
HourlySongChartEntry,
Song,
)
from .utils import KR_TZ
class HourlySongChartView(DetailView):
template_name = 'charts/hourlysongchart_detail.html'
def _get_hour(self, msg=False):
chart_date = self.request.GET.get('date', None)
if chart_date:
try:
hour = self.request.GET.get('hour', '00')
return KR_TZ.localize(datetime.strptime('{}{}'.format(chart_date, hour), '%Y%m%d%H'))
except ValueError:
if msg:
messages.error(self.request, 'Invalid date/hour parameters.')
return AggregateHourlySongChart.objects.latest('hour').hour.astimezone(KR_TZ)
def get_context_data(self, **kwargs):
context = super(HourlySongChartView, self).get_context_data(**kwargs)
context['hour'] = self._get_hour()
return context
def get_object(self):
hour = self._get_hour(msg=True)
return AggregateHourlySongChart.get_cached_chart(hour)
class StatsView(TemplateView):
template_name = 'charts/stats.html'
def get_context_data(self, **kwargs):
context = super(StatsView, self).get_context_data(**kwargs)
for slug in ['melon', 'genie', 'bugs', 'mnet']:
context['{}_earliest'.format(slug)] = HourlySongChart.objects.filter(
chart__service__slug=slug).earliest('hour').hour
context['song_count'] = HourlySongChartEntry.objects.aggregate(
song_count=Count('song', distinct=True))['song_count']
context['artist_count'] = HourlySongChartEntry.objects.aggregate(
artist_count=Count('song__artists', distinct=True))['artist_count']
context['album_count'] = HourlySongChartEntry.objects.aggregate(
album_count=Count('song__album', distinct=True))['album_count']
return context
| true
| true
|
790e259abafc3b78efd22c4e49725337604761c5
| 55
|
py
|
Python
|
src/__init__.py
|
codespacedot/CodeSpaceAPI
|
22b457088aa592c4fb9111718810075d2643d9ca
|
[
"Apache-2.0"
] | 3
|
2021-07-05T17:28:14.000Z
|
2021-12-07T10:08:14.000Z
|
src/__init__.py
|
git-vish/CodeSpaceAPI
|
7ad4327e0eef3019098730358c4a23312bc85615
|
[
"Apache-2.0"
] | 2
|
2021-07-29T13:55:15.000Z
|
2021-07-31T16:49:03.000Z
|
src/__init__.py
|
git-vish/CodeSpaceAPI
|
7ad4327e0eef3019098730358c4a23312bc85615
|
[
"Apache-2.0"
] | 3
|
2021-07-01T16:32:20.000Z
|
2021-07-05T04:50:30.000Z
|
"""FastAPI Project for CodeSpace.
https://csdot.ml
"""
| 13.75
| 33
| 0.690909
| true
| true
|
|
790e25c4d5b74218ebf3024bbf700dd23d8d987e
| 229
|
py
|
Python
|
about/views.py
|
nicolas-costa/univ-pweb2-django-institutional
|
be61d1b02a207dc6d40847f44e3504a268e9bc18
|
[
"WTFPL"
] | null | null | null |
about/views.py
|
nicolas-costa/univ-pweb2-django-institutional
|
be61d1b02a207dc6d40847f44e3504a268e9bc18
|
[
"WTFPL"
] | null | null | null |
about/views.py
|
nicolas-costa/univ-pweb2-django-institutional
|
be61d1b02a207dc6d40847f44e3504a268e9bc18
|
[
"WTFPL"
] | null | null | null |
from django.http import HttpResponse
from django.template import loader
# Create your views here.
def index(request):
templ = loader.get_template('about/index.html')
return HttpResponse(templ.render(request=request))
| 20.818182
| 54
| 0.768559
|
from django.http import HttpResponse
from django.template import loader
def index(request):
templ = loader.get_template('about/index.html')
return HttpResponse(templ.render(request=request))
| true
| true
|
790e25dee299e98cceaa4a8112d570f33ec279e9
| 677
|
py
|
Python
|
taskcat/_cli_modules/generate_iam_policy.py
|
sirhc/taskcat
|
22e4bf0fa2b13363eca87cfbb3f3061247fa63c3
|
[
"Apache-2.0"
] | 920
|
2016-12-03T01:41:25.000Z
|
2021-11-04T13:52:21.000Z
|
taskcat/_cli_modules/generate_iam_policy.py
|
sirhc/taskcat
|
22e4bf0fa2b13363eca87cfbb3f3061247fa63c3
|
[
"Apache-2.0"
] | 544
|
2017-02-23T22:41:25.000Z
|
2021-11-03T23:02:25.000Z
|
taskcat/_cli_modules/generate_iam_policy.py
|
sirhc/taskcat
|
22e4bf0fa2b13363eca87cfbb3f3061247fa63c3
|
[
"Apache-2.0"
] | 225
|
2016-12-11T13:36:21.000Z
|
2021-11-04T14:43:53.000Z
|
import logging
from pathlib import Path
from taskcat._config import Config
from taskcat.iam_policy.policy import CFNPolicyGenerator
LOG = logging.getLogger(__name__)
class GenerateIAMPolicy:
"""
[ALPHA] Introspects CFN Template(s) and generates an IAM policy necessary to successfully launch the template(s)
"""
CLINAME = "generate-iam-policy"
def __init__(
self, output_file: str = "./cfn_stack_policy.json", project_root: str = "./"
):
project_root_path = Path(project_root).expanduser().resolve()
config = Config.create(project_root=project_root_path)
CFNPolicyGenerator(config, output_file).generate_policy()
| 27.08
| 116
| 0.725258
|
import logging
from pathlib import Path
from taskcat._config import Config
from taskcat.iam_policy.policy import CFNPolicyGenerator
LOG = logging.getLogger(__name__)
class GenerateIAMPolicy:
CLINAME = "generate-iam-policy"
def __init__(
self, output_file: str = "./cfn_stack_policy.json", project_root: str = "./"
):
project_root_path = Path(project_root).expanduser().resolve()
config = Config.create(project_root=project_root_path)
CFNPolicyGenerator(config, output_file).generate_policy()
| true
| true
|
790e26a11754a9d70167dbcc6e4019648c229da6
| 73,932
|
py
|
Python
|
src/config/common/cfgm_common/vnc_cassandra.py
|
atsgen/tf-controller
|
9321889cdd3d7108980cc88937b2e82956502cc5
|
[
"Apache-2.0"
] | null | null | null |
src/config/common/cfgm_common/vnc_cassandra.py
|
atsgen/tf-controller
|
9321889cdd3d7108980cc88937b2e82956502cc5
|
[
"Apache-2.0"
] | null | null | null |
src/config/common/cfgm_common/vnc_cassandra.py
|
atsgen/tf-controller
|
9321889cdd3d7108980cc88937b2e82956502cc5
|
[
"Apache-2.0"
] | 1
|
2020-12-18T18:22:53.000Z
|
2020-12-18T18:22:53.000Z
|
from __future__ import absolute_import
from __future__ import unicode_literals
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
from builtins import next
from builtins import chr
from builtins import str
from builtins import range
from builtins import object
import copy
import os
import gevent
from pprint import pformat
import six
from vnc_api import vnc_api
from .exceptions import NoIdError, VncError
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from cfgm_common import jsonutils as json
from . import utils
import datetime
from operator import itemgetter
from collections import OrderedDict
from cfgm_common.datastore.drivers.cassandra_thrift import CassandraDriverThrift
from cfgm_common.datastore.drivers.cassandra_cql import CassandraDriverCQL
from cfgm_common.datastore import api as datastore_api
JSON_NONE = json.dumps(None)
class VncCassandraClient(object):
@staticmethod
def _is_metadata(column_name):
return column_name[:5] == 'META:'
@staticmethod
def _is_parent(column_name):
return column_name[:7] == 'parent:'
@staticmethod
def _is_prop(column_name):
return column_name[:5] == 'prop:'
@staticmethod
def _is_prop_list(column_name):
return column_name[:6] == 'propl:'
@staticmethod
def _is_prop_map(column_name):
return column_name[:6] == 'propm:'
@staticmethod
def _is_ref(column_name):
return column_name[:4] == 'ref:'
@staticmethod
def _is_backref(column_name):
return column_name[:8] == 'backref:'
@staticmethod
def _is_children(column_name):
return column_name[:9] == 'children:'
def add(self, cf_name, key, value):
try:
self._cassandra_driver.insert(key, value, cf_name=cf_name)
return True
except Exception as e:
self._logger("VNCCassandra, unable to add {}={}, error: {}".format(
key, value, e), level=SandeshLevel.SYS_WARN)
return False
def delete(self, cf_name, key, columns=None):
try:
self._cassandra_driver.remove(
key, columns, cf_name=cf_name)
return True
except Exception as e:
self._logger("VNCCassandra, unable to del {}={}, error: {}".format(
key, columns, e), level=SandeshLevel.SYS_WARN)
return False
def _get_resource_class(self, obj_type):
if hasattr(self, '_db_client_mgr'):
return self._db_client_mgr.get_resource_class(obj_type)
cls_name = '%s' % (utils.CamelCase(obj_type))
return getattr(vnc_api, cls_name)
# end _get_resource_class
@classmethod
def get_db_info(cls):
db_info = [(datastore_api.UUID_KEYSPACE_NAME, [datastore_api.OBJ_UUID_CF_NAME,
datastore_api.OBJ_FQ_NAME_CF_NAME,
datastore_api.OBJ_SHARED_CF_NAME])]
return db_info
# end get_db_info
def __init__(self, server_list, cassandra_driver, **options):
if cassandra_driver == 'cql':
driverClass = CassandraDriverCQL
elif cassandra_driver == 'thrift':
driverClass = CassandraDriverThrift
# TODO(sahid): To satisfy test-framework which has its
# specific py3 support for thrift we can have the above
# condition, when that will be fixed we could uncomment
# the code.
#if six.PY3:
# raise VncError(
# "selected driver `{}` not supported for Python 3.".format(
# cassandra_driver))
else:
raise VncError(
"datastore driver not selected, see `cassandra_driver`.")
self._cassandra_driver = driverClass(server_list, **options)
self._logger = self._cassandra_driver.options.logger
self._logger('VNCCassandra started with driver {}'.format(driverClass),
level=SandeshLevel.SYS_INFO)
self._cache_uuid_to_fq_name = {}
self._obj_cache_mgr = ObjectCacheManager(
self._cassandra_driver.options.logger,
self,
max_entries=self._cassandra_driver.options.obj_cache_entries,
obj_cache_exclude_types=self._cassandra_driver.options.obj_cache_exclude_types,
debug_obj_cache_types=self._cassandra_driver.options.debug_obj_cache_types,
)
self._obj_cache_exclude_types = self._cassandra_driver.options.obj_cache_exclude_types or []
# these functions make calls to pycassa xget() and get_range()
# generator functions which can't be wrapped around handle_exceptions()
# at the time of cassandra init, hence need to wrap these functions that
# uses it to catch cassandra connection failures.
self.object_update = self._cassandra_driver._handle_exceptions(
self.object_update)
self.object_list = self._cassandra_driver._handle_exceptions(
self.object_list)
self.object_read = self._cassandra_driver._handle_exceptions(
self.object_read)
self.object_raw_read = self._cassandra_driver._handle_exceptions(
self.object_raw_read)
self.object_delete = self._cassandra_driver._handle_exceptions(
self.object_delete)
self.prop_collection_read = self._cassandra_driver._handle_exceptions(
self.prop_collection_read)
self.uuid_to_fq_name = self._cassandra_driver._handle_exceptions(
self.uuid_to_fq_name)
self.uuid_to_obj_type = self._cassandra_driver._handle_exceptions(
self.uuid_to_obj_type)
self.fq_name_to_uuid = self._cassandra_driver._handle_exceptions(
self.fq_name_to_uuid)
self.get_shared = self._cassandra_driver._handle_exceptions(
self.get_shared)
self.walk = self._cassandra_driver._handle_exceptions(self.walk)
if self._cassandra_driver.options.walk:
self.walk()
# end __init__
def _create_prop(self, bch, obj_uuid, prop_name, prop_val):
self._cassandra_driver.insert(
obj_uuid,
{'prop:%s' % (prop_name): json.dumps(prop_val)},
batch=bch)
# end _create_prop
def _update_prop(self, bch, obj_uuid, prop_name, new_props):
if new_props[prop_name] is None:
self._cassandra_driver.remove(obj_uuid,
columns=['prop:' + prop_name],
batch=bch)
else:
self._cassandra_driver.insert(
obj_uuid,
{'prop:' + prop_name: json.dumps(new_props[prop_name])},
batch=bch)
# prop has been accounted for, remove so only new ones remain
del new_props[prop_name]
# end _update_prop
def _add_to_prop_list(self, bch, obj_uuid, prop_name,
prop_elem_value, prop_elem_position):
self._cassandra_driver.insert(obj_uuid,
{'propl:%s:%s' % (prop_name, prop_elem_position):
json.dumps(prop_elem_value)},
batch=bch)
# end _add_to_prop_list
def _delete_from_prop_list(self, bch, obj_uuid, prop_name,
prop_elem_position):
self._cassandra_driver.remove(
obj_uuid,
columns=['propl:%s:%s' % (prop_name, prop_elem_position)],
batch=bch)
# end _delete_from_prop_list
def _set_in_prop_map(self, bch, obj_uuid, prop_name,
prop_elem_value, prop_elem_position):
self._cassandra_driver.insert(obj_uuid,
{'propm:%s:%s' % (prop_name, prop_elem_position):
json.dumps(prop_elem_value)},
batch=bch)
# end _set_in_prop_map
def _delete_from_prop_map(self, bch, obj_uuid, prop_name,
prop_elem_position):
self._cassandra_driver.remove(
obj_uuid,
columns=['propm:%s:%s' % (prop_name, prop_elem_position)],
batch=bch)
# end _delete_from_prop_map
def _create_child(self, bch, parent_type, parent_uuid,
child_type, child_uuid):
child_col = {'children:%s:%s' %
(child_type, child_uuid): JSON_NONE}
self._cassandra_driver.insert(parent_uuid, child_col, batch=bch)
parent_col = {'parent:%s:%s' %
(parent_type, parent_uuid): JSON_NONE}
self._cassandra_driver.insert(child_uuid, parent_col, batch=bch)
# update latest_col_ts on parent object
if parent_type not in self._obj_cache_exclude_types:
self.update_latest_col_ts(bch, parent_uuid)
# end _create_child
def _delete_child(self, bch, parent_type, parent_uuid,
child_type, child_uuid):
self._cassandra_driver.remove(
parent_uuid,
columns=['children:%s:%s' % (child_type, child_uuid)],
batch=bch)
# update latest_col_ts on parent object
if parent_type not in self._obj_cache_exclude_types:
self.update_latest_col_ts(bch, parent_uuid)
# end _delete_child
def _create_ref(self, bch, obj_type, obj_uuid, ref_obj_type, ref_uuid,
ref_data):
j_ref_data = json.dumps(ref_data)
symmetric_ref_updates = []
self._cassandra_driver.insert(
obj_uuid, {'ref:%s:%s' %
(ref_obj_type, ref_uuid): j_ref_data},
batch=bch)
if obj_type == ref_obj_type:
self._cassandra_driver.insert(
ref_uuid, {'ref:%s:%s' %
(obj_type, obj_uuid): j_ref_data},
batch=bch)
self.update_last_modified(bch, obj_type, ref_uuid)
symmetric_ref_updates = [ref_uuid]
else:
self._cassandra_driver.insert(
ref_uuid, {'backref:%s:%s' %
(obj_type, obj_uuid): j_ref_data},
batch=bch)
# update latest_col_ts on referred object
if ref_obj_type not in self._obj_cache_exclude_types:
if ref_obj_type == obj_type:
# evict other side of ref since it is stale from
# GET /<old-ref-uuid> pov.
self._obj_cache_mgr.evict(obj_type, [ref_uuid])
else:
self.update_latest_col_ts(bch, ref_uuid)
return symmetric_ref_updates
# end _create_ref
def _update_ref(self, bch, obj_type, obj_uuid, ref_obj_type, old_ref_uuid,
new_ref_infos):
if ref_obj_type not in new_ref_infos:
# update body didn't touch this type, nop
return []
symmetric_ref_updates = []
if old_ref_uuid not in new_ref_infos[ref_obj_type]:
# remove old ref
self._cassandra_driver.remove(
obj_uuid,
columns=['ref:%s:%s' % (ref_obj_type, old_ref_uuid)],
batch=bch)
if obj_type == ref_obj_type:
self._cassandra_driver.remove(
old_ref_uuid,
columns=['ref:%s:%s' % (obj_type, obj_uuid)],
batch=bch)
try:
self.update_last_modified(bch, obj_type, old_ref_uuid)
symmetric_ref_updates = [old_ref_uuid]
except NoIdError as e:
# old_ref_uuid might have been deleted
# if cache has the link, it will be evicted
# if cache doesn't have, keyerror is caught and continued
pass
else:
self._cassandra_driver.remove(
old_ref_uuid,
columns=['backref:%s:%s' % (obj_type, obj_uuid)],
batch=bch)
else:
# retain old ref with new ref attr
new_ref_data = new_ref_infos[ref_obj_type][old_ref_uuid]
j_new_ref_data = json.dumps(new_ref_data)
self._cassandra_driver.insert(
obj_uuid,
{'ref:%s:%s' % (ref_obj_type, old_ref_uuid):
j_new_ref_data},
batch=bch)
if obj_type == ref_obj_type:
self._cassandra_driver.insert(
old_ref_uuid,
{'ref:%s:%s' % (obj_type, obj_uuid):
j_new_ref_data},
batch=bch)
self.update_last_modified(bch, obj_type, old_ref_uuid)
symmetric_ref_updates = [old_ref_uuid]
else:
self._cassandra_driver.insert(
old_ref_uuid,
{'backref:%s:%s' % (obj_type, obj_uuid):
j_new_ref_data},
batch=bch)
# uuid has been accounted for, remove so only new ones remain
del new_ref_infos[ref_obj_type][old_ref_uuid]
# update latest_col_ts on referred object
if ref_obj_type not in self._obj_cache_exclude_types:
if ref_obj_type == obj_type:
# evict other side of ref since it is stale from
# GET /<old-ref-uuid> pov.
self._obj_cache_mgr.evict(obj_type, [old_ref_uuid])
else:
self.update_latest_col_ts(bch, old_ref_uuid)
return symmetric_ref_updates
# end _update_ref
def _delete_ref(self, bch, obj_type, obj_uuid, ref_obj_type, ref_uuid):
send = False
symmetric_ref_updates = []
if bch is None:
send = True
bch = self._cassandra_driver.get_cf_batch(datastore_api.OBJ_UUID_CF_NAME)
self._cassandra_driver.remove(
obj_uuid,
columns=['ref:%s:%s' % (ref_obj_type, ref_uuid)],
batch=bch)
if obj_type == ref_obj_type:
self._cassandra_driver.remove(ref_uuid, columns=[
'ref:%s:%s' % (obj_type, obj_uuid)],
batch=bch)
try:
self.update_last_modified(bch, obj_type, ref_uuid)
symmetric_ref_updates = [ref_uuid]
            except NoIdError:
# ref_uuid might have been deleted
# if cache has the link, it will be evicted
# if cache doesn't have, keyerror is caught and continued
pass
else:
self._cassandra_driver.remove(
ref_uuid,
columns=['backref:%s:%s' % (obj_type, obj_uuid)],
batch=bch)
# update latest_col_ts on referred object
if ref_obj_type not in self._obj_cache_exclude_types:
if ref_obj_type == obj_type:
# evict other side of ref since it is stale from
# GET /<old-ref-uuid> pov.
self._obj_cache_mgr.evict(obj_type, [ref_uuid])
else:
self.update_latest_col_ts(bch, ref_uuid)
if send:
bch.send()
return symmetric_ref_updates
# end _delete_ref
def _get_xsd_class(self, xsd_type):
return getattr(vnc_api, xsd_type)
# end _get_xsd_class
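    # Column layout used for every object row in the OBJ_UUID column family:
    #   'prop:<name>'                 JSON value of a simple property
    #   'propl:<name>:<position>'     one element of a list property
    #   'propm:<name>:<key>'          one element of a map property
    #   'ref:<obj-type>:<uuid>'       forward ref, JSON {'attr': ..., 'is_weakref': ...}
    #   'backref:<obj-type>:<uuid>'   reverse direction of a ref
    #   'children:<obj-type>:<uuid>'  child link, mirrored by 'parent:<type>:<uuid>'
    #   'META:latest_col_ts'          touched on writes so cached copies can detect staleness
    # The OBJ_FQ_NAME column family is keyed by object type with columns of the
    # form '<fq-name-string>:<uuid>'.
    #
    # Illustrative call (type, uuid and fq_name below are hypothetical):
    #   ok, sym_refs = db.object_create(
    #       'virtual_network', vn_uuid,
    #       {'fq_name': ['default-domain', 'proj1', 'vn1'],
    #        'parent_type': 'project', 'id_perms': {...}})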
def object_create(self, obj_type, obj_id, obj_dict,
uuid_batch=None, fqname_batch=None):
obj_class = self._get_resource_class(obj_type)
if uuid_batch:
bch = uuid_batch
else:
# Gather column values for obj and updates to backrefs
# in a batch and write it at the end
bch = self._cassandra_driver.get_cf_batch(datastore_api.OBJ_UUID_CF_NAME)
obj_cols = {}
obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
obj_cols['type'] = json.dumps(obj_type)
if obj_type not in self._obj_cache_exclude_types:
obj_cols['META:latest_col_ts'] = JSON_NONE
if 'parent_type' in obj_dict:
# non config-root child
parent_type = obj_dict['parent_type']
if parent_type not in obj_class.parent_types:
msg = ("Invalid parent type: %s not in %s" %
(parent_type, obj_class.parent_types))
return False, (400, msg)
parent_object_type = self._get_resource_class(
parent_type).object_type
parent_fq_name = obj_dict['fq_name'][:-1]
obj_cols['parent_type'] = json.dumps(parent_type)
parent_uuid = self.fq_name_to_uuid(parent_object_type,
parent_fq_name)
self._create_child(bch, parent_object_type, parent_uuid, obj_type,
obj_id)
# Properties
for prop_field in obj_class.prop_fields:
field = obj_dict.get(prop_field)
# Specifically checking for None
if field is None:
continue
if prop_field == 'id_perms':
field['created'] = datetime.datetime.utcnow().isoformat()
field['last_modified'] = field['created']
if prop_field in obj_class.prop_list_fields:
# store list elements in list order
                # iterate on wrapped element or directly on prop field
if obj_class.prop_list_field_has_wrappers[prop_field]:
wrapper_field_keys = list(field.keys())
if wrapper_field_keys:
wrapper_field = wrapper_field_keys[0]
list_coll = field[wrapper_field]
else:
list_coll = []
else:
list_coll = field
for i in range(len(list_coll)):
self._add_to_prop_list(
bch, obj_id, prop_field, list_coll[i], str(i))
elif prop_field in obj_class.prop_map_fields:
                # iterate on wrapped element or directly on prop field
if obj_class.prop_map_field_has_wrappers[prop_field]:
wrapper_field_keys = list(field.keys())
if wrapper_field_keys:
wrapper_field = wrapper_field_keys[0]
map_coll = field[wrapper_field]
else:
map_coll = []
else:
map_coll = field
map_key_name = obj_class.prop_map_field_key_names[prop_field]
for map_elem in map_coll:
map_key = map_elem[map_key_name]
self._set_in_prop_map(
bch, obj_id, prop_field, map_elem, map_key)
else:
self._create_prop(bch, obj_id, prop_field, field)
# References
# e.g. ref_field = 'network_ipam_refs'
# ref_res_type = 'network-ipam'
# ref_link_type = 'VnSubnetsType'
# is_weakref = False
symmetric_ref_updates = []
for ref_field in obj_class.ref_fields:
ref_fld_types_list = list(obj_class.ref_field_types[ref_field])
ref_res_type = ref_fld_types_list[0]
ref_link_type = ref_fld_types_list[1]
ref_obj_type = self._get_resource_class(ref_res_type).object_type
refs = obj_dict.get(ref_field, [])
for ref in refs:
ref_uuid = self.fq_name_to_uuid(ref_obj_type, ref['to'])
ref_attr = ref.get('attr')
ref_data = {'attr': ref_attr, 'is_weakref': False}
ret = self._create_ref(bch, obj_type, obj_id, ref_obj_type, ref_uuid,
ref_data)
symmetric_ref_updates.extend(ret)
self._cassandra_driver.insert(obj_id, obj_cols, batch=bch)
if not uuid_batch:
bch.send()
# Update fqname table
fq_name_str = ':'.join(obj_dict['fq_name'])
fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_id:
JSON_NONE}
if fqname_batch:
fqname_batch.insert(obj_type, fq_name_cols)
else:
self._cassandra_driver.insert(
cf_name=datastore_api.OBJ_FQ_NAME_CF_NAME,
key=obj_type,
columns=fq_name_cols)
return (True, symmetric_ref_updates)
# end object_create
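    # object_raw_read returns only the requested simple properties: cache hits
    # come from the object cache (without backrefs/children), misses are read
    # straight from the uuid table with the 'prop:' prefix stripped from the
    # returned keys.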
def object_raw_read(self, obj_type, obj_uuids, prop_names):
obj_class = self._get_resource_class(obj_type)
hit_obj_dicts, miss_uuids = self._obj_cache_mgr.read(
obj_class, obj_uuids, prop_names, False)
miss_obj_rows = self._cassandra_driver.multiget(
datastore_api.OBJ_UUID_CF_NAME, miss_uuids,
['prop:' + x for x in prop_names])
miss_obj_dicts = []
for obj_uuid, columns in list(miss_obj_rows.items()):
miss_obj_dict = {'uuid': obj_uuid}
for prop_name in columns:
# strip 'prop:' before sending result back
miss_obj_dict[prop_name[5:]] = columns[prop_name]
miss_obj_dicts.append(miss_obj_dict)
return hit_obj_dicts + miss_obj_dicts
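    # object_read returns (True, [obj_dict, ...]).  The object cache is only
    # consulted when ret_readonly is True and the type is not excluded from
    # caching; when no requested field needs backrefs or children, columns
    # starting with 'b'/'c' are skipped by reading from column 'd' onwards.
    # NoIdError is raised only when a single uuid was requested and nothing
    # was found.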
def object_read(self, obj_type, obj_uuids, field_names=None,
ret_readonly=False):
if not obj_uuids:
return (True, [])
# if field_names=None, all fields will be read/returned
req_fields = field_names
obj_class = self._get_resource_class(obj_type)
ref_fields = obj_class.ref_fields
backref_fields = obj_class.backref_fields
children_fields = obj_class.children_fields
list_fields = obj_class.prop_list_fields
map_fields = obj_class.prop_map_fields
prop_fields = obj_class.prop_fields - (list_fields | map_fields)
if ((ret_readonly is False) or
(obj_type in self._obj_cache_exclude_types)):
ignore_cache = True
else:
ignore_cache = False
        # optimize for the common case of reading non-backref, non-children
        # fields by ignoring columns starting from 'b' and 'c' - significant
        # performance impact in a scaled setting, e.g. read of project.
        # For caching (when returned values will be used read-only,
        # e.g. object read/list context):
        # 1. pick the cache hits, and for the misses
        # 2. read from db, cache, then filter with fields
        # otherwise read from db with the specified field filters
if (field_names is None or
set(field_names) & (backref_fields | children_fields)):
            # at least one backref/children field is needed
include_backrefs_children = True
if ignore_cache:
hit_obj_dicts = []
miss_uuids = obj_uuids
else:
hit_obj_dicts, miss_uuids = self._obj_cache_mgr.read(
obj_class,
obj_uuids,
field_names,
include_backrefs_children,
)
miss_obj_rows = self._cassandra_driver.multiget(
datastore_api.OBJ_UUID_CF_NAME, miss_uuids,
timestamp=True)
else:
# ignore reading backref + children columns
include_backrefs_children = False
if ignore_cache:
hit_obj_dicts = []
miss_uuids = obj_uuids
else:
hit_obj_dicts, miss_uuids = self._obj_cache_mgr.read(
obj_class,
obj_uuids,
field_names,
include_backrefs_children,
)
miss_obj_rows = self._cassandra_driver.multiget(
datastore_api.OBJ_UUID_CF_NAME,
miss_uuids,
start='d',
timestamp=True)
if (ignore_cache or
self._obj_cache_mgr.max_entries < len(miss_uuids)):
# caller may modify returned value, or
# cannot fit in cache,
# just render with filter and don't cache
rendered_objs = self._render_obj_from_db(
obj_class, miss_obj_rows, req_fields,
include_backrefs_children)
obj_dicts = hit_obj_dicts + \
[v['obj_dict'] for k,v in list(rendered_objs.items())]
else:
# can fit and caller won't modify returned value,
# so render without filter, cache and return
# cached value
rendered_objs_to_cache = self._render_obj_from_db(
obj_class, miss_obj_rows, None,
include_backrefs_children)
field_filtered_objs = self._obj_cache_mgr.set(
obj_type,
rendered_objs_to_cache,
req_fields,
include_backrefs_children,
)
obj_dicts = hit_obj_dicts + field_filtered_objs
if not obj_dicts:
if len(obj_uuids) == 1:
raise NoIdError(obj_uuids[0])
else:
return (True, [])
return (True, obj_dicts)
# end object_read
def object_count_children(self, obj_type, obj_uuid, child_type):
if child_type is None:
return (False, '')
obj_class = self._get_resource_class(obj_type)
if child_type not in obj_class.children_fields:
return (False,
'%s is not a child type of %s' % (child_type, obj_type))
col_start = 'children:' + child_type[:-1] + ':'
col_finish = 'children:' + child_type[:-1] + ';'
num_children = self._cassandra_driver.get_count(
datastore_api.OBJ_UUID_CF_NAME,
obj_uuid,
start=col_start,
finish=col_finish)
return (True, num_children)
# end object_count_children
def update_last_modified(self, bch, obj_type, obj_uuid, id_perms=None):
if id_perms is None:
id_perms = self._cassandra_driver.get_one_col(
datastore_api.OBJ_UUID_CF_NAME,
obj_uuid,
'prop:id_perms')
id_perms['last_modified'] = datetime.datetime.utcnow().isoformat()
self._update_prop(bch, obj_uuid, 'id_perms', {'id_perms': id_perms})
if obj_type not in self._obj_cache_exclude_types:
self.update_latest_col_ts(bch, obj_uuid)
# end update_last_modified
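    # update_latest_col_ts touches 'META:latest_col_ts' so that cached copies
    # which include backrefs/children can detect staleness; it silently returns
    # if the row no longer exists.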
def update_latest_col_ts(self, bch, obj_uuid):
try:
self._cassandra_driver.get_one_col(datastore_api.OBJ_UUID_CF_NAME,
obj_uuid,
'type')
except NoIdError:
return
self._cassandra_driver.insert(obj_uuid,
{'META:latest_col_ts':
JSON_NONE},
batch=bch)
# end update_latest_col_ts
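    # object_update makes a single pass over the row's existing columns to
    # refresh or delete properties and to reconcile refs via _update_ref;
    # whatever then remains in new_ref_infos / new_props is created fresh.
    # When no external uuid_batch is supplied, the batch is sent here and the
    # cached entry for obj_uuid is evicted.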
def object_update(self, obj_type, obj_uuid, new_obj_dict, uuid_batch=None):
obj_class = self._get_resource_class(obj_type)
# Grab ref-uuids and properties in new version
new_ref_infos = {}
symmetric_ref_updates = []
# Properties
new_props = {}
for prop_field in obj_class.prop_fields:
if prop_field in new_obj_dict:
new_props[prop_field] = new_obj_dict[prop_field]
# References
# e.g. ref_field = 'network_ipam_refs'
# ref_type = 'network-ipam'
# ref_link_type = 'VnSubnetsType'
# is_weakref = False
for ref_field in obj_class.ref_fields:
ref_fld_types_list = list(obj_class.ref_field_types[ref_field])
ref_res_type = ref_fld_types_list[0]
ref_link_type = ref_fld_types_list[1]
is_weakref = ref_fld_types_list[2]
ref_obj_type = self._get_resource_class(ref_res_type).object_type
if ref_field in new_obj_dict:
new_refs = new_obj_dict[ref_field]
new_ref_infos[ref_obj_type] = {}
for new_ref in new_refs or []:
try:
new_ref_uuid = new_ref['uuid']
except KeyError:
new_ref_uuid = self.fq_name_to_uuid(ref_obj_type,
new_ref['to'])
new_ref_attr = new_ref.get('attr')
new_ref_data = {'attr': new_ref_attr,
'is_weakref': is_weakref}
new_ref_infos[ref_obj_type][new_ref_uuid] = new_ref_data
# Gather column values for obj and updates to backrefs
# in a batch and write it at the end
if uuid_batch:
bch = uuid_batch
else:
bch = self._cassandra_driver.get_cf_batch(
datastore_api.OBJ_UUID_CF_NAME)
for col_name, col_value in self._cassandra_driver.xget(
datastore_api.OBJ_UUID_CF_NAME, obj_uuid):
if self._is_prop(col_name):
(_, prop_name) = col_name.split(':')
if prop_name == 'id_perms':
# id-perms always has to be updated for last-mod timestamp
                    # get it from request dict (or from db if not in request dict)
new_id_perms = new_obj_dict.get(
prop_name, json.loads(col_value))
self.update_last_modified(
bch, obj_type, obj_uuid, new_id_perms)
elif prop_name in new_obj_dict:
self._update_prop(
bch, obj_uuid, prop_name, new_props)
if self._is_prop_list(col_name):
(_, prop_name, prop_elem_position) = col_name.split(':', 2)
if prop_name in new_props:
# delete all old values of prop list
self._delete_from_prop_list(
bch, obj_uuid, prop_name, prop_elem_position)
if self._is_prop_map(col_name):
(_, prop_name, prop_elem_position) = col_name.split(':', 2)
if prop_name in new_props:
                    # delete all old values of prop map
self._delete_from_prop_map(
bch, obj_uuid, prop_name, prop_elem_position)
if self._is_ref(col_name):
(_, ref_type, ref_uuid) = col_name.split(':')
ret = self._update_ref(bch, obj_type, obj_uuid, ref_type,
ref_uuid, new_ref_infos)
symmetric_ref_updates.extend(ret)
# for all column names
# create new refs
for ref_type in list(new_ref_infos.keys()):
for ref_uuid in list(new_ref_infos[ref_type].keys()):
ref_data = new_ref_infos[ref_type][ref_uuid]
ret = self._create_ref(bch, obj_type, obj_uuid, ref_type,
ref_uuid, ref_data)
symmetric_ref_updates.extend(ret)
# create new props
for prop_name in list(new_props.keys()):
if prop_name in obj_class.prop_list_fields:
# store list elements in list order
# iterate on wrapped element or directly on prop field
# for wrapped lists, store without the wrapper. regenerate
# wrapper on read
if (obj_class.prop_list_field_has_wrappers[prop_name] and
new_props[prop_name]):
wrapper_field = list(new_props[prop_name].keys())[0]
list_coll = new_props[prop_name][wrapper_field]
else:
list_coll = new_props[prop_name]
for i in range(len(list_coll)):
self._add_to_prop_list(bch, obj_uuid, prop_name,
list_coll[i], str(i))
elif prop_name in obj_class.prop_map_fields:
# store map elements in key order
# iterate on wrapped element or directly on prop field
                # for wrapped maps, store without the wrapper; regenerate
                # the wrapper on read
if (obj_class.prop_map_field_has_wrappers[prop_name] and
new_props[prop_name]):
wrapper_field = list(new_props[prop_name].keys())[0]
map_coll = new_props[prop_name][wrapper_field]
else:
map_coll = new_props[prop_name]
map_key_name = obj_class.prop_map_field_key_names[prop_name]
for map_elem in map_coll:
map_key = map_elem[map_key_name]
self._set_in_prop_map(bch, obj_uuid, prop_name,
map_elem, map_key)
else:
self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
if not uuid_batch:
try:
bch.send()
finally:
self._obj_cache_mgr.evict(obj_type, [obj_uuid])
return (True, symmetric_ref_updates)
# end object_update
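    # object_list works in one of four modes: children of parent_uuids,
    # backrefs of back_ref_uuids, an explicit obj_uuids list, or a scan of the
    # fq-name index for the whole type.  'filters' only match plain 'prop:'
    # fields.  Returns (True, [(fq_name, uuid), ...], marker) where marker is
    # the pagination cursor; for paginated anchored listings the union across
    # anchors is sorted by uuid and pruned to paginate_count.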
def object_list(self, obj_type, parent_uuids=None, back_ref_uuids=None,
obj_uuids=None, count=False, filters=None,
paginate_start=None, paginate_count=None):
obj_class = self._get_resource_class(obj_type)
children_fq_names_uuids = []
ret_marker = None
anchored_op = True
def filter_rows(coll_infos, filters=None):
if not coll_infos or not filters:
return coll_infos
filtered_infos = {}
columns = ['prop:%s' % filter_key for filter_key in filters if
filter_key in obj_class.prop_fields]
if not columns:
return coll_infos
rows = self._cassandra_driver.multiget(datastore_api.OBJ_UUID_CF_NAME,
list(coll_infos.keys()),
columns=columns)
for obj_uuid, properties in list(rows.items()):
# give chance for zk heartbeat/ping
gevent.sleep(0)
full_match = True
for filter_key, filter_values in list(filters.items()):
property = 'prop:%s' % filter_key
if property not in properties:
full_match = False
break
prop_value = properties[property]
if isinstance(prop_value, dict):
for filter_value in filter_values:
try:
filter_dict = json.loads(filter_value)
except ValueError:
continue
if (six.viewitems(filter_dict) <=
six.viewitems(prop_value)):
break
else:
full_match = False
break
elif prop_value not in filter_values:
full_match = False
break
if full_match:
filtered_infos[obj_uuid] = coll_infos[obj_uuid]
return filtered_infos
# end filter_rows
def get_fq_name_uuid_list(obj_uuids):
ret_list = []
for obj_uuid in obj_uuids:
try:
if obj_type != self.uuid_to_obj_type(obj_uuid):
continue
obj_fq_name = self.uuid_to_fq_name(obj_uuid)
ret_list.append((obj_fq_name, obj_uuid))
except NoIdError:
pass
return ret_list
# end get_fq_name_uuid_list
if parent_uuids:
# go from parent to child
            # tune start and count if paginated on the same row
if paginate_start and paginate_start != '0':
start = 'children:%s:%s' % (obj_type,
paginate_start[:-1]+chr(ord(paginate_start[-1])+1))
num_columns = paginate_count
else:
start = 'children:%s:' % (obj_type)
num_columns = None
obj_rows = self._cassandra_driver.multiget(
datastore_api.OBJ_UUID_CF_NAME,
parent_uuids,
start=start,
finish='children:%s;' % (obj_type),
num_columns=num_columns,
timestamp=True)
def filter_rows_parent_anchor(sort=False):
# flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
all_cols = [cols for obj_key in list(obj_rows.keys())
for cols in list(obj_rows[obj_key].items())]
all_child_infos = {}
for col_name, col_val_ts in all_cols:
# give chance for zk heartbeat/ping
gevent.sleep(0)
child_uuid = col_name.split(':')[2]
if obj_uuids and child_uuid not in obj_uuids:
continue
if back_ref_uuids:
child_cols = self._cassandra_driver.get(
datastore_api.OBJ_UUID_CF_NAME,
child_uuid,
start='ref:',
finish='ref;')
child_ref_ids = {col.split(':')[2]
for col in child_cols or []}
if not set(back_ref_uuids) & child_ref_ids:
continue
all_child_infos[child_uuid] = {'uuid': child_uuid,
'tstamp': col_val_ts[1]}
filt_child_infos = filter_rows(all_child_infos, filters)
if not sort:
ret_child_infos = list(filt_child_infos.values())
else:
ret_child_infos = sorted(list(filt_child_infos.values()),
key=itemgetter('tstamp'))
return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
# end filter_rows_parent_anchor
children_fq_names_uuids.extend(filter_rows_parent_anchor(sort=True))
elif back_ref_uuids:
# go from anchor to backrefs
if paginate_start and paginate_start != '0':
# get next lexical value of marker
start = 'backref:%s:%s' % (obj_type,
paginate_start[:-1]+chr(ord(paginate_start[-1])+1))
num_columns = paginate_count
else:
start = 'backref:%s:' % (obj_type)
num_columns = None
obj_rows = self._cassandra_driver.multiget(
datastore_api.OBJ_UUID_CF_NAME,
back_ref_uuids,
start=start,
finish='backref:%s;' % (obj_type),
num_columns=num_columns,
timestamp=True)
def filter_rows_backref_anchor():
                # flatten to [('backref:<obj-type>:<uuid>', (<val>, <ts>)), ...]
all_cols = [cols for obj_key in list(obj_rows.keys())
for cols in list(obj_rows[obj_key].items())]
all_backref_infos = {}
for col_name, col_val_ts in all_cols:
# give chance for zk heartbeat/ping
gevent.sleep(0)
backref_uuid = col_name.split(':')[2]
if obj_uuids and backref_uuid not in obj_uuids:
continue
all_backref_infos[backref_uuid] = \
{'uuid': backref_uuid, 'tstamp': col_val_ts[1]}
filt_backref_infos = filter_rows(all_backref_infos, filters)
return get_fq_name_uuid_list(r['uuid'] for r in
list(filt_backref_infos.values()))
# end filter_rows_backref_anchor
children_fq_names_uuids.extend(filter_rows_backref_anchor())
else:
anchored_op = False
if obj_uuids:
# exact objects specified
def filter_rows_object_list():
all_obj_infos = {}
marker = None
read_in = 0
start_idx = 0
if paginate_start and paginate_start != '0':
# paginate through objects
# in list order of obj_uuids
try:
start_idx = obj_uuids.index(paginate_start) + 1
except ValueError:
# simulate end of pagination
start_idx = len(obj_uuids)
for obj_uuid in obj_uuids[start_idx:]:
all_obj_infos[obj_uuid] = None
read_in += 1
if paginate_start and read_in >= paginate_count:
marker = obj_uuid
break
filt_obj_infos = filter_rows(all_obj_infos, filters)
return get_fq_name_uuid_list(list(filt_obj_infos.keys())), marker
# end filter_rows_object_list
filtered_rows, ret_marker = filter_rows_object_list()
children_fq_names_uuids.extend(filtered_rows)
else: # grab all resources of this type
if paginate_start and paginate_start != '0':
start = paginate_start[:-1] + \
chr(ord(paginate_start[-1]) + 1)
else:
start = ''
cols = self._cassandra_driver.xget(
datastore_api.OBJ_FQ_NAME_CF_NAME, '%s' %(obj_type),
start=start)
def filter_rows_no_anchor():
marker = None
all_obj_infos = {}
read_in = 0
for col_name, _ in cols:
# give chance for zk heartbeat/ping
gevent.sleep(0)
col_name_arr = utils.decode_string(col_name).split(':')
obj_uuid = col_name_arr[-1]
all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
read_in += 1
if paginate_start and read_in >= paginate_count:
marker = col_name
break
filt_obj_infos = filter_rows(all_obj_infos, filters)
return list(filt_obj_infos.values()), marker
# end filter_rows_no_anchor
if count and not filters:
# when listing all objects of a type
# return early if only count query is in request
return (True, sum(1 for col in cols), None)
filtered_rows, ret_marker = filter_rows_no_anchor()
children_fq_names_uuids.extend(filtered_rows)
if count:
return (True, len(children_fq_names_uuids), None)
        # for an anchored list with pagination, prune the union across
        # anchors and use the last uuid as the marker
if paginate_start and anchored_op:
children_fq_names_uuids = sorted(children_fq_names_uuids,
key=lambda fqn_uuid: fqn_uuid[1])
if len(children_fq_names_uuids) > paginate_count:
children_fq_names_uuids = children_fq_names_uuids[:paginate_count]
if not children_fq_names_uuids:
ret_marker = None
else:
ret_marker = children_fq_names_uuids[-1][1]
return (True, children_fq_names_uuids, ret_marker)
# end object_list
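    # object_delete unlinks the object from its parent, removes both directions
    # of every ref plus any relaxed back-refs, deletes the uuid row, evicts the
    # cache entry and finally removes the fq-name index column and the
    # uuid->fq-name cache entry.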
def object_delete(self, obj_type, obj_uuid):
obj_class = self._get_resource_class(obj_type)
fq_name = self._cassandra_driver.get_one_col(datastore_api.OBJ_UUID_CF_NAME,
obj_uuid,
'fq_name')
bch = self._cassandra_driver.get_cf_batch(datastore_api.OBJ_UUID_CF_NAME)
# unlink from parent
col_start = 'parent:'
col_fin = 'parent;'
col_name_iter = self._cassandra_driver.xget(datastore_api.OBJ_UUID_CF_NAME,
obj_uuid, start=col_start, finish=col_fin)
for (col_name, col_val) in col_name_iter:
(_, parent_type, parent_uuid) = col_name.split(':')
self._delete_child(
bch, parent_type, parent_uuid, obj_type, obj_uuid)
# remove refs
col_start = 'ref:'
col_fin = 'ref;'
col_name_iter = self._cassandra_driver.xget(datastore_api.OBJ_UUID_CF_NAME,
obj_uuid, start=col_start, finish=col_fin)
symmetric_ref_updates = []
for (col_name, col_val) in col_name_iter:
(_, ref_type, ref_uuid) = col_name.split(':')
ret = self._delete_ref(bch, obj_type, obj_uuid, ref_type, ref_uuid)
symmetric_ref_updates.extend(ret)
# remove link from relaxed back refs
col_start = 'relaxbackref:'
col_fin = 'relaxbackref;'
col_name_iter = self._cassandra_driver.xget(datastore_api.OBJ_UUID_CF_NAME,
obj_uuid, start=col_start, finish=col_fin)
for (col_name, col_val) in col_name_iter:
(_, backref_uuid) = col_name.split(':')
self._delete_ref(bch, None, backref_uuid, obj_type, obj_uuid)
self._cassandra_driver.remove(obj_uuid, batch=bch)
try:
bch.send()
finally:
self._obj_cache_mgr.evict(obj_type, [obj_uuid])
# Update fqname table
fq_name_str = ':'.join(fq_name)
fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
self._cassandra_driver.remove(
cf_name=datastore_api.OBJ_FQ_NAME_CF_NAME,
key=obj_type,
columns=[fq_name_col])
# Purge map naming cache
self.cache_uuid_to_fq_name_del(obj_uuid)
return (True, symmetric_ref_updates)
# end object_delete
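    # prop_collection_read returns, for each requested list/map field, a list
    # of (value, position) tuples read from the 'propl:'/'propm:' columns;
    # id_perms is always included, and a non-empty 'position' narrows the read
    # to a single element.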
def prop_collection_read(self, obj_type, obj_uuid, obj_fields, position):
obj_class = self._get_resource_class(obj_type)
result = {}
        # always read in id-perms for upper layers to do rbac/visibility checks
result['id_perms'] = self._cassandra_driver.get_one_col(
datastore_api.OBJ_UUID_CF_NAME,
obj_uuid,
'prop:id_perms')
# read in prop-list or prop-map fields
for field in obj_fields:
if field in obj_class.prop_list_fields:
prop_pfx = 'propl'
elif field in obj_class.prop_map_fields:
prop_pfx = 'propm'
else:
continue
if position:
col_start = '%s:%s:%s' % (prop_pfx, field, position)
col_end = '%s:%s:%s' % (prop_pfx, field, position)
else:
col_start = '%s:%s:' % (prop_pfx, field)
col_end = '%s:%s;' % (prop_pfx, field)
obj_cols = self._cassandra_driver.xget(
datastore_api.OBJ_UUID_CF_NAME,
obj_uuid,
start=col_start,
finish=col_end)
result[field] = []
for name, value in obj_cols:
# tuple of col_value, position. result is already sorted
# lexically by position (necessary only for list property)
result[field].append((json.loads(value), name.split(':', 2)[-1]))
return (True, result)
# end prop_collection_read
def cache_uuid_to_fq_name_add(self, id, fq_name, obj_type):
self._cache_uuid_to_fq_name[id] = (fq_name, obj_type)
# end cache_uuid_to_fq_name_add
def cache_uuid_to_fq_name_del(self, id):
self._cache_uuid_to_fq_name.pop(id, None)
# end cache_uuid_to_fq_name_del
def uuid_to_fq_name(self, id):
try:
return copy.copy(self._cache_uuid_to_fq_name[id][0])
except KeyError:
obj = self._cassandra_driver.get(datastore_api.OBJ_UUID_CF_NAME, id,
columns=['fq_name', 'type'])
if not obj:
raise NoIdError(id)
if 'type' not in obj or 'fq_name' not in obj:
raise NoIdError(id)
fq_name = obj['fq_name']
obj_type = obj['type']
self.cache_uuid_to_fq_name_add(id, fq_name, obj_type)
return copy.copy(fq_name)
# end uuid_to_fq_name
def uuid_to_obj_type(self, id):
try:
return self._cache_uuid_to_fq_name[id][1]
except KeyError:
obj = self._cassandra_driver.get(datastore_api.OBJ_UUID_CF_NAME, id,
columns=['fq_name', 'type'])
if not obj:
raise NoIdError(id)
if 'type' not in obj or 'fq_name' not in obj:
raise NoIdError(id)
fq_name = obj['fq_name']
obj_type = obj['type']
self.cache_uuid_to_fq_name_add(id, fq_name, obj_type)
return obj_type
# end uuid_to_obj_type
def fq_name_to_uuid(self, obj_type, fq_name):
fq_name_str = utils.encode_string(':'.join(fq_name))
col_infos = self._cassandra_driver.get(datastore_api.OBJ_FQ_NAME_CF_NAME,
obj_type,
start=fq_name_str + ':',
finish=fq_name_str + ';')
if not col_infos:
raise NoIdError('%s %s' % (obj_type, fq_name_str))
if len(col_infos) > 1:
raise VncError('Multi match %s for %s' % (fq_name_str, obj_type))
fq_name_uuid = utils.decode_string(col_infos.popitem()[0]).split(':')
if obj_type != 'route_target' and fq_name_uuid[:-1] != fq_name:
raise NoIdError('%s %s' % (obj_type, fq_name_str))
return fq_name_uuid[-1]
# end fq_name_to_uuid
# return all objects shared with a (share_type, share_id)
def get_shared(self, obj_type, share_id='', share_type='global'):
result = []
column = '%s:%s' % (share_type, share_id)
col_infos = self._cassandra_driver.get(datastore_api.OBJ_SHARED_CF_NAME,
obj_type,
start=column + ':',
finish=column + ';')
if not col_infos:
return None
for (col_name, col_val) in list(col_infos.items()):
# ('*:*:f7963198-08a4-4b96-a02e-41cc66593163', u'7')
obj_uuid = col_name.split(':')[-1]
result.append((obj_uuid, col_val))
return result
# share an object 'obj_id' with <share_type:share_id>
    # rwx indicates the type of access (sharing) allowed
    def set_shared(self, obj_type, obj_id, share_id='', share_type='global', rwx=7):
col_name = '%s:%s:%s' % (share_type, share_id, obj_id)
self._cassandra_driver.insert(
cf_name=datastore_api.OBJ_SHARED_CF_NAME,
key=obj_type,
columns={col_name:json.dumps(rwx)})
# delete share of 'obj_id' object with <share_type:share_id>
    def del_shared(self, obj_type, obj_id, share_id='', share_type='global'):
col_name = '%s:%s:%s' % (share_type, share_id, obj_id)
self._cassandra_driver.remove(
cf_name=datastore_api.OBJ_SHARED_CF_NAME,
key=obj_type,
columns=[col_name])
def _render_obj_from_db(self, obj_class, obj_rows, field_names=None,
include_backrefs_children=False):
ref_fields = obj_class.ref_fields
backref_fields = obj_class.backref_fields
children_fields = obj_class.children_fields
list_fields = obj_class.prop_list_fields
map_fields = obj_class.prop_map_fields
prop_fields = obj_class.prop_fields - (list_fields | map_fields)
results = {}
for obj_uuid, obj_cols in list(obj_rows.items()):
if 'type' not in obj_cols or 'fq_name' not in obj_cols:
# if object has been deleted, these fields may not
# be present
continue
if obj_class.object_type != obj_cols.pop('type')[0]:
continue
id_perms_ts = 0
row_latest_ts = 0
result = {}
result['uuid'] = obj_uuid
result['fq_name'] = obj_cols.pop('fq_name')[0]
for col_name in list(obj_cols.keys()):
if self._is_parent(col_name):
# non config-root child
(_, _, parent_uuid) = col_name.split(':')
try:
result['parent_type'] = obj_cols['parent_type'][0]
except KeyError:
# parent_type may not be present in obj_cols
pass
result['parent_uuid'] = parent_uuid
continue
if self._is_prop(col_name):
(_, prop_name) = col_name.split(':')
if prop_name == 'id_perms':
id_perms_ts = obj_cols[col_name][1]
if ((prop_name not in prop_fields) or
(field_names and prop_name not in field_names)):
continue
result[prop_name] = obj_cols[col_name][0]
continue
if self._is_prop_list(col_name):
(_, prop_name, prop_elem_position) = col_name.split(':')
if field_names and prop_name not in field_names:
continue
if obj_class.prop_list_field_has_wrappers[prop_name]:
prop_field_types = obj_class.prop_field_types[prop_name]
wrapper_type = prop_field_types['xsd_type']
wrapper_cls = self._get_xsd_class(wrapper_type)
wrapper_field = wrapper_cls.attr_fields[0]
if prop_name not in result:
result[prop_name] = {wrapper_field: []}
result[prop_name][wrapper_field].append(
(obj_cols[col_name][0], prop_elem_position))
else:
if prop_name not in result:
result[prop_name] = []
result[prop_name].append((obj_cols[col_name][0],
prop_elem_position))
continue
if self._is_prop_map(col_name):
(_, prop_name, _) = col_name.split(':', 2)
if field_names and prop_name not in field_names:
continue
if obj_class.prop_map_field_has_wrappers[prop_name]:
prop_field_types = obj_class.prop_field_types[prop_name]
wrapper_type = prop_field_types['xsd_type']
wrapper_cls = self._get_xsd_class(wrapper_type)
wrapper_field = wrapper_cls.attr_fields[0]
if prop_name not in result:
result[prop_name] = {wrapper_field: []}
result[prop_name][wrapper_field].append(
obj_cols[col_name][0])
else:
if prop_name not in result:
result[prop_name] = []
result[prop_name].append(obj_cols[col_name][0])
continue
if self._is_children(col_name):
(_, child_type, child_uuid) = col_name.split(':')
if field_names and '%ss' %(child_type) not in field_names:
continue
if child_type+'s' not in children_fields:
continue
child_tstamp = obj_cols[col_name][1]
try:
self._read_child(result, obj_uuid, child_type,
child_uuid, child_tstamp)
except NoIdError:
continue
continue
if self._is_ref(col_name):
(_, ref_type, ref_uuid) = col_name.split(':')
if ((ref_type+'_refs' not in ref_fields) or
(field_names and ref_type + '_refs' not in field_names)):
continue
self._read_ref(result, obj_uuid, ref_type, ref_uuid,
obj_cols[col_name][0])
continue
if self._is_backref(col_name):
(_, back_ref_type, back_ref_uuid) = col_name.split(':')
if back_ref_type+'_back_refs' not in backref_fields:
continue
if (field_names and
'%s_back_refs' %(back_ref_type) not in field_names):
continue
try:
self._read_back_ref(result, obj_uuid, back_ref_type,
back_ref_uuid, obj_cols[col_name][0])
except NoIdError:
continue
continue
if self._is_metadata(col_name):
(_, meta_type) = col_name.split(':')
if meta_type == 'latest_col_ts':
row_latest_ts = obj_cols[col_name][1]
continue
# for all column names
# sort children by creation time
for child_field in obj_class.children_fields:
if child_field not in result:
continue
sorted_children = sorted(result[child_field],
key = itemgetter('tstamp'))
# re-write result's children without timestamp
result[child_field] = sorted_children
                for child in result[child_field]:
                    child.pop('tstamp')
# for all children
# Ordering property lists by position attribute
for prop_name in (obj_class.prop_list_fields & set(result.keys())):
if isinstance(result[prop_name], list):
result[prop_name] = [el[0] for el in
sorted(result[prop_name],
key=itemgetter(1))]
elif isinstance(result[prop_name], dict):
wrapper, unsorted_list = result[prop_name].popitem()
result[prop_name][wrapper] = [el[0] for el in
sorted(unsorted_list,
key=itemgetter(1))]
# 'id_perms_ts' tracks timestamp of id-perms column
# i.e. latest update of *any* prop or ref.
# 'row_latest_ts' tracks timestamp of last modified column
# so any backref/children column is also captured. 0=>unknown
results[obj_uuid] = {'obj_dict': result,
'id_perms_ts': id_perms_ts}
if include_backrefs_children:
# update our copy of ts only if we read the
# corresponding fields from db
results[obj_uuid]['row_latest_ts'] = row_latest_ts
# end for all rows
return results
# end _render_obj_from_db
def _read_child(self, result, obj_uuid, child_obj_type, child_uuid,
child_tstamp):
if '%ss' % (child_obj_type) not in result:
result['%ss' % (child_obj_type)] = []
child_res_type = self._get_resource_class(child_obj_type).resource_type
child_info = {}
child_info['to'] = self.uuid_to_fq_name(child_uuid)
child_info['uuid'] = child_uuid
child_info['tstamp'] = child_tstamp
result['%ss' % (child_obj_type)].append(child_info)
# end _read_child
def _read_ref(self, result, obj_uuid, ref_obj_type, ref_uuid, ref_data_json):
if '%s_refs' % (ref_obj_type) not in result:
result['%s_refs' % (ref_obj_type)] = []
ref_data = ref_data_json
ref_info = {}
try:
ref_info['to'] = self.uuid_to_fq_name(ref_uuid)
except NoIdError:
ref_info['to'] = ['ERROR']
if ref_data:
try:
ref_info['attr'] = ref_data['attr']
except KeyError:
                # TODO: remove backward compat; old format had attr directly
ref_info['attr'] = ref_data
ref_info['uuid'] = ref_uuid
result['%s_refs' % (ref_obj_type)].append(ref_info)
# end _read_ref
def _read_back_ref(self, result, obj_uuid, back_ref_obj_type, back_ref_uuid,
back_ref_data_json):
if '%s_back_refs' % (back_ref_obj_type) not in result:
result['%s_back_refs' % (back_ref_obj_type)] = []
back_ref_info = {}
back_ref_info['to'] = self.uuid_to_fq_name(back_ref_uuid)
back_ref_data = back_ref_data_json
if back_ref_data:
try:
back_ref_info['attr'] = back_ref_data['attr']
except KeyError:
                # TODO: remove backward compat; old format had attr directly
back_ref_info['attr'] = back_ref_data
back_ref_info['uuid'] = back_ref_uuid
result['%s_back_refs' % (back_ref_obj_type)].append(back_ref_info)
# end _read_back_ref
def walk(self, fn=None):
type_to_object = {}
for obj_uuid, obj_col in self._cassandra_driver.get_range(
datastore_api.OBJ_UUID_CF_NAME,
columns=['type', 'fq_name']):
try:
obj_type = json.loads(obj_col['type'])
obj_fq_name = json.loads(obj_col['fq_name'])
# prep cache to avoid n/w round-trip in db.read for ref
self.cache_uuid_to_fq_name_add(obj_uuid, obj_fq_name, obj_type)
try:
type_to_object[obj_type].append(obj_uuid)
except KeyError:
type_to_object[obj_type] = [obj_uuid]
except Exception as e:
self._logger('Error in db walk read %s' % (str(e)),
level=SandeshLevel.SYS_ERR)
continue
if fn is None:
return []
walk_results = []
for obj_type, uuid_list in list(type_to_object.items()):
try:
self._logger('DB walk: obj_type %s len %s'
% (obj_type, len(uuid_list)),
level=SandeshLevel.SYS_INFO)
result = fn(obj_type, uuid_list)
if result:
walk_results.append(result)
except Exception as e:
self._logger('Error in db walk invoke %s' % (str(e)),
level=SandeshLevel.SYS_ERR)
continue
return walk_results
# end walk
# end class VncCassandraClient
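# ObjectCacheManager keeps an LRU cache (an OrderedDict keyed by uuid) of
# rendered object dicts.  A cached entry is considered fresh when its stored
# timestamp still matches the write timestamp of the stale-check column in
# Cassandra: 'prop:id_perms' for reads without backrefs/children, or
# 'META:latest_col_ts' when backrefs/children are included.  Types listed in
# obj_cache_exclude_types are never cached; debug_obj_cache_types only adds
# verbose logging of cache set/read/evict operations.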
class ObjectCacheManager(object):
class CachedObject(object):
        # provide a read-only copy insofar as
        # top-level keys cannot be added/modified/deleted
class RODict(dict):
def __readonly__(self, *args, **kwargs):
raise RuntimeError("Cannot modify ReadOnlyDict")
__setitem__ = __readonly__
__delitem__ = __readonly__
pop = __readonly__
popitem = __readonly__
clear = __readonly__
update = __readonly__
setdefault = __readonly__
del __readonly__
# end RODict
def __init__(self, obj_dict, id_perms_ts, row_latest_ts):
self.obj_dict = self.RODict(obj_dict)
self.id_perms_ts = id_perms_ts
self.row_latest_ts = row_latest_ts
# end __init__
def update_obj_dict(self, new_obj_dict):
self.obj_dict = self.RODict(new_obj_dict)
# end update_obj_dict
def get_filtered_copy(self, field_names=None):
if not field_names:
return self.obj_dict
# TODO filter with field_names
return {k: copy.deepcopy(self.obj_dict[k])
for k in field_names if k in self.obj_dict}
# end get_filtered_copy
# end class CachedObject
def __init__(self, logger, db_client, max_entries,
obj_cache_exclude_types=None, debug_obj_cache_types=None):
self._logger = logger
self.max_entries = max_entries
self._db_client = db_client
self._cache = OrderedDict()
self._obj_cache_exclude_types = set(obj_cache_exclude_types or [])
self._debug_obj_cache_types = set(debug_obj_cache_types or [])
self._debug_obj_cache_types -= self._obj_cache_exclude_types
# end __init__
def _log(self, msg, level=SandeshLevel.SYS_DEBUG):
msg = 'Object UUID cache manager: %s' % msg
self._logger(msg, level)
def evict(self, obj_type, obj_uuids):
for obj_uuid in obj_uuids:
try:
obj_dict = self._cache.pop(obj_uuid).obj_dict
if obj_type in self._debug_obj_cache_types:
self._log("%s %s (%s) was evicted from cache. Cache "
"contained: %s" % (
obj_type.replace('_', '-').title(),
':'.join(obj_dict['fq_name']),
obj_uuid,
pformat(obj_dict),
),
)
except KeyError:
continue
# end evict
def set(self, obj_type, db_rendered_objs, req_fields,
include_backrefs_children):
# build up results with field filter
result_obj_dicts = []
if req_fields:
result_fields = set(req_fields) | set(['fq_name', 'uuid',
'parent_type', 'parent_uuid'])
for obj_uuid, render_info in list(db_rendered_objs.items()):
id_perms_ts = render_info.get('id_perms_ts', 0)
row_latest_ts = render_info.get('row_latest_ts', 0)
cached_obj = self._cache.pop(obj_uuid, None)
if cached_obj is not None:
# if we had stale, just update from new db value
cached_obj.update_obj_dict(render_info['obj_dict'])
cached_obj.id_perms_ts = id_perms_ts
if include_backrefs_children:
cached_obj.row_latest_ts = row_latest_ts
else:
# this was a miss in cache
cached_obj = self.CachedObject(
render_info['obj_dict'],
id_perms_ts,
row_latest_ts,
)
if len(self._cache) >= self.max_entries:
# get first element (least recently used)
# without getting full copy of dict keys
                    if hasattr(self._cache, 'iterkeys'):
                        key = next(self._cache.iterkeys())
                    else:
                        # 'keys()' returns an iterator with PY3.
                        key = next(iter(self._cache.keys()))
self.evict(obj_type, [key])
self._cache[obj_uuid] = cached_obj
if obj_type in self._debug_obj_cache_types:
self._log("%s %s (%s) was set in cache with values: %s" % (
obj_type.replace('_', ' ').title(),
':'.join(cached_obj.obj_dict['fq_name']),
obj_uuid,
pformat(cached_obj.obj_dict),
),
)
if req_fields:
result_obj_dicts.append(
cached_obj.get_filtered_copy(result_fields))
else:
result_obj_dicts.append(cached_obj.get_filtered_copy())
# end for all rendered objects
return result_obj_dicts
# end set
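    # read() splits the requested uuids into cache hits and misses, then
    # re-validates every hit by fetching just the stale-check column with its
    # write timestamp; entries whose timestamp no longer matches (or which
    # disappeared meanwhile) are evicted and reported back among the misses.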
def read(self, obj_class, obj_uuids, req_fields, include_backrefs_children):
# find which keys are a hit, find which hit keys are not stale
# return hit entries and miss+stale uuids.
hit_uuids = []
miss_uuids = []
for obj_uuid in obj_uuids:
if obj_uuid in self._cache:
hit_uuids.append(obj_uuid)
else:
miss_uuids.append(obj_uuid)
stale_uuids = []
# staleness when include_backrefs_children is False = id_perms tstamp
# when include_backrefs_children is True = latest_col_ts tstamp
if include_backrefs_children:
stale_check_col_name = 'META:latest_col_ts'
stale_check_ts_attr = 'row_latest_ts'
else:
stale_check_col_name = 'prop:id_perms'
stale_check_ts_attr = 'id_perms_ts'
hit_rows_in_db = self._db_client._cassandra_driver.multiget(
datastore_api.OBJ_UUID_CF_NAME, hit_uuids,
columns=[stale_check_col_name], timestamp=True)
obj_dicts = []
result_fields = {'fq_name', 'uuid', 'parent_type', 'parent_uuid'}
if req_fields:
result_fields = set(req_fields) | result_fields
for hit_uuid in hit_uuids:
try:
obj_cols = hit_rows_in_db[hit_uuid]
cached_obj = self._cache[hit_uuid]
except KeyError:
# Either stale check column missing, treat as miss
# Or entry could have been evicted while context switched
# for reading stale-check-col, treat as miss
miss_uuids.append(hit_uuid)
continue
if (getattr(cached_obj, stale_check_ts_attr) !=
obj_cols[stale_check_col_name][1]):
miss_uuids.append(hit_uuid)
stale_uuids.append(hit_uuid)
continue
if req_fields:
obj_dicts.append(cached_obj.get_filtered_copy(result_fields))
else:
obj_dicts.append(cached_obj.get_filtered_copy())
if obj_class.object_type in self._debug_obj_cache_types:
obj_rows = self._db_client._cassandra_driver.multiget(
datastore_api.OBJ_UUID_CF_NAME,
[hit_uuid],
timestamp=True)
rendered_objs = self._db_client._render_obj_from_db(
obj_class, obj_rows, req_fields, include_backrefs_children)
db_obj_dict = rendered_objs[hit_uuid]['obj_dict']
self._log("%s %s (%s) was read from cache.\nDB values: %s\n"
"Cache value: %s\n" % (
obj_class.object_type.replace('_', ' ').title(),
':'.join(cached_obj.obj_dict['fq_name']),
hit_uuid,
pformat(db_obj_dict),
pformat(cached_obj.obj_dict),
),
)
# end for all hit in cache
self.evict(obj_class.object_type, stale_uuids)
return obj_dicts, miss_uuids
# end read
def dump_cache(self, obj_uuids=None, count=10):
obj_dicts = {}
i = 1
if obj_uuids:
for obj_uuid in obj_uuids:
try:
obj = self._cache[obj_uuid]
except KeyError:
continue
obj_json = json.dumps(obj, default=lambda o: dict((k, v)
for k, v in list(o.__dict__.items())))
obj_dicts[i] = json.loads(obj_json)
i += 1
else:
for key in self._cache:
if i > count:
break
obj = self._cache[key]
obj_json = json.dumps(obj, default=lambda o: dict((k, v)
for k, v in list(o.__dict__.items())))
obj_dicts[i] = json.loads(obj_json)
i += 1
return obj_dicts
# end class ObjectCacheManager
| 42.319405
| 100
| 0.542985
|
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import next
from builtins import chr
from builtins import str
from builtins import range
from builtins import object
import copy
import os
import gevent
from pprint import pformat
import six
from vnc_api import vnc_api
from .exceptions import NoIdError, VncError
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from cfgm_common import jsonutils as json
from . import utils
import datetime
from operator import itemgetter
from collections import OrderedDict
from cfgm_common.datastore.drivers.cassandra_thrift import CassandraDriverThrift
from cfgm_common.datastore.drivers.cassandra_cql import CassandraDriverCQL
from cfgm_common.datastore import api as datastore_api
JSON_NONE = json.dumps(None)
class VncCassandraClient(object):
@staticmethod
def _is_metadata(column_name):
return column_name[:5] == 'META:'
@staticmethod
def _is_parent(column_name):
return column_name[:7] == 'parent:'
@staticmethod
def _is_prop(column_name):
return column_name[:5] == 'prop:'
@staticmethod
def _is_prop_list(column_name):
return column_name[:6] == 'propl:'
@staticmethod
def _is_prop_map(column_name):
return column_name[:6] == 'propm:'
@staticmethod
def _is_ref(column_name):
return column_name[:4] == 'ref:'
@staticmethod
def _is_backref(column_name):
return column_name[:8] == 'backref:'
@staticmethod
def _is_children(column_name):
return column_name[:9] == 'children:'
def add(self, cf_name, key, value):
try:
self._cassandra_driver.insert(key, value, cf_name=cf_name)
return True
except Exception as e:
self._logger("VNCCassandra, unable to add {}={}, error: {}".format(
key, value, e), level=SandeshLevel.SYS_WARN)
return False
def delete(self, cf_name, key, columns=None):
try:
self._cassandra_driver.remove(
key, columns, cf_name=cf_name)
return True
except Exception as e:
self._logger("VNCCassandra, unable to del {}={}, error: {}".format(
key, columns, e), level=SandeshLevel.SYS_WARN)
return False
def _get_resource_class(self, obj_type):
if hasattr(self, '_db_client_mgr'):
return self._db_client_mgr.get_resource_class(obj_type)
cls_name = '%s' % (utils.CamelCase(obj_type))
return getattr(vnc_api, cls_name)
@classmethod
def get_db_info(cls):
db_info = [(datastore_api.UUID_KEYSPACE_NAME, [datastore_api.OBJ_UUID_CF_NAME,
datastore_api.OBJ_FQ_NAME_CF_NAME,
datastore_api.OBJ_SHARED_CF_NAME])]
return db_info
def __init__(self, server_list, cassandra_driver, **options):
if cassandra_driver == 'cql':
driverClass = CassandraDriverCQL
elif cassandra_driver == 'thrift':
driverClass = CassandraDriverThrift
else:
raise VncError(
"datastore driver not selected, see `cassandra_driver`.")
self._cassandra_driver = driverClass(server_list, **options)
self._logger = self._cassandra_driver.options.logger
self._logger('VNCCassandra started with driver {}'.format(driverClass),
level=SandeshLevel.SYS_INFO)
self._cache_uuid_to_fq_name = {}
self._obj_cache_mgr = ObjectCacheManager(
self._cassandra_driver.options.logger,
self,
max_entries=self._cassandra_driver.options.obj_cache_entries,
obj_cache_exclude_types=self._cassandra_driver.options.obj_cache_exclude_types,
debug_obj_cache_types=self._cassandra_driver.options.debug_obj_cache_types,
)
self._obj_cache_exclude_types = self._cassandra_driver.options.obj_cache_exclude_types or []
# at the time of cassandra init, hence need to wrap these functions that
        # use it to catch cassandra connection failures.
self.object_update = self._cassandra_driver._handle_exceptions(
self.object_update)
self.object_list = self._cassandra_driver._handle_exceptions(
self.object_list)
self.object_read = self._cassandra_driver._handle_exceptions(
self.object_read)
self.object_raw_read = self._cassandra_driver._handle_exceptions(
self.object_raw_read)
self.object_delete = self._cassandra_driver._handle_exceptions(
self.object_delete)
self.prop_collection_read = self._cassandra_driver._handle_exceptions(
self.prop_collection_read)
self.uuid_to_fq_name = self._cassandra_driver._handle_exceptions(
self.uuid_to_fq_name)
self.uuid_to_obj_type = self._cassandra_driver._handle_exceptions(
self.uuid_to_obj_type)
self.fq_name_to_uuid = self._cassandra_driver._handle_exceptions(
self.fq_name_to_uuid)
self.get_shared = self._cassandra_driver._handle_exceptions(
self.get_shared)
self.walk = self._cassandra_driver._handle_exceptions(self.walk)
if self._cassandra_driver.options.walk:
self.walk()
# end __init__
def _create_prop(self, bch, obj_uuid, prop_name, prop_val):
self._cassandra_driver.insert(
obj_uuid,
{'prop:%s' % (prop_name): json.dumps(prop_val)},
batch=bch)
# end _create_prop
def _update_prop(self, bch, obj_uuid, prop_name, new_props):
if new_props[prop_name] is None:
self._cassandra_driver.remove(obj_uuid,
columns=['prop:' + prop_name],
batch=bch)
else:
self._cassandra_driver.insert(
obj_uuid,
{'prop:' + prop_name: json.dumps(new_props[prop_name])},
batch=bch)
# prop has been accounted for, remove so only new ones remain
del new_props[prop_name]
# end _update_prop
def _add_to_prop_list(self, bch, obj_uuid, prop_name,
prop_elem_value, prop_elem_position):
self._cassandra_driver.insert(obj_uuid,
{'propl:%s:%s' % (prop_name, prop_elem_position):
json.dumps(prop_elem_value)},
batch=bch)
# end _add_to_prop_list
def _delete_from_prop_list(self, bch, obj_uuid, prop_name,
prop_elem_position):
self._cassandra_driver.remove(
obj_uuid,
columns=['propl:%s:%s' % (prop_name, prop_elem_position)],
batch=bch)
# end _delete_from_prop_list
def _set_in_prop_map(self, bch, obj_uuid, prop_name,
prop_elem_value, prop_elem_position):
self._cassandra_driver.insert(obj_uuid,
{'propm:%s:%s' % (prop_name, prop_elem_position):
json.dumps(prop_elem_value)},
batch=bch)
# end _set_in_prop_map
def _delete_from_prop_map(self, bch, obj_uuid, prop_name,
prop_elem_position):
self._cassandra_driver.remove(
obj_uuid,
columns=['propm:%s:%s' % (prop_name, prop_elem_position)],
batch=bch)
# end _delete_from_prop_map
def _create_child(self, bch, parent_type, parent_uuid,
child_type, child_uuid):
child_col = {'children:%s:%s' %
(child_type, child_uuid): JSON_NONE}
self._cassandra_driver.insert(parent_uuid, child_col, batch=bch)
parent_col = {'parent:%s:%s' %
(parent_type, parent_uuid): JSON_NONE}
self._cassandra_driver.insert(child_uuid, parent_col, batch=bch)
# update latest_col_ts on parent object
if parent_type not in self._obj_cache_exclude_types:
self.update_latest_col_ts(bch, parent_uuid)
# end _create_child
def _delete_child(self, bch, parent_type, parent_uuid,
child_type, child_uuid):
self._cassandra_driver.remove(
parent_uuid,
columns=['children:%s:%s' % (child_type, child_uuid)],
batch=bch)
# update latest_col_ts on parent object
if parent_type not in self._obj_cache_exclude_types:
self.update_latest_col_ts(bch, parent_uuid)
# end _delete_child
def _create_ref(self, bch, obj_type, obj_uuid, ref_obj_type, ref_uuid,
ref_data):
j_ref_data = json.dumps(ref_data)
symmetric_ref_updates = []
self._cassandra_driver.insert(
obj_uuid, {'ref:%s:%s' %
(ref_obj_type, ref_uuid): j_ref_data},
batch=bch)
if obj_type == ref_obj_type:
self._cassandra_driver.insert(
ref_uuid, {'ref:%s:%s' %
(obj_type, obj_uuid): j_ref_data},
batch=bch)
self.update_last_modified(bch, obj_type, ref_uuid)
symmetric_ref_updates = [ref_uuid]
else:
self._cassandra_driver.insert(
ref_uuid, {'backref:%s:%s' %
(obj_type, obj_uuid): j_ref_data},
batch=bch)
# update latest_col_ts on referred object
if ref_obj_type not in self._obj_cache_exclude_types:
if ref_obj_type == obj_type:
# evict other side of ref since it is stale from
# GET /<old-ref-uuid> pov.
self._obj_cache_mgr.evict(obj_type, [ref_uuid])
else:
self.update_latest_col_ts(bch, ref_uuid)
return symmetric_ref_updates
# end _create_ref
def _update_ref(self, bch, obj_type, obj_uuid, ref_obj_type, old_ref_uuid,
new_ref_infos):
if ref_obj_type not in new_ref_infos:
# update body didn't touch this type, nop
return []
symmetric_ref_updates = []
if old_ref_uuid not in new_ref_infos[ref_obj_type]:
self._cassandra_driver.remove(
obj_uuid,
columns=['ref:%s:%s' % (ref_obj_type, old_ref_uuid)],
batch=bch)
if obj_type == ref_obj_type:
self._cassandra_driver.remove(
old_ref_uuid,
columns=['ref:%s:%s' % (obj_type, obj_uuid)],
batch=bch)
try:
self.update_last_modified(bch, obj_type, old_ref_uuid)
symmetric_ref_updates = [old_ref_uuid]
                except NoIdError:
pass
else:
self._cassandra_driver.remove(
old_ref_uuid,
columns=['backref:%s:%s' % (obj_type, obj_uuid)],
batch=bch)
else:
# retain old ref with new ref attr
new_ref_data = new_ref_infos[ref_obj_type][old_ref_uuid]
j_new_ref_data = json.dumps(new_ref_data)
self._cassandra_driver.insert(
obj_uuid,
{'ref:%s:%s' % (ref_obj_type, old_ref_uuid):
j_new_ref_data},
batch=bch)
if obj_type == ref_obj_type:
self._cassandra_driver.insert(
old_ref_uuid,
{'ref:%s:%s' % (obj_type, obj_uuid):
j_new_ref_data},
batch=bch)
self.update_last_modified(bch, obj_type, old_ref_uuid)
symmetric_ref_updates = [old_ref_uuid]
else:
self._cassandra_driver.insert(
old_ref_uuid,
{'backref:%s:%s' % (obj_type, obj_uuid):
j_new_ref_data},
batch=bch)
# uuid has been accounted for, remove so only new ones remain
del new_ref_infos[ref_obj_type][old_ref_uuid]
# update latest_col_ts on referred object
if ref_obj_type not in self._obj_cache_exclude_types:
if ref_obj_type == obj_type:
# evict other side of ref since it is stale from
# GET /<old-ref-uuid> pov.
self._obj_cache_mgr.evict(obj_type, [old_ref_uuid])
else:
self.update_latest_col_ts(bch, old_ref_uuid)
return symmetric_ref_updates
# end _update_ref
def _delete_ref(self, bch, obj_type, obj_uuid, ref_obj_type, ref_uuid):
send = False
symmetric_ref_updates = []
if bch is None:
send = True
bch = self._cassandra_driver.get_cf_batch(datastore_api.OBJ_UUID_CF_NAME)
self._cassandra_driver.remove(
obj_uuid,
columns=['ref:%s:%s' % (ref_obj_type, ref_uuid)],
batch=bch)
if obj_type == ref_obj_type:
self._cassandra_driver.remove(ref_uuid, columns=[
'ref:%s:%s' % (obj_type, obj_uuid)],
batch=bch)
try:
self.update_last_modified(bch, obj_type, ref_uuid)
symmetric_ref_updates = [ref_uuid]
            except NoIdError:
# ref_uuid might have been deleted
# if cache has the link, it will be evicted
# if cache doesn't have, keyerror is caught and continued
pass
else:
self._cassandra_driver.remove(
ref_uuid,
columns=['backref:%s:%s' % (obj_type, obj_uuid)],
batch=bch)
if ref_obj_type not in self._obj_cache_exclude_types:
if ref_obj_type == obj_type:
self._obj_cache_mgr.evict(obj_type, [ref_uuid])
else:
self.update_latest_col_ts(bch, ref_uuid)
if send:
bch.send()
return symmetric_ref_updates
def _get_xsd_class(self, xsd_type):
return getattr(vnc_api, xsd_type)
def object_create(self, obj_type, obj_id, obj_dict,
uuid_batch=None, fqname_batch=None):
obj_class = self._get_resource_class(obj_type)
if uuid_batch:
bch = uuid_batch
else:
bch = self._cassandra_driver.get_cf_batch(datastore_api.OBJ_UUID_CF_NAME)
obj_cols = {}
obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
obj_cols['type'] = json.dumps(obj_type)
if obj_type not in self._obj_cache_exclude_types:
obj_cols['META:latest_col_ts'] = JSON_NONE
if 'parent_type' in obj_dict:
parent_type = obj_dict['parent_type']
if parent_type not in obj_class.parent_types:
msg = ("Invalid parent type: %s not in %s" %
(parent_type, obj_class.parent_types))
return False, (400, msg)
parent_object_type = self._get_resource_class(
parent_type).object_type
parent_fq_name = obj_dict['fq_name'][:-1]
obj_cols['parent_type'] = json.dumps(parent_type)
parent_uuid = self.fq_name_to_uuid(parent_object_type,
parent_fq_name)
self._create_child(bch, parent_object_type, parent_uuid, obj_type,
obj_id)
for prop_field in obj_class.prop_fields:
field = obj_dict.get(prop_field)
if field is None:
continue
if prop_field == 'id_perms':
field['created'] = datetime.datetime.utcnow().isoformat()
field['last_modified'] = field['created']
if prop_field in obj_class.prop_list_fields:
if obj_class.prop_list_field_has_wrappers[prop_field]:
wrapper_field_keys = list(field.keys())
if wrapper_field_keys:
wrapper_field = wrapper_field_keys[0]
list_coll = field[wrapper_field]
else:
list_coll = []
else:
list_coll = field
for i in range(len(list_coll)):
self._add_to_prop_list(
bch, obj_id, prop_field, list_coll[i], str(i))
elif prop_field in obj_class.prop_map_fields:
if obj_class.prop_map_field_has_wrappers[prop_field]:
wrapper_field_keys = list(field.keys())
if wrapper_field_keys:
wrapper_field = wrapper_field_keys[0]
map_coll = field[wrapper_field]
else:
map_coll = []
else:
map_coll = field
map_key_name = obj_class.prop_map_field_key_names[prop_field]
for map_elem in map_coll:
map_key = map_elem[map_key_name]
self._set_in_prop_map(
bch, obj_id, prop_field, map_elem, map_key)
else:
self._create_prop(bch, obj_id, prop_field, field)
symmetric_ref_updates = []
for ref_field in obj_class.ref_fields:
ref_fld_types_list = list(obj_class.ref_field_types[ref_field])
ref_res_type = ref_fld_types_list[0]
ref_link_type = ref_fld_types_list[1]
ref_obj_type = self._get_resource_class(ref_res_type).object_type
refs = obj_dict.get(ref_field, [])
for ref in refs:
ref_uuid = self.fq_name_to_uuid(ref_obj_type, ref['to'])
ref_attr = ref.get('attr')
ref_data = {'attr': ref_attr, 'is_weakref': False}
ret = self._create_ref(bch, obj_type, obj_id, ref_obj_type, ref_uuid,
ref_data)
symmetric_ref_updates.extend(ret)
self._cassandra_driver.insert(obj_id, obj_cols, batch=bch)
if not uuid_batch:
bch.send()
fq_name_str = ':'.join(obj_dict['fq_name'])
fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_id:
JSON_NONE}
if fqname_batch:
fqname_batch.insert(obj_type, fq_name_cols)
else:
self._cassandra_driver.insert(
cf_name=datastore_api.OBJ_FQ_NAME_CF_NAME,
key=obj_type,
columns=fq_name_cols)
return (True, symmetric_ref_updates)
def object_raw_read(self, obj_type, obj_uuids, prop_names):
obj_class = self._get_resource_class(obj_type)
hit_obj_dicts, miss_uuids = self._obj_cache_mgr.read(
obj_class, obj_uuids, prop_names, False)
miss_obj_rows = self._cassandra_driver.multiget(
datastore_api.OBJ_UUID_CF_NAME, miss_uuids,
['prop:' + x for x in prop_names])
miss_obj_dicts = []
for obj_uuid, columns in list(miss_obj_rows.items()):
miss_obj_dict = {'uuid': obj_uuid}
for prop_name in columns:
miss_obj_dict[prop_name[5:]] = columns[prop_name]
miss_obj_dicts.append(miss_obj_dict)
return hit_obj_dicts + miss_obj_dicts
def object_read(self, obj_type, obj_uuids, field_names=None,
ret_readonly=False):
if not obj_uuids:
return (True, [])
req_fields = field_names
obj_class = self._get_resource_class(obj_type)
ref_fields = obj_class.ref_fields
backref_fields = obj_class.backref_fields
children_fields = obj_class.children_fields
list_fields = obj_class.prop_list_fields
map_fields = obj_class.prop_map_fields
prop_fields = obj_class.prop_fields - (list_fields | map_fields)
if ((ret_readonly is False) or
(obj_type in self._obj_cache_exclude_types)):
ignore_cache = True
else:
ignore_cache = False
if (field_names is None or
set(field_names) & (backref_fields | children_fields)):
include_backrefs_children = True
if ignore_cache:
hit_obj_dicts = []
miss_uuids = obj_uuids
else:
hit_obj_dicts, miss_uuids = self._obj_cache_mgr.read(
obj_class,
obj_uuids,
field_names,
include_backrefs_children,
)
miss_obj_rows = self._cassandra_driver.multiget(
datastore_api.OBJ_UUID_CF_NAME, miss_uuids,
timestamp=True)
else:
include_backrefs_children = False
if ignore_cache:
hit_obj_dicts = []
miss_uuids = obj_uuids
else:
hit_obj_dicts, miss_uuids = self._obj_cache_mgr.read(
obj_class,
obj_uuids,
field_names,
include_backrefs_children,
)
miss_obj_rows = self._cassandra_driver.multiget(
datastore_api.OBJ_UUID_CF_NAME,
miss_uuids,
start='d',
timestamp=True)
if (ignore_cache or
self._obj_cache_mgr.max_entries < len(miss_uuids)):
rendered_objs = self._render_obj_from_db(
obj_class, miss_obj_rows, req_fields,
include_backrefs_children)
obj_dicts = hit_obj_dicts + \
[v['obj_dict'] for k,v in list(rendered_objs.items())]
else:
# can fit and caller won't modify returned value,
rendered_objs_to_cache = self._render_obj_from_db(
obj_class, miss_obj_rows, None,
include_backrefs_children)
field_filtered_objs = self._obj_cache_mgr.set(
obj_type,
rendered_objs_to_cache,
req_fields,
include_backrefs_children,
)
obj_dicts = hit_obj_dicts + field_filtered_objs
if not obj_dicts:
if len(obj_uuids) == 1:
raise NoIdError(obj_uuids[0])
else:
return (True, [])
return (True, obj_dicts)
def object_count_children(self, obj_type, obj_uuid, child_type):
if child_type is None:
return (False, '')
obj_class = self._get_resource_class(obj_type)
if child_type not in obj_class.children_fields:
return (False,
'%s is not a child type of %s' % (child_type, obj_type))
col_start = 'children:' + child_type[:-1] + ':'
col_finish = 'children:' + child_type[:-1] + ';'
num_children = self._cassandra_driver.get_count(
datastore_api.OBJ_UUID_CF_NAME,
obj_uuid,
start=col_start,
finish=col_finish)
return (True, num_children)
def update_last_modified(self, bch, obj_type, obj_uuid, id_perms=None):
if id_perms is None:
id_perms = self._cassandra_driver.get_one_col(
datastore_api.OBJ_UUID_CF_NAME,
obj_uuid,
'prop:id_perms')
id_perms['last_modified'] = datetime.datetime.utcnow().isoformat()
self._update_prop(bch, obj_uuid, 'id_perms', {'id_perms': id_perms})
if obj_type not in self._obj_cache_exclude_types:
self.update_latest_col_ts(bch, obj_uuid)
def update_latest_col_ts(self, bch, obj_uuid):
try:
self._cassandra_driver.get_one_col(datastore_api.OBJ_UUID_CF_NAME,
obj_uuid,
'type')
except NoIdError:
return
self._cassandra_driver.insert(obj_uuid,
{'META:latest_col_ts':
JSON_NONE},
batch=bch)
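# object_update: walk the object's existing columns to refresh or delete
# props, prop-list/map elements and refs, apply the new values, then evict
# the uuid from the object cache.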
def object_update(self, obj_type, obj_uuid, new_obj_dict, uuid_batch=None):
obj_class = self._get_resource_class(obj_type)
new_ref_infos = {}
symmetric_ref_updates = []
new_props = {}
for prop_field in obj_class.prop_fields:
if prop_field in new_obj_dict:
new_props[prop_field] = new_obj_dict[prop_field]
for ref_field in obj_class.ref_fields:
ref_fld_types_list = list(obj_class.ref_field_types[ref_field])
ref_res_type = ref_fld_types_list[0]
ref_link_type = ref_fld_types_list[1]
is_weakref = ref_fld_types_list[2]
ref_obj_type = self._get_resource_class(ref_res_type).object_type
if ref_field in new_obj_dict:
new_refs = new_obj_dict[ref_field]
new_ref_infos[ref_obj_type] = {}
for new_ref in new_refs or []:
try:
new_ref_uuid = new_ref['uuid']
except KeyError:
new_ref_uuid = self.fq_name_to_uuid(ref_obj_type,
new_ref['to'])
new_ref_attr = new_ref.get('attr')
new_ref_data = {'attr': new_ref_attr,
'is_weakref': is_weakref}
new_ref_infos[ref_obj_type][new_ref_uuid] = new_ref_data
if uuid_batch:
bch = uuid_batch
else:
bch = self._cassandra_driver.get_cf_batch(
datastore_api.OBJ_UUID_CF_NAME)
for col_name, col_value in self._cassandra_driver.xget(
datastore_api.OBJ_UUID_CF_NAME, obj_uuid):
if self._is_prop(col_name):
(_, prop_name) = col_name.split(':')
if prop_name == 'id_perms':
new_id_perms = new_obj_dict.get(
prop_name, json.loads(col_value))
self.update_last_modified(
bch, obj_type, obj_uuid, new_id_perms)
elif prop_name in new_obj_dict:
self._update_prop(
bch, obj_uuid, prop_name, new_props)
if self._is_prop_list(col_name):
(_, prop_name, prop_elem_position) = col_name.split(':', 2)
if prop_name in new_props:
self._delete_from_prop_list(
bch, obj_uuid, prop_name, prop_elem_position)
if self._is_prop_map(col_name):
(_, prop_name, prop_elem_position) = col_name.split(':', 2)
if prop_name in new_props:
self._delete_from_prop_map(
bch, obj_uuid, prop_name, prop_elem_position)
if self._is_ref(col_name):
(_, ref_type, ref_uuid) = col_name.split(':')
ret = self._update_ref(bch, obj_type, obj_uuid, ref_type,
ref_uuid, new_ref_infos)
symmetric_ref_updates.extend(ret)
for ref_type in list(new_ref_infos.keys()):
for ref_uuid in list(new_ref_infos[ref_type].keys()):
ref_data = new_ref_infos[ref_type][ref_uuid]
ret = self._create_ref(bch, obj_type, obj_uuid, ref_type,
ref_uuid, ref_data)
symmetric_ref_updates.extend(ret)
for prop_name in list(new_props.keys()):
if prop_name in obj_class.prop_list_fields:
if (obj_class.prop_list_field_has_wrappers[prop_name] and
new_props[prop_name]):
wrapper_field = list(new_props[prop_name].keys())[0]
list_coll = new_props[prop_name][wrapper_field]
else:
list_coll = new_props[prop_name]
for i in range(len(list_coll)):
self._add_to_prop_list(bch, obj_uuid, prop_name,
list_coll[i], str(i))
elif prop_name in obj_class.prop_map_fields:
if (obj_class.prop_map_field_has_wrappers[prop_name] and
new_props[prop_name]):
wrapper_field = list(new_props[prop_name].keys())[0]
map_coll = new_props[prop_name][wrapper_field]
else:
map_coll = new_props[prop_name]
map_key_name = obj_class.prop_map_field_key_names[prop_name]
for map_elem in map_coll:
map_key = map_elem[map_key_name]
self._set_in_prop_map(bch, obj_uuid, prop_name,
map_elem, map_key)
else:
self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
if not uuid_batch:
try:
bch.send()
finally:
self._obj_cache_mgr.evict(obj_type, [obj_uuid])
return (True, symmetric_ref_updates)
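# object_list: list objects anchored on parent uuids, back-ref uuids, an
# explicit uuid list, or the whole fq-name index, with optional property
# filters, count-only mode and marker-based pagination.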
def object_list(self, obj_type, parent_uuids=None, back_ref_uuids=None,
obj_uuids=None, count=False, filters=None,
paginate_start=None, paginate_count=None):
obj_class = self._get_resource_class(obj_type)
children_fq_names_uuids = []
ret_marker = None
anchored_op = True
def filter_rows(coll_infos, filters=None):
if not coll_infos or not filters:
return coll_infos
filtered_infos = {}
columns = ['prop:%s' % filter_key for filter_key in filters if
filter_key in obj_class.prop_fields]
if not columns:
return coll_infos
rows = self._cassandra_driver.multiget(datastore_api.OBJ_UUID_CF_NAME,
list(coll_infos.keys()),
columns=columns)
for obj_uuid, properties in list(rows.items()):
gevent.sleep(0)
full_match = True
for filter_key, filter_values in list(filters.items()):
property = 'prop:%s' % filter_key
if property not in properties:
full_match = False
break
prop_value = properties[property]
if isinstance(prop_value, dict):
for filter_value in filter_values:
try:
filter_dict = json.loads(filter_value)
except ValueError:
continue
if (six.viewitems(filter_dict) <=
six.viewitems(prop_value)):
break
else:
full_match = False
break
elif prop_value not in filter_values:
full_match = False
break
if full_match:
filtered_infos[obj_uuid] = coll_infos[obj_uuid]
return filtered_infos
def get_fq_name_uuid_list(obj_uuids):
ret_list = []
for obj_uuid in obj_uuids:
try:
if obj_type != self.uuid_to_obj_type(obj_uuid):
continue
obj_fq_name = self.uuid_to_fq_name(obj_uuid)
ret_list.append((obj_fq_name, obj_uuid))
except NoIdError:
pass
return ret_list
if parent_uuids:
if paginate_start and paginate_start != '0':
start = 'children:%s:%s' % (obj_type,
paginate_start[:-1]+chr(ord(paginate_start[-1])+1))
num_columns = paginate_count
else:
start = 'children:%s:' % (obj_type)
num_columns = None
obj_rows = self._cassandra_driver.multiget(
datastore_api.OBJ_UUID_CF_NAME,
parent_uuids,
start=start,
finish='children:%s;' % (obj_type),
num_columns=num_columns,
timestamp=True)
def filter_rows_parent_anchor(sort=False):
all_cols = [cols for obj_key in list(obj_rows.keys())
for cols in list(obj_rows[obj_key].items())]
all_child_infos = {}
for col_name, col_val_ts in all_cols:
gevent.sleep(0)
child_uuid = col_name.split(':')[2]
if obj_uuids and child_uuid not in obj_uuids:
continue
if back_ref_uuids:
child_cols = self._cassandra_driver.get(
datastore_api.OBJ_UUID_CF_NAME,
child_uuid,
start='ref:',
finish='ref;')
child_ref_ids = {col.split(':')[2]
for col in child_cols or []}
if not set(back_ref_uuids) & child_ref_ids:
continue
all_child_infos[child_uuid] = {'uuid': child_uuid,
'tstamp': col_val_ts[1]}
filt_child_infos = filter_rows(all_child_infos, filters)
if not sort:
ret_child_infos = list(filt_child_infos.values())
else:
ret_child_infos = sorted(list(filt_child_infos.values()),
key=itemgetter('tstamp'))
return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
children_fq_names_uuids.extend(filter_rows_parent_anchor(sort=True))
elif back_ref_uuids:
if paginate_start and paginate_start != '0':
start = 'backref:%s:%s' % (obj_type,
paginate_start[:-1]+chr(ord(paginate_start[-1])+1))
num_columns = paginate_count
else:
start = 'backref:%s:' % (obj_type)
num_columns = None
obj_rows = self._cassandra_driver.multiget(
datastore_api.OBJ_UUID_CF_NAME,
back_ref_uuids,
start=start,
finish='backref:%s;' % (obj_type),
num_columns=num_columns,
timestamp=True)
def filter_rows_backref_anchor():
all_cols = [cols for obj_key in list(obj_rows.keys())
for cols in list(obj_rows[obj_key].items())]
all_backref_infos = {}
for col_name, col_val_ts in all_cols:
gevent.sleep(0)
backref_uuid = col_name.split(':')[2]
if obj_uuids and backref_uuid not in obj_uuids:
continue
all_backref_infos[backref_uuid] = \
{'uuid': backref_uuid, 'tstamp': col_val_ts[1]}
filt_backref_infos = filter_rows(all_backref_infos, filters)
return get_fq_name_uuid_list(r['uuid'] for r in
list(filt_backref_infos.values()))
children_fq_names_uuids.extend(filter_rows_backref_anchor())
else:
anchored_op = False
if obj_uuids:
def filter_rows_object_list():
all_obj_infos = {}
marker = None
read_in = 0
start_idx = 0
if paginate_start and paginate_start != '0':
try:
start_idx = obj_uuids.index(paginate_start) + 1
except ValueError:
start_idx = len(obj_uuids)
for obj_uuid in obj_uuids[start_idx:]:
all_obj_infos[obj_uuid] = None
read_in += 1
if paginate_start and read_in >= paginate_count:
marker = obj_uuid
break
filt_obj_infos = filter_rows(all_obj_infos, filters)
return get_fq_name_uuid_list(list(filt_obj_infos.keys())), marker
filtered_rows, ret_marker = filter_rows_object_list()
children_fq_names_uuids.extend(filtered_rows)
else:
if paginate_start and paginate_start != '0':
start = paginate_start[:-1] + \
chr(ord(paginate_start[-1]) + 1)
else:
start = ''
cols = self._cassandra_driver.xget(
datastore_api.OBJ_FQ_NAME_CF_NAME, '%s' %(obj_type),
start=start)
def filter_rows_no_anchor():
marker = None
all_obj_infos = {}
read_in = 0
for col_name, _ in cols:
gevent.sleep(0)
col_name_arr = utils.decode_string(col_name).split(':')
obj_uuid = col_name_arr[-1]
all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
read_in += 1
if paginate_start and read_in >= paginate_count:
marker = col_name
break
filt_obj_infos = filter_rows(all_obj_infos, filters)
return list(filt_obj_infos.values()), marker
if count and not filters:
return (True, sum(1 for col in cols), None)
filtered_rows, ret_marker = filter_rows_no_anchor()
children_fq_names_uuids.extend(filtered_rows)
if count:
return (True, len(children_fq_names_uuids), None)
if paginate_start and anchored_op:
children_fq_names_uuids = sorted(children_fq_names_uuids,
key=lambda fqn_uuid: fqn_uuid[1])
if len(children_fq_names_uuids) > paginate_count:
children_fq_names_uuids = children_fq_names_uuids[:paginate_count]
if not children_fq_names_uuids:
ret_marker = None
else:
ret_marker = children_fq_names_uuids[-1][1]
return (True, children_fq_names_uuids, ret_marker)
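# object_delete: unlink the object from its parent, drop its refs and
# relaxed back-refs, remove the uuid row, evict it from the cache and clear
# its fq-name index entry.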
def object_delete(self, obj_type, obj_uuid):
obj_class = self._get_resource_class(obj_type)
fq_name = self._cassandra_driver.get_one_col(datastore_api.OBJ_UUID_CF_NAME,
obj_uuid,
'fq_name')
bch = self._cassandra_driver.get_cf_batch(datastore_api.OBJ_UUID_CF_NAME)
col_start = 'parent:'
col_fin = 'parent;'
col_name_iter = self._cassandra_driver.xget(datastore_api.OBJ_UUID_CF_NAME,
obj_uuid, start=col_start, finish=col_fin)
for (col_name, col_val) in col_name_iter:
(_, parent_type, parent_uuid) = col_name.split(':')
self._delete_child(
bch, parent_type, parent_uuid, obj_type, obj_uuid)
col_start = 'ref:'
col_fin = 'ref;'
col_name_iter = self._cassandra_driver.xget(datastore_api.OBJ_UUID_CF_NAME,
obj_uuid, start=col_start, finish=col_fin)
symmetric_ref_updates = []
for (col_name, col_val) in col_name_iter:
(_, ref_type, ref_uuid) = col_name.split(':')
ret = self._delete_ref(bch, obj_type, obj_uuid, ref_type, ref_uuid)
symmetric_ref_updates.extend(ret)
col_start = 'relaxbackref:'
col_fin = 'relaxbackref;'
col_name_iter = self._cassandra_driver.xget(datastore_api.OBJ_UUID_CF_NAME,
obj_uuid, start=col_start, finish=col_fin)
for (col_name, col_val) in col_name_iter:
(_, backref_uuid) = col_name.split(':')
self._delete_ref(bch, None, backref_uuid, obj_type, obj_uuid)
self._cassandra_driver.remove(obj_uuid, batch=bch)
try:
bch.send()
finally:
self._obj_cache_mgr.evict(obj_type, [obj_uuid])
fq_name_str = ':'.join(fq_name)
fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
self._cassandra_driver.remove(
cf_name=datastore_api.OBJ_FQ_NAME_CF_NAME,
key=obj_type,
columns=[fq_name_col])
self.cache_uuid_to_fq_name_del(obj_uuid)
return (True, symmetric_ref_updates)
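# prop_collection_read: read elements of list ('propl:') and map ('propm:')
# properties, optionally restricted to a single position or key, along with
# the object's id_perms.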
def prop_collection_read(self, obj_type, obj_uuid, obj_fields, position):
obj_class = self._get_resource_class(obj_type)
result = {}
result['id_perms'] = self._cassandra_driver.get_one_col(
datastore_api.OBJ_UUID_CF_NAME,
obj_uuid,
'prop:id_perms')
for field in obj_fields:
if field in obj_class.prop_list_fields:
prop_pfx = 'propl'
elif field in obj_class.prop_map_fields:
prop_pfx = 'propm'
else:
continue
if position:
col_start = '%s:%s:%s' % (prop_pfx, field, position)
col_end = '%s:%s:%s' % (prop_pfx, field, position)
else:
col_start = '%s:%s:' % (prop_pfx, field)
col_end = '%s:%s;' % (prop_pfx, field)
obj_cols = self._cassandra_driver.xget(
datastore_api.OBJ_UUID_CF_NAME,
obj_uuid,
start=col_start,
finish=col_end)
result[field] = []
for name, value in obj_cols:
result[field].append((json.loads(value), name.split(':', 2)[-1]))
return (True, result)
def cache_uuid_to_fq_name_add(self, id, fq_name, obj_type):
self._cache_uuid_to_fq_name[id] = (fq_name, obj_type)
def cache_uuid_to_fq_name_del(self, id):
self._cache_uuid_to_fq_name.pop(id, None)
def uuid_to_fq_name(self, id):
try:
return copy.copy(self._cache_uuid_to_fq_name[id][0])
except KeyError:
obj = self._cassandra_driver.get(datastore_api.OBJ_UUID_CF_NAME, id,
columns=['fq_name', 'type'])
if not obj:
raise NoIdError(id)
if 'type' not in obj or 'fq_name' not in obj:
raise NoIdError(id)
fq_name = obj['fq_name']
obj_type = obj['type']
self.cache_uuid_to_fq_name_add(id, fq_name, obj_type)
return copy.copy(fq_name)
def uuid_to_obj_type(self, id):
try:
return self._cache_uuid_to_fq_name[id][1]
except KeyError:
obj = self._cassandra_driver.get(datastore_api.OBJ_UUID_CF_NAME, id,
columns=['fq_name', 'type'])
if not obj:
raise NoIdError(id)
if 'type' not in obj or 'fq_name' not in obj:
raise NoIdError(id)
fq_name = obj['fq_name']
obj_type = obj['type']
self.cache_uuid_to_fq_name_add(id, fq_name, obj_type)
return obj_type
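# fq_name_to_uuid: resolve an fq_name to its uuid via the fq-name index;
# raises NoIdError when absent and VncError on multiple matches.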
def fq_name_to_uuid(self, obj_type, fq_name):
fq_name_str = utils.encode_string(':'.join(fq_name))
col_infos = self._cassandra_driver.get(datastore_api.OBJ_FQ_NAME_CF_NAME,
obj_type,
start=fq_name_str + ':',
finish=fq_name_str + ';')
if not col_infos:
raise NoIdError('%s %s' % (obj_type, fq_name_str))
if len(col_infos) > 1:
raise VncError('Multi match %s for %s' % (fq_name_str, obj_type))
fq_name_uuid = utils.decode_string(col_infos.popitem()[0]).split(':')
if obj_type != 'route_target' and fq_name_uuid[:-1] != fq_name:
raise NoIdError('%s %s' % (obj_type, fq_name_str))
return fq_name_uuid[-1]
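# get_shared/set_shared/del_shared: shared-object index rows keyed by
# obj_type, with columns '<share_type>:<share_id>:<obj_uuid>' holding the
# rwx bits.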
def get_shared(self, obj_type, share_id='', share_type='global'):
result = []
column = '%s:%s' % (share_type, share_id)
col_infos = self._cassandra_driver.get(datastore_api.OBJ_SHARED_CF_NAME,
obj_type,
start=column + ':',
finish=column + ';')
if not col_infos:
return None
for (col_name, col_val) in list(col_infos.items()):
obj_uuid = col_name.split(':')[-1]
result.append((obj_uuid, col_val))
return result
def set_shared(self, obj_type, obj_id, share_id = '', share_type = 'global', rwx = 7):
col_name = '%s:%s:%s' % (share_type, share_id, obj_id)
self._cassandra_driver.insert(
cf_name=datastore_api.OBJ_SHARED_CF_NAME,
key=obj_type,
columns={col_name:json.dumps(rwx)})
def del_shared(self, obj_type, obj_id, share_id = '', share_type = 'global'):
col_name = '%s:%s:%s' % (share_type, share_id, obj_id)
self._cassandra_driver.remove(
cf_name=datastore_api.OBJ_SHARED_CF_NAME,
key=obj_type,
columns=[col_name])
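# _render_obj_from_db: turn raw Cassandra rows into API-style object dicts;
# decode parent, prop, ref, backref and children columns, apply the field
# filter, sort children and positional list properties, and capture the
# timestamps used for cache staleness checks.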
def _render_obj_from_db(self, obj_class, obj_rows, field_names=None,
include_backrefs_children=False):
ref_fields = obj_class.ref_fields
backref_fields = obj_class.backref_fields
children_fields = obj_class.children_fields
list_fields = obj_class.prop_list_fields
map_fields = obj_class.prop_map_fields
prop_fields = obj_class.prop_fields - (list_fields | map_fields)
results = {}
for obj_uuid, obj_cols in list(obj_rows.items()):
if 'type' not in obj_cols or 'fq_name' not in obj_cols:
continue
if obj_class.object_type != obj_cols.pop('type')[0]:
continue
id_perms_ts = 0
row_latest_ts = 0
result = {}
result['uuid'] = obj_uuid
result['fq_name'] = obj_cols.pop('fq_name')[0]
for col_name in list(obj_cols.keys()):
if self._is_parent(col_name):
(_, _, parent_uuid) = col_name.split(':')
try:
result['parent_type'] = obj_cols['parent_type'][0]
except KeyError:
pass
result['parent_uuid'] = parent_uuid
continue
if self._is_prop(col_name):
(_, prop_name) = col_name.split(':')
if prop_name == 'id_perms':
id_perms_ts = obj_cols[col_name][1]
if ((prop_name not in prop_fields) or
(field_names and prop_name not in field_names)):
continue
result[prop_name] = obj_cols[col_name][0]
continue
if self._is_prop_list(col_name):
(_, prop_name, prop_elem_position) = col_name.split(':')
if field_names and prop_name not in field_names:
continue
if obj_class.prop_list_field_has_wrappers[prop_name]:
prop_field_types = obj_class.prop_field_types[prop_name]
wrapper_type = prop_field_types['xsd_type']
wrapper_cls = self._get_xsd_class(wrapper_type)
wrapper_field = wrapper_cls.attr_fields[0]
if prop_name not in result:
result[prop_name] = {wrapper_field: []}
result[prop_name][wrapper_field].append(
(obj_cols[col_name][0], prop_elem_position))
else:
if prop_name not in result:
result[prop_name] = []
result[prop_name].append((obj_cols[col_name][0],
prop_elem_position))
continue
if self._is_prop_map(col_name):
(_, prop_name, _) = col_name.split(':', 2)
if field_names and prop_name not in field_names:
continue
if obj_class.prop_map_field_has_wrappers[prop_name]:
prop_field_types = obj_class.prop_field_types[prop_name]
wrapper_type = prop_field_types['xsd_type']
wrapper_cls = self._get_xsd_class(wrapper_type)
wrapper_field = wrapper_cls.attr_fields[0]
if prop_name not in result:
result[prop_name] = {wrapper_field: []}
result[prop_name][wrapper_field].append(
obj_cols[col_name][0])
else:
if prop_name not in result:
result[prop_name] = []
result[prop_name].append(obj_cols[col_name][0])
continue
if self._is_children(col_name):
(_, child_type, child_uuid) = col_name.split(':')
if field_names and '%ss' %(child_type) not in field_names:
continue
if child_type+'s' not in children_fields:
continue
child_tstamp = obj_cols[col_name][1]
try:
self._read_child(result, obj_uuid, child_type,
child_uuid, child_tstamp)
except NoIdError:
continue
continue
if self._is_ref(col_name):
(_, ref_type, ref_uuid) = col_name.split(':')
if ((ref_type+'_refs' not in ref_fields) or
(field_names and ref_type + '_refs' not in field_names)):
continue
self._read_ref(result, obj_uuid, ref_type, ref_uuid,
obj_cols[col_name][0])
continue
if self._is_backref(col_name):
(_, back_ref_type, back_ref_uuid) = col_name.split(':')
if back_ref_type+'_back_refs' not in backref_fields:
continue
if (field_names and
'%s_back_refs' %(back_ref_type) not in field_names):
continue
try:
self._read_back_ref(result, obj_uuid, back_ref_type,
back_ref_uuid, obj_cols[col_name][0])
except NoIdError:
continue
continue
if self._is_metadata(col_name):
(_, meta_type) = col_name.split(':')
if meta_type == 'latest_col_ts':
row_latest_ts = obj_cols[col_name][1]
continue
for child_field in obj_class.children_fields:
if child_field not in result:
continue
sorted_children = sorted(result[child_field],
key = itemgetter('tstamp'))
result[child_field] = sorted_children
[child.pop('tstamp') for child in result[child_field]]
# for all children
# Ordering property lists by position attribute
for prop_name in (obj_class.prop_list_fields & set(result.keys())):
if isinstance(result[prop_name], list):
result[prop_name] = [el[0] for el in
sorted(result[prop_name],
key=itemgetter(1))]
elif isinstance(result[prop_name], dict):
wrapper, unsorted_list = result[prop_name].popitem()
result[prop_name][wrapper] = [el[0] for el in
sorted(unsorted_list,
key=itemgetter(1))]
# 'id_perms_ts' tracks timestamp of id-perms column
# i.e. latest update of *any* prop or ref.
# 'row_latest_ts' tracks timestamp of last modified column
# so any backref/children column is also captured. 0=>unknown
results[obj_uuid] = {'obj_dict': result,
'id_perms_ts': id_perms_ts}
if include_backrefs_children:
# update our copy of ts only if we read the
# corresponding fields from db
results[obj_uuid]['row_latest_ts'] = row_latest_ts
# end for all rows
return results
# end _render_obj_from_db
def _read_child(self, result, obj_uuid, child_obj_type, child_uuid,
child_tstamp):
if '%ss' % (child_obj_type) not in result:
result['%ss' % (child_obj_type)] = []
child_res_type = self._get_resource_class(child_obj_type).resource_type
child_info = {}
child_info['to'] = self.uuid_to_fq_name(child_uuid)
child_info['uuid'] = child_uuid
child_info['tstamp'] = child_tstamp
result['%ss' % (child_obj_type)].append(child_info)
# end _read_child
def _read_ref(self, result, obj_uuid, ref_obj_type, ref_uuid, ref_data_json):
if '%s_refs' % (ref_obj_type) not in result:
result['%s_refs' % (ref_obj_type)] = []
ref_data = ref_data_json
ref_info = {}
try:
ref_info['to'] = self.uuid_to_fq_name(ref_uuid)
except NoIdError:
ref_info['to'] = ['ERROR']
if ref_data:
try:
ref_info['attr'] = ref_data['attr']
except KeyError:
# TODO: remove backward-compat handling; the old format stored attr directly
ref_info['attr'] = ref_data
ref_info['uuid'] = ref_uuid
result['%s_refs' % (ref_obj_type)].append(ref_info)
# end _read_ref
def _read_back_ref(self, result, obj_uuid, back_ref_obj_type, back_ref_uuid,
back_ref_data_json):
if '%s_back_refs' % (back_ref_obj_type) not in result:
result['%s_back_refs' % (back_ref_obj_type)] = []
back_ref_info = {}
back_ref_info['to'] = self.uuid_to_fq_name(back_ref_uuid)
back_ref_data = back_ref_data_json
if back_ref_data:
try:
back_ref_info['attr'] = back_ref_data['attr']
except KeyError:
# TODO: remove backward-compat handling; the old format stored attr directly
back_ref_info['attr'] = back_ref_data
back_ref_info['uuid'] = back_ref_uuid
result['%s_back_refs' % (back_ref_obj_type)].append(back_ref_info)
# end _read_back_ref
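# walk: iterate over every object row, prime the uuid->fq_name cache, group
# uuids by type and invoke the supplied callback per type, collecting its
# results.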
def walk(self, fn=None):
type_to_object = {}
for obj_uuid, obj_col in self._cassandra_driver.get_range(
datastore_api.OBJ_UUID_CF_NAME,
columns=['type', 'fq_name']):
try:
obj_type = json.loads(obj_col['type'])
obj_fq_name = json.loads(obj_col['fq_name'])
# prep cache to avoid n/w round-trip in db.read for ref
self.cache_uuid_to_fq_name_add(obj_uuid, obj_fq_name, obj_type)
try:
type_to_object[obj_type].append(obj_uuid)
except KeyError:
type_to_object[obj_type] = [obj_uuid]
except Exception as e:
self._logger('Error in db walk read %s' % (str(e)),
level=SandeshLevel.SYS_ERR)
continue
if fn is None:
return []
walk_results = []
for obj_type, uuid_list in list(type_to_object.items()):
try:
self._logger('DB walk: obj_type %s len %s'
% (obj_type, len(uuid_list)),
level=SandeshLevel.SYS_INFO)
result = fn(obj_type, uuid_list)
if result:
walk_results.append(result)
except Exception as e:
self._logger('Error in db walk invoke %s' % (str(e)),
level=SandeshLevel.SYS_ERR)
continue
return walk_results
# end walk
# end class VncCassandraClient
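# ObjectCacheManager: in-memory cache of rendered object dicts keyed by
# uuid, oldest entry evicted when full; entries carry id_perms and
# latest-column timestamps so reads can detect and drop stale copies.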
class ObjectCacheManager(object):
class CachedObject(object):
# provide a read-only copy in so far as
# top level keys cannot be add/mod/del
class RODict(dict):
def __readonly__(self, *args, **kwargs):
raise RuntimeError("Cannot modify ReadOnlyDict")
__setitem__ = __readonly__
__delitem__ = __readonly__
pop = __readonly__
popitem = __readonly__
clear = __readonly__
update = __readonly__
setdefault = __readonly__
del __readonly__
# end RODict
def __init__(self, obj_dict, id_perms_ts, row_latest_ts):
self.obj_dict = self.RODict(obj_dict)
self.id_perms_ts = id_perms_ts
self.row_latest_ts = row_latest_ts
# end __init__
def update_obj_dict(self, new_obj_dict):
self.obj_dict = self.RODict(new_obj_dict)
# end update_obj_dict
def get_filtered_copy(self, field_names=None):
if not field_names:
return self.obj_dict
# TODO filter with field_names
return {k: copy.deepcopy(self.obj_dict[k])
for k in field_names if k in self.obj_dict}
# end get_filtered_copy
# end class CachedObject
def __init__(self, logger, db_client, max_entries,
obj_cache_exclude_types=None, debug_obj_cache_types=None):
self._logger = logger
self.max_entries = max_entries
self._db_client = db_client
self._cache = OrderedDict()
self._obj_cache_exclude_types = set(obj_cache_exclude_types or [])
self._debug_obj_cache_types = set(debug_obj_cache_types or [])
self._debug_obj_cache_types -= self._obj_cache_exclude_types
# end __init__
def _log(self, msg, level=SandeshLevel.SYS_DEBUG):
msg = 'Object UUID cache manager: %s' % msg
self._logger(msg, level)
def evict(self, obj_type, obj_uuids):
for obj_uuid in obj_uuids:
try:
obj_dict = self._cache.pop(obj_uuid).obj_dict
if obj_type in self._debug_obj_cache_types:
self._log("%s %s (%s) was evicted from cache. Cache "
"contained: %s" % (
obj_type.replace('_', '-').title(),
':'.join(obj_dict['fq_name']),
obj_uuid,
pformat(obj_dict),
),
)
except KeyError:
continue
# end evict
def set(self, obj_type, db_rendered_objs, req_fields,
include_backrefs_children):
# build up results with field filter
result_obj_dicts = []
if req_fields:
result_fields = set(req_fields) | set(['fq_name', 'uuid',
'parent_type', 'parent_uuid'])
for obj_uuid, render_info in list(db_rendered_objs.items()):
id_perms_ts = render_info.get('id_perms_ts', 0)
row_latest_ts = render_info.get('row_latest_ts', 0)
cached_obj = self._cache.pop(obj_uuid, None)
if cached_obj is not None:
# if we had stale, just update from new db value
cached_obj.update_obj_dict(render_info['obj_dict'])
cached_obj.id_perms_ts = id_perms_ts
if include_backrefs_children:
cached_obj.row_latest_ts = row_latest_ts
else:
# this was a miss in cache
cached_obj = self.CachedObject(
render_info['obj_dict'],
id_perms_ts,
row_latest_ts,
)
if len(self._cache) >= self.max_entries:
# get first element (least recently used)
# without getting full copy of dict keys
# next(iter(...)) returns the first (oldest) key on both PY2 and PY3
# without copying the whole key list
key = next(iter(self._cache))
self.evict(obj_type, [key])
self._cache[obj_uuid] = cached_obj
if obj_type in self._debug_obj_cache_types:
self._log("%s %s (%s) was set in cache with values: %s" % (
obj_type.replace('_', ' ').title(),
':'.join(cached_obj.obj_dict['fq_name']),
obj_uuid,
pformat(cached_obj.obj_dict),
),
)
if req_fields:
result_obj_dicts.append(
cached_obj.get_filtered_copy(result_fields))
else:
result_obj_dicts.append(cached_obj.get_filtered_copy())
# end for all rendered objects
return result_obj_dicts
# end set
def read(self, obj_class, obj_uuids, req_fields, include_backrefs_children):
# find which keys are a hit, find which hit keys are not stale
# return hit entries and miss+stale uuids.
hit_uuids = []
miss_uuids = []
for obj_uuid in obj_uuids:
if obj_uuid in self._cache:
hit_uuids.append(obj_uuid)
else:
miss_uuids.append(obj_uuid)
stale_uuids = []
# staleness when include_backrefs_children is False = id_perms tstamp
# when include_backrefs_children is True = latest_col_ts tstamp
if include_backrefs_children:
stale_check_col_name = 'META:latest_col_ts'
stale_check_ts_attr = 'row_latest_ts'
else:
stale_check_col_name = 'prop:id_perms'
stale_check_ts_attr = 'id_perms_ts'
hit_rows_in_db = self._db_client._cassandra_driver.multiget(
datastore_api.OBJ_UUID_CF_NAME, hit_uuids,
columns=[stale_check_col_name], timestamp=True)
obj_dicts = []
result_fields = {'fq_name', 'uuid', 'parent_type', 'parent_uuid'}
if req_fields:
result_fields = set(req_fields) | result_fields
for hit_uuid in hit_uuids:
try:
obj_cols = hit_rows_in_db[hit_uuid]
cached_obj = self._cache[hit_uuid]
except KeyError:
# Either stale check column missing, treat as miss
# Or entry could have been evicted while context switched
# for reading stale-check-col, treat as miss
miss_uuids.append(hit_uuid)
continue
if (getattr(cached_obj, stale_check_ts_attr) !=
obj_cols[stale_check_col_name][1]):
miss_uuids.append(hit_uuid)
stale_uuids.append(hit_uuid)
continue
if req_fields:
obj_dicts.append(cached_obj.get_filtered_copy(result_fields))
else:
obj_dicts.append(cached_obj.get_filtered_copy())
if obj_class.object_type in self._debug_obj_cache_types:
obj_rows = self._db_client._cassandra_driver.multiget(
datastore_api.OBJ_UUID_CF_NAME,
[hit_uuid],
timestamp=True)
rendered_objs = self._db_client._render_obj_from_db(
obj_class, obj_rows, req_fields, include_backrefs_children)
db_obj_dict = rendered_objs[hit_uuid]['obj_dict']
self._log("%s %s (%s) was read from cache.\nDB values: %s\n"
"Cache value: %s\n" % (
obj_class.object_type.replace('_', ' ').title(),
':'.join(cached_obj.obj_dict['fq_name']),
hit_uuid,
pformat(db_obj_dict),
pformat(cached_obj.obj_dict),
),
)
# end for all hit in cache
self.evict(obj_class.object_type, stale_uuids)
return obj_dicts, miss_uuids
# end read
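# dump_cache: debug helper that dumps the requested uuids (or up to 'count'
# entries) as JSON-serialisable dicts.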
def dump_cache(self, obj_uuids=None, count=10):
obj_dicts = {}
i = 1
if obj_uuids:
for obj_uuid in obj_uuids:
try:
obj = self._cache[obj_uuid]
except KeyError:
continue
obj_json = json.dumps(obj, default=lambda o: dict((k, v)
for k, v in list(o.__dict__.items())))
obj_dicts[i] = json.loads(obj_json)
i += 1
else:
for key in self._cache:
if i > count:
break
obj = self._cache[key]
obj_json = json.dumps(obj, default=lambda o: dict((k, v)
for k, v in list(o.__dict__.items())))
obj_dicts[i] = json.loads(obj_json)
i += 1
return obj_dicts
# end class ObjectCacheManager
| true
| true
|
790e26be3128ca0358ecc4721ef38ca8373a9407
| 2,496
|
py
|
Python
|
source/engine/steps/config_model.py
|
Borrk/DeepLearning-Engine
|
54f6cdb8a76e76d9f439f8562652f545e4dbc02e
|
[
"MIT"
] | null | null | null |
source/engine/steps/config_model.py
|
Borrk/DeepLearning-Engine
|
54f6cdb8a76e76d9f439f8562652f545e4dbc02e
|
[
"MIT"
] | null | null | null |
source/engine/steps/config_model.py
|
Borrk/DeepLearning-Engine
|
54f6cdb8a76e76d9f439f8562652f545e4dbc02e
|
[
"MIT"
] | null | null | null |
from engine.steps.IStep import IStep
from keras.models import Model
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
class config_model(IStep):
"""config model"""
create_Optimizer_func = None
create_loss_func = None
def __init__(self, output_channel, name, create_Optimizer_func, create_loss_func):
super().__init__(output_channel, name)
self.create_Optimizer_func = create_Optimizer_func
self.create_loss_func = create_loss_func
def IRun(self):
if self.create_Optimizer_func is None:
raise Exception( "No create optimizer function!" )
if self.create_loss_func is None:
self.create_loss_func = self._default_categorical_crossentropy
try:
opt = self.create_Optimizer_func()
loss = self.create_loss_func()
model = self.output_channel['model']
"""
if self.train_only_top_layer:
for layer in base_model.layers:
layer.trainable = False
"""
model.compile(optimizer=opt, loss=loss, metrics=[self.metrics] )
except Exception as e:
self.output_channel['Error'] = "fatal error occur: " + e.message
self.output_channel['ErrorType'] = "fatal"
def IParseConfig( self, config_json ):
self.epochs = config_json['epochs']
self.learning_ratio = config_json['learning_ratio']
self.batch_size = config_json['batch_size']
self.metrics = config_json['metrics']
self.output_channel['epochs'] = self.epochs
self.output_channel['learning_ratio'] = self.learning_ratio
self.output_channel['batch_size'] = self.batch_size
def IDispose( self ):
pass
def _default_categorical_crossentropy(self):
return "categorical_crossentropy"
class config_model_adam_categorical_crossentropy(config_model):
""" config model: optimizer=Adam, loss = 'categorical_crossentropy' """
def __init__(self, output_channel, name=None ):
super().__init__(output_channel, name, self.create_Adam, self.create_loss )
def create_Adam( self ):
return Adam(lr=self.learning_ratio, decay=self.learning_ratio / self.epochs )
def create_loss( self ):
""" create loss function """
return "categorical_crossentropy"
| 33.28
| 89
| 0.654647
|
from engine.steps.IStep import IStep
from keras.models import Model
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
class config_model(IStep):
create_Optimizer_func = None
create_loss_func = None
def __init__(self, output_channel, name, create_Optimizer_func, create_loss_func):
super().__init__(self, output_channel, name)
self.create_Optimizer_func = create_Optimizer_func
self.create_loss_func = create_loss_func
def IRun(self):
if self.create_Optimizer_func == None:
raise Exception( "No create optimizer function!" )
if self.create_loss_func == None:
self.create_loss_func = self._default_categorical_crossentropy
try:
opt = self.create_Optimizer_func(self)
loss = self.create_loss_func(self)
model = self.output_channel['model']
model.compile(optimizer=opt, loss=loss, metrics=[self.metrics] )
except Exception as e:
self.output_channel['Error'] = "fatal error occur: " + e.message
self.output_channel['ErrorType'] = "fatal"
def IParseConfig( self, config_json ):
self.epochs = config_json['epochs']
self.learning_ratio = config_json['learning_ratio']
self.batch_size = config_json['batch_size']
self.metrics = config_json['metrics']
self.output_channel['epochs'] = self.epochs
self.output_channel['learning_ratio'] = self.learning_ratio
self.output_channel['batch_size'] = self.batch_size
def IDispose( self ):
pass
def _default_categorical_crossentropy():
return "categorical_crossentropy"
class config_model_adam_categorical_crossentropy(config_model):
def __init__(self, output_channel, name=None ):
super().__init__(self, output_channel, name, self.create_Adam, self.create_loss )
def create_Adam( self ):
return Adam(lr=self.learning_ratio, decay=self.learning_ratio / self.epochs )
def create_loss( self ):
return "categorical_crossentropy"
| true
| true
|
790e275df8597c6b16772b5c3a832d566d6ed222
| 2,461
|
py
|
Python
|
sdk/keyvault/azure-keyvault/azure/keyvault/v7_3_preview/models/sas_definition_create_parameters.py
|
mccoyp/azure-keyvault-7.3-preview
|
da351753a9d3d2bf97c27566865cd88bae7faa55
|
[
"MIT"
] | null | null | null |
sdk/keyvault/azure-keyvault/azure/keyvault/v7_3_preview/models/sas_definition_create_parameters.py
|
mccoyp/azure-keyvault-7.3-preview
|
da351753a9d3d2bf97c27566865cd88bae7faa55
|
[
"MIT"
] | null | null | null |
sdk/keyvault/azure-keyvault/azure/keyvault/v7_3_preview/models/sas_definition_create_parameters.py
|
mccoyp/azure-keyvault-7.3-preview
|
da351753a9d3d2bf97c27566865cd88bae7faa55
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SasDefinitionCreateParameters(Model):
"""The SAS definition create parameters.
All required parameters must be populated in order to send to Azure.
:param template_uri: Required. The SAS definition token template signed
with an arbitrary key. Tokens created according to the SAS definition
will have the same properties as the template.
:type template_uri: str
:param sas_type: Required. The type of SAS token the SAS definition will
create. Possible values include: 'account', 'service'
:type sas_type: str or ~storage.models.SasTokenType
:param validity_period: Required. The validity period of SAS tokens
created according to the SAS definition.
:type validity_period: str
:param sas_definition_attributes: The attributes of the SAS definition.
:type sas_definition_attributes: ~storage.models.SasDefinitionAttributes
:param tags: Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
"""
_validation = {
'template_uri': {'required': True},
'sas_type': {'required': True},
'validity_period': {'required': True},
}
_attribute_map = {
'template_uri': {'key': 'templateUri', 'type': 'str'},
'sas_type': {'key': 'sasType', 'type': 'str'},
'validity_period': {'key': 'validityPeriod', 'type': 'str'},
'sas_definition_attributes': {'key': 'attributes', 'type': 'SasDefinitionAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(SasDefinitionCreateParameters, self).__init__(**kwargs)
self.template_uri = kwargs.get('template_uri', None)
self.sas_type = kwargs.get('sas_type', None)
self.validity_period = kwargs.get('validity_period', None)
self.sas_definition_attributes = kwargs.get('sas_definition_attributes', None)
self.tags = kwargs.get('tags', None)
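# Illustrative usage sketch (not part of the generated file); the ISO 8601
# duration shown for validity_period is an assumption:
#
#   params = SasDefinitionCreateParameters(
#       template_uri='<signed SAS template URI>',
#       sas_type='account',
#       validity_period='PT2H',
#       tags={'team': 'storage'})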
| 43.175439
| 94
| 0.650549
|
from msrest.serialization import Model
class SasDefinitionCreateParameters(Model):
_validation = {
'template_uri': {'required': True},
'sas_type': {'required': True},
'validity_period': {'required': True},
}
_attribute_map = {
'template_uri': {'key': 'templateUri', 'type': 'str'},
'sas_type': {'key': 'sasType', 'type': 'str'},
'validity_period': {'key': 'validityPeriod', 'type': 'str'},
'sas_definition_attributes': {'key': 'attributes', 'type': 'SasDefinitionAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(SasDefinitionCreateParameters, self).__init__(**kwargs)
self.template_uri = kwargs.get('template_uri', None)
self.sas_type = kwargs.get('sas_type', None)
self.validity_period = kwargs.get('validity_period', None)
self.sas_definition_attributes = kwargs.get('sas_definition_attributes', None)
self.tags = kwargs.get('tags', None)
| true
| true
|
790e28b2a29b6a3f90c09296edb60423a6e6516e
| 3,785
|
py
|
Python
|
analysis_figure_code/SuppFig2/SuppFig2.py
|
chrisroadmap/Near_term_warming
|
7fc712fdcbf135bc3a73027b1c7b5a3504c5ea5e
|
[
"Apache-2.0"
] | 2
|
2021-01-20T03:18:15.000Z
|
2022-01-25T18:47:27.000Z
|
analysis_figure_code/SuppFig2/SuppFig2.py
|
chrisroadmap/Near_term_warming
|
7fc712fdcbf135bc3a73027b1c7b5a3504c5ea5e
|
[
"Apache-2.0"
] | null | null | null |
analysis_figure_code/SuppFig2/SuppFig2.py
|
chrisroadmap/Near_term_warming
|
7fc712fdcbf135bc3a73027b1c7b5a3504c5ea5e
|
[
"Apache-2.0"
] | 3
|
2020-07-31T14:51:39.000Z
|
2020-10-29T22:30:54.000Z
|
import numpy as np
import numpy.ma as npma
from scipy import stats
import matplotlib.pyplot as plt
import baspy as bp
import fnmatch
"""
Created on Wed Nov 27 18:34 2019
@author: Christine McKenna
========================================================================
Purpose: Plots Supp Fig 2, a pdf of all possible 20-year trends in gsat
for CMIP6 piControl simulations for each model. First detrends
the raw gsat time series to remove any long term drift,
which could bias 20-year trends (e.g. if positive drift,
pdf of 20-year trends likely biased positive).
Saves pdf of 20-year trends for models used in Supp Fig 8.
========================================================================
"""
# Required directories
loaddir_CMIP = 'Priestley-Centre/Near_term_warming/analysis_figure_code/'+\
'SuppFig2/saved_arrays'
savedir = 'Priestley-Centre/Near_term_warming/analysis_figure_code/'+\
'SuppFig8/saved_data'
### ------ Load in CMIP6 data ------
# Load models
models = np.load(loaddir_CMIP+'/models_gtas_CMIP6_piControl.npy')
# Load catalogue so can extract runids
var = 'tas'
cat_PI = bp.catalogue(dataset='cmip6',Var=var,Experiment='piControl',\
CMOR='Amon')
years = np.linspace(1,20,20)
### Process data, one model and RunID at a time
i = 0
fig,axs = plt.subplots(7,7,sharex=True,sharey=True,\
figsize=(15,12))
fig.suptitle('PDFs of rolling GSAT trends for 20-year segments of CMIP6 '+\
'piControl runs',fontsize=20)
axs = axs.ravel()
for model in models:
## Get data for model
filtmod_PI = cat_PI[cat_PI['Model'] == model]
## Only keep r1i1p1f?
runids_PI = np.unique(filtmod_PI['RunID'])
runids_PI = fnmatch.filter(runids_PI,'r1i1p1f?')
## Get data for each RunID
for runid in runids_PI:
## Load gsat data
gsat_tmp = np.load(loaddir_CMIP+'/gtas_'+model+'_'+runid+\
'_CMIP6_piControl.npy')
ny = len(gsat_tmp)
## Remove any drift
[m,c,_,_,_] = stats.linregress(np.linspace(0,ny-1,ny),gsat_tmp)
gsat_lin = m*np.linspace(0,ny-1,ny)+c
gsat = gsat_tmp - gsat_lin
## Calculate trends
gsat_trends = np.zeros([ny-20])
for y in xrange(0,ny-20):
[m,_,_,_,_] = stats.linregress(years,gsat[y:y+20])
gsat_trends[y] = m*10
## If model used in Supp Fig 8 save pdf of 20y trends
if (model == 'BCC-CSM2-MR') or (model == 'MIROC-ES2L'):
np.save(savedir+'/gsat_20ytrends_CMIP6_piControl_'+\
model+'.npy',gsat_trends)
### ------ Plot results ------
### Plot individual models
axs[i].hist(gsat_trends,density=True)
axs[i].set_title(model,fontsize=13)
axs[i].plot(np.zeros([2]),[0,11],'grey',linewidth=1)
axs[i].plot(np.ones([2])*(-0.075),[0,11],'black',\
linewidth=1,linestyle='--')
axs[i].plot(np.ones([2])*(0.072),[0,11],'black',\
linewidth=1,linestyle='--')
axs[i].plot(np.ones([2])*(-0.084),[0,11],'black',\
linewidth=1,linestyle='--')
axs[i].plot(np.ones([2])*(0.094),[0,11],'black',\
linewidth=1,linestyle='--')
axs[i].tick_params(labelsize=13)
i += 1
fig.text(0.5,0.02,'$^{\circ}$C / decade',ha='center',\
va='center',fontsize=18)
fig.text(0.02,0.5,'Probability density',ha='center',va='center',\
rotation='vertical',fontsize=18)
axs[i-1].set_xlim([-0.3,0.3])
axs[i-1].set_ylim([0,11])
axs[i].axis('off')
plt.subplots_adjust(top=0.9,bottom=0.07,left=0.07,right=0.97,\
wspace=0.17,hspace=0.27)
plt.show()
| 33.495575
| 75
| 0.573844
|
import numpy as np
import numpy.ma as npma
from scipy import stats
import matplotlib.pyplot as plt
import baspy as bp
import fnmatch
loaddir_CMIP = 'Priestley-Centre/Near_term_warming/analysis_figure_code/'+\
'SuppFig2/saved_arrays'
savedir = 'Priestley-Centre/Near_term_warming/analysis_figure_code/'+\
'SuppFig8/saved_data'
var = 'tas'
cat_PI = bp.catalogue(dataset='cmip6',Var=var,Experiment='piControl',\
CMOR='Amon')
years = np.linspace(1,20,20)
(15,12))
fig.suptitle('PDFs of rolling GSAT trends for 20-year segments of CMIP6 '+\
'piControl runs',fontsize=20)
axs = axs.ravel()
for model in models:
t_PI[cat_PI['Model'] == model]
unique(filtmod_PI['RunID'])
runids_PI = fnmatch.filter(runids_PI,'r1i1p1f?')
PI:
p = np.load(loaddir_CMIP+'/gtas_'+model+'_'+runid+\
'_CMIP6_piControl.npy')
ny = len(gsat_tmp)
_] = stats.linregress(np.linspace(0,ny-1,ny),gsat_tmp)
gsat_lin = m*np.linspace(0,ny-1,ny)+c
gsat = gsat_tmp - gsat_lin
ds = np.zeros([ny-20])
for y in xrange(0,ny-20):
[m,_,_,_,_] = stats.linregress(years,gsat[y:y+20])
gsat_trends[y] = m*10
IROC-ES2L'):
np.save(savedir+'/gsat_20ytrends_CMIP6_piControl_'+\
model+'.npy',gsat_trends)
lot(np.zeros([2]),[0,11],'grey',linewidth=1)
axs[i].plot(np.ones([2])*(-0.075),[0,11],'black',\
linewidth=1,linestyle='--')
axs[i].plot(np.ones([2])*(0.072),[0,11],'black',\
linewidth=1,linestyle='--')
axs[i].plot(np.ones([2])*(-0.084),[0,11],'black',\
linewidth=1,linestyle='--')
axs[i].plot(np.ones([2])*(0.094),[0,11],'black',\
linewidth=1,linestyle='--')
axs[i].tick_params(labelsize=13)
i += 1
fig.text(0.5,0.02,'$^{\circ}$C / decade',ha='center',\
va='center',fontsize=18)
fig.text(0.02,0.5,'Probability density',ha='center',va='center',\
rotation='vertical',fontsize=18)
axs[i-1].set_xlim([-0.3,0.3])
axs[i-1].set_ylim([0,11])
axs[i].axis('off')
plt.subplots_adjust(top=0.9,bottom=0.07,left=0.07,right=0.97,\
wspace=0.17,hspace=0.27)
plt.show()
| true
| true
|