hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f3d9183252e3ed19e447b3ee21c5ef14aff3da24 | 329 | py | Python | django_unical_bootstrap_italia/urls.py | UniversitaDellaCalabria/django-unical-bootstrap-italia | 7130a675956d20ad065616eaa95418746a01e6d7 | [
"BSD-3-Clause"
] | 1 | 2022-02-25T14:23:00.000Z | 2022-02-25T14:23:00.000Z | django_unical_bootstrap_italia/urls.py | UniversitaDellaCalabria/django-unical-bootstrap-italia | 7130a675956d20ad065616eaa95418746a01e6d7 | [
"BSD-3-Clause"
] | 1 | 2020-10-31T15:10:56.000Z | 2020-10-31T15:10:56.000Z | django_unical_bootstrap_italia/urls.py | UniversitaDellaCalabria/django-unical-bootstrap-italia | 7130a675956d20ad065616eaa95418746a01e6d7 | [
"BSD-3-Clause"
"""
agid_template URL Configuration
"""
from django.conf import settings
from django.conf.urls.static import static
from django.urls import path
# NOTE(review): wildcard import — 'index' used below must come from .views;
# verify it stays exported there.
from .views import *

# URL namespace for reversing, e.g. "django_unical_bootstrap_italia:index".
app_name="django_unical_bootstrap_italia"

# Root path serves the index view. Media files are appended via static(),
# which per Django's docs only emits URL patterns when settings.DEBUG is on
# (i.e. this is a development-server convenience, not production serving).
urlpatterns = [
    path('', index, name='index'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 20.5625 | 65 | 0.768997 |
72260600439e0b62a0c9c61c60f752f90a665aa2 | 1,195 | py | Python | fed_distill/fed/splitter.py | Ahmedjjj/dataset-distillation | f2e4267d070c7fb8e50476297e95638f351b76d6 | [
"MIT"
] | null | null | null | fed_distill/fed/splitter.py | Ahmedjjj/dataset-distillation | f2e4267d070c7fb8e50476297e95638f351b76d6 | [
"MIT"
] | null | null | null | fed_distill/fed/splitter.py | Ahmedjjj/dataset-distillation | f2e4267d070c7fb8e50476297e95638f351b76d6 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from typing import List
from torch.utils.data import Dataset, Subset
import numpy as np
class DataSplitter(ABC):
    """Strategy interface for splitting a dataset across federated nodes."""

    @abstractmethod
    def split(self, dataset: Dataset, num_nodes: int) -> List[Dataset]:
        """Split *dataset* into ``num_nodes`` datasets, one per node.

        BUGFIX: the abstract signature previously omitted the ``dataset``
        parameter, mismatching the concrete implementations' signatures.
        """
        raise NotImplementedError
class HeterogenousDistribution(DataSplitter):
def __init__(self, unif_percentage=0.1):
self.unif_percentage = unif_percentage
def split(self, dataset: Dataset, num_nodes: int) -> List[Dataset]:
len_heter = len(dataset) - int(len(dataset) * self.unif_percentage)
targets, unif_targets = np.split(
np.copy(dataset.targets), [len_heter]
)
sorted_targets = np.argsort(targets[: len_heter])
unif_targets = np.random.permutation(unif_targets)
subsets = []
sorted_split = np.array_split(sorted_targets, num_nodes)
unif_split = np.array_split(unif_targets, num_nodes)
for (h, r) in zip(sorted_split, unif_split):
all_indices = np.concatenate((h, r))
subset = Subset(dataset, all_indices)
subset.targets = np.array(dataset.targets)[all_indices]
subsets.append(subset)
return subsets
| 33.194444 | 75 | 0.671967 |
c8b2d295d26eac4b473ce142e529bb56ce185907 | 6,164 | py | Python | numeric_numpad.py | pmcloete/numpad | 5ab86ecd9d9d8778ba2807250219b78eab6d5df5 | [
"MIT"
] | null | null | null | numeric_numpad.py | pmcloete/numpad | 5ab86ecd9d9d8778ba2807250219b78eab6d5df5 | [
"MIT"
] | null | null | null | numeric_numpad.py | pmcloete/numpad | 5ab86ecd9d9d8778ba2807250219b78eab6d5df5 | [
"MIT"
] | null | null | null | import PySimpleGUI as sg
from PySimpleGUI.PySimpleGUI import DEFAULT_BASE64_ICON, DEFAULT_BASE64_ICON_16_BY_16, ICON_BUY_ME_A_COFFEE, InputText, set_global_icon
from themes import Themes
class Numpad:
    """A PySimpleGUI numeric keypad window with a display field."""

    def __init__(self):
        """Initialize styling, build the layout and create the window."""
        # GUI Instance
        self.gui = sg
        # Button Attributes
        self.size_button = (5, 1)
        self.size_border_button = 2
        self.button_font = ('Menlo', 25)
        self.color_button = '6C6C6C'
        self.color_enter_button = '#d90429'
        # Display Field Attributes
        self.display_font = ('Menlo', 25)
        self.display_size = (18, 1)
        # the digits entered so far, as a string
        self.current_value = ''
        # Info Text Attributes
        self.info_text_font = ('Menlo', 8)
        # Theme Attributes
        self.theme = Themes()
        self.gui.theme_add_new('Peter', self.theme._get_theme(selection='grey'))
        self.gui.theme('Peter')
        # Icon
        self.icon_path = '/Users/peter/Library/Mobile Documents/com~apple~CloudDocs/PythonMain/projects/pysimplegui_numpad/assests/website_logo.png'
        # Main Layout (rows of identically-styled buttons, built via _button)
        self.layout = [
            [self.gui.Input(key='-VALUE-', font=(self.display_font),
                            size=self.display_size)],
            [self.gui.Text('© Peter Inc. 2021', pad=(6, 2), font=(
                self.info_text_font), justification='left')],
            [self._button('EXIT'), self._button('AC'), self._button('<')],
            [self._button('7'), self._button('8'), self._button('9')],
            [self._button('4'), self._button('5'), self._button('6')],
            [self._button('1'), self._button('2'), self._button('3')],
            [self._button('0'), self._button('.'),
             self._button('->', button_color=(self.color_enter_button))],
        ]
        # Create the window accordingly
        self.window = self.gui.Window(
            'Numpad', self.layout, no_titlebar=False, grab_anywhere=True,
            resizable=True, finalize=True, titlebar_background_color='#000000', icon=DEFAULT_BASE64_ICON)
        self.window_size = self.window.Size

    def _button(self, label, **extra):
        """Return a keypad Button for *label* with the standard font/size."""
        return self.gui.Button(label, font=(self.button_font),
                               size=(self.size_button), **extra)

    def main(self):
        """Run the event loop until the window is closed or EXIT pressed."""
        # Keys that append to the current value
        event_items = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.']
        count = 0
        while True:
            event, value = self.window.read()
            # Close the program
            if event == self.gui.WIN_CLOSED or event == 'EXIT':
                break
            if event in event_items:
                # BUGFIX: reject a second decimal point anywhere in the
                # number (the old check only caught a trailing '.',
                # so "1.2." was accepted)
                if event == '.' and '.' in self.current_value:
                    continue
                # Add a 0 before if numbers < 1 are to be entered
                if event == '.' and not self.current_value:
                    self.current_value = '0'
                self.current_value += event
            # Clear the display
            if event == 'AC':
                self.current_value = ''
            # Backspace
            # BUGFIX: the old code only decremented a display counter and
            # kept the character, so the next keypress re-inserted the
            # "deleted" digit; drop the trailing character instead.
            if event == '<':
                self.current_value = self.current_value[:-1]
            # Return/ Send data to where it needs to go
            if event == '->':
                self._data_send()
                self.current_value = ''
            # keep the display counter in sync with the stored value
            count = len(self.current_value)
            self._update_display(count)
        # Program exited
        self.window.close()

    def _update_display(self, count):
        """Update the text display for the values entered on the keypad."""
        if count >= 0:
            self.window['-VALUE-'].update(self.current_value[:count])
        else:
            self.window['-VALUE-'].update(self.current_value)

    def _data_send(self):
        """Send the data after '->' is pressed on the keypad."""
        if not self.current_value:
            pass  # No data entered
        else:
            print('data sent')
            print(self.current_value)
if __name__ == '__main__':
    # Launch the keypad application.
    Numpad().main()
| 39.012658 | 148 | 0.503569 |
13410916e1afe5382590875fa339b9e07e13ea17 | 1,965 | py | Python | parser/push_todb.py | DmitriyKhodykin/HunterHH | f27a56348fa47088fd520323a9e167118cf79d1c | [
"MIT"
] | 1 | 2020-04-13T08:16:11.000Z | 2020-04-13T08:16:11.000Z | parser/push_todb.py | DmitriyKhodykin/HunterHH | f27a56348fa47088fd520323a9e167118cf79d1c | [
"MIT"
] | null | null | null | parser/push_todb.py | DmitriyKhodykin/HunterHH | f27a56348fa47088fd520323a9e167118cf79d1c | [
"MIT"
] | 1 | 2020-04-10T10:48:39.000Z | 2020-04-10T10:48:39.000Z | # Module for Pushing parsed data into a local database
from hh_parser import Parser
import MySQLdb # pip install mysqlclient
import time
import auth
def pusher():
    """Parse resumes from hh.ru and insert them into the local MySQL db.

    Only candidates whose salary is strictly between 30 and 300 are stored.
    The database connection is always closed, even on errors.
    """
    # Search URL for Russia-wide (area 113) resumes of specialization 17.242
    url = ('https://hh.ru/search/resume?L_is_autosearch=false&area=113'
           '&clusters=true&exp_period=all_time&logic=normal&no_magic=false&'
           'order_by=relevance&pos=full_text&specialization=17.242')

    db_smc = MySQLdb.connect(
        host=auth.host, user=auth.user,
        passwd=auth.passwd, db=auth.db, charset='utf8'
    )
    try:
        print('Get cursor() method for operations with local db')
        cursor = db_smc.cursor()

        prsr = Parser(url)
        # perf/BUGFIX: fetch the listing once — the old code called
        # get_refs() twice (two separate scrapes of the same page)
        refs, dates = prsr.get_refs()
        print(f'Len of refs list: {len(refs)}')
        print(f'Len of dates list: {len(dates)}')

        print('Execute SQL-query')
        for ref, dat in zip(refs, dates):
            entrydate = time.strftime("%Y-%m-%d")
            # perf: parse each resume page once instead of calling
            # get_features(ref) seven times (one fetch per field)
            title, gender, city, age, salary, experience, last_job = \
                prsr.get_features(ref)[:7]
            # The values to be added to the table
            values = (entrydate, title, gender, city, age, salary,
                      experience, last_job, dat, ref)
            # Store only plausible salaries (same filter as before)
            if 30 < salary < 300:
                cursor.execute("""
                INSERT INTO
                salesman_candidates (entrydate, title, gender, city, age, salary, experience, last_job, updated, link)
                VALUES
                (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
                """, values)
            # be polite to the remote server
            time.sleep(1)

        print('Commit')
        db_smc.commit()
    finally:
        db_smc.close()


pusher()
| 31.693548 | 118 | 0.601527 |
767e288bf00c8812de932aa6dc32ad23931e6e14 | 469 | py | Python | ebc/pauta/skins/ebc_pauta_custom_templates/getPautasCafe.py | lflrocha/ebc.pauta | 1a77e9f47e22b60af88cf23f492a8b47ddfd27b6 | [
"Unlicense"
] | null | null | null | ebc/pauta/skins/ebc_pauta_custom_templates/getPautasCafe.py | lflrocha/ebc.pauta | 1a77e9f47e22b60af88cf23f492a8b47ddfd27b6 | [
"Unlicense"
] | null | null | null | ebc/pauta/skins/ebc_pauta_custom_templates/getPautasCafe.py | lflrocha/ebc.pauta | 1a77e9f47e22b60af88cf23f492a8b47ddfd27b6 | [
"Unlicense"
] | null | null | null | ## Script (Python) "getPautasCafe"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=
##title=Retorna a lista de pautas do Cafe
pautas = context.portal_catalog.searchResults(portal_type='Pauta', \
getMidias='cafe', \
sort_on='getData', \
sort_order='reverse')
return pautas
| 27.588235 | 68 | 0.586354 |
d627b49c169373f82d4112d7a6a5a9690250116d | 10,842 | py | Python | cp2k_input_tools/preprocessor.py | cp2k/cp2k-input-tools | 33b876c6ee133d12594c96980e675736884d0d2e | [
"MIT"
] | 12 | 2020-05-28T17:52:44.000Z | 2022-02-19T13:26:25.000Z | cp2k_input_tools/preprocessor.py | cp2k/cp2k-input-tools | 33b876c6ee133d12594c96980e675736884d0d2e | [
"MIT"
] | 27 | 2020-03-26T07:50:09.000Z | 2022-03-25T10:16:28.000Z | cp2k_input_tools/preprocessor.py | cp2k/cp2k-input-tools | 33b876c6ee133d12594c96980e675736884d0d2e | [
"MIT"
] | 5 | 2020-08-24T07:23:07.000Z | 2022-02-19T13:26:26.000Z | import re
from collections import defaultdict
from collections.abc import Iterator
from pathlib import Path
from typing import NamedTuple, Sequence
from .lineiterator import MultiFileLineIterator
from .parser_errors import PreprocessorError
from .tokenizer import COMMENT_CHARS, Context, TokenizerError, tokenize
class _Variable(NamedTuple):
value: str
ctx: defaultdict
class _ConditionalBlock(NamedTuple):
condition: str
ctx: defaultdict
# variable names: a letter or underscore followed by word chars (ASCII only)
_VALID_VAR_NAME_MATCH = re.compile(r"^[a-z_]\w*$", flags=re.IGNORECASE | re.ASCII)
# "@IF <cond>" / "@ENDIF" conditional directives
_CONDITIONAL_MATCH = re.compile(r"\s*@(?P<stmt>IF|ENDIF)\s*(?P<cond>.*)", flags=re.IGNORECASE)
# "@SET <var> <value>" variable definition
_SET_MATCH = re.compile(r"\s*@SET\s+(?P<var>\S+)\s+(?P<value>.+)", flags=re.IGNORECASE)
# "@INCLUDE <file>" / "@XCTYPE <name>" file inclusion directives
_INCLUDE_MATCH = re.compile(r"\s*(?P<complete>@(?P<type>INCLUDE|XCTYPE)\b\s*(?P<file>.*))", flags=re.IGNORECASE)
class CP2KPreprocessor(Iterator):
    """Iterator yielding preprocessed CP2K input lines.

    Handles the CP2K preprocessor directives @SET, @IF/@ENDIF, @INCLUDE and
    @XCTYPE, resolves ``${var}``/``${var-default}`` and ``$var`` references,
    and transparently follows included files. Empty lines and comment lines
    are skipped; every yielded line is fully variable-resolved.
    """

    def __init__(self, fhandle, base_dir, initial_variable_values=None):
        """
        :param fhandle: file handle of the top-level input (not closed here)
        :param base_dir: one directory, or a sequence of directories, to
                         search for @INCLUDE/@XCTYPE files
        :param initial_variable_values: optional mapping of preprocessor
                                        variable names to initial values
        """
        # variable stack: UPPERCASED name -> _Variable(value, definition ctx)
        self._varstack = {}
        self._lineiter = MultiFileLineIterator()
        # the currently open @IF block (None when outside any conditional)
        self._conditional_block = None
        self._current_line_entry = None

        if isinstance(base_dir, (str, Path)):
            self._inc_dirs = [Path(base_dir)]
        elif isinstance(base_dir, Sequence):
            self._inc_dirs = [Path(b) for b in base_dir]
        else:
            raise TypeError("invalid type passed for base_dir")

        if initial_variable_values:
            # programmatically supplied variables have no definition context
            self._varstack.update({k.upper(): _Variable(v, None) for k, v in initial_variable_values.items()})

        # the caller keeps ownership of the top-level handle (managed=False)
        self._lineiter.add_file(fhandle, managed=False)

    def _resolve_variables(self, line):
        """Return *line* with all ``${...}`` and ``$...`` references substituted.

        :raises PreprocessorError: on unterminated, invalid or undefined variables
        """
        var_start = 0
        var_end = 0

        ctx = Context(line=line)

        # the following algorithm is from CP2Ks cp_parser_inpp_methods.F to reproduce its behavior :(

        # first replace all "${...}" with no nesting, meaning that ${foo${bar}} means foo$bar is the key
        while True:
            var_start = line.find("${")
            if var_start < 0:
                break

            var_end = line.find("}", var_start + 2)
            if var_end < 0:
                ctx.colnr = len(line)
                ctx.ref_colnr = var_start
                raise PreprocessorError("unterminated variable", ctx)

            ctx.colnr = var_start
            ctx.ref_colnr = var_end

            key = line[var_start + 2 : var_end]  # without ${ and }
            value = None

            try:
                # see whether we got a default value (${var-default}) and unpack
                key, value = key.split("-", maxsplit=1)
            except ValueError:
                pass

            if not _VALID_VAR_NAME_MATCH.match(key):
                raise PreprocessorError(f"invalid variable name '{key}'", ctx) from None

            try:
                value = self._varstack[key.upper()].value
            except KeyError:
                if value is None:
                    raise PreprocessorError(f"undefined variable '{key}' (and no default given)", ctx) from None

            line = f"{line[:var_start]}{value}{line[var_end+1:]}"

        var_start = 0
        var_end = 0

        # then replace plain "$var" references, terminated by a space or EOL
        while True:
            var_start = line.find("$")
            if var_start < 0:
                break

            var_end = line.find(" ", var_start + 1)
            if var_end < 0:
                # -1 would be the last entry, but in a range it is without the specified entry
                var_end = len(line.rstrip())

            ctx.colnr = var_start
            ctx.ref_colnr = var_end - 1

            key = line[var_start + 1 : var_end]

            if not _VALID_VAR_NAME_MATCH.match(key):
                raise PreprocessorError(f"invalid variable name '{key}'", ctx) from None

            try:
                value = self._varstack[key.upper()].value
            except KeyError:
                raise PreprocessorError(f"undefined variable '{key}'", ctx) from None

            line = f"{line[:var_start]}{value}{line[var_end:]}"

        return line

    def _parse_preprocessor_instruction(self, line):
        """Handle a single ``@...`` directive line (@IF/@ENDIF/@SET/@INCLUDE/@XCTYPE).

        :raises PreprocessorError: on malformed or unknown directives
        """
        conditional_match = _CONDITIONAL_MATCH.match(line)

        ctx = Context(line=line)

        if conditional_match:
            stmt = conditional_match.group("stmt")
            condition = conditional_match.group("cond").strip()

            if stmt.upper() == "ENDIF":
                if self._conditional_block is None:
                    raise PreprocessorError("found @ENDIF without a previous @IF", ctx)

                # check for garbage which is not a comment, note: we're stricter than CP2K here
                if condition and not condition.startswith(COMMENT_CHARS):
                    ctx.colnr = conditional_match.start("cond")
                    ctx.ref_colnr = conditional_match.end("cond")
                    raise PreprocessorError("garbage found after @ENDIF", ctx)

                self._conditional_block = None
            else:
                if self._conditional_block is not None:
                    ctx.ref_line = self._conditional_block.ctx.line
                    raise PreprocessorError("nested @IF are not allowed", ctx)

                # resolve any variables inside the condition
                try:
                    condition = self._resolve_variables(condition)
                except PreprocessorError as exc:
                    exc.args[1].colnr += conditional_match.start("cond")
                    exc.args[1].ref_colnr += conditional_match.start("cond")
                    raise

                # prefix-whitespace are consumed in the regex, suffix with the strip() above
                if not condition or condition == "0":
                    self._conditional_block = _ConditionalBlock(False, ctx)
                elif "==" in condition:
                    lhs, rhs = [s.strip() for s in condition.split("==", maxsplit=1)]
                    self._conditional_block = _ConditionalBlock(lhs == rhs, ctx)
                elif "/=" in condition:
                    lhs, rhs = [s.strip() for s in condition.split("/=", maxsplit=1)]
                    self._conditional_block = _ConditionalBlock(lhs != rhs, ctx)
                else:
                    self._conditional_block = _ConditionalBlock(True, ctx)

            return

        # inside a disabled @IF block all other directives are ignored
        if self._conditional_block and not self._conditional_block.condition:
            return

        set_match = _SET_MATCH.match(line)
        if set_match:
            # resolve other variables in the definition first
            key = set_match.group("var")
            value = self._resolve_variables(set_match.group("value"))

            if not _VALID_VAR_NAME_MATCH.match(key):
                raise PreprocessorError(f"invalid variable name '{key}'", ctx) from None

            self._varstack[key.upper()] = _Variable(value, ctx)
            return

        include_match = _INCLUDE_MATCH.match(line)
        if include_match:
            inctype = include_match["type"]

            # resolve variables first
            try:
                filename = self._resolve_variables(include_match.group("file"))
            except PreprocessorError as exc:
                exc.args[1].colnr += include_match.start("file")  # shift colnr
                exc.args[1].ref_colnr += include_match.start("file")
                raise

            if filename.startswith(("'", '"')):
                try:
                    tokens = tokenize(filename)  # use the tokenizer to detect unterminated quotes
                except TokenizerError as exc:
                    exc.args[1].colnr += include_match.start("file")  # shift colnr
                    exc.args[1].ref_colnr += include_match.start("file")
                    raise

                if len(tokens) != 1:
                    raise PreprocessorError(
                        "@INCLUDE requires exactly one argument",
                        Context(colnr=include_match.start("complete"), ref_colnr=include_match.end("complete")),
                    )
                filename = tokens[0].strip("'\"")

            if not filename:
                raise PreprocessorError(
                    f"@{inctype} requires exactly one argument",
                    Context(colnr=include_match.start("complete"), ref_colnr=include_match.end("complete")),
                )

            filename = filename.strip("'\"")

            if inctype.upper() == "XCTYPE":
                # BUGFIX: the f-string had no placeholder and discarded the
                # requested name; @XCTYPE <name> includes xc_section/<name>.sec
                filename = f"xc_section/{filename}.sec"

            for inc_dir in self._inc_dirs:
                try:
                    # if the filename is an absolute path, joinpath uses that one and will ignore the dir
                    fhandle = inc_dir.joinpath(filename).open("r")
                    # the _lineiter takes over the handle and closes it at EOF
                    self._lineiter.add_file(fhandle)
                    break
                except OSError:
                    continue
            else:
                # BUGFIX: the f-string had no placeholders; report which
                # directive and file actually failed
                raise PreprocessorError(f"specified {inctype} file '{filename}' could not be opened", ctx)

            return

        raise PreprocessorError("unknown preprocessor directive found", ctx)

    def __next__(self):
        """Return the next fully-preprocessed content line."""
        for line in self._lineiter:
            try:
                # ignore empty lines and comments:
                if not line or line.startswith(COMMENT_CHARS):
                    continue

                if line.startswith("@"):
                    self._parse_preprocessor_instruction(line)
                    continue

                # ignore everything in a disable @IF/@ENDIF block
                if self._conditional_block and not self._conditional_block.condition:
                    continue

                return self._resolve_variables(line)
            except (PreprocessorError, TokenizerError) as exc:
                # enrich the error with the exact file position
                exc.args[1].filename = self._lineiter.fname
                exc.args[1].linenr = self._lineiter.line_range[1]
                exc.args[1].colnrs = self._lineiter.colnrs
                exc.args[1].line = line
                raise

        if self._conditional_block is not None:
            raise PreprocessorError(
                "conditional block not closed at end of file", Context(ref_line=self._conditional_block.ctx.line)
            )

        raise StopIteration

    @property
    def line_range(self):
        """Original line numbers (start and end) of the last (possibly combined) line"""
        return self._lineiter.line_range

    @property
    def colnrs(self):
        """Original column numbers where non-whitespace content started for most recent emitted line"""
        return self._lineiter.colnrs

    @property
    def starts(self):
        """Index in the most recent emitted line where content from a new line in file starts"""
        return self._lineiter.starts

    @property
    def fname(self):
        """Name of the file the most recent emitted line came from."""
        return self._lineiter.fname
| 37.645833 | 113 | 0.573141 |
3c979bed5eb549d4c954cf4a0e4aea1993624fea | 65 | py | Python | __init__.py | rocheparadox/IMU-MPU6050 | a5bb3ddeafcde48589edcfd6652b1936f0b6ddcb | [
"MIT"
] | null | null | null | __init__.py | rocheparadox/IMU-MPU6050 | a5bb3ddeafcde48589edcfd6652b1936f0b6ddcb | [
"MIT"
] | null | null | null | __init__.py | rocheparadox/IMU-MPU6050 | a5bb3ddeafcde48589edcfd6652b1936f0b6ddcb | [
"MIT"
] | null | null | null | #Author : Roche Christopher
#File created on 22 Jul 2019 9:52 PM
| 21.666667 | 36 | 0.753846 |
3b88ed5e46008809de3273d4c22597af08276f49 | 1,181 | py | Python | generators/thrsend.py | Chyi341152/chyi-book | ddeaf49d69a68f5718c20c3b7fe6fd37381d21eb | [
"MIT"
] | null | null | null | generators/thrsend.py | Chyi341152/chyi-book | ddeaf49d69a68f5718c20c3b7fe6fd37381d21eb | [
"MIT"
] | null | null | null | generators/thrsend.py | Chyi341152/chyi-book | ddeaf49d69a68f5718c20c3b7fe6fd37381d21eb | [
"MIT"
] | null | null | null | # thrsend.py
#
# Send items to consumer threads
from queue import Queue
import threading
class ConsumerThread(threading.Thread):
    """Daemon thread that feeds items sent via send() into a generator
    consumed by *target*.

    *target* is a callable accepting a generator of items; it runs on the
    thread until the generator is abandoned or the process exits.
    """

    def __init__(self, target):
        threading.Thread.__init__(self)
        # daemon so the thread does not block interpreter shutdown
        # (setDaemon() is deprecated since Python 3.10)
        self.daemon = True
        # BUGFIX: 'from queue import Queue' imports the class itself, so
        # the old Queue.Queue() raised AttributeError on construction
        self.in_queue = Queue()
        self.target = target

    def send(self, item):
        """Enqueue *item* for consumption by the target."""
        self.in_queue.put(item)

    def generate(self):
        """Yield items from the queue forever (blocks while it is empty)."""
        while True:
            item = self.in_queue.get()
            yield item

    def run(self):
        # Drive the target with the generator of incoming items
        self.target(self.generate())
# Example Use
if __name__ == '__main__':
    # NOTE(review): relative imports ('from .follow import *') only work when
    # this module is executed as part of a package; running the file directly
    # as a script will raise ImportError — confirm intended invocation.
    from .follow import *
    from .apachelog import *
    from .broadcast import *

    def find_404(log):
        # Filter the parsed log stream down to 404 responses and print each.
        r404 = (r for r in log if r['status'] == 404)
        for r in r404:
            print(r['status'],r['datetime'],r['request'])

    def bytes_transferred(log):
        # Sum the 'bytes' field of every record; note the final print only
        # runs if the (normally endless) stream terminates.
        total = 0
        for r in log:
            total += r['bytes']
        print("Total bytes", total)

    # One consumer thread per analysis; each receives records via send().
    c1 = ConsumerThread(find_404)
    c1.start()
    c2 = ConsumerThread(bytes_transferred)
    c2.start()

    # Tail the growing access log, parse records, and broadcast each one
    # to both consumer threads.
    lines = follow(open("run/foo/access-log"))
    log = apache_log(lines)
    broadcast(log,[c1,c2])
| 24.102041 | 57 | 0.584251 |
15fd72c960ca9b87507552d07692b364fa1b92fd | 2,614 | py | Python | scripts/write_NAICS_07_to_17_Crosswalk.py | ericmbell1/flowsa | d251301864289a4de42dda118c9c6da41bcf4cf0 | [
"CC0-1.0"
] | null | null | null | scripts/write_NAICS_07_to_17_Crosswalk.py | ericmbell1/flowsa | d251301864289a4de42dda118c9c6da41bcf4cf0 | [
"CC0-1.0"
] | null | null | null | scripts/write_NAICS_07_to_17_Crosswalk.py | ericmbell1/flowsa | d251301864289a4de42dda118c9c6da41bcf4cf0 | [
"CC0-1.0"
] | null | null | null | # write_NAICS_from_Census.py (scripts)
# !/usr/bin/env python3
# coding=utf-8
"""
Uses a csv file manually loaded, originally from USEEIOR (4/18/2020), to form base NAICS crosswalk from 2007-2017
Loops through the source crosswalks to find any NAICS not in offical Census NAICS Code list. Adds the additional NAICS
to NAICS crosswalk.
- Writes reshaped file to datapath as csv.
"""
from flowsa.common import datapath
import glob
import pandas as pd
#from rpy2.robjects.packages import importr
#from rpy2.robjects import pandas2ri
# does not work due to issues with rpy2. Crosswalk was manually copied from useeior and added as csv (4/18/2020)
# pandas2ri.activate()
# useeior = importr('useeior')
# NAICS_crosswalk = useeior.getMasterCrosswalk(2012)
# NAICS_crosswalk = pandas2ri.ri2py_dataframe(NAICS_crosswalk)
# update the useeior crosswalk with crosswalks created for flowsa datasets
# read the csv loaded as a raw datafile
naics = pd.read_csv(datapath + "NAICS_useeior_Crosswalk.csv")
# drop rows without a 2007 NAICS code
naics = naics[naics['NAICS_2007_Code'].notna()]
# convert all rows to string
naics = naics.astype(str)

# collects one dataframe of missing NAICS per source crosswalk file
missing_naics_df_list = []
# read in all the crosswalk csv files (ends in toNAICS.csv)
for file_name in glob.glob(datapath + "activitytosectormapping/"+'*_toNAICS.csv'):
    df = pd.read_csv(file_name, low_memory=False)
    # determine sector year
    # NOTE(review): Series.all() is used here to *extract* the column's
    # (assumed-constant) value; pandas documents .all() as returning a bool,
    # so this relies on legacy object-dtype behavior — verify, and consider
    # df['SectorSourceName'].iloc[0] instead.
    naics_year = df['SectorSourceName'].all()
    # subset dataframe so only sector
    df = df[['Sector']]
    # trim whitespace and cast as string, rename column
    df['Sector'] = df['Sector'].astype(str).str.strip()
    df = df.rename(columns={'Sector': naics_year})
    # extract sector year column from master crosswalk
    df_naics = naics[[naics_year]]
    # find any NAICS that are in source crosswalk but not in mastercrosswalk
    # NOTE(review): the two sides of the '&' below are identical — presumably
    # one of them should test a different column; confirm the intent.
    common = df.merge(df_naics, on=[naics_year, naics_year])
    missing_naics = df[(~df[naics_year].isin(common[naics_year])) & (~df[naics_year].isin(common[naics_year]))]
    # append to df list
    missing_naics_df_list.append(missing_naics)

# concat df list and drop duplications
missing_naics_df = pd.concat(missing_naics_df_list, ignore_index=True, sort=False).drop_duplicates()
# sort df
missing_naics_df = missing_naics_df.sort_values(['NAICS_2012_Code', 'NAICS_2007_Code'])
missing_naics_df = missing_naics_df.reset_index(drop=True)

# add missing naics to master naics crosswalk
# NOTE(review): DataFrame.append is deprecated/removed in modern pandas;
# pd.concat([naics, missing_naics_df], sort=True) is the replacement.
total_naics= naics.append(missing_naics_df, sort=True)

# sort df
total_naics = total_naics.sort_values(['NAICS_2012_Code', 'NAICS_2007_Code'])

# save as csv
total_naics.to_csv(datapath + "NAICS_07_to_17_Crosswalk.csv", index=False)
| 37.342857 | 118 | 0.760903 |
80c48777966036a4000f0707172602b24da98d87 | 10,543 | py | Python | ucb_api/python_client/swagger_client/models/options6.py | educup/ucb-python-api | 4b3532be465afe6480b7e362e8942ff67b95633b | [
"MIT"
] | 1 | 2020-09-12T03:15:35.000Z | 2020-09-12T03:15:35.000Z | ucb_api/python_client/swagger_client/models/options6.py | 70nybl4nc0/ucb-python-api | 9e6020436b7ca65667ed581dd91d2f8ad0674b91 | [
"MIT"
] | null | null | null | ucb_api/python_client/swagger_client/models/options6.py | 70nybl4nc0/ucb-python-api | 9e6020436b7ca65667ed581dd91d2f8ad0674b91 | [
"MIT"
] | 1 | 2020-09-12T02:57:22.000Z | 2020-09-12T02:57:22.000Z | # coding: utf-8
"""
Unity Cloud Build
This API is intended to be used in conjunction with the Unity Cloud Build service. A tool for building your Unity projects in the Cloud. See https://developer.cloud.unity3d.com for more information. ## Making requests This website is built to allow requests to be made against the API. If you are currently logged into Cloud Build you should be able to make requests without entering an API key. You can find your API key in the Unity Cloud Services portal by clicking on 'Cloud Build Preferences' in the sidebar. Copy the API Key and paste it into the upper left corner of this website. It will be used in all subsequent requests. ## Clients The Unity Cloud Build API is based upon Swagger. Client libraries to integrate with your projects can easily be generated with the [Swagger Code Generator](https://github.com/swagger-api/swagger-codegen). The JSON schema required to generate a client for this API version is located here: ``` [API_URL][BASE_PATH]/api.json ``` ## Authorization The Unity Cloud Build API requires an access token from your Unity Cloud Build account, which can be found at https://build.cloud.unity3d.com/login/me To authenticate requests, include a Basic Authentication header with your API key as the value. e.g. ``` Authorization: Basic [YOUR API KEY] ``` ## Pagination Paged results will take two parameters. A page number that is calculated based upon the per_page amount. For instance if there are 40 results and you specify page 2 with per_page set to 10 you will receive records 11-20. Paged results will also return a Content-Range header. For the example above the content range header would look like this: ``` Content-Range: items 11-20/40 ``` ## Versioning The API version is indicated in the request URL. Upgrading to a newer API version can be done by changing the path. 
The API will receive a new version in the following cases: * removal of a path or request type * addition of a required field * removal of a required field The following changes are considered backwards compatible and will not trigger a new API version: * addition of an endpoint or request type * addition of an optional field * removal of an optional field * changes to the format of ids ## Identifiers It should not be assumed that any of the identifiers used in paths will be a perfect match for your user-entered information. If you see unexpected 403s or 404s from API calls then check your identifiers match the ones used by the API. In particular, `projectId` does NOT typically change when the project is renamed and in fact may not be a direct match for the project name even at initial creation time. To avoid confusion we recommend that instead of using the human-readable autogenerated orgId and projectId available from the API you should instead use: * org foreign key for `orgId` (available from project APIs as `orgFk` and org APIs as `coreForeignKey`) * `guid` for `projectId` All links generated by the API and the Dashboard should follow this format already, making it easy to figure out the correct parameters by making a comparison. ## Rate Limiting Requests against the Cloud Build API are limited to a rate of 100 per minute. To preserve the quality of service throughout Cloud Build, additional rate limits may apply to some actions. For example, polling aggressively instead of using webhooks or making API calls with a high concurrency may result in rate limiting. It is not intended for these rate limits to interfere with any legitimate use of the API. Please contact support at <cloudbuild@unity3d.com> if your use is affected by this rate limit. You can check the returned HTTP headers for any API request to see your current rate limit status. 
* __X-RateLimit-Limit:__ maximum number of requests per minute * __X-RateLimit-Remaining:__ remaining number of requests in the current window * __X-RateLimit-Reset:__ time at which the current window will reset (UTC epoch seconds) Once you go over the rate limit you will receive an error response: ``` HTTP Status: 429 { \"error\": \"Rate limit exceeded, retry in XX seconds\" } ``` # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Options6(object):
    """Swagger model: build-target options payload.

    NOTE: originally auto generated by the swagger code generator program.
    Updated to drop the unnecessary `six` compatibility shim: this code runs
    on Python 3 only, where ``dict.items()`` is the native spelling.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'name': 'str',
        'platform': 'str',
        'enabled': 'bool',
        'settings': 'OrgsorgidprojectsprojectidbuildtargetsSettings',
        'credentials': 'OrgsorgidprojectsprojectidbuildtargetsCredentials1'
    }
    attribute_map = {
        'name': 'name',
        'platform': 'platform',
        'enabled': 'enabled',
        'settings': 'settings',
        'credentials': 'credentials'
    }

    def __init__(self, name=None, platform=None, enabled=None, settings=None, credentials=None):  # noqa: E501
        """Options6 - a model defined in Swagger"""  # noqa: E501
        # Backing fields; populated through the validating properties below.
        self._name = None
        self._platform = None
        self._enabled = None
        self._settings = None
        self._credentials = None
        self.discriminator = None
        # Only route supplied values through the setters, so that None
        # never trips the validators.
        if name is not None:
            self.name = name
        if platform is not None:
            self.platform = platform
        if enabled is not None:
            self.enabled = enabled
        if settings is not None:
            self.settings = settings
        if credentials is not None:
            self.credentials = credentials

    @property
    def name(self):
        """str: build-target name (schema maxLength: 64)."""
        return self._name

    @name.setter
    def name(self, name):
        # Enforce the schema's maxLength constraint.
        if name is not None and len(name) > 64:
            raise ValueError("Invalid value for `name`, length must be less than or equal to `64`")  # noqa: E501
        self._name = name

    @property
    def platform(self):
        """str: target platform; must be one of the allowed identifiers."""
        return self._platform

    @platform.setter
    def platform(self, platform):
        allowed_values = ["ios", "android", "webplayer", "webgl", "standaloneosxintel", "standaloneosxintel64", "standaloneosxuniversal", "standalonewindows", "standalonewindows64", "standalonelinux", "standalonelinux64", "standalonelinuxuniversal"]  # noqa: E501
        if platform not in allowed_values:
            raise ValueError(
                "Invalid value for `platform` ({0}), must be one of {1}"  # noqa: E501
                .format(platform, allowed_values)
            )
        self._platform = platform

    @property
    def enabled(self):
        """bool: whether this build target is enabled."""
        return self._enabled

    @enabled.setter
    def enabled(self, enabled):
        self._enabled = enabled

    @property
    def settings(self):
        """OrgsorgidprojectsprojectidbuildtargetsSettings: nested settings model."""
        return self._settings

    @settings.setter
    def settings(self, settings):
        self._settings = settings

    @property
    def credentials(self):
        """OrgsorgidprojectsprojectidbuildtargetsCredentials1: nested credentials model."""
        return self._credentials

    @credentials.setter
    def credentials(self, credentials):
        self._credentials = credentials

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Python 3: iterate the dict directly (was six.iteritems()).
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [x.to_dict() if hasattr(x, "to_dict") else x
                                for x in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                for k, v in value.items()}
            else:
                result[attr] = value
        if issubclass(Options6, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Options6):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 46.241228 | 4,140 | 0.656834 |
429e79bacde841450aeaa7660d8a56ee96040471 | 2,388 | py | Python | notbook/__init__.py | samuelcolvin/notbook | a26b5519029cde6999de5e78eb89ae1c833c1a42 | [
"MIT"
] | 52 | 2020-05-10T18:26:44.000Z | 2021-02-27T00:01:43.000Z | notbook/__init__.py | samuelcolvin/notbook | a26b5519029cde6999de5e78eb89ae1c833c1a42 | [
"MIT"
] | 2 | 2020-05-11T13:23:10.000Z | 2020-05-16T10:32:46.000Z | notbook/__init__.py | samuelcolvin/notbook | a26b5519029cde6999de5e78eb89ae1c833c1a42 | [
"MIT"
] | 2 | 2020-05-11T13:10:16.000Z | 2021-08-02T16:13:56.000Z | import inspect
from types import FrameType
from . import context
from .models import PlotBlock
try:
from bokeh import plotting as bokeh_plotting
from bokeh.embed import file_html as bokeh_file_html
from bokeh.plotting import Figure as BokehFigure
except ImportError:
bokeh_plotting = None
__all__ = ('show_plot',)
plot_id = 0
def show_plot(plot, *, title: str = None, filename: str = None):
    """Render *plot* into the active notbook context, or to an HTML file.

    Only bokeh figures are supported; any other object raises
    NotImplementedError.  Outside an active context the plot is written to
    *filename* (auto-numbered ``plot_N.html`` when not given).
    """
    global plot_id
    # Compare by repr so BokehFigure is never referenced when bokeh isn't installed.
    if repr(plot.__class__) != "<class 'bokeh.plotting.figure.Figure'>":
        raise NotImplementedError(f'cannot render {plot} ({type(plot)})')
    assert bokeh_plotting is not None, 'could not find bokeh install'
    assert isinstance(plot, BokehFigure), plot
    if not context.is_active():
        # Standalone mode: emit the plot as its own HTML file.
        if not filename:
            plot_id += 1
            filename = f'plot_{plot_id}.html'
        bokeh_plotting.output_file(filename, title=title)
        bokeh_plotting.show(plot)
        return
    # Capture the caller's frame so the plot block is tied to its source line.
    caller = inspect.currentframe().f_back
    if plot.sizing_mode is None:
        plot.sizing_mode = 'stretch_both'
    if plot.aspect_ratio is None:
        plot.aspect_ratio = 1.78
    # Apply the house style to title, legend and both axes.
    for target, attr, value in (
        (plot.title, 'text_font', 'Titillium Web, sans-serif'),
        (plot.title, 'text_font_size', '1.5rem'),
        (plot.title, 'align', 'center'),
        (plot.legend, 'label_text_font', 'Merriweather, serif'),
        (plot.legend, 'label_text_font_size', '1rem'),
        (plot.xaxis, 'axis_label_text_font', 'Ubuntu Mono, monospace'),
        (plot.xaxis, 'axis_label_text_font_size', '1.2rem'),
        (plot.xaxis, 'major_label_text_font_size', '1rem'),
        (plot.yaxis, 'axis_label_text_font', 'Ubuntu Mono, monospace'),
        (plot.yaxis, 'axis_label_text_font_size', '1.2rem'),
        (plot.yaxis, 'major_label_text_font_size', '1rem'),
    ):
        setattr(target, attr, value)
    bokeh_figure_to_html(plot, caller, title)
class FakeTemplate:
    """Duck-typed stand-in for a template object.

    Passed as ``template=`` to bokeh's ``file_html()`` (see
    bokeh_figure_to_html below): instead of producing a rendered page,
    ``render()`` simply records the template variables bokeh supplies,
    so the caller can pull out the plot script/div afterwards.
    """
    def __init__(self):
        # Holds the mapping handed to render(); None until render() is called.
        self.context = None
    def render(self, context):
        self.context = context
def bokeh_figure_to_html(fig, frame: FrameType, title: str = None):
    """Capture bokeh's script/div for *fig* and append a PlotBlock
    recorded at the caller frame's current line number."""
    capture = FakeTemplate()
    # file_html "renders" into our capture template; we only want its context.
    bokeh_file_html(fig, (None, None), template=capture, title=title)
    script = capture.context['plot_script'].strip('\n')
    div = capture.context['plot_div'].strip('\n')
    context.append(PlotBlock(f'{div}\n{script}', frame.f_lineno))
| 35.117647 | 73 | 0.647404 |
6cd4f48c0ba846d62106b30b88280a2e681571c4 | 912 | py | Python | tests/generate_key_pairs.py | link-money/distribution_robot-master | 4c35d80b8b74b6549529d147277981d593a24402 | [
"MIT"
] | null | null | null | tests/generate_key_pairs.py | link-money/distribution_robot-master | 4c35d80b8b74b6549529d147277981d593a24402 | [
"MIT"
] | null | null | null | tests/generate_key_pairs.py | link-money/distribution_robot-master | 4c35d80b8b74b6549529d147277981d593a24402 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
import sqlite3
from stellar_base.address import *
from wrapper.encryption import *

# global variables:
# Generate a fresh key/iv pair and persist it so the operator can later
# decrypt the stored secret keys.
__key, __iv = generate_random_key_pair()
with open('../test.test', 'w') as f:
    f.write('key:' + __key + '\niv:' + __iv)
pc = prpcrypt(__key, __iv)  # initialize the cipher with key and iv
# connect to sqlite
conn = sqlite3.connect('/home/cc5985/key_pairs.db')
try:
    cursor = conn.cursor()
    for cnt in range(0, 10000):
        # 1. generate 10000 secret keys and corresponding addresses
        key_pair = Keypair.random()
        secret_key = key_pair.seed().decode()
        address = key_pair.address().decode()
        # 2. encrypt the secret keys
        cipher_text = pc.encrypt(secret_key)
        # 3. write the cipher_text into the sqlite3 database
        #    (parameterized query instead of string concatenation)
        cursor.execute(
            'insert into key_pairs_2018_3_17 values(NULL,?,?)',
            (cipher_text, address))
        conn.commit()
finally:
    # The original script never closed the connection; always release it.
    conn.close()
print('success')
# 4. transfer money to the addresses
918982123e15a2f3645a46a4f1beaff65f466521 | 1,926 | py | Python | run.py | admiralobvious/falcon-boilerplate | eb076ff9e1d6fb1e537b18f86d2228afbf123e19 | [
"MIT"
] | 34 | 2016-10-26T17:19:23.000Z | 2020-04-30T09:12:34.000Z | run.py | admiralobvious/falcon-boilerplate | eb076ff9e1d6fb1e537b18f86d2228afbf123e19 | [
"MIT"
] | 2 | 2019-04-09T11:50:38.000Z | 2019-07-05T03:31:57.000Z | run.py | admiralobvious/falcon-boilerplate | eb076ff9e1d6fb1e537b18f86d2228afbf123e19 | [
"MIT"
] | 12 | 2017-01-14T09:40:24.000Z | 2020-04-16T07:40:56.000Z | from gevent import monkey
monkey.patch_all()
import logging
import multiprocessing
import falcon
import gunicorn.app.base
from app import configure, create_app, start
from app.config import settings
class Application(gunicorn.app.base.BaseApplication):
    """Run a WSGI app under an in-process gunicorn master.

    gunicorn calls load_config() to pull settings and load() to obtain
    the WSGI callable.
    """
    def __init__(self, app, options=None):
        self.options = options or {}
        self.application = app
        super(Application, self).__init__()

    def load_config(self):
        # Forward every recognized, non-None option into gunicorn's config.
        known = self.cfg.settings
        for key, value in self.options.items():
            if key in known and value is not None:
                self.cfg.set(key.lower(), value)

    def load(self):
        # The WSGI callable gunicorn should serve.
        return self.application
def init_app() -> falcon.App:
    """Run app-level configuration, then build and return the Falcon app."""
    configure()
    return create_app()
def _post_fork(server=None, w=None):
    """gunicorn post_fork hook: runs in each worker right after forking."""
    # Re-route gunicorn loggers, then kick off app startup work.
    _config_logging()
    start()
def _config_logging():
for logger in "gunicorn.access", "gunicorn.error":
logging.getLogger(logger).propagate = True
logging.getLogger(logger).handlers = []
if __name__ == "__main__":
    app = init_app()
    env_name = settings.get("ENV_NAME")
    # gunicorn's recommended default worker count: (2 * CPU cores) + 1
    default_workers = (multiprocessing.cpu_count() * 2) + 1
    # Map our settings names onto gunicorn configuration keys.
    opts = {
        "accesslog": settings.get("ACCESS_LOG"),
        "access_log_format": settings.get("ACCESS_LOG_FORMAT"),
        "bind": settings.get("BIND"),
        "errorlog": settings.get("ERROR_LOG"),
        "keepalive": settings.get("KEEP_ALIVE"),
        "post_fork": _post_fork,
        "proc_name": settings.get("APP_NAME"),
        "max_requests": settings.get("MAX_REQUESTS"),
        "max_requests_jitter": settings.get("MAX_REQUESTS_JITTER"),
        "worker_class": settings.get("WORKER_CLASS"),
        # One worker for local debugging; otherwise the computed default.
        "workers": settings.get("WORKERS")
        or (1 if env_name == "LOCAL" else default_workers),
    }
    Application(app, opts).run()
| 26.75 | 67 | 0.627207 |
4a1bbb894ac6a047c11cb8ce7e34177f1bef1449 | 615 | py | Python | setup.py | TravisJRCain/lambdata-zmurray | 61a906c896ec629e2cd486b300b04921159840da | [
"MIT"
] | null | null | null | setup.py | TravisJRCain/lambdata-zmurray | 61a906c896ec629e2cd486b300b04921159840da | [
"MIT"
] | null | null | null | setup.py | TravisJRCain/lambdata-zmurray | 61a906c896ec629e2cd486b300b04921159840da | [
"MIT"
] | null | null | null | # setup.py file
from setuptools import find_packages, setup
# Reuse the README as the long description rendered on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()
setup(
    name="lambdata-zmurray", # the name that you will install via pip
    version="1.2",
    author="Zack Murray",
    author_email="zachery.murray@gmail.com",
    description="A short description",
    long_description=long_description,
    long_description_content_type="text/markdown", # required if using a md file for long desc
    license="MIT",
    url="https://github.com/zack-murray/lambdata-zmurray",
    #keywords="",
    packages=find_packages() # ["my_lambdata"]
)
b1bf7826ebf9fece2b5f756a2c8dbd6b3242e9ec | 183,176 | py | Python | Tools/clinic/clinic.py | chexca/cpython | cfc6ce4d40f2f01314b7e283fb972a7bb3ed3faa | [
"CNRI-Python-GPL-Compatible"
] | 2 | 2020-06-22T07:22:12.000Z | 2020-09-29T06:33:22.000Z | Tools/clinic/clinic.py | chexca/cpython | cfc6ce4d40f2f01314b7e283fb972a7bb3ed3faa | [
"CNRI-Python-GPL-Compatible"
] | 1 | 2020-10-05T12:20:06.000Z | 2020-10-05T12:23:05.000Z | Tools/clinic/clinic.py | chexca/cpython | cfc6ce4d40f2f01314b7e283fb972a7bb3ed3faa | [
"CNRI-Python-GPL-Compatible"
] | 1 | 2020-10-05T11:42:31.000Z | 2020-10-05T11:42:31.000Z | #!/usr/bin/env python3
#
# Argument Clinic
# Copyright 2012-2013 by Larry Hastings.
# Licensed to the PSF under a contributor agreement.
#
import abc
import ast
import collections
import contextlib
import copy
import cpp
import functools
import hashlib
import inspect
import io
import itertools
import os
import pprint
import re
import shlex
import string
import sys
import tempfile
import textwrap
import traceback
import types
from types import *
NoneType = type(None)
# TODO:
#
# soon:
#
# * allow mixing any two of {positional-only, positional-or-keyword,
# keyword-only}
# * dict constructor uses positional-only and keyword-only
# * max and min use positional only with an optional group
# and keyword-only
#
version = '1'
NoneType = type(None)
class Unspecified:
    """Sentinel type; its singleton marks "no value was supplied",
    which is distinct from an explicit None."""
    def __repr__(self):
        return '<Unspecified>'


# The shared singleton instance.
unspecified = Unspecified()
class Null:
    """Sentinel type; exposed through the NULL singleton, distinct from
    both None and unspecified."""
    def __repr__(self):
        return '<Null>'


# The shared singleton instance.
NULL = Null()
class Unknown:
    """Sentinel type; its singleton marks a value that could not be
    determined."""
    def __repr__(self):
        return '<Unknown>'


# The shared singleton instance.
unknown = Unknown()

# Marker line separating the signature section of a generated docstring.
sig_end_marker = '--'
_text_accumulator_nt = collections.namedtuple("_text_accumulator", "text append output")
def _text_accumulator():
text = []
def output():
s = ''.join(text)
text.clear()
return s
return _text_accumulator_nt(text, text.append, output)
text_accumulator_nt = collections.namedtuple("text_accumulator", "text append")
def text_accumulator():
"""
Creates a simple text accumulator / joiner.
Returns a pair of callables:
append, output
"append" appends a string to the accumulator.
"output" returns the contents of the accumulator
joined together (''.join(accumulator)) and
empties the accumulator.
"""
text, append, output = _text_accumulator()
return text_accumulator_nt(append, output)
def warn_or_fail(fail=False, *args, filename=None, line_number=None):
    """Print a diagnostic built from *args*; exit the process if *fail*.

    When the module-global ``clinic`` object is set, its current filename
    and block-parser line number are used as defaults for *filename* and
    *line_number*, so messages point at the input being processed.
    """
    joined = " ".join([str(a) for a in args])
    add, output = text_accumulator()
    if fail:
        add("Error")
    else:
        add("Warning")
    if clinic:
        # Fall back to the position the global Clinic instance is processing.
        if filename is None:
            filename = clinic.filename
        if getattr(clinic, 'block_parser', None) and (line_number is None):
            line_number = clinic.block_parser.line_number
    if filename is not None:
        add(' in file "' + filename + '"')
    if line_number is not None:
        add(" on line " + str(line_number))
    add(':\n')
    add(joined)
    print(output())
    if fail:
        sys.exit(-1)
def warn(*args, filename=None, line_number=None):
    """Print a non-fatal diagnostic (see warn_or_fail); processing continues."""
    return warn_or_fail(False, *args, filename=filename, line_number=line_number)
def fail(*args, filename=None, line_number=None):
    """Print an error diagnostic and exit the process (see warn_or_fail)."""
    return warn_or_fail(True, *args, filename=filename, line_number=line_number)
def quoted_for_c_string(s):
    """Escape backslashes and both quote characters so *s* can be embedded
    inside a double-quoted C string literal."""
    # Single-pass translation; equivalent to replacing backslashes first
    # and then each quote character.
    return s.translate({
        ord('\\'): '\\\\',
        ord('"'): '\\"',
        ord("'"): "\\'",
    })
def c_repr(s):
    """Wrap *s* in double quotes; the caller must already have escaped it
    (see quoted_for_c_string)."""
    return '"{}"'.format(s)
# Truthy (a match object) when *s* is a legal C identifier, else None.
is_legal_c_identifier = re.compile('^[A-Za-z_][A-Za-z0-9_]*$').match


def is_legal_py_identifier(s):
    """True when every dot-separated component of *s* is a legal C identifier."""
    return all(map(is_legal_c_identifier, s.split('.')))
# identifiers that are okay in Python but aren't a good idea in C.
# so if they're used Argument Clinic will add "_value" to the end
# of the name in C.
c_keywords = set(
    "asm auto break case char const continue default do double "
    "else enum extern float for goto if inline int long "
    "register return short signed sizeof static struct switch "
    "typedef typeof union unsigned void volatile while".split())


def ensure_legal_c_identifier(s):
    """Return *s*, adjusted so it can safely be used as a C identifier.

    Aborts processing if *s* is not a legal identifier at all; appends
    "_value" when *s* collides with a C keyword.
    """
    # for now, just complain if what we're given isn't legal
    if not is_legal_c_identifier(s):
        fail("Illegal C identifier: {}".format(s))
    # but if we picked a C keyword, pick something else
    return s + "_value" if s in c_keywords else s
def rstrip_lines(s):
    """Return *s* with trailing whitespace stripped from every line."""
    return '\n'.join(line.rstrip() for line in s.split('\n'))
def format_escape(s):
    """Double all curly braces so *s* passes unchanged through a later
    str.format_map() template expansion."""
    return s.replace('{', '{{').replace('}', '}}')
def linear_format(s, **kwargs):
    """
    Perform str.format-like substitution, except:
      * The strings substituted must be on lines by
        themselves.  (This line is the "source line".)
      * If the substitution text is empty, the source line
        is removed in the output.
      * If the field is not recognized, the original line
        is passed unmodified through to the output.
      * If the substitution text is not empty:
          * Each line of the substituted text is indented
            by the indent of the source line.
          * A newline will be added to the end.
    """
    add, output = text_accumulator()
    for line in s.split('\n'):
        # A substitution line looks like:  <indent>{<name>}
        indent, curly, trailing = line.partition('{')
        if not curly:
            # no '{' at all: pass the line through unchanged
            add(line)
            add('\n')
            continue
        name, curly, trailing = trailing.partition('}')
        if not curly or name not in kwargs:
            # unterminated or unrecognized field: pass through unchanged
            add(line)
            add('\n')
            continue
        if trailing:
            fail("Text found after {" + name + "} block marker! It must be on a line by itself.")
        if indent.strip():
            fail("Non-whitespace characters found before {" + name + "} block marker! It must be on a line by itself.")
        value = kwargs[name]
        if not value:
            # empty substitution: drop the source line entirely
            continue
        # indent every substituted line to match the source line's indent
        value = textwrap.indent(rstrip_lines(value), indent)
        add(value)
        add('\n')
    # drop the final newline added by the loop
    return output()[:-1]
def indent_all_lines(s, prefix):
    """Return *s* with *prefix* prepended to every line.

    An empty final line is left untouched (so a trailing newline stays a
    bare newline); an empty *s* is returned unchanged.  Unlike
    textwrap.indent, blank interior lines DO receive the prefix.
    """
    *body, last = s.split('\n')
    pieces = [prefix + line + '\n' for line in body]
    if last:
        pieces.append(prefix + last)
    return ''.join(pieces)
def suffix_all_lines(s, suffix):
    """Return *s* with *suffix* appended to every line.

    An empty final line is left untouched (so a trailing newline stays a
    bare newline); an empty *s* is returned unchanged.
    """
    *body, last = s.split('\n')
    pieces = [line + suffix + '\n' for line in body]
    if last:
        pieces.append(last + suffix)
    return ''.join(pieces)
def version_splitter(s):
    """Split a version string into a tuple of integers.

    The ASCII letters a/b/c are allowed and map to -3/-2/-1 respectively,
    which permits Python-style version strings such as "1.4b3".
    Raises ValueError on any other character or a malformed string.
    """
    parts = []
    digits = []

    def close_number():
        # flush the pending run of digits into the result
        if not digits:
            raise ValueError('Unsupported version string: ' + repr(s))
        parts.append(int(''.join(digits)))
        digits.clear()

    for ch in s:
        if ch.isdigit():
            digits.append(ch)
        elif ch == '.':
            close_number()
        elif ch in 'abc':
            close_number()
            parts.append('abc'.index(ch) - 3)
        else:
            raise ValueError('Illegal character ' + repr(ch) + ' in version string ' + repr(s))
    close_number()
    return tuple(parts)
def version_comparitor(version1, version2):
    """cmp()-style comparison of two version strings: -1, 0, or 1.

    Shorter versions are padded with zeros, so "1.4" == "1.4.0".
    """
    pairs = itertools.zip_longest(
        version_splitter(version1), version_splitter(version2), fillvalue=0)
    for a, b in pairs:
        if a != b:
            return -1 if a < b else 1
    return 0
class CRenderData:
    """Accumulates the C-code fragments generated for a single function.

    Each attribute below is filled in by the converters and later spliced
    into the output templates.
    """
    def __init__(self):
        # The C statements to declare variables.
        # Should be full lines with \n eol characters.
        self.declarations = []
        # The C statements required to initialize the variables before the parse call.
        # Should be full lines with \n eol characters.
        self.initializers = []
        # The C statements needed to dynamically modify the values
        # parsed by the parse call, before calling the impl.
        self.modifications = []
        # The entries for the "keywords" array for PyArg_ParseTuple.
        # Should be individual strings representing the names.
        self.keywords = []
        # The "format units" for PyArg_ParseTuple.
        # Should be individual strings that will get
        self.format_units = []
        # The varargs arguments for PyArg_ParseTuple.
        self.parse_arguments = []
        # The parameter declarations for the impl function.
        self.impl_parameters = []
        # The arguments to the impl function at the time it's called.
        self.impl_arguments = []
        # For return converters: the name of the variable that
        # should receive the value returned by the impl.
        self.return_value = "return_value"
        # For return converters: the code to convert the return
        # value from the parse function. This is also where
        # you should check the _return_value for errors, and
        # "goto exit" if there are any.
        self.return_conversion = []
        # The C statements required to clean up after the impl call.
        self.cleanup = []
class FormatCounterFormatter(string.Formatter):
    """Counts occurrences of each replacement field in a format string.

    After evaluating format("string {a}, {b}, {c}, {a}") the ``counts``
    Counter holds {'a': 2, 'b': 1, 'c': 1}.  The formatted output itself
    is discarded (every field expands to '').
    """
    def __init__(self):
        self.counts = collections.Counter()

    def get_value(self, key, args, kwargs):
        # Tally the field name; substitute nothing into the output.
        self.counts.update((key,))
        return ''
class Language(metaclass=abc.ABCMeta):
    """Abstract base class for the output languages Clinic understands.

    Subclasses supply the template strings used to recognize and emit
    Clinic blocks: start_line, body_prefix, stop_line and checksum_line.
    """
    start_line = ""
    body_prefix = ""
    stop_line = ""
    checksum_line = ""
    def __init__(self, filename):
        pass
    @abc.abstractmethod
    def render(self, clinic, signatures):
        """Render the collected signatures into output-language text."""
        pass
    def parse_line(self, line):
        # Hook called for each input line; no-op by default.
        pass
    def validate(self):
        """Sanity-check this language's template strings."""
        def assert_only_one(attr, *additional_fields):
            """
            Ensures that the string found at getattr(self, attr)
            contains exactly one formatter replacement string for
            each valid field. The list of valid fields is
            ['dsl_name'] extended by additional_fields.
            e.g.
            self.fmt = "{dsl_name} {a} {b}"
            # this passes
            self.assert_only_one('fmt', 'a', 'b')
            # this fails, the format string has a {b} in it
            self.assert_only_one('fmt', 'a')
            # this fails, the format string doesn't have a {c} in it
            self.assert_only_one('fmt', 'a', 'b', 'c')
            # this fails, the format string has two {a}s in it,
            # it must contain exactly one
            self.fmt2 = '{dsl_name} {a} {a}'
            self.assert_only_one('fmt2', 'a')
            """
            fields = ['dsl_name']
            fields.extend(additional_fields)
            line = getattr(self, attr)
            fcf = FormatCounterFormatter()
            fcf.format(line)
            # NOTE: local_fail closes over 'name' late-bound from the
            # for-loops below; it must only be called inside those loops.
            def local_fail(should_be_there_but_isnt):
                if should_be_there_but_isnt:
                    fail("{} {} must contain {{{}}} exactly once!".format(
                        self.__class__.__name__, attr, name))
                else:
                    fail("{} {} must not contain {{{}}}!".format(
                        self.__class__.__name__, attr, name))
            for name, count in fcf.counts.items():
                if name in fields:
                    if count > 1:
                        local_fail(True)
                else:
                    local_fail(False)
            for name in fields:
                if fcf.counts.get(name) != 1:
                    local_fail(True)
        assert_only_one('start_line')
        assert_only_one('stop_line')
        # checksum_line carries either {arguments} (new style) or {checksum}.
        field = "arguments" if "{arguments}" in self.checksum_line else "checksum"
        assert_only_one('checksum_line', field)
class PythonLanguage(Language):
    """Clinic blocks embedded in Python source ('#'-prefixed markers)."""
    language      = 'Python'
    start_line    = "#/*[{dsl_name} input]"
    body_prefix   = "#"
    stop_line     = "#[{dsl_name} start generated code]*/"
    checksum_line = "#/*[{dsl_name} end generated code: {arguments}]*/"
def permute_left_option_groups(l):
    """Yield the prefixes acceptable for left-hand optional groups.

    Given [1, 2, 3], yields:
        ()
        (3,)
        (2, 3)
        (1, 2, 3)
    """
    yield ()
    so_far = []
    # Groups closest to the required parameters are admitted first.
    for group in reversed(l):
        so_far = [*group, *so_far]
        yield tuple(so_far)
def permute_right_option_groups(l):
    """Yield the suffixes acceptable for right-hand optional groups.

    Given [1, 2, 3], yields:
        ()
        (1,)
        (1, 2)
        (1, 2, 3)
    """
    yield ()
    so_far = []
    for group in l:
        so_far = so_far + list(group)
        yield tuple(so_far)
def permute_optional_groups(left, required, right):
    """
    Generator function that computes the set of acceptable
    argument lists for the provided iterables of
    argument groups. (Actually it generates a tuple of tuples.)
    Algorithm: prefer left options over right options.
    If required is empty, left must also be empty.
    """
    required = tuple(required)
    if not required:
        # A left optional group with no required parameters is unrepresentable.
        assert not left
    accumulator = []
    # Only one signature per total length is admitted; left-side options
    # win because the outer loop varies the right side more slowly.
    counts = set()
    for r in permute_right_option_groups(right):
        for l in permute_left_option_groups(left):
            t = l + required + r
            if len(t) in counts:
                continue
            counts.add(len(t))
            accumulator.append(t)
    accumulator.sort(key=len)
    return tuple(accumulator)
def strip_leading_and_trailing_blank_lines(s):
    """Return *s* without leading/trailing blank lines (and without any
    trailing whitespace at the end of the string)."""
    lines = s.rstrip().split('\n')
    # rstrip() above removed trailing blanks; now drop the leading ones.
    while lines and not lines[0].strip():
        del lines[0]
    return '\n'.join(lines)
@functools.lru_cache()
def normalize_snippet(s, *, indent=0):
    """
    Reformat *s*: drop leading and trailing blank lines, dedent so the
    first non-white character of any line sits at column *indent*, and
    return the result without a trailing newline.
    """
    snippet = textwrap.dedent(strip_leading_and_trailing_blank_lines(s))
    if not indent:
        return snippet
    return textwrap.indent(snippet, ' ' * indent)
def wrap_declarations(text, length=78):
    """
    A simple-minded text wrapper for C function declarations.
    It views a declaration line as looking like this:
        xxxxxxxx(xxxxxxxxx,xxxxxxxxx)
    If called with length=30, it would wrap that line into
        xxxxxxxx(xxxxxxxxx,
                 xxxxxxxxx)
    (If the declaration has zero or one parameters, this
    function won't wrap it.)
    If this doesn't work properly, it's probably better to
    start from scratch with a more sophisticated algorithm,
    rather than try and improve/debug this dumb little function.
    """
    lines = []
    for line in text.split('\n'):
        prefix, _, after_l_paren = line.partition('(')
        if not after_l_paren:
            # no '(' on this line: pass through unchanged
            lines.append(line)
            continue
        parameters, _, after_r_paren = after_l_paren.partition(')')
        if not _:
            # unbalanced parentheses: pass through unchanged
            lines.append(line)
            continue
        if ',' not in parameters:
            # zero or one parameter: never wrapped
            lines.append(line)
            continue
        parameters = [x.strip() + ", " for x in parameters.split(',')]
        prefix += "("
        # continuation lines align under the '(' when it fits, else indent 4
        if len(prefix) < length:
            spaces = " " * len(prefix)
        else:
            spaces = " " * 4
        while parameters:
            line = prefix
            first = True
            # greedily pack parameters onto the line (always at least one)
            while parameters:
                if (not first and
                    (len(line) + len(parameters[0]) > length)):
                    break
                line += parameters.pop(0)
                first = False
            if not parameters:
                # last line: strip the trailing ", " and restore the ')'
                line = line.rstrip(", ") + ")" + after_r_paren
            lines.append(line.rstrip())
            prefix = spaces
    return "\n".join(lines)
class CLanguage(Language):
body_prefix = "#"
language = 'C'
start_line = "/*[{dsl_name} input]"
body_prefix = ""
stop_line = "[{dsl_name} start generated code]*/"
checksum_line = "/*[{dsl_name} end generated code: {arguments}]*/"
def __init__(self, filename):
super().__init__(filename)
self.cpp = cpp.Monitor(filename)
self.cpp.fail = fail
def parse_line(self, line):
self.cpp.writeline(line)
def render(self, clinic, signatures):
function = None
for o in signatures:
if isinstance(o, Function):
if function:
fail("You may specify at most one function per block.\nFound a block containing at least two:\n\t" + repr(function) + " and " + repr(o))
function = o
return self.render_function(clinic, function)
def docstring_for_c_string(self, f):
if re.search(r'[^\x00-\x7F]', f.docstring):
warn("Non-ascii character appear in docstring.")
text, add, output = _text_accumulator()
# turn docstring into a properly quoted C string
for line in f.docstring.split('\n'):
add('"')
add(quoted_for_c_string(line))
add('\\n"\n')
if text[-2] == sig_end_marker:
# If we only have a signature, add the blank line that the
# __text_signature__ getter expects to be there.
add('"\\n"')
else:
text.pop()
add('"')
return ''.join(text)
def output_templates(self, f):
parameters = list(f.parameters.values())
assert parameters
assert isinstance(parameters[0].converter, self_converter)
del parameters[0]
converters = [p.converter for p in parameters]
has_option_groups = parameters and (parameters[0].group or parameters[-1].group)
default_return_converter = (not f.return_converter or
f.return_converter.type == 'PyObject *')
new_or_init = f.kind in (METHOD_NEW, METHOD_INIT)
pos_only = min_pos = max_pos = min_kw_only = 0
for i, p in enumerate(parameters, 1):
if p.is_keyword_only():
assert not p.is_positional_only()
if not p.is_optional():
min_kw_only = i - max_pos
else:
max_pos = i
if p.is_positional_only():
pos_only = i
if not p.is_optional():
min_pos = i
requires_defining_class = any(
isinstance(p.converter, defining_class_converter)
for p in parameters)
meth_o = (len(parameters) == 1 and
parameters[0].is_positional_only() and
not converters[0].is_optional() and
not requires_defining_class and
not new_or_init)
# we have to set these things before we're done:
#
# docstring_prototype
# docstring_definition
# impl_prototype
# methoddef_define
# parser_prototype
# parser_definition
# impl_definition
# cpp_if
# cpp_endif
# methoddef_ifndef
return_value_declaration = "PyObject *return_value = NULL;"
methoddef_define = normalize_snippet("""
#define {methoddef_name} \\
{{"{name}", {methoddef_cast}{c_basename}, {methoddef_flags}, {c_basename}__doc__}},
""")
if new_or_init and not f.docstring:
docstring_prototype = docstring_definition = ''
else:
docstring_prototype = normalize_snippet("""
PyDoc_VAR({c_basename}__doc__);
""")
docstring_definition = normalize_snippet("""
PyDoc_STRVAR({c_basename}__doc__,
{docstring});
""")
impl_definition = normalize_snippet("""
static {impl_return_type}
{c_basename}_impl({impl_parameters})
""")
impl_prototype = parser_prototype = parser_definition = None
parser_prototype_keyword = normalize_snippet("""
static PyObject *
{c_basename}({self_type}{self_name}, PyObject *args, PyObject *kwargs)
""")
parser_prototype_varargs = normalize_snippet("""
static PyObject *
{c_basename}({self_type}{self_name}, PyObject *args)
""")
parser_prototype_fastcall = normalize_snippet("""
static PyObject *
{c_basename}({self_type}{self_name}, PyObject *const *args, Py_ssize_t nargs)
""")
parser_prototype_fastcall_keywords = normalize_snippet("""
static PyObject *
{c_basename}({self_type}{self_name}, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
""")
parser_prototype_def_class = normalize_snippet("""
static PyObject *
{c_basename}({self_type}{self_name}, PyTypeObject *{defining_class_name}, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
""")
# parser_body_fields remembers the fields passed in to the
# previous call to parser_body. this is used for an awful hack.
parser_body_fields = ()
parser_body_declarations = ''
def parser_body(prototype, *fields, declarations=''):
nonlocal parser_body_fields, parser_body_declarations
add, output = text_accumulator()
add(prototype)
parser_body_fields = fields
parser_body_declarations = declarations
fields = list(fields)
fields.insert(0, normalize_snippet("""
{{
{return_value_declaration}
{parser_declarations}
{declarations}
{initializers}
""") + "\n")
# just imagine--your code is here in the middle
fields.append(normalize_snippet("""
{modifications}
{return_value} = {c_basename}_impl({impl_arguments});
{return_conversion}
{exit_label}
{cleanup}
return return_value;
}}
"""))
for field in fields:
add('\n')
add(field)
return linear_format(output(), parser_declarations=declarations)
if not parameters:
# no parameters, METH_NOARGS
flags = "METH_NOARGS"
parser_prototype = normalize_snippet("""
static PyObject *
{c_basename}({self_type}{self_name}, PyObject *Py_UNUSED(ignored))
""")
parser_definition = parser_prototype
if default_return_converter:
parser_definition = parser_prototype + '\n' + normalize_snippet("""
{{
return {c_basename}_impl({impl_arguments});
}}
""")
else:
parser_definition = parser_body(parser_prototype)
elif meth_o:
flags = "METH_O"
if (isinstance(converters[0], object_converter) and
converters[0].format_unit == 'O'):
meth_o_prototype = normalize_snippet("""
static PyObject *
{c_basename}({impl_parameters})
""")
if default_return_converter:
# maps perfectly to METH_O, doesn't need a return converter.
# so we skip making a parse function
# and call directly into the impl function.
impl_prototype = parser_prototype = parser_definition = ''
impl_definition = meth_o_prototype
else:
# SLIGHT HACK
# use impl_parameters for the parser here!
parser_prototype = meth_o_prototype
parser_definition = parser_body(parser_prototype)
else:
argname = 'arg'
if parameters[0].name == argname:
argname += '_'
parser_prototype = normalize_snippet("""
static PyObject *
{c_basename}({self_type}{self_name}, PyObject *%s)
""" % argname)
displayname = parameters[0].get_displayname(0)
parsearg = converters[0].parse_arg(argname, displayname)
if parsearg is None:
parsearg = """
if (!PyArg_Parse(%s, "{format_units}:{name}", {parse_arguments})) {{
goto exit;
}}
""" % argname
parser_definition = parser_body(parser_prototype,
normalize_snippet(parsearg, indent=4))
elif has_option_groups:
# positional parameters with option groups
# (we have to generate lots of PyArg_ParseTuple calls
# in a big switch statement)
flags = "METH_VARARGS"
parser_prototype = parser_prototype_varargs
parser_definition = parser_body(parser_prototype, ' {option_group_parsing}')
elif not requires_defining_class and pos_only == len(parameters):
if not new_or_init:
# positional-only, but no option groups
# we only need one call to _PyArg_ParseStack
flags = "METH_FASTCALL"
parser_prototype = parser_prototype_fastcall
nargs = 'nargs'
argname_fmt = 'args[%d]'
else:
# positional-only, but no option groups
# we only need one call to PyArg_ParseTuple
flags = "METH_VARARGS"
parser_prototype = parser_prototype_varargs
nargs = 'PyTuple_GET_SIZE(args)'
argname_fmt = 'PyTuple_GET_ITEM(args, %d)'
parser_code = [normalize_snippet("""
if (!_PyArg_CheckPositional("{name}", %s, %d, %d)) {{
goto exit;
}}
""" % (nargs, min_pos, max_pos), indent=4)]
has_optional = False
for i, p in enumerate(parameters):
displayname = p.get_displayname(i+1)
parsearg = p.converter.parse_arg(argname_fmt % i, displayname)
if parsearg is None:
#print('Cannot convert %s %r for %s' % (p.converter.__class__.__name__, p.converter.format_unit, p.converter.name), file=sys.stderr)
parser_code = None
break
if has_optional or p.is_optional():
has_optional = True
parser_code.append(normalize_snippet("""
if (%s < %d) {{
goto skip_optional;
}}
""", indent=4) % (nargs, i + 1))
parser_code.append(normalize_snippet(parsearg, indent=4))
if parser_code is not None:
if has_optional:
parser_code.append("skip_optional:")
else:
if not new_or_init:
parser_code = [normalize_snippet("""
if (!_PyArg_ParseStack(args, nargs, "{format_units}:{name}",
{parse_arguments})) {{
goto exit;
}}
""", indent=4)]
else:
parser_code = [normalize_snippet("""
if (!PyArg_ParseTuple(args, "{format_units}:{name}",
{parse_arguments})) {{
goto exit;
}}
""", indent=4)]
parser_definition = parser_body(parser_prototype, *parser_code)
else:
has_optional_kw = (max(pos_only, min_pos) + min_kw_only < len(converters))
if not new_or_init:
flags = "METH_FASTCALL|METH_KEYWORDS"
parser_prototype = parser_prototype_fastcall_keywords
argname_fmt = 'args[%d]'
declarations = normalize_snippet("""
static const char * const _keywords[] = {{{keywords} NULL}};
static _PyArg_Parser _parser = {{NULL, _keywords, "{name}", 0}};
PyObject *argsbuf[%s];
""" % len(converters))
if has_optional_kw:
declarations += "\nPy_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - %d;" % (min_pos + min_kw_only)
parser_code = [normalize_snippet("""
args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, %d, %d, %d, argsbuf);
if (!args) {{
goto exit;
}}
""" % (min_pos, max_pos, min_kw_only), indent=4)]
else:
# positional-or-keyword arguments
flags = "METH_VARARGS|METH_KEYWORDS"
parser_prototype = parser_prototype_keyword
argname_fmt = 'fastargs[%d]'
declarations = normalize_snippet("""
static const char * const _keywords[] = {{{keywords} NULL}};
static _PyArg_Parser _parser = {{NULL, _keywords, "{name}", 0}};
PyObject *argsbuf[%s];
PyObject * const *fastargs;
Py_ssize_t nargs = PyTuple_GET_SIZE(args);
""" % len(converters))
if has_optional_kw:
declarations += "\nPy_ssize_t noptargs = nargs + (kwargs ? PyDict_GET_SIZE(kwargs) : 0) - %d;" % (min_pos + min_kw_only)
parser_code = [normalize_snippet("""
fastargs = _PyArg_UnpackKeywords(_PyTuple_CAST(args)->ob_item, nargs, kwargs, NULL, &_parser, %d, %d, %d, argsbuf);
if (!fastargs) {{
goto exit;
}}
""" % (min_pos, max_pos, min_kw_only), indent=4)]
if requires_defining_class:
flags = 'METH_METHOD|' + flags
parser_prototype = parser_prototype_def_class
add_label = None
for i, p in enumerate(parameters):
displayname = p.get_displayname(i+1)
parsearg = p.converter.parse_arg(argname_fmt % i, displayname)
if parsearg is None:
#print('Cannot convert %s %r for %s' % (p.converter.__class__.__name__, p.converter.format_unit, p.converter.name), file=sys.stderr)
parser_code = None
break
if add_label and (i == pos_only or i == max_pos):
parser_code.append("%s:" % add_label)
add_label = None
if not p.is_optional():
parser_code.append(normalize_snippet(parsearg, indent=4))
elif i < pos_only:
add_label = 'skip_optional_posonly'
parser_code.append(normalize_snippet("""
if (nargs < %d) {{
goto %s;
}}
""" % (i + 1, add_label), indent=4))
if has_optional_kw:
parser_code.append(normalize_snippet("""
noptargs--;
""", indent=4))
parser_code.append(normalize_snippet(parsearg, indent=4))
else:
if i < max_pos:
label = 'skip_optional_pos'
first_opt = max(min_pos, pos_only)
else:
label = 'skip_optional_kwonly'
first_opt = max_pos + min_kw_only
if i == first_opt:
add_label = label
parser_code.append(normalize_snippet("""
if (!noptargs) {{
goto %s;
}}
""" % add_label, indent=4))
if i + 1 == len(parameters):
parser_code.append(normalize_snippet(parsearg, indent=4))
else:
add_label = label
parser_code.append(normalize_snippet("""
if (%s) {{
""" % (argname_fmt % i), indent=4))
parser_code.append(normalize_snippet(parsearg, indent=8))
parser_code.append(normalize_snippet("""
if (!--noptargs) {{
goto %s;
}}
}}
""" % add_label, indent=4))
if parser_code is not None:
if add_label:
parser_code.append("%s:" % add_label)
else:
declarations = (
'static const char * const _keywords[] = {{{keywords} NULL}};\n'
'static _PyArg_Parser _parser = {{"{format_units}:{name}", _keywords, 0}};')
if not new_or_init:
parser_code = [normalize_snippet("""
if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser{parse_arguments_comma}
{parse_arguments})) {{
goto exit;
}}
""", indent=4)]
else:
parser_code = [normalize_snippet("""
if (!_PyArg_ParseTupleAndKeywordsFast(args, kwargs, &_parser,
{parse_arguments})) {{
goto exit;
}}
""", indent=4)]
parser_definition = parser_body(parser_prototype, *parser_code,
declarations=declarations)
if new_or_init:
methoddef_define = ''
if f.kind == METHOD_NEW:
parser_prototype = parser_prototype_keyword
else:
return_value_declaration = "int return_value = -1;"
parser_prototype = normalize_snippet("""
static int
{c_basename}({self_type}{self_name}, PyObject *args, PyObject *kwargs)
""")
fields = list(parser_body_fields)
parses_positional = 'METH_NOARGS' not in flags
parses_keywords = 'METH_KEYWORDS' in flags
if parses_keywords:
assert parses_positional
if requires_defining_class:
raise ValueError("Slot methods cannot access their defining class.")
if not parses_keywords:
fields.insert(0, normalize_snippet("""
if ({self_type_check}!_PyArg_NoKeywords("{name}", kwargs)) {{
goto exit;
}}
""", indent=4))
if not parses_positional:
fields.insert(0, normalize_snippet("""
if ({self_type_check}!_PyArg_NoPositional("{name}", args)) {{
goto exit;
}}
""", indent=4))
parser_definition = parser_body(parser_prototype, *fields,
declarations=parser_body_declarations)
if flags in ('METH_NOARGS', 'METH_O', 'METH_VARARGS'):
methoddef_cast = "(PyCFunction)"
else:
methoddef_cast = "(PyCFunction)(void(*)(void))"
if f.methoddef_flags:
flags += '|' + f.methoddef_flags
methoddef_define = methoddef_define.replace('{methoddef_flags}', flags)
methoddef_define = methoddef_define.replace('{methoddef_cast}', methoddef_cast)
methoddef_ifndef = ''
conditional = self.cpp.condition()
if not conditional:
cpp_if = cpp_endif = ''
else:
cpp_if = "#if " + conditional
cpp_endif = "#endif /* " + conditional + " */"
if methoddef_define and f.full_name not in clinic.ifndef_symbols:
clinic.ifndef_symbols.add(f.full_name)
methoddef_ifndef = normalize_snippet("""
#ifndef {methoddef_name}
#define {methoddef_name}
#endif /* !defined({methoddef_name}) */
""")
# add ';' to the end of parser_prototype and impl_prototype
# (they mustn't be None, but they could be an empty string.)
assert parser_prototype is not None
if parser_prototype:
assert not parser_prototype.endswith(';')
parser_prototype += ';'
if impl_prototype is None:
impl_prototype = impl_definition
if impl_prototype:
impl_prototype += ";"
parser_definition = parser_definition.replace("{return_value_declaration}", return_value_declaration)
d = {
"docstring_prototype" : docstring_prototype,
"docstring_definition" : docstring_definition,
"impl_prototype" : impl_prototype,
"methoddef_define" : methoddef_define,
"parser_prototype" : parser_prototype,
"parser_definition" : parser_definition,
"impl_definition" : impl_definition,
"cpp_if" : cpp_if,
"cpp_endif" : cpp_endif,
"methoddef_ifndef" : methoddef_ifndef,
}
# make sure we didn't forget to assign something,
# and wrap each non-empty value in \n's
d2 = {}
for name, value in d.items():
assert value is not None, "got a None value for template " + repr(name)
if value:
value = '\n' + value + '\n'
d2[name] = value
return d2
@staticmethod
def group_to_variable_name(group):
    """Return the name of the C int flag tracking option *group*.

    Negative group numbers lie to the left of the required
    parameters, non-negative ones to the right.
    """
    if group < 0:
        return f"group_left_{-group}"
    return f"group_right_{group}"
def render_option_group_parsing(self, f, template_dict):
    """Generate C code parsing positional-only "option groups".

    Fills template_dict['option_group_parsing'] with a C switch on
    PyTuple_GET_SIZE(args); each case parses one legal combination
    of groups and sets the corresponding group_* flag variables.
    """
    # positional only, grouped, optional arguments!
    # can be optional on the left or right.
    # here's an example:
    #
    # [ [ [ A1 A2 ] B1 B2 B3 ] C1 C2 ] D1 D2 D3 [ E1 E2 E3 [ F1 F2 F3 ] ]
    #
    # Here group D are required, and all other groups are optional.
    # (Group D's "group" is actually None.)
    # We can figure out which sets of arguments we have based on
    # how many arguments are in the tuple.
    #
    # Note that you need to count up on both sides. For example,
    # you could have groups C+D, or C+D+E, or C+D+E+F.
    #
    # What if the number of arguments leads us to an ambiguous result?
    # Clinic prefers groups on the left. So in the above example,
    # five arguments would map to B+C, not C+D.
    add, output = text_accumulator()
    parameters = list(f.parameters.values())
    # The implicit "self" parameter takes no part in option groups.
    if isinstance(parameters[0].converter, self_converter):
        del parameters[0]
    # NOTE(review): 'groups' is assigned but never read below — confirm dead.
    groups = []
    group = None
    left = []
    right = []
    required = []
    last = unspecified
    # Partition consecutive parameters by their group id: negative ids
    # accumulate into 'left', 0 into 'required', positive into 'right'.
    for p in parameters:
        group_id = p.group
        if group_id != last:
            last = group_id
            group = []
            if group_id < 0:
                left.append(group)
            elif group_id == 0:
                group = required
            else:
                right.append(group)
        group.append(p)
    count_min = sys.maxsize
    count_max = -1
    add("switch (PyTuple_GET_SIZE(args)) {\n")
    # Emit one "case N:" per legal subset of groups.
    for subset in permute_optional_groups(left, required, right):
        count = len(subset)
        count_min = min(count_min, count)
        count_max = max(count_max, count)
        if count == 0:
            add(""" case 0:
break;
""")
            continue
        group_ids = {p.group for p in subset}  # eliminate duplicates
        d = {}
        d['count'] = count
        d['name'] = f.name
        d['format_units'] = "".join(p.converter.format_unit for p in subset)
        parse_arguments = []
        for p in subset:
            p.converter.parse_argument(parse_arguments)
        d['parse_arguments'] = ", ".join(parse_arguments)
        # group id 0 (required params) has no flag variable to set.
        group_ids.discard(0)
        lines = [self.group_to_variable_name(g) + " = 1;" for g in group_ids]
        lines = "\n".join(lines)
        s = """\
case {count}:
if (!PyArg_ParseTuple(args, "{format_units}:{name}", {parse_arguments})) {{
goto exit;
}}
{group_booleans}
break;
"""
        s = linear_format(s, group_booleans=lines)
        s = s.format_map(d)
        add(s)
    # A tuple size matching no legal subset is a TypeError.
    add(" default:\n")
    s = ' PyErr_SetString(PyExc_TypeError, "{} requires {} to {} arguments");\n'
    add(s.format(f.full_name, count_min, count_max))
    add(' goto exit;\n')
    add("}")
    template_dict['option_group_parsing'] = format_escape(output())
def render_function(self, clinic, f):
    """Render the complete C output for function *f*.

    Builds a template dict from the function's converters, formats
    each output template, and appends the result to the appropriate
    destination buffer.  Returns the dump of the 'block' destination.
    """
    if not f:
        return ""
    add, output = text_accumulator()
    data = CRenderData()
    assert f.parameters, "We should always have a 'self' at this point!"
    parameters = f.render_parameters
    converters = [p.converter for p in parameters]
    templates = self.output_templates(f)
    f_self = parameters[0]
    selfless = parameters[1:]
    assert isinstance(f_self.converter, self_converter), "No self parameter in " + repr(f.full_name) + "!"
    last_group = 0
    first_optional = len(selfless)
    positional = selfless and selfless[-1].is_positional_only()
    new_or_init = f.kind in (METHOD_NEW, METHOD_INIT)
    # NOTE(review): default_return_converter appears unused in this
    # method — confirm whether it can be removed.
    default_return_converter = (not f.return_converter or
                                f.return_converter.type == 'PyObject *')
    has_option_groups = False
    # offset i by -1 because first_optional needs to ignore self
    for i, p in enumerate(parameters, -1):
        c = p.converter
        if (i != -1) and (p.default is not unspecified):
            first_optional = min(first_optional, i)
        # insert group variable: each new non-zero group id gets an
        # "int group_*" flag declared and threaded through to the impl.
        group = p.group
        if last_group != group:
            last_group = group
            if group:
                group_name = self.group_to_variable_name(group)
                data.impl_arguments.append(group_name)
                data.declarations.append("int " + group_name + " = 0;")
                data.impl_parameters.append("int " + group_name)
                has_option_groups = True
        c.render(p, data)
    if has_option_groups and (not positional):
        fail("You cannot use optional groups ('[' and ']')\nunless all parameters are positional-only ('/').")
    # HACK
    # when we're METH_O, but have a custom return converter,
    # we use "impl_parameters" for the parsing function
    # because that works better. but that means we must
    # suppress actually declaring the impl's parameters
    # as variables in the parsing function. but since it's
    # METH_O, we have exactly one anyway, so we know exactly
    # where it is.
    if ("METH_O" in templates['methoddef_define'] and
        '{impl_parameters}' in templates['parser_prototype']):
        data.declarations.pop(0)
    template_dict = {}
    full_name = f.full_name
    template_dict['full_name'] = full_name
    # __new__/__init__ are reported under the class's name, not their own.
    if new_or_init:
        name = f.cls.name
    else:
        name = f.name
    template_dict['name'] = name
    if f.c_basename:
        c_basename = f.c_basename
    else:
        fields = full_name.split(".")
        if fields[-1] == '__new__':
            fields.pop()
        c_basename = "_".join(fields)
    template_dict['c_basename'] = c_basename
    methoddef_name = "{}_METHODDEF".format(c_basename.upper())
    template_dict['methoddef_name'] = methoddef_name
    template_dict['docstring'] = self.docstring_for_c_string(f)
    template_dict['self_name'] = template_dict['self_type'] = template_dict['self_type_check'] = ''
    # Converters may override self_name/self_type/etc. here.
    for converter in converters:
        converter.set_template_dict(template_dict)
    f.return_converter.render(f, data)
    template_dict['impl_return_type'] = f.return_converter.type
    template_dict['declarations'] = format_escape("\n".join(data.declarations))
    template_dict['initializers'] = "\n\n".join(data.initializers)
    template_dict['modifications'] = '\n\n'.join(data.modifications)
    template_dict['keywords'] = ' '.join('"' + k + '",' for k in data.keywords)
    template_dict['format_units'] = ''.join(data.format_units)
    template_dict['parse_arguments'] = ', '.join(data.parse_arguments)
    if data.parse_arguments:
        template_dict['parse_arguments_comma'] = ',';
    else:
        template_dict['parse_arguments_comma'] = '';
    template_dict['impl_parameters'] = ", ".join(data.impl_parameters)
    template_dict['impl_arguments'] = ", ".join(data.impl_arguments)
    template_dict['return_conversion'] = format_escape("".join(data.return_conversion).rstrip())
    template_dict['cleanup'] = format_escape("".join(data.cleanup))
    template_dict['return_value'] = data.return_value
    # used by unpack tuple code generator
    # NOTE(review): ignore_self appears unused below — confirm.
    ignore_self = -1 if isinstance(converters[0], self_converter) else 0
    unpack_min = first_optional
    unpack_max = len(selfless)
    template_dict['unpack_min'] = str(unpack_min)
    template_dict['unpack_max'] = str(unpack_max)
    if has_option_groups:
        self.render_option_group_parsing(f, template_dict)
    # buffers, not destination
    for name, destination in clinic.destination_buffers.items():
        template = templates[name]
        if has_option_groups:
            template = linear_format(template,
                option_group_parsing=template_dict['option_group_parsing'])
        template = linear_format(template,
            declarations=template_dict['declarations'],
            return_conversion=template_dict['return_conversion'],
            initializers=template_dict['initializers'],
            modifications=template_dict['modifications'],
            cleanup=template_dict['cleanup'],
            )
        # Only generate the "exit:" label
        # if we have any gotos
        need_exit_label = "goto exit;" in template
        template = linear_format(template,
            exit_label="exit:" if need_exit_label else ''
            )
        s = template.format_map(template_dict)
        # mild hack:
        # reflow long impl declarations
        if name in {"impl_prototype", "impl_definition"}:
            s = wrap_declarations(s)
        if clinic.line_prefix:
            s = indent_all_lines(s, clinic.line_prefix)
        if clinic.line_suffix:
            s = suffix_all_lines(s, clinic.line_suffix)
        destination.append(s)
    return clinic.get_destination('block').dump()
@contextlib.contextmanager
def OverrideStdioWith(stdout):
    """Temporarily redirect sys.stdout to *stdout*.

    On exit, asserts that nothing else replaced sys.stdout in the
    meantime, then restores the original stream.
    """
    previous = sys.stdout
    sys.stdout = stdout
    try:
        yield
    finally:
        assert sys.stdout is stdout
        sys.stdout = previous
def create_regex(before, after, word=True, whole_line=True):
    """Create an re object for matching marker lines."""
    middle = r"\w+" if word else ".+"
    pieces = [re.escape(before), "(", middle, ")", re.escape(after)]
    if whole_line:
        pieces = ["^"] + pieces + ["$"]
    return re.compile("".join(pieces))
class Block:
    r"""
    One chunk of an input file.

    If dsl_name is None the block is verbatim text and "input" is the
    only meaningful member.  Otherwise it is a Clinic block whose DSL
    is named by dsl_name.

    input      -- str, the original text (for Clinic blocks, with the
                  body_prefix and redundant leading whitespace removed);
                  may contain embedded \n characters.
    dsl_name   -- str or None; the name found between the square
                  brackets on the block's start line.
    signatures -- list of clinic.Module / clinic.Class / clinic.Function
                  objects parsed from the block (at most one of each).
    output     -- str or None; the generated output for this block.
    indent     -- leading whitespace common to every input line (after
                  body_prefix removal, if any).
    preindent  -- whitespace found *before* the body_prefix on every
                  line; empty whenever body_prefix is empty.
    """
    def __init__(self, input, dsl_name=None, signatures=None, output=None, indent='', preindent=''):
        assert isinstance(input, str)
        self.input = input
        self.dsl_name = dsl_name
        self.signatures = signatures if signatures else []
        self.output = output
        self.indent = indent
        self.preindent = preindent

    def __repr__(self):
        kind = self.dsl_name or "text"
        def clip(value):
            # Abbreviate long reprs, re-closing the quote character.
            text = repr(value)
            if len(text) > 30:
                text = text[:26] + "..." + text[0]
            return text
        return f"<Block {kind} input={clip(self.input)} output={clip(self.output)}>"
class BlockParser:
    """
    Block-oriented parser for Argument Clinic.
    Iterator, yields Block objects.
    """

    def __init__(self, input, language, *, verify=True):
        """
        "input" should be a str object
        with embedded \n characters.
        "language" should be a Language object.
        """
        language.validate()
        # Lines are stored reversed so .pop() yields them in file order.
        self.input = collections.deque(reversed(input.splitlines(keepends=True)))
        self.block_start_line_number = self.line_number = 0
        self.language = language
        before, _, after = language.start_line.partition('{dsl_name}')
        assert _ == '{dsl_name}'
        # find_start_re matches a start marker anywhere in a line;
        # start_re only matches a whole-line start marker.
        self.find_start_re = create_regex(before, after, whole_line=False)
        self.start_re = create_regex(before, after)
        self.verify = verify
        # Cache the compiled checksum regex per dsl_name.
        self.last_checksum_re = None
        self.last_dsl_name = None
        self.dsl_name = None
        self.first_block = True

    def __iter__(self):
        return self

    def __next__(self):
        """Yield the next Block: verbatim text and Clinic blocks alternate."""
        while True:
            if not self.input:
                raise StopIteration
            if self.dsl_name:
                # The previous verbatim block ended on a start marker;
                # parse the Clinic block it introduced.
                return_value = self.parse_clinic_block(self.dsl_name)
                self.dsl_name = None
                self.first_block = False
                return return_value
            block = self.parse_verbatim_block()
            # Suppress an empty leading verbatim block.
            if self.first_block and not block.input:
                continue
            self.first_block = False
            return block

    def is_start_line(self, line):
        """Return the dsl_name if *line* is a block start marker, else None."""
        match = self.start_re.match(line.lstrip())
        return match.group(1) if match else None

    def _line(self, lookahead=False):
        """Consume and return one line, tracking the line number.

        With lookahead=True the language is not notified (the line
        may be pushed back later).
        """
        self.line_number += 1
        line = self.input.pop()
        if not lookahead:
            self.language.parse_line(line)
        return line

    def parse_verbatim_block(self):
        """Accumulate lines until a start marker (or EOF) and wrap them."""
        add, output = text_accumulator()
        self.block_start_line_number = self.line_number
        while self.input:
            line = self._line()
            dsl_name = self.is_start_line(line)
            if dsl_name:
                # Remember it so __next__ parses the Clinic block next.
                self.dsl_name = dsl_name
                break
            add(line)
        return Block(output())

    def parse_clinic_block(self, dsl_name):
        """Parse one Clinic block body plus its generated output/checksum."""
        input_add, input_output = text_accumulator()
        self.block_start_line_number = self.line_number + 1
        stop_line = self.language.stop_line.format(dsl_name=dsl_name)
        body_prefix = self.language.body_prefix.format(dsl_name=dsl_name)
        def is_stop_line(line):
            # make sure to recognize stop line even if it
            # doesn't end with EOL (it could be the very end of the file)
            if not line.startswith(stop_line):
                return False
            remainder = line[len(stop_line):]
            return (not remainder) or remainder.isspace()
        # consume body of program
        while self.input:
            line = self._line()
            if is_stop_line(line) or self.is_start_line(line):
                break
            if body_prefix:
                line = line.lstrip()
                assert line.startswith(body_prefix)
                line = line[len(body_prefix):]
            input_add(line)
        # consume output and checksum line, if present.
        if self.last_dsl_name == dsl_name:
            checksum_re = self.last_checksum_re
        else:
            before, _, after = self.language.checksum_line.format(dsl_name=dsl_name, arguments='{arguments}').partition('{arguments}')
            assert _ == '{arguments}'
            checksum_re = create_regex(before, after, word=False)
            self.last_dsl_name = dsl_name
            self.last_checksum_re = checksum_re
        # scan forward for checksum line
        output_add, output_output = text_accumulator()
        arguments = None
        while self.input:
            line = self._line(lookahead=True)
            match = checksum_re.match(line.lstrip())
            arguments = match.group(1) if match else None
            if arguments:
                break
            output_add(line)
            if self.is_start_line(line):
                break
        output = output_output()
        if arguments:
            # Parse "name=value" fields from the checksum line and
            # verify the recorded checksum against the actual output.
            d = {}
            for field in shlex.split(arguments):
                name, equals, value = field.partition('=')
                if not equals:
                    fail("Mangled Argument Clinic marker line: {!r}".format(line))
                d[name.strip()] = value.strip()
            if self.verify:
                if 'input' in d:
                    checksum = d['output']
                    input_checksum = d['input']
                else:
                    checksum = d['checksum']
                    input_checksum = None
                computed = compute_checksum(output, len(checksum))
                if checksum != computed:
                    fail("Checksum mismatch!\nExpected: {}\nComputed: {}\n"
                         "Suggested fix: remove all generated code including "
                         "the end marker,\n"
                         "or use the '-f' option."
                         .format(checksum, computed))
        else:
            # put back output: no checksum line was found, so the
            # scanned lines were not generated output after all.
            output_lines = output.splitlines(keepends=True)
            self.line_number -= len(output_lines)
            self.input.extend(reversed(output_lines))
            output = None
        return Block(input_output(), dsl_name, output=output)
class BlockPrinter:
    """Writes Blocks back out, re-adding markers, body prefixes,
    generated output, and the checksum line."""

    def __init__(self, language, f=None):
        self.language = language
        # Accumulates all printed text; defaults to an in-memory buffer.
        self.f = f or io.StringIO()

    def print_block(self, block):
        """Write *block* to self.f in its on-disk representation."""
        input = block.input
        output = block.output
        dsl_name = block.dsl_name
        write = self.f.write
        assert not ((dsl_name == None) ^ (output == None)), "you must specify dsl_name and output together, dsl_name " + repr(dsl_name)
        # Verbatim text is written through untouched.
        if not dsl_name:
            write(input)
            return
        write(self.language.start_line.format(dsl_name=dsl_name))
        write("\n")
        body_prefix = self.language.body_prefix.format(dsl_name=dsl_name)
        if not body_prefix:
            write(input)
        else:
            # Re-prefix every body line (e.g. "#" for Python blocks).
            for line in input.split('\n'):
                write(body_prefix)
                write(line)
                write("\n")
        write(self.language.stop_line.format(dsl_name=dsl_name))
        write("\n")
        # ''.join on a str reproduces it; kept for token fidelity.
        input = ''.join(block.input)
        output = ''.join(block.output)
        if output:
            if not output.endswith('\n'):
                output += '\n'
            write(output)
        # Record checksums of both output and input so later runs can
        # detect hand-edits to the generated code.
        arguments="output={} input={}".format(compute_checksum(output, 16), compute_checksum(input, 16))
        write(self.language.checksum_line.format(dsl_name=dsl_name, arguments=arguments))
        write("\n")

    def write(self, text):
        self.f.write(text)
class BufferSeries:
    """
    A lazily-growing sequence of text accumulators ("defaultlist").

    Indexing a slot that doesn't exist yet grows the underlying list
    until it does, so o[n] always works.  Negative indices address
    real items stored before index 0: o[-1] is the element
    immediately preceding o[0].
    """
    def __init__(self):
        self._start = 0
        self._array = []
        self._constructor = _text_accumulator

    def __getitem__(self, i):
        i -= self._start
        if i < 0:
            # Grow on the left: shift the origin and prepend fresh slots.
            self._start += i
            self._array = [self._constructor() for _ in range(-i)] + self._array
            i = 0
        # Grow on the right until the requested slot exists.
        while i >= len(self._array):
            self._array.append(self._constructor())
        return self._array[i]

    def clear(self):
        for accumulator in self._array:
            accumulator._text.clear()

    def dump(self):
        return "".join(accumulator.output() for accumulator in self._array)
class Destination:
    """A named sink for generated output.

    type is one of:
      'buffer'   -- accumulate in memory (dumped at end of file)
      'file'     -- write to a separate file (one extra arg: a
                    filename template)
      'suppress' -- discard the output
    """
    def __init__(self, name, type, clinic, *args):
        self.name = name
        self.type = type
        self.clinic = clinic
        valid_types = ('buffer', 'file', 'suppress')
        if type not in valid_types:
            fail("Invalid destination type " + repr(type) + " for " + name + " , must be " + ', '.join(valid_types))
        # Only 'file' destinations take an extra argument (the template).
        extra_arguments = 1 if type == "file" else 0
        if len(args) < extra_arguments:
            fail("Not enough arguments for destination " + name + " new " + type)
        if len(args) > extra_arguments:
            fail("Too many arguments for destination " + name + " new " + type)
        if type == 'file':
            # Build substitution variables for the filename template.
            d = {}
            filename = clinic.filename
            d['path'] = filename
            dirname, basename = os.path.split(filename)
            if not dirname:
                dirname = '.'
            d['dirname'] = dirname
            d['basename'] = basename
            # NOTE(review): splitext is applied to the full path, so
            # 'basename_root' still contains the directory component —
            # confirm this is intended (vs. splitext(basename)).
            d['basename_root'], d['basename_extension'] = os.path.splitext(filename)
            self.filename = args[0].format_map(d)
        self.buffers = BufferSeries()

    def __repr__(self):
        if self.type == 'file':
            file_repr = " " + repr(self.filename)
        else:
            file_repr = ''
        return "".join(("<Destination ", self.name, " ", self.type, file_repr, ">"))

    def clear(self):
        # Only in-memory buffers can be cleared.
        if self.type != 'buffer':
            fail("Can't clear destination" + self.name + " , it's not of type buffer")
        self.buffers.clear()

    def dump(self):
        return self.buffers.dump()
# maps strings to Language objects.
# "languages" maps the name of the language ("C", "Python").
# "extensions" maps the file extension ("c", "py").
languages = { 'C': CLanguage, 'Python': PythonLanguage }
extensions = { name: CLanguage for name in "c cc cpp cxx h hh hpp hxx".split() }
extensions['py'] = PythonLanguage

# maps strings to callables.
# these callables must be of the form:
#   def foo(name, default, *, ...)
# The callable may have any number of keyword-only parameters.
# The callable must return a CConverter object.
# The callable should not call builtins.print.
converters = {}

# maps strings to callables.
# these callables follow the same rules as those for "converters" above.
# note however that they will never be called with keyword-only parameters.
legacy_converters = {}

# maps strings to callables.
# these callables must be of the form:
#   def foo(*, ...)
# The callable may have any number of keyword-only parameters.
# The callable must return a CConverter object.
# The callable should not call builtins.print.
return_converters = {}

# Module-level reference to the most recently created Clinic object
# (set by Clinic.__init__).
clinic = None
class Clinic:
    """Drives processing of one file: owns the Language, the per-DSL
    parsers, the output destinations, and the presets that map each
    output slot to a destination buffer."""

    # Built-in destination presets, parsed in __init__.  Each "preset"
    # line starts a named preset; subsequent "slot destination [index]"
    # lines route that slot's output.  "everything" routes all slots.
    presets_text = """
preset block
everything block
methoddef_ifndef buffer 1
docstring_prototype suppress
parser_prototype suppress
cpp_if suppress
cpp_endif suppress
preset original
everything block
methoddef_ifndef buffer 1
docstring_prototype suppress
parser_prototype suppress
cpp_if suppress
cpp_endif suppress
preset file
everything file
methoddef_ifndef file 1
docstring_prototype suppress
parser_prototype suppress
impl_definition block
preset buffer
everything buffer
methoddef_ifndef buffer 1
impl_definition block
docstring_prototype suppress
impl_prototype suppress
parser_prototype suppress
preset partial-buffer
everything buffer
methoddef_ifndef buffer 1
docstring_prototype block
impl_prototype suppress
methoddef_define block
parser_prototype block
impl_definition block
"""

    def __init__(self, language, printer=None, *, force=False, verify=True, filename=None):
        # maps strings to Parser objects.
        # (instantiated from the "parsers" global.)
        self.parsers = {}
        self.language = language
        if printer:
            fail("Custom printers are broken right now")
        self.printer = printer or BlockPrinter(language)
        self.verify = verify
        self.force = force
        self.filename = filename
        self.modules = collections.OrderedDict()
        self.classes = collections.OrderedDict()
        self.functions = []
        self.line_prefix = self.line_suffix = ''
        self.destinations = {}
        self.add_destination("block", "buffer")
        self.add_destination("suppress", "suppress")
        self.add_destination("buffer", "buffer")
        # The "file" destination needs a filename to template from.
        if filename:
            self.add_destination("file", "file", "{dirname}/clinic/{basename}.h")
        d = self.get_destination_buffer
        # Default routing of each output slot to a destination buffer.
        self.destination_buffers = collections.OrderedDict((
            ('cpp_if', d('file')),
            ('docstring_prototype', d('suppress')),
            ('docstring_definition', d('file')),
            ('methoddef_define', d('file')),
            ('impl_prototype', d('file')),
            ('parser_prototype', d('suppress')),
            ('parser_definition', d('file')),
            ('cpp_endif', d('file')),
            ('methoddef_ifndef', d('file', 1)),
            ('impl_definition', d('block')),
        ))
        self.destination_buffers_stack = []
        # Symbols already emitted in an "#ifndef METHODDEF" guard.
        self.ifndef_symbols = set()
        self.presets = {}
        preset = None
        # Parse presets_text (format described above).
        for line in self.presets_text.strip().split('\n'):
            line = line.strip()
            if not line:
                continue
            name, value, *options = line.split()
            if name == 'preset':
                self.presets[value] = preset = collections.OrderedDict()
                continue
            if len(options):
                index = int(options[0])
            else:
                index = 0
            buffer = self.get_destination_buffer(value, index)
            if name == 'everything':
                # note: rebinds 'name' in the inner loop, then continues,
                # so the assert below is never reached on this path.
                for name in self.destination_buffers:
                    preset[name] = buffer
                continue
            assert name in self.destination_buffers
            preset[name] = buffer
        # Publish this instance as the module-global "clinic".
        global clinic
        clinic = self

    def add_destination(self, name, type, *args):
        """Register a new Destination; names must be unique."""
        if name in self.destinations:
            fail("Destination already exists: " + repr(name))
        self.destinations[name] = Destination(name, type, self, *args)

    def get_destination(self, name):
        d = self.destinations.get(name)
        if not d:
            fail("Destination does not exist: " + repr(name))
        return d

    def get_destination_buffer(self, name, item=0):
        """Return buffer *item* of destination *name*."""
        d = self.get_destination(name)
        return d.buffers[item]

    def parse(self, input):
        """Parse *input*, process each Clinic block, and return the
        rewritten file text (also flushing 'file' destinations)."""
        printer = self.printer
        self.block_parser = BlockParser(input, self.language, verify=self.verify)
        for block in self.block_parser:
            dsl_name = block.dsl_name
            if dsl_name:
                # Lazily instantiate the DSL parser on first use.
                if dsl_name not in self.parsers:
                    assert dsl_name in parsers, "No parser to handle {!r} block.".format(dsl_name)
                    self.parsers[dsl_name] = parsers[dsl_name](self)
                parser = self.parsers[dsl_name]
                try:
                    parser.parse(block)
                except Exception:
                    fail('Exception raised during parsing:\n' +
                         traceback.format_exc().rstrip())
            printer.print_block(block)
        # NOTE(review): second_pass_replacements is never populated in
        # this code path — the rewrite loop below appears inert; confirm.
        second_pass_replacements = {}
        # these are destinations not buffers
        for name, destination in self.destinations.items():
            if destination.type == 'suppress':
                continue
            output = destination.dump()
            if output:
                block = Block("", dsl_name="clinic", output=output)
                if destination.type == 'buffer':
                    # Leftover buffered output is appended to the file end.
                    block.input = "dump " + name + "\n"
                    warn("Destination buffer " + repr(name) + " not empty at end of file, emptying.")
                    printer.write("\n")
                    printer.print_block(block)
                    continue
                if destination.type == 'file':
                    try:
                        dirname = os.path.dirname(destination.filename)
                        try:
                            os.makedirs(dirname)
                        except FileExistsError:
                            if not os.path.isdir(dirname):
                                fail("Can't write to destination {}, "
                                     "can't make directory {}!".format(
                                     destination.filename, dirname))
                        if self.verify:
                            # Refuse to overwrite a hand-edited output file.
                            with open(destination.filename, "rt") as f:
                                parser_2 = BlockParser(f.read(), language=self.language)
                                blocks = list(parser_2)
                                if (len(blocks) != 1) or (blocks[0].input != 'preserve\n'):
                                    fail("Modified destination file " + repr(destination.filename) + ", not overwriting!")
                    except FileNotFoundError:
                        pass
                    block.input = 'preserve\n'
                    printer_2 = BlockPrinter(self.language)
                    printer_2.print_block(block)
                    with open(destination.filename, "wt") as f:
                        f.write(printer_2.f.getvalue())
                    continue
        text = printer.f.getvalue()
        if second_pass_replacements:
            printer_2 = BlockPrinter(self.language)
            parser_2 = BlockParser(text, self.language)
            changed = False
            for block in parser_2:
                if block.dsl_name:
                    for id, replacement in second_pass_replacements.items():
                        if id in block.output:
                            changed = True
                            block.output = block.output.replace(id, replacement)
                printer_2.print_block(block)
            if changed:
                text = printer_2.f.getvalue()
        return text

    def _module_and_class(self, fields):
        """
        fields should be an iterable of field names.
        returns a tuple of (module, class).
        the module object could actually be self (a clinic object).
        this function is only ever used to find the parent of where
        a new class/module should go.
        """
        in_classes = False
        parent = module = self
        cls = None
        so_far = []
        for field in fields:
            so_far.append(field)
            if not in_classes:
                # Walk modules first; the first miss switches to classes.
                child = parent.modules.get(field)
                if child:
                    parent = module = child
                    continue
                in_classes = True
            if not hasattr(parent, 'classes'):
                return module, cls
            child = parent.classes.get(field)
            if not child:
                fail('Parent class or module ' + '.'.join(so_far) + " does not exist.")
            cls = parent = child
        return module, cls
def parse_file(filename, *, force=False, verify=True, output=None, encoding='utf-8'):
    """
    Run Argument Clinic over a single file.

    filename -- file to process; its extension selects the Language.
    force    -- rewrite the file even if the output is unchanged.
    verify   -- passed through to the Clinic object (checksum verification).
    output   -- optional path to write to instead of rewriting *filename*.
    encoding -- encoding used to read and write the file.
    """
    extension = os.path.splitext(filename)[1][1:]
    if not extension:
        fail("Can't extract file type for file " + repr(filename))

    try:
        language = extensions[extension](filename)
    except KeyError:
        fail("Can't identify file type for file " + repr(filename))

    with open(filename, 'r', encoding=encoding) as f:
        raw = f.read()

    # exit quickly if there are no clinic markers in the file
    find_start_re = BlockParser("", language).find_start_re
    if not find_start_re.search(raw):
        return

    clinic = Clinic(language, force=force, verify=verify, filename=filename)
    cooked = clinic.parse(raw)
    if (cooked == raw) and not force:
        return

    directory = os.path.dirname(filename) or '.'

    # Write to a temporary file in the same directory, then atomically
    # replace the target, so an interrupted run can't leave a
    # half-written file behind.
    with tempfile.TemporaryDirectory(prefix="clinic", dir=directory) as tmpdir:
        encoded = cooked.encode(encoding)  # renamed: don't shadow builtin 'bytes'
        tmpfilename = os.path.join(tmpdir, os.path.basename(filename))
        with open(tmpfilename, "wb") as f:
            f.write(encoded)
        os.replace(tmpfilename, output or filename)
def compute_checksum(input, length=None):
    """Return the SHA-1 hex digest of *input* (or '' if falsy),
    truncated to *length* characters when *length* is truthy."""
    text = input or ''
    digest = hashlib.sha1(text.encode('utf-8')).hexdigest()
    return digest[:length] if length else digest
class PythonParser:
    """Block parser for the "python" DSL: runs the block's input as
    Python code and captures whatever it prints as the block's output."""

    def __init__(self, clinic):
        pass

    def parse(self, block):
        """Exec block.input with stdout redirected into block.output."""
        capture = io.StringIO()
        with OverrideStdioWith(capture):
            exec(block.input)
        block.output = capture.getvalue()
class Module:
    """One module node in the module/class tree Clinic builds while parsing."""

    def __init__(self, name, module=None):
        self.name = name
        # A module's parent is its enclosing module (None at top level).
        self.module = self.parent = module
        # Children, keyed by name, in declaration order.
        self.modules = collections.OrderedDict()
        self.classes = collections.OrderedDict()
        self.functions = []

    def __repr__(self):
        return "<clinic.Module " + repr(self.name) + " at " + str(id(self)) + ">"
class Class:
    """One class (extension type) node in the module/class tree."""

    def __init__(self, name, module=None, cls=None, typedef=None, type_object=None):
        self.name = name
        self.module = module
        self.cls = cls
        self.typedef = typedef
        self.type_object = type_object
        # A nested class hangs off its enclosing class; otherwise its module.
        self.parent = cls or module
        # Nested classes, keyed by name, in declaration order.
        self.classes = collections.OrderedDict()
        self.functions = []

    def __repr__(self):
        return "<clinic.Class " + repr(self.name) + " at " + str(id(self)) + ">"
# Dunder (special) methods that Argument Clinic does not support.
# Stored as a set of method names for fast membership testing.
unsupported_special_methods = set("""
__abs__
__add__
__and__
__bytes__
__call__
__complex__
__delitem__
__divmod__
__eq__
__float__
__floordiv__
__ge__
__getattr__
__getattribute__
__getitem__
__gt__
__hash__
__iadd__
__iand__
__ifloordiv__
__ilshift__
__imatmul__
__imod__
__imul__
__index__
__int__
__invert__
__ior__
__ipow__
__irshift__
__isub__
__iter__
__itruediv__
__ixor__
__le__
__len__
__lshift__
__lt__
__matmul__
__mod__
__mul__
__neg__
__new__
__next__
__or__
__pos__
__pow__
__radd__
__rand__
__rdivmod__
__repr__
__rfloordiv__
__rlshift__
__rmatmul__
__rmod__
__rmul__
__ror__
__rpow__
__rrshift__
__rshift__
__rsub__
__rtruediv__
__rxor__
__setattr__
__setitem__
__str__
__sub__
__truediv__
__xor__
""".strip().split())
# The "kinds" of functions Argument Clinic understands.  Each constant's
# value is simply its own name as a string.
INVALID, CALLABLE, STATIC_METHOD, CLASS_METHOD, METHOD_INIT, METHOD_NEW = """
INVALID, CALLABLE, STATIC_METHOD, CLASS_METHOD, METHOD_INIT, METHOD_NEW
""".replace(",", "").strip().split()
class Function:
    """
    Mutable duck type for inspect.Function.

    docstring - a str containing
        * embedded line breaks
        * text outdented to the left margin
        * no trailing whitespace.
        It will always be true that
            (not docstring) or ((not docstring[0].isspace()) and (docstring.rstrip() == docstring))
    """

    def __init__(self, parameters=None, *, name,
                 module, cls=None, c_basename=None,
                 full_name=None,
                 return_converter, return_annotation=inspect.Signature.empty,
                 docstring=None, kind=CALLABLE, coexist=False,
                 docstring_only=False):
        self.parameters = parameters or collections.OrderedDict()
        self.return_annotation = return_annotation
        self.name = name
        self.full_name = full_name
        self.module = module
        self.cls = cls
        # Parent is the enclosing class when there is one, else the module.
        self.parent = cls or module
        self.c_basename = c_basename
        self.return_converter = return_converter
        self.docstring = docstring or ''
        self.kind = kind        # one of INVALID/CALLABLE/.../METHOD_NEW
        self.coexist = coexist  # emit METH_COEXIST in the methoddef flags?
        self.self_converter = None
        # docstring_only means "don't generate a machine-readable
        # signature, just a normal docstring".  it's True for
        # functions with optional groups because we can't represent
        # those accurately with inspect.Signature in 3.4.
        self.docstring_only = docstring_only
        self.rendered_parameters = None

    # Cache for the render_parameters property below.
    __render_parameters__ = None

    @property
    def render_parameters(self):
        # Lazily-built copies of the parameters, with each converter's
        # pre_render() hook invoked; cached after the first access.
        if not self.__render_parameters__:
            self.__render_parameters__ = l = []
            for p in self.parameters.values():
                p = p.copy()
                p.converter.pre_render()
                l.append(p)
        return self.__render_parameters__

    @property
    def methoddef_flags(self):
        # METH_* flags for this function's PyMethodDef entry,
        # or None for __init__/__new__ (which don't get one).
        if self.kind in (METHOD_INIT, METHOD_NEW):
            return None
        flags = []
        if self.kind == CLASS_METHOD:
            flags.append('METH_CLASS')
        elif self.kind == STATIC_METHOD:
            flags.append('METH_STATIC')
        else:
            assert self.kind == CALLABLE, "unknown kind: " + repr(self.kind)
        if self.coexist:
            flags.append('METH_COEXIST')
        return '|'.join(flags)

    def __repr__(self):
        return '<clinic.Function ' + self.name + '>'

    def copy(self, **overrides):
        # Duplicate this Function, applying *overrides*; the parameters
        # are copied too, each re-pointed at the new Function object.
        kwargs = {
            'name': self.name, 'module': self.module, 'parameters': self.parameters,
            'cls': self.cls, 'c_basename': self.c_basename,
            'full_name': self.full_name,
            'return_converter': self.return_converter, 'return_annotation': self.return_annotation,
            'docstring': self.docstring, 'kind': self.kind, 'coexist': self.coexist,
            'docstring_only': self.docstring_only,
        }
        kwargs.update(overrides)
        f = Function(**kwargs)

        parameters = collections.OrderedDict()
        for name, value in f.parameters.items():
            value = value.copy(function=f)
            parameters[name] = value
        f.parameters = parameters
        return f
class Parameter:
    """
    Mutable duck type of inspect.Parameter.
    """

    def __init__(self, name, kind, *, default=inspect.Parameter.empty,
                 function, converter, annotation=inspect.Parameter.empty,
                 docstring=None, group=0):
        self.name = name
        self.kind = kind
        self.default = default
        self.function = function
        self.converter = converter
        self.annotation = annotation
        self.docstring = docstring if docstring else ''
        self.group = group

    def __repr__(self):
        return '<clinic.Parameter ' + self.name + '>'

    def is_keyword_only(self):
        """True if this parameter is keyword-only."""
        return self.kind == inspect.Parameter.KEYWORD_ONLY

    def is_positional_only(self):
        """True if this parameter is positional-only."""
        return self.kind == inspect.Parameter.POSITIONAL_ONLY

    def is_optional(self):
        """True if this parameter has a default value."""
        return self.default is not unspecified

    def copy(self, **overrides):
        """Return a duplicate of this parameter with *overrides* applied.

        Unless a replacement converter is given, the converter is
        shallow-copied and re-pointed at the (possibly new) function.
        """
        attrs = {
            'name': self.name,
            'kind': self.kind,
            'default': self.default,
            'function': self.function,
            'converter': self.converter,
            'annotation': self.annotation,
            'docstring': self.docstring,
            'group': self.group,
        }
        attrs.update(overrides)
        if 'converter' not in overrides:
            cloned = copy.copy(self.converter)
            cloned.function = attrs['function']
            attrs['converter'] = cloned
        return Parameter(**attrs)

    def get_displayname(self, i):
        """Name used for this parameter in generated C error messages.

        *i* is the parameter's 1-based position; 0 means "the only argument".
        """
        if i == 0:
            return '"argument"'
        if not self.is_positional_only():
            return '''"argument '{}'"'''.format(self.name)
        else:
            return '"argument {}"'.format(i)
class LandMine:
    # Detonates (via fail()) on any attribute access except __repr__ and
    # __message__.  Used to catch code that pokes at an object before it
    # is allowed to.
    def __init__(self, message):
        self.__message__ = message

    def __repr__(self):
        return '<LandMine ' + repr(self.__message__) + ">"

    def __getattribute__(self, name):
        if name not in ('__repr__', '__message__'):
            # raise RuntimeError(repr(name))
            fail("Stepped on a land mine, trying to access attribute " + repr(name) + ":\n" + self.__message__)
        return super().__getattribute__(name)
def add_c_converter(f, name=None):
    """Register *f* in the global converters registry.

    The registry key is the class name with its '_converter' suffix
    stripped; names without that suffix are returned unregistered.
    """
    if not name:
        name = f.__name__
    suffix = '_converter'
    if not name.endswith(suffix):
        return f
    converters[name[:-len(suffix)]] = f
    return f
def add_default_legacy_c_converter(cls):
    # Automatically register this converter class for its default format
    # unit as well (but don't stomp on an existing registration, in case
    # you subclass).
    format_unit = cls.format_unit
    if format_unit in ('O&', ''):
        return cls
    if format_unit not in legacy_converters:
        legacy_converters[format_unit] = cls
    return cls
def add_legacy_c_converter(format_unit, **kwargs):
    """
    Adds a legacy converter.
    """
    def closure(f):
        # Bind any extra keyword arguments up front, then register the
        # result under the format unit (if one was given).
        added_f = functools.partial(f, **kwargs) if kwargs else f
        if format_unit:
            legacy_converters[format_unit] = added_f
        return f
    return closure
class CConverterAutoRegister(type):
    # Metaclass: every CConverter subclass is automatically registered
    # in the converter registries the moment the class is created.
    def __init__(cls, name, bases, classdict):
        add_c_converter(cls)
        add_default_legacy_c_converter(cls)
class CConverter(metaclass=CConverterAutoRegister):
    """
    For the init function, self, name, function, and default
    must be keyword-or-positional parameters.  All other
    parameters must be keyword-only.
    """

    # The C name to use for this variable.
    name = None

    # The Python name to use for this variable.
    py_name = None

    # The C type to use for this variable.
    # 'type' should be a Python string specifying the type, e.g. "int".
    # If this is a pointer type, the type string should end with ' *'.
    type = None

    # The Python default value for this parameter, as a Python value.
    # Or the magic value "unspecified" if there is no default.
    # Or the magic value "unknown" if this value is a cannot be evaluated
    # at Argument-Clinic-preprocessing time (but is presumed to be valid
    # at runtime).
    default = unspecified

    # If not None, default must be isinstance() of this type.
    # (You can also specify a tuple of types.)
    default_type = None

    # "default" converted into a C value, as a string.
    # Or None if there is no default.
    c_default = None

    # "default" converted into a Python value, as a string.
    # Or None if there is no default.
    py_default = None

    # The default value used to initialize the C variable when
    # there is no default, but not specifying a default may
    # result in an "uninitialized variable" warning.  This can
    # easily happen when using option groups--although
    # properly-written code won't actually use the variable,
    # the variable does get passed in to the _impl.  (Ah, if
    # only dataflow analysis could inline the static function!)
    #
    # This value is specified as a string.
    # Every non-abstract subclass should supply a valid value.
    c_ignored_default = 'NULL'

    # The C converter *function* to be used, if any.
    # (If this is not None, format_unit must be 'O&'.)
    converter = None

    # Should Argument Clinic add a '&' before the name of
    # the variable when passing it into the _impl function?
    impl_by_reference = False

    # Should Argument Clinic add a '&' before the name of
    # the variable when passing it into PyArg_ParseTuple (AndKeywords)?
    parse_by_reference = True

    #############################################################
    #############################################################
    ## You shouldn't need to read anything below this point to ##
    ## write your own converter functions.                     ##
    #############################################################
    #############################################################

    # The "format unit" to specify for this variable when
    # parsing arguments using PyArg_ParseTuple (AndKeywords).
    # Custom converters should always use the default value of 'O&'.
    format_unit = 'O&'

    # What encoding do we want for this variable?  Only used
    # by format units starting with 'e'.
    encoding = None

    # Should this object be required to be a subclass of a specific type?
    # If not None, should be a string representing a pointer to a
    # PyTypeObject (e.g. "&PyUnicode_Type").
    # Only used by the 'O!' format unit (and the "object" converter).
    subclass_of = None

    # Do we want an adjacent '_length' variable for this variable?
    # Only used by format units ending with '#'.
    length = False

    # Should we show this parameter in the generated
    # __text_signature__? This is *almost* always True.
    # (It's only False for __new__, __init__, and METH_STATIC functions.)
    show_in_signature = True

    # Overrides the name used in a text signature.
    # The name used for a "self" parameter must be one of
    # self, type, or module; however users can set their own.
    # This lets the self_converter overrule the user-settable
    # name, *just* for the text signature.
    # Only set by self_converter.
    signature_name = None

    # keep in sync with self_converter.__init__!
    def __init__(self, name, py_name, function, default=unspecified, *, c_default=None, py_default=None, annotation=unspecified, **kwargs):
        # Validate the default value (if any) against default_type, then
        # forward any remaining keyword arguments to converter_init().
        self.name = ensure_legal_c_identifier(name)
        self.py_name = py_name

        if default is not unspecified:
            if self.default_type and not isinstance(default, (self.default_type, Unknown)):
                if isinstance(self.default_type, type):
                    types_str = self.default_type.__name__
                else:
                    types_str = ', '.join((cls.__name__ for cls in self.default_type))
                fail("{}: default value {!r} for field {} is not of type {}".format(
                    self.__class__.__name__, default, name, types_str))
            self.default = default

        if c_default:
            self.c_default = c_default
        if py_default:
            self.py_default = py_default

        if annotation != unspecified:
            fail("The 'annotation' parameter is not currently permitted.")

        # this is deliberate, to prevent you from caching information
        # about the function in the init.
        # (that breaks if we get cloned.)
        # so after this change we will noisily fail.
        self.function = LandMine("Don't access members of self.function inside converter_init!")
        self.converter_init(**kwargs)
        self.function = function

    def converter_init(self):
        # Subclass hook: extra keyword arguments from __init__ land here.
        pass

    def is_optional(self):
        # True if this parameter has a Clinic-level default value.
        return (self.default is not unspecified)

    def _render_self(self, parameter, data):
        # Render the pieces of this parameter that relate to the _impl call.
        self.parameter = parameter
        name = self.name

        # impl_arguments
        s = ("&" if self.impl_by_reference else "") + name
        data.impl_arguments.append(s)
        if self.length:
            data.impl_arguments.append(self.length_name())

        # impl_parameters
        data.impl_parameters.append(self.simple_declaration(by_reference=self.impl_by_reference))
        if self.length:
            data.impl_parameters.append("Py_ssize_clean_t " + self.length_name())

    def _render_non_self(self, parameter, data):
        # Render everything else: declarations, initializers, modifications,
        # keywords, format units, PyArg_Parse arguments, and cleanup code.
        self.parameter = parameter
        name = self.name

        # declarations
        d = self.declaration()
        data.declarations.append(d)

        # initializers
        initializers = self.initialize()
        if initializers:
            data.initializers.append('/* initializers for ' + name + ' */\n' + initializers.rstrip())

        # modifications
        modifications = self.modify()
        if modifications:
            data.modifications.append('/* modifications for ' + name + ' */\n' + modifications.rstrip())

        # keywords
        if parameter.is_positional_only():
            data.keywords.append('')
        else:
            data.keywords.append(parameter.name)

        # format_units
        if self.is_optional() and '|' not in data.format_units:
            data.format_units.append('|')
        if parameter.is_keyword_only() and '$' not in data.format_units:
            data.format_units.append('$')
        data.format_units.append(self.format_unit)

        # parse_arguments
        self.parse_argument(data.parse_arguments)

        # cleanup
        cleanup = self.cleanup()
        if cleanup:
            data.cleanup.append('/* Cleanup for ' + name + ' */\n' + cleanup.rstrip() + "\n")

    def render(self, parameter, data):
        """
        parameter is a clinic.Parameter instance.
        data is a CRenderData instance.
        """
        self._render_self(parameter, data)
        self._render_non_self(parameter, data)

    def length_name(self):
        """Computes the name of the associated "length" variable."""
        if not self.length:
            return None
        return self.name + "_length"

    # Why is this one broken out separately?
    # For "positional-only" function parsing,
    # which generates a bunch of PyArg_ParseTuple calls.
    def parse_argument(self, list):
        assert not (self.converter and self.encoding)
        if self.format_unit == 'O&':
            assert self.converter
            list.append(self.converter)

        if self.encoding:
            list.append(c_repr(self.encoding))
        elif self.subclass_of:
            list.append(self.subclass_of)

        s = ("&" if self.parse_by_reference else "") + self.name
        list.append(s)

        if self.length:
            list.append("&" + self.length_name())

    #
    # All the functions after here are intended as extension points.
    #

    def simple_declaration(self, by_reference=False):
        """
        Computes the basic declaration of the variable.
        Used in computing the prototype declaration and the
        variable declaration.
        """
        prototype = [self.type]
        if by_reference or not self.type.endswith('*'):
            prototype.append(" ")
        if by_reference:
            prototype.append('*')
        prototype.append(self.name)
        return "".join(prototype)

    def declaration(self):
        """
        The C statement to declare this variable.
        """
        declaration = [self.simple_declaration()]
        default = self.c_default
        if not default and self.parameter.group:
            default = self.c_ignored_default
        if default:
            declaration.append(" = ")
            declaration.append(default)
        declaration.append(";")
        if self.length:
            declaration.append('\nPy_ssize_clean_t ')
            declaration.append(self.length_name())
            declaration.append(';')
        return "".join(declaration)

    def initialize(self):
        """
        The C statements required to set up this variable before parsing.
        Returns a string containing this code indented at column 0.
        If no initialization is necessary, returns an empty string.
        """
        return ""

    def modify(self):
        """
        The C statements required to modify this variable after parsing.
        Returns a string containing this code indented at column 0.
        If no initialization is necessary, returns an empty string.
        """
        return ""

    def cleanup(self):
        """
        The C statements required to clean up after this variable.
        Returns a string containing this code indented at column 0.
        If no cleanup is necessary, returns an empty string.
        """
        return ""

    def pre_render(self):
        """
        A second initialization function, like converter_init,
        called just before rendering.
        You are permitted to examine self.function here.
        """
        pass

    def parse_arg(self, argname, displayname):
        # Return a C code snippet that converts {argname} into this
        # parameter's C variable, or None if the format unit isn't one
        # handled here (subclasses handle their own units first).
        if self.format_unit == 'O&':
            return """
if (!{converter}({argname}, &{paramname})) {{{{
goto exit;
}}}}
""".format(argname=argname, paramname=self.name,
           converter=self.converter)
        if self.format_unit == 'O!':
            cast = '(%s)' % self.type if self.type != 'PyObject *' else ''
            if self.subclass_of in type_checks:
                typecheck, typename = type_checks[self.subclass_of]
                return """
if (!{typecheck}({argname})) {{{{
_PyArg_BadArgument("{{name}}", {displayname}, "{typename}", {argname});
goto exit;
}}}}
{paramname} = {cast}{argname};
""".format(argname=argname, paramname=self.name,
           displayname=displayname, typecheck=typecheck,
           typename=typename, cast=cast)
            return """
if (!PyObject_TypeCheck({argname}, {subclass_of})) {{{{
_PyArg_BadArgument("{{name}}", {displayname}, ({subclass_of})->tp_name, {argname});
goto exit;
}}}}
{paramname} = {cast}{argname};
""".format(argname=argname, paramname=self.name,
           subclass_of=self.subclass_of, cast=cast,
           displayname=displayname)
        if self.format_unit == 'O':
            cast = '(%s)' % self.type if self.type != 'PyObject *' else ''
            return """
{paramname} = {cast}{argname};
""".format(argname=argname, paramname=self.name, cast=cast)
        return None

    def set_template_dict(self, template_dict):
        # Subclass hook: add entries to the code-generation template dict.
        pass
# Maps a "subclass_of" PyTypeObject pointer expression to the pair
# (fast type-check function, human-readable type name) used when
# generating parsing code for the 'O!' format unit.
type_checks = {
    '&PyLong_Type': ('PyLong_Check', 'int'),
    '&PyTuple_Type': ('PyTuple_Check', 'tuple'),
    '&PyList_Type': ('PyList_Check', 'list'),
    '&PySet_Type': ('PySet_Check', 'set'),
    '&PyFrozenSet_Type': ('PyFrozenSet_Check', 'frozenset'),
    '&PyDict_Type': ('PyDict_Check', 'dict'),
    '&PyUnicode_Type': ('PyUnicode_Check', 'str'),
    '&PyBytes_Type': ('PyBytes_Check', 'bytes'),
    '&PyByteArray_Type': ('PyByteArray_Check', 'bytearray'),
}
class bool_converter(CConverter):
    """Converter for a C int holding a boolean ('p', or legacy 'i')."""
    type = 'int'
    default_type = bool
    format_unit = 'p'
    c_ignored_default = '0'

    def converter_init(self, *, accept={object}):
        # accept={int} selects the legacy 'i' format unit.
        if accept == {int}:
            self.format_unit = 'i'
        elif accept != {object}:
            fail("bool_converter: illegal 'accept' argument " + repr(accept))
        if self.default is not unspecified:
            self.default = bool(self.default)
            self.c_default = str(int(self.default))

    def parse_arg(self, argname, displayname):
        if self.format_unit == 'i':
            # XXX PyFloat_Check can be removed after the end of the
            # deprecation in _PyLong_FromNbIndexOrNbInt.
            return """
{paramname} = _PyLong_AsInt({argname});
if ({paramname} == -1 && PyErr_Occurred()) {{{{
goto exit;
}}}}
""".format(argname=argname, paramname=self.name)
        elif self.format_unit == 'p':
            return """
{paramname} = PyObject_IsTrue({argname});
if ({paramname} < 0) {{{{
goto exit;
}}}}
""".format(argname=argname, paramname=self.name)
        return super().parse_arg(argname, displayname)
class defining_class_converter(CConverter):
    """
    A special-case converter: the default converter used for the
    "defining class" (the PyTypeObject * that defines the method).
    It never appears in the text signature.
    """
    show_in_signature = False
    format_unit = ''
    type = 'PyTypeObject *'

    def converter_init(self, *, type=None):
        # Remember any explicitly requested C type.
        self.specified_type = type

    def render(self, parameter, data):
        # Only the _impl-side rendering applies; nothing is parsed.
        self._render_self(parameter, data)

    def set_template_dict(self, template_dict):
        template_dict['defining_class_name'] = self.name
class char_converter(CConverter):
    """Converter for a single C char, from a length-1 bytes/bytearray ('c')."""
    type = 'char'
    default_type = (bytes, bytearray)
    format_unit = 'c'
    c_ignored_default = "'\0'"

    def converter_init(self):
        if isinstance(self.default, self.default_type):
            if len(self.default) != 1:
                fail("char_converter: illegal default value " + repr(self.default))

            # repr() of a bytes object looks like b'...'; dropping the
            # leading 'b' yields a C character literal.
            self.c_default = repr(bytes(self.default))[1:]
            if self.c_default == '"\'"':
                # A single-quote byte must be escaped in a C char literal.
                self.c_default = r"'\''"

    def parse_arg(self, argname, displayname):
        if self.format_unit == 'c':
            return """
if (PyBytes_Check({argname}) && PyBytes_GET_SIZE({argname}) == 1) {{{{
{paramname} = PyBytes_AS_STRING({argname})[0];
}}}}
else if (PyByteArray_Check({argname}) && PyByteArray_GET_SIZE({argname}) == 1) {{{{
{paramname} = PyByteArray_AS_STRING({argname})[0];
}}}}
else {{{{
_PyArg_BadArgument("{{name}}", {displayname}, "a byte string of length 1", {argname});
goto exit;
}}}}
""".format(argname=argname, paramname=self.name,
           displayname=displayname)
        return super().parse_arg(argname, displayname)
@add_legacy_c_converter('B', bitwise=True)
class unsigned_char_converter(CConverter):
    """Converter for C unsigned char: range-checked 'b', or masked 'B'."""
    type = 'unsigned char'
    default_type = int
    format_unit = 'b'
    c_ignored_default = "'\0'"

    def converter_init(self, *, bitwise=False):
        # bitwise=True accepts any int and masks it (format unit 'B').
        if bitwise:
            self.format_unit = 'B'

    def parse_arg(self, argname, displayname):
        if self.format_unit == 'b':
            return """
{{{{
long ival = PyLong_AsLong({argname});
if (ival == -1 && PyErr_Occurred()) {{{{
goto exit;
}}}}
else if (ival < 0) {{{{
PyErr_SetString(PyExc_OverflowError,
"unsigned byte integer is less than minimum");
goto exit;
}}}}
else if (ival > UCHAR_MAX) {{{{
PyErr_SetString(PyExc_OverflowError,
"unsigned byte integer is greater than maximum");
goto exit;
}}}}
else {{{{
{paramname} = (unsigned char) ival;
}}}}
}}}}
""".format(argname=argname, paramname=self.name)
        elif self.format_unit == 'B':
            return """
{{{{
unsigned long ival = PyLong_AsUnsignedLongMask({argname});
if (ival == (unsigned long)-1 && PyErr_Occurred()) {{{{
goto exit;
}}}}
else {{{{
{paramname} = (unsigned char) ival;
}}}}
}}}}
""".format(argname=argname, paramname=self.name)
        return super().parse_arg(argname, displayname)
# "byte" is simply an alias for unsigned char.
class byte_converter(unsigned_char_converter): pass
class short_converter(CConverter):
    """Converter for C short, range-checked against SHRT_MIN/SHRT_MAX ('h')."""
    type = 'short'
    default_type = int
    format_unit = 'h'
    c_ignored_default = "0"

    def parse_arg(self, argname, displayname):
        if self.format_unit == 'h':
            return """
{{{{
long ival = PyLong_AsLong({argname});
if (ival == -1 && PyErr_Occurred()) {{{{
goto exit;
}}}}
else if (ival < SHRT_MIN) {{{{
PyErr_SetString(PyExc_OverflowError,
"signed short integer is less than minimum");
goto exit;
}}}}
else if (ival > SHRT_MAX) {{{{
PyErr_SetString(PyExc_OverflowError,
"signed short integer is greater than maximum");
goto exit;
}}}}
else {{{{
{paramname} = (short) ival;
}}}}
}}}}
""".format(argname=argname, paramname=self.name)
        return super().parse_arg(argname, displayname)
class unsigned_short_converter(CConverter):
    """Converter for C unsigned short: masked 'H', or a range-checked O& converter."""
    type = 'unsigned short'
    default_type = int
    c_ignored_default = "0"

    def converter_init(self, *, bitwise=False):
        if bitwise:
            self.format_unit = 'H'
        else:
            # Default: use the range-checking C converter (format unit O&).
            self.converter = '_PyLong_UnsignedShort_Converter'

    def parse_arg(self, argname, displayname):
        if self.format_unit == 'H':
            return """
{paramname} = (unsigned short)PyLong_AsUnsignedLongMask({argname});
if ({paramname} == (unsigned short)-1 && PyErr_Occurred()) {{{{
goto exit;
}}}}
""".format(argname=argname, paramname=self.name)
        return super().parse_arg(argname, displayname)
@add_legacy_c_converter('C', accept={str})
class int_converter(CConverter):
    """Converter for C int: a Python int ('i'), or with accept={str}
    a single unicode character ('C')."""
    type = 'int'
    default_type = int
    format_unit = 'i'
    c_ignored_default = "0"

    def converter_init(self, *, accept={int}, type=None):
        if accept == {str}:
            # Legacy 'C' format unit: a single unicode character.
            self.format_unit = 'C'
        elif accept != {int}:
            fail("int_converter: illegal 'accept' argument " + repr(accept))
        # Was "type != None"; use identity comparison for the None check.
        if type is not None:
            self.type = type

    def parse_arg(self, argname, displayname):
        if self.format_unit == 'i':
            return """
{paramname} = _PyLong_AsInt({argname});
if ({paramname} == -1 && PyErr_Occurred()) {{{{
goto exit;
}}}}
""".format(argname=argname, paramname=self.name)
        elif self.format_unit == 'C':
            return """
if (!PyUnicode_Check({argname})) {{{{
_PyArg_BadArgument("{{name}}", {displayname}, "a unicode character", {argname});
goto exit;
}}}}
if (PyUnicode_READY({argname})) {{{{
goto exit;
}}}}
if (PyUnicode_GET_LENGTH({argname}) != 1) {{{{
_PyArg_BadArgument("{{name}}", {displayname}, "a unicode character", {argname});
goto exit;
}}}}
{paramname} = PyUnicode_READ_CHAR({argname}, 0);
""".format(argname=argname, paramname=self.name,
           displayname=displayname)
        return super().parse_arg(argname, displayname)
class unsigned_int_converter(CConverter):
    """Converter for C unsigned int: masked 'I', or a range-checked O& converter."""
    type = 'unsigned int'
    default_type = int
    c_ignored_default = "0"

    def converter_init(self, *, bitwise=False):
        if bitwise:
            self.format_unit = 'I'
        else:
            # Default: use the range-checking C converter (format unit O&).
            self.converter = '_PyLong_UnsignedInt_Converter'

    def parse_arg(self, argname, displayname):
        if self.format_unit == 'I':
            return """
{paramname} = (unsigned int)PyLong_AsUnsignedLongMask({argname});
if ({paramname} == (unsigned int)-1 && PyErr_Occurred()) {{{{
goto exit;
}}}}
""".format(argname=argname, paramname=self.name)
        return super().parse_arg(argname, displayname)
class long_converter(CConverter):
    """Converter for C long ('l')."""
    type = 'long'
    default_type = int
    format_unit = 'l'
    c_ignored_default = "0"

    def parse_arg(self, argname, displayname):
        if self.format_unit == 'l':
            return """
{paramname} = PyLong_AsLong({argname});
if ({paramname} == -1 && PyErr_Occurred()) {{{{
goto exit;
}}}}
""".format(argname=argname, paramname=self.name)
        return super().parse_arg(argname, displayname)
class unsigned_long_converter(CConverter):
    """Converter for C unsigned long: masked 'k', or a range-checked O& converter."""
    type = 'unsigned long'
    default_type = int
    c_ignored_default = "0"

    def converter_init(self, *, bitwise=False):
        if bitwise:
            self.format_unit = 'k'
        else:
            # Default: use the range-checking C converter (format unit O&).
            self.converter = '_PyLong_UnsignedLong_Converter'

    def parse_arg(self, argname, displayname):
        if self.format_unit == 'k':
            return """
if (!PyLong_Check({argname})) {{{{
_PyArg_BadArgument("{{name}}", {displayname}, "int", {argname});
goto exit;
}}}}
{paramname} = PyLong_AsUnsignedLongMask({argname});
""".format(argname=argname, paramname=self.name,
           displayname=displayname)
        return super().parse_arg(argname, displayname)
class long_long_converter(CConverter):
    """Converter for C long long ('L')."""
    type = 'long long'
    default_type = int
    format_unit = 'L'
    c_ignored_default = "0"

    def parse_arg(self, argname, displayname):
        if self.format_unit == 'L':
            return """
{paramname} = PyLong_AsLongLong({argname});
if ({paramname} == -1 && PyErr_Occurred()) {{{{
goto exit;
}}}}
""".format(argname=argname, paramname=self.name)
        return super().parse_arg(argname, displayname)
class unsigned_long_long_converter(CConverter):
    """Converter for C unsigned long long: masked 'K', or a range-checked O& converter."""
    type = 'unsigned long long'
    default_type = int
    c_ignored_default = "0"

    def converter_init(self, *, bitwise=False):
        if bitwise:
            self.format_unit = 'K'
        else:
            # Default: use the range-checking C converter (format unit O&).
            self.converter = '_PyLong_UnsignedLongLong_Converter'

    def parse_arg(self, argname, displayname):
        if self.format_unit == 'K':
            return """
if (!PyLong_Check({argname})) {{{{
_PyArg_BadArgument("{{name}}", {displayname}, "int", {argname});
goto exit;
}}}}
{paramname} = PyLong_AsUnsignedLongLongMask({argname});
""".format(argname=argname, paramname=self.name,
           displayname=displayname)
        return super().parse_arg(argname, displayname)
class Py_ssize_t_converter(CConverter):
    """Converter for Py_ssize_t ('n'), optionally accepting None via an O& converter."""
    type = 'Py_ssize_t'
    c_ignored_default = "0"

    def converter_init(self, *, accept={int}):
        if accept == {int}:
            self.format_unit = 'n'
            self.default_type = int
        elif accept == {int, NoneType}:
            # None maps to "leave the variable at its default".
            self.converter = '_Py_convert_optional_to_ssize_t'
        else:
            fail("Py_ssize_t_converter: illegal 'accept' argument " + repr(accept))

    def parse_arg(self, argname, displayname):
        if self.format_unit == 'n':
            return """
{{{{
Py_ssize_t ival = -1;
PyObject *iobj = _PyNumber_Index({argname});
if (iobj != NULL) {{{{
ival = PyLong_AsSsize_t(iobj);
Py_DECREF(iobj);
}}}}
if (ival == -1 && PyErr_Occurred()) {{{{
goto exit;
}}}}
{paramname} = ival;
}}}}
""".format(argname=argname, paramname=self.name)
        return super().parse_arg(argname, displayname)
class slice_index_converter(CConverter):
    """Converter for a Py_ssize_t slice index: an int, and (by default)
    also None, handled via the _PyEval_SliceIndex* C converters."""
    type = 'Py_ssize_t'

    def converter_init(self, *, accept={int, NoneType}):
        if accept == {int, NoneType}:
            self.converter = '_PyEval_SliceIndex'
        elif accept == {int}:
            self.converter = '_PyEval_SliceIndexNotNone'
        else:
            fail("slice_index_converter: illegal 'accept' argument " + repr(accept))
class size_t_converter(CConverter):
    """Converter for C size_t via the _PyLong_Size_t_Converter C function (O&)."""
    type = 'size_t'
    converter = '_PyLong_Size_t_Converter'
    c_ignored_default = "0"

    def parse_arg(self, argname, displayname):
        # NOTE(review): this class sets a converter, so its format unit is
        # the default 'O&'; the 'n' branch below looks unreachable from
        # this class itself -- confirm before relying on it.
        if self.format_unit == 'n':
            return """
{paramname} = PyNumber_AsSsize_t({argname}, PyExc_OverflowError);
if ({paramname} == -1 && PyErr_Occurred()) {{{{
goto exit;
}}}}
""".format(argname=argname, paramname=self.name)
        return super().parse_arg(argname, displayname)
class float_converter(CConverter):
    """Converter for C float ('f')."""
    type = 'float'
    default_type = float
    format_unit = 'f'
    c_ignored_default = "0.0"

    def parse_arg(self, argname, displayname):
        if self.format_unit == 'f':
            return """
if (PyFloat_CheckExact({argname})) {{{{
{paramname} = (float) (PyFloat_AS_DOUBLE({argname}));
}}}}
else
{{{{
{paramname} = (float) PyFloat_AsDouble({argname});
if ({paramname} == -1.0 && PyErr_Occurred()) {{{{
goto exit;
}}}}
}}}}
""".format(argname=argname, paramname=self.name)
        return super().parse_arg(argname, displayname)
class double_converter(CConverter):
    """Converter for C double ('d')."""
    type = 'double'
    default_type = float
    format_unit = 'd'
    c_ignored_default = "0.0"

    def parse_arg(self, argname, displayname):
        if self.format_unit == 'd':
            return """
if (PyFloat_CheckExact({argname})) {{{{
{paramname} = PyFloat_AS_DOUBLE({argname});
}}}}
else
{{{{
{paramname} = PyFloat_AsDouble({argname});
if ({paramname} == -1.0 && PyErr_Occurred()) {{{{
goto exit;
}}}}
}}}}
""".format(argname=argname, paramname=self.name)
        return super().parse_arg(argname, displayname)
class Py_complex_converter(CConverter):
    """Converter for Py_complex ('D')."""
    type = 'Py_complex'
    default_type = complex
    format_unit = 'D'
    c_ignored_default = "{0.0, 0.0}"

    def parse_arg(self, argname, displayname):
        if self.format_unit == 'D':
            return """
{paramname} = PyComplex_AsCComplex({argname});
if (PyErr_Occurred()) {{{{
goto exit;
}}}}
""".format(argname=argname, paramname=self.name)
        return super().parse_arg(argname, displayname)
class object_converter(CConverter):
    """The default converter: a bare PyObject * ('O'), optionally with a
    type check ('O!') or a custom C converter function ('O&')."""
    type = 'PyObject *'
    format_unit = 'O'

    def converter_init(self, *, converter=None, type=None, subclass_of=None):
        if converter and subclass_of:
            fail("object: Cannot pass in both 'converter' and 'subclass_of'")
        if converter:
            self.format_unit = 'O&'
            self.converter = converter
        elif subclass_of:
            self.format_unit = 'O!'
            self.subclass_of = subclass_of
        if type is not None:
            self.type = type
#
# We define three conventions for buffer types in the 'accept' argument:
#
#   buffer  : any object supporting the buffer interface
#   rwbuffer: any object supporting the buffer interface, but must be writeable
#   robuffer: any object supporting the buffer interface, but must not be writeable
#

# Marker classes: used only as members of 'accept' sets, never instantiated.
class buffer: pass
class rwbuffer: pass
class robuffer: pass
def str_converter_key(types, encoding, zeroes):
    """Normalize (accept, encoding, zeroes) into a hashable registry key."""
    return frozenset(types), bool(encoding), bool(zeroes)
# Maps str_converter_key(...) keys to PyArg format units ('s', 'z', 'es#', ...).
str_converter_argument_map = {}
class str_converter(CConverter):
    """Converter for C strings (const char *): the s/z/es/et family of
    format units and their '#' (trailing-length) variants."""
    type = 'const char *'
    default_type = (str, Null, NoneType)
    format_unit = 's'

    def converter_init(self, *, accept={str}, encoding=None, zeroes=False):
        # Look up the format unit matching this combination of arguments.
        key = str_converter_key(accept, encoding, zeroes)
        format_unit = str_converter_argument_map.get(key)
        if not format_unit:
            fail("str_converter: illegal combination of arguments", key)

        self.format_unit = format_unit
        self.length = bool(zeroes)
        if encoding:
            if self.default not in (Null, None, unspecified):
                fail("str_converter: Argument Clinic doesn't support default values for encoded strings")
            self.encoding = encoding
            self.type = 'char *'
            # sorry, clinic can't support preallocated buffers
            # for es# and et#
            self.c_default = "NULL"
        if NoneType in accept and self.c_default == "Py_None":
            self.c_default = "NULL"

    def cleanup(self):
        # Encoded strings ('es'/'et') are allocated by PyArg_Parse* and
        # must be freed after use.
        if self.encoding:
            name = self.name
            return "".join(["if (", name, ") {\n PyMem_FREE(", name, ");\n}\n"])

    def parse_arg(self, argname, displayname):
        if self.format_unit == 's':
            return """
if (!PyUnicode_Check({argname})) {{{{
_PyArg_BadArgument("{{name}}", {displayname}, "str", {argname});
goto exit;
}}}}
Py_ssize_t {paramname}_length;
{paramname} = PyUnicode_AsUTF8AndSize({argname}, &{paramname}_length);
if ({paramname} == NULL) {{{{
goto exit;
}}}}
if (strlen({paramname}) != (size_t){paramname}_length) {{{{
PyErr_SetString(PyExc_ValueError, "embedded null character");
goto exit;
}}}}
""".format(argname=argname, paramname=self.name,
           displayname=displayname)
        if self.format_unit == 'z':
            return """
if ({argname} == Py_None) {{{{
{paramname} = NULL;
}}}}
else if (PyUnicode_Check({argname})) {{{{
Py_ssize_t {paramname}_length;
{paramname} = PyUnicode_AsUTF8AndSize({argname}, &{paramname}_length);
if ({paramname} == NULL) {{{{
goto exit;
}}}}
if (strlen({paramname}) != (size_t){paramname}_length) {{{{
PyErr_SetString(PyExc_ValueError, "embedded null character");
goto exit;
}}}}
}}}}
else {{{{
_PyArg_BadArgument("{{name}}", {displayname}, "str or None", {argname});
goto exit;
}}}}
""".format(argname=argname, paramname=self.name,
           displayname=displayname)
        return super().parse_arg(argname, displayname)
#
# This is the fourth or fifth rewrite of registering all the
# string converter format units. Previous approaches hid
# bugs--generally mismatches between the semantics of the format
# unit and the arguments necessary to represent those semantics
# properly. Hopefully with this approach we'll get it 100% right.
#
# The r() function (short for "register") both registers the
# mapping from arguments to format unit *and* registers the
# legacy C converter for that format unit.
#
def r(format_unit, *, accept, encoding=False, zeroes=False):
    """Register one string format unit.

    Records the (accept, encoding, zeroes) key in
    str_converter_argument_map and, where possible, also installs the
    matching legacy C converter.
    """
    if not encoding and format_unit != 's':
        # add the legacy c converters here too.
        #
        # note: add_legacy_c_converter can't work for
        # es, es#, et, or et#
        # because of their extra encoding argument
        #
        # also don't add the converter for 's' because
        # the metaclass for CConverter adds it for us.
        kwargs = {}
        if accept != {str}:
            kwargs['accept'] = accept
        if zeroes:
            kwargs['zeroes'] = True
        added_f = functools.partial(str_converter, **kwargs)
        legacy_converters[format_unit] = added_f
    d = str_converter_argument_map
    key = str_converter_key(accept, encoding, zeroes)
    if key in d:
        sys.exit("Duplicate keys specified for str_converter_argument_map!")
    d[key] = format_unit
r('es', encoding=True, accept={str})
r('es#', encoding=True, zeroes=True, accept={str})
r('et', encoding=True, accept={bytes, bytearray, str})
r('et#', encoding=True, zeroes=True, accept={bytes, bytearray, str})
r('s', accept={str})
r('s#', zeroes=True, accept={robuffer, str})
r('y', accept={robuffer})
r('y#', zeroes=True, accept={robuffer})
r('z', accept={str, NoneType})
r('z#', zeroes=True, accept={robuffer, str, NoneType})
del r  # registration helper is no longer needed after import time
class PyBytesObject_converter(CConverter):
    """Converter for an exact PyBytesObject * argument (format unit 'S')."""
    type = 'PyBytesObject *'
    format_unit = 'S'
    # accept = {bytes}
    def parse_arg(self, argname, displayname):
        """Generate C code checking the argument is a bytes object."""
        if self.format_unit == 'S':
            return """
            if (!PyBytes_Check({argname})) {{{{
                _PyArg_BadArgument("{{name}}", {displayname}, "bytes", {argname});
                goto exit;
            }}}}
            {paramname} = ({type}){argname};
            """.format(argname=argname, paramname=self.name,
                       type=self.type, displayname=displayname)
        return super().parse_arg(argname, displayname)
class PyByteArrayObject_converter(CConverter):
    """Converter for an exact PyByteArrayObject * argument (format unit 'Y')."""
    type = 'PyByteArrayObject *'
    format_unit = 'Y'
    # accept = {bytearray}
    def parse_arg(self, argname, displayname):
        """Generate C code checking the argument is a bytearray object."""
        if self.format_unit == 'Y':
            return """
            if (!PyByteArray_Check({argname})) {{{{
                _PyArg_BadArgument("{{name}}", {displayname}, "bytearray", {argname});
                goto exit;
            }}}}
            {paramname} = ({type}){argname};
            """.format(argname=argname, paramname=self.name,
                       type=self.type, displayname=displayname)
        return super().parse_arg(argname, displayname)
class unicode_converter(CConverter):
    """Converter for a str argument kept as a PyObject * (format unit 'U')."""
    type = 'PyObject *'
    default_type = (str, Null, NoneType)
    format_unit = 'U'
    def parse_arg(self, argname, displayname):
        """Generate C code verifying the argument is a ready str object."""
        if self.format_unit == 'U':
            return """
            if (!PyUnicode_Check({argname})) {{{{
                _PyArg_BadArgument("{{name}}", {displayname}, "str", {argname});
                goto exit;
            }}}}
            if (PyUnicode_READY({argname}) == -1) {{{{
                goto exit;
            }}}}
            {paramname} = {argname};
            """.format(argname=argname, paramname=self.name,
                       displayname=displayname)
        return super().parse_arg(argname, displayname)
@add_legacy_c_converter('u#', zeroes=True)
@add_legacy_c_converter('Z', accept={str, NoneType})
@add_legacy_c_converter('Z#', accept={str, NoneType}, zeroes=True)
class Py_UNICODE_converter(CConverter):
    """Converter for const Py_UNICODE * arguments ('u', 'u#', 'Z', 'Z#')."""
    type = 'const Py_UNICODE *'
    default_type = (str, Null, NoneType)
    format_unit = 'u'
    def converter_init(self, *, accept={str}, zeroes=False):
        # 'Z' additionally accepts None; the '#' variants track the length.
        format_unit = 'Z' if accept=={str, NoneType} else 'u'
        if zeroes:
            format_unit += '#'
            self.length = True
        self.format_unit = format_unit
@add_legacy_c_converter('s*', accept={str, buffer})
@add_legacy_c_converter('z*', accept={str, buffer, NoneType})
@add_legacy_c_converter('w*', accept={rwbuffer})
class Py_buffer_converter(CConverter):
    """Converter for Py_buffer arguments ('y*', 's*', 'z*', 'w*').

    The chosen format unit depends on the 'accept' set; the filled
    buffer must be released on cleanup.
    """
    type = 'Py_buffer'
    format_unit = 'y*'
    impl_by_reference = True
    c_ignored_default = "{NULL, NULL}"
    def converter_init(self, *, accept={buffer}):
        # None is the only meaningful default; the buffer struct itself
        # gets the ignored default initializer.
        if self.default not in (unspecified, None):
            fail("The only legal default value for Py_buffer is None.")
        self.c_default = self.c_ignored_default
        if accept == {str, buffer, NoneType}:
            format_unit = 'z*'
        elif accept == {str, buffer}:
            format_unit = 's*'
        elif accept == {buffer}:
            format_unit = 'y*'
        elif accept == {rwbuffer}:
            format_unit = 'w*'
        else:
            fail("Py_buffer_converter: illegal combination of arguments")
        self.format_unit = format_unit
    def cleanup(self):
        # Release the buffer acquired during argument parsing.
        name = self.name
        return "".join(["if (", name, ".obj) {\n PyBuffer_Release(&", name, ");\n}\n"])
    def parse_arg(self, argname, displayname):
        """Generate C code acquiring a (contiguous) buffer from the argument."""
        if self.format_unit == 'y*':
            return """
            if (PyObject_GetBuffer({argname}, &{paramname}, PyBUF_SIMPLE) != 0) {{{{
                goto exit;
            }}}}
            if (!PyBuffer_IsContiguous(&{paramname}, 'C')) {{{{
                _PyArg_BadArgument("{{name}}", {displayname}, "contiguous buffer", {argname});
                goto exit;
            }}}}
            """.format(argname=argname, paramname=self.name,
                       displayname=displayname)
        elif self.format_unit == 's*':
            return """
            if (PyUnicode_Check({argname})) {{{{
                Py_ssize_t len;
                const char *ptr = PyUnicode_AsUTF8AndSize({argname}, &len);
                if (ptr == NULL) {{{{
                    goto exit;
                }}}}
                PyBuffer_FillInfo(&{paramname}, {argname}, (void *)ptr, len, 1, 0);
            }}}}
            else {{{{ /* any bytes-like object */
                if (PyObject_GetBuffer({argname}, &{paramname}, PyBUF_SIMPLE) != 0) {{{{
                    goto exit;
                }}}}
                if (!PyBuffer_IsContiguous(&{paramname}, 'C')) {{{{
                    _PyArg_BadArgument("{{name}}", {displayname}, "contiguous buffer", {argname});
                    goto exit;
                }}}}
            }}}}
            """.format(argname=argname, paramname=self.name,
                       displayname=displayname)
        elif self.format_unit == 'w*':
            return """
            if (PyObject_GetBuffer({argname}, &{paramname}, PyBUF_WRITABLE) < 0) {{{{
                PyErr_Clear();
                _PyArg_BadArgument("{{name}}", {displayname}, "read-write bytes-like object", {argname});
                goto exit;
            }}}}
            if (!PyBuffer_IsContiguous(&{paramname}, 'C')) {{{{
                _PyArg_BadArgument("{{name}}", {displayname}, "contiguous buffer", {argname});
                goto exit;
            }}}}
            """.format(argname=argname, paramname=self.name,
                       displayname=displayname)
        return super().parse_arg(argname, displayname)
def correct_name_for_self(f):
    """Return the (C type, parameter name) for f's implicit first parameter.

    Normal callables and __init__ take a PyObject * named 'self' (methods)
    or 'module' (module-level functions); static methods take an unused
    void *; class methods and __new__ take the PyTypeObject *.
    """
    kind = f.kind
    if kind in (CALLABLE, METHOD_INIT):
        return ("PyObject *", "self" if f.cls else "module")
    if kind == STATIC_METHOD:
        return "void *", "null"
    if kind in (CLASS_METHOD, METHOD_NEW):
        return "PyTypeObject *", "type"
    raise RuntimeError("Unhandled type of function f: " + repr(kind))
def required_type_for_self_for_parser(f):
    """Return the C type the *parsing* function must use for 'self'.

    Only __init__/__new__/static/class methods force a particular type;
    for any other kind, return None (the impl's own type is fine).
    """
    self_type, _ = correct_name_for_self(f)
    if f.kind not in (METHOD_INIT, METHOD_NEW, STATIC_METHOD, CLASS_METHOD):
        return None
    return self_type
class self_converter(CConverter):
    """
    A special-case converter:
    this is the default converter used for "self".
    """
    type = None
    format_unit = ''
    def converter_init(self, *, type=None):
        # Remember an explicitly requested C type; applied in pre_render().
        self.specified_type = type
    def pre_render(self):
        """Resolve self's final name and C type once the function is known."""
        f = self.function
        default_type, default_name = correct_name_for_self(f)
        self.signature_name = default_name
        self.type = self.specified_type or self.type or default_type
        kind = self.function.kind
        new_or_init = kind in (METHOD_NEW, METHOD_INIT)
        if (kind == STATIC_METHOD) or new_or_init:
            self.show_in_signature = False
    # tp_new (METHOD_NEW) functions are of type newfunc:
    #     typedef PyObject *(*newfunc)(struct _typeobject *, PyObject *, PyObject *);
    # PyTypeObject is a typedef for struct _typeobject.
    #
    # tp_init (METHOD_INIT) functions are of type initproc:
    #     typedef int (*initproc)(PyObject *, PyObject *, PyObject *);
    #
    # All other functions generated by Argument Clinic are stored in
    # PyMethodDef structures, in the ml_meth slot, which is of type PyCFunction:
    #     typedef PyObject *(*PyCFunction)(PyObject *, PyObject *);
    # However!  We habitually cast these functions to PyCFunction,
    # since functions that accept keyword arguments don't fit this signature
    # but are stored there anyway.  So strict type equality isn't important
    # for these functions.
    #
    # So:
    #
    # * The name of the first parameter to the impl and the parsing function will always
    #   be self.name.
    #
    # * The type of the first parameter to the impl will always be of self.type.
    #
    # * If the function is neither tp_new (METHOD_NEW) nor tp_init (METHOD_INIT):
    #   * The type of the first parameter to the parsing function is also self.type.
    #     This means that if you step into the parsing function, your "self" parameter
    #     is of the correct type, which may make debugging more pleasant.
    #
    # * Else if the function is tp_new (METHOD_NEW):
    #   * The type of the first parameter to the parsing function is "PyTypeObject *",
    #     so the type signature of the function call is an exact match.
    #   * If self.type != "PyTypeObject *", we cast the first parameter to self.type
    #     in the impl call.
    #
    # * Else if the function is tp_init (METHOD_INIT):
    #   * The type of the first parameter to the parsing function is "PyObject *",
    #     so the type signature of the function call is an exact match.
    #   * If self.type != "PyObject *", we cast the first parameter to self.type
    #     in the impl call.
    @property
    def parser_type(self):
        # The type the parsing function must use, falling back to the impl type.
        return required_type_for_self_for_parser(self.function) or self.type
    def render(self, parameter, data):
        """
        parameter is a clinic.Parameter instance.
        data is a CRenderData instance.
        """
        if self.function.kind == STATIC_METHOD:
            return
        self._render_self(parameter, data)
        if self.type != self.parser_type:
            # insert cast to impl_argument[0], aka self.
            # we know we're in the first slot in all the CRenderData lists,
            # because we render parameters in order, and self is always first.
            assert len(data.impl_arguments) == 1
            assert data.impl_arguments[0] == self.name
            data.impl_arguments[0] = '(' + self.type + ")" + data.impl_arguments[0]
    def set_template_dict(self, template_dict):
        """Export self's name/type (and a fast-path type check for new/init)."""
        template_dict['self_name'] = self.name
        template_dict['self_type'] = self.parser_type
        kind = self.function.kind
        cls = self.function.cls
        if ((kind in (METHOD_NEW, METHOD_INIT)) and cls and cls.typedef):
            type_object = self.function.cls.type_object
            if kind == METHOD_NEW:
                type_check = '({} == {})'.format(self.name, type_object)
            else:
                type_check = 'Py_IS_TYPE({}, {})'.format(self.name, type_object)
            line = '{} &&\n '.format(type_check)
            template_dict['self_type_check'] = line
def add_c_return_converter(f, name=None):
    """Register a return converter class in return_converters.

    The registration name defaults to f.__name__ with its
    '_return_converter' suffix stripped; classes whose names lack that
    suffix are returned without being registered.  Returns f either way,
    so this can be used as a decorator.
    """
    suffix = '_return_converter'
    if not name:
        name = f.__name__
    if not name.endswith(suffix):
        return f
    return_converters[name[:-len(suffix)]] = f
    return f
class CReturnConverterAutoRegister(type):
    """Metaclass that auto-registers every CReturnConverter subclass."""
    def __init__(cls, name, bases, classdict):
        add_c_return_converter(cls)
class CReturnConverter(metaclass=CReturnConverterAutoRegister):
    """Base class for return converters.

    A return converter renders the C code that converts the impl's C
    return value back into a Python object, plus any error checks.
    """
    # The C type to use for this variable.
    # 'type' should be a Python string specifying the type, e.g. "int".
    # If this is a pointer type, the type string should end with ' *'.
    type = 'PyObject *'
    # The Python default value for this parameter, as a Python value.
    # Or the magic value "unspecified" if there is no default.
    default = None
    def __init__(self, *, py_default=None, **kwargs):
        self.py_default = py_default
        try:
            self.return_converter_init(**kwargs)
        except TypeError as e:
            s = ', '.join(name + '=' + repr(value) for name, value in kwargs.items())
            sys.exit(self.__class__.__name__ + '(' + s + ')\n' + str(e))
    def return_converter_init(self):
        # Subclass hook for handling extra keyword arguments.
        pass
    def declare(self, data, name="_return_value"):
        """Declare the C variable receiving the impl's return value."""
        line = []
        add = line.append
        add(self.type)
        if not self.type.endswith('*'):
            add(' ')
        add(name + ';')
        data.declarations.append(''.join(line))
        data.return_value = name
    def err_occurred_if(self, expr, data):
        # Emit: if 'expr' holds and a Python exception is set, bail out.
        data.return_conversion.append('if (({}) && PyErr_Occurred()) {{\n goto exit;\n}}\n'.format(expr))
    def err_occurred_if_null_pointer(self, variable, data):
        # Emit: if 'variable' is NULL, bail out.
        data.return_conversion.append('if ({} == NULL) {{\n goto exit;\n}}\n'.format(variable))
    def render(self, function, data):
        """
        function is a clinic.Function instance.
        data is a CRenderData instance.
        """
        pass
add_c_return_converter(CReturnConverter, 'object')
class NoneType_return_converter(CReturnConverter):
    """Return converter for impls that return Py_None on success.

    Any other value in _return_value is treated as an error.
    """
    def render(self, function, data):
        self.declare(data)
        data.return_conversion.append('''
if (_return_value != Py_None) {
    goto exit;
}
return_value = Py_None;
Py_INCREF(Py_None);
'''.strip())
class bool_return_converter(CReturnConverter):
    """Return converter for a C int used as a boolean (-1 signals error)."""
    type = 'int'
    def render(self, function, data):
        self.declare(data)
        self.err_occurred_if("_return_value == -1", data)
        data.return_conversion.append('return_value = PyBool_FromLong((long)_return_value);\n')
class long_return_converter(CReturnConverter):
    """Return converter for C 'long', and base class for the other integer
    return converters.

    Subclasses override 'type', 'conversion_fn', 'cast' and
    'unsigned_cast' to cover int / unsigned / Py_ssize_t / size_t.
    A return value equal to (cast)-1 with an exception set is an error.
    """
    type = 'long'
    conversion_fn = 'PyLong_FromLong'
    cast = ''
    unsigned_cast = ''

    def render(self, function, data):
        """Declare _return_value, emit the error check and the conversion."""
        self.declare(data)
        error_expr = "_return_value == {}-1".format(self.unsigned_cast)
        self.err_occurred_if(error_expr, data)
        conversion = 'return_value = {}({}_return_value);\n'.format(
            self.conversion_fn, self.cast)
        data.return_conversion.append(conversion)
class int_return_converter(long_return_converter):
    """Return converter for C 'int' (widened to long for conversion)."""
    type = 'int'
    cast = '(long)'
class init_return_converter(long_return_converter):
    """
    Special return converter for __init__ functions.
    """
    type = 'int'
    cast = '(long)'
    def render(self, function, data):
        # __init__ returns its int directly; no conversion code needed.
        pass
class unsigned_long_return_converter(long_return_converter):
    """Return converter for C 'unsigned long'."""
    type = 'unsigned long'
    conversion_fn = 'PyLong_FromUnsignedLong'
    unsigned_cast = '(unsigned long)'
class unsigned_int_return_converter(unsigned_long_return_converter):
    """Return converter for C 'unsigned int' (widened to unsigned long)."""
    type = 'unsigned int'
    cast = '(unsigned long)'
    unsigned_cast = '(unsigned int)'
class Py_ssize_t_return_converter(long_return_converter):
    """Return converter for C 'Py_ssize_t'."""
    type = 'Py_ssize_t'
    conversion_fn = 'PyLong_FromSsize_t'
class size_t_return_converter(long_return_converter):
    """Return converter for C 'size_t'."""
    type = 'size_t'
    conversion_fn = 'PyLong_FromSize_t'
    unsigned_cast = '(size_t)'
class double_return_converter(CReturnConverter):
    """Return converter for C 'double' (-1.0 with an exception is an error)."""
    type = 'double'
    cast = ''
    def render(self, function, data):
        self.declare(data)
        self.err_occurred_if("_return_value == -1.0", data)
        data.return_conversion.append(
            'return_value = PyFloat_FromDouble(' + self.cast + '_return_value);\n')
class float_return_converter(double_return_converter):
    """Return converter for C 'float' (widened to double for conversion)."""
    type = 'float'
    cast = '(double)'
def eval_ast_expr(node, globals, *, filename='-'):
    """
    Compile and evaluate an expression AST node, returning its value.

    Accepts either an ast.Expr statement node (its .value is used) or a
    bare expression node.  'globals' is the globals dict the expression
    is evaluated against.  (There's no equivalent for "locals" here.)
    """
    if isinstance(node, ast.Expr):
        node = node.value
    expression = ast.Expression(node)
    code = compile(expression, filename, 'eval')
    evaluator = types.FunctionType(code, globals)
    return evaluator()
class IndentStack:
    """Tracks the stack of indentation margins while parsing the DSL.

    Margins are measured in spaces; tab characters are illegal.
    """
    def __init__(self):
        self.indents = []
        self.margin = None
    def _ensure(self):
        # Guard for operations that require at least one established margin.
        if not self.indents:
            fail('IndentStack expected indents, but none are defined.')
    def measure(self, line):
        """
        Returns the length of the line's margin.
        """
        if '\t' in line:
            fail('Tab characters are illegal in the Argument Clinic DSL.')
        stripped = line.lstrip()
        if not len(stripped):
            # we can't tell anything from an empty line
            # so just pretend it's indented like our current indent
            self._ensure()
            return self.indents[-1]
        return len(line) - len(stripped)
    def infer(self, line):
        """
        Infer what is now the current margin based on this line.
        Returns:
            1 if we have indented (or this is the first margin)
            0 if the margin has not changed
            -N if we have dedented N times
        """
        indent = self.measure(line)
        margin = ' ' * indent
        if not self.indents:
            # first margin ever seen
            self.indents.append(indent)
            self.margin = margin
            return 1
        current = self.indents[-1]
        if indent == current:
            return 0
        if indent > current:
            self.indents.append(indent)
            self.margin = margin
            return 1
        # indent < current
        if indent not in self.indents:
            fail("Illegal outdent.")
        outdent_count = 0
        while indent != current:
            self.indents.pop()
            current = self.indents[-1]
            outdent_count -= 1
        self.margin = margin
        return outdent_count
    @property
    def depth(self):
        """
        Returns how many margins are currently defined.
        """
        return len(self.indents)
    def indent(self, line):
        """
        Indents a line by the currently defined margin.
        """
        return self.margin + line
    def dedent(self, line):
        """
        Dedents a line by the currently defined margin.
        (The inverse of 'indent'.)
        """
        margin = self.margin
        indent = self.indents[-1]
        if not line.startswith(margin):
            fail('Cannot dedent, line does not start with the previous margin:')
        return line[indent:]
class DSLParser:
    def __init__(self, clinic):
        """Build the directive dispatch table and reset per-block state."""
        self.clinic = clinic
        self.directives = {}
        for name in dir(self):
            # functions that start with directive_ are added to directives
            _, s, key = name.partition("directive_")
            if s:
                self.directives[key] = getattr(self, name)
            # functions that start with at_ are too, with an @ in front
            _, s, key = name.partition("at_")
            if s:
                self.directives['@' + key] = getattr(self, name)
        self.reset()
    def reset(self):
        """Reset all per-block parsing state (run before parsing each block)."""
        self.function = None
        self.state = self.state_dsl_start
        self.parameter_indent = None
        self.keyword_only = False
        self.positional_only = False
        self.group = 0
        self.parameter_state = self.ps_start
        self.seen_positional_with_default = False
        self.indent = IndentStack()
        self.kind = CALLABLE
        self.coexist = False
        self.parameter_continuation = ''
        self.preserve_output = False
    def directive_version(self, required):
        """Handle "version": fail unless Clinic is at least version 'required'."""
        global version
        if version_comparitor(version, required) < 0:
            fail("Insufficient Clinic version!\n Version: " + version + "\n Required: " + required)
def directive_module(self, name):
fields = name.split('.')
new = fields.pop()
module, cls = self.clinic._module_and_class(fields)
if cls:
fail("Can't nest a module inside a class!")
if name in module.classes:
fail("Already defined module " + repr(name) + "!")
m = Module(name, module)
module.modules[name] = m
self.block.signatures.append(m)
def directive_class(self, name, typedef, type_object):
fields = name.split('.')
in_classes = False
parent = self
name = fields.pop()
so_far = []
module, cls = self.clinic._module_and_class(fields)
parent = cls or module
if name in parent.classes:
fail("Already defined class " + repr(name) + "!")
c = Class(name, module, cls, typedef, type_object)
parent.classes[name] = c
self.block.signatures.append(c)
    def directive_set(self, name, value):
        """Handle "set": assign to a whitelisted Clinic attribute.

        Only 'line_prefix' and 'line_suffix' may be set; the value may use
        the '{block comment start}' / '{block comment end}' templates.
        """
        if name not in ("line_prefix", "line_suffix"):
            fail("unknown variable", repr(name))
        value = value.format_map({
            'block comment start': '/*',
            'block comment end': '*/',
        })
        self.clinic.__dict__[name] = value
def directive_destination(self, name, command, *args):
if command == 'new':
self.clinic.add_destination(name, *args)
return
if command == 'clear':
self.clinic.get_destination(name).clear()
fail("unknown destination command", repr(command))
    def directive_output(self, command_or_name, destination=''):
        """Handle "output": reroute where generated code is written.

        Subcommands: preset / push / pop / print / everything; any other
        name is treated as a destination-buffer name to reroute.
        """
        fd = self.clinic.destination_buffers
        if command_or_name == "preset":
            preset = self.clinic.presets.get(destination)
            if not preset:
                fail("Unknown preset " + repr(destination) + "!")
            fd.update(preset)
            return
        if command_or_name == "push":
            self.clinic.destination_buffers_stack.append(fd.copy())
            return
        if command_or_name == "pop":
            if not self.clinic.destination_buffers_stack:
                fail("Can't 'output pop', stack is empty!")
            previous_fd = self.clinic.destination_buffers_stack.pop()
            fd.update(previous_fd)
            return
        # secret command for debugging!
        if command_or_name == "print":
            self.block.output.append(pprint.pformat(fd))
            self.block.output.append('\n')
            return
        d = self.clinic.get_destination(destination)
        if command_or_name == "everything":
            for name in list(fd):
                fd[name] = d
            return
        if command_or_name not in fd:
            fail("Invalid command / destination name " + repr(command_or_name) + ", must be one of:\n preset push pop print everything " + " ".join(fd))
        fd[command_or_name] = d
    def directive_dump(self, name):
        """Handle "dump": append a destination's contents to the block output."""
        self.block.output.append(self.clinic.get_destination(name).dump())
    def directive_print(self, *args):
        """Handle "print": write the arguments plus a newline to block output."""
        self.block.output.append(' '.join(args))
        self.block.output.append('\n')
    def directive_preserve(self):
        """Handle "preserve": keep the block's existing output verbatim."""
        if self.preserve_output:
            fail("Can't have preserve twice in one block!")
        self.preserve_output = True
    def at_classmethod(self):
        """Handle @classmethod: mark the upcoming function as a class method."""
        if self.kind is not CALLABLE:
            fail("Can't set @classmethod, function is not a normal callable")
        self.kind = CLASS_METHOD
    def at_staticmethod(self):
        """Handle @staticmethod: mark the upcoming function as a static method."""
        if self.kind is not CALLABLE:
            fail("Can't set @staticmethod, function is not a normal callable")
        self.kind = STATIC_METHOD
    def at_coexist(self):
        """Handle @coexist: allow the generated method to share its name."""
        if self.coexist:
            fail("Called @coexist twice!")
        self.coexist = True
    def parse(self, block):
        """Parse one Clinic block: feed each input line through the state
        machine, then render the collected signatures into block.output."""
        self.reset()
        self.block = block
        self.saved_output = self.block.output
        block.output = []
        block_start = self.clinic.block_parser.line_number
        lines = block.input.split('\n')
        for line_number, line in enumerate(lines, self.clinic.block_parser.block_start_line_number):
            if '\t' in line:
                fail('Tab characters are illegal in the Clinic DSL.\n\t' + repr(line), line_number=block_start)
            self.state(line)
        self.next(self.state_terminal)
        self.state(None)
        # NOTE(review): 'clinic' here is the module-level global, not
        # self.clinic -- presumably the same object; confirm.
        block.output.extend(self.clinic.language.render(clinic, block.signatures))
        if self.preserve_output:
            if block.output:
                fail("'preserve' only works for blocks that don't produce any output!")
            block.output = self.saved_output
@staticmethod
def ignore_line(line):
# ignore comment-only lines
if line.lstrip().startswith('#'):
return True
# Ignore empty lines too
# (but not in docstring sections!)
if not line.strip():
return True
return False
    @staticmethod
    def calculate_indent(line):
        # Length of the line's leading whitespace.
        # NOTE(review): uses strip(), so trailing whitespace also counts
        # toward the "indent" -- presumably input lines are already
        # rstripped; confirm before relying on this for ragged input.
        return len(line) - len(line.strip())
    def next(self, state, line=None):
        """Transition the state machine to 'state'; if a line is supplied,
        immediately feed it to the new state."""
        # real_print(self.state.__name__, "->", state.__name__, ", line=", line)
        self.state = state
        if line is not None:
            self.state(line)
    def state_dsl_start(self, line):
        """Initial state: skip ignorable lines, execute directives, else
        hand off to the module/function declaration state."""
        # self.block = self.ClinicOutputBlock(self)
        if self.ignore_line(line):
            return
        # is it a directive?
        fields = shlex.split(line)
        directive_name = fields[0]
        directive = self.directives.get(directive_name, None)
        if directive:
            try:
                directive(*fields[1:])
            except TypeError as e:
                # a directive called with the wrong number of arguments
                fail(str(e))
            return
        self.next(self.state_modulename_name, line)
    def state_modulename_name(self, line):
        """Parse the function declaration line: either a clone
        ("name [as c_basename] = existing") or a fresh declaration
        ("name [as c_basename] [-> return annotation]"), then create the
        Function object and move on to the parameters state."""
        # looking for declaration, which establishes the leftmost column
        # line should be
        # modulename.fnname [as c_basename] [-> return annotation]
        # square brackets denote optional syntax.
        #
        # alternatively:
        # modulename.fnname [as c_basename] = modulename.existing_fn_name
        # clones the parameters and return converter from that
        # function. you can't modify them. you must enter a
        # new docstring.
        #
        # (but we might find a directive first!)
        #
        # this line is permitted to start with whitespace.
        # we'll call this number of spaces F (for "function").
        if not line.strip():
            return
        self.indent.infer(line)
        # are we cloning?
        before, equals, existing = line.rpartition('=')
        if equals:
            full_name, _, c_basename = before.partition(' as ')
            full_name = full_name.strip()
            c_basename = c_basename.strip()
            existing = existing.strip()
            if (is_legal_py_identifier(full_name) and
                (not c_basename or is_legal_c_identifier(c_basename)) and
                is_legal_py_identifier(existing)):
                # we're cloning!
                fields = [x.strip() for x in existing.split('.')]
                function_name = fields.pop()
                module, cls = self.clinic._module_and_class(fields)
                for existing_function in (cls or module).functions:
                    if existing_function.name == function_name:
                        break
                else:
                    existing_function = None
                if not existing_function:
                    print("class", cls, "module", module, "existing", existing)
                    print("cls. functions", cls.functions)
                    fail("Couldn't find existing function " + repr(existing) + "!")
                fields = [x.strip() for x in full_name.split('.')]
                function_name = fields.pop()
                module, cls = self.clinic._module_and_class(fields)
                if not (existing_function.kind == self.kind and existing_function.coexist == self.coexist):
                    fail("'kind' of function and cloned function don't match! (@classmethod/@staticmethod/@coexist)")
                self.function = existing_function.copy(name=function_name, full_name=full_name, module=module, cls=cls, c_basename=c_basename, docstring='')
                self.block.signatures.append(self.function)
                (cls or module).functions.append(self.function)
                self.next(self.state_function_docstring)
                return
        line, _, returns = line.partition('->')
        full_name, _, c_basename = line.partition(' as ')
        full_name = full_name.strip()
        c_basename = c_basename.strip() or None
        if not is_legal_py_identifier(full_name):
            fail("Illegal function name: {}".format(full_name))
        if c_basename and not is_legal_c_identifier(c_basename):
            fail("Illegal C basename: {}".format(c_basename))
        return_converter = None
        if returns:
            # parse the return annotation by embedding it in a dummy def
            ast_input = "def x() -> {}: pass".format(returns)
            module = None
            try:
                module = ast.parse(ast_input)
            except SyntaxError:
                pass
            if not module:
                fail("Badly-formed annotation for " + full_name + ": " + returns)
            try:
                name, legacy, kwargs = self.parse_converter(module.body[0].returns)
                if legacy:
                    fail("Legacy converter {!r} not allowed as a return converter"
                         .format(name))
                if name not in return_converters:
                    fail("No available return converter called " + repr(name))
                return_converter = return_converters[name](**kwargs)
            except ValueError:
                fail("Badly-formed annotation for " + full_name + ": " + returns)
        fields = [x.strip() for x in full_name.split('.')]
        function_name = fields.pop()
        module, cls = self.clinic._module_and_class(fields)
        fields = full_name.split('.')
        if fields[-1] == '__new__':
            if (self.kind != CLASS_METHOD) or (not cls):
                fail("__new__ must be a class method!")
            self.kind = METHOD_NEW
        elif fields[-1] == '__init__':
            if (self.kind != CALLABLE) or (not cls):
                fail("__init__ must be a normal method, not a class or static method!")
            self.kind = METHOD_INIT
            if not return_converter:
                return_converter = init_return_converter()
        elif fields[-1] in unsupported_special_methods:
            fail(fields[-1] + " is a special method and cannot be converted to Argument Clinic!  (Yet.)")
        if not return_converter:
            return_converter = CReturnConverter()
        if not module:
            fail("Undefined module used in declaration of " + repr(full_name.strip()) + ".")
        self.function = Function(name=function_name, full_name=full_name, module=module, cls=cls, c_basename=c_basename,
                                 return_converter=return_converter, kind=self.kind, coexist=self.coexist)
        self.block.signatures.append(self.function)
        # insert a self converter automatically
        type, name = correct_name_for_self(self.function)
        kwargs = {}
        if cls and type == "PyObject *":
            kwargs['type'] = cls.typedef
        sc = self.function.self_converter = self_converter(name, name, self.function, **kwargs)
        p_self = Parameter(sc.name, inspect.Parameter.POSITIONAL_ONLY, function=self.function, converter=sc)
        self.function.parameters[sc.name] = p_self
        (cls or module).functions.append(self.function)
        self.next(self.state_parameters_start)
# Now entering the parameters section. The rules, formally stated:
#
# * All lines must be indented with spaces only.
# * The first line must be a parameter declaration.
# * The first line must be indented.
# * This first line establishes the indent for parameters.
# * We'll call this number of spaces P (for "parameter").
# * Thenceforth:
# * Lines indented with P spaces specify a parameter.
# * Lines indented with > P spaces are docstrings for the previous
# parameter.
# * We'll call this number of spaces D (for "docstring").
# * All subsequent lines indented with >= D spaces are stored as
# part of the per-parameter docstring.
# * All lines will have the first D spaces of the indent stripped
# before they are stored.
# * It's illegal to have a line starting with a number of spaces X
# such that P < X < D.
# * A line with < P spaces is the first line of the function
# docstring, which ends processing for parameters and per-parameter
# docstrings.
# * The first line of the function docstring must be at the same
# indent as the function declaration.
# * It's illegal to have any line in the parameters section starting
# with X spaces such that F < X < P. (As before, F is the indent
# of the function declaration.)
#
# Also, currently Argument Clinic places the following restrictions on groups:
# * Each group must contain at least one parameter.
# * Each group may contain at most one group, which must be the furthest
# thing in the group from the required parameters. (The nested group
# must be the first in the group when it's before the required
# parameters, and the last thing in the group when after the required
# parameters.)
# * There may be at most one (top-level) group to the left or right of
# the required parameters.
# * You must specify a slash, and it must be after all parameters.
# (In other words: either all parameters are positional-only,
# or none are.)
#
# Said another way:
# * Each group must contain at least one parameter.
# * All left square brackets before the required parameters must be
# consecutive. (You can't have a left square bracket followed
# by a parameter, then another left square bracket. You can't
# have a left square bracket, a parameter, a right square bracket,
# and then a left square bracket.)
# * All right square brackets after the required parameters must be
# consecutive.
#
# These rules are enforced with a single state variable:
# "parameter_state". (Previously the code was a miasma of ifs and
# separate boolean state variables.) The states are:
#
# [ [ a, b, ] c, ] d, e, f=3, [ g, h, [ i ] ] <- line
# 01 2 3 4 5 6 <- state transitions
#
# 0: ps_start. before we've seen anything. legal transitions are to 1 or 3.
# 1: ps_left_square_before. left square brackets before required parameters.
# 2: ps_group_before. in a group, before required parameters.
# 3: ps_required. required parameters, positional-or-keyword or positional-only
# (we don't know yet). (renumber left groups!)
# 4: ps_optional. positional-or-keyword or positional-only parameters that
# now must have default values.
# 5: ps_group_after. in a group, after required parameters.
# 6: ps_right_square_after. right square brackets after required parameters.
ps_start, ps_left_square_before, ps_group_before, ps_required, \
ps_optional, ps_group_after, ps_right_square_after = range(7)
    def state_parameters_start(self, line):
        """First state of the parameters section: an indented line starts
        the parameter list; an unindented line means there are no
        parameters and we're already at the function docstring."""
        if self.ignore_line(line):
            return
        # if this line is not indented, we have no parameters
        if not self.indent.infer(line):
            return self.next(self.state_function_docstring, line)
        self.parameter_continuation = ''
        return self.next(self.state_parameter, line)
    def to_required(self):
        """
        Transition to the "required" parameter state.
        """
        if self.parameter_state != self.ps_required:
            self.parameter_state = self.ps_required
            # Negate group numbers assigned so far so that groups to the
            # left of the required parameters end up negative.
            for p in self.function.parameters.values():
                p.group = -p.group
def state_parameter(self, line):
    """Parse one line of the "parameters" section of a Clinic block.

    Handles backslash continuation lines, indent changes (which switch
    to the function- or parameter-docstring states), the special
    symbols * / [ ], and finally an actual parameter declaration,
    which is parsed via the ast module and recorded as a Parameter
    object in self.function.parameters.
    """
    if self.parameter_continuation:
        # previous line ended with a backslash; glue this line onto it
        line = self.parameter_continuation + ' ' + line.lstrip()
        self.parameter_continuation = ''

    if self.ignore_line(line):
        return

    assert self.indent.depth == 2
    indent = self.indent.infer(line)
    if indent == -1:
        # we outdented, must be to definition column
        return self.next(self.state_function_docstring, line)

    if indent == 1:
        # we indented, must be to new parameter docstring column
        return self.next(self.state_parameter_docstring_start, line)

    line = line.rstrip()

    if line.endswith('\\'):
        # continuation line: stash it and wait for the next one
        self.parameter_continuation = line[:-1]
        return

    line = line.lstrip()

    if line in ('*', '/', '[', ']'):
        self.parse_special_symbol(line)
        return

    # advance the parameter-group state machine
    if self.parameter_state in (self.ps_start, self.ps_required):
        self.to_required()
    elif self.parameter_state == self.ps_left_square_before:
        self.parameter_state = self.ps_group_before
    elif self.parameter_state == self.ps_group_before:
        if not self.group:
            self.to_required()
    elif self.parameter_state in (self.ps_group_after, self.ps_optional):
        pass
    else:
        fail("Function " + self.function.name + " has an unsupported group configuration. (Unexpected state " + str(self.parameter_state) + ".a)")

    # handle "as" for parameters too
    c_name = None
    name, have_as_token, trailing = line.partition(' as ')
    if have_as_token:
        name = name.strip()
        if ' ' not in name:
            fields = trailing.strip().split(' ')
            if not fields:
                fail("Invalid 'as' clause!")
            c_name = fields[0]
            if c_name.endswith(':'):
                name += ':'
                c_name = c_name[:-1]
            fields[0] = name
            line = ' '.join(fields)

    base, equals, default = line.rpartition('=')
    if not equals:
        # no '=' at all: no default value
        base = default
        default = None

    module = None
    try:
        ast_input = "def x({}): pass".format(base)
        module = ast.parse(ast_input)
    except SyntaxError:
        try:
            # the last = was probably inside a function call, like
            #   c: int(accept={str})
            # so assume there was no actual default value.
            default = None
            ast_input = "def x({}): pass".format(line)
            module = ast.parse(ast_input)
        except SyntaxError:
            pass
    if not module:
        fail("Function " + self.function.name + " has an invalid parameter declaration:\n\t" + line)

    function_args = module.body[0].args

    if len(function_args.args) > 1:
        fail("Function " + self.function.name + " has an invalid parameter declaration (comma?):\n\t" + line)
    if function_args.defaults or function_args.kw_defaults:
        fail("Function " + self.function.name + " has an invalid parameter declaration (default value?):\n\t" + line)
    if function_args.vararg or function_args.kwarg:
        fail("Function " + self.function.name + " has an invalid parameter declaration (*args? **kwargs?):\n\t" + line)

    parameter = function_args.args[0]

    parameter_name = parameter.arg
    name, legacy, kwargs = self.parse_converter(parameter.annotation)

    if not default:
        if self.parameter_state == self.ps_optional:
            fail("Can't have a parameter without a default (" + repr(parameter_name) + ")\nafter a parameter with a default!")
        value = unspecified
        if 'py_default' in kwargs:
            fail("You can't specify py_default without specifying a default value!")
    else:
        if self.parameter_state == self.ps_required:
            self.parameter_state = self.ps_optional

        default = default.strip()
        bad = False
        ast_input = "x = {}".format(default)
        bad = False
        try:
            module = ast.parse(ast_input)

            if 'c_default' not in kwargs:
                # we can only represent very simple data values in C.
                # detect whether default is okay, via a blacklist
                # of disallowed ast nodes.
                class DetectBadNodes(ast.NodeVisitor):
                    bad = False
                    def bad_node(self, node):
                        self.bad = True

                    # inline function call
                    visit_Call = bad_node
                    # inline if statement ("x = 3 if y else z")
                    visit_IfExp = bad_node

                    # comprehensions and generator expressions
                    visit_ListComp = visit_SetComp = bad_node
                    visit_DictComp = visit_GeneratorExp = bad_node

                    # literals for advanced types
                    visit_Dict = visit_Set = bad_node
                    visit_List = visit_Tuple = bad_node

                    # "starred": "a = [1, 2, 3]; *a"
                    visit_Starred = bad_node

                blacklist = DetectBadNodes()
                blacklist.visit(module)
                bad = blacklist.bad
            else:
                # if they specify a c_default, we can be more lenient about the default value.
                # but at least make an attempt at ensuring it's a valid expression.
                try:
                    value = eval(default)
                    if value == unspecified:
                        fail("'unspecified' is not a legal default value!")
                except NameError:
                    pass # probably a named constant
                except Exception as e:
                    fail("Malformed expression given as default value\n"
                         "{!r} caused {!r}".format(default, e))
            if bad:
                fail("Unsupported expression as default value: " + repr(default))

            expr = module.body[0].value
            # mild hack: explicitly support NULL as a default value
            if isinstance(expr, ast.Name) and expr.id == 'NULL':
                value = NULL
                py_default = '<unrepresentable>'
                c_default = "NULL"
            elif (isinstance(expr, ast.BinOp) or
                (isinstance(expr, ast.UnaryOp) and
                 not (isinstance(expr.operand, ast.Num) or
                      (hasattr(ast, 'Constant') and
                       isinstance(expr.operand, ast.Constant) and
                       type(expr.operand.value) in (int, float, complex)))
                )):
                c_default = kwargs.get("c_default")
                if not (isinstance(c_default, str) and c_default):
                    fail("When you specify an expression (" + repr(default) + ") as your default value,\nyou MUST specify a valid c_default." + ast.dump(expr))
                py_default = default
                value = unknown
            elif isinstance(expr, ast.Attribute):
                # dotted name, e.g. sys.maxsize: rebuild "a.b.c" from the AST
                a = []
                n = expr
                while isinstance(n, ast.Attribute):
                    a.append(n.attr)
                    n = n.value
                if not isinstance(n, ast.Name):
                    fail("Unsupported default value " + repr(default) + " (looked like a Python constant)")
                a.append(n.id)
                py_default = ".".join(reversed(a))

                c_default = kwargs.get("c_default")
                if not (isinstance(c_default, str) and c_default):
                    fail("When you specify a named constant (" + repr(py_default) + ") as your default value,\nyou MUST specify a valid c_default.")

                try:
                    value = eval(py_default)
                except NameError:
                    value = unknown
            else:
                # simple literal: evaluate it and derive both defaults
                value = ast.literal_eval(expr)
                py_default = repr(value)
                if isinstance(value, (bool, None.__class__)):
                    c_default = "Py_" + py_default
                elif isinstance(value, str):
                    c_default = c_repr(value)
                else:
                    c_default = py_default

        except SyntaxError as e:
            fail("Syntax error: " + repr(e.text))
        except (ValueError, AttributeError):
            value = unknown
            c_default = kwargs.get("c_default")
            py_default = default
            if not (isinstance(c_default, str) and c_default):
                fail("When you specify a named constant (" + repr(py_default) + ") as your default value,\nyou MUST specify a valid c_default.")

        kwargs.setdefault('c_default', c_default)
        kwargs.setdefault('py_default', py_default)

    dict = legacy_converters if legacy else converters
    legacy_str = "legacy " if legacy else ""
    if name not in dict:
        fail('{} is not a valid {}converter'.format(name, legacy_str))
    # if you use a c_name for the parameter, we just give that name to the converter
    # but the parameter object gets the python name
    converter = dict[name](c_name or parameter_name, parameter_name, self.function, value, **kwargs)

    kind = inspect.Parameter.KEYWORD_ONLY if self.keyword_only else inspect.Parameter.POSITIONAL_OR_KEYWORD

    if isinstance(converter, self_converter):
        if len(self.function.parameters) == 1:
            if (self.parameter_state != self.ps_required):
                fail("A 'self' parameter cannot be marked optional.")
            if value is not unspecified:
                fail("A 'self' parameter cannot have a default value.")
            if self.group:
                fail("A 'self' parameter cannot be in an optional group.")
            kind = inspect.Parameter.POSITIONAL_ONLY
            self.parameter_state = self.ps_start
            self.function.parameters.clear()
        else:
            fail("A 'self' parameter, if specified, must be the very first thing in the parameter block.")

    if isinstance(converter, defining_class_converter):
        _lp = len(self.function.parameters)
        if _lp == 1:
            if (self.parameter_state != self.ps_required):
                fail("A 'defining_class' parameter cannot be marked optional.")
            if value is not unspecified:
                fail("A 'defining_class' parameter cannot have a default value.")
            if self.group:
                fail("A 'defining_class' parameter cannot be in an optional group.")
        else:
            fail("A 'defining_class' parameter, if specified, must either be the first thing in the parameter block, or come just after 'self'.")

    p = Parameter(parameter_name, kind, function=self.function, converter=converter, default=value, group=self.group)

    if parameter_name in self.function.parameters:
        fail("You can't have two parameters named " + repr(parameter_name) + "!")
    self.function.parameters[parameter_name] = p
def parse_converter(self, annotation):
    """Translate a parameter's annotation AST node into converter info.

    Returns a (name, legacy, kwargs) tuple:
      name   -- the converter name (str)
      legacy -- True when the annotation was a string literal
                (a legacy "format unit"), else False
      kwargs -- keyword arguments for the converter; only non-empty for
                call annotations, evaluated via eval_ast_expr
    Calls fail() for any other annotation shape.
    """
    # Modern AST: string constants are ast.Constant (Python 3.8+).
    if (hasattr(ast, 'Constant') and
        isinstance(annotation, ast.Constant) and
        type(annotation.value) is str):
        return annotation.value, True, {}
    # Legacy AST: ast.Str was deprecated in 3.8 and removed in newer
    # Pythons, so guard the attribute access to avoid AttributeError.
    if hasattr(ast, 'Str') and isinstance(annotation, ast.Str):
        return annotation.s, True, {}
    if isinstance(annotation, ast.Name):
        return annotation.id, False, {}
    if not isinstance(annotation, ast.Call):
        fail("Annotations must be either a name, a function call, or a string.")
    name = annotation.func.id
    symbols = globals()
    kwargs = {node.arg: eval_ast_expr(node.value, symbols) for node in annotation.keywords}
    return name, False, kwargs
def parse_special_symbol(self, symbol):
    """Process one of the special parameter-section symbols: * [ ] /.

    '*' starts keyword-only parameters, '[' / ']' open and close an
    optional group, and '/' ends positional-only parameters.
    """
    if symbol == '*':
        if self.keyword_only:
            fail("Function " + self.function.name + " uses '*' more than once.")
        self.keyword_only = True
        return

    if symbol == '[':
        # open an optional group; groups force docstring-only signatures
        if self.parameter_state in (self.ps_start, self.ps_left_square_before):
            self.parameter_state = self.ps_left_square_before
        elif self.parameter_state in (self.ps_required, self.ps_group_after):
            self.parameter_state = self.ps_group_after
        else:
            fail("Function " + self.function.name + " has an unsupported group configuration. (Unexpected state " + str(self.parameter_state) + ".b)")
        self.group += 1
        self.function.docstring_only = True
        return

    if symbol == ']':
        # close the innermost optional group
        if not self.group:
            fail("Function " + self.function.name + " has a ] without a matching [.")
        if not any(param.group == self.group for param in self.function.parameters.values()):
            fail("Function " + self.function.name + " has an empty group.\nAll groups must contain at least one parameter.")
        self.group -= 1
        if self.parameter_state in (self.ps_left_square_before, self.ps_group_before):
            self.parameter_state = self.ps_group_before
        elif self.parameter_state in (self.ps_group_after, self.ps_right_square_after):
            self.parameter_state = self.ps_right_square_after
        else:
            fail("Function " + self.function.name + " has an unsupported group configuration. (Unexpected state " + str(self.parameter_state) + ".c)")
        return

    if symbol == '/':
        if self.positional_only:
            fail("Function " + self.function.name + " uses '/' more than once.")
        self.positional_only = True
        # ps_required and ps_optional are allowed here, that allows positional-only without option groups
        # to work (and have default values!)
        if (self.parameter_state not in (self.ps_required, self.ps_optional, self.ps_right_square_after, self.ps_group_before)) or self.group:
            fail("Function " + self.function.name + " has an unsupported group configuration. (Unexpected state " + str(self.parameter_state) + ".d)")
        if self.keyword_only:
            fail("Function " + self.function.name + " mixes keyword-only and positional-only parameters, which is unsupported.")
        # fixup preceding parameters
        for param in self.function.parameters.values():
            if (param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD and not isinstance(param.converter, self_converter)):
                fail("Function " + self.function.name + " mixes keyword-only and positional-only parameters, which is unsupported.")
            param.kind = inspect.Parameter.POSITIONAL_ONLY
def state_parameter_docstring_start(self, line):
    """Record the indent column of a parameter docstring, then hand the
    line to state_parameter_docstring for accumulation."""
    self.parameter_docstring_indent = len(self.indent.margin)
    assert self.indent.depth == 3
    return self.next(self.state_parameter_docstring, line)
# every line of the docstring must start with at least F spaces,
# where F > P.
# these F spaces will be stripped.
def state_parameter_docstring(self, line):
    """Accumulate one line of the current parameter's docstring."""
    stripped = line.strip()
    if stripped.startswith('#'):
        # comment lines are dropped from the docstring
        return

    indent = self.indent.measure(line)
    if indent < self.parameter_docstring_indent:
        # outdented: either back to a parameter line (depth 2)
        # or all the way back to the function docstring (depth 1)
        self.indent.infer(line)
        assert self.indent.depth < 3
        if self.indent.depth == 2:
            # back to a parameter
            return self.next(self.state_parameter, line)
        assert self.indent.depth == 1
        return self.next(self.state_function_docstring, line)

    assert self.function.parameters
    last_parameter = next(reversed(list(self.function.parameters.values())))

    new_docstring = last_parameter.docstring
    if new_docstring:
        new_docstring += '\n'
    if stripped:
        new_docstring += self.indent.dedent(line)
    last_parameter.docstring = new_docstring
# the final stanza of the DSL is the docstring.
def state_function_docstring(self, line):
    """Accumulate one line of the function docstring."""
    if self.group:
        fail("Function " + self.function.name + " has a ] without a matching [.")

    stripped = line.strip()
    if stripped.startswith('#'):
        # comment lines are dropped from the docstring
        return

    # blank lines are kept as empty docstring lines
    text = self.indent.dedent(line).rstrip() if stripped else ''
    accumulated = self.function.docstring
    if accumulated:
        accumulated += "\n"
    self.function.docstring = accumulated + text
def format_docstring(self):
    """Render the final docstring for self.function.

    Builds the signature first line (including '$' self marker,
    '*' / '/' markers and optional-group brackets), then the
    "{parameters}" substitution text, then the docstring body with
    its enforced summary line, and joins them via linear_format().
    """
    f = self.function

    new_or_init = f.kind in (METHOD_NEW, METHOD_INIT)
    if new_or_init and not f.docstring:
        # don't render a docstring at all, no signature, nothing.
        return f.docstring

    text, add, output = _text_accumulator()
    parameters = f.render_parameters

    ##
    ## docstring first line
    ##

    if new_or_init:
        # classes get *just* the name of the class
        # not __new__, not __init__, and not module.classname
        assert f.cls
        add(f.cls.name)
    else:
        add(f.name)
    add('(')

    # populate "right_bracket_count" field for every parameter
    assert parameters, "We should always have a self parameter. " + repr(f)
    assert isinstance(parameters[0].converter, self_converter)
    # self is always positional-only.
    assert parameters[0].is_positional_only()
    parameters[0].right_bracket_count = 0
    positional_only = True
    for p in parameters[1:]:
        if not p.is_positional_only():
            positional_only = False
        else:
            assert positional_only
        if positional_only:
            p.right_bracket_count = abs(p.group)
        else:
            # don't put any right brackets around non-positional-only parameters, ever.
            p.right_bracket_count = 0

    right_bracket_count = 0

    def fix_right_bracket_count(desired):
        # emit '[' / ']' until the running bracket depth matches desired
        nonlocal right_bracket_count
        s = ''
        while right_bracket_count < desired:
            s += '['
            right_bracket_count += 1
        while right_bracket_count > desired:
            s += ']'
            right_bracket_count -= 1
        return s

    need_slash = False
    added_slash = False
    need_a_trailing_slash = False

    # we only need a trailing slash:
    #   * if this is not a "docstring_only" signature
    #   * and if the last *shown* parameter is
    #     positional only
    if not f.docstring_only:
        for p in reversed(parameters):
            if not p.converter.show_in_signature:
                continue
            if p.is_positional_only():
                need_a_trailing_slash = True
            break

    added_star = False

    first_parameter = True
    last_p = parameters[-1]
    line_length = len(''.join(text))
    indent = " " * line_length

    def add_parameter(text):
        # append one rendered parameter, wrapping lines at 72 columns
        nonlocal line_length
        nonlocal first_parameter
        if first_parameter:
            s = text
            first_parameter = False
        else:
            s = ' ' + text
            if line_length + len(s) >= 72:
                add('\n')
                add(indent)
                line_length = len(indent)
                s = text
        line_length += len(s)
        add(s)

    for p in parameters:
        if not p.converter.show_in_signature:
            continue
        assert p.name

        is_self = isinstance(p.converter, self_converter)
        if is_self and f.docstring_only:
            # this isn't a real machine-parsable signature,
            # so let's not print the "self" parameter
            continue

        if p.is_positional_only():
            need_slash = not f.docstring_only
        elif need_slash and not (added_slash or p.is_positional_only()):
            added_slash = True
            add_parameter('/,')

        if p.is_keyword_only() and not added_star:
            added_star = True
            add_parameter('*,')

        p_add, p_output = text_accumulator()
        p_add(fix_right_bracket_count(p.right_bracket_count))

        if isinstance(p.converter, self_converter):
            # annotate first parameter as being a "self".
            #
            # if inspect.Signature gets this function,
            # and it's already bound, the self parameter
            # will be stripped off.
            #
            # if it's not bound, it should be marked
            # as positional-only.
            #
            # note: we don't print "self" for __init__,
            # because this isn't actually the signature
            # for __init__.  (it can't be, __init__ doesn't
            # have a docstring.)  if this is an __init__
            # (or __new__), then this signature is for
            # calling the class to construct a new instance.
            p_add('$')

        name = p.converter.signature_name or p.name
        p_add(name)

        if p.converter.is_optional():
            p_add('=')
            value = p.converter.py_default
            if not value:
                value = repr(p.converter.default)
            p_add(value)

        if (p != last_p) or need_a_trailing_slash:
            p_add(',')

        add_parameter(p_output())

    add(fix_right_bracket_count(0))
    if need_a_trailing_slash:
        add_parameter('/')
    add(')')

    # PEP 8 says:
    #
    #     The Python standard library will not use function annotations
    #     as that would result in a premature commitment to a particular
    #     annotation style. Instead, the annotations are left for users
    #     to discover and experiment with useful annotation styles.
    #
    # therefore this is commented out:
    #
    # if f.return_converter.py_default:
    #     add(' -> ')
    #     add(f.return_converter.py_default)

    if not f.docstring_only:
        add("\n" + sig_end_marker + "\n")

    docstring_first_line = output()

    # now fix up the places where the brackets look wrong
    docstring_first_line = docstring_first_line.replace(', ]', ',] ')

    # okay.  now we're officially building the "parameters" section.
    # create substitution text for {parameters}
    spacer_line = False
    for p in parameters:
        if not p.docstring.strip():
            continue
        if spacer_line:
            add('\n')
        else:
            spacer_line = True
        add("  ")
        add(p.name)
        add('\n')
        add(textwrap.indent(rstrip_lines(p.docstring.rstrip()), "    "))
    parameters = output()
    if parameters:
        parameters += '\n'

    ##
    ## docstring body
    ##

    docstring = f.docstring.rstrip()
    lines = [line.rstrip() for line in docstring.split('\n')]

    # Enforce the summary line!
    # The first line of a docstring should be a summary of the function.
    # It should fit on one line (80 columns? 79 maybe?) and be a paragraph
    # by itself.
    #
    # Argument Clinic enforces the following rule:
    #  * either the docstring is empty,
    #  * or it must have a summary line.
    #
    # Guido said Clinic should enforce this:
    # http://mail.python.org/pipermail/python-dev/2013-June/127110.html

    if len(lines) >= 2:
        if lines[1]:
            fail("Docstring for " + f.full_name + " does not have a summary line!\n" +
                "Every non-blank function docstring must start with\n" +
                "a single line summary followed by an empty line.")
    elif len(lines) == 1:
        # the docstring is only one line right now--the summary line.
        # add an empty line after the summary line so we have space
        # between it and the {parameters} we're about to add.
        lines.append('')

    parameters_marker_count = len(docstring.split('{parameters}')) - 1
    if parameters_marker_count > 1:
        fail('You may not specify {parameters} more than once in a docstring!')

    if not parameters_marker_count:
        # insert after summary line
        lines.insert(2, '{parameters}')

    # insert at front of docstring
    lines.insert(0, docstring_first_line)

    docstring = "\n".join(lines)

    add(docstring)
    docstring = output()

    docstring = linear_format(docstring, parameters=parameters)
    docstring = docstring.rstrip()

    return docstring
def state_terminal(self, line):
    """
    Called when processing the block is done.
    """
    assert not line
    if not self.function:
        return

    if self.keyword_only:
        # '*' was seen; there must be at least one keyword-only
        # parameter following it.
        params = list(self.function.parameters.values())
        if params:
            star_is_dangling = params[-1].kind != inspect.Parameter.KEYWORD_ONLY
        else:
            star_is_dangling = True
        if star_is_dangling:
            fail("Function " + self.function.name + " specifies '*' without any parameters afterwards.")

    # remove trailing whitespace from all parameter docstrings
    for _name, parameter in self.function.parameters.items():
        if not parameter:
            continue
        parameter.docstring = parameter.docstring.rstrip()

    self.function.docstring = self.format_docstring()
# maps strings to callables.
# the callable should return an object
# that implements the clinic parser
# interface (__init__ and parse).
#
# example parsers:
#   "clinic", handles the Clinic DSL
#   "python", handles running Python code
#
parsers = {'clinic' : DSLParser, 'python': PythonParser}

# module-level handle to the active Clinic instance,
# assigned while a file is being processed.
clinic = None
def main(argv):
    """Command-line entry point for clinic.py.

    argv -- argument list *without* the program name (sys.argv[1:]).

    Modes (mutually exclusive):
      --converters   list all converters and return converters, then exit
      --make         walk --srcdir and process every .c/.h file
      filenames      process the named files (optionally with -o OUTPUT)
    """
    import sys

    if sys.version_info.major < 3 or sys.version_info.minor < 3:
        sys.exit("Error: clinic.py requires Python 3.3 or greater.")

    import argparse
    cmdline = argparse.ArgumentParser(
        description="""Preprocessor for CPython C files.
The purpose of the Argument Clinic is automating all the boilerplate involved
with writing argument parsing code for builtins and providing introspection
signatures ("docstrings") for CPython builtins.
For more information see https://docs.python.org/3/howto/clinic.html""")
    cmdline.add_argument("-f", "--force", action='store_true')
    cmdline.add_argument("-o", "--output", type=str)
    cmdline.add_argument("-v", "--verbose", action='store_true')
    cmdline.add_argument("--converters", action='store_true')
    cmdline.add_argument("--make", action='store_true',
                         help="Walk --srcdir to run over all relevant files.")
    cmdline.add_argument("--srcdir", type=str, default=os.curdir,
                         help="The directory tree to walk in --make mode.")
    cmdline.add_argument("filename", type=str, nargs="*")
    ns = cmdline.parse_args(argv)

    if ns.converters:
        if ns.filename:
            print("Usage error: can't specify --converters and a filename at the same time.")
            print()
            cmdline.print_usage()
            sys.exit(-1)
        converters = []
        return_converters = []
        # helper registration functions that also end in "_converter"
        # but are not converters themselves
        ignored = set("""
            add_c_converter
            add_c_return_converter
            add_default_legacy_c_converter
            add_legacy_c_converter
            """.strip().split())
        module = globals()
        for name in module:
            for suffix, ids in (
                ("_return_converter", return_converters),
                ("_converter", converters),
            ):
                if name in ignored:
                    continue
                if name.endswith(suffix):
                    ids.append((name, name[:-len(suffix)]))
                    break
        print()

        print("Legacy converters:")
        legacy = sorted(legacy_converters)
        print(' ' + ' '.join(c for c in legacy if c[0].isupper()))
        print(' ' + ' '.join(c for c in legacy if c[0].islower()))
        print()

        for title, attribute, ids in (
            ("Converters", 'converter_init', converters),
            ("Return converters", 'return_converter_init', return_converters),
        ):
            print(title + ":")
            longest = -1
            for name, short_name in ids:
                longest = max(longest, len(short_name))
            # only keyword-only parameters of the init hook are shown
            for name, short_name in sorted(ids, key=lambda x: x[1].lower()):
                cls = module[name]
                callable = getattr(cls, attribute, None)
                if not callable:
                    continue
                signature = inspect.signature(callable)
                parameters = []
                for parameter_name, parameter in signature.parameters.items():
                    if parameter.kind == inspect.Parameter.KEYWORD_ONLY:
                        if parameter.default != inspect.Parameter.empty:
                            s = '{}={!r}'.format(parameter_name, parameter.default)
                        else:
                            s = parameter_name
                        parameters.append(s)
                print(' {}({})'.format(short_name, ', '.join(parameters)))
            print()
        print("All converters also accept (c_default=None, py_default=None, annotation=None).")
        print("All return converters also accept (py_default=None).")
        sys.exit(0)

    if ns.make:
        if ns.output or ns.filename:
            print("Usage error: can't use -o or filenames with --make.")
            print()
            cmdline.print_usage()
            sys.exit(-1)
        if not ns.srcdir:
            print("Usage error: --srcdir must not be empty with --make.")
            print()
            cmdline.print_usage()
            sys.exit(-1)
        for root, dirs, files in os.walk(ns.srcdir):
            # prune VCS and build directories from the walk in place
            for rcs_dir in ('.svn', '.git', '.hg', 'build', 'externals'):
                if rcs_dir in dirs:
                    dirs.remove(rcs_dir)
            for filename in files:
                if not (filename.endswith('.c') or filename.endswith('.h')):
                    continue
                path = os.path.join(root, filename)
                if ns.verbose:
                    print(path)
                parse_file(path, force=ns.force, verify=not ns.force)
        return

    if not ns.filename:
        cmdline.print_usage()
        sys.exit(-1)

    if ns.output and len(ns.filename) > 1:
        print("Usage error: can't use -o with multiple filenames.")
        print()
        cmdline.print_usage()
        sys.exit(-1)

    for filename in ns.filename:
        if ns.verbose:
            print(filename)
        parse_file(filename, output=ns.output, force=ns.force, verify=not ns.force)
if __name__ == "__main__":
    # standard CLI entry point; strip the program name for argparse
    sys.exit(main(sys.argv[1:]))
| 36.193638 | 163 | 0.565156 |
5447d098a54cc47f7d8a89ce1863f41998698878 | 3,356 | py | Python | analyzing_datasets/general/changing_timestamps.py | julio-navarro-lara/thesis_scripts | dab1faabd6004389aadb082d63b10bae848bf1c8 | [
"Apache-2.0"
] | 2 | 2020-09-25T16:34:43.000Z | 2021-07-05T06:38:18.000Z | analyzing_datasets/general/changing_timestamps.py | julio-navarro-lara/thesis_scripts | dab1faabd6004389aadb082d63b10bae848bf1c8 | [
"Apache-2.0"
] | null | null | null | analyzing_datasets/general/changing_timestamps.py | julio-navarro-lara/thesis_scripts | dab1faabd6004389aadb082d63b10bae848bf1c8 | [
"Apache-2.0"
] | null | null | null | #Copyright 2019 Julio Navarro
#Built at the University of Strasbourg (France). CSTB team @ ICube laboratory
#17/01/2019
#Script to change the timestamps in the dataset for a sequence
import sys
import csv
from datetime import datetime
import time
import math
# Expected number of columns in the input and output CSV rows.
number_fields_input = 13
number_fields_output = 13

# input_positions
# Column indices of the input CSV schema.
input_pos_id = 0
input_pos_timestamp = 1
input_pos_origin = 2
input_pos_service = 3
input_pos_source = 4
input_pos_destination = 5
input_pos_type = 6
input_pos_action = 7
input_pos_process_id = 8
input_pos_port_src = 9
input_pos_port_dst = 10
input_pos_log = 11
input_pos_tag = 12

# output_positions
# Column indices of the output CSV schema (identical to the input here).
output_pos_id = 0
output_pos_timestamp = 1
output_pos_origin = 2
output_pos_service = 3
output_pos_source = 4
output_pos_destination = 5
output_pos_type = 6
output_pos_action = 7
output_pos_process_id = 8
output_pos_port_src = 9
output_pos_port_dst = 10
output_pos_log = 11
output_pos_tag = 12
def extract_csv_file(filepath):
    """Read a CSV file and return its rows as a list of lists.

    NOTE(review): the file is opened in binary mode ('rb'), which is the
    Python 2 csv convention this script targets; under Python 3 csv
    requires text mode.
    """
    result = []
    with open(filepath,'rb') as f:
        reader = csv.reader(f)
        for row in reader:
            result.append(row)
    return result
def write_csv_file(filepath,input_list):
    """Write a list of rows to filepath as CSV.

    NOTE(review): binary mode ('wb') is the Python 2 csv convention;
    under Python 3 csv requires text mode with newline=''.
    """
    with open(filepath,'wb') as f:
        writer = csv.writer(f)
        writer.writerows(input_list)
def convert_string_time_2(string_time):
    """Convert a 'YYYY-MM-DD HH:MM:SS' string into a Unix timestamp.

    Uses time.mktime, so the string is interpreted in local time.
    """
    parsed = datetime.strptime(string_time, '%Y-%m-%d %H:%M:%S')
    return time.mktime(parsed.timetuple())
def dec2sex(deci):
    """Split a decimal hour value into an (hours, minutes, seconds) tuple.

    Adapted from:
    https://github.com/esheldon/sdsspy/blob/master/sdsspy/sandbox/convert.py
    """
    frac_hours, whole_hours = math.modf(deci)
    frac_minutes, whole_minutes = math.modf(frac_hours * 60)
    seconds = frac_minutes * 60.
    return (int(whole_hours), int(whole_minutes), seconds)
def get_stats_from_field(input_filepath, field):
    """Print a histogram of the values found in one CSV column.

    input_filepath -- path to the CSV log file
    field          -- column index whose values are counted
    """
    result = {}
    list_logs = extract_csv_file(input_filepath)
    for log_tuple in list_logs:
        value = log_tuple[field]
        if value in result:
            result[value] += 1
        else:
            result[value] = 1
    # Single-argument print() works under both Python 2 and Python 3;
    # the original 'print result' statement was Python 2 only.
    print(result)
    print("Size: " + str(len(result)))
def get_number_of_lines(input_filepath):
    """Print the number of rows in the CSV log file."""
    list_logs = extract_csv_file(input_filepath)
    # Single-argument print() works under both Python 2 and Python 3;
    # the original 'print "..."' statement was Python 2 only.
    print("Size: " + str(len(list_logs)))
def get_difference_time(input_filepath):
    """Print the time span covered by the log file (first vs. last row).

    Reads the timestamp column of the first and last rows and prints the
    difference in seconds, minutes and (decimal and sexagesimal) hours.
    """
    list_logs = extract_csv_file(input_filepath)
    time1 = list_logs[0][input_pos_timestamp]
    time2 = list_logs[-1][input_pos_timestamp]
    diff_time = float(time2) - float(time1)
    # Single-argument print() works under both Python 2 and Python 3;
    # the original 'print "..."' statements were Python 2 only.
    print("Diff in time:")
    print("Seconds: " + str(diff_time))
    print("Minutes: " + str(diff_time / 60))
    print("Hours: " + str(diff_time / 3600))
    print("Hours: " + str(dec2sex(diff_time / 3600)))
def change_timestamps(input_filepath,output_filepath):
    """Replace every row's timestamp with its sequential index and save.

    Rows keep their order; only the timestamp column is rewritten to
    0, 1, 2, ... before writing the result to output_filepath.
    """
    logs = extract_csv_file(input_filepath)
    renumbered = []
    for index, log_tuple in enumerate(logs):
        log_tuple[input_pos_timestamp] = index
        renumbered.append(log_tuple)
    write_csv_file(output_filepath, renumbered)
def main(input_filepath,output_filepath):
    """Renumber the timestamps of input_filepath into output_filepath.

    The commented-out calls below are alternative one-off analyses kept
    for reference.
    """
    #extract_file(input_filepath,output_filepath)
    #get_stats_from_field(input_filepath,input_pos_type)
    #get_number_of_lines(input_filepath)
    #get_difference_time(input_filepath)
    change_timestamps(input_filepath,output_filepath)
if __name__ == "__main__":
    # Hard-coded paths for the HuMa dataset run; the argv-based call is
    # kept commented out for generic use.
    #main(sys.argv[1],sys.argv[2])
    main("../../datasets/huma/agg_attacks.csv","../../datasets/huma/tests/sequential_time_agg_attacks.csv")
| 27.064516 | 107 | 0.723778 |
565cc4321609f7b9a3984b3debda2d5d8b5de88a | 1,271 | py | Python | tests/adapters/test_visa.py | NeoBeats/pymeasure | e48f9d679d6ee970e2e875d2fc9a5679378b07aa | [
"MIT"
] | null | null | null | tests/adapters/test_visa.py | NeoBeats/pymeasure | e48f9d679d6ee970e2e875d2fc9a5679378b07aa | [
"MIT"
] | null | null | null | tests/adapters/test_visa.py | NeoBeats/pymeasure | e48f9d679d6ee970e2e875d2fc9a5679378b07aa | [
"MIT"
] | null | null | null | #
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2020 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from pymeasure.adapters import VISAAdapter
def test_visa_version():
    # Smoke test: the installed PyVISA version must be one that
    # VISAAdapter declares supported.
    assert VISAAdapter.has_supported_version()
| 43.827586 | 79 | 0.782061 |
c0c0a1959fc3a09dd181717ac6f70bd758d9e279 | 51,384 | py | Python | detectron2/modeling/backbone/fpnmine.py | c-rbp/panoptic_segmentation | aa212d1d6e851857e0b9563bb94fe7297c987c1a | [
"Apache-2.0"
] | null | null | null | detectron2/modeling/backbone/fpnmine.py | c-rbp/panoptic_segmentation | aa212d1d6e851857e0b9563bb94fe7297c987c1a | [
"Apache-2.0"
] | 1 | 2021-08-23T08:04:48.000Z | 2021-08-23T08:04:48.000Z | detectron2/modeling/backbone/fpnmine.py | c-rbp/panoptic_segmentation | aa212d1d6e851857e0b9563bb94fe7297c987c1a | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import math
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from .rnns import hConvGRUCell, tdConvGRUCell , RBPFun, CBP_penalty
from .backbone import Backbone
from .build import BACKBONE_REGISTRY
from .resnet import build_resnet_backbone
from .gn import build_resnet_gn_backbone
from .gnbn import build_resnet_gnbn_backbone
from .gnbn_lowlevel import build_resnet_gnbn_lowlevel_backbone
__all__ = [
"build_resnet_fpngn_backbone",
"build_resnet_fpngn_gala_backbone",
"build_resnet_fpngn_cbp10_backbone",
"build_resnet_fpn_gn_backbone",
"build_resnet_fpn_gnbn_backbone",
"build_resnet_fpn_gnbn_lowlevel_backbone",
"build_retinanet_resnet_fpn_backbone",
"FPNGN",
"FPNGNFULL",
"FPN"]
class FPNINDI(Backbone):
"""
This module implements Feature Pyramid Network.
It creates pyramid features built on top of some input feature maps.
"""
def __init__(
    self,
    bottom_up,
    in_features,
    out_channels,
    norm="",
    gala=False,
    top_block=None,
    fuse_type="sum",
    grad_method='bptt',
    neumann_iterations=15,
    memory_mode=False,
    timesteps=3):
    """
    Args:
        bottom_up (Backbone): module representing the bottom up subnetwork.
            Must be a subclass of :class:`Backbone`. The multi-scale feature
            maps generated by the bottom up network, and listed in `in_features`,
            are used to generate FPN levels.
        in_features (list[str]): names of the input feature maps coming
            from the backbone to which FPN is attached. For example, if the
            backbone produces ["res2", "res3", "res4"], any *contiguous* sublist
            of these may be used; order must be from high to low resolution.
        out_channels (int): number of channels in the output feature maps.
        norm (str): the normalization to use.
        gala (bool): passed through to the horizontal hConvGRUCell units.
        top_block (nn.Module or None): if provided, an extra operation will
            be performed on the output of the last (smallest resolution)
            FPN output, and the result will extend the result list. The top_block
            further downsamples the feature map. It must have an attribute
            "num_levels", meaning the number of extra FPN levels added by
            this block, and "in_feature", which is a string representing
            its input feature (e.g., p5).
        fuse_type (str): types for fusing the top down features and the lateral
            ones. It can be "sum" (default), which sums up element-wise; or "avg",
            which takes the element-wise mean of the two.
        grad_method (str): one of 'bptt', 'cbp', 'rbp' (lowercased here);
            selects the recurrence/gradient scheme used in forward().
        neumann_iterations (int): iteration count stored for the
            neumann-series-based gradient path.
        memory_mode (bool): stored flag, consumed elsewhere.
        timesteps (int): number of recurrent timesteps for the GRU cells.
    """
    super(FPNINDI, self).__init__()
    assert isinstance(bottom_up, Backbone)
    # Feature map strides and channels from
    # the bottom up network (e.g. ResNet)
    self.grad_method = grad_method.lower()
    self.timesteps = timesteps
    self.neumann_iterations = neumann_iterations
    self.gala = gala
    self.memory_mode = memory_mode
    input_shapes = bottom_up.output_shape()
    in_strides = [input_shapes[f].stride for f in in_features]
    in_channels = [input_shapes[f].channels for f in in_features]
    _assert_strides_are_log2_contiguous(in_strides)
    lateral_convs = []
    output_convs = []
    td_convs = []
    td_mapping = []
    horizontal_names = []
    stages = []
    # norms carry their own affine params, so bias only when no norm
    use_bias = norm == ""
    for idx, it_in_channels in enumerate(in_channels):
        lateral_norm = get_norm(norm, out_channels)
        output_norm = get_norm(norm, out_channels)
        # Feedforward connections
        lateral_conv = Conv2d(
            it_in_channels,
            out_channels,
            kernel_size=1,
            bias=use_bias,
            norm=lateral_norm
        )
        weight_init.c2_xavier_fill(lateral_conv)
        # Horizontal connections
        output_conv = hConvGRUCell(
            input_size=out_channels,
            hidden_size=out_channels,
            kernel_size=3,
            batchnorm=True,
            timesteps=timesteps,
            gala=self.gala,
            norm=norm,
            grad_method=self.grad_method)
        stage = int(math.log2(in_strides[idx]))
        stages += [stage]
        self.add_module("fpn_lateral{}".format(stage), lateral_conv)
        self.add_module("fpn_output{}".format(stage), output_conv)
        lateral_convs.append(lateral_conv)
        output_convs.append(output_conv)
        horizontal_names.append("fpn_lateral{}".format(stage))
        # TD connections
        if idx < (len(in_channels) - 1):
            # Treat idx as the higher layer. Mapping is high -> low.
            td_conv = tdConvGRUCell(
                fan_in=out_channels,
                td_fan_in=out_channels,
                diff_fan_in=out_channels,
                kernel_size=3,
                batchnorm=True,
                timesteps=timesteps,
                norm=norm,
                grad_method=self.grad_method)
        else:
            # highest (coarsest) level has no layer above it: plain conv
            td_conv = Conv2d(
                out_channels,
                out_channels,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=use_bias,
                norm=output_norm)
            weight_init.c2_xavier_fill(td_conv)
        self.add_module("fpn_topdown{}".format(stage), td_conv)
        td_convs.append(td_conv)
        td_mapping += [[stage, int(math.log2(in_strides[idx - 1]))]]
    # Place convs into top-down order (from low to high resolution)
    # to make the top-down computation in forward clearer.
    self.lateral_convs = lateral_convs[::-1]
    self.output_convs = output_convs[::-1]
    self.td_convs = td_convs[::-1]
    self.horizontal_names = horizontal_names[::-1]
    self.td_mapping = td_mapping[::-1]
    self.stages = stages[::-1]
    self.top_block = top_block
    self.in_features = in_features
    self.bottom_up = bottom_up
    # Return feature names are "p<stage>", like ["p2", "p3", ..., "p6"]
    self._out_feature_strides = {
        "p{}".format(int(math.log2(s))): s for s in in_strides}
    # top block output feature maps.
    if self.top_block is not None:
        for s in range(stage, stage + self.top_block.num_levels):
            self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1)
    self._out_features = list(self._out_feature_strides.keys())
    self._out_feature_channels = {
        k: out_channels for k in self._out_features}
    self._size_divisibility = in_strides[-1]
    assert fuse_type in {"avg", "sum"}
    self._fuse_type = fuse_type
    @property
    def size_divisibility(self):
        # Inputs to ``forward`` should be padded to a multiple of this value
        # (set to the coarsest input stride in ``__init__``) so every pyramid
        # level downsamples evenly.
        return self._size_divisibility
    def forward(self, x):
        """
        Run the recurrent FPN and collect one output map per pyramid level.

        Args:
            input (dict[str->Tensor]): mapping feature map name (e.g., "res5") to
                feature map tensor for each feature level in high to low resolution order.
        Returns:
            dict[str->Tensor]:
                mapping from feature map name to FPN feature map tensor
                in high to low resolution order. Returned feature names follow the FPN
                paper convention: "p<stage>", where stage has stride = 2 ** stage e.g.,
                ["p2", "p3", ..., "p6"].
                When ``grad_method == 'cbp'`` the dict additionally carries the
                contraction penalty under the key ``'penalty'``.
        """
        # Reverse feature maps into top-down order (from low to high resolution)
        bottom_up_features = self.bottom_up(x)
        x = [bottom_up_features[f] for f in self.in_features[::-1]]
        # Pre-allocate one slot per pyramid level; filled by neumann()/bptt().
        results = [[] for _ in range(len(self.stages))]
        num_layers = len(self.stages)
        hidden_states = {}
        lateral_activities = {}
        # Dispatch on the gradient method: 'cbp'/'rbp' use the Neumann-series
        # approximation of recurrent backprop; 'bptt' unrolls through time.
        if self.grad_method == 'cbp' or self.grad_method == 'rbp':
            results, penalty = self.neumann(
                x=x,
                num_layers=num_layers,
                hidden_states=hidden_states,
                lateral_activities=lateral_activities,
                results=results)
        elif self.grad_method == 'bptt':
            results = self.bptt(
                x=x,
                num_layers=num_layers,
                hidden_states=hidden_states,
                lateral_activities=lateral_activities,
                results=results)
        # NOTE(review): an unrecognized grad_method leaves ``results`` as empty
        # lists and ``penalty`` unbound; the assert below would then fire (or a
        # NameError for 'cbp'-like values). Presumably validated upstream.
        # Finish up
        if self.top_block is not None:
            top_block_in_feature = bottom_up_features.get(
                self.top_block.in_feature, None)
            if top_block_in_feature is None:
                top_block_in_feature = results[
                    self._out_features.index(self.top_block.in_feature)]
            results.extend(self.top_block(top_block_in_feature))
        assert len(self._out_features) == len(results)
        out_dict = dict(zip(self._out_features, results))
        if self.grad_method == 'cbp':
            out_dict['penalty'] = penalty
        return out_dict
    def neumann(self, x, num_layers, hidden_states, lateral_activities, results):
        """Run neumann RBP.

        Approximates recurrent backprop with a truncated Neumann series:
        (1) roll the horizontal recurrence forward for ``timesteps - 1`` steps
        with autograd disabled (only the lateral convs, computed once at
        n == 0, keep a graph); (2) detach the hidden states; (3) record one
        extra recurrent step on the graph; (4) apply ``RBPFun`` per level to
        patch the backward pass and accumulate the CBP penalty; (5) run one
        final top-down pass to produce the outputs.

        ``num_layers`` is accepted for interface parity with ``bptt`` but is
        not used here. ``hidden_states``/``lateral_activities``/``results``
        are filled in place. Returns ``(results, penalty)`` with ``results``
        reversed into fine-to-coarse order.
        """
        # Now move through remaining layers (with feedback)
        # Phase 1: gradient-free rollout of the horizontal recurrence.
        with torch.no_grad():
            for n in range(self.timesteps - 1):
                for layer_idx, (
                        features,
                        lateral_conv,
                        output_conv,
                        h_name,
                        stage,
                        td_conv,
                        td_map) in enumerate(
                            zip(
                                x,
                                self.lateral_convs,
                                self.output_convs,
                                self.horizontal_names,
                                self.stages,
                                self.td_convs,
                                self.td_mapping)):
                    # FF connections -- only compute once
                    stage = "fpn_lateral{}".format(stage)
                    if n == 0:
                        # Lateral convs run under enable_grad so their weights
                        # still receive gradients despite the outer no_grad.
                        with torch.enable_grad():
                            lateral_features = lateral_conv(features)
                        lateral_activities[stage] = lateral_features
                    else:
                        lateral_features = lateral_activities[stage]
                    # Horizontal connections; hidden state starts at zero.
                    if n == 0:
                        hidden_states[stage] = torch.zeros_like(lateral_features)  # noqa
                        # hidden_states[stage] = F.softplus(lateral_features)
                    recurrent_features = output_conv(
                        input_=lateral_features,
                        h_=hidden_states[stage])
                    hidden_states[stage] = recurrent_features
        # Detach and require grads for each of the hidden states so the next
        # (recorded) step starts from autograd leaves.
        #print('hidden', [v.mean().item() for v in hidden_states.values()])
        prev_states = {}
        for k, v in hidden_states.items():
            if not v.requires_grad:
                v = v.clone().detach().requires_grad_()
            prev_states[k] = v
        # Phase 2: one additional horizontal step, recorded on the graph,
        # starting from the detached states.
        for layer_idx, (
                features,
                lateral_conv,
                output_conv,
                h_name,
                stage,
                td_conv,
                td_map) in enumerate(
                    zip(
                        x,
                        self.lateral_convs,
                        self.output_convs,
                        self.horizontal_names,
                        self.stages,
                        self.td_convs,
                        self.td_mapping)):
            # FF connections -- only compute once
            stage = "fpn_lateral{}".format(stage)
            lateral_features = lateral_activities[stage]
            # Horizontal connections
            recurrent_features = output_conv(
                input_=lateral_features,
                h_=prev_states[stage])
            hidden_states[stage] = recurrent_features
        # Phase 3: fixed-point correction (Jacobians) from top-to-bottom.
        penalty = 0.
        # print('hidden2', [v.mean().item() for v in hidden_states.values()])
        # print('prev2', [v.mean().item() for v in prev_states.values()])
        # print('\n')
        #import pdb; pdb.set_trace()
        for layer_idx, stage in enumerate(self.stages[1:]):
            # Map pyramid stage s to results index s - 2 (p2 lives at index 0).
            corrected_idx = stage - 2
            stage_name = 'fpn_lateral{}'.format(stage)
            prev_state = prev_states[stage_name]
            last_state = hidden_states[stage_name]
            internal_state = RBPFun.apply(
                prev_state,
                last_state,
                0,
                0,
                stage_name,
                self.neumann_iterations)
            results[corrected_idx] = internal_state
            if self.memory_mode:
                raise NotImplementedError(
                    'Need one more Top-Down pass here.')
            # Accrue the penalties (Hessian term only for 'cbp').
            #import pdb; pdb.set_trace()
            penalty = penalty + CBP_penalty(
                last_state=last_state,
                prev_state=prev_state,
                compute_hessian=(self.grad_method == 'cbp'))
        # Phase 4: one last pass for Output convs/TDs.
        # NOTE(review): this loop overwrites every results[layer_idx] slot,
        # including the RBP-corrected states stored just above, and feeds
        # td_conv with ``recurrent_features`` — which at this point is simply
        # the value left over from Phase 2's final iteration, not this
        # layer's own state. It looks like ``hidden_states[stage]`` was
        # intended; ``lateral_features`` computed below is also unused. TODO
        # confirm against the training code before changing behavior.
        for layer_idx, (
                features,
                lateral_conv,
                output_conv,
                h_name,
                stage,
                td_conv,
                td_map) in enumerate(
                    zip(
                        x,
                        self.lateral_convs,
                        self.output_convs,
                        self.horizontal_names,
                        self.stages,
                        self.td_convs,
                        self.td_mapping)):
            # FF connections -- only compute once
            stage = "fpn_lateral{}".format(stage)
            lateral_features = lateral_activities[stage]
            # Replace their TD with ours
            # Gather activity from one layer above
            if layer_idx > 0:
                higher_name = 'fpn_lateral{}'.format(
                    self.stages[layer_idx - 1])
                # higher_activity = hidden_states[higher_name]
                higher_activity = lateral_activities[higher_name]
                prev_features = td_conv(
                    lower_=recurrent_features,
                    higher_=higher_activity)
                if not self.memory_mode:
                    hidden_states[stage] = prev_features
                results[layer_idx] = prev_features
                #print('TD {} -> {}'.format(higher_name, stage))
            else:
                results[layer_idx] = td_conv(recurrent_features)
        # Flip to fine-to-coarse order expected by forward().
        results = results[::-1]
        return results, penalty
    def bptt(self, x, num_layers, hidden_states, lateral_activities, results):
        """Run backprop through time.

        Unrolls the horizontal recurrence for ``timesteps - 1`` steps with
        full autograd, then performs one final step with the top-down pass
        to produce the per-level outputs. ``num_layers`` is accepted for
        interface parity with ``neumann`` but is not used here; the dict
        arguments are filled in place. Returns ``results`` reversed into
        fine-to-coarse order.
        """
        # Now move through remaining layers (with feedback)
        for n in range(self.timesteps - 1):
            for layer_idx, (
                    features,
                    lateral_conv,
                    output_conv,
                    h_name,
                    stage,
                    td_conv,
                    td_map) in enumerate(
                        zip(
                            x,
                            self.lateral_convs,
                            self.output_convs,
                            self.horizontal_names,
                            self.stages,
                            self.td_convs,
                            self.td_mapping)):
                # FF connections -- only compute once
                stage = "fpn_lateral{}".format(stage)
                if n == 0:
                    lateral_features = lateral_conv(features)
                    lateral_activities[stage] = lateral_features
                else:
                    lateral_features = lateral_activities[stage]
                # Horizontal connections; hidden state starts at zero.
                if n == 0:
                    hidden_states[stage] = torch.zeros_like(lateral_features)  # noqa
                    # hidden_states[stage] = F.softplus(lateral_features)
                recurrent_features = output_conv(
                    input_=lateral_features,
                    h_=hidden_states[stage])
                hidden_states[stage] = recurrent_features
        # Final step: one more horizontal update plus the top-down pass.
        for layer_idx, (
                features,
                lateral_conv,
                output_conv,
                h_name,
                stage,
                td_conv,
                td_map) in enumerate(
                    zip(
                        x,
                        self.lateral_convs,
                        self.output_convs,
                        self.horizontal_names,
                        self.stages,
                        self.td_convs,
                        self.td_mapping)):
            # FF connections -- only compute once
            stage = "fpn_lateral{}".format(stage)
            lateral_features = lateral_activities[stage]
            # Horizontal connections
            recurrent_features = output_conv(
                input_=lateral_features,
                h_=hidden_states[stage])
            hidden_states[stage] = recurrent_features
            # Replace their TD with ours
            # Gather activity from one layer above (lateral, not hidden, state).
            if layer_idx > 0:
                higher_name = 'fpn_lateral{}'.format(
                    self.stages[layer_idx - 1])
                # higher_activity = hidden_states[higher_name]
                higher_activity = lateral_activities[higher_name]
                prev_features = td_conv(
                    lower_=recurrent_features,
                    higher_=higher_activity)
                if not self.memory_mode:
                    hidden_states[stage] = prev_features
                results[layer_idx] = prev_features
                #print('TD {} -> {}'.format(higher_name, stage))
            else:
                # Coarsest level has a plain Conv2d as its td module.
                results[layer_idx] = td_conv(recurrent_features)
        # Flip to fine-to-coarse order expected by forward().
        results = results[::-1]
        return results
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name],
stride=self._out_feature_strides[name]
)
for name in self._out_features
}
class FPNGN(Backbone):
    """
    This module implements a recurrent Feature Pyramid Network.

    Pyramid levels are built from bottom-up backbone features with lateral
    1x1 convs, horizontal ``hConvGRUCell`` recurrences at every level except
    the coarsest (which uses a plain 3x3 conv), and top-down ``tdConvGRUCell``
    feedback between adjacent levels, iterated for ``timesteps`` steps.
    Gradients flow either by unrolling (``grad_method='bptt'``) or via a
    Neumann-series approximation of recurrent backprop (``'rbp'``/``'cbp'``;
    the latter also returns a contraction penalty in the output dict).
    """

    def __init__(
        self,
        bottom_up,
        in_features,
        out_channels,
        norm="",
        gala=False,
        top_block=None,
        fuse_type="sum",
        grad_method='bptt',
        neumann_iterations=15,
        memory_mode=False,
        timesteps=3):
        """
        Args:
            bottom_up (Backbone): module representing the bottom up subnetwork.
                Must be a subclass of :class:`Backbone`. The multi-scale feature
                maps generated by the bottom up network, and listed in `in_features`,
                are used to generate FPN levels.
            in_features (list[str]): names of the input feature maps coming
                from the backbone to which FPN is attached. For example, if the
                backbone produces ["res2", "res3", "res4"], any *contiguous* sublist
                of these may be used; order must be from high to low resolution.
            out_channels (int): number of channels in the output feature maps.
            norm (str): the normalization to use.
            gala (bool): whether the horizontal GRU cells use GALA attention.
            top_block (nn.Module or None): if provided, an extra operation will
                be performed on the output of the last (smallest resolution)
                FPN output, and the result will extend the result list. The top_block
                further downsamples the feature map. It must have an attribute
                "num_levels", meaning the number of extra FPN levels added by
                this block, and "in_feature", which is a string representing
                its input feature (e.g., p5).
            fuse_type (str): types for fusing the top down features and the lateral
                ones. It can be "sum" (default), which sums up element-wise; or "avg",
                which takes the element-wise mean of the two.
            grad_method (str): 'bptt', 'rbp' or 'cbp'; selects how gradients
                flow through the recurrence.
            neumann_iterations (int): Neumann-series length for 'rbp'/'cbp'.
            memory_mode (bool): if True, hidden states are not overwritten by
                the top-down pass (only partially implemented).
            timesteps (int): number of recurrent iterations.
        """
        super(FPNGN, self).__init__()
        assert isinstance(bottom_up, Backbone)
        # Feature map strides and channels from
        # the bottom up network (e.g. ResNet)
        self.grad_method = grad_method.lower()
        self.timesteps = timesteps
        self.neumann_iterations = neumann_iterations
        self.gala = gala
        self.memory_mode = memory_mode
        input_shapes = bottom_up.output_shape()
        in_strides = [input_shapes[f].stride for f in in_features]
        in_channels = [input_shapes[f].channels for f in in_features]
        _assert_strides_are_log2_contiguous(in_strides)
        lateral_convs = []
        output_convs = []
        td_convs = []
        td_mapping = []
        horizontal_names = []
        stages = []
        # Norm layers carry their own bias, so convs only need one without norm.
        use_bias = norm == ""
        for idx, it_in_channels in enumerate(in_channels):
            lateral_norm = get_norm(norm, out_channels)
            output_norm = get_norm(norm, out_channels)
            # Feedforward (lateral) 1x1 projection to out_channels.
            lateral_conv = Conv2d(
                it_in_channels,
                out_channels,
                kernel_size=1,
                bias=use_bias,
                norm=lateral_norm
            )
            weight_init.c2_xavier_fill(lateral_conv)
            if idx < (len(in_channels) - 1):
                # Horizontal recurrent connection for all but the coarsest level.
                output_conv = hConvGRUCell(
                    input_size=out_channels,
                    hidden_size=out_channels,
                    kernel_size=3,
                    batchnorm=True,
                    timesteps=timesteps,
                    gala=self.gala,
                    norm=norm,
                    grad_method=self.grad_method)
            else:
                # The coarsest level is handled specially: a plain 3x3 conv.
                output_conv = Conv2d(
                    out_channels,
                    out_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=use_bias,
                    norm=output_norm)
                weight_init.c2_xavier_fill(output_conv)
            stage = int(math.log2(in_strides[idx]))
            stages += [stage]
            self.add_module("fpn_lateral{}".format(stage), lateral_conv)
            self.add_module("fpn_output{}".format(stage), output_conv)
            lateral_convs.append(lateral_conv)
            output_convs.append(output_conv)
            horizontal_names.append("fpn_lateral{}".format(stage))
            # Top-down connections: every level except the finest receives
            # feedback from the level above (mapping is high -> low).
            if idx > 0:
                td_conv = tdConvGRUCell(
                    fan_in=out_channels,
                    td_fan_in=out_channels,
                    diff_fan_in=out_channels,
                    kernel_size=1,
                    batchnorm=True,
                    timesteps=timesteps,
                    norm=norm,
                    grad_method=self.grad_method)
                self.add_module("fpn_topdown{}".format(stage), td_conv)
                td_convs.append(td_conv)
                td_mapping += [[stage, int(math.log2(in_strides[idx - 1]))]]
        # Place convs into top-down order (from low to high resolution)
        # to make the top-down computation in forward clearer.
        self.lateral_convs = lateral_convs[::-1]
        self.output_convs = output_convs[::-1]
        self.td_convs = td_convs[::-1]
        self.horizontal_names = horizontal_names[::-1]
        self.td_mapping = td_mapping[::-1]
        self.stages = stages[::-1]
        self.top_block = top_block
        self.in_features = in_features
        self.bottom_up = bottom_up
        # Return feature names are "p<stage>", like ["p2", "p3", ..., "p6"]
        self._out_feature_strides = {
            "p{}".format(int(math.log2(s))): s for s in in_strides}
        # top block output feature maps ("stage" leaks from the loop above:
        # it is the coarsest stage at this point).
        if self.top_block is not None:
            for s in range(stage, stage + self.top_block.num_levels):
                self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1)
        self._out_features = list(self._out_feature_strides.keys())
        self._out_feature_channels = {
            k: out_channels for k in self._out_features}
        self._size_divisibility = in_strides[-1]
        assert fuse_type in {"avg", "sum"}
        self._fuse_type = fuse_type

    @property
    def size_divisibility(self):
        # Inputs should be padded to a multiple of the coarsest stride.
        return self._size_divisibility

    def forward(self, x):
        """
        Args:
            input (dict[str->Tensor]): mapping feature map name (e.g., "res5") to
                feature map tensor for each feature level in high to low resolution order.
        Returns:
            dict[str->Tensor]:
                mapping from feature map name to FPN feature map tensor
                in high to low resolution order. Returned feature names follow the FPN
                paper convention: "p<stage>", where stage has stride = 2 ** stage e.g.,
                ["p2", "p3", ..., "p6"]. With ``grad_method == 'cbp'`` the dict
                also carries the contraction penalty under ``'penalty'``.
        """
        # Reverse feature maps into top-down order (from low to high resolution)
        bottom_up_features = self.bottom_up(x)
        x = [bottom_up_features[f] for f in self.in_features[::-1]]
        results = [[] for _ in range(len(self.stages))]
        num_layers = len(self.stages)
        hidden_states = {}
        lateral_activities = {}
        # Run transformation on the coarsest level; it has no recurrence and
        # seeds the hidden state that the top-down cells read from.
        prev_features = self.lateral_convs[0](x[0])
        # if self.debug:
        #     results.insert(0, self.output_convs[0](prev_features))
        #else:
        results[-1] = self.output_convs[0](prev_features)  # noqa Eventually convert this to recurrent
        stage = "fpn_lateral{}".format(self.stages[0])
        hidden_states[stage] = prev_features
        if self.grad_method == 'cbp' or self.grad_method == 'rbp':
            results, penalty = self.neumann(
                x=x,
                num_layers=num_layers,
                hidden_states=hidden_states,
                lateral_activities=lateral_activities,
                results=results)
        elif self.grad_method == 'bptt':
            results = self.bptt(
                x=x,
                num_layers=num_layers,
                hidden_states=hidden_states,
                lateral_activities=lateral_activities,
                results=results)
        # Finish up
        if self.top_block is not None:
            top_block_in_feature = bottom_up_features.get(
                self.top_block.in_feature, None)
            if top_block_in_feature is None:
                top_block_in_feature = results[
                    self._out_features.index(self.top_block.in_feature)]
            results.extend(self.top_block(top_block_in_feature))
        assert len(self._out_features) == len(results)
        out_dict = dict(zip(self._out_features, results))
        if self.grad_method == 'cbp':
            out_dict['penalty'] = penalty
        return out_dict

    def neumann(self, x, num_layers, hidden_states, lateral_activities, results):
        """Run neumann RBP.

        Phase 1 rolls the recurrence (horizontal + top-down) forward for
        ``timesteps - 1`` steps without autograd (lateral convs keep a graph
        via enable_grad at n == 0); phase 2 detaches the hidden states and
        records one extra step on the graph; phase 3 applies ``RBPFun`` per
        level and accumulates the CBP penalty. ``num_layers`` is unused here
        (kept for interface parity with ``bptt``). Returns
        ``(results, penalty)``; results keep p2-first order (index = stage - 2).
        """
        # Now move through remaining layers (with feedback); the coarsest
        # level was already seeded by forward().
        with torch.no_grad():
            for n in range(self.timesteps - 1):
                #print('hidden', n, [v.mean().item() for v in hidden_states.values()])
                for layer_idx, (
                        features,
                        lateral_conv,
                        output_conv,
                        h_name,
                        stage,
                        td_conv,
                        td_map) in enumerate(
                            zip(
                                x[1:],
                                self.lateral_convs[1:],
                                self.output_convs[1:],
                                self.horizontal_names[1:],
                                self.stages[1:],
                                self.td_convs,
                                self.td_mapping)):
                    # FF connections -- only compute once
                    stage = "fpn_lateral{}".format(stage)
                    if n == 0:
                        with torch.enable_grad():
                            lateral_features = lateral_conv(features)
                        lateral_activities[stage] = lateral_features
                    else:
                        lateral_features = lateral_activities[stage]
                    # Horizontal connections; hidden state starts at zero.
                    if n == 0:
                        hidden_states[stage] = torch.zeros_like(lateral_features)  # noqa
                        # hidden_states[stage] = F.softplus(lateral_features)
                    lateral_features = output_conv(
                        input_=lateral_features,
                        h_=hidden_states[stage])
                    hidden_states[stage] = lateral_features
                    # Top-down feedback from the level above (hidden state).
                    higher_name = 'fpn_lateral{}'.format(
                        self.stages[layer_idx])
                    higher_activity = hidden_states[higher_name]
                    prev_features = td_conv(
                        lower_=lateral_features,
                        higher_=higher_activity)
                    hidden_states[stage] = prev_features
        # TODO: Add top-block below to recurrent loop
        # Detach and require_grads for each of the hidden_states
        #print('hidden', [v.mean().item() for v in hidden_states.values()])
        prev_states = {}
        for k, v in hidden_states.items():
            if not v.requires_grad:
                v = v.clone().detach().requires_grad_()
            prev_states[k] = v
        #print('prev', [v.mean().item() for v in prev_states.values()])
        # Compute last timestep (on the graph) and update hidden_states again
        for layer_idx, (
                features,
                lateral_conv,
                output_conv,
                h_name,
                stage,
                td_conv,
                td_map) in enumerate(
                    zip(
                        x[1:],
                        self.lateral_convs[1:],
                        self.output_convs[1:],
                        self.horizontal_names[1:],
                        self.stages[1:],
                        self.td_convs,
                        self.td_mapping)):
            # FF connections -- only compute once
            stage = "fpn_lateral{}".format(stage)
            lateral_features = lateral_activities[stage]
            # Horizontal connections
            lateral_features = output_conv(
                input_=lateral_features,
                h_=prev_states[stage])
            hidden_states[stage] = lateral_features
            # Replace their TD with ours
            # Gather activity from one layer above
            higher_name = 'fpn_lateral{}'.format(
                self.stages[layer_idx])
            # print(stage, higher_name)
            higher_activity = hidden_states[higher_name]
            prev_features = td_conv(
                lower_=lateral_features,
                higher_=higher_activity)
            if not self.memory_mode:
                hidden_states[stage] = prev_features
        # Compute jacobians from top-to-bottom
        penalty = 0.
        # print('hidden2', [v.mean().item() for v in hidden_states.values()])
        # print('prev2', [v.mean().item() for v in prev_states.values()])
        # print('\n')
        #import pdb; pdb.set_trace()
        for layer_idx, stage in enumerate(self.stages[1:]):
            # Map stage s to its slot in results (p2 lives at index 0).
            corrected_idx = stage - 2
            stage_name = 'fpn_lateral{}'.format(stage)
            prev_state = prev_states[stage_name]
            last_state = hidden_states[stage_name]
            internal_state = RBPFun.apply(
                prev_state,
                last_state,
                0,
                0,
                stage_name,
                self.neumann_iterations)
            results[corrected_idx] = internal_state
            if self.memory_mode:
                raise NotImplementedError(
                    'Need one more Top-Down pass here.')
            # Accrue the penalties (Hessian term only for 'cbp').
            #import pdb; pdb.set_trace()
            penalty = penalty + CBP_penalty(
                last_state=last_state,
                prev_state=prev_state,
                compute_hessian=(self.grad_method == 'cbp'))
        return results, penalty

    def bptt(self, x, num_layers, hidden_states, lateral_activities, results):
        """Run backprop through time.

        Unrolls ``timesteps`` full steps (horizontal + top-down) with
        autograd enabled and writes each level's final state into
        ``results`` at index ``num_layers - layer_idx - 2`` (p2-first
        ordering, with the coarsest slot already filled by forward()).
        """
        # Now move through remaining layers (with feedback)
        for n in range(self.timesteps):
            for layer_idx, (
                    features,
                    lateral_conv,
                    output_conv,
                    h_name,
                    stage,
                    td_conv,
                    td_map) in enumerate(
                        zip(
                            x[1:],
                            self.lateral_convs[1:],
                            self.output_convs[1:],
                            self.horizontal_names[1:],
                            self.stages[1:],
                            self.td_convs,
                            self.td_mapping)):
                corrected_idx = num_layers - layer_idx - 2
                # FF connections -- only compute once
                stage = "fpn_lateral{}".format(stage)
                if n == 0:
                    lateral_features = lateral_conv(features)
                    lateral_activities[stage] = lateral_features
                else:
                    lateral_features = lateral_activities[stage]
                # Horizontal connections; hidden state starts at zero.
                if n == 0:
                    hidden_states[stage] = torch.zeros_like(lateral_features)  # noqa
                    # hidden_states[stage] = F.softplus(lateral_features)
                lateral_features = output_conv(
                    input_=lateral_features,
                    h_=hidden_states[stage])
                hidden_states[stage] = lateral_features
                # Replace their TD with ours
                # Gather activity from one layer above
                higher_name = 'fpn_lateral{}'.format(
                    self.stages[layer_idx])
                higher_activity = hidden_states[higher_name]
                prev_features = td_conv(
                    lower_=lateral_features,
                    higher_=higher_activity)
                if not self.memory_mode:
                    hidden_states[stage] = prev_features
                results[corrected_idx] = prev_features
        # TODO: Add top-block below to recurrent loop
        return results

    def output_shape(self):
        """Describe every output level as a :class:`ShapeSpec` (channels + stride)."""
        return {
            name: ShapeSpec(
                channels=self._out_feature_channels[name],
                stride=self._out_feature_strides[name]
            )
            for name in self._out_features
        }
class FPN(Backbone):
    """
    Feature Pyramid Network (Lin et al., CVPR 2017).

    Combines lateral 1x1 projections of bottom-up backbone features with
    nearest-neighbor-upsampled top-down signals to produce a pyramid of
    feature maps that all share ``out_channels`` channels.
    """

    def __init__(
        self,
        bottom_up,
        in_features,
        out_channels,
        norm="",
        top_block=None,
        fuse_type="sum"
    ):
        """
        Args:
            bottom_up (Backbone): bottom-up subnetwork (a :class:`Backbone`
                subclass) whose multi-scale feature maps, listed in
                ``in_features``, feed the FPN levels.
            in_features (list[str]): names of the backbone feature maps to
                attach to, ordered from high to low resolution; must be a
                contiguous sublist of what the backbone produces.
            out_channels (int): channel count of every output feature map.
            norm (str): normalization to use ("" disables it).
            top_block (nn.Module or None): optional extra op applied to the
                coarsest FPN output; its results extend the output list. It
                must expose ``num_levels`` (number of extra levels) and
                ``in_feature`` (name of its input map, e.g. "p5").
            fuse_type (str): how lateral and top-down features are fused:
                "sum" (element-wise sum, default) or "avg" (element-wise mean).
        """
        super(FPN, self).__init__()
        assert isinstance(bottom_up, Backbone)
        # Strides/channels of the bottom-up maps (e.g. a ResNet).
        shapes = bottom_up.output_shape()
        in_strides = [shapes[f].stride for f in in_features]
        in_channels = [shapes[f].channels for f in in_features]
        _assert_strides_are_log2_contiguous(in_strides)
        # A norm layer supplies its own bias term.
        use_bias = norm == ""
        lateral_convs = []
        output_convs = []
        for stride, channels in zip(in_strides, in_channels):
            lateral_norm = get_norm(norm, out_channels)
            output_norm = get_norm(norm, out_channels)
            lat_conv = Conv2d(
                channels, out_channels, kernel_size=1, bias=use_bias,
                norm=lateral_norm
            )
            out_conv = Conv2d(
                out_channels,
                out_channels,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=use_bias,
                norm=output_norm,
            )
            weight_init.c2_xavier_fill(lat_conv)
            weight_init.c2_xavier_fill(out_conv)
            stage = int(math.log2(stride))
            self.add_module("fpn_lateral{}".format(stage), lat_conv)
            self.add_module("fpn_output{}".format(stage), out_conv)
            lateral_convs.append(lat_conv)
            output_convs.append(out_conv)
        # Store convs coarse-to-fine so the top-down loop in forward() can
        # simply walk them front to back.
        self.lateral_convs = lateral_convs[::-1]
        self.output_convs = output_convs[::-1]
        self.top_block = top_block
        self.in_features = in_features
        self.bottom_up = bottom_up
        # Output names follow the FPN convention "p<stage>" (stride 2**stage).
        self._out_feature_strides = {
            "p{}".format(int(math.log2(s))): s for s in in_strides
        }
        # Extra levels contributed by the top block; ``stage`` still holds
        # the coarsest stage from the loop above.
        if self.top_block is not None:
            for s in range(stage, stage + self.top_block.num_levels):
                self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1)
        self._out_features = list(self._out_feature_strides.keys())
        self._out_feature_channels = {k: out_channels for k in self._out_features}
        self._size_divisibility = in_strides[-1]
        assert fuse_type in {"avg", "sum"}
        self._fuse_type = fuse_type

    @property
    def size_divisibility(self):
        # Inputs should be padded to a multiple of the coarsest stride.
        return self._size_divisibility

    def forward(self, x):
        """
        Args:
            x: input to the bottom-up backbone (it maps feature names such as
                "res5" to tensors, high to low resolution).

        Returns:
            dict[str->Tensor]: feature name -> FPN feature map, high to low
            resolution, named "p<stage>" with stride 2**stage
            (e.g. ["p2", "p3", ..., "p6"]).
        """
        bottom_up_features = self.bottom_up(x)
        # Walk the pyramid coarse-to-fine.
        inputs = [bottom_up_features[f] for f in self.in_features[::-1]]
        prev_features = self.lateral_convs[0](inputs[0])
        outputs = [self.output_convs[0](prev_features)]
        for features, lat_conv, out_conv in zip(
            inputs[1:], self.lateral_convs[1:], self.output_convs[1:]
        ):
            top_down = F.interpolate(prev_features, scale_factor=2, mode="nearest")
            prev_features = lat_conv(features) + top_down
            if self._fuse_type == "avg":
                prev_features = prev_features / 2
            outputs.append(out_conv(prev_features))
        # Reverse into fine-to-coarse order for the caller.
        results = outputs[::-1]
        if self.top_block is not None:
            top_block_in_feature = bottom_up_features.get(self.top_block.in_feature, None)
            if top_block_in_feature is None:
                top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)]
            results.extend(self.top_block(top_block_in_feature))
        assert len(self._out_features) == len(results)
        return dict(zip(self._out_features, results))

    def output_shape(self):
        """Describe every output level as a :class:`ShapeSpec` (channels + stride)."""
        specs = {}
        for name in self._out_features:
            specs[name] = ShapeSpec(
                channels=self._out_feature_channels[name],
                stride=self._out_feature_strides[name]
            )
        return specs
def _assert_strides_are_log2_contiguous(strides):
"""
Assert that each stride is 2x times its preceding stride, i.e. "contiguous in log2".
"""
for i, stride in enumerate(strides[1:], 1):
assert stride == 2 * strides[i - 1], "Strides {} {} are not log2 contiguous".format(
stride, strides[i - 1]
)
class LastLevelMaxPool(nn.Module):
    """
    Generates the extra P6 feature of the original FPN by subsampling P5.
    """

    def __init__(self):
        super().__init__()
        # Contributes exactly one extra pyramid level, fed by "p5".
        self.num_levels = 1
        self.in_feature = "p5"

    def forward(self, x):
        # kernel_size=1 with stride=2 is plain 2x subsampling (no pooling window).
        pooled = F.max_pool2d(x, kernel_size=1, stride=2, padding=0)
        return [pooled]
class LastLevelP6P7(nn.Module):
    """
    Generates RetinaNet's extra P6 and P7 levels from the C5 feature.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Two extra pyramid levels, fed by the backbone's "res5" output.
        self.num_levels = 2
        self.in_feature = "res5"
        # Two stride-2 3x3 convs: C5 -> P6 -> P7.
        self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
        self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
        for layer in (self.p6, self.p7):
            weight_init.c2_xavier_fill(layer)

    def forward(self, c5):
        p6 = self.p6(c5)
        # P7 is computed from a rectified P6.
        p7 = self.p7(F.relu(p6))
        return [p6, p7]
class LastLevelP6P7GN(nn.Module):
    """
    Generates RetinaNet's extra P6 and P7 levels from the C5 feature.

    Identical to :class:`LastLevelP6P7` except that P7 is computed from a
    softplus-activated (instead of ReLU-activated) P6.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Two extra pyramid levels, fed by the backbone's "res5" output.
        self.num_levels = 2
        self.in_feature = "res5"
        # Two stride-2 3x3 convs: C5 -> P6 -> P7.
        self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
        self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
        for layer in (self.p6, self.p7):
            weight_init.c2_xavier_fill(layer)

    def forward(self, c5):
        p6 = self.p6(c5)
        # Smooth (softplus) activation in place of ReLU.
        p7 = self.p7(F.softplus(p6))
        return [p6, p7]
@BACKBONE_REGISTRY.register()
def build_resnet_fpngn_backbone(cfg, input_shape: ShapeSpec):
    """
    Build a recurrent FPN (FPNGN, default settings) over a ResNet backbone.

    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    return FPNGN(
        bottom_up=build_resnet_backbone(cfg, input_shape),
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=cfg.MODEL.FPN.OUT_CHANNELS,
        norm=cfg.MODEL.FPN.NORM,
        top_block=LastLevelMaxPool(),
        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
    )
@BACKBONE_REGISTRY.register()
def build_resnet_fpnindi_backbone(cfg, input_shape: ShapeSpec):
    """
    Build the FPNINDI variant of the recurrent FPN over a ResNet backbone.

    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    return FPNINDI(
        bottom_up=build_resnet_backbone(cfg, input_shape),
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=cfg.MODEL.FPN.OUT_CHANNELS,
        norm=cfg.MODEL.FPN.NORM,
        top_block=LastLevelMaxPool(),
        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
    )
@BACKBONE_REGISTRY.register()
def build_resnet_fpngn_gala_backbone(cfg, input_shape: ShapeSpec):
    """
    Build a recurrent FPN (FPNGN) with GALA attention over a ResNet backbone.

    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    return FPNGN(
        bottom_up=build_resnet_backbone(cfg, input_shape),
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=cfg.MODEL.FPN.OUT_CHANNELS,
        norm=cfg.MODEL.FPN.NORM,
        gala=True,
        top_block=LastLevelMaxPool(),
        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
    )
@BACKBONE_REGISTRY.register()
def build_resnet_fpngn_cbp10_backbone(cfg, input_shape: ShapeSpec):
    """
    Build a recurrent FPN (FPNGN) trained with contractor-RBP ('cbp') over
    10 timesteps, on top of a ResNet backbone.

    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    return FPNGN(
        bottom_up=build_resnet_backbone(cfg, input_shape),
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=cfg.MODEL.FPN.OUT_CHANNELS,
        norm=cfg.MODEL.FPN.NORM,
        gala=False,
        grad_method='cbp',
        timesteps=10,
        top_block=LastLevelMaxPool(),
        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
    )
@BACKBONE_REGISTRY.register()
def build_resnet_fpn_backbone(cfg, input_shape: ShapeSpec):
    """
    Build the standard feedforward FPN over a ResNet backbone.

    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    return FPN(
        bottom_up=build_resnet_backbone(cfg, input_shape),
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=cfg.MODEL.FPN.OUT_CHANNELS,
        norm=cfg.MODEL.FPN.NORM,
        top_block=LastLevelMaxPool(),
        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
    )
@BACKBONE_REGISTRY.register()
def build_resnet_fpn_gn_backbone(cfg, input_shape: ShapeSpec):
    """
    Build the standard FPN over a GroupNorm ResNet backbone.

    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    return FPN(
        bottom_up=build_resnet_gn_backbone(cfg, input_shape),
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=cfg.MODEL.FPN.OUT_CHANNELS,
        norm=cfg.MODEL.FPN.NORM,
        top_block=LastLevelMaxPool(),
        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
    )
@BACKBONE_REGISTRY.register()
def build_resnet_fpn_gnbn_lowlevel_backbone(cfg, input_shape: ShapeSpec):
    """
    Build the standard FPN over a mixed GN/BN "lowlevel" ResNet backbone.

    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    return FPN(
        bottom_up=build_resnet_gnbn_lowlevel_backbone(cfg, input_shape),
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=cfg.MODEL.FPN.OUT_CHANNELS,
        norm=cfg.MODEL.FPN.NORM,
        top_block=LastLevelMaxPool(),
        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
    )
@BACKBONE_REGISTRY.register()
def build_resnet_fpn_gnbn_backbone(cfg, input_shape: ShapeSpec):
    """
    Build the standard FPN over a mixed GN/BN ResNet backbone.

    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    return FPN(
        bottom_up=build_resnet_gnbn_backbone(cfg, input_shape),
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=cfg.MODEL.FPN.OUT_CHANNELS,
        norm=cfg.MODEL.FPN.NORM,
        top_block=LastLevelMaxPool(),
        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
    )
@BACKBONE_REGISTRY.register()
def build_retinanet_resnet_fpn_backbone(cfg, input_shape: ShapeSpec):
    """
    Build the RetinaNet FPN (with extra P6/P7 levels) over a ResNet backbone.

    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    bottom_up = build_resnet_backbone(cfg, input_shape)
    # P6/P7 are computed from the raw C5 feature, so they need its channel count.
    p6p7_channels = bottom_up.output_shape()["res5"].channels
    return FPN(
        bottom_up=bottom_up,
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=cfg.MODEL.FPN.OUT_CHANNELS,
        norm=cfg.MODEL.FPN.NORM,
        top_block=LastLevelP6P7(p6p7_channels, cfg.MODEL.FPN.OUT_CHANNELS),
        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
    )
@BACKBONE_REGISTRY.register()
def build_retinanet_resnet_fpngn_backbone(cfg, input_shape: ShapeSpec):
    """
    Build the recurrent RetinaNet FPN (FPNGN with softplus P6/P7 top block)
    over a ResNet backbone.

    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    bottom_up = build_resnet_backbone(cfg, input_shape)
    # P6/P7 are computed from the raw C5 feature, so they need its channel count.
    p6p7_channels = bottom_up.output_shape()["res5"].channels
    return FPNGN(
        bottom_up=bottom_up,
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=cfg.MODEL.FPN.OUT_CHANNELS,
        norm=cfg.MODEL.FPN.NORM,
        top_block=LastLevelP6P7GN(p6p7_channels, cfg.MODEL.FPN.OUT_CHANNELS),
        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
    )
| 38.663657 | 102 | 0.560933 |
6567646669fc2e550176f0fa826e19590d429cf4 | 3,481 | py | Python | python/pitft/Stoner.py | sophiathekitty/micro_display | 8459221a12dbf36f934b49f0b8c008d07922b2c6 | [
"MIT"
] | 1 | 2021-11-30T07:24:29.000Z | 2021-11-30T07:24:29.000Z | python/pitft/Stoner.py | sophiathekitty/micro_display | 8459221a12dbf36f934b49f0b8c008d07922b2c6 | [
"MIT"
] | 1 | 2022-01-08T22:45:25.000Z | 2022-01-11T00:15:02.000Z | python/pitft/Stoner.py | sophiathekitty/micro_display | 8459221a12dbf36f934b49f0b8c008d07922b2c6 | [
"MIT"
] | null | null | null | import time
import datetime
import digitalio
import board
from adafruit_rgb_display.rgb import color565
import adafruit_rgb_display.st7789 as st7789
from PIL import Image, ImageDraw, ImageFont
import urllib.request
import json
class Stoner:
    """Renders "stoner time" banner images (420 / 710) for a small TFT display.

    Font and background assets are read from hard-coded paths under
    /var/www/html/python/pitft/, and the active mode is fetched from a
    settings API on localhost.
    """
    def __init__(self):
        self.font_big = ImageFont.truetype("/var/www/html/python/pitft/fonts/ViceCitySans.otf", 98)
        self.font_small = ImageFont.truetype("/var/www/html/python/pitft/fonts/ViceCitySans.otf", 44)
        # Two cursors walk the palette in opposite directions so the big and
        # small text lines cycle through different colors on every redraw.
        self.colorIndex = 0
        self.colors = ["#f26441","#f27f41","#f29441","#f2ae41","#f2dd41","#ecf241","#ccf241","#a8f241","#85f241","#6df241","#44f241","#64f241","#7ff241","#94f241","#c0f241","#e3f241","#f2da41","#f2b441","#f2a041","#f27441"]
        self.color2Index = round(len(self.colors) / 2)
        self.mode = "none"  # refreshed by Load(); "none"/"420"/"710"/"both"
    def Load(self):
        """Refresh ``self.mode`` from the local settings API (stoner_mode)."""
        with urllib.request.urlopen("http://localhost/api/settings/?name=stoner_mode&default=none") as json_url:
            buf = json_url.read()
            data = json.loads(buf.decode('utf-8'))
            self.mode = data
    def StonerTime(self):
        """Return 420 or 710 when the wall clock matches an enabled stoner
        time (4:20/16:20 or 7:10/19:10 local time), otherwise 0.

        Re-reads the mode setting on every call via Load().
        """
        self.Load()
        now = datetime.datetime.now()
        hour, minute = now.hour, now.minute
        if minute == 20 and hour in (4, 16) and self.mode in ("both", "420"):
            return 420
        if minute == 10 and hour in (7, 19) and self.mode in ("both", "710"):
            return 710
        return 0
    def Draw(self):
        """Return the banner image if it is currently a stoner time, else the
        plain background image."""
        # Fetch once: the original called StonerTime() twice, triggering two
        # HTTP round-trips to the settings API per redraw.
        moment = self.StonerTime()
        if moment == 420:
            return self.Draw420()
        if moment == 710:
            return self.Draw710()
        return Image.open("/var/www/html/python/pitft/bud.jpg")
    def _draw_banner(self, image_path, big_text, big_xy, small_text, small_xy):
        """Shared renderer for Draw420/Draw710.

        Draws a drop-shadowed big line and small line over the background
        image at the given (x, y) anchors, advancing both color cursors.
        """
        im = Image.open(image_path)
        draw = ImageDraw.Draw(im)
        # Advance the cursors in opposite directions, wrapping around.
        self.colorIndex += 1
        self.color2Index -= 1
        if self.colorIndex >= len(self.colors):
            self.colorIndex = 0
        if self.color2Index < 0:
            self.color2Index = len(self.colors) - 1
        x, y = big_xy
        # Two black offset copies fake a drop shadow, then the colored text.
        draw.text((x - 2, y - 2), big_text, font=self.font_big, fill="#000000")
        draw.text((x + 4, y + 4), big_text, font=self.font_big, fill="#000000")
        draw.text((x, y), big_text, font=self.font_big, fill=self.colors[self.colorIndex])
        x, y = small_xy
        draw.text((x - 2, y - 2), small_text, font=self.font_small, fill="#000000")
        draw.text((x + 4, y + 4), small_text, font=self.font_small, fill="#000000")
        draw.text((x, y), small_text, font=self.font_small, fill=self.colors[self.color2Index])
        return im
    def Draw420(self):
        """Render the 4:20 banner over the bud background."""
        return self._draw_banner("/var/www/html/python/pitft/bud.jpg", "420", (20, 10), "Blaze It!!", (40, 85))
    def Draw710(self):
        """Render the 7:10 banner over the wax background."""
        return self._draw_banner("/var/www/html/python/pitft/wax.jpg", "710", (50, 10), "Dab It!!", (50, 85))
| 40.952941 | 223 | 0.579144 |
7048e3230145c59843683c72c2d2735a74e6634e | 375 | py | Python | robots/build/upper_14_back/catkin_generated/pkg.develspace.context.pc.py | eiphy/lita | 262d6ccabde8467db47278dc39574e5ea34abda2 | [
"BSD-3-Clause"
] | 4 | 2019-01-11T02:56:06.000Z | 2019-03-27T14:26:25.000Z | robots/build/upper_14_back/catkin_generated/pkg.develspace.context.pc.py | eiphy/lita | 262d6ccabde8467db47278dc39574e5ea34abda2 | [
"BSD-3-Clause"
] | 5 | 2019-01-10T11:18:54.000Z | 2019-03-03T09:33:40.000Z | robots/build/upper_14_back/catkin_generated/pkg.develspace.context.pc.py | eiphy/lita | 262d6ccabde8467db47278dc39574e5ea34abda2 | [
"BSD-3-Clause"
] | null | null | null | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "upper_14"
PROJECT_SPACE_DIR = "/home/ei/engine/lita/robots/devel"
PROJECT_VERSION = "0.5.0"
| 41.666667 | 68 | 0.701333 |
d306afbe653f8522053df8d275337493385dde68 | 750 | py | Python | setup.py | amadev/airship-promenade | 91989a7481af1691f2386d90899c050aa2a7f93f | [
"Apache-2.0"
] | null | null | null | setup.py | amadev/airship-promenade | 91989a7481af1691f2386d90899c050aa2a7f93f | [
"Apache-2.0"
] | 1 | 2021-04-30T20:38:26.000Z | 2021-04-30T20:38:26.000Z | setup.py | michal2novak/airship-promenade | b268029e4a812ca20ac9820d1ccdc75217295fec | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
# Package metadata is delegated to pbr, which reads it from setup.cfg at
# build time; only the build-time requirements are pinned here.
setup(
    setup_requires=['setuptools>=17.1', 'pbr>=2.0.0'],
    pbr=True
)
| 32.608696 | 74 | 0.745333 |
01aecb5c979667063246ce5e7d60ff312d4a0d36 | 4,911 | py | Python | meta.py | twuilliam/shrec-sketches-helpers | ae208e0fc514abeb0312eae6c389c6a86a3ffde1 | [
"MIT"
] | 5 | 2021-07-30T09:10:30.000Z | 2022-02-10T09:19:17.000Z | meta.py | FutureXZC/shrec-sketches-helpers | 4fed67ae74a9561672a88e6b0ab571cebd50b688 | [
"MIT"
] | 1 | 2021-03-18T02:44:46.000Z | 2021-03-18T08:53:05.000Z | meta.py | FutureXZC/shrec-sketches-helpers | 4fed67ae74a9561672a88e6b0ab571cebd50b688 | [
"MIT"
] | 2 | 2020-11-07T12:05:33.000Z | 2022-02-11T06:52:50.000Z | import argparse
import os
import numpy as np
import pandas as pd
def config():
    """Parse the command-line options for building SHREC metadata files.

    Returns:
        argparse.Namespace with ``data_dir`` (str, required) and
        ``dataset`` (one of '13' or '14', required).
    """
    parser = argparse.ArgumentParser(description='SHREC meta')
    parser.add_argument('--data_dir', type=str, required=True,
                        help='data folder path')
    parser.add_argument('--dataset', required=True, choices=['13', '14'],
                        help='dataset')
    return parser.parse_args()
def get_df_sketches(data_dir, sk_path):
    """Index every sketch PNG found under ``data_dir/sk_path``.

    The directory layout is assumed to be ``<sk_path>/<cat>/<split>/<id>.png``.

    Returns:
        pandas.DataFrame with columns 'cat', 'split', 'id' and the
        data_dir-relative file path as index.
    """
    records = []
    for root, _, files in os.walk(os.path.join(data_dir, sk_path)):
        parts = root.split(os.path.sep)
        for fname in files:
            if fname[-3:] == 'png':
                split_name, cat_name = parts[-1], parts[-2]
                stem = os.path.splitext(fname)[0]
                rel_path = os.path.join(sk_path, cat_name, split_name, fname)
                records.append((cat_name, split_name, stem, rel_path))
    return pd.DataFrame(
        data={'cat': [r[0] for r in records],
              'split': [r[1] for r in records],
              'id': [r[2] for r in records]},
        index=[r[3] for r in records])
def get_df_models(data_dir, cad_anno, cad_path):
    """Index every ``.off`` CAD model and attach its category label.

    The ``.cla`` annotation file is parsed after skipping its 3 header lines:
    a line with exactly three tokens starts a new category (first token is
    the name) and each following non-empty line is a model id belonging to
    that category. Model files are named ``<prefix char><id>.off`` — the
    first character is stripped before the label lookup.

    Returns:
        pandas.DataFrame with columns 'cat' and 'id', indexed by the
        cad_path-relative file path.
    """
    anno_file = os.path.join(data_dir, cad_anno)
    with open(anno_file, 'r') as f:
        lines = f.readlines()
    labels = {}
    current_cat = ''
    for raw in lines[3:]:
        entry = raw.strip('\r\n').strip('\t').strip()
        tokens = entry.split()
        if len(tokens) == 3:
            current_cat = tokens[0]
        elif entry != '':
            labels[entry] = current_cat
    rows = []
    for root, _, files in os.walk(os.path.join(data_dir, cad_path)):
        for fname in files:
            if fname[-3:] == 'off':
                model_id = os.path.splitext(fname)[0]
                rows.append((labels[model_id[1:]], model_id,
                             os.path.join(cad_path, fname)))
    return pd.DataFrame(
        data={'cat': [r[0] for r in rows], 'id': [r[1] for r in rows]},
        index=[r[2] for r in rows])
def split_models(df_sk, df_cad):
    """Keep only categories with more than 50 CAD models and split them.

    Sketches and CAD models of rarer categories are dropped. For each kept
    category, ~20% of the CAD models (floor of count * 0.2) are assigned to
    'test' and the rest to 'train', in a new 'split' column.

    The numpy RNG is seeded with a fixed value so the split is reproducible;
    the sequence of np.random.choice calls must not be reordered.
    """
    categories, counts = np.unique(df_cad['cat'], return_counts=True)
    keep = counts > 50
    coi, coi_counts = categories[keep], counts[keep]
    sk_out = df_sk.loc[df_sk['cat'].isin(coi)].copy()
    cad_out = df_cad.loc[df_cad['cat'].isin(coi)].copy()
    np.random.seed(1234)
    cad_out.loc[:, 'split'] = 'train'
    for category, count in zip(coi, coi_counts):
        n_test = int(np.floor(count * 0.2))
        candidates = cad_out.loc[cad_out['cat'] == category, 'id']
        chosen = np.random.choice(candidates, size=n_test, replace=False)
        cad_out.loc[cad_out['id'].isin(chosen), 'split'] = 'test'
    return sk_out, cad_out
def main():
    """Build sketch/CAD metadata tables for SHREC13 or SHREC14 and save them.

    Writes `labels/<base>/sk_orig.hdf5`, `cad_orig.hdf5` and `cad.txt`
    (one model path per line); for SHREC14 it additionally writes a
    train/test-split variant under `labels/PART-SHREC14/`.
    """
    args = config()
    if args.dataset == '14':
        base = 'SHREC14'
        # get sketch labels
        sk_path = os.path.join(base, 'SHREC14LSSTB_SKETCHES', 'SHREC14LSSTB_SKETCHES')
        df_sk = get_df_sketches(args.data_dir, sk_path)
        cad_path = os.path.join(base, 'SHREC14LSSTB_TARGET_MODELS')
        eval_path = os.path.join(base, 'SHREC14_Sketch_Evaluation_CVIU')
        cad_anno = os.path.join(eval_path, 'SHREC14_SBR_Model.cla')
    elif args.dataset == '13':
        base = 'SHREC13'
        # get sketch labels (in two different folders)
        # SHREC13 ships train and test sketches separately, so the split
        # column is assigned here rather than derived from the folder layout.
        sk_path_tr = os.path.join(
            base, 'SHREC13_SBR_TRAINING_SKETCHES', 'SHREC13_SBR_TRAINING_SKETCHES')
        sk_path_te = os.path.join(
            base, 'SHREC13_SBR_TESTING_SKETCHES')
        tmp1 = get_df_sketches(args.data_dir, sk_path_tr)
        tmp1['split'] = 'train'
        tmp2 = get_df_sketches(args.data_dir, sk_path_te)
        tmp2['split'] = 'test'
        df_sk = pd.concat([tmp1, tmp2])
        # get cad labels
        cad_path = os.path.join(base, 'SHREC13_SBR_TARGET_MODELS', 'models')
        eval_path = os.path.join(base, 'SHREC2013_Sketch_Evaluation')
        cad_anno = os.path.join(eval_path, 'SHREC13_SBR_Model.cla')
    # get cad labels
    df_cad = get_df_models(args.data_dir, cad_anno, cad_path)
    save_dir = os.path.join('labels', base)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    df_sk.to_hdf(os.path.join(save_dir, 'sk_orig.hdf5'), 'sk')
    df_cad.to_hdf(os.path.join(save_dir, 'cad_orig.hdf5'), 'cad')
    # Plain-text list of model paths (one per line) for downstream tools.
    with open(os.path.join(save_dir, 'cad.txt'), 'w') as f:
        for item in df_cad.index:
            f.write('%s\n' % item)
    if args.dataset == '14':
        # split between train and test cad models
        # following Qi et al BMVC 2018
        new_df_sk, new_df_cad = split_models(df_sk, df_cad)
        save_dir = os.path.join('labels', 'PART-' + base)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        new_df_sk.to_hdf(os.path.join(save_dir, 'sk_orig.hdf5'), 'sk')
        new_df_cad.to_hdf(os.path.join(save_dir, 'cad_orig.hdf5'), 'cad')
# Script entry point: build and save the SHREC metadata tables.
if __name__ == "__main__":
    main()
| 31.88961 | 86 | 0.577886 |
21c35215abba7aa724771409646e09586a78847a | 391 | py | Python | userapi/wsgi.py | apuc/django-rest-framework | 863f2dcca5f2a677ac0e477fc704cc54cd9a53f8 | [
"MIT"
] | null | null | null | userapi/wsgi.py | apuc/django-rest-framework | 863f2dcca5f2a677ac0e477fc704cc54cd9a53f8 | [
"MIT"
] | 6 | 2021-03-30T14:08:14.000Z | 2021-09-08T02:21:23.000Z | userapi/wsgi.py | apuc/django-rest-framework | 863f2dcca5f2a677ac0e477fc704cc54cd9a53f8 | [
"MIT"
] | null | null | null | """
WSGI config for userapi project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before building the WSGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'userapi.settings')
# WSGI servers (gunicorn, uWSGI, mod_wsgi) import this module-level callable.
application = get_wsgi_application()
| 23 | 78 | 0.785166 |
48922d6d17d814db7dc985c75e11a63fb7ffc75d | 923 | py | Python | configs/selfsup/relative_loc/relative-loc_resnet50_8xb64-steplr-70e_in1k.py | mitming/mmselfsup | 5b5cb474776291cfcb9a1140afd11b696e11fcab | [
"Apache-2.0"
] | 355 | 2021-12-16T04:32:49.000Z | 2022-03-31T22:15:23.000Z | configs/selfsup/relative_loc/relative-loc_resnet50_8xb64-steplr-70e_in1k.py | mitming/mmselfsup | 5b5cb474776291cfcb9a1140afd11b696e11fcab | [
"Apache-2.0"
] | 89 | 2021-12-16T05:15:42.000Z | 2022-03-31T10:57:39.000Z | configs/selfsup/relative_loc/relative-loc_resnet50_8xb64-steplr-70e_in1k.py | mitming/mmselfsup | 5b5cb474776291cfcb9a1140afd11b696e11fcab | [
"Apache-2.0"
] | 74 | 2021-12-16T04:40:02.000Z | 2022-03-31T08:40:32.000Z | _base_ = [
'../_base_/models/relative-loc.py',
'../_base_/datasets/imagenet_relative-loc.py',
'../_base_/schedules/sgd_steplr-200e_in1k.py',
'../_base_/default_runtime.py',
]
# optimizer
optimizer = dict(
type='SGD',
lr=0.2,
weight_decay=1e-4,
momentum=0.9,
paramwise_options={
'\\Aneck.': dict(weight_decay=5e-4),
'\\Ahead.': dict(weight_decay=5e-4)
})
# learning policy
lr_config = dict(
policy='step',
step=[30, 50],
warmup='linear',
warmup_iters=5, # 5 ep
warmup_ratio=0.1,
warmup_by_epoch=True)
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=70)
# the max_keep_ckpts controls the max number of ckpt file in your work_dirs
# if it is 3, when CheckpointHook (in mmcv) saves the 4th ckpt
# it will remove the oldest one to keep the number of total ckpts as 3
checkpoint_config = dict(interval=10, max_keep_ckpts=3)
| 27.147059 | 75 | 0.671723 |
5ce666849b23cc852aaea973adff39ad0785f9dd | 303 | py | Python | exercises/superheros.py | tamara19-meet/y2s18-python_review | d0d1e67e5620d1360f64b7a6f699a944d56fbf5f | [
"MIT"
] | null | null | null | exercises/superheros.py | tamara19-meet/y2s18-python_review | d0d1e67e5620d1360f64b7a6f699a944d56fbf5f | [
"MIT"
] | null | null | null | exercises/superheros.py | tamara19-meet/y2s18-python_review | d0d1e67e5620d1360f64b7a6f699a944d56fbf5f | [
"MIT"
] | null | null | null | # Write your solutions for 1.5 here!
class superheroes:
    """A superhero with a name, a superpower, and a numeric strength."""
    def __init__(self, name, superpower, strength):
        # Bug fix: the constructor was misspelled ``__int__``, so Python never
        # ran it and ``superheroes(name, power, strength)`` raised TypeError.
        self.name = name
        self.superpower = superpower
        self.strength = strength
    def print_me(self):
        """Print the hero's name immediately followed by its strength."""
        print(self.name + str(self.strength))
# Demo: construct a hero and print its name and strength.
superhero = superheroes("tamara","fly", 10)
superhero.print_me()
| 23.307692 | 47 | 0.745875 |
e32231c6fa5dade60c0aa822a230284f163e0972 | 124,769 | py | Python | tensorflow/python/kernel_tests/lookup_ops_test.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 4 | 2020-09-23T01:20:01.000Z | 2022-03-08T06:09:29.000Z | tensorflow/python/kernel_tests/lookup_ops_test.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 59 | 2019-06-17T09:37:49.000Z | 2022-01-19T01:21:34.000Z | tensorflow/python/kernel_tests/lookup_ops_test.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 2 | 2020-03-25T12:52:20.000Z | 2020-08-11T09:31:43.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lookup ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
import six
from tensorflow.python import tf2
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import counter
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
from tensorflow.python.training import server_lib
from tensorflow.python.training.tracking import util as trackable
class BaseLookupTableTest(test.TestCase):
  """Shared helpers that select the TF1/TF2 variant of the lookup APIs."""

  def getHashTable(self):
    """Return the static hash table class matching the running TF version."""
    return (lookup_ops.StaticHashTable
            if tf2.enabled() else lookup_ops.StaticHashTableV1)

  def getVocabularyTable(self):
    """Return the static vocabulary table class for the running TF version."""
    return (lookup_ops.StaticVocabularyTable
            if tf2.enabled() else lookup_ops.StaticVocabularyTableV1)

  def initialize_table(self, table):
    """Run the table initializer explicitly when executing in TF1 mode."""
    if not tf2.enabled():
      self.evaluate(table.initializer)
class StaticHashTableTest(BaseLookupTableTest):
  """Covers construction, initialization and lookup of StaticHashTable,
  including error conditions, in both eager (TF2) and graph (TF1) modes."""
  def testStaticHashTable(self):
    default_val = -1
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table = self.getHashTable()(
        lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
    self.initialize_table(table)
    self.assertAllEqual(3, self.evaluate(table.size()))
    input_string = constant_op.constant(["brain", "salad", "tank"])
    output = table.lookup(input_string)
    self.assertAllEqual([3], output.get_shape())
    result = self.evaluate(output)
    self.assertAllEqual([0, 1, -1], result)
    # export() round-trips the full contents; ordering is unspecified, hence
    # assertItemsEqual below.
    exported_keys_tensor, exported_values_tensor = table.export()
    self.assertItemsEqual([b"brain", b"salad", b"surgery"],
                          self.evaluate(exported_keys_tensor))
    self.assertItemsEqual([0, 1, 2], self.evaluate(exported_values_tensor))
  def testStaticHashTableFindHighRank(self):
    default_val = -1
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table = self.getHashTable()(
        lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
    self.initialize_table(table)
    self.assertAllEqual(3, self.evaluate(table.size()))
    input_string = constant_op.constant([["brain", "salad"],
                                         ["tank", "tarkus"]])
    output = table.lookup(input_string)
    result = self.evaluate(output)
    self.assertAllEqual([[0, 1], [-1, -1]], result)
  def testStaticHashTableInitWithPythonArrays(self):
    default_val = -1
    keys = ["brain", "salad", "surgery"]
    values = [0, 1, 2]
    table = self.getHashTable()(
        lookup_ops.KeyValueTensorInitializer(
            keys, values, value_dtype=dtypes.int64), default_val)
    self.initialize_table(table)
    self.assertAllEqual(3, self.evaluate(table.size()))
    input_string = constant_op.constant(["brain", "salad", "tank"])
    output = table.lookup(input_string)
    result = self.evaluate(output)
    self.assertAllEqual([0, 1, -1], result)
  def testStaticHashTableInitWithNumPyArrays(self):
    default_val = -1
    # NOTE(review): np.str is removed in newer NumPy releases; np.str_ (or
    # plain str) may be needed when upgrading.
    keys = np.array(["brain", "salad", "surgery"], dtype=np.str)
    values = np.array([0, 1, 2], dtype=np.int64)
    table = self.getHashTable()(
        lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
    self.initialize_table(table)
    self.assertAllEqual(3, self.evaluate(table.size()))
    input_string = constant_op.constant(["brain", "salad", "tank"])
    output = table.lookup(input_string)
    result = self.evaluate(output)
    self.assertAllEqual([0, 1, -1], result)
  def testMultipleStaticHashTables(self):
    default_val = -1
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table1 = self.getHashTable()(
        lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
    table2 = self.getHashTable()(
        lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
    table3 = self.getHashTable()(
        lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
    self.initialize_table(table1)
    self.initialize_table(table2)
    self.initialize_table(table3)
    self.assertAllEqual(3, self.evaluate(table1.size()))
    self.assertAllEqual(3, self.evaluate(table2.size()))
    self.assertAllEqual(3, self.evaluate(table3.size()))
    input_string = constant_op.constant(["brain", "salad", "tank"])
    output1 = table1.lookup(input_string)
    output2 = table2.lookup(input_string)
    output3 = table3.lookup(input_string)
    out1, out2, out3 = self.evaluate([output1, output2, output3])
    self.assertAllEqual([0, 1, -1], out1)
    self.assertAllEqual([0, 1, -1], out2)
    self.assertAllEqual([0, 1, -1], out3)
  def testStaticHashTableWithTensorDefault(self):
    default_val = constant_op.constant(-1, dtypes.int64)
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table = self.getHashTable()(
        lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
    self.initialize_table(table)
    input_string = constant_op.constant(["brain", "salad", "tank"])
    output = table.lookup(input_string)
    result = self.evaluate(output)
    self.assertAllEqual([0, 1, -1], result)
  def testStaticHashTableWithSparseTensorInput(self):
    default_val = constant_op.constant(-1, dtypes.int64)
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table = self.getHashTable()(
        lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
    self.initialize_table(table)
    sp_indices = [[0, 0], [0, 1], [1, 0]]
    sp_shape = [2, 2]
    input_tensor = sparse_tensor.SparseTensor(
        constant_op.constant(sp_indices, dtypes.int64),
        constant_op.constant(["brain", "salad", "tank"]),
        constant_op.constant(sp_shape, dtypes.int64))
    output = table.lookup(input_tensor)
    # Lookup on a SparseTensor maps only the values; indices and dense
    # shape pass through unchanged.
    out_indices, out_values, out_shape = self.evaluate(output)
    self.assertAllEqual([0, 1, -1], out_values)
    self.assertAllEqual(sp_indices, out_indices)
    self.assertAllEqual(sp_shape, out_shape)
  def testSignatureMismatch(self):
    default_val = -1
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table = self.getHashTable()(
        lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
    self.initialize_table(table)
    # Ref types do not produce a lookup signature mismatch.
    input_string_ref = variables.Variable("brain")
    self.evaluate(input_string_ref.initializer)
    self.assertEqual(0, self.evaluate(table.lookup(input_string_ref)))
    input_string = constant_op.constant([1, 2, 3], dtypes.int64)
    with self.assertRaises(TypeError):
      table.lookup(input_string)
    with self.assertRaises(TypeError):
      self.getHashTable()(
          lookup_ops.KeyValueTensorInitializer(keys, values), "UNK")
  def testDTypes(self):
    default_val = -1
    with self.assertRaises(TypeError):
      self.getHashTable()(
          lookup_ops.KeyValueTensorInitializer(["a"], [1], [dtypes.string],
                                               dtypes.int64), default_val)
  @test_util.run_v1_only("(Cached) Sessions not available in TF2.0")
  def testNotInitialized(self):
    with self.cached_session():
      default_val = -1
      table = self.getHashTable()(
          lookup_ops.KeyValueTensorInitializer(["a"], [1],
                                               value_dtype=dtypes.int64),
          default_val)
      input_string = constant_op.constant(["brain", "salad", "surgery"])
      output = table.lookup(input_string)
      with self.assertRaisesOpError("Table not initialized"):
        self.evaluate(output)
  @test_util.run_v1_only("(Cached) Sessions not available in TF2.0")
  def testInitializeTwice(self):
    with self.cached_session():
      default_val = -1
      keys = constant_op.constant(["brain", "salad", "surgery"])
      values = constant_op.constant([0, 1, 2], dtypes.int64)
      table = self.getHashTable()(
          lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
      self.initialize_table(table)
      with self.assertRaisesOpError("Table already initialized"):
        self.initialize_table(table)
  def testInitializationWithInvalidDimensions(self):
    default_val = -1
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2, 3, 4], dtypes.int64)
    # Eager mode surfaces the keys/values length mismatch as an
    # InvalidArgumentError instead of a Python-level ValueError.
    raised_error = ValueError
    if context.executing_eagerly():
      raised_error = errors_impl.InvalidArgumentError
    with self.assertRaises(raised_error):
      self.getHashTable()(
          lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
  @test_util.run_v1_only("Sessions not available in TF2.0")
  def testMultipleSessions(self):
    # Start a server
    server = server_lib.Server({"local0": ["localhost:0"]},
                               protocol="grpc",
                               start=True)
    # Create two sessions sharing the same state
    session1 = session.Session(server.target)
    session2 = session.Session(server.target)
    default_val = -1
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table = self.getHashTable()(
        lookup_ops.KeyValueTensorInitializer(keys, values),
        default_val,
        name="t1")
    # Init the table in the first session.
    with session1:
      self.initialize_table(table)
      self.assertAllEqual(3, self.evaluate(table.size()))
    # Init the table in the second session and verify that we do not get a
    # "Table already initialized" error.
    with session2:
      table.initializer.run()
      self.assertAllEqual(3, self.evaluate(table.size()))
  def testStaticHashTableInt32String(self):
    default_val = "n/a"
    keys = constant_op.constant([0, 1, 2], dtypes.int32)
    values = constant_op.constant(["brain", "salad", "surgery"])
    table = self.getHashTable()(
        lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
    self.initialize_table(table)
    input_tensor = constant_op.constant([0, 1, -1])
    output = table.lookup(input_tensor)
    result = self.evaluate(output)
    self.assertAllEqual([b"brain", b"salad", b"n/a"], result)
  def testTableUseInFunction(self):
    if not context.executing_eagerly():
      self.skipTest("Only Eager mode test.")
    keys = constant_op.constant([0, 1, 2], dtypes.int32)
    values = constant_op.constant(["brain", "salad", "surgery"])
    table = self.getHashTable()(lookup_ops.KeyValueTensorInitializer(
        keys, values), "n/a")
    @function.defun()
    def lookup_table_func(k):
      return table.lookup(k)
    result = lookup_table_func(constant_op.constant([0, 1, -1]))
    self.assertAllEqual([b"brain", b"salad", b"n/a"], result)
    result = lookup_table_func(constant_op.constant([2, -1, 1]))
    self.assertAllEqual([b"surgery", b"n/a", b"salad"], result)
  def testTableCreatedInFunction(self):
    if not context.executing_eagerly():
      self.skipTest("Only Eager mode test.")
    keys = constant_op.constant([0, 1, 2], dtypes.int32)
    values = constant_op.constant(["brain", "salad", "surgery"])
    @function.defun()
    def lookup_table_func(k):
      table = self.getHashTable()(lookup_ops.KeyValueTensorInitializer(
          keys, values), "n/a")
      return table.lookup(k)
    result = lookup_table_func(constant_op.constant([0, 1, -1]))
    self.assertAllEqual([b"brain", b"salad", b"n/a"], result)
    result = lookup_table_func(constant_op.constant([2, -1, 1]))
    self.assertAllEqual([b"surgery", b"n/a", b"salad"], result)
class KeyValueTensorInitializerTest(BaseLookupTableTest):
  """Checks KeyValueTensorInitializer dtype handling and generated op names."""
  def test_string(self):
    init = lookup_ops.KeyValueTensorInitializer(
        ("brain", "salad", "surgery"), (0, 1, 2), dtypes.string, dtypes.int64)
    table = self.getHashTable()(init, default_value=-1)
    self.initialize_table(table)
  def test_multiple_tables(self):
    with ops.name_scope("table_scope"):
      init1 = lookup_ops.KeyValueTensorInitializer(
          ("brain", "salad", "surgery"), (0, 1, 2), dtypes.string, dtypes.int64)
      table1 = self.getHashTable()(init1, default_value=-1)
      if not context.executing_eagerly():
        self.assertEqual("hash_table", table1.name)
        self.assertEqual("table_scope/hash_table",
                         table1.resource_handle.op.name)
      # A second table in the same scope gets a uniquified "_1" suffix.
      init2 = lookup_ops.KeyValueTensorInitializer(
          ("brain", "salad", "surgery"), (0, 1, 2), dtypes.string, dtypes.int64)
      table2 = self.getHashTable()(init2, default_value=-1)
      if not context.executing_eagerly():
        self.assertEqual("hash_table_1", table2.name)
        self.assertEqual("table_scope/hash_table_1",
                         table2.resource_handle.op.name)
  def test_int64(self):
    init = lookup_ops.KeyValueTensorInitializer((42, 1, -1000), (0, 1, 2),
                                                dtypes.int64, dtypes.int64)
    table = self.getHashTable()(init, default_value=-1)
    self.initialize_table(table)
  def test_int32(self):
    # int32 keys are rejected: construction/initialization raises OpError.
    init = lookup_ops.KeyValueTensorInitializer((42, 1, -1000), (0, 1, 2),
                                                dtypes.int32, dtypes.int64)
    with self.assertRaises(errors_impl.OpError):
      table = self.getHashTable()(init, default_value=-1)
      self.initialize_table(table)
class InitializeTableFromFileOpTest(BaseLookupTableTest):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
def testInitializeStringTable(self):
vocabulary_file = self._createVocabFile("one_column_1.txt")
default_value = -1
init = lookup_ops.TextFileInitializer(
vocabulary_file, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER)
self.assertIn("one_column_1.txt_-2_-1", init._shared_name)
table = self.getHashTable()(init, default_value)
self.initialize_table(table)
output = table.lookup(constant_op.constant(["brain", "salad", "tank"]))
result = self.evaluate(output)
self.assertAllEqual([0, 1, -1], result)
def testInitializeInt64Table(self):
vocabulary_file = self._createVocabFile(
"one_column_int64.txt", values=("42", "1", "-1000"))
with self.cached_session():
default_value = -1
init = lookup_ops.TextFileInitializer(
vocabulary_file, dtypes.int64, lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER)
self.assertIn("one_column_int64.txt_-2_-1", init._shared_name)
table = self.getHashTable()(init, default_value)
self.initialize_table(table)
output = table.lookup(
constant_op.constant((42, 1, 11), dtype=dtypes.int64))
result = self.evaluate(output)
self.assertAllEqual([0, 1, -1], result)
def testInitializeIndexTable(self):
vocabulary_file = self._createVocabFile("one_column_2.txt")
with self.cached_session():
default_value = "UNK"
key_index = lookup_ops.TextFileIndex.LINE_NUMBER
value_index = lookup_ops.TextFileIndex.WHOLE_LINE
init = lookup_ops.TextFileInitializer(
vocabulary_file, dtypes.int64, key_index, dtypes.string, value_index)
self.assertIn("one_column_2.txt_-1_-2", init._shared_name)
table = self.getHashTable()(init, default_value)
self.initialize_table(table)
input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
output = table.lookup(input_values)
result = self.evaluate(output)
self.assertAllEqual([b"brain", b"salad", b"surgery", b"UNK"], result)
def testMultiColumn(self):
vocabulary_file = os.path.join(self.get_temp_dir(), "three_columns.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["0\tbrain\t1", "1\tsalad\t5", "2\tsurgery\t6"]) + "\n")
with self.cached_session():
default_value = -1
key_index = 1
value_index = 2
init = lookup_ops.TextFileInitializer(
vocabulary_file, dtypes.string, key_index, dtypes.int64, value_index)
self.assertIn("three_columns.txt_1_2", init._shared_name)
table = self.getHashTable()(init, default_value)
self.initialize_table(table)
input_string = constant_op.constant(["brain", "salad", "surgery"])
output = table.lookup(input_string)
result = self.evaluate(output)
self.assertAllEqual([1, 5, 6], result)
def testInvalidDataTypeInMultiColumn(self):
vocabulary_file = os.path.join(self.get_temp_dir(), "three_columns.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["0\tbrain\t1", "1\tsalad\t5", "2\tsurgery\t6"]) + "\n")
with self.cached_session():
default_value = -1
key_index = 2
value_index = 1
init = lookup_ops.TextFileInitializer(
vocabulary_file, dtypes.string, key_index, dtypes.int64, value_index)
self.assertIn("three_columns.txt_2_1", init._shared_name)
with self.assertRaisesOpError("is not a valid"):
table = self.getHashTable()(init, default_value)
self.initialize_table(table)
def testInvalidDataType(self):
vocabulary_file = self._createVocabFile("one_column_3.txt")
with self.cached_session():
default_value = "UNK"
key_index = lookup_ops.TextFileIndex.WHOLE_LINE
value_index = lookup_ops.TextFileIndex.LINE_NUMBER
with self.assertRaises(ValueError):
init = lookup_ops.TextFileInitializer(vocabulary_file, dtypes.int64,
key_index, dtypes.string,
value_index)
self.assertIn("one_column_3.txt_-2_-1", init._shared_name)
self.getHashTable()(init, default_value)
def testInvalidIndex(self):
vocabulary_file = self._createVocabFile("one_column_4.txt")
with self.cached_session():
default_value = -1
key_index = 1 # second column of the line
value_index = lookup_ops.TextFileIndex.LINE_NUMBER
init = lookup_ops.TextFileInitializer(
vocabulary_file, dtypes.string, key_index, dtypes.int64, value_index)
self.assertIn("one_column_4.txt_1_-1", init._shared_name)
with self.assertRaisesOpError("Invalid number of columns"):
table = self.getHashTable()(init, default_value)
self.initialize_table(table)
def testInitializeSameTableWithMultipleNodes(self):
vocabulary_file = self._createVocabFile("one_column_5.txt")
with self.cached_session():
default_value = -1
init1 = lookup_ops.TextFileInitializer(
vocabulary_file, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER)
self.assertIn("one_column_5.txt_-2_-1", init1._shared_name)
table1 = self.getHashTable()(init1, default_value)
init2 = lookup_ops.TextFileInitializer(
vocabulary_file, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER)
self.assertIn("one_column_5.txt_-2_-1", init2._shared_name)
table2 = self.getHashTable()(init2, default_value)
init3 = lookup_ops.TextFileInitializer(
vocabulary_file, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER)
self.assertIn("one_column_5.txt_-2_-1", init3._shared_name)
table3 = self.getHashTable()(init3, default_value)
self.evaluate(lookup_ops.tables_initializer())
input_string = constant_op.constant(["brain", "salad", "tank"])
output1 = table1.lookup(input_string)
output2 = table2.lookup(input_string)
output3 = table3.lookup(input_string)
out1, out2, out3 = self.evaluate([output1, output2, output3])
self.assertAllEqual([0, 1, -1], out1)
self.assertAllEqual([0, 1, -1], out2)
self.assertAllEqual([0, 1, -1], out3)
def testInitializeTableWithNoFilename(self):
with self.cached_session():
default_value = -1
with self.assertRaises(ValueError):
self.getHashTable()(lookup_ops.TextFileInitializer(
"", dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER), default_value)
def testInitializeWithVocabSize(self):
with self.cached_session():
default_value = -1
vocab_size = 3
vocabulary_file1 = self._createVocabFile("one_column6.txt")
init1 = lookup_ops.TextFileInitializer(
vocabulary_file1,
dtypes.string,
lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup_ops.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size)
self.assertIn("one_column6.txt_3_-2_-1", init1._shared_name)
table1 = self.getHashTable()(init1, default_value)
# Initialize from file.
self.initialize_table(table1)
self.assertEqual(vocab_size, self.evaluate(table1.size()))
vocabulary_file2 = self._createVocabFile("one_column7.txt")
vocab_size = 5
init2 = lookup_ops.TextFileInitializer(
vocabulary_file2,
dtypes.string,
lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup_ops.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size)
self.assertIn("one_column7.txt_5_-2_-1", init2._shared_name)
with self.assertRaisesOpError("Invalid vocab_size"):
table2 = self.getHashTable()(init2, default_value)
self.initialize_table(table2)
vocab_size = 1
vocabulary_file3 = self._createVocabFile("one_column3.txt")
init3 = lookup_ops.TextFileInitializer(
vocabulary_file3,
dtypes.string,
lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup_ops.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size)
self.assertIn("one_column3.txt_1_-2_-1", init3._shared_name)
table3 = self.getHashTable()(init3, default_value)
# Smaller vocab size reads only vocab_size records.
self.initialize_table(table3)
self.assertEqual(vocab_size, self.evaluate(table3.size()))
  @test_util.run_v1_only("placeholder usage")
  def testFeedVocabularyName(self):
    """The vocabulary filename can be fed at initialization time (v1 only).

    The initializer is built against a non-existent filename; running the
    initializer as-is fails, while feeding a real vocab file through the
    graph's ASSET_FILEPATHS collection succeeds.
    """
    vocabulary_file = self._createVocabFile("feed_vocabulary.txt")
    with self.cached_session():
      default_value = -1
      init = lookup_ops.TextFileInitializer(
          "old_file.txt", dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
          dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER)
      self.assertIn("old_file.txt_-2_-1", init._shared_name)
      table = self.getHashTable()(init, default_value)
      # Initialize with non existing file (old_file.txt) should fail.
      # TODO(yleon): Update message, which might change per FileSystem.
      with self.assertRaisesOpError("old_file.txt"):
        table.initializer.run()
      # Initialize the model feeding the vocabulary file.
      # The filename tensor is registered as an asset; feed the real path
      # into it when running the initializer.
      filenames = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
      table.initializer.run(feed_dict={filenames[0]: vocabulary_file})
      input_string = constant_op.constant(["brain", "salad", "tank"])
      output = table.lookup(input_string)
      result = self.evaluate(output)
      self.assertAllEqual([0, 1, -1], result)
def testInvalidFilenames(self):
vocabulary_file = self._createVocabFile("filename_shape.txt")
with self.cached_session():
default_value = -1
# Invalid data type
other_type = constant_op.constant(1)
with self.assertRaises(Exception) as cm:
self.getHashTable()(lookup_ops.TextFileInitializer(
other_type, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER), default_value)
self.assertIsInstance(cm.exception, (ValueError, TypeError))
# Non-scalar filename
filenames = constant_op.constant([vocabulary_file, vocabulary_file])
if not context.executing_eagerly():
with self.assertRaises(Exception) as cm:
self.getHashTable()(lookup_ops.TextFileInitializer(
filenames, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER),
default_value)
self.assertIsInstance(cm.exception, (ValueError, TypeError))
else:
with self.assertRaises(errors_impl.InvalidArgumentError):
self.getHashTable()(lookup_ops.TextFileInitializer(
filenames, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER),
default_value)
def testIdToStringTable(self):
vocab_file = self._createVocabFile("feat_to_id_1.txt")
with self.cached_session():
default_value = "UNK"
vocab_size = 3
init = lookup_ops.TextFileStringTableInitializer(
vocab_file, vocab_size=vocab_size)
self.assertTrue("feat_to_id_1.txt_3_-1_-2", init._shared_name)
table = self.getHashTable()(init, default_value)
self.initialize_table(table)
input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
out = table.lookup(input_values)
self.assertAllEqual([b"brain", b"salad", b"surgery", b"UNK"],
self.evaluate(out))
self.assertEqual(vocab_size, self.evaluate(table.size()))
def testStringToIdTable(self):
vocab_file = self._createVocabFile("feat_to_id_2.txt")
with self.cached_session():
default_value = -1
vocab_size = 3
init = lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size)
self.assertTrue("feat_to_id_2.txt_3_-1_-2", init._shared_name)
table = self.getHashTable()(init, default_value)
self.initialize_table(table)
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
out = table.lookup(input_string)
self.assertAllEqual([0, 1, 2, -1], self.evaluate(out))
self.assertEqual(vocab_size, self.evaluate(table.size()))
def testInt64ToIdTable(self):
vocab_file = self._createVocabFile(
"feat_to_id_3.txt", values=("42", "1", "-1000"))
with self.cached_session():
default_value = -1
vocab_size = 3
init = lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64)
self.assertTrue("feat_to_id_3.txt_3_-1_-2", init._shared_name)
table = self.getHashTable()(init, default_value)
self.initialize_table(table)
out = table.lookup(
constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64))
self.assertAllEqual((0, 1, 2, -1), self.evaluate(out))
self.assertEqual(vocab_size, self.evaluate(table.size()))
class StaticVocabularyTableTest(BaseLookupTableTest):
  """Tests for StaticVocabularyTable: file/tensor vocab ids plus OOV buckets.

  In-vocabulary keys map to their vocabulary ids [0, vocab_size); anything
  else is hashed into one of `oov_buckets` ids in
  [vocab_size, vocab_size + oov_buckets).
  """

  def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
    """Writes `values`, one per line, to a temp file and returns its path."""
    vocabulary_file = os.path.join(self.get_temp_dir(), basename)
    with open(vocabulary_file, "w") as f:
      f.write("\n".join(values) + "\n")
    return vocabulary_file

  def testStringStaticVocabularyTable(self):
    """In-vocab strings map to their line ids; "UNK" lands in the OOV bucket."""
    vocab_file = self._createVocabFile("feat_to_id_1.txt")
    vocab_size = 3
    oov_buckets = 1
    table = self.getVocabularyTable()(lookup_ops.TextFileIdTableInitializer(
        vocab_file, vocab_size=vocab_size), oov_buckets)
    self.initialize_table(table)
    input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
    out = table.lookup(input_string)
    self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))
    self.assertEqual(vocab_size + oov_buckets, self.evaluate(table.size()))

  def testInt32StaticVocabularyTable(self):
    """int32 lookup keys are accepted via lookup_key_dtype on an int64 vocab."""
    vocab_file = self._createVocabFile("feat_to_id_2.txt", ("42", "1", "-1000"))
    vocab_size = 3
    oov_buckets = 1
    table = self.getVocabularyTable()(
        lookup_ops.TextFileIdTableInitializer(
            vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
        oov_buckets,
        lookup_key_dtype=dtypes.int32)
    self.initialize_table(table)
    values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int32)
    out = table.lookup(values)
    self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))
    self.assertEqual(vocab_size + oov_buckets, self.evaluate(table.size()))

  def testInt64StaticVocabularyTable(self):
    """int64 keys from file; the OOV key 11 maps into the single OOV bucket."""
    vocab_file = self._createVocabFile("feat_to_id_3.txt", ("42", "1", "-1000"))
    vocab_size = 3
    oov_buckets = 1
    table = self.getVocabularyTable()(lookup_ops.TextFileIdTableInitializer(
        vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64), oov_buckets)
    self.initialize_table(table)
    values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64)
    out = table.lookup(values)
    self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))
    self.assertEqual(vocab_size + oov_buckets, self.evaluate(table.size()))

  def testStringStaticVocabularyTableNoInitializer(self):
    """With no initializer every id comes from fingerprint(key) mod buckets."""
    oov_buckets = 5
    # Set a table that only uses hash buckets, for each input value returns
    # an id calculated by fingerprint("input") mod oov_buckets.
    table = self.getVocabularyTable()(None, oov_buckets)
    self.initialize_table(table)
    values = constant_op.constant(("brain", "salad", "surgery"))
    out = table.lookup(values)
    self.assertAllEqual(
        [
            3,  # fingerprint("brain") mod 5.
            1,  # fingerprint("salad") mod 5.
            4  # fingerprint("surgery") mod 5
        ],
        self.evaluate(out))
    self.assertEqual(oov_buckets, self.evaluate(table.size()))

  def testStaticVocabularyTableWithMultipleInitializers(self):
    """Two named tables built from one initializer behave identically."""
    vocab_file = self._createVocabFile("feat_to_id_4.txt")
    vocab_size = 3
    oov_buckets = 3
    init = lookup_ops.TextFileIdTableInitializer(
        vocab_file, vocab_size=vocab_size)
    table1 = self.getVocabularyTable()(init, oov_buckets, name="table1")
    table2 = self.getVocabularyTable()(init, oov_buckets, name="table2")
    self.evaluate(lookup_ops.tables_initializer())
    input_string = constant_op.constant(
        ["fruit", "brain", "salad", "surgery", "UNK"])
    out1 = table1.lookup(input_string)
    out2 = table2.lookup(input_string)
    out1, out2 = self.evaluate([out1, out2])
    # OOV terms ("fruit", "UNK") hash to bucket ids >= vocab_size (here 5).
    self.assertAllEqual([5, 0, 1, 2, 5], out1)
    self.assertAllEqual([5, 0, 1, 2, 5], out2)
    self.assertEqual(vocab_size + oov_buckets, self.evaluate(table1.size()))
    self.assertEqual(vocab_size + oov_buckets, self.evaluate(table2.size()))

  def testStaticVocabularyTableInitializationAcrossSessions(self):
    """A shared underlying table remains initialized in a later session."""
    vocab_file = self._createVocabFile("feat_to_id_5.txt")
    with self.cached_session():
      vocab_size = 3
      oov_buckets = 1
      table1 = self.getVocabularyTable()(lookup_ops.TextFileIdTableInitializer(
          vocab_file, vocab_size=vocab_size), oov_buckets)
      self.initialize_table(table1)
      input_string_1 = constant_op.constant(
          ["brain", "salad", "surgery", "UNK"])
      out1 = table1.lookup(input_string_1)
      self.assertAllEqual([0, 1, 2, 3], self.evaluate(out1))
      self.assertEqual(vocab_size + oov_buckets, self.evaluate(table1.size()))
    with self.cached_session():
      vocab_size = 3
      oov_buckets = 1
      # Underlying lookup table already initialized in previous session.
      # No need to initialize table2
      table2 = self.getVocabularyTable()(lookup_ops.TextFileIdTableInitializer(
          vocab_file, vocab_size=vocab_size), oov_buckets)
      input_string_2 = constant_op.constant(["fruit", "salad", "UNK"])
      out2 = table2.lookup(input_string_2)
      self.assertAllEqual([3, 1, 3], self.evaluate(out2))
      self.assertEqual(vocab_size + oov_buckets, self.evaluate(table2.size()))

  def testSparseTensor(self):
    """SparseTensor lookup maps values; indices and dense_shape pass through."""
    vocab_file = self._createVocabFile("feat_to_id_7.txt")
    input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
    input_shape = [4, 4]
    sp_features = sparse_tensor.SparseTensor(
        constant_op.constant(input_indices, dtypes.int64),
        constant_op.constant(["brain", "salad", "brain", "surgery", "tarkus"],
                             dtypes.string),
        constant_op.constant(input_shape, dtypes.int64))
    table = self.getVocabularyTable()(lookup_ops.TextFileIdTableInitializer(
        vocab_file, vocab_size=3), 1)
    self.initialize_table(table)
    sp_ids = table.lookup(sp_features)
    self.assertAllEqual([5], sp_ids.values._shape_as_list())
    sp_ids_ind, sp_ids_val, sp_ids_shape = self.evaluate(
        [sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
    self.assertAllEqual(input_indices, sp_ids_ind)
    # "tarkus" is OOV and maps to the single OOV bucket id (3).
    self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
    self.assertAllEqual(input_shape, sp_ids_shape)

  def testInt32SparseTensor(self):
    """SparseTensor with int32 values over a tensor-backed int64 vocab."""
    input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
    input_shape = [4, 4]
    sp_features = sparse_tensor.SparseTensor(
        constant_op.constant(input_indices, dtypes.int64),
        constant_op.constant([42, 1, 42, -1000, 11], dtypes.int32),
        constant_op.constant(input_shape, dtypes.int64))
    table = self.getVocabularyTable()(
        lookup_ops.KeyValueTensorInitializer((42, 1, -1000), (0, 1, 2),
                                             dtypes.int64, dtypes.int64),
        1,
        lookup_key_dtype=dtypes.int32)
    self.initialize_table(table)
    sp_ids = table.lookup(sp_features)
    self.assertAllEqual([5], sp_ids.values._shape_as_list())
    sp_ids_ind, sp_ids_val, sp_ids_shape = self.evaluate(
        [sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
    self.assertAllEqual(input_indices, sp_ids_ind)
    self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
    self.assertAllEqual(input_shape, sp_ids_shape)

  def testInt64SparseTensor(self):
    """SparseTensor with int64 values over a tensor-backed int64 vocab."""
    input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
    input_shape = [4, 4]
    sp_features = sparse_tensor.SparseTensor(
        constant_op.constant(input_indices, dtypes.int64),
        constant_op.constant([42, 1, 42, -1000, 11], dtypes.int64),
        constant_op.constant(input_shape, dtypes.int64))
    table = self.getVocabularyTable()(lookup_ops.KeyValueTensorInitializer(
        (42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64), 1)
    self.initialize_table(table)
    sp_ids = table.lookup(sp_features)
    self.assertAllEqual([5], sp_ids.values._shape_as_list())
    sp_ids_ind, sp_ids_val, sp_ids_shape = self.evaluate(
        [sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
    self.assertAllEqual(input_indices, sp_ids_ind)
    self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
    self.assertAllEqual(input_shape, sp_ids_shape)

  def testStaticVocabularyTableNoInnerTable(self):
    """With no initializer there is no backing table resource handle."""
    table = self.getVocabularyTable()(None, num_oov_buckets=1)
    self.assertIsNone(table.resource_handle)
class DenseHashTableOpTest(test.TestCase):
def testBasic(self):
with self.cached_session():
keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)
values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=0,
deleted_key=-1)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(4, self.evaluate(table.size()))
remove_string = constant_op.constant([12, 15], dtypes.int64)
self.evaluate(table.remove(remove_string))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant([11, 12, 15], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = self.evaluate(output)
self.assertAllEqual([0, -1, -1], result)
def testBasicBool(self):
with self.cached_session():
keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)
values = constant_op.constant([True, True, True, True], dtypes.bool)
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.bool,
default_value=False,
empty_key=0,
deleted_key=-1)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(4, self.evaluate(table.size()))
remove_string = constant_op.constant([11, 15], dtypes.int64)
self.evaluate(table.remove(remove_string))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant([11, 12, 15], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = self.evaluate(output)
self.assertAllEqual([False, True, False], result)
def testSameEmptyAndDeletedKey(self):
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Empty and deleted keys"):
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=42,
deleted_key=42)
self.assertAllEqual(0, self.evaluate(table.size()))
  @test_util.run_v1_only("uses placeholders")
  def testLookupUnknownShape(self):
    """Looking up a placeholder of unknown shape yields an unknown shape."""
    with self.cached_session():
      keys = constant_op.constant([11, 12, 13], dtypes.int64)
      values = constant_op.constant([0, 1, 2], dtypes.int64)
      table = lookup_ops.DenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=-1,
          empty_key=0,
          deleted_key=-1)
      self.evaluate(table.insert(keys, values))
      self.assertAllEqual(3, self.evaluate(table.size()))
      # A shape-less placeholder propagates an unknown static shape through
      # the lookup; the actual keys are supplied at eval time.
      placeholder_keys = array_ops.placeholder(dtypes.int64)
      output = table.lookup(placeholder_keys)
      self.assertAllEqual(None, output.get_shape())
      result = output.eval({placeholder_keys: [11, 12, 15]})
      self.assertAllEqual([0, 1, -1], result)
def testMapStringToFloat(self):
with self.cached_session():
keys = constant_op.constant(["a", "b", "c", "d"], dtypes.string)
values = constant_op.constant([0.0, 1.1, 2.2, 3.3], dtypes.float32)
default_value = constant_op.constant(-1.5, dtypes.float32)
table = lookup_ops.DenseHashTable(
dtypes.string,
dtypes.float32,
default_value=default_value,
empty_key="",
deleted_key="$")
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(4, self.evaluate(table.size()))
remove_string = constant_op.constant(["b", "e"])
self.evaluate(table.remove(remove_string))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant(["a", "b", "d", "e"], dtypes.string)
output = table.lookup(input_string)
self.assertAllEqual([4], output.get_shape())
result = self.evaluate(output)
self.assertAllClose([0, -1.5, 3.3, -1.5], result)
def testMapInt64ToFloat(self):
for float_dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)
values = constant_op.constant([0.0, 1.1, 2.2, 3.3], float_dtype)
default_value = constant_op.constant(-1.5, float_dtype)
table = lookup_ops.DenseHashTable(
dtypes.int64,
float_dtype,
default_value=default_value,
empty_key=0,
deleted_key=-1)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(4, self.evaluate(table.size()))
remove_string = constant_op.constant([12, 15], dtypes.int64)
self.evaluate(table.remove(remove_string))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant([11, 12, 14, 15], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([4], output.get_shape())
result = self.evaluate(output)
self.assertAllClose([0, -1.5, 3.3, -1.5], result)
  def testVectorValues(self):
    """Vector-valued entries; inserting a 4th entry grows buckets 4 -> 8."""
    with self.cached_session():
      keys = constant_op.constant([11, 12, 13], dtypes.int64)
      values = constant_op.constant([[0, 1, 2, 3], [3, 4, 5, 6], [6, 7, 8, 9]],
                                    dtypes.int64)
      default_value = constant_op.constant([-1, -2, -3, -4], dtypes.int64)
      table = lookup_ops.DenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=default_value,
          empty_key=0,
          deleted_key=-1,
          initial_num_buckets=4)
      self.assertAllEqual(0, self.evaluate(table.size()))
      self.evaluate(table.insert(keys, values))
      self.assertAllEqual(3, self.evaluate(table.size()))
      self.assertAllEqual(4, len(self.evaluate(table.export()[0])))
      # The fourth insert triggers a resize: export now shows 8 buckets.
      self.evaluate(
          table.insert(
              constant_op.constant([14], dtypes.int64),
              constant_op.constant([[2, 3, 4, 5]], dtypes.int64)))
      self.assertAllEqual(4, self.evaluate(table.size()))
      self.assertAllEqual(8, len(self.evaluate(table.export()[0])))
      # Removal shrinks the size but never the bucket count.
      remove_string = constant_op.constant([12, 16], dtypes.int64)
      self.evaluate(table.remove(remove_string))
      self.assertAllEqual(3, self.evaluate(table.size()))
      self.assertAllEqual(8, len(self.evaluate(table.export()[0])))
      input_string = constant_op.constant([11, 12, 14, 15], dtypes.int64)
      output = table.lookup(input_string)
      self.assertAllEqual([4, 4],
                          output.shape,
                          msg="Saw shape: %s" % output.shape)
      result = self.evaluate(output)
      # Removed (12) and missing (15) keys return the vector default.
      self.assertAllEqual(
          [[0, 1, 2, 3], [-1, -2, -3, -4], [2, 3, 4, 5], [-1, -2, -3, -4]],
          result)
  def testVectorKeys(self):
    """Composite (shape-[2]) keys with scalar values; vector sentinels."""
    with self.cached_session():
      keys = constant_op.constant([[0, 1], [1, 2], [1, 3]], dtypes.int64)
      values = constant_op.constant([10, 11, 12], dtypes.int64)
      # empty_key/deleted_key must match the key shape ([2] here).
      empty_key = constant_op.constant([0, 3], dtypes.int64)
      deleted_key = constant_op.constant([-1, -1], dtypes.int64)
      default_value = constant_op.constant(-1, dtypes.int64)
      table = lookup_ops.DenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=default_value,
          empty_key=empty_key,
          deleted_key=deleted_key,
          initial_num_buckets=8)
      self.assertAllEqual(0, self.evaluate(table.size()))
      self.evaluate(table.insert(keys, values))
      self.assertAllEqual(3, self.evaluate(table.size()))
      self.evaluate(
          table.insert(
              constant_op.constant([[0, 0]], dtypes.int64),
              constant_op.constant([13], dtypes.int64)))
      self.assertAllEqual(4, self.evaluate(table.size()))
      self.assertAllEqual(8, len(self.evaluate(table.export()[0])))
      # Remove one present key ([1, 2]) and one absent key ([7, 8]).
      remove_string = constant_op.constant([[1, 2], [7, 8]], dtypes.int64)
      self.evaluate(table.remove(remove_string))
      self.assertAllEqual(3, self.evaluate(table.size()))
      self.assertAllEqual(8, len(self.evaluate(table.export()[0])))
      input_string = constant_op.constant([[0, 1], [1, 2], [1, 3], [0, 2]],
                                          dtypes.int64)
      output = table.lookup(input_string)
      self.assertAllEqual([4], output.get_shape())
      result = self.evaluate(output)
      self.assertAllEqual([10, -1, 12, -1], result)
  def testResize(self):
    """Bucket count grows (4 -> 16) as entries are added; never shrinks."""
    with self.cached_session():
      keys = constant_op.constant([11, 12, 13], dtypes.int64)
      values = constant_op.constant([0, 1, 2], dtypes.int64)
      table = lookup_ops.DenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=-1,
          empty_key=0,
          deleted_key=-1,
          initial_num_buckets=4)
      self.assertAllEqual(0, self.evaluate(table.size()))
      self.evaluate(table.insert(keys, values))
      self.assertAllEqual(3, self.evaluate(table.size()))
      self.assertAllEqual(4, len(self.evaluate(table.export()[0])))
      # Removing entries reduces size but leaves the bucket count at 4.
      keys2 = constant_op.constant([12, 99], dtypes.int64)
      self.evaluate(table.remove(keys2))
      self.assertAllEqual(2, self.evaluate(table.size()))
      self.assertAllEqual(4, len(self.evaluate(table.export()[0])))
      # Five more inserts (13 overwrites its old value 2 with 3) grow the
      # table to 6 live entries across 16 buckets.
      keys3 = constant_op.constant([13, 14, 15, 16, 17], dtypes.int64)
      values3 = constant_op.constant([3, 4, 5, 6, 7], dtypes.int64)
      self.evaluate(table.insert(keys3, values3))
      self.assertAllEqual(6, self.evaluate(table.size()))
      self.assertAllEqual(16, len(self.evaluate(table.export()[0])))
      keys4 = constant_op.constant([10, 11, 12, 13, 14, 15, 16, 17, 18],
                                   dtypes.int64)
      output = table.lookup(keys4)
      self.assertAllEqual([-1, 0, -1, 3, 4, 5, 6, 7, -1], self.evaluate(output))
  def testExport(self):
    """export() returns every bucket, including empty and deleted slots."""
    with self.cached_session():
      keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)
      values = constant_op.constant([1, 2, 3, 4], dtypes.int64)
      table = lookup_ops.DenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=-1,
          empty_key=100,
          deleted_key=200,
          initial_num_buckets=8)
      self.assertAllEqual(0, self.evaluate(table.size()))
      self.evaluate(table.insert(keys, values))
      self.assertAllEqual(4, self.evaluate(table.size()))
      keys2 = constant_op.constant([12, 15], dtypes.int64)
      self.evaluate(table.remove(keys2))
      self.assertAllEqual(3, self.evaluate(table.size()))
      exported_keys, exported_values = table.export()
      np_keys = self.evaluate(exported_keys)
      np_values = self.evaluate(exported_values)
      # Export covers all 8 buckets, not just the 3 live entries.
      self.assertAllEqual(8, len(np_keys))
      self.assertAllEqual(8, len(np_values))
      # pair up keys and values, drop extra added dimension
      pairs = np.dstack((np_keys.flatten(), np_values.flatten()))[0]
      # sort by key
      pairs = pairs[pairs[:, 0].argsort()]
      # Empty buckets carry the empty_key (100); the bucket for removed key
      # 12 carries the deleted_key (200) and retains the stale value (2).
      self.assertAllEqual([[11, 1], [13, 3], [14, 4], [100, 0], [100, 0],
                           [100, 0], [100, 0], [200, 2]], pairs)
  @test_util.run_v1_only("Saver V1 only")
  def testSaveRestore(self):
    """Saver round-trip restores entries AND the saved bucket count (32)."""
    save_dir = os.path.join(self.get_temp_dir(), "save_restore")
    save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
    with self.session(graph=ops.Graph()) as sess:
      default_value = -1
      empty_key = 0
      deleted_key = -1
      keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)
      values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
      table = lookup_ops.DenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=default_value,
          empty_key=empty_key,
          deleted_key=deleted_key,
          name="t1",
          checkpoint=True,
          initial_num_buckets=32)
      save = saver.Saver()
      self.assertAllEqual(0, table.size().eval())
      table.insert(keys, values).run()
      self.assertAllEqual(4, table.size().eval())
      self.assertAllEqual(32, len(table.export()[0].eval()))
      # Remove key 12 before saving; the checkpoint holds 3 entries.
      keys2 = constant_op.constant([12, 15], dtypes.int64)
      table.remove(keys2).run()
      self.assertAllEqual(3, table.size().eval())
      self.assertAllEqual(32, len(table.export()[0].eval()))
      val = save.save(sess, save_path)
      self.assertIsInstance(val, six.string_types)
      self.assertEqual(save_path, val)
    with self.session(graph=ops.Graph()) as sess:
      # Rebuild a same-named table with different buckets (64) and contents;
      # restore must overwrite both.
      table = lookup_ops.DenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=default_value,
          empty_key=empty_key,
          deleted_key=deleted_key,
          name="t1",
          checkpoint=True,
          initial_num_buckets=64)
      table.insert(
          constant_op.constant([11, 14], dtypes.int64),
          constant_op.constant([12, 24], dtypes.int64)).run()
      self.assertAllEqual(2, table.size().eval())
      self.assertAllEqual(64, len(table.export()[0].eval()))
      save = saver.Saver()
      # Restore the saved values in the parameter nodes.
      save.restore(sess, save_path)
      self.assertAllEqual(3, table.size().eval())
      # Bucket count reverts to the checkpointed 32, not the new 64.
      self.assertAllEqual(32, len(table.export()[0].eval()))
      input_string = constant_op.constant([10, 11, 12, 13, 14], dtypes.int64)
      output = table.lookup(input_string)
      self.assertAllEqual([-1, 0, -1, 2, 3], output.eval())
  @test_util.run_in_graph_and_eager_modes
  def testObjectSaveRestore(self):
    """Object-based (trackable) checkpoint round-trip for DenseHashTable."""
    save_dir = os.path.join(self.get_temp_dir(), "save_restore")
    save_prefix = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
    default_value = -1
    empty_key = 0
    deleted_key = -1
    keys = constant_op.constant([11, 12, 13], dtypes.int64)
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    save_table = lookup_ops.DenseHashTable(
        dtypes.int64,
        dtypes.int64,
        default_value=default_value,
        empty_key=empty_key,
        deleted_key=deleted_key,
        name="t1",
        checkpoint=True,
        initial_num_buckets=32)
    save_checkpoint = trackable.Checkpoint(table=save_table)
    self.assertAllEqual(0, self.evaluate(save_table.size()))
    self.evaluate(save_table.insert(keys, values))
    self.assertAllEqual(3, self.evaluate(save_table.size()))
    self.assertAllEqual(32, len(self.evaluate(save_table.export()[0])))
    save_path = save_checkpoint.save(save_prefix)
    del save_table, save_checkpoint
    # Rebuild with different buckets (64) and contents; restore must
    # overwrite both with the checkpointed state.
    load_table = lookup_ops.DenseHashTable(
        dtypes.int64,
        dtypes.int64,
        default_value=default_value,
        empty_key=empty_key,
        deleted_key=deleted_key,
        name="t1",
        checkpoint=True,
        initial_num_buckets=64)
    self.evaluate(
        load_table.insert(
            constant_op.constant([11, 14], dtypes.int64),
            constant_op.constant([12, 24], dtypes.int64)))
    self.assertAllEqual(2, self.evaluate(load_table.size()))
    self.assertAllEqual(64, len(self.evaluate(load_table.export()[0])))
    restore_checkpoint = trackable.Checkpoint(table=load_table)
    # Restore the saved values in the parameter nodes.
    restore_checkpoint.restore(save_path).run_restore_ops()
    self.assertAllEqual(3, self.evaluate(load_table.size()))
    # Bucket count reverts to the checkpointed 32.
    self.assertAllEqual(32, len(self.evaluate(load_table.export()[0])))
    input_string = constant_op.constant([10, 11, 12, 13, 14], dtypes.int64)
    output = load_table.lookup(input_string)
    self.assertAllEqual([-1, 0, 1, 2, -1], self.evaluate(output))
  @test_util.run_v1_only("Saver V1 only")
  def testVectorSaveRestore(self):
    """Saver round-trip with vector keys AND vector values."""
    save_dir = os.path.join(self.get_temp_dir(), "vector_save_restore")
    save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
    with self.session(graph=ops.Graph()) as sess:
      # Sentinels and default value are all shape-[2] to match the entries.
      empty_key = constant_op.constant([11, 13], dtypes.int64)
      deleted_key = constant_op.constant([-2, -3], dtypes.int64)
      default_value = constant_op.constant([-1, -2], dtypes.int64)
      keys = constant_op.constant([[11, 12], [11, 14], [12, 13], [13, 14]],
                                  dtypes.int64)
      values = constant_op.constant([[0, 1], [2, 3], [2, 4], [4, 5]],
                                    dtypes.int64)
      table = lookup_ops.DenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=default_value,
          empty_key=empty_key,
          deleted_key=deleted_key,
          name="t1",
          checkpoint=True,
          initial_num_buckets=32)
      save = saver.Saver()
      self.assertAllEqual(0, table.size().eval())
      table.insert(keys, values).run()
      self.assertAllEqual(4, table.size().eval())
      self.assertAllEqual(32, len(table.export()[0].eval()))
      # Remove [12, 13] before saving; the checkpoint holds 3 entries.
      keys2 = constant_op.constant([[12, 13], [16, 17]], dtypes.int64)
      table.remove(keys2).run()
      self.assertAllEqual(3, table.size().eval())
      self.assertAllEqual(32, len(table.export()[0].eval()))
      val = save.save(sess, save_path)
      self.assertIsInstance(val, six.string_types)
      self.assertEqual(save_path, val)
    with self.session(graph=ops.Graph()) as sess:
      empty_key = constant_op.constant([11, 13], dtypes.int64)
      deleted_key = constant_op.constant([-2, -3], dtypes.int64)
      default_value = constant_op.constant([-1, -2], dtypes.int64)
      # Rebuild with different buckets (64) and contents; restore must
      # overwrite both with the checkpointed state.
      table = lookup_ops.DenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=default_value,
          empty_key=empty_key,
          deleted_key=deleted_key,
          name="t1",
          checkpoint=True,
          initial_num_buckets=64)
      table.insert(
          constant_op.constant([[11, 12], [13, 15]], dtypes.int64),
          constant_op.constant([[21, 22], [23, 24]], dtypes.int64)).run()
      self.assertAllEqual(2, table.size().eval())
      self.assertAllEqual(64, len(table.export()[0].eval()))
      save = saver.Saver()
      # Restore the saved values in the parameter nodes.
      save.restore(sess, save_path)
      self.assertAllEqual(3, table.size().eval())
      self.assertAllEqual(32, len(table.export()[0].eval()))
      input_string = constant_op.constant(
          [[11, 12], [11, 14], [11, 15], [13, 14], [13, 15]], dtypes.int64)
      output = table.lookup(input_string)
      self.assertAllEqual([[0, 1], [2, 3], [-1, -2], [4, 5], [-1, -2]],
                          output.eval())
  @test_util.run_v1_only("Saver V1 only")
  def testVectorScalarSaveRestore(self):
    """Saver round-trip with vector keys and scalar values."""
    save_dir = os.path.join(self.get_temp_dir(), "vector_scalar_save_restore")
    save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
    with self.session(graph=ops.Graph()) as sess:
      # Keys are shape-[2]; values (and default_value) are scalars.
      empty_key = constant_op.constant([11, 13], dtypes.int64)
      deleted_key = constant_op.constant([-1, -1], dtypes.int64)
      default_value = constant_op.constant(-1, dtypes.int64)
      keys = constant_op.constant([[11, 12], [11, 14], [12, 13], [13, 14]],
                                  dtypes.int64)
      values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
      table = lookup_ops.DenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=default_value,
          empty_key=empty_key,
          deleted_key=deleted_key,
          name="t2",
          checkpoint=True,
          initial_num_buckets=32)
      save = saver.Saver()
      self.assertAllEqual(0, table.size().eval())
      table.insert(keys, values).run()
      self.assertAllEqual(4, table.size().eval())
      self.assertAllEqual(32, len(table.export()[0].eval()))
      # Remove [12, 13] before saving; the checkpoint holds 3 entries.
      keys2 = constant_op.constant([[12, 13], [15, 16]], dtypes.int64)
      table.remove(keys2).run()
      self.assertAllEqual(3, table.size().eval())
      self.assertAllEqual(32, len(table.export()[0].eval()))
      val = save.save(sess, save_path)
      self.assertIsInstance(val, six.string_types)
      self.assertEqual(save_path, val)
    with self.session(graph=ops.Graph()) as sess:
      empty_key = constant_op.constant([11, 13], dtypes.int64)
      deleted_key = constant_op.constant([-1, -1], dtypes.int64)
      default_value = constant_op.constant(-1, dtypes.int64)
      # Rebuild with different buckets (64) and contents; restore must
      # overwrite both with the checkpointed state.
      table = lookup_ops.DenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=default_value,
          empty_key=empty_key,
          deleted_key=deleted_key,
          name="t2",
          checkpoint=True,
          initial_num_buckets=64)
      table.insert(
          constant_op.constant([[11, 12], [13, 15]], dtypes.int64),
          constant_op.constant([3, 4], dtypes.int64)).run()
      self.assertAllEqual(2, table.size().eval())
      self.assertAllEqual(64, len(table.export()[0].eval()))
      save = saver.Saver()
      # Restore the saved values in the parameter nodes.
      save.restore(sess, save_path)
      self.assertAllEqual(3, table.size().eval())
      self.assertAllEqual(32, len(table.export()[0].eval()))
      input_string = constant_op.constant(
          [[11, 12], [11, 14], [11, 15], [13, 14], [13, 15]], dtypes.int64)
      output = table.lookup(input_string)
      self.assertAllEqual([0, 1, -1, 3, -1], output.eval())
def testReprobe(self):
with self.cached_session():
# Insert 6 keys into a table with 8 buckets.
# The values are chosen to make sure collisions occur when using GCC STL
keys = constant_op.constant([11, 12, 13, 19, 20, 21], dtypes.int64)
values = constant_op.constant([51, 52, 53, 54, 55, 56], dtypes.int64)
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=0,
deleted_key=-1,
initial_num_buckets=8)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(6, self.evaluate(table.size()))
input_string = constant_op.constant([10, 11, 12, 13, 14, 19, 20, 21, 22],
dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([9], output.get_shape())
result = self.evaluate(output)
self.assertAllEqual([-1, 51, 52, 53, -1, 54, 55, 56, -1], result)
def testCustomEmptyKey(self):
with self.cached_session():
keys = constant_op.constant([11, 0, 13], dtypes.int64)
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=12,
deleted_key=-1)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant([11, 0, 15], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = self.evaluate(output)
self.assertAllEqual([0, 1, -1], result)
  def testErrors(self):
    """Verifies DenseHashTable rejects invalid keys, shapes and parameters."""
    with self.cached_session():
      table = lookup_ops.DenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=-1,
          empty_key=0,
          deleted_key=-1)
      # Inserting the empty key returns an error
      keys1 = constant_op.constant([11, 0], dtypes.int64)
      values1 = constant_op.constant([0, 1], dtypes.int64)
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "empty_key"):
        self.evaluate(table.insert(keys1, values1))
      # Looking up the empty key returns an error
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "empty_key"):
        self.evaluate(table.lookup(keys1))
      # Inserting the deleted key returns an error
      keys2 = constant_op.constant([11, -1], dtypes.int64)
      values2 = constant_op.constant([0, 1], dtypes.int64)
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "deleted_key"):
        self.evaluate(table.insert(keys2, values2))
      # Looking up the deleted key returns an error
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "deleted_key"):
        self.evaluate(table.lookup(keys2))
      # Arbitrary tensors of keys are not supported
      keys = constant_op.constant([[11, 0], [12, 1]], dtypes.int64)
      values = constant_op.constant([[11, 0], [12, 1]], dtypes.int64)
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "Expected key shape"):
        self.evaluate(table.lookup(keys))
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "Expected key shape"):
        self.evaluate(table.insert(keys, values))
      # An invalid initial_num_buckets (12) is rejected at construction.
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "Number of buckets must be"):
        table2 = lookup_ops.DenseHashTable(
            dtypes.int64,
            dtypes.int64,
            default_value=-1,
            empty_key=17,
            deleted_key=-1,
            initial_num_buckets=12)
        self.assertAllEqual(0, self.evaluate(table2.size()))
      # empty_key and deleted_key must have identical shapes.
      with self.assertRaisesRegexp(
          errors_impl.InvalidArgumentError,
          "Empty and deleted keys must have same shape"):
        table3 = lookup_ops.DenseHashTable(
            dtypes.int64,
            dtypes.int64,
            default_value=-1,
            empty_key=42,
            deleted_key=[1, 2])
        self.assertAllEqual(0, self.evaluate(table3.size()))
      # empty_key and deleted_key must be distinct (scalar case).
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "Empty and deleted keys cannot be equal"):
        table4 = lookup_ops.DenseHashTable(
            dtypes.int64,
            dtypes.int64,
            default_value=-1,
            empty_key=42,
            deleted_key=42)
        self.assertAllEqual(0, self.evaluate(table4.size()))
      # The same distinctness check applies to vector-shaped keys.
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "Empty and deleted keys cannot be equal"):
        table5 = lookup_ops.DenseHashTable(
            dtypes.int64,
            dtypes.int64,
            default_value=-1,
            empty_key=[1, 2, 3],
            deleted_key=[1, 2, 3])
        self.assertAllEqual(0, self.evaluate(table5.size()))
class IndexTableFromFile(test.TestCase):
  """Tests for lookup_ops.index_table_from_file (key -> index lookups)."""
  def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
    # Writes `values`, one per line, into a temp file and returns its path.
    vocabulary_file = os.path.join(self.get_temp_dir(), basename)
    with open(vocabulary_file, "w") as f:
      f.write("\n".join(values) + "\n")
    return vocabulary_file
  def test_string_index_table_from_file(self):
    """In-vocab strings map to line numbers; OOV goes to the extra bucket."""
    vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
    with self.cached_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file, num_oov_buckets=1)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
      # In graph mode, evaluating before the initializer has run must fail.
      if not context.executing_eagerly():
        with self.assertRaises(errors_impl.OpError):
          self.evaluate(ids)
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllEqual((1, 2, 3), self.evaluate(ids))
  def test_string_index_table_from_multicolumn_file(self):
    """Keys can come from column 0 of a tab-delimited multicolumn file."""
    vocabulary_file = self._createVocabFile(
        "f2i_vocab1.txt", values=("brain\t300", "salad\t20", "surgery\t1"))
    with self.cached_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file,
          num_oov_buckets=1,
          key_column_index=0,
          value_column_index=lookup_ops.TextFileIndex.LINE_NUMBER)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
      if not context.executing_eagerly():
        with self.assertRaises(errors_impl.OpError):
          self.evaluate(ids)
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllEqual((1, 2, 3), self.evaluate(ids))
  def test_string_index_table_from_multicolumn_file_custom_delimiter(self):
    """Same as above but with a space delimiter instead of tab."""
    vocabulary_file = self._createVocabFile(
        "f2i_vocab1.txt", values=("brain 300", "salad 20", "surgery 1"))
    with self.cached_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file,
          num_oov_buckets=1,
          key_column_index=0,
          value_column_index=lookup_ops.TextFileIndex.LINE_NUMBER,
          delimiter=" ")
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
      if not context.executing_eagerly():
        with self.assertRaises(errors_impl.OpError):
          self.evaluate(ids)
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllEqual((1, 2, 3), self.evaluate(ids))
  def test_string_index_table_from_file_tensor_filename(self):
    """vocabulary_file may be a string tensor; it is tracked as an asset."""
    vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
    with self.cached_session():
      vocabulary_file = constant_op.constant(vocabulary_file)
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file, num_oov_buckets=1)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
      if not context.executing_eagerly():
        with self.assertRaises(errors_impl.OpError):
          self.evaluate(ids)
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllEqual((1, 2, 3), self.evaluate(ids))
      # A constant filename is registered in the ASSET_FILEPATHS collection.
      if not context.executing_eagerly():
        self.assertEqual(1,
                         len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))
  @test_util.run_v1_only("placeholder usage")
  def test_string_index_table_from_file_placeholder_filename(self):
    """A placeholder filename works but is not tracked as an asset."""
    vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
    with self.cached_session():
      vocabulary_placeholder = array_ops.placeholder(dtypes.string, [])
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_placeholder, num_oov_buckets=1)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
      with self.assertRaises(errors_impl.OpError):
        self.evaluate(ids)
      feed_dict = {vocabulary_placeholder.name: vocabulary_file}
      lookup_ops.tables_initializer().run(feed_dict=feed_dict)
      self.assertAllEqual((1, 2, 3), self.evaluate(ids))
      self.assertEqual(0,
                       len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))
  def test_int32_index_table_from_file(self):
    """Integer vocab files can be looked up with int32 keys."""
    vocabulary_file = self._createVocabFile(
        "f2i_vocab2.txt", values=("42", "1", "-1000"))
    with self.cached_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file,
          num_oov_buckets=1,
          key_dtype=dtypes.int32)
      ids = table.lookup(
          constant_op.constant((1, -1000, 11), dtype=dtypes.int32))
      if not context.executing_eagerly():
        with self.assertRaises(errors_impl.OpError):
          self.evaluate(ids)
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllEqual((1, 2, 3), self.evaluate(ids))
  def test_int64_index_table_from_file(self):
    """Integer vocab files can be looked up with int64 keys."""
    vocabulary_file = self._createVocabFile(
        "f2i_vocab3.txt", values=("42", "1", "-1000"))
    with self.cached_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file,
          num_oov_buckets=1,
          key_dtype=dtypes.int64)
      ids = table.lookup(
          constant_op.constant((1, -1000, 11), dtype=dtypes.int64))
      if not context.executing_eagerly():
        with self.assertRaises(errors_impl.OpError):
          self.evaluate(ids)
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllEqual((1, 2, 3), self.evaluate(ids))
  def test_index_table_from_file_with_default_value(self):
    """Without OOV buckets, unknown keys map to default_value."""
    default_value = -42
    vocabulary_file = self._createVocabFile("f2i_vocab4.txt")
    with self.cached_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file, default_value=default_value)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
      if not context.executing_eagerly():
        with self.assertRaises(errors_impl.OpError):
          self.evaluate(ids)
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllEqual((1, 2, default_value), self.evaluate(ids))
  def test_index_table_from_file_with_oov_buckets(self):
    """OOV ids are vocab_size + fingerprint(key) mod num_oov_buckets."""
    vocabulary_file = self._createVocabFile("f2i_vocab5.txt")
    with self.cached_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file, num_oov_buckets=1000)
      ids = table.lookup(
          constant_op.constant(["salad", "surgery", "tarkus", "toccata"]))
      if not context.executing_eagerly():
        with self.assertRaises(errors_impl.OpError):
          self.evaluate(ids)
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllEqual(
          (
              1,  # From vocabulary file.
              2,  # From vocabulary file.
              867,  # 3 + fingerprint("tarkus") mod 1000.
              860),  # 3 + fingerprint("toccata") mod 1000.
          self.evaluate(ids))
  def test_index_table_from_file_fails_with_empty_vocabulary_file_name(self):
    self.assertRaises(
        ValueError, lookup_ops.index_table_from_file, vocabulary_file="")
  def test_index_table_from_file_fails_with_empty_vocabulary(self):
    self.assertRaises(
        ValueError, lookup_ops.index_table_from_file, vocabulary_file=None)
  def test_index_table_from_file_str_fails_with_zero_size_vocabulary(self):
    """vocab_size=0 with a string filename raises a descriptive ValueError."""
    vocabulary_file = self._createVocabFile("zero_vocab_str.txt")
    self.assertRaisesRegexp(
        ValueError,
        "vocab_size must be greater than 0, got 0. "
        "vocabulary_file: .*zero_vocab_str.txt",
        lookup_ops.index_table_from_file,
        vocabulary_file=vocabulary_file,
        vocab_size=0)
  def test_index_table_from_file_tensor_fails_with_zero_size_vocabulary(self):
    """vocab_size=0 with a tensor filename raises the same ValueError."""
    vocabulary_file = constant_op.constant(
        self._createVocabFile("zero_vocab_tensor.txt"))
    self.assertRaisesRegexp(
        ValueError,
        "vocab_size must be greater than 0, got 0. "
        "vocabulary_file: .*zero_vocab_tensor.txt",
        lookup_ops.index_table_from_file,
        vocabulary_file=vocabulary_file,
        vocab_size=0)
  def test_index_table_from_file_with_vocab_size_too_small(self):
    """A vocab_size smaller than the file truncates the vocabulary."""
    vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
    with self.cached_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file, vocab_size=2)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
      if not context.executing_eagerly():
        with self.assertRaises(errors_impl.OpError):
          self.evaluate(ids)
      self.evaluate(lookup_ops.tables_initializer())
      # "surgery" (line 3) is now out of vocab and maps to the default -1.
      self.assertAllEqual((1, -1, -1), self.evaluate(ids))
      self.assertEqual(2, self.evaluate(table.size()))
  def test_index_table_from_file_with_vocab_size_too_large(self):
    """A vocab_size larger than the file fails at initialization time."""
    vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
    with self.cached_session():
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "Invalid vocab_size"):
        table = lookup_ops.index_table_from_file(
            vocabulary_file=vocabulary_file, vocab_size=4)
        self.evaluate(table.initializer)
  def test_index_table_from_file_with_vocab_size(self):
    """A vocab_size matching the file works; unknown keys map to -1."""
    vocabulary_file = self._createVocabFile("f2i_vocab8.txt")
    self.assertRaises(
        ValueError,
        lookup_ops.index_table_from_file,
        vocabulary_file=vocabulary_file,
        vocab_size=0)
    with self.cached_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file, vocab_size=3)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
      if not context.executing_eagerly():
        with self.assertRaises(errors_impl.OpError):
          self.evaluate(ids)
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllEqual((1, 2, -1), self.evaluate(ids))
      self.assertEqual(3, self.evaluate(table.size()))
  def test_index_table_from_file_with_invalid_hashers(self):
    """Non-HasherSpec values and unknown hasher names are rejected."""
    vocabulary_file = self._createVocabFile("invalid_hasher.txt")
    with self.cached_session():
      with self.assertRaises(TypeError):
        lookup_ops.index_table_from_file(
            vocabulary_file=vocabulary_file,
            vocab_size=3,
            num_oov_buckets=1,
            hasher_spec=1)
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file,
          vocab_size=3,
          num_oov_buckets=1,
          hasher_spec=lookup_ops.HasherSpec("my-awesome-hash", None))
      self.assertRaises(ValueError, table.lookup,
                        constant_op.constant(["salad", "surgery", "tarkus"]))
  def test_index_table_from_file_table_ref_with_oov_buckets(self):
    vocabulary_file = self._createVocabFile("f2i_vocab9.txt")
    with self.cached_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file, num_oov_buckets=1)
      self.assertIsNotNone(table.resource_handle)
  def test_index_table_from_file_table_ref_without_oov_buckets(self):
    vocabulary_file = self._createVocabFile("f2i_vocab10.txt")
    with self.cached_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file, num_oov_buckets=0)
      self.assertIsNotNone(table.resource_handle)
class IndexTableFromTensor(test.TestCase):
  """Tests for lookup_ops.index_table_from_tensor (in-memory vocab)."""
  @test_util.run_in_graph_and_eager_modes
  def test_index_table_from_tensor_with_tensor_init(self):
    """Basic lookup; in eager mode re-creating the table is allowed."""
    table = lookup_ops.index_table_from_tensor(
        vocabulary_list=("brain", "salad", "surgery"), num_oov_buckets=1)
    # In graph mode, evaluating before the initializer has run must fail.
    if not context.executing_eagerly():
      with self.assertRaises(errors_impl.OpError):
        self.evaluate(
            table.lookup(constant_op.constant(("salad", "surgery", "tarkus"))))
    else:
      # Reinitializing a table in eager should work.
      table = lookup_ops.index_table_from_tensor(
          vocabulary_list=("brain", "salad", "surgery"), num_oov_buckets=1)
    self.evaluate(lookup_ops.tables_initializer())
    ids = table.lookup(constant_op.constant(("salad", "surgery", "tarkus")))
    self.assertAllEqual((1, 2, 3), self.evaluate(ids))
  def test_int32_index_table_from_tensor_with_tensor_init(self):
    """int32 keys against an integer vocabulary list."""
    with self.cached_session():
      table = lookup_ops.index_table_from_tensor(
          vocabulary_list=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int32)
      ids = table.lookup(
          constant_op.constant((1, -1000, 11), dtype=dtypes.int32))
      if not context.executing_eagerly():
        with self.assertRaises(errors_impl.FailedPreconditionError):
          self.evaluate(ids)
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllEqual((1, 2, 3), self.evaluate(ids))
  def test_int64_index_table_from_tensor_with_tensor_init(self):
    """int64 keys against an integer vocabulary list."""
    with self.cached_session():
      table = lookup_ops.index_table_from_tensor(
          vocabulary_list=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int64)
      ids = table.lookup(
          constant_op.constant((1, -1000, 11), dtype=dtypes.int64))
      if not context.executing_eagerly():
        with self.assertRaises(errors_impl.FailedPreconditionError):
          self.evaluate(ids)
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllEqual((1, 2, 3), self.evaluate(ids))
  def test_index_table_from_tensor_with_default_value(self):
    """Without OOV buckets, unknown keys map to default_value."""
    default_value = -42
    with self.cached_session():
      table = lookup_ops.index_table_from_tensor(
          vocabulary_list=["brain", "salad", "surgery"],
          default_value=default_value)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
      if not context.executing_eagerly():
        with self.assertRaises(errors_impl.FailedPreconditionError):
          self.evaluate(ids)
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllEqual((1, 2, default_value), self.evaluate(ids))
  def test_index_table_from_tensor_missing_vocabulary_list(self):
    with self.cached_session():
      with self.assertRaisesRegexp(ValueError,
                                   "vocabulary_list must be specified"):
        lookup_ops.index_table_from_tensor(
            vocabulary_list=None, num_oov_buckets=1)
  def test_index_table_from_tensor_empty_vocabulary_list(self):
    with self.cached_session():
      with self.assertRaisesRegexp(
          errors_impl.OpError, "keys and values cannot be empty"):
        _ = lookup_ops.index_table_from_tensor(
            vocabulary_list=np.array([], dtype=np.str_), num_oov_buckets=1)
        self.evaluate(lookup_ops.tables_initializer())
  def test_index_table_from_tensor_with_invalid_hashers(self):
    """Non-HasherSpec values and unknown hasher names are rejected."""
    with self.cached_session():
      with self.assertRaises(TypeError):
        lookup_ops.index_table_from_tensor(
            vocabulary_list=["brain", "salad", "surgery"],
            num_oov_buckets=1,
            hasher_spec=1)
      table = lookup_ops.index_table_from_tensor(
          vocabulary_list=["brain", "salad", "surgery"],
          num_oov_buckets=1,
          hasher_spec=lookup_ops.HasherSpec("my-awesome-hash", None))
      self.assertRaises(ValueError, table.lookup,
                        constant_op.constant(["salad", "surgery", "tarkus"]))
class IndexToStringTableFromFileTest(test.TestCase):
  """Tests for lookup_ops.index_to_string_table_from_file (index -> key)."""
  def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
    # Writes `values`, one per line, into a temp file and returns its path.
    vocabulary_file = os.path.join(self.get_temp_dir(), basename)
    with open(vocabulary_file, "w") as f:
      f.write("\n".join(values) + "\n")
    return vocabulary_file
  def test_index_to_string_table(self):
    """Reverse lookup works with both str and tensor filenames."""
    vocabulary_path = self._createVocabFile("i2f_vocab1.txt")
    # vocabulary_file supports string and tensor
    type_funcs = [str, constant_op.constant]
    for type_func in type_funcs:
      vocabulary_file = type_func(vocabulary_path)
      with self.cached_session():
        table = lookup_ops.index_to_string_table_from_file(
            vocabulary_file=vocabulary_file)
        features = table.lookup(
            constant_op.constant([0, 1, 2, 3], dtypes.int64))
        # In graph mode, evaluating before initialization must fail.
        if not context.executing_eagerly():
          with self.assertRaises(errors_impl.OpError):
            self.evaluate(features)
        self.evaluate(lookup_ops.tables_initializer())
        self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
                            self.evaluate(features))
  def test_index_to_string_table_from_multicolumn_file(self):
    """Values can come from column 0 of a tab-delimited multicolumn file."""
    vocabulary_file = self._createVocabFile(
        "f2i_vocab1.txt", values=("brain\t300", "salad\t20", "surgery\t1"))
    with self.cached_session():
      table = lookup_ops.index_to_string_table_from_file(
          vocabulary_file=vocabulary_file,
          key_column_index=lookup_ops.TextFileIndex.LINE_NUMBER,
          value_column_index=0)
      features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64))
      if not context.executing_eagerly():
        with self.assertRaises(errors_impl.OpError):
          self.evaluate(features)
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
                          self.evaluate(features))
  def test_index_to_string_table_from_multicolumn_file_custom_delimiter(self):
    """Same as above but with a space delimiter instead of tab."""
    vocabulary_file = self._createVocabFile(
        "f2i_vocab1.txt", values=("brain 300", "salad 20", "surgery 1"))
    with self.cached_session():
      table = lookup_ops.index_to_string_table_from_file(
          vocabulary_file=vocabulary_file,
          key_column_index=lookup_ops.TextFileIndex.LINE_NUMBER,
          value_column_index=0,
          delimiter=" ")
      features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64))
      if not context.executing_eagerly():
        with self.assertRaises(errors_impl.OpError):
          self.evaluate(features)
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
                          self.evaluate(features))
  def test_index_to_string_table_with_default_value(self):
    """Indices past the vocabulary map to the custom default value."""
    default_value = b"NONE"
    vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
    with self.cached_session():
      table = lookup_ops.index_to_string_table_from_file(
          vocabulary_file=vocabulary_file, default_value=default_value)
      features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
      if not context.executing_eagerly():
        with self.assertRaises(errors_impl.OpError):
          self.evaluate(features)
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllEqual((b"salad", b"surgery", default_value),
                          self.evaluate(features))
  def test_index_to_string_table_with_vocab_size_too_small(self):
    """A truncated vocab turns in-file indices past vocab_size into default."""
    default_value = b"NONE"
    vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
    with self.cached_session():
      table = lookup_ops.index_to_string_table_from_file(
          vocabulary_file=vocabulary_file,
          vocab_size=2,
          default_value=default_value)
      features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
      if not context.executing_eagerly():
        with self.assertRaises(errors_impl.OpError):
          self.evaluate(features)
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllEqual((b"salad", default_value, default_value),
                          self.evaluate(features))
  def test_index_to_string_table_with_vocab_size_too_large(self):
    """A vocab_size larger than the file fails at initialization time."""
    vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
    with self.cached_session():
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "Invalid vocab_size"):
        _ = lookup_ops.index_to_string_table_from_file(
            vocabulary_file=vocabulary_file, vocab_size=4)
        self.evaluate(lookup_ops.tables_initializer())
  def test_index_to_string_table_with_vocab_size(self):
    """A vocab_size matching the file works; OOV indices map to "UNK"."""
    vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
    with self.cached_session():
      table = lookup_ops.index_to_string_table_from_file(
          vocabulary_file=vocabulary_file, vocab_size=3)
      features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
      if not context.executing_eagerly():
        with self.assertRaises(errors_impl.OpError):
          self.evaluate(features)
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllEqual((b"salad", b"surgery", b"UNK"),
                          self.evaluate(features))
class IndexToStringTableFromTensorTest(test.TestCase):
  """Tests for lookup_ops.index_to_string_table_from_tensor."""
  def test_index_to_string_table_from_tensor(self):
    """Indices map to vocab entries; out-of-range maps to "UNK"."""
    with self.cached_session():
      vocabulary_list = constant_op.constant(["brain", "salad", "surgery"])
      table = lookup_ops.index_to_string_table_from_tensor(
          vocabulary_list=vocabulary_list)
      indices = constant_op.constant([0, 1, 2, 3], dtypes.int64)
      features = table.lookup(indices)
      # In graph mode, evaluating before initialization must fail.
      if not context.executing_eagerly():
        with self.assertRaises(errors_impl.OpError):
          self.evaluate(features)
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
                          self.evaluate(features))
  def test_duplicate_entries(self):
    """Duplicate vocabulary entries are preserved positionally."""
    with self.cached_session():
      vocabulary_list = constant_op.constant(["hello", "hello"])
      table = lookup_ops.index_to_string_table_from_tensor(
          vocabulary_list=vocabulary_list)
      indices = constant_op.constant([0, 1, 4], dtypes.int64)
      features = table.lookup(indices)
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllEqual((b"hello", b"hello", b"UNK"), self.evaluate(features))
  def test_index_to_string_with_default_value(self):
    """Out-of-range indices map to the custom default value."""
    default_value = b"NONE"
    with self.cached_session():
      vocabulary_list = constant_op.constant(["brain", "salad", "surgery"])
      table = lookup_ops.index_to_string_table_from_tensor(
          vocabulary_list=vocabulary_list, default_value=default_value)
      indices = constant_op.constant([1, 2, 4], dtypes.int64)
      features = table.lookup(indices)
      if not context.executing_eagerly():
        with self.assertRaises(errors_impl.OpError):
          self.evaluate(features)
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllEqual((b"salad", b"surgery", default_value),
                          self.evaluate(features))
class IdTableWithHashBucketsTest(test.TestCase):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
@test_util.run_deprecated_v1
def testStringIdTableWithHashBuckets(self):
vocab_file = self._createVocabFile("feat_to_id_1.txt")
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table = lookup_ops.IdTableWithHashBuckets(
lookup_ops.StaticHashTable(
lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size), default_value),
oov_buckets)
table.initializer.run()
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
out = table.lookup(input_string)
self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))
self.assertEqual(vocab_size + oov_buckets, table.size().eval())
  @test_util.run_deprecated_v1
  def testInt32IdTableWithHashBuckets(self):
    """int32 lookups work over an int64-keyed file-backed vocab table."""
    vocab_file = self._createVocabFile("feat_to_id_2.txt", ("42", "1", "-1000"))
    with self.cached_session():
      default_value = -1
      vocab_size = 3
      oov_buckets = 1
      table = lookup_ops.IdTableWithHashBuckets(
          lookup_ops.StaticHashTable(
              lookup_ops.TextFileIdTableInitializer(
                  vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
              default_value),
          oov_buckets,
          key_dtype=dtypes.int32)
      table.initializer.run()
      values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int32)
      out = table.lookup(values)
      # 11 is out of vocab, so it gets the single OOV bucket id (3).
      self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))
      self.assertEqual(vocab_size + oov_buckets, table.size().eval())
  @test_util.run_deprecated_v1
  def testInt64IdTableWithHashBuckets(self):
    """int64 lookups: in-vocab ids from the file, OOV id from the bucket."""
    vocab_file = self._createVocabFile("feat_to_id_3.txt", ("42", "1", "-1000"))
    with self.cached_session():
      default_value = -1
      vocab_size = 3
      oov_buckets = 1
      table = lookup_ops.IdTableWithHashBuckets(
          lookup_ops.StaticHashTable(
              lookup_ops.TextFileIdTableInitializer(
                  vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
              default_value), oov_buckets)
      table.initializer.run()
      values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64)
      out = table.lookup(values)
      # 11 is out of vocab, so it gets the single OOV bucket id (3).
      self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))
      self.assertEqual(vocab_size + oov_buckets, table.size().eval())
  @test_util.run_deprecated_v1
  def testStringIdTableWithOnlyHashBucket(self):
    """With no vocab table, all ids come from fingerprint hashing."""
    with self.cached_session():
      oov_buckets = 5
      # Set a table that only uses hash buckets, for each input value returns
      # an id calculated by fingerprint("input") mod oov_buckets.
      table = lookup_ops.IdTableWithHashBuckets(None, oov_buckets)
      table.initializer.run()
      values = constant_op.constant(("brain", "salad", "surgery"))
      out = table.lookup(values)
      self.assertAllEqual(
          [
              3,  # fingerprint("brain") mod 5.
              1,  # fingerprint("salad") mod 5.
              4  # fingerprint("surgery") mod 5
          ],
          self.evaluate(out))
      self.assertEqual(oov_buckets, table.size().eval())
  @test_util.run_deprecated_v1
  def testInt32IdTableWithOnlyHashBucket(self):
    """Hash-bucket-only tables also accept int32 keys."""
    with self.cached_session():
      oov_buckets = 5
      # Set a table that only uses hash buckets, for each input value returns
      # an id calculated by fingerprint("input") mod oov_buckets.
      table = lookup_ops.IdTableWithHashBuckets(
          None, oov_buckets, key_dtype=dtypes.int32)
      table.initializer.run()
      input_string = constant_op.constant([42, 1, -1000], dtype=dtypes.int32)
      out = table.lookup(input_string)
      self.assertAllEqual(
          [
              1,  # fingerprint("42") mod 5.
              4,  # fingerprint("1") mod 5.
              2  # fingerprint("-1000") mod 5
          ],
          self.evaluate(out))
      self.assertEqual(oov_buckets, table.size().eval())
def testFloat64IdTableWithOnlyHashBucket(self):
with self.cached_session():
with self.assertRaisesRegexp(TypeError, "Invalid key_dtype"):
lookup_ops.IdTableWithHashBuckets(
None, num_oov_buckets=5, key_dtype=dtypes.float64)
def testBoolIdTableWithOnlyHashBucket(self):
with self.cached_session():
with self.assertRaisesRegexp(TypeError, "Invalid key_dtype"):
lookup_ops.IdTableWithHashBuckets(
None, num_oov_buckets=5, key_dtype=dtypes.bool)
  @test_util.run_deprecated_v1
  def testIdTableWithHashBucketsWithMultipleInitializers(self):
    """Fast and strong hashers over the same vocab yield different OOV ids
    and emit the corresponding hash-bucket ops into the graph."""
    vocab_file = self._createVocabFile("feat_to_id_4.txt")
    with self.cached_session() as sess:
      default_value = -1
      vocab_size = 3
      oov_buckets = 3
      vocab_table = lookup_ops.StaticHashTable(
          lookup_ops.TextFileIdTableInitializer(
              vocab_file, vocab_size=vocab_size), default_value)
      table1 = lookup_ops.IdTableWithHashBuckets(
          vocab_table,
          oov_buckets,
          hasher_spec=lookup_ops.FastHashSpec,
          name="table1")
      table2 = lookup_ops.IdTableWithHashBuckets(
          vocab_table,
          oov_buckets,
          hasher_spec=lookup_ops.StrongHashSpec((1, 2)),
          name="table2")
      lookup_ops.tables_initializer().run()
      input_string = constant_op.constant(
          ["fruit", "brain", "salad", "surgery", "UNK"])
      out1 = table1.lookup(input_string)
      out2 = table2.lookup(input_string)
      out1, out2 = self.evaluate([out1, out2])
      # OOV inputs ("fruit", "UNK") hash differently under the two specs.
      self.assertAllEqual([5, 0, 1, 2, 5], out1)
      self.assertAllEqual([5, 0, 1, 2, 3], out2)
      self.assertEqual(vocab_size + oov_buckets, table1.size().eval())
      self.assertEqual(vocab_size + oov_buckets, table2.size().eval())
      test_util.assert_ops_in_graph({
          "table1_Lookup/hash_bucket": "StringToHashBucketFast",
          "table2_Lookup/hash_bucket": "StringToHashBucketStrong",
      }, sess.graph)
  @test_util.run_deprecated_v1
  def testIdTableWithHashBucketsInitializationAcrossSessions(self):
    """A table initialized in one session stays usable in a later session."""
    vocab_file = self._createVocabFile("feat_to_id_5.txt")
    with self.cached_session():
      default_value = -1
      vocab_size = 3
      oov_buckets = 1
      table1 = lookup_ops.IdTableWithHashBuckets(
          lookup_ops.StaticHashTable(
              lookup_ops.TextFileIdTableInitializer(
                  vocab_file, vocab_size=vocab_size), default_value),
          oov_buckets)
      table1.initializer.run()
      input_string_1 = constant_op.constant(
          ["brain", "salad", "surgery", "UNK"])
      out1 = table1.lookup(input_string_1)
      self.assertAllEqual([0, 1, 2, 3], self.evaluate(out1))
      self.assertEqual(vocab_size + oov_buckets, table1.size().eval())
    with self.cached_session():
      default_value = -1
      vocab_size = 3
      oov_buckets = 1
      # Underlying lookup table already initialized in previous session.
      # No need to call table2.initializer.run()
      table2 = lookup_ops.IdTableWithHashBuckets(
          lookup_ops.StaticHashTable(
              lookup_ops.TextFileIdTableInitializer(
                  vocab_file, vocab_size=vocab_size), default_value),
          oov_buckets)
      input_string_2 = constant_op.constant(["fruit", "salad", "UNK"])
      out2 = table2.lookup(input_string_2)
      self.assertAllEqual([3, 1, 3], self.evaluate(out2))
      self.assertEqual(vocab_size + oov_buckets, table2.size().eval())
  @test_util.run_deprecated_v1
  def testIdTableWithHashBucketsWithMultipleInitializersDifferentDefault(self):
    """With zero OOV buckets, OOV inputs fall through to each table's own
    default value (-1 for table1, -2 for table2)."""
    vocab_file = self._createVocabFile("feat_to_id_6.txt")
    with self.cached_session() as sess:
      default_value1 = -1
      vocab_size = 3
      oov_buckets = 0
      table1 = lookup_ops.IdTableWithHashBuckets(
          lookup_ops.StaticHashTable(
              lookup_ops.TextFileIdTableInitializer(
                  vocab_file, vocab_size=vocab_size), default_value1),
          oov_buckets)
      default_value2 = -2
      table2 = lookup_ops.IdTableWithHashBuckets(
          lookup_ops.StaticHashTable(
              lookup_ops.TextFileIdTableInitializer(
                  vocab_file, vocab_size=vocab_size), default_value2),
          oov_buckets)
      lookup_ops.tables_initializer().run()
      input_string_1 = constant_op.constant(
          ["brain", "salad", "surgery", "UNK"])
      input_string_2 = constant_op.constant(["fruit", "salad", "UNK"])
      out1 = table1.lookup(input_string_1)
      out2 = table2.lookup(input_string_2)
      out1, out2 = self.evaluate([out1, out2])
      self.assertAllEqual([0, 1, 2, -1], out1)
      self.assertAllEqual([-2, 1, -2], out2)
      self.assertEqual(vocab_size + oov_buckets, table1.size().eval())
      self.assertEqual(vocab_size + oov_buckets, table2.size().eval())
  @test_util.run_deprecated_v1
  def testSparseTensor(self):
    """SparseTensor lookups map values and keep indices/shape unchanged."""
    vocab_file = self._createVocabFile("feat_to_id_7.txt")
    input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
    input_shape = [4, 4]
    with self.cached_session() as sess:
      sp_features = sparse_tensor.SparseTensor(
          constant_op.constant(input_indices, dtypes.int64),
          constant_op.constant(["brain", "salad", "brain", "surgery", "tarkus"],
                               dtypes.string),
          constant_op.constant(input_shape, dtypes.int64))
      table = lookup_ops.IdTableWithHashBuckets(
          lookup_ops.StaticHashTable(
              lookup_ops.TextFileIdTableInitializer(vocab_file, vocab_size=3),
              -1), 1)
      table.initializer.run()
      sp_ids = table.lookup(sp_features)
      self.assertAllEqual([5], sp_ids.values._shape_as_list())
      sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
          [sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
      self.assertAllEqual(input_indices, sp_ids_ind)
      # "tarkus" is out of vocab and maps to the single OOV bucket id (3).
      self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
      self.assertAllEqual(input_shape, sp_ids_shape)
  @test_util.run_deprecated_v1
  def testInt32SparseTensor(self):
    """int32 SparseTensor lookups over an in-memory int64 vocab table."""
    input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
    input_shape = [4, 4]
    with self.cached_session() as sess:
      sp_features = sparse_tensor.SparseTensor(
          constant_op.constant(input_indices, dtypes.int64),
          constant_op.constant([42, 1, 42, -1000, 11], dtypes.int32),
          constant_op.constant(input_shape, dtypes.int64))
      table = lookup_ops.IdTableWithHashBuckets(
          lookup_ops.StaticHashTable(
              lookup_ops.KeyValueTensorInitializer(
                  (42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64), -1),
          1,
          key_dtype=dtypes.int32)
      table.initializer.run()
      sp_ids = table.lookup(sp_features)
      self.assertAllEqual([5], sp_ids.values._shape_as_list())
      sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
          [sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
      self.assertAllEqual(input_indices, sp_ids_ind)
      # 11 is out of vocab and maps to the single OOV bucket id (3).
      self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
      self.assertAllEqual(input_shape, sp_ids_shape)
@test_util.run_deprecated_v1
def testInt64SparseTensor(self):
  """int64 SparseTensor keys work with an explicit key_dtype=int64."""
  input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
  input_shape = [4, 4]
  with self.cached_session() as sess:
    sp_features = sparse_tensor.SparseTensor(
        constant_op.constant(input_indices, dtypes.int64),
        constant_op.constant([42, 1, 42, -1000, 11], dtypes.int64),
        constant_op.constant(input_shape, dtypes.int64))
    # Key 11 is not in the table and falls into the single OOV bucket (id 3).
    table = lookup_ops.IdTableWithHashBuckets(
        lookup_ops.StaticHashTable(
            lookup_ops.KeyValueTensorInitializer(
                (42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64), -1),
        1,
        key_dtype=dtypes.int64)
    table.initializer.run()
    sp_ids = table.lookup(sp_features)
    self.assertAllEqual([5], sp_ids.values._shape_as_list())
    sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
        [sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
    self.assertAllEqual(input_indices, sp_ids_ind)
    self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
    self.assertAllEqual(input_shape, sp_ids_shape)
def testIdTableWithHashBucketsWithInvalidHashers(self):
  """Invalid hasher specs raise TypeError/ValueError as appropriate."""
  vocab_file = self._createVocabFile("feat_to_id_4.txt")
  with self.cached_session():
    default_value = -1
    vocab_size = 3
    oov_buckets = 1
    lookup_table = lookup_ops.StaticHashTable(
        lookup_ops.TextFileIdTableInitializer(
            vocab_file, vocab_size=vocab_size), default_value)
    # hasher_spec must be a HasherSpec, not an arbitrary object.
    with self.assertRaises(TypeError):
      lookup_ops.IdTableWithHashBuckets(
          lookup_table, oov_buckets, hasher_spec=1)
    # An unknown hasher name is only rejected at lookup time.
    table = lookup_ops.IdTableWithHashBuckets(
        lookup_table,
        oov_buckets,
        hasher_spec=lookup_ops.HasherSpec("my-awesome-hash", None))
    input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
    with self.assertRaises(ValueError):
      table.lookup(input_string)
    # StrongHashSpec requires exactly two integer keys.
    with self.assertRaises(ValueError):
      table = lookup_ops.IdTableWithHashBuckets(
          lookup_table,
          oov_buckets,
          hasher_spec=lookup_ops.StrongHashSpec([]))
    with self.assertRaises(ValueError):
      table = lookup_ops.IdTableWithHashBuckets(
          lookup_table,
          oov_buckets,
          hasher_spec=lookup_ops.StrongHashSpec([1, 2, 3]))
    with self.assertRaises(TypeError):
      table = lookup_ops.IdTableWithHashBuckets(
          lookup_table,
          oov_buckets,
          hasher_spec=lookup_ops.StrongHashSpec([None, 2]))
def testIdTableWithHashBucketsNoInnerTable(self):
  """Without a wrapped table there is no resource handle."""
  with self.cached_session():
    table = lookup_ops.IdTableWithHashBuckets(None, num_oov_buckets=1)
    self.assertIsNone(table.resource_handle)
class MutableHashTableOpTest(test.TestCase):
def testMutableHashTable(self):
  """Basic insert / remove / lookup / export on a string->int64 table."""
  with self.cached_session():
    default_val = -1
    keys = constant_op.constant(["brain", "salad", "surgery", "tarkus"])
    values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
    table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
                                        default_val)
    self.assertAllEqual(0, self.evaluate(table.size()))
    self.evaluate(table.insert(keys, values))
    self.assertAllEqual(4, self.evaluate(table.size()))
    # Removing "tank" (never inserted) is a no-op; only "tarkus" is removed.
    remove_string = constant_op.constant(["tarkus", "tank"])
    self.evaluate(table.remove(remove_string))
    self.assertAllEqual(3, self.evaluate(table.size()))
    input_string = constant_op.constant(["brain", "salad", "tank"])
    output = table.lookup(input_string)
    self.assertAllEqual([3], output.get_shape())
    result = self.evaluate(output)
    self.assertAllEqual([0, 1, -1], result)
    exported_keys, exported_values = table.export()
    # exported data is in the order of the internal map, i.e. undefined
    sorted_keys = np.sort(self.evaluate(exported_keys))
    sorted_values = np.sort(self.evaluate(exported_values))
    self.assertAllEqual([b"brain", b"salad", b"surgery"], sorted_keys)
    self.assertAllEqual([0, 1, 2], sorted_values)
@test_util.run_v1_only("SaverV1")
def testSaveRestore(self):
  """A checkpointed table's contents survive a V1 Saver save/restore."""
  save_dir = os.path.join(self.get_temp_dir(), "save_restore")
  save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
  with self.session(graph=ops.Graph()) as sess:
    v0 = variables.Variable(10.0, name="v0")
    v1 = variables.Variable(20.0, name="v1")
    default_val = -1
    keys = constant_op.constant(["b", "c", "d"], dtypes.string)
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table = lookup_ops.MutableHashTable(
        dtypes.string, dtypes.int64, default_val, name="t1", checkpoint=True)
    save = saver.Saver()
    self.evaluate(variables.global_variables_initializer())
    # Check that the parameter nodes have been initialized.
    self.assertEqual(10.0, self.evaluate(v0))
    self.assertEqual(20.0, self.evaluate(v1))
    self.assertAllEqual(0, self.evaluate(table.size()))
    self.evaluate(table.insert(keys, values))
    self.assertAllEqual(3, self.evaluate(table.size()))
    val = save.save(sess, save_path)
    self.assertIsInstance(val, six.string_types)
    self.assertEqual(save_path, val)
  with self.session(graph=ops.Graph()) as sess:
    v0 = variables.Variable(-1.0, name="v0")
    v1 = variables.Variable(-1.0, name="v1")
    default_val = -1
    # Same table name "t1" so the restore targets this table.
    table = lookup_ops.MutableHashTable(
        dtypes.string, dtypes.int64, default_val, name="t1", checkpoint=True)
    # Insert entries that the restore below should wipe out.
    self.evaluate(
        table.insert(
            constant_op.constant(["a", "c"], dtypes.string),
            constant_op.constant([12, 24], dtypes.int64)))
    self.assertAllEqual(2, self.evaluate(table.size()))
    save = saver.Saver()
    # Restore the saved values in the parameter nodes.
    save.restore(sess, save_path)
    # Check that the parameter nodes have been restored.
    self.assertEqual(10.0, self.evaluate(v0))
    self.assertEqual(20.0, self.evaluate(v1))
    self.assertAllEqual(3, self.evaluate(table.size()))
    input_string = constant_op.constant(["a", "b", "c", "d", "e"],
                                        dtypes.string)
    output = table.lookup(input_string)
    self.assertAllEqual([-1, 0, 1, 2, -1], self.evaluate(output))
@test_util.run_in_graph_and_eager_modes
def testObjectSaveRestore(self):
  """Object-based (trackable.Checkpoint) save/restore round-trips the table."""
  save_dir = os.path.join(self.get_temp_dir(), "save_restore")
  save_prefix = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
  v0 = variables.Variable(10.0, name="v0")
  v1 = variables.Variable(20.0, name="v1")
  default_val = -1
  keys = constant_op.constant(["b", "c", "d"], dtypes.string)
  values = constant_op.constant([0, 1, 2], dtypes.int64)
  table = lookup_ops.MutableHashTable(
      dtypes.string, dtypes.int64, default_val, name="t1", checkpoint=True)
  checkpoint = trackable.Checkpoint(table=table, v0=v0, v1=v1)
  self.evaluate([v0.initializer, v1.initializer])
  # Check that the parameter nodes have been initialized.
  self.assertEqual(10.0, self.evaluate(v0))
  self.assertEqual(20.0, self.evaluate(v1))
  self.assertAllEqual(0, self.evaluate(table.size()))
  self.evaluate(table.insert(keys, values))
  self.assertAllEqual(3, self.evaluate(table.size()))
  save_path = checkpoint.save(save_prefix)
  # Drop all references so the restore below works on fresh objects.
  del table, checkpoint, v0, v1
  v0 = variables.Variable(-1.0, name="v0")
  v1 = variables.Variable(-1.0, name="v1")
  default_val = -1
  table = lookup_ops.MutableHashTable(
      dtypes.string, dtypes.int64, default_val, name="t1", checkpoint=True)
  # Insert entries that the restore below should wipe out.
  self.evaluate(
      table.insert(
          constant_op.constant(["a", "c"], dtypes.string),
          constant_op.constant([12, 24], dtypes.int64)))
  self.assertAllEqual(2, self.evaluate(table.size()))
  checkpoint = trackable.Checkpoint(table=table, v0=v0, v1=v1)
  # Restore the saved values in the parameter nodes.
  checkpoint.restore(save_path).run_restore_ops()
  # Check that the parameter nodes have been restored.
  self.assertEqual(10.0, self.evaluate(v0))
  self.assertEqual(20.0, self.evaluate(v1))
  self.assertAllEqual(3, self.evaluate(table.size()))
  input_string = constant_op.constant(["a", "b", "c", "d", "e"],
                                      dtypes.string)
  output = table.lookup(input_string)
  self.assertAllEqual([-1, 0, 1, 2, -1], self.evaluate(output))
@test_util.run_v1_only("Multiple sessions")
def testSharing(self):
  """Two sessions against the same in-process server share table state."""
  # Start a server to store the table state
  server = server_lib.Server({"local0": ["localhost:0"]},
                             protocol="grpc",
                             start=True)
  # Create two sessions sharing the same state
  session1 = session.Session(server.target)
  session2 = session.Session(server.target)
  table = lookup_ops.MutableHashTable(
      dtypes.int64, dtypes.string, "-", name="t1")
  # Populate the table in the first session
  with session1:
    self.assertAllEqual(0, table.size().eval())
    keys = constant_op.constant([11, 12], dtypes.int64)
    values = constant_op.constant(["a", "b"])
    table.insert(keys, values).run()
    self.assertAllEqual(2, table.size().eval())
    output = table.lookup(constant_op.constant([11, 12, 13], dtypes.int64))
    self.assertAllEqual([b"a", b"b", b"-"], output.eval())
  # Verify that we can access the shared data from the second session
  with session2:
    self.assertAllEqual(2, table.size().eval())
    output = table.lookup(constant_op.constant([10, 11, 12], dtypes.int64))
    self.assertAllEqual([b"-", b"a", b"b"], output.eval())
def testMutableHashTableOfTensors(self):
  """Vector-valued entries: lookup returns [batch, 2] and export works."""
  with self.cached_session():
    default_val = constant_op.constant([-1, -1], dtypes.int64)
    keys = constant_op.constant(["brain", "salad", "surgery", "tarkus"])
    values = constant_op.constant([[0, 1], [2, 3], [4, 5], [6, 7]],
                                  dtypes.int64)
    table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
                                        default_val)
    self.assertAllEqual(0, self.evaluate(table.size()))
    self.evaluate(table.insert(keys, values))
    self.assertAllEqual(4, self.evaluate(table.size()))
    # "tank" was never inserted, so only "tarkus" is removed.
    remove_string = constant_op.constant(["tarkus", "tank"])
    self.evaluate(table.remove(remove_string))
    self.assertAllEqual(3, self.evaluate(table.size()))
    input_string = constant_op.constant(["brain", "salad", "tank"])
    output = table.lookup(input_string)
    self.assertAllEqual([3, 2], output.get_shape())
    result = self.evaluate(output)
    self.assertAllEqual([[0, 1], [2, 3], [-1, -1]], result)
    exported_keys, exported_values = table.export()
    # exported data is in the order of the internal map, i.e. undefined
    sorted_keys = np.sort(self.evaluate(exported_keys))
    sorted_values = np.sort(self.evaluate(exported_values), axis=0)
    self.assertAllEqual([b"brain", b"salad", b"surgery"], sorted_keys)
    sorted_expected_values = np.sort([[4, 5], [2, 3], [0, 1]], axis=0)
    self.assertAllEqual(sorted_expected_values, sorted_values)
def testMutableHashTableExportInsert(self):
  """Exported keys/values can seed a second table with identical lookups."""
  with self.cached_session():
    default_val = constant_op.constant([-1, -1], dtypes.int64)
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([[0, 1], [2, 3], [4, 5]], dtypes.int64)
    table1 = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
                                         default_val)
    self.assertAllEqual(0, self.evaluate(table1.size()))
    self.evaluate(table1.insert(keys, values))
    self.assertAllEqual(3, self.evaluate(table1.size()))
    input_string = constant_op.constant(["brain", "salad", "tank"])
    expected_output = [[0, 1], [2, 3], [-1, -1]]
    output1 = table1.lookup(input_string)
    self.assertAllEqual(expected_output, self.evaluate(output1))
    exported_keys, exported_values = table1.export()
    # 3 keys of shape [3]; 3 values of shape [3, 2] -> 6 elements.
    self.assertAllEqual(3, self.evaluate(exported_keys).size)
    self.assertAllEqual(6, self.evaluate(exported_values).size)
    # Populate a second table from the exported data
    table2 = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
                                         default_val)
    self.assertAllEqual(0, self.evaluate(table2.size()))
    self.evaluate(table2.insert(exported_keys, exported_values))
    self.assertAllEqual(3, self.evaluate(table2.size()))
    # Verify lookup result is still the same
    output2 = table2.lookup(input_string)
    self.assertAllEqual(expected_output, self.evaluate(output2))
def testMutableHashTableOfTensorsInvalidShape(self):
  """Inserts whose value shape disagrees with [num_keys, 2] must fail."""
  with self.cached_session():
    default_val = constant_op.constant([-1, -1], dtypes.int64)
    keys = constant_op.constant(["brain", "salad", "surgery"])
    table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
                                        default_val)
    # Shape [6] instead of [3, 2]
    values = constant_op.constant([0, 1, 2, 3, 4, 5], dtypes.int64)
    with self.assertRaisesOpError("Expected shape"):
      self.evaluate(table.insert(keys, values))
    # Shape [2,3] instead of [3, 2]
    values = constant_op.constant([[0, 1, 2], [3, 4, 5]], dtypes.int64)
    with self.assertRaisesOpError("Expected shape"):
      self.evaluate(table.insert(keys, values))
    # Shape [2, 2] instead of [3, 2]
    values = constant_op.constant([[0, 1], [2, 3]], dtypes.int64)
    with self.assertRaisesOpError("Expected shape"):
      self.evaluate(table.insert(keys, values))
    # Shape [3, 1] instead of [3, 2]
    values = constant_op.constant([[0], [2], [4]], dtypes.int64)
    with self.assertRaisesOpError("Expected shape"):
      self.evaluate(table.insert(keys, values))
    # Valid Insert
    values = constant_op.constant([[0, 1], [2, 3], [4, 5]], dtypes.int64)
    self.evaluate(table.insert(keys, values))
    self.assertAllEqual(3, self.evaluate(table.size()))
def testMutableHashTableInvalidDefaultValue(self):
  """A rank-2 default value is rejected; defaults must be scalar or vector."""
  with self.cached_session():
    default_val = constant_op.constant([[-1, -1]], dtypes.int64)
    with self.assertRaisesOpError("Default value must be a vector"):
      table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
                                          default_val)
      self.assertAllEqual(0, self.evaluate(table.size()))
def testMutableHashTableDuplicateInsert(self):
  """When a key appears twice in one insert, the later value wins."""
  with self.cached_session():
    default_val = -1
    # "brain" appears at positions 0 and 3; the final value (3) is kept.
    keys = constant_op.constant(["brain", "salad", "surgery", "brain"])
    values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
    table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
                                        default_val)
    self.assertAllEqual(0, self.evaluate(table.size()))
    self.evaluate(table.insert(keys, values))
    self.assertAllEqual(3, self.evaluate(table.size()))
    input_string = constant_op.constant(["brain", "salad", "tank"])
    output = table.lookup(input_string)
    result = self.evaluate(output)
    self.assertAllEqual([3, 1, -1], result)
def testMutableHashTableFindHighRank(self):
  """Lookup with a rank-2 key tensor preserves the input shape."""
  with self.cached_session():
    default_val = -1
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
                                        default_val)
    self.evaluate(table.insert(keys, values))
    self.assertAllEqual(3, self.evaluate(table.size()))
    input_string = constant_op.constant([["brain", "salad"],
                                         ["tank", "tarkus"]])
    output = table.lookup(input_string)
    self.assertAllEqual([2, 2], output.get_shape())
    result = self.evaluate(output)
    self.assertAllEqual([[0, 1], [-1, -1]], result)
def testMutableHashTableInsertHighRank(self):
  """Insert with rank-2 key/value tensors inserts every element pair."""
  with self.cached_session():
    default_val = -1
    keys = constant_op.constant([["brain", "salad"], ["surgery", "tank"]])
    values = constant_op.constant([[0, 1], [2, 3]], dtypes.int64)
    table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
                                        default_val)
    self.evaluate(table.insert(keys, values))
    self.assertAllEqual(4, self.evaluate(table.size()))
    input_string = constant_op.constant(["brain", "salad", "tank", "tarkus"])
    output = table.lookup(input_string)
    result = self.evaluate(output)
    self.assertAllEqual([0, 1, 3, -1], result)
def testMutableHashTableRemoveHighRank(self):
  """Remove accepts any-rank key tensors; absent keys are silently skipped."""
  # Use cached_session() for consistency with the rest of this class;
  # test_session() is a deprecated alias of cached_session().
  with self.cached_session():
    default_val = -1
    keys = constant_op.constant([["brain", "salad"], ["surgery", "tank"]])
    values = constant_op.constant([[0, 1], [2, 3]], dtypes.int64)
    table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
                                        default_val)
    self.evaluate(table.insert(keys, values))
    self.assertAllEqual(4, self.evaluate(table.size()))
    # "tarkus" was never inserted; removing it is a no-op.
    remove_string = constant_op.constant(["salad", "tarkus"])
    self.evaluate(table.remove(remove_string))
    self.assertAllEqual(3, self.evaluate(table.size()))
    input_string = constant_op.constant(["brain", "salad", "tank", "tarkus"])
    output = table.lookup(input_string)
    result = self.evaluate(output)
    self.assertAllEqual([0, -1, 3, -1], result)
def testMutableHashTableOfTensorsFindHighRank(self):
  """Rank-2 lookup of vector values yields shape [2, 2, value_dim]."""
  with self.cached_session():
    default_val = constant_op.constant([-1, -1, -1], dtypes.int64)
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([[0, 1, 2], [2, 3, 4], [4, 5, 6]],
                                  dtypes.int64)
    table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
                                        default_val)
    self.evaluate(table.insert(keys, values))
    self.assertAllEqual(3, self.evaluate(table.size()))
    input_string = constant_op.constant([["brain", "salad"],
                                         ["tank", "tarkus"]])
    output = table.lookup(input_string)
    self.assertAllEqual([2, 2, 3], output.get_shape())
    result = self.evaluate(output)
    self.assertAllEqual(
        [[[0, 1, 2], [2, 3, 4]], [[-1, -1, -1], [-1, -1, -1]]], result)
def testMutableHashTableOfTensorsRemoveHighRank(self):
  """Rank-2 remove works on a vector-valued table; absent keys skipped."""
  # Use cached_session() for consistency with the rest of this class;
  # test_session() is a deprecated alias of cached_session().
  with self.cached_session():
    default_val = constant_op.constant([-1, -1, -1], dtypes.int64)
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([[0, 1, 2], [2, 3, 4], [4, 5, 6]],
                                  dtypes.int64)
    table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
                                        default_val)
    self.evaluate(table.insert(keys, values))
    self.assertAllEqual(3, self.evaluate(table.size()))
    # "tank" was never inserted; only "brain" is actually removed.
    remove_string = constant_op.constant([["brain", "tank"]])
    self.evaluate(table.remove(remove_string))
    self.assertAllEqual(2, self.evaluate(table.size()))
    input_string = constant_op.constant([["brain", "salad"],
                                         ["surgery", "tank"]])
    output = table.lookup(input_string)
    self.assertAllEqual([2, 2, 3], output.get_shape())
    result = self.evaluate(output)
    self.assertAllEqual(
        [[[-1, -1, -1], [2, 3, 4]], [[4, 5, 6], [-1, -1, -1]]], result)
def testMultipleMutableHashTables(self):
  """Several independent tables can coexist and return the same results."""
  with self.cached_session():
    default_val = -1
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table1 = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
                                         default_val)
    table2 = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
                                         default_val)
    table3 = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
                                         default_val)
    self.evaluate(table1.insert(keys, values))
    self.evaluate(table2.insert(keys, values))
    self.evaluate(table3.insert(keys, values))
    self.assertAllEqual(3, self.evaluate(table1.size()))
    self.assertAllEqual(3, self.evaluate(table2.size()))
    self.assertAllEqual(3, self.evaluate(table3.size()))
    input_string = constant_op.constant(["brain", "salad", "tank"])
    output1 = table1.lookup(input_string)
    output2 = table2.lookup(input_string)
    output3 = table3.lookup(input_string)
    out1, out2, out3 = self.evaluate([output1, output2, output3])
    self.assertAllEqual([0, 1, -1], out1)
    self.assertAllEqual([0, 1, -1], out2)
    self.assertAllEqual([0, 1, -1], out3)
def testMutableHashTableWithTensorDefault(self):
  """The default value may be given as a Tensor rather than a Python int."""
  with self.cached_session():
    default_val = constant_op.constant(-1, dtypes.int64)
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
                                        default_val)
    self.evaluate(table.insert(keys, values))
    self.assertAllEqual(3, self.evaluate(table.size()))
    input_string = constant_op.constant(["brain", "salad", "tank"])
    output = table.lookup(input_string)
    result = self.evaluate(output)
    self.assertAllEqual([0, 1, -1], result)
def testSignatureMismatch(self):
  """Wrong key/value/default dtypes are rejected; ref types are accepted."""
  with self.cached_session():
    default_val = -1
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
                                        default_val)
    # insert with keys of the wrong type
    with self.assertRaises(ValueError):
      self.evaluate(table.insert(constant_op.constant([4, 5, 6]), values))
    # insert with values of the wrong type
    with self.assertRaises(ValueError):
      self.evaluate(table.insert(keys, constant_op.constant(["a", "b", "c"])))
    self.assertAllEqual(0, self.evaluate(table.size()))
    self.evaluate(table.insert(keys, values))
    self.assertAllEqual(3, self.evaluate(table.size()))
    input_string_ref = variables.Variable("brain")
    input_int64_ref = variables.Variable(-1, dtype=dtypes.int64)
    self.evaluate(variables.global_variables_initializer())
    # Ref types do not produce an insert signature mismatch.
    self.evaluate(table.insert(input_string_ref, input_int64_ref))
    # "brain" was overwritten with -1 by the ref insert above.
    self.assertAllEqual(3, self.evaluate(table.size()))
    # Ref types do not produce a lookup signature mismatch.
    self.assertEqual(-1, self.evaluate(table.lookup(input_string_ref)))
    # lookup with keys of the wrong type
    input_string = constant_op.constant([1, 2, 3], dtypes.int64)
    with self.assertRaises(ValueError):
      self.evaluate(table.lookup(input_string))
    # default value of the wrong type
    with self.assertRaises(TypeError):
      lookup_ops.MutableHashTable(dtypes.string, dtypes.int64, "UNK")
def testMutableHashTableStringFloat(self):
  """A string -> float32 table returns the float default for misses."""
  with self.cached_session():
    default_val = -1.5
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1.1, 2.2], dtypes.float32)
    table = lookup_ops.MutableHashTable(dtypes.string, dtypes.float32,
                                        default_val)
    self.assertAllEqual(0, self.evaluate(table.size()))
    self.evaluate(table.insert(keys, values))
    self.assertAllEqual(3, self.evaluate(table.size()))
    input_string = constant_op.constant(["brain", "salad", "tank"])
    output = table.lookup(input_string)
    result = self.evaluate(output)
    self.assertAllClose([0, 1.1, default_val], result)
def testMutableHashTableIntFloat(self):
  """An int64 -> float32 table returns the default for missing keys."""
  with self.cached_session():
    missing = -1.0
    table = lookup_ops.MutableHashTable(dtypes.int64, dtypes.float32,
                                        missing)
    self.assertAllEqual(0, self.evaluate(table.size()))
    self.evaluate(
        table.insert(
            constant_op.constant([3, 7, 0], dtypes.int64),
            constant_op.constant([7.5, -1.2, 9.9], dtypes.float32)))
    self.assertAllEqual(3, self.evaluate(table.size()))
    # Key 11 was never inserted, so it maps to the default value.
    queries = constant_op.constant([7, 0, 11], dtypes.int64)
    self.assertAllClose([-1.2, 9.9, missing],
                        self.evaluate(table.lookup(queries)))
def testMutableHashTableInt64String(self):
  """An int64 -> string table returns the string default for misses."""
  with self.cached_session():
    default_val = "n/a"
    keys = constant_op.constant([0, 1, 2], dtypes.int64)
    values = constant_op.constant(["brain", "salad", "surgery"])
    table = lookup_ops.MutableHashTable(dtypes.int64, dtypes.string,
                                        default_val)
    self.assertAllEqual(0, self.evaluate(table.size()))
    self.evaluate(table.insert(keys, values))
    self.assertAllEqual(3, self.evaluate(table.size()))
    input_string = constant_op.constant([0, 1, 3], dtypes.int64)
    output = table.lookup(input_string)
    result = self.evaluate(output)
    self.assertAllEqual((b"brain", b"salad", b"n/a"), result)
class MutableHashTableBenchmark(test.Benchmark):
  """Benchmarks insert throughput for MutableHashTable.

  Subclasses override _create_table() to benchmark other table types.
  """

  def _create_table(self):
    # int64 -> float32 table with default value 0.0.
    return lookup_ops.MutableHashTable(dtypes.int64, dtypes.float32, 0.0)

  def benchmark_single_repeated_scalar_insert_scalar(self):
    """Repeatedly insert one scalar key; table stays at size 1."""
    table = self._create_table()
    value = variables.Variable(1.0)
    insert = table.insert(0, value)
    size = table.size()
    with session.Session() as sess:
      sess.run(value.initializer)
      self.run_op_benchmark(sess, insert, burn_iters=10, min_iters=10000)
      assert sess.run(size) == 1

  def benchmark_many_repeated_scalar_insert_scalar(self):
    """Insert a fresh scalar key per iteration via a Counter dataset."""
    table = self._create_table()
    c = dataset_ops.make_one_shot_iterator(counter.Counter()).get_next()
    value = variables.Variable(1.0)
    insert = table.insert(c, value)
    size = table.size()
    with session.Session() as sess:
      sess.run(value.initializer)
      self.run_op_benchmark(sess, insert, burn_iters=10, min_iters=10000)
      assert sess.run(size) >= 10000

  def benchmark_single_repeated_batch_32_insert_scalar(self):
    """Repeatedly insert the same batch of 32 keys; table stays at 32."""
    table = self._create_table()
    value = variables.Variable([1.0] * 32)
    insert = table.insert(list(range(32)), value)
    size = table.size()
    with session.Session() as sess:
      sess.run(value.initializer)
      self.run_op_benchmark(sess, insert, burn_iters=10, min_iters=1000)
      assert sess.run(size) == 32

  def benchmark_many_repeated_batch_32_insert_scalar(self):
    """Insert a fresh batch of 32 keys per iteration."""
    table = self._create_table()
    c = dataset_ops.make_one_shot_iterator(counter.Counter()).get_next()
    value = variables.Variable([1.0] * 32)
    insert = table.insert(32 * c + list(range(32)), value)
    size = table.size()
    with session.Session() as sess:
      sess.run(value.initializer)
      self.run_op_benchmark(sess, insert, burn_iters=10, min_iters=1000)
      assert sess.run(size) >= 1000 * 32
class DenseHashTableBenchmark(MutableHashTableBenchmark):
  """Runs the same benchmarks against DenseHashTable."""

  def _create_table(self):
    # DenseHashTable needs reserved sentinel keys that never occur as
    # real keys: -1 marks empty buckets, -2 marks deleted entries.
    return lookup_ops.DenseHashTable(
        dtypes.int64,
        dtypes.float32,
        default_value=0.0,
        empty_key=-1,
        deleted_key=-2)
if __name__ == "__main__":
test.main()
| 39.836845 | 80 | 0.661494 |
4167024a2b9107d4588772e4f0467fdcb2779091 | 13,839 | py | Python | twisted/words/test/test_jabbercomponent.py | twonds/twisted | d6e270a465d371c3bed01bf369af497b77eb9f1e | [
"Unlicense",
"MIT"
] | 1 | 2021-01-27T19:11:21.000Z | 2021-01-27T19:11:21.000Z | twisted/words/test/test_jabbercomponent.py | twonds/twisted | d6e270a465d371c3bed01bf369af497b77eb9f1e | [
"Unlicense",
"MIT"
] | null | null | null | twisted/words/test/test_jabbercomponent.py | twonds/twisted | d6e270a465d371c3bed01bf369af497b77eb9f1e | [
"Unlicense",
"MIT"
] | 3 | 2017-01-04T01:24:15.000Z | 2020-06-18T16:14:56.000Z | # Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.words.protocols.jabber.component}
"""
from twisted.python import failure
from twisted.python.hashlib import sha1
from twisted.trial import unittest
from twisted.words.protocols.jabber import component, xmlstream
from twisted.words.protocols.jabber.jid import JID
from twisted.words.xish import domish
from twisted.words.xish.utility import XmlPipe
class DummyTransport:
    """A write-only fake transport that records written data.

    Everything passed to write() is appended to the caller-supplied
    list, so tests can assert on the exact bytes "sent".
    """

    def __init__(self, list):
        # Keep a reference to the caller's list so the caller can
        # inspect the recorded writes afterwards.
        self.list = list

    def write(self, bytes):
        self.list.extend([bytes])
class ComponentInitiatingInitializerTest(unittest.TestCase):
    """Tests for L{component.ComponentInitiatingInitializer}."""

    def setUp(self):
        # Build an XmlStream whose output is captured in self.output, feed
        # it a server stream header, and attach the initializer under test.
        self.output = []
        self.authenticator = xmlstream.Authenticator()
        self.authenticator.password = 'secret'
        self.xmlstream = xmlstream.XmlStream(self.authenticator)
        self.xmlstream.namespace = 'test:component'
        self.xmlstream.send = self.output.append
        self.xmlstream.connectionMade()
        self.xmlstream.dataReceived(
            "<stream:stream xmlns='test:component' "
            "xmlns:stream='http://etherx.jabber.org/streams' "
            "from='example.com' id='12345' version='1.0'>")
        self.xmlstream.sid = '12345'
        self.init = component.ComponentInitiatingInitializer(self.xmlstream)

    def testHandshake(self):
        """
        Test basic operations of component handshake.
        """
        d = self.init.initialize()

        # the initializer should have sent the handshake request
        handshake = self.output[-1]
        self.assertEquals('handshake', handshake.name)
        self.assertEquals('test:component', handshake.uri)
        # Handshake digest is SHA1 of the stream id concatenated with the
        # shared secret.
        self.assertEquals(sha1("%s%s" % ('12345', 'secret')).hexdigest(),
                          unicode(handshake))

        # successful authentication
        handshake.children = []
        self.xmlstream.dataReceived(handshake.toXml())
        return d
class ComponentAuthTest(unittest.TestCase):
    """Tests the full jabber:component:accept authentication exchange."""

    def authPassed(self, stream):
        # Observer for STREAM_AUTHD_EVENT; records that auth completed.
        self.authComplete = True

    def testAuth(self):
        """Handshake digest is sent and a handshake reply authenticates."""
        self.authComplete = False
        outlist = []

        ca = component.ConnectComponentAuthenticator("cjid", "secret")
        xs = xmlstream.XmlStream(ca)
        xs.transport = DummyTransport(outlist)

        xs.addObserver(xmlstream.STREAM_AUTHD_EVENT,
                       self.authPassed)

        # Go...
        xs.connectionMade()
        xs.dataReceived("<stream:stream xmlns='jabber:component:accept' xmlns:stream='http://etherx.jabber.org/streams' from='cjid' id='12345'>")

        # Calculate what we expect the handshake value to be
        hv = sha1("%s%s" % ("12345", "secret")).hexdigest()

        self.assertEquals(outlist[1], "<handshake>%s</handshake>" % (hv))

        xs.dataReceived("<handshake/>")

        self.assertEquals(self.authComplete, True)
class JabberServiceHarness(component.Service):
    """Test double for a component service; records lifecycle callbacks."""

    def __init__(self):
        # Flags flipped by the corresponding callbacks below.
        self.componentConnectedFlag = False
        self.componentDisconnectedFlag = False
        self.transportConnectedFlag = False

    def componentConnected(self, xmlstream):
        self.componentConnectedFlag = True

    def componentDisconnected(self):
        self.componentDisconnectedFlag = True

    def transportConnected(self, xmlstream):
        self.transportConnectedFlag = True
class TestJabberServiceManager(unittest.TestCase):
    """Tests that ServiceManager relays stream events to child services."""

    def testSM(self):
        """Child services get transport/component connect and disconnect."""
        # Setup service manager and test harnes
        sm = component.ServiceManager("foo", "password")
        svc = JabberServiceHarness()
        svc.setServiceParent(sm)

        # Create a write list
        wlist = []

        # Setup a XmlStream
        xs = sm.getFactory().buildProtocol(None)
        xs.transport = self
        xs.transport.write = wlist.append

        # Indicate that it's connected
        xs.connectionMade()

        # Ensure the test service harness got notified
        self.assertEquals(True, svc.transportConnectedFlag)

        # Jump ahead and pretend like the stream got auth'd
        xs.dispatch(xs, xmlstream.STREAM_AUTHD_EVENT)

        # Ensure the test service harness got notified
        self.assertEquals(True, svc.componentConnectedFlag)

        # Pretend to drop the connection
        xs.connectionLost(None)

        # Ensure the test service harness got notified
        self.assertEquals(True, svc.componentDisconnectedFlag)
class RouterTest(unittest.TestCase):
    """
    Tests for L{component.Router}.
    """

    def test_addRoute(self):
        """
        Test route registration and routing on incoming stanzas.
        """
        router = component.Router()

        # Capture what the router would route onward.
        routed = []
        router.route = lambda element: routed.append(element)

        pipe = XmlPipe()
        router.addRoute('example.org', pipe.sink)
        self.assertEquals(1, len(router.routes))
        self.assertEquals(pipe.sink, router.routes['example.org'])

        # An element arriving on the registered pipe is handed to route().
        element = domish.Element(('testns', 'test'))
        pipe.source.send(element)
        self.assertEquals([element], routed)


    def test_route(self):
        """
        Test routing of a message.
        """
        component1 = XmlPipe()
        component2 = XmlPipe()
        router = component.Router()
        router.addRoute('component1.example.org', component1.sink)
        router.addRoute('component2.example.org', component2.sink)

        outgoing = []
        component2.source.addObserver('/*',
                                      lambda element: outgoing.append(element))
        # Stanza addressed to component2 must come out of component2's pipe.
        stanza = domish.Element((None, 'presence'))
        stanza['from'] = 'component1.example.org'
        stanza['to'] = 'component2.example.org'
        component1.source.send(stanza)
        self.assertEquals([stanza], outgoing)


    def test_routeDefault(self):
        """
        Test routing of a message using the default route.

        The default route is the one with C{None} as its key in the
        routing table. It is taken when there is no more specific route
        in the routing table that matches the stanza's destination.
        """
        component1 = XmlPipe()
        s2s = XmlPipe()
        router = component.Router()
        router.addRoute('component1.example.org', component1.sink)
        router.addRoute(None, s2s.sink)

        outgoing = []
        s2s.source.addObserver('/*', lambda element: outgoing.append(element))
        # 'example.com' has no specific route, so the None route is used.
        stanza = domish.Element((None, 'presence'))
        stanza['from'] = 'component1.example.org'
        stanza['to'] = 'example.com'
        component1.source.send(stanza)
        self.assertEquals([stanza], outgoing)
class ListenComponentAuthenticatorTest(unittest.TestCase):
    """
    Tests for L{component.ListenComponentAuthenticator}.

    The test case itself doubles as the transport (see C{loseConnection}),
    and outgoing stanzas are captured in C{self.output} instead of being
    written to a real connection.
    """

    def setUp(self):
        # Capture everything the stream would send; replace the secret-based
        # authenticator's transport write with a list append.
        self.output = []
        authenticator = component.ListenComponentAuthenticator('secret')
        self.xmlstream = xmlstream.XmlStream(authenticator)
        self.xmlstream.send = self.output.append

    def loseConnection(self):
        """
        Stub loseConnection because we are a transport.
        """
        self.xmlstream.connectionLost("no reason")

    def test_streamStarted(self):
        """
        The received stream header should set several attributes.
        """
        observers = []

        def addOnetimeObserver(event, observerfn):
            observers.append((event, observerfn))

        xs = self.xmlstream
        xs.addOnetimeObserver = addOnetimeObserver
        xs.makeConnection(self)
        # Before the header arrives no session id exists and nothing was sent.
        self.assertIdentical(None, xs.sid)
        self.assertFalse(xs._headerSent)
        xs.dataReceived("<stream:stream xmlns='jabber:component:accept' "
                        "xmlns:stream='http://etherx.jabber.org/streams' "
                        "to='component.example.org'>")
        # Receiving the header assigns a sid, echoes our header, and
        # registers the authenticator as a one-time observer for elements.
        self.assertEqual((0, 0), xs.version)
        self.assertNotIdentical(None, xs.sid)
        self.assertTrue(xs._headerSent)
        self.assertEquals(('/*', xs.authenticator.onElement), observers[-1])

    def test_streamStartedWrongNamespace(self):
        """
        The received stream header should have a correct namespace.
        """
        streamErrors = []
        xs = self.xmlstream
        xs.sendStreamError = streamErrors.append
        xs.makeConnection(self)
        # 'jabber:client' is not valid for a component connection.
        xs.dataReceived("<stream:stream xmlns='jabber:client' "
                        "xmlns:stream='http://etherx.jabber.org/streams' "
                        "to='component.example.org'>")
        self.assertEquals(1, len(streamErrors))
        self.assertEquals('invalid-namespace', streamErrors[-1].condition)

    def test_streamStartedNoTo(self):
        """
        The received stream header should have a 'to' attribute.
        """
        streamErrors = []
        xs = self.xmlstream
        xs.sendStreamError = streamErrors.append
        xs.makeConnection(self)
        xs.dataReceived("<stream:stream xmlns='jabber:component:accept' "
                        "xmlns:stream='http://etherx.jabber.org/streams'>")
        self.assertEquals(1, len(streamErrors))
        self.assertEquals('improper-addressing', streamErrors[-1].condition)

    def test_onElement(self):
        """
        We expect a handshake element with a hash.
        """
        handshakes = []
        xs = self.xmlstream
        xs.authenticator.onHandshake = handshakes.append
        handshake = domish.Element(('jabber:component:accept', 'handshake'))
        handshake.addContent('1234')
        xs.authenticator.onElement(handshake)
        # The handshake's text content is forwarded to onHandshake.
        self.assertEqual('1234', handshakes[-1])

    def test_onElementNotHandshake(self):
        """
        Reject elements that are not handshakes
        """
        handshakes = []
        streamErrors = []
        xs = self.xmlstream
        xs.authenticator.onHandshake = handshakes.append
        xs.sendStreamError = streamErrors.append
        element = domish.Element(('jabber:component:accept', 'message'))
        xs.authenticator.onElement(element)
        # No handshake recorded; a 'not-authorized' stream error is sent.
        self.assertFalse(handshakes)
        self.assertEquals('not-authorized', streamErrors[-1].condition)

    def test_onHandshake(self):
        """
        Receiving a handshake matching the secret authenticates the stream.
        """
        authd = []

        def authenticated(xs):
            authd.append(xs)

        xs = self.xmlstream
        xs.addOnetimeObserver(xmlstream.STREAM_AUTHD_EVENT, authenticated)
        xs.sid = '1234'
        # SHA-1 of sid + secret ('1234' + 'secret').
        theHash = '32532c0f7dbf1253c095b18b18e36d38d94c1256'
        xs.authenticator.onHandshake(theHash)
        # An empty handshake reply confirms authentication.
        self.assertEqual('<handshake/>', self.output[-1])
        self.assertEquals(1, len(authd))

    def test_onHandshakeWrongHash(self):
        """
        Receiving a bad handshake should yield a stream error.
        """
        streamErrors = []
        authd = []

        def authenticated(xs):
            authd.append(xs)

        xs = self.xmlstream
        xs.addOnetimeObserver(xmlstream.STREAM_AUTHD_EVENT, authenticated)
        xs.sendStreamError = streamErrors.append
        xs.sid = '1234'
        theHash = '1234'
        xs.authenticator.onHandshake(theHash)
        self.assertEquals('not-authorized', streamErrors[-1].condition)
        self.assertEquals(0, len(authd))
class XMPPComponentServerFactoryTest(unittest.TestCase):
    """
    Tests for L{component.XMPPComponentServerFactory}.
    """

    def setUp(self):
        # Factory with a shared router; build one stream and give it an
        # identity so it can be entered into the routing table.
        self.router = component.Router()
        self.factory = component.XMPPComponentServerFactory(self.router,
                                                            'secret')
        self.xmlstream = self.factory.buildProtocol(None)
        self.xmlstream.thisEntity = JID('component.example.org')

    def test_makeConnection(self):
        """
        A new connection increases the stream serial count. No logs by default.
        """
        self.xmlstream.dispatch(self.xmlstream,
                                xmlstream.STREAM_CONNECTED_EVENT)
        self.assertEqual(0, self.xmlstream.serial)
        self.assertEqual(1, self.factory.serial)
        self.assertIdentical(None, self.xmlstream.rawDataInFn)
        self.assertIdentical(None, self.xmlstream.rawDataOutFn)

    def test_makeConnectionLogTraffic(self):
        """
        Setting logTraffic should set up raw data loggers.
        """
        self.factory.logTraffic = True
        self.xmlstream.dispatch(self.xmlstream,
                                xmlstream.STREAM_CONNECTED_EVENT)
        self.assertNotIdentical(None, self.xmlstream.rawDataInFn)
        self.assertNotIdentical(None, self.xmlstream.rawDataOutFn)

    def test_onError(self):
        """
        An observer for stream errors should trigger onError to log it.
        """
        self.xmlstream.dispatch(self.xmlstream,
                                xmlstream.STREAM_CONNECTED_EVENT)

        class TestError(Exception):
            pass

        reason = failure.Failure(TestError())
        self.xmlstream.dispatch(reason, xmlstream.STREAM_ERROR_EVENT)
        # The error must have been logged exactly once.
        self.assertEqual(1, len(self.flushLoggedErrors(TestError)))

    def test_connectionInitialized(self):
        """
        Make sure a new stream is added to the routing table.
        """
        self.xmlstream.dispatch(self.xmlstream, xmlstream.STREAM_AUTHD_EVENT)
        self.assertIn('component.example.org', self.router.routes)
        self.assertIdentical(self.xmlstream,
                             self.router.routes['component.example.org'])

    def test_connectionLost(self):
        """
        Make sure a stream is removed from the routing table on disconnect.
        """
        self.xmlstream.dispatch(self.xmlstream, xmlstream.STREAM_AUTHD_EVENT)
        self.xmlstream.dispatch(None, xmlstream.STREAM_END_EVENT)
        self.assertNotIn('component.example.org', self.router.routes)
| 32.716312 | 145 | 0.633861 |
af3cb186f9ab2913c4969b1a5a7d85c2e5bb292e | 26,722 | py | Python | tests/test_writable_nested_model_serializer.py | d1opensource/drf-writable-nested | 81839646011c38d233e3aa66c93bf1a7085883ba | [
"BSD-2-Clause"
] | null | null | null | tests/test_writable_nested_model_serializer.py | d1opensource/drf-writable-nested | 81839646011c38d233e3aa66c93bf1a7085883ba | [
"BSD-2-Clause"
] | 1 | 2017-12-08T22:54:02.000Z | 2017-12-08T23:00:06.000Z | tests/test_writable_nested_model_serializer.py | d1opensource/drf-writable-nested | 81839646011c38d233e3aa66c93bf1a7085883ba | [
"BSD-2-Clause"
] | 1 | 2020-03-15T19:34:32.000Z | 2020-03-15T19:34:32.000Z | import uuid
from rest_framework.exceptions import ValidationError
from django.test import TestCase
from django.http.request import QueryDict
from . import (
models,
serializers,
)
class WritableNestedModelSerializerTest(TestCase):
    """
    End-to-end tests for writable nested serializers: create/update/partial
    update across reverse one-to-one, FK, M2M and generic relations.
    """

    def get_initial_data(self):
        """Payload for a user with a nested profile (FK, M2M, reverse FK)."""
        return {
            'username': 'test',
            'profile': {
                'access_key': {
                    'key': 'key',
                },
                'sites': [
                    {
                        'url': 'http://google.com',
                    },
                    {
                        'url': 'http://yahoo.com',
                    },
                ],
                'avatars': [
                    {
                        'image': 'image-1.png',
                    },
                    {
                        'image': 'image-2.png',
                    },
                ],
                'message_set': [
                    {
                        'message': 'Message 1'
                    },
                    {
                        'message': 'Message 2'
                    },
                    {
                        'message': 'Message 3'
                    },
                ]
            },
        }

    def test_create(self):
        """Creating a user also creates all nested related objects."""
        serializer = serializers.UserSerializer(data=self.get_initial_data())
        serializer.is_valid(raise_exception=True)
        user = serializer.save()
        self.assertIsNotNone(user)
        self.assertEqual(user.username, 'test')

        profile = user.profile
        self.assertIsNotNone(profile)
        self.assertIsNotNone(profile.access_key)
        self.assertEqual(profile.access_key.key, 'key')
        self.assertEqual(profile.sites.count(), 2)
        self.assertSetEqual(
            set(profile.sites.values_list('url', flat=True)),
            {'http://google.com', 'http://yahoo.com'}
        )
        self.assertEqual(profile.avatars.count(), 2)
        self.assertSetEqual(
            set(profile.avatars.values_list('image', flat=True)),
            {'image-1.png', 'image-2.png'}
        )

        # Check instances count
        self.assertEqual(models.User.objects.count(), 1)
        self.assertEqual(models.Profile.objects.count(), 1)
        self.assertEqual(models.Site.objects.count(), 2)
        self.assertEqual(models.Avatar.objects.count(), 2)
        self.assertEqual(models.AccessKey.objects.count(), 1)

    def test_create_with_not_specified_reverse_one_to_one(self):
        """Omitting the reverse one-to-one key creates no profile."""
        serializer = serializers.UserSerializer(data={'username': 'test'})
        serializer.is_valid(raise_exception=True)
        user = serializer.save()
        self.assertFalse(models.Profile.objects.filter(user=user).exists())

    def test_create_with_empty_reverse_one_to_one(self):
        """An explicit null reverse one-to-one creates no profile."""
        serializer = serializers.UserSerializer(
            data={'username': 'test', 'profile': None})
        serializer.is_valid(raise_exception=True)
        user = serializer.save()
        self.assertFalse(models.Profile.objects.filter(user=user).exists())

    def test_create_with_custom_field(self):
        """Extra non-model fields on the serializer do not break saving."""
        data = self.get_initial_data()
        data['custom_field'] = 'custom value'
        serializer = serializers.CustomSerializer(data=data)
        serializer.is_valid(raise_exception=True)
        user = serializer.save()
        self.assertIsNotNone(user)

    def test_create_with_generic_relation(self):
        """Nested writes work through a GenericRelation (tags)."""
        first_tag = 'the_first_tag'
        next_tag = 'the_next_tag'
        data = {
            'tags': [
                {'tag': first_tag},
                {'tag': next_tag},
            ],
        }
        serializer = serializers.TaggedItemSerializer(data=data)
        serializer.is_valid(raise_exception=True)
        item = serializer.save()
        self.assertIsNotNone(item)
        self.assertEqual(2, models.Tag.objects.count())
        self.assertEqual(first_tag, item.tags.all()[0].tag)
        self.assertEqual(next_tag, item.tags.all()[1].tag)

    def test_update(self):
        """Full update: matched pks are updated, missing ones deleted,
        new dicts created; FK/M2M targets are kept, not deleted."""
        serializer = serializers.UserSerializer(data=self.get_initial_data())
        serializer.is_valid(raise_exception=True)
        user = serializer.save()

        # Check instances count
        self.assertEqual(models.User.objects.count(), 1)
        self.assertEqual(models.Profile.objects.count(), 1)
        self.assertEqual(models.Site.objects.count(), 2)
        self.assertEqual(models.Avatar.objects.count(), 2)
        self.assertEqual(models.Message.objects.count(), 3)

        # Update
        user_pk = user.pk
        profile_pk = user.profile.pk
        # One pk passed as a string to verify pk coercion on match.
        message_to_update_str_pk = str(user.profile.message_set.first().pk)
        message_to_update_pk = user.profile.message_set.last().pk

        serializer = serializers.UserSerializer(
            instance=user,
            data={
                'pk': user_pk,
                'username': 'new',
                'profile': {
                    'pk': profile_pk,
                    'access_key': None,
                    'sites': [
                        {
                            'url': 'http://new-site.com',
                        },
                    ],
                    'avatars': [
                        {
                            'pk': user.profile.avatars.earliest('pk').pk,
                            'image': 'old-image-1.png',
                        },
                        {
                            'image': 'new-image-1.png',
                        },
                        {
                            'image': 'new-image-2.png',
                        },
                    ],
                    'message_set': [
                        {
                            'pk': message_to_update_str_pk,
                            'message': 'Old message 1'
                        },
                        {
                            'pk': message_to_update_pk,
                            'message': 'Old message 2'
                        },
                        {
                            'message': 'New message 1'
                        }
                    ],
                },
            },
        )
        serializer.is_valid(raise_exception=True)
        user = serializer.save()
        user.refresh_from_db()
        self.assertIsNotNone(user)
        self.assertEqual(user.pk, user_pk)
        self.assertEqual(user.username, 'new')

        profile = user.profile
        self.assertIsNotNone(profile)
        self.assertIsNone(profile.access_key)
        self.assertEqual(profile.pk, profile_pk)
        self.assertEqual(profile.sites.count(), 1)
        self.assertSetEqual(
            set(profile.sites.values_list('url', flat=True)),
            {'http://new-site.com'}
        )
        self.assertEqual(profile.avatars.count(), 3)
        self.assertSetEqual(
            set(profile.avatars.values_list('image', flat=True)),
            {'old-image-1.png', 'new-image-1.png', 'new-image-2.png'}
        )
        self.assertSetEqual(
            set(profile.message_set.values_list('message', flat=True)),
            {'Old message 1', 'Old message 2', 'New message 1'}
        )

        # Check that message which supposed to be updated still in profile
        # message_set (new message wasn't created instead of update)
        self.assertIn(
            message_to_update_pk,
            profile.message_set.values_list('id', flat=True)
        )
        self.assertIn(
            uuid.UUID(message_to_update_str_pk),
            profile.message_set.values_list('id', flat=True)
        )

        # Check instances count
        self.assertEqual(models.User.objects.count(), 1)
        self.assertEqual(models.Profile.objects.count(), 1)
        self.assertEqual(models.Avatar.objects.count(), 3)
        self.assertEqual(models.Message.objects.count(), 3)

        # Access key shouldn't be removed because it is FK
        self.assertEqual(models.AccessKey.objects.count(), 1)

        # Sites shouldn't be deleted either as it is M2M
        self.assertEqual(models.Site.objects.count(), 3)

    def test_update_raise_protected_error(self):
        """Deleting a PROTECTed nested instance surfaces as ValidationError."""
        serializer = serializers.UserSerializer(data=self.get_initial_data())
        serializer.is_valid(raise_exception=True)
        user = serializer.save()
        # Protect the first avatar by referencing it from the user.
        user.user_avatar = user.profile.avatars.first()
        user.save()
        serializer = serializers.ProfileSerializer(
            instance=user.profile,
            data={
                'access_key': None,
                'sites': [],
                'avatars': [
                    {
                        'pk': user.profile.avatars.last().id,
                        'image': 'old-image-1.png',
                    },
                    {
                        'image': 'new-image-1.png',
                    },
                ],
                'message_set': [],
            }
        )
        serializer.is_valid(raise_exception=True)
        with self.assertRaises(ValidationError):
            serializer.save()
        # Check that protected avatar haven't been deleted
        self.assertEqual(models.Avatar.objects.count(), 3)

    def test_update_with_empty_reverse_one_to_one(self):
        """Setting the reverse one-to-one to null deletes the profile."""
        serializer = serializers.UserSerializer(data=self.get_initial_data())
        serializer.is_valid(raise_exception=True)
        user = serializer.save()
        self.assertIsNotNone(user.profile)
        serializer = serializers.UserSerializer(
            instance=user,
            data={
                'pk': user.pk,
                'username': 'new',
                'profile': None
            }
        )
        serializer.is_valid(raise_exception=True)
        user = serializer.save()
        self.assertFalse(models.Profile.objects.filter(user=user).exists())

    def test_partial_update(self):
        """Partial update without nested data leaves all relations intact."""
        serializer = serializers.UserSerializer(data=self.get_initial_data())
        serializer.is_valid(raise_exception=True)
        user = serializer.save()

        # Check instances count
        self.assertEqual(models.User.objects.count(), 1)
        self.assertEqual(models.Profile.objects.count(), 1)
        self.assertEqual(models.Site.objects.count(), 2)
        self.assertEqual(models.Avatar.objects.count(), 2)
        self.assertEqual(models.AccessKey.objects.count(), 1)

        # Partial update
        user_pk = user.pk
        profile_pk = user.profile.pk

        serializer = serializers.UserSerializer(
            instance=user,
            partial=True,
            data={
                'pk': user_pk,
                'username': 'new',
            }
        )
        serializer.is_valid(raise_exception=True)
        user = serializer.save()
        user.refresh_from_db()
        self.assertIsNotNone(user)
        self.assertEqual(user.pk, user_pk)
        self.assertEqual(user.username, 'new')

        profile = user.profile
        self.assertIsNotNone(profile)
        self.assertIsNotNone(profile.access_key)
        self.assertEqual(profile.access_key.key, 'key')
        self.assertEqual(profile.pk, profile_pk)
        self.assertEqual(profile.sites.count(), 2)
        self.assertSetEqual(
            set(profile.sites.values_list('url', flat=True)),
            {'http://google.com', 'http://yahoo.com'}
        )
        self.assertEqual(profile.avatars.count(), 2)
        self.assertSetEqual(
            set(profile.avatars.values_list('image', flat=True)),
            {'image-1.png', 'image-2.png'}
        )

        # Check instances count
        self.assertEqual(models.User.objects.count(), 1)
        self.assertEqual(models.Profile.objects.count(), 1)
        self.assertEqual(models.Site.objects.count(), 2)
        self.assertEqual(models.Avatar.objects.count(), 2)
        self.assertEqual(models.AccessKey.objects.count(), 1)

    def test_partial_update_direct_fk(self):
        """Partial update with a matching pk updates the direct FK in place."""
        serializer = serializers.UserSerializer(data=self.get_initial_data())
        serializer.is_valid(raise_exception=True)
        user = serializer.save()

        # Check instances count
        self.assertEqual(models.User.objects.count(), 1)
        self.assertEqual(models.Profile.objects.count(), 1)
        self.assertEqual(models.Site.objects.count(), 2)
        self.assertEqual(models.Avatar.objects.count(), 2)
        self.assertEqual(models.AccessKey.objects.count(), 1)

        # Partial update
        user_pk = user.pk
        profile_pk = user.profile.pk
        access_key_pk = user.profile.access_key.pk

        serializer = serializers.UserSerializer(
            instance=user,
            partial=True,
            data={
                'pk': user_pk,
                'profile': {
                    'pk': profile_pk,
                    'access_key': {
                        'pk': access_key_pk,
                        'key': 'new',
                    }
                },
            }
        )
        serializer.is_valid(raise_exception=True)
        user = serializer.save()
        user.refresh_from_db()
        self.assertIsNotNone(user)
        self.assertEqual(user.pk, user_pk)
        self.assertEqual(user.username, 'test')

        profile = user.profile
        self.assertIsNotNone(profile)
        access_key = profile.access_key
        self.assertIsNotNone(access_key)
        self.assertEqual(access_key.key, 'new')
        self.assertEqual(access_key.pk, access_key_pk)

        # Check instances count
        self.assertEqual(models.User.objects.count(), 1)
        self.assertEqual(models.Profile.objects.count(), 1)
        self.assertEqual(models.Site.objects.count(), 2)
        self.assertEqual(models.Avatar.objects.count(), 2)
        self.assertEqual(models.AccessKey.objects.count(), 1)

    def test_nested_partial_update(self):
        """Partial update without a pk on the FK creates a new access key."""
        serializer = serializers.UserSerializer(data=self.get_initial_data())
        serializer.is_valid(raise_exception=True)
        user = serializer.save()

        # Check instances count
        self.assertEqual(models.User.objects.count(), 1)
        self.assertEqual(models.Profile.objects.count(), 1)
        self.assertEqual(models.Site.objects.count(), 2)
        self.assertEqual(models.Avatar.objects.count(), 2)
        self.assertEqual(models.AccessKey.objects.count(), 1)

        # Partial update
        user_pk = user.pk
        profile_pk = user.profile.pk

        serializer = serializers.UserSerializer(
            instance=user,
            partial=True,
            data={
                'pk': user_pk,
                'profile': {
                    'pk': profile_pk,
                    'access_key': {
                        'key': 'new',
                    }
                },
            }
        )
        serializer.is_valid(raise_exception=True)
        user = serializer.save()
        user.refresh_from_db()
        self.assertIsNotNone(user)
        self.assertEqual(user.pk, user_pk)
        self.assertEqual(user.username, 'test')

        profile = user.profile
        self.assertIsNotNone(profile)
        self.assertIsNotNone(profile.access_key)
        self.assertEqual(profile.access_key.key, 'new')
        self.assertEqual(profile.pk, profile_pk)
        self.assertEqual(profile.sites.count(), 2)
        self.assertSetEqual(
            set(profile.sites.values_list('url', flat=True)),
            {'http://google.com', 'http://yahoo.com'}
        )
        self.assertEqual(profile.avatars.count(), 2)
        self.assertSetEqual(
            set(profile.avatars.values_list('image', flat=True)),
            {'image-1.png', 'image-2.png'}
        )

        # Check instances count
        self.assertEqual(models.User.objects.count(), 1)
        self.assertEqual(models.Profile.objects.count(), 1)
        self.assertEqual(models.Site.objects.count(), 2)
        self.assertEqual(models.Avatar.objects.count(), 2)

        # Old access key shouldn't be deleted
        self.assertEqual(models.AccessKey.objects.count(), 2)

    def test_update_with_generic_relation(self):
        """Generic-relation children update by pk and are replaced otherwise."""
        item = models.TaggedItem.objects.create()
        serializer = serializers.TaggedItemSerializer(
            instance=item,
            data={
                'tags': [{
                    'tag': 'the_tag',
                }]
            }
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        item.refresh_from_db()
        self.assertEqual(1, item.tags.count())

        serializer = serializers.TaggedItemSerializer(
            instance=item,
            data={
                'tags': [{
                    'pk': item.tags.get().pk,
                    'tag': 'the_new_tag',
                }]
            }
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        item.refresh_from_db()
        self.assertEqual('the_new_tag', item.tags.get().tag)

        serializer = serializers.TaggedItemSerializer(
            instance=item,
            data={
                'tags': [{
                    'tag': 'the_third_tag',
                }]
            }
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        item.refresh_from_db()
        self.assertEqual(1, item.tags.count())
        self.assertEqual('the_third_tag', item.tags.get().tag)

    def test_create_m2m_with_existing_related_objects(self):
        """M2M payload may mix serialized existing objects and new dicts."""
        users = [
            models.User.objects.create(username='first user'),
            models.User.objects.create(username='second user'),
        ]
        users_data = serializers.UserSerializer(
            users,
            many=True
        ).data
        users_data.append({'username': 'third user'})
        data = {
            'name': 'Team',
            'members': users_data,
        }
        serializer = serializers.TeamSerializer(data=data)
        self.assertTrue(serializer.is_valid())
        team = serializer.save()
        self.assertEqual(3, team.members.count())
        self.assertEqual(3, models.User.objects.count())
        self.assertEqual('first user', team.members.first().username)

        # Update
        data = serializers.TeamSerializer(team).data
        data['members'].append({'username': 'fourth user'})
        serializer = serializers.TeamSerializer(team, data=data)
        self.assertTrue(serializer.is_valid())
        team = serializer.save()
        self.assertEqual(4, team.members.count())
        self.assertEqual(4, models.User.objects.count())
        self.assertEqual('fourth user', team.members.last().username)

    def test_create_fk_with_existing_related_object(self):
        """A reverse-FK child with a pk is updated and re-parented."""
        user = models.User.objects.create(username='user one')
        profile = models.Profile.objects.create(user=user)
        avatar = models.Avatar.objects.create(profile=profile)

        data = self.get_initial_data()
        data['profile']['avatars'][0]['pk'] = avatar.pk
        serializer = serializers.UserSerializer(data=data)
        self.assertTrue(serializer.is_valid())
        new_user = serializer.save()
        self.assertEqual(2, models.Avatar.objects.count())
        avatar.refresh_from_db()
        self.assertEqual('image-1.png', avatar.image)
        self.assertNotEqual(new_user.profile, profile)
        self.assertEqual(new_user.profile, avatar.profile)

    def test_create_with_existing_direct_fk_object(self):
        """A direct-FK payload with a pk updates the existing access key."""
        access_key = models.AccessKey.objects.create(
            key='the-key',
        )
        serializer = serializers.AccessKeySerializer(
            instance=access_key,
        )
        data = self.get_initial_data()
        data['profile']['access_key'] = serializer.data
        data['profile']['access_key']['key'] = 'new-key'
        serializer = serializers.UserSerializer(
            data=data,
        )
        self.assertTrue(serializer.is_valid())
        user = serializer.save()
        access_key.refresh_from_db()
        self.assertEqual(access_key, user.profile.access_key)
        self.assertEqual('new-key', access_key.key)

    def test_create_with_save_kwargs(self):
        """Kwargs passed to save() cascade into the nested serializers."""
        data = self.get_initial_data()
        serializer = serializers.UserSerializer(data=data)
        serializer.is_valid(raise_exception=True)
        user = serializer.save(
            profile={
                'access_key': {'key': 'key2'},
                'sites': {'url': 'http://test.com'}
            },
        )
        self.assertEqual('key2', user.profile.access_key.key)
        sites = list(user.profile.sites.all())
        self.assertEqual('http://test.com', sites[0].url)
        self.assertEqual('http://test.com', sites[1].url)

    def test_custom_pk(self):
        """Nested matching works with a non-'pk' primary key (slug)."""
        data = {
            'username': 'username',
            'custompks': [{
                'slug': 'custom-key',
            }]
        }
        serializer = serializers.UserWithCustomPKSerializer(
            data=data,
        )
        self.assertTrue(serializer.is_valid())
        user = serializer.save()
        self.assertEqual('custom-key',
                         user.custompks.first().slug)
        data['custompks'].append({
            'slug': 'next-key',
        })
        # Changing the slug of an existing row creates a new row, since the
        # slug IS the primary key.
        data['custompks'][0]['slug'] = 'key2'
        serializer = serializers.UserWithCustomPKSerializer(
            data=data,
            instance=user,
        )
        self.assertTrue(serializer.is_valid())
        user = serializer.save()
        user.refresh_from_db()
        custompks = list(user.custompks.all())
        self.assertEqual(2, len(custompks))
        self.assertEqual('key2', custompks[0].slug)
        self.assertEqual('next-key', custompks[1].slug)
        self.assertEqual(2, models.CustomPK.objects.count())

    def get_another_initial_data(self):
        """Payload variant whose keys use explicit `source=` field names."""
        return {
            'username': 'test',
            'another_profile': {
                'another_access_key': {
                    'key': 'key',
                },
                'another_sites': [
                    {
                        'url': 'http://google.com',
                    },
                    {
                        'url': 'http://yahoo.com',
                    },
                ],
                'another_avatars': [
                    {
                        'image': 'image-1.png',
                    },
                    {
                        'image': 'image-2.png',
                    },
                ],
            },
        }

    def test_create_another_user_with_explicit_source(self):
        """Create works when nested fields are declared with `source=`."""
        serializer = serializers.AnotherUserSerializer(
            data=self.get_another_initial_data())
        serializer.is_valid(raise_exception=True)
        user = serializer.save()
        self.assertIsNotNone(user)
        self.assertEqual(user.username, 'test')

        profile = user.anotherprofile
        self.assertIsNotNone(profile)
        self.assertIsNotNone(profile.access_key)
        self.assertEqual(profile.access_key.key, 'key')
        self.assertEqual(profile.sites.count(), 2)
        self.assertSetEqual(
            set(profile.sites.values_list('url', flat=True)),
            {'http://google.com', 'http://yahoo.com'}
        )
        self.assertEqual(profile.avatars.count(), 2)
        self.assertSetEqual(
            set(profile.avatars.values_list('image', flat=True)),
            {'image-1.png', 'image-2.png'}
        )

        # Check instances count
        self.assertEqual(models.User.objects.count(), 1)
        self.assertEqual(models.AnotherProfile.objects.count(), 1)
        self.assertEqual(models.Site.objects.count(), 2)
        self.assertEqual(models.AnotherAvatar.objects.count(), 2)
        self.assertEqual(models.AccessKey.objects.count(), 1)

    def test_update_another_user_with_explicit_source(self):
        """Update works when nested fields are declared with `source=`."""
        serializer = serializers.AnotherUserSerializer(
            data=self.get_another_initial_data())
        serializer.is_valid(raise_exception=True)
        user = serializer.save()

        # Update
        user_pk = user.pk
        profile_pk = user.anotherprofile.pk

        serializer = serializers.AnotherUserSerializer(
            instance=user,
            data={
                'pk': user_pk,
                'username': 'new',
                'another_profile': {
                    'pk': profile_pk,
                    'another_access_key': None,
                    'another_sites': [
                        {
                            'url': 'http://new-site.com',
                        },
                    ],
                    'another_avatars': [
                        {
                            'pk': user.anotherprofile.avatars.earliest('pk').pk,
                            'image': 'old-image-1.png',
                        },
                        {
                            'image': 'new-image-1.png',
                        },
                        {
                            'image': 'new-image-2.png',
                        },
                    ],
                },
            },
        )
        serializer.is_valid(raise_exception=True)
        user = serializer.save()
        user.refresh_from_db()
        self.assertIsNotNone(user)
        self.assertEqual(user.pk, user_pk)
        self.assertEqual(user.username, 'new')

        profile = user.anotherprofile
        self.assertIsNotNone(profile)
        self.assertIsNone(profile.access_key)
        self.assertEqual(profile.pk, profile_pk)
        self.assertEqual(profile.sites.count(), 1)
        self.assertSetEqual(
            set(profile.sites.values_list('url', flat=True)),
            {'http://new-site.com'}
        )
        self.assertEqual(profile.avatars.count(), 3)
        self.assertSetEqual(
            set(profile.avatars.values_list('image', flat=True)),
            {'old-image-1.png', 'new-image-1.png', 'new-image-2.png'}
        )

        # Check instances count
        self.assertEqual(models.User.objects.count(), 1)
        self.assertEqual(models.AnotherProfile.objects.count(), 1)
        self.assertEqual(models.AnotherAvatar.objects.count(), 3)

        # Access key shouldn't be removed because it is FK
        self.assertEqual(models.AccessKey.objects.count(), 1)

        # Sites shouldn't be deleted either as it is M2M
        self.assertEqual(models.Site.objects.count(), 3)

    def test_create_with_html_input_data(self):
        """Serializer should not fail if request type is multipart
        """
        # DRF sets data to `QueryDict` when request type is `multipart`
        data = QueryDict('name=team')
        serializer = serializers.TeamSerializer(
            data=data
        )
        serializer.is_valid(raise_exception=True)
        team = serializer.save()
        self.assertTrue(models.Team.objects.filter(id=team.id).exists())
        self.assertEqual(team.name, 'team')
| 36.455662 | 80 | 0.547339 |
79c4dd93143fa8ecfe711491a8026fb2c505179a | 7,891 | py | Python | statsmodels/duration/tests/test_survfunc.py | amitmse/statsmodels | 54d6c2b6821b79d3c28c04f66bd981b1ae4f5f29 | [
"BSD-3-Clause"
] | 1 | 2015-07-26T19:34:17.000Z | 2015-07-26T19:34:17.000Z | statsmodels/duration/tests/test_survfunc.py | gef756/statsmodels | c09c1b871ddb50378ff8c54405e2117c13e64a85 | [
"BSD-3-Clause"
] | null | null | null | statsmodels/duration/tests/test_survfunc.py | gef756/statsmodels | c09c1b871ddb50378ff8c54405e2117c13e64a85 | [
"BSD-3-Clause"
] | 1 | 2021-11-22T22:08:58.000Z | 2021-11-22T22:08:58.000Z | import numpy as np
from statsmodels.duration.survfunc import SurvfuncRight, survdiff, plot_survfunc
from numpy.testing import assert_allclose
from numpy.testing import dec
import pandas as pd
import os
# If true, the output is written to a multi-page pdf file.
# If True, plots are written to a multi-page PDF instead of being discarded.
pdf_output = False

# matplotlib is optional: the plotting test below is skipped when absent.
try:
    import matplotlib.pyplot as plt
    import matplotlib
    have_matplotlib = True
except ImportError:
    have_matplotlib = False
def close_or_save(pdf, fig):
    """Save *fig* to the PdfPages object *pdf* if PDF output is enabled,
    otherwise close the figure to free its resources."""
    if pdf_output:
        pdf.savefig(fig)
    else:
        plt.close(fig)
"""
library(survival)
ti1 = c(3, 1, 2, 3, 2, 1, 5, 3)
st1 = c(0, 1, 1, 1, 0, 0, 1, 0)
ti2 = c(1, 1, 2, 3, 7, 1, 5, 3, 9)
st2 = c(0, 1, 0, 0, 1, 0, 1, 0, 1)
ti = c(ti1, ti2)
st = c(st1, st2)
ix = c(rep(1, length(ti1)), rep(2, length(ti2)))
sd = survdiff(Surv(ti, st) ~ ix)
"""
ti1 = np.r_[3, 1, 2, 3, 2, 1, 5, 3]
st1 = np.r_[0, 1, 1, 1, 0, 0, 1, 0]
times1 = np.r_[1, 2, 3, 5]
surv_prob1 = np.r_[0.8750000, 0.7291667, 0.5468750, 0.0000000]
surv_prob_se1 = np.r_[0.1169268, 0.1649762, 0.2005800, np.nan]
ti2 = np.r_[1, 1, 2, 3, 7, 1, 5, 3, 9]
st2 = np.r_[0, 1, 0, 0, 1, 0, 1, 0, 1]
times2 = np.r_[1, 5, 7, 9]
surv_prob2 = np.r_[0.8888889, 0.5925926, 0.2962963, 0.0000000]
surv_prob_se2 = np.r_[0.1047566, 0.2518034, 0.2444320, np.nan]
cur_dir = os.path.dirname(os.path.abspath(__file__))
fp = os.path.join(cur_dir, 'results', 'bmt.csv')
bmt = pd.read_csv(fp)
def test_survfunc1():
    """
    Test where all times have at least 1 event.
    """
    sr = SurvfuncRight(ti1, st1)
    assert_allclose(sr.surv_prob, surv_prob1, atol=1e-5, rtol=1e-5)
    assert_allclose(sr.surv_prob_se, surv_prob_se1, atol=1e-5, rtol=1e-5)
    assert_allclose(sr.surv_times, times1)
def test_survfunc2():
    """
    Test where some times have no events.
    """
    sr = SurvfuncRight(ti2, st2)
    assert_allclose(sr.surv_prob, surv_prob2, atol=1e-5, rtol=1e-5)
    assert_allclose(sr.surv_prob_se, surv_prob_se2, atol=1e-5, rtol=1e-5)
    assert_allclose(sr.surv_times, times2)
def test_survdiff_basic():
    """Two-group log-rank test; reference chi-square/p-value from R."""
    # Constants taken from R, code above
    ti = np.concatenate((ti1, ti2))
    st = np.concatenate((st1, st2))
    groups = np.ones(len(ti))
    groups[0:len(ti1)] = 0
    z, p = survdiff(ti, st, groups)
    assert_allclose(z, 2.14673, atol=1e-4, rtol=1e-4)
    assert_allclose(p, 0.14287, atol=1e-4, rtol=1e-4)
def test_simultaneous_cb():
    """Simultaneous confidence bands for the ALL group of the BMT data."""
    # The exact numbers here are regression tests, but they are close
    # to page 103 of Klein and Moeschberger.
    df = bmt.loc[bmt["Group"] == "ALL", :]
    sf = SurvfuncRight(df["T"], df["Status"])
    lcb1, ucb1 = sf.simultaneous_cb(transform="log")
    lcb2, ucb2 = sf.simultaneous_cb(transform="arcsin")

    # Spot-check the bands at four specific survival times.
    ti = sf.surv_times.tolist()
    ix = [ti.index(x) for x in (110, 122, 129, 172)]
    assert_allclose(lcb1[ix], np.r_[0.43590582, 0.42115592, 0.4035897, 0.38785927])
    assert_allclose(ucb1[ix], np.r_[0.93491636, 0.89776803, 0.87922239, 0.85894181])
    assert_allclose(lcb2[ix], np.r_[0.52115708, 0.48079378, 0.45595321, 0.43341115])
    assert_allclose(ucb2[ix], np.r_[0.96465636, 0.92745068, 0.90885428, 0.88796708])
def test_bmt():
    """Kaplan-Meier estimates and quantile CIs against SAS reference output."""
    # All tests against SAS
    # Results taken from here:
    # http://support.sas.com/documentation/cdl/en/statug/68162/HTML/default/viewer.htm#statug_lifetest_details03.htm

    # Confidence intervals for 25% percentile of the survival
    # distribution (for "ALL" subjects), taken from the SAS web site
    cb = {"linear" : [107, 276],
          "cloglog" : [86, 230],
          "log" : [107, 332],
          "asinsqrt" : [104, 276],
          "logit" : [104, 230]}

    dfa = bmt[bmt.Group == "ALL"]

    cur_dir = os.path.dirname(os.path.abspath(__file__))
    fp = os.path.join(cur_dir, 'results', 'bmt_results.csv')
    rslt = pd.read_csv(fp)

    sf = SurvfuncRight(dfa["T"].values, dfa.Status.values)

    assert_allclose(sf.surv_times, rslt.t)
    assert_allclose(sf.surv_prob, rslt.s, atol=1e-4, rtol=1e-4)
    assert_allclose(sf.surv_prob_se, rslt.se, atol=1e-4, rtol=1e-4)

    # Each CI transform must reproduce the SAS interval exactly.
    for method in "linear", "cloglog", "log", "logit", "asinsqrt":
        lcb, ucb = sf.quantile_ci(0.25, method=method)
        assert_allclose(cb[method], np.r_[lcb, ucb])
def test_survdiff():
    """
    Weighted and stratified two-sample tests against R reference values.

    Results come from R survival and survMisc packages (survMisc is
    used for non G-rho family tests but does not seem to support
    stratification).
    """
    # Take an explicit copy: `bmt[mask]` returns a view-like slice, and the
    # `df["strata"] = ...` assignments below would raise
    # SettingWithCopyWarning and, under pandas copy-on-write, would not
    # modify df at all.
    df = bmt[bmt.Group != "ALL"].copy()

    # Not stratified
    stat, p = survdiff(df["T"], df.Status, df.Group)
    assert_allclose(stat, 13.44556, atol=1e-4, rtol=1e-4)

    stat, p = survdiff(df["T"], df.Status, df.Group, weight_type="gb")
    assert_allclose(stat, 15.38787, atol=1e-4, rtol=1e-4)

    stat, p = survdiff(df["T"], df.Status, df.Group, weight_type="tw")
    assert_allclose(stat, 14.98382, atol=1e-4, rtol=1e-4)

    stat, p = survdiff(df["T"], df.Status, df.Group, weight_type="fh", fh_p=0.5)
    assert_allclose(stat, 14.46866, atol=1e-4, rtol=1e-4)

    stat, p = survdiff(df["T"], df.Status, df.Group, weight_type="fh", fh_p=1)
    assert_allclose(stat, 14.84500, atol=1e-4, rtol=1e-4)

    # 5 strata
    strata = np.arange(df.shape[0]) % 5
    df["strata"] = strata

    stat, p = survdiff(df["T"], df.Status, df.Group, strata=df.strata)
    assert_allclose(stat, 11.97799, atol=1e-4, rtol=1e-4)

    stat, p = survdiff(df["T"], df.Status, df.Group, strata=df.strata,
                       weight_type="fh", fh_p=0.5)
    assert_allclose(stat, 12.6257, atol=1e-4, rtol=1e-4)

    stat, p = survdiff(df["T"], df.Status, df.Group, strata=df.strata,
                       weight_type="fh", fh_p=1)
    assert_allclose(stat, 12.73565, atol=1e-4, rtol=1e-4)

    # 8 strata
    df["strata"] = np.arange(df.shape[0]) % 8

    stat, p = survdiff(df["T"], df.Status, df.Group, strata=df.strata)
    assert_allclose(stat, 12.12631, atol=1e-4, rtol=1e-4)

    stat, p = survdiff(df["T"], df.Status, df.Group, strata=df.strata,
                       weight_type="fh", fh_p=0.5)
    assert_allclose(stat, 12.9633, atol=1e-4, rtol=1e-4)

    stat, p = survdiff(df["T"], df.Status, df.Group, strata=df.strata,
                       weight_type="fh", fh_p=1)
    assert_allclose(stat, 13.35259, atol=1e-4, rtol=1e-4)
@dec.skipif(not have_matplotlib)
def test_plot_km():
    """Smoke tests for survival-function plotting (figures are discarded
    unless module-level pdf_output is True)."""
    if pdf_output:
        from matplotlib.backends.backend_pdf import PdfPages
        pdf = PdfPages("test_survfunc.pdf")
    else:
        pdf = None

    sr1 = SurvfuncRight(ti1, st1)
    sr2 = SurvfuncRight(ti2, st2)

    # Single and multi-curve plots.
    fig = plot_survfunc(sr1)
    close_or_save(pdf, fig)

    fig = plot_survfunc(sr2)
    close_or_save(pdf, fig)

    fig = plot_survfunc([sr1, sr2])
    close_or_save(pdf, fig)

    # Plot the SAS BMT data
    gb = bmt.groupby("Group")
    sv = []
    for g in gb:
        s0 = SurvfuncRight(g[1]["T"], g[1]["Status"], title=g[0])
        sv.append(s0)
    fig = plot_survfunc(sv)
    ax = fig.get_axes()[0]
    ax.set_position([0.1, 0.1, 0.64, 0.8])
    ha, lb = ax.get_legend_handles_labels()
    # Legend shows only the survival curves (every other handle).
    leg = fig.legend([ha[k] for k in (0,2,4)], [lb[k] for k in (0,2,4)],
                     'center right')
    close_or_save(pdf, fig)

    # Simultaneous CB for BMT data
    ii = bmt.Group == "ALL"
    sf = SurvfuncRight(bmt.loc[ii, "T"], bmt.loc[ii, "Status"])
    fig = sf.plot()
    ax = fig.get_axes()[0]
    ax.set_position([0.1, 0.1, 0.64, 0.8])
    ha, lb = ax.get_legend_handles_labels()
    lcb, ucb = sf.simultaneous_cb(transform="log")
    plt.fill_between(sf.surv_times, lcb, ucb, color="lightgrey")
    lcb, ucb = sf.simultaneous_cb(transform="arcsin")
    plt.plot(sf.surv_times, lcb, color="darkgrey")
    plt.plot(sf.surv_times, ucb, color="darkgrey")
    # Pointwise +/- 2 SE bands for visual comparison with the simultaneous CB.
    plt.plot(sf.surv_times, sf.surv_prob - 2*sf.surv_prob_se, color="red")
    plt.plot(sf.surv_times, sf.surv_prob + 2*sf.surv_prob_se, color="red")
    plt.xlim(100, 600)
    close_or_save(pdf, fig)

    if pdf_output:
        pdf.close()
| 32.742739 | 116 | 0.633633 |
4a572ccd8583fd7c42858fb304a6a36a5f579a68 | 1,327 | py | Python | savu/test/travis/plugin_tests/loader_tests/nx_monitor_loader_test.py | dtasev/Savu | acb2578c85472e76cb292c4242c1ed2f2332f3e3 | [
"Apache-2.0"
] | 39 | 2015-03-30T14:03:42.000Z | 2022-03-16T16:50:33.000Z | savu/test/travis/plugin_tests/loader_tests/nx_monitor_loader_test.py | dtasev/Savu | acb2578c85472e76cb292c4242c1ed2f2332f3e3 | [
"Apache-2.0"
] | 670 | 2015-02-11T11:08:09.000Z | 2022-03-21T09:27:57.000Z | savu/test/travis/plugin_tests/loader_tests/nx_monitor_loader_test.py | DTasev/Savu | acb2578c85472e76cb292c4242c1ed2f2332f3e3 | [
"Apache-2.0"
] | 54 | 2015-02-13T14:09:52.000Z | 2022-01-24T13:57:09.000Z | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: nx_xrd_loader_test
:platform: Unix
:synopsis: testing the nx_xrd loader
.. moduleauthor:: Aaron Parsons <scientificsoftware@diamond.ac.uk>
"""
import unittest
from savu.test import test_utils as tu
from savu.test.travis.framework_tests.plugin_runner_test import \
run_protected_plugin_runner
class NxMonitorLoaderTest(unittest.TestCase):
    """Run a basic Savu process list against the NXmonitor loader."""
    # NOTE(review): `global` at class scope makes these assignments create
    # module-level names, so the test method can reference them unqualified.
    global data_file, experiment
    data_file = 'mm.nxs'
    experiment = None
    def test_nx_monitor(self):
        # Build runner options for the sample data file and process list,
        # execute the protected plugin runner, then remove temporary output.
        process_list = 'loaders/basic_monitor_process.nxs'
        options = tu.initialise_options(data_file, experiment, process_list)
        run_protected_plugin_runner(options)
        tu.cleanup(options)
if __name__ == "__main__":
    unittest.main()
| 31.595238 | 76 | 0.749812 |
bb4a98f22f718d957e108bd4fa6a766371579925 | 3,831 | py | Python | test/test_quantity.py | keefehuang/c3 | 1df65bc43d891c7af77c8eff0ef8879b2fe5c3e6 | [
"Apache-2.0"
] | 45 | 2020-11-02T13:26:26.000Z | 2022-03-20T13:13:42.000Z | test/test_quantity.py | keefehuang/c3 | 1df65bc43d891c7af77c8eff0ef8879b2fe5c3e6 | [
"Apache-2.0"
] | 165 | 2020-10-30T17:20:25.000Z | 2022-03-31T12:59:48.000Z | test/test_quantity.py | keefehuang/c3 | 1df65bc43d891c7af77c8eff0ef8879b2fe5c3e6 | [
"Apache-2.0"
] | 28 | 2020-10-21T04:10:26.000Z | 2022-02-17T17:44:55.000Z | """Unit tests for Quantity class"""
import hjson
import numpy as np
import pytest
from c3.c3objs import Quantity, hjson_encode
# Shared fixtures: Quantity instances paired with the dicts used to build
# and serialize them in the tests below.
# Simple bounded scalar quantity.
amp = Quantity(value=0.0, min_val=-1.0, max_val=+1.0, unit="V")
amp_dict = {
    "value": 0.0,
    "min_val": -1.0,
    "max_val": 1.0,
    "unit": "V",
    "symbol": "\\alpha",
}
# Frequency with a "2pi" unit; test_qty_set_2pi shows the stored value is
# the set value multiplied by 2*pi.
freq = Quantity(
    value=5.6e9, min_val=5.595e9, max_val=5.605e9, unit="Hz 2pi", symbol="\\omega"
)
freq_dict = {
    "value": 5.6e9,
    "min_val": 5.595e9,
    "max_val": 5.605e9,
    "unit": "Hz 2pi",
    "symbol": "\\omega",
}
# Gate duration used by the set/min/max/opt-value tests.
gate_time = Quantity(
    value=5.3246e-9, min_val=2e-9, max_val=10e-9, unit="s", symbol=r"t_g"
)
# Matrix-valued quantity with elementwise bounds.
matrix = Quantity(
    value=[[0, 1], [1, 0]],
    min_val=[[0, 0], [0, 0]],
    max_val=[[1, 1], [1, 1]],
    unit="",
    symbol=r"M",
)
@pytest.mark.unit
def test_qty_2pi() -> None:
    """A "Hz 2pi" quantity serializes back to its defining dict."""
    assert freq.asdict() == freq_dict


@pytest.mark.unit
def test_qty_set_2pi() -> None:
    """set_value on a "Hz 2pi" quantity stores the value multiplied by 2*pi."""
    freq.set_value(5.602e9)
    # Fix: wrap the difference in abs().  The original `x - y < eps` was
    # vacuous — any negative difference (value too small) passed trivially.
    assert abs(freq.get_value() - 5.602e9 * 2 * np.pi) < 1e-8


@pytest.mark.unit
def test_qty_asdict() -> None:
    """A plain quantity serializes back to its defining dict."""
    assert amp.asdict() == amp_dict


@pytest.mark.unit
def test_qty_write_cfg() -> None:
    """The serialized quantity must be dumpable to hjson without raising."""
    print(hjson.dumps(amp.asdict(), default=hjson_encode))


@pytest.mark.unit
def test_qty_read_cfg() -> None:
    """A quantity rebuilt from its dict serializes identically."""
    assert Quantity(**amp_dict).asdict() == amp.asdict()


@pytest.mark.unit
def test_qty_str() -> None:
    """String form uses the SI-prefixed engineering notation."""
    assert str(gate_time) == "5.325 ns "


@pytest.mark.unit
def test_qty_set() -> None:
    """set_value followed by get_value round-trips within float precision."""
    gate_time.set_value(7e-9)
    # abs() so the assertion also fails when get_value() is too small.
    assert abs(gate_time.get_value() - 7e-9) < 1e-15


@pytest.mark.unit
def test_qty_max() -> None:
    """An optimizer value of +1.0 maps to the quantity's max_val."""
    gate_time.set_opt_value(1.0)
    assert abs(gate_time.get_value() - 10e-9) < 1e-15


@pytest.mark.unit
def test_qty_min() -> None:
    """An optimizer value of -1.0 maps to the quantity's min_val."""
    gate_time.set_opt_value(-1.0)
    assert abs(gate_time.get_value() - 2e-9) < 1e-15


@pytest.mark.unit
def test_qty_get_opt() -> None:
    """The midpoint of [min_val, max_val] maps to optimizer value 0."""
    gate_time.set_value(6e-9)
    assert abs(gate_time.get_opt_value()) < 1e-15
@pytest.mark.unit
def test_qty_matrix_str() -> None:
    # Matrix quantities stringify as a flat row-major sequence of values.
    assert str(matrix) == "0.000 1.000 1.000 0.000 "
@pytest.mark.unit
def test_qty_matrix_set() -> None:
    # Setting a matrix value is reflected elementwise by numpy().
    matrix.set_value([[1.0, 0.0], [0.0, 1.0]])
    assert (matrix.numpy() == [[1, 0], [0, 1]]).all()
@pytest.mark.unit
def test_qty_matrix_set_opt() -> None:
    # NOTE(review): depends on test_qty_matrix_set having run first — the
    # expected flattened optimizer values correspond to the identity matrix
    # set there, not to the module-level initial value.
    assert (matrix.get_opt_value() == [1.0, -1.0, -1.0, 1.0]).all()
@pytest.mark.unit
def test_qty_np_conversions() -> None:
    # Quantities should interoperate with numpy ops and builtin conversions.
    a = Quantity(value=3, unit="unit")
    assert repr(a) == "3.000 unit"
    assert np.mod(a, 2) == 1.0
    assert type(a.numpy()) is np.float64 or type(a.numpy()) is np.ndarray
    assert a + a == 6
    np.array([a])  # test conversion
    np.array(a)
    float(a)
    assert np.mod([a], 2) == np.array([[1.0]])
    assert list(a) == [3.0]
    b = Quantity(np.array([0.0000001, 0.00001]))
    np.array([b])
    c = Quantity([0, 0.1], min_val=0, max_val=1)
    assert len(c) == 2
    assert c.shape == (2,)
@pytest.mark.unit
def test_qty_math() -> None:
    # Arithmetic with plain numbers works on both sides of each operator.
    a = 0.5
    b = Quantity(2)
    assert a + b == 2.5
    assert b + a == 2.5
    assert a - b == -1.5
    assert b - a == 1.5
    assert a * b == 1.0
    assert b * a == 1.0
    np.testing.assert_allclose(a ** b, 0.25)
    assert b ** a == 2 ** 0.5
    np.testing.assert_allclose(a / b, 0.25)
    assert b / a == 4.0
    assert b % a == 0
    # add/subtract mutate the quantity in place.
    qty = Quantity(3, min_val=0, max_val=5)
    qty.subtract(1.3)
    np.testing.assert_allclose(qty, 1.7)
    qty = Quantity(3, min_val=0, max_val=5)
    qty.add(0.3)
    np.testing.assert_allclose(qty, 3.3)
@pytest.mark.unit
def get_and_set() -> None:
    # NOTE(review): the name does not start with "test_", so pytest never
    # collects this function — confirm whether that is intentional.
    np.testing.assert_allclose(
        Quantity(0.3, min_val=0, max_val=1).get_opt_value(), [-0.4]
    )
    for val in np.linspace(0, 2):
        a = Quantity(val, min_val=-1, max_val=2)
        opt_val = a.get_opt_value()
        a.set_opt_value(opt_val)
        np.testing.assert_allclose(a, val)
| 22.803571 | 82 | 0.605586 |
87ec01f4f4fcd08bc7c3cdd2fbd533cc751e8bd3 | 779 | py | Python | var/spack/repos/builtin/packages/pangolin/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/pangolin/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/pangolin/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Pangolin(CMakePackage):
    """Pangolin is a lightweight portable rapid development library for
    managing OpenGL display / interaction and abstracting video input."""
    homepage = "https://github.com/stevenlovegrove/Pangolin"
    git      = "https://github.com/stevenlovegrove/Pangolin.git"
    # Only the development branch is declared; no tagged release versions.
    version('master', branch='master')
    # Required dependencies
    depends_on('cmake@2.8.12:', type='build')
    depends_on('gl')
    depends_on('glew')
    depends_on('glu', type='link')
    # Optional dependencies
    depends_on('eigen')
| 28.851852 | 73 | 0.713736 |
59221067cfd769ae80078f550463a65c20e928b9 | 3,747 | py | Python | SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Geometry/Two_Dimensional/Planform/rescale_non_dimensional.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
] | null | null | null | SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Geometry/Two_Dimensional/Planform/rescale_non_dimensional.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
] | null | null | null | SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Geometry/Two_Dimensional/Planform/rescale_non_dimensional.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
] | null | null | null | ## @ingroup Planform
#rescale_non_dimensional.py
# Created : May 2020, E. Botero
# Modified:
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import SUAVE
import numpy as np
# ----------------------------------------------------------------------
# Set Origin Non-Dimensional
# ----------------------------------------------------------------------
def set_origin_non_dimensional(vehicle):
    """ Normalizes the origins of wings, fuselages and network engines by a
    reference length and stores the result on each component as
    non_dimensional_origin. Useful for optimization or generative design.
    Assumptions:
    None
    Source:
    N/A
    Inputs:
    vehicle    [SUAVE Vehicle]
    Outputs:
    vehicle    [SUAVE Vehicle]
    Properties Used:
    None
    """
    # Reference length: total fuselage length when available, otherwise the
    # main wing length, otherwise fall back to 1 (no scaling).
    try:
        scale = vehicle.fuselages.fuselage.lengths.total
    except:
        try:
            scale = vehicle.wings.main_wing.lengths.total
        except:
            scale = 1.

    for wing in vehicle.wings:
        wing.non_dimensional_origin = (np.array(wing.origin) / scale).tolist()

    for fuse in vehicle.fuselages:
        fuse.non_dimensional_origin = (np.array(fuse.origin) / scale).tolist()

    for network in vehicle.networks:
        dimensional = network.origin
        network.non_dimensional_origin.clear()
        for idx in range(int(network.number_of_engines)):
            scaled = (np.array(dimensional[idx]) / scale).tolist()
            network.non_dimensional_origin.append(scaled)

    return vehicle
# ----------------------------------------------------------------------
# Scale to Non-Dimensional
# ----------------------------------------------------------------------
def set_origin_dimensional(vehicle):
    """ Rebuilds the dimensional origins of wings, fuselages and network
    engines from their stored non_dimensional_origin values.
    Assumptions:
    None
    Source:
    N/A
    Inputs:
    vehicle    [SUAVE Vehicle]
    Outputs:
    vehicle    [SUAVE Vehicle]
    Properties Used:
    None
    """
    # Same reference length that was used when non-dimensionalizing.
    try:
        scale = vehicle.fuselages.fuselage.lengths.total
    except:
        try:
            scale = vehicle.wings.main_wing.lengths.total
        except:
            scale = 1.

    for wing in vehicle.wings:
        wing.origin = (np.array(wing.non_dimensional_origin) * scale).tolist()

    for fuse in vehicle.fuselages:
        fuse.origin = (np.array(fuse.non_dimensional_origin) * scale).tolist()

    for network in vehicle.networks:
        count = int(network.number_of_engines)
        base = network.non_dimensional_origin
        network.origin.clear()
        placed = np.zeros((count, 3))
        for idx in range(count):
            # NOTE(review): only base[0] is used for every engine; the
            # remaining engines are mirrored copies of it — presumably
            # intentional, confirm against callers.
            placed[idx, :] = np.array(base[0]) * scale
            if idx % 2 != 0:
                # Odd-indexed engines mirror across the centerline (flip y).
                placed[idx, 1] = -placed[idx, 1]
            elif idx == count - 1:
                # Even index AND last engine: odd engine count, so the final
                # engine sits on the centerline.
                placed[idx, 1] = 0.
        network.origin = placed.tolist()

    return vehicle
9f931adb9a1ffdb809cdf3bc9d4dc69da36aac34 | 4,319 | py | Python | scripts/utils/modify_metadata.py | vsevdrob/nft-brownie-ipfs-pinata-hashlips | 32080aa0eca2ee3143e1845adf0a246bbe148192 | [
"MIT"
] | 2 | 2022-03-06T21:14:58.000Z | 2022-03-16T00:22:22.000Z | scripts/utils/modify_metadata.py | webdriedesign/nft-brownie-ipfs-pinata-hashlips | 32080aa0eca2ee3143e1845adf0a246bbe148192 | [
"MIT"
] | 2 | 2022-03-17T00:53:53.000Z | 2022-03-17T00:59:20.000Z | scripts/utils/modify_metadata.py | webdriedesign/nft-brownie-ipfs-pinata-hashlips | 32080aa0eca2ee3143e1845adf0a246bbe148192 | [
"MIT"
] | 1 | 2022-03-06T21:08:22.000Z | 2022-03-06T21:08:22.000Z | from brownie import network
from helper_brownie import CHAINS
from scripts.utils.pinata import upload_file
from scripts.utils.helper import dump_to_json, load_from_json
from scripts.utils.config import PATH, PINATA, HASHLIPS
from scripts.collectible.config import (
COLLECTION,
SPREADSHEET,
SINGLE_EDITION_COLLECTION,
)
from scripts.utils.spreadsheet import _get_nft_spreadsheet_data
def main():
    """Script entry point: modify metadata with the default (None) token id."""
    modify_metadata()
def modify_metadata(_token_id: int = None):
    """
    Modify metadata that is generated by hashlips engine.
    Insert additional data from spreadsheet.
    Upload image and metadata to IPFS/Pinata if enabled.
    Return token_uri.
    """
    token_id = _token_id
    """
    @dev Add to image path a dinamic endpoint that is based on the random file name.
    """
    # Hashlips / multi-edition images are named "<token_id>.png"; a single
    # edition collection uses one fixed file name instead.
    image_path = (
        PATH["images"] + f"/{token_id}.png"
        if HASHLIPS["enabled"] or not SINGLE_EDITION_COLLECTION["enabled"]
        else PATH["images"] + f"/{SINGLE_EDITION_COLLECTION['file_name']}"
    )
    metadata_path = PATH["token_metadata"] + f"/{token_id}.json"
    token_uri_path = PATH["token_URIs"] + f"/{token_id}.json"
    metadata = load_from_json(metadata_path)
    token_uri = load_from_json(token_uri_path)
    print(f"Modifying metadata of token ID: {token_id} ...")
    # Delete unnecessary keys made by hashlips engine.
    if HASHLIPS["enabled"]:
        try:
            del metadata["dna"]
            del metadata["date"]
            del metadata["edition"]
            del metadata["compiler"]
        except KeyError:
            # Keys may already be absent; continue with what is there.
            print(f"---KeyError occured. Working further on tokenId {token_id}---")
    # Inserting spreadsheet data to the metadata.
    if SPREADSHEET["enabled"]:
        ss_data = _get_nft_spreadsheet_data(PATH["spreadsheet"], token_id)
        metadata["name"] = ss_data["NAME"]
        metadata["description"] = ss_data["DESCRIPTION"]
        metadata["creator"] = ss_data["CREATOR"]
        metadata["artist"] = ss_data["ARTIST"]
        # Optionally drop the hashlips-generated attributes before adding
        # the spreadsheet-defined trait types below.
        if (
            HASHLIPS["enabled"]
            and not HASHLIPS["include_generated_metadata_attributes"]
        ):
            metadata["attributes"] = []
        for key, value in ss_data.items():
            if key in SPREADSHEET["trait_types"]:
                for v in value:  # loop through value list
                    metadata["attributes"].append(
                        {"trait_type": key, "value": v.capitalize()}
                    )
    else:
        # No spreadsheet: take name/description/creator/artist from the
        # static collection config instead.
        if SINGLE_EDITION_COLLECTION["enabled"]:
            metadata["name"] = COLLECTION["artwork"]["name"]
            metadata["description"] = COLLECTION["artwork"]["description"]
        else:
            metadata["name"] = COLLECTION["artwork"]["name"] + f" #{token_id}"
            metadata["description"] = COLLECTION["artwork"]["description"]
        metadata["creator"] = COLLECTION["artwork"]["creator"]
        metadata["artist"] = COLLECTION["artwork"]["artist"]
    # Inserting external link to the metadata.
    if COLLECTION["external_link"]["enabled"]:
        metadata["external_link"] = _get_nft_external_link(token_id)
    # Inserting additional key/value to the metadata.
    if COLLECTION["artwork"]["additional_metadata"]["enabled"]:
        for k, v in COLLECTION["artwork"]["additional_metadata"]["data"].items():
            metadata[k] = v
    if PINATA["enabled"] and network.show_active() not in CHAINS["local"]:
        # Live network with Pinata enabled: pin image and metadata to IPFS
        # and record the real URIs.
        # metadata["image"] = upload_to_ipfs(image_path)
        metadata["image"] = upload_file(image_path)
        dump_to_json(metadata, metadata_path)
        # token_uri[str(token_id)] = upload_to_ipfs(metadata_path)
        token_uri[str(token_id)] = upload_file(metadata_path)
        dump_to_json(token_uri, token_uri_path)
    else:
        # Local chain (or Pinata disabled): write placeholder URIs only.
        metadata["image"] = f"ipfs://YourImageUri/{token_id}.png"
        dump_to_json(metadata, metadata_path)
        token_uri[str(token_id)] = f"ipfs://YourTokenUri/{token_id}.json"
        dump_to_json(token_uri, token_uri_path)
    print(f"Finished modifying metadata of token ID: {token_id}")
    return token_uri[str(token_id)]
def _get_nft_external_link(_token_id):
    """Return the collection's external URL, optionally suffixed with the token id."""
    link_cfg = COLLECTION["external_link"]
    base_url = link_cfg["url"]
    if link_cfg["include_token_id"]:
        return base_url + str(_token_id)
    return base_url
| 34.830645 | 84 | 0.648761 |
a92d539a31291e9cd8bcfa41577737679402103e | 11,536 | py | Python | SystemsReasoner/OrphanFixer.py | buzztoys/MDK | a542ffd9f6dd835e837e3e59af73027c0010d2bb | [
"BSD-3-Clause"
] | null | null | null | SystemsReasoner/OrphanFixer.py | buzztoys/MDK | a542ffd9f6dd835e837e3e59af73027c0010d2bb | [
"BSD-3-Clause"
] | null | null | null | SystemsReasoner/OrphanFixer.py | buzztoys/MDK | a542ffd9f6dd835e837e3e59af73027c0010d2bb | [
"BSD-3-Clause"
] | null | null | null | #Creator: Louise Anderson
from java.lang import *
from com.nomagic.magicdraw.core import Application
from com.nomagic.uml2.ext.jmi.helpers import StereotypesHelper
from com.nomagic.magicdraw.openapi.uml import SessionManager
from com.nomagic.magicdraw.openapi.uml import ModelElementsManager
from com.nomagic.uml2.ext.jmi.helpers import ModelHelper
from com.nomagic.uml2.ext.magicdraw.classes.mdkernel import *
from com.nomagic.uml2.ext.magicdraw.classes.mdinterfaces import Interface
from com.nomagic.uml2.ext.magicdraw.mdprofiles import Stereotype
#from com.nomagic.uml2.ext.magicdraw.classes.mdkernel import Property
#from com.nomagic.uml2.ext.magicdraw.classes.mdkernel import Package
#from com.nomagic.uml2.ext.magicdraw.classes.mdkernel import AggregationKindEnum
#from com.nomagic.uml2.ext.magicdraw.classes.mdkernel import Enumeration
#from com.nomagic.uml2.ext.magicdraw.classes.mdkernel import Constraint
from com.nomagic.uml2.ext.magicdraw.compositestructures.mdports import Port
from com.nomagic.uml2.ext.magicdraw.classes.mdassociationclasses import AssociationClass
from com.nomagic.magicdraw.copypaste import CopyPasteManager
from com.nomagic.uml2.ext.magicdraw.activities.mdfundamentalactivities import Activity
from javax.swing import JOptionPane
from com.nomagic.magicdraw.teamwork.application import TeamworkUtils
import sys
import traceback
import os
#import json
import MDUtils._MDUtils as MDUtils
reload(MDUtils)
import SRUtils
reload(SRUtils)
import Validate_Structure
reload(Validate_Structure)
import Specialize
reload(Specialize)
import csv
# Global MagicDraw handles — this script runs inside MagicDraw's Jython shell.
gl = Application.getInstance().getGUILog()  # GUI log for user-visible messages
project = Application.getInstance().getProject()  # currently open project
proxyMan = project.getProxyManager()  # manager for (ghost) proxy elements
ef = project.getElementsFactory()
mem = ModelElementsManager.getInstance()
sm = SessionManager.getInstance()
disposeOrphanProxy=False
CSAF=StereotypesHelper.getProfile(project,"Mission Service Architecture Framework")
listPort={}  # deleted ports -> qualified names (reported at end of run)
listProp={}  # deleted properties -> qualified names (reported at end of run)
def ProxyResolver(proxyMan):
	"""Walk all MSAF-profile proxies in the project and repair orphans.

	For each ghost (orphan) proxy: orphan Generalizations are disposed of
	outright; for orphan Stereotypes, Ports and Properties the user is asked
	to pick a replacement, and the old-id -> new-id mapping is written to
	GoodStereo.csv / GoodPort.csv / GoodProp.csv ("0" when the user cancels).
	Redefining ports/properties are deleted when the user cancels.
	"""
	proxies=proxyMan.getProxies() #returns all registered proxies
	count=0
	StereoCollect=[]
	listPort={}
	listProp={}
	#StereoMap = open("StereoMap.dat","w")
	fileStereo=open("GoodStereo.csv","w")
	filePort=open("GoodPort.csv","w")
	fileProperty=open("GoodProp.csv","w")
	#GoodStereo=(fileStereo,delimiter=',',quoting=csv.QUOTE_ALL)
	##trying to filter proxies for only msaf ones
	proxies=StereotypesHelper.filterByProfile(proxies,CSAF)
	for proxy in proxies:
		#first need to check if orphan proxy
		#only want to work on proxies in the msaf
		if True==proxyMan.isGhostProxy(proxy):
			disposeOrphanProxy=False
			#(need to build in cases here of things we want to dispose)
			ProxyId=proxy.getID()
			#gl.log("what up what up the id of the proxy is===>"+str(ProxyId))
			if isinstance(proxy,Generalization): #this works
				count+=1
				disposeOrphanProxy=True
				gl.log("Removing orphan generalizations")
			if isinstance(proxy,Stereotype):
				gl.log("The name of the orphaned stereotype=====>"+proxy.getQualifiedName())
				elemSt=StereotypesHelper.getExtendedElements(proxy)
				#stereos=
				#get control service framework
				for elem in elemSt:
					gl.log("This element==> "+elem.getQualifiedName()+" is using the orphan proxy===> "+proxy.getQualifiedName())
				stereo=MDUtils.getUserSelections([Stereotype], CSAF, False,'Select Stereotype to Replace Orphan Stereotype==>'+proxy.getName(),[Stereotype,Package])
				for elem in elemSt:
					if stereo is not None:
						#StereoCollect[proxyID]=stereo.getID()
						gl.log("This element is using an orphaned stereotype====>"+elem.getQualifiedName()+ " and will be replaced with selected Stereotype===>"+stereo.getQualifiedName())
						StereotypesHelper.removeStereotype(elem,proxy)
						StereotypesHelper.addStereotype(elem,stereo)
						fileStereo.write(str(ProxyId)+","+str(stereo.getID())+"\n")
					else:
						#StereoCollect[proxyID]=None
						gl.log("Cancel was selected for the Stereotype, so the element will be left alone======>"+elem.getQualifiedName())
						StereotypesHelper.removeStereotype(elem,proxy)
						fileStereo.write(str(ProxyId)+","+"0"+"\n")
				#need to add flag here
				disposeOrphanProxy=True
			if isinstance(proxy,Port):
				portRedef=proxy.get_portOfRedefinedPort()
				portNew=list(portRedef)
				for port in portNew:
					gl.log("This port==> "+port.getQualifiedName()+" is using the orphan proxy===> "+proxy.getQualifiedName())
				portSelect=MDUtils.getUserSelections([Port], CSAF, False,'Select Port to Replace Orphan Port (used in redefintion)==>'+proxy.getQualifiedName(),[Port,Package,Class,Interface])
				for port in portNew:
					#gl.log("I just want to see what ports we are getting here====>"+port.getQualifiedName())
					#this gets all ports that are using the orphan port as a redefinition
					redefList=port.getRedefinedPort()
					redefList.remove(proxy)
					if portSelect is not None:
						#StereoCollect[proxyID]=stereo.getID()
						gl.log("This port is using an orphaned port for redefinition====>"+port.getQualifiedName()+ " and will be replaced with selected port===>"+portSelect.getQualifiedName())
						redefList.add(portSelect)
						filePort.write(str(ProxyId)+","+str(portSelect.getID())+"\n")
					else:
						#StereoCollect[proxyID]=None
						gl.log("Cancel was selected for the Port Selection, so the port will be deleted======>"+port.getQualifiedName())
						#StereotypesHelper.removeStereotype(elem,proxy)
						filePort.write(str(ProxyId)+","+"0"+"\n")
						#need to add flag here
						if port.isEditable():
							listPort[port]=port.getQualifiedName()
							mem.removeElement(port)
						else:
							gl.log("Error the element you are trying to delete is not editable")
				disposeOrphanProxy=True
			if isinstance(proxy,Property) and not isinstance(proxy,Port):
				propertyRedef=proxy.get_propertyOfRedefinedProperty()
				propNew=list(propertyRedef)
				for prop in propNew:
					gl.log("This property==> "+prop.getQualifiedName()+" is using the orphan proxy===> "+proxy.getQualifiedName())
				propSelect=MDUtils.getUserSelections([Property], CSAF, False,'Select Property to Replace Orphan Property (used in redefinition)==>'+proxy.getQualifiedName(),[Property,Package,Class,Interface])
				for prop in propNew:
					#gl.log("I just want to see what ports we are getting here====>"+port.getQualifiedName())
					#this gets all ports that are using the orphan port as a redefinition
					redefList=prop.getRedefinedProperty()
					redefList.remove(proxy)
					if propSelect is not None:
						#StereoCollect[proxyID]=stereo.getID()
						gl.log("This property is using an orphaned property for redefinition====>"+prop.getQualifiedName()+ " and will be replaced with selected property===>"+propSelect.getQualifiedName())
						redefList.add(propSelect)
						fileProperty.write(str(ProxyId)+","+str(propSelect.getID())+"\n")
					else:
						#StereoCollect[proxyID]=None
						gl.log("Cancel was selected for the Port Selection, so the port will be deleted======>"+prop.getQualifiedName())
						#StereotypesHelper.removeStereotype(elem,proxy)
						fileProperty.write(str(ProxyId)+","+"0"+"\n")
						#need to add flag here
						if prop.isEditable():
							listProp[prop]=prop.getQualifiedName()
							mem.removeElement(prop)
						else:
							gl.log("Error the element you are trying to delete is not editable")
				disposeOrphanProxy=True
			#need to add how to handle ports properties associations...whatever we can get
			#get all elements that are stereotyped this
			#check to make sure you have lock on full project
			#first created, creates you a mapping
			#remember what to replace with and what to delete and re-run with lists of items
			if disposeOrphanProxy==True:
				#decide whether or not we want to dispose of orphan proxy
				#gl.log("***********************If we come here we will dispose of an orphan")
				proxy.dispose()
				#mem.removeElement(proxy)================>This does not work
		# else: #if we are not just getting rid of orphan need to have the fix cases
		# gl.log("if we come here we will make something not a proxy anymore")
		# #proxyMan.makeNotProxy(proxy) ------this does very scary things
	gl.log("************************LIST OF PROPERTIES DELETED**********************")
	for p in listProp:
		gl.log("Property Deleted===> "+listProp[p])
	gl.log("************************LIST OF PORTS DELETED**********************")
	for q in listPort:
		gl.log("Port Deleted===> "+listPort[q])
	#now do the jason dump and the file write
	#strStereo=json.dumps([StereoCollect])
	#StereoMap.write(strStereo)
	fileStereo.close()
	filePort.close()
	fileProperty.close()
	# redefs = p.getRedefinedProperty()
	# if len(redefs) > 0:
	# redef = redefs[0]
	# type = redef.getType()
	# if type is not None:
	# for g in type.get_generalizationOfGeneral():
	# potential = g.getSpecific()
	# if potential.getName().endswith("Fix Me!"):
	# return potential
	return None
def blah(selects):
	"""Debug helper: log the ID of the given model element."""
	ids=selects.getID()
	gl.log("what up what up===>"+(ids))
	return
def run(mode):
	"""Run ProxyResolver inside a MagicDraw edit session.

	On any error the session is cancelled and the traceback is written to
	the MagicDraw GUI log.
	"""
	selected = Application.getInstance().getMainFrame().getBrowser().getActiveTree().getSelectedNode().getUserObject()
	if mode == 'b':
		# NOTE(review): identical to the unconditional assignment above and
		# `selected` is never used afterwards — likely leftover scaffolding.
		selected = Application.getInstance().getMainFrame().getBrowser().getActiveTree().getSelectedNode().getUserObject()
	try:
		SessionManager.getInstance().createSession("orphan")
		#blah(selected)
		ProxyResolver(proxyMan)
		SessionManager.getInstance().closeSession()
	except:
		# Roll back the model edits and surface the traceback in the GUI log.
		SessionManager.getInstance().cancelSession()
		exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
		messages=traceback.format_exception(exceptionType, exceptionValue, exceptionTraceback)
		for message in messages:
			gl.log(message)
| 51.730942 | 208 | 0.614078 |
3548628d37ef4d69539ec6725c07c2398469e6e8 | 1,359 | py | Python | ecommerce/permissions.py | umarmughal824/micromasters | ea92d3bcea9be4601150fc497302ddacc1161622 | [
"BSD-3-Clause"
] | null | null | null | ecommerce/permissions.py | umarmughal824/micromasters | ea92d3bcea9be4601150fc497302ddacc1161622 | [
"BSD-3-Clause"
] | 5 | 2021-03-02T01:42:17.000Z | 2021-06-10T20:42:02.000Z | ecommerce/permissions.py | umarmughal824/micromasters | ea92d3bcea9be4601150fc497302ddacc1161622 | [
"BSD-3-Clause"
] | null | null | null | """
Permission classes for ecommerce
"""
import hmac
import logging

from rest_framework.permissions import BasePermission

from ecommerce.api import generate_cybersource_sa_signature
from profiles.api import get_social_username
log = logging.getLogger(__name__)
class IsSignedByCyberSource(BasePermission):
    """
    Confirms that the message is signed by CyberSource
    """
    def has_permission(self, request, view):
        """
        Returns True if the request params carry a valid CyberSource signature.

        Args:
            request: Incoming request whose ``data`` contains a ``signature``
                key plus the signed fields.
            view: The view being accessed (unused).

        Returns:
            bool: True when the recomputed signature matches the supplied one.
        """
        signature = generate_cybersource_sa_signature(request.data)
        # Security fix: use hmac.compare_digest for a constant-time
        # comparison so an attacker cannot recover the expected signature
        # byte-by-byte from response timing.  Encode to bytes so
        # compare_digest accepts the values regardless of non-ASCII input.
        provided = str(request.data['signature'])
        if hmac.compare_digest(provided.encode("utf-8"), signature.encode("utf-8")):
            return True
        else:
            log.error(
                "Cybersource signature failed: we expected %s but we got %s. Payload: %s",
                signature,
                request.data['signature'],
                request.data,
            )
            return False
class IsLoggedInUser(BasePermission):
    """
    Confirms that the username in the request body is the same as the logged in user's.
    """
    def has_permission(self, request, view):
        """
        Returns true if the username in the request body matches the logged in user.
        """
        try:
            # A KeyError (no "username" key in the payload) means no match.
            name_matches = request.data['username'] == get_social_username(request.user)
        except KeyError:
            return False
        return name_matches
| 27.18 | 90 | 0.635026 |
47ac40c37360c3a0317ffe632a84c65c3ace91dd | 1,721 | py | Python | hearts/services/player.py | MHeasell/hearts-server | ecaff52803a97f7f250439d60faf50a1872cf38b | [
"MIT"
] | null | null | null | hearts/services/player.py | MHeasell/hearts-server | ecaff52803a97f7f250439d60faf50a1872cf38b | [
"MIT"
] | null | null | null | hearts/services/player.py | MHeasell/hearts-server | ecaff52803a97f7f250439d60faf50a1872cf38b | [
"MIT"
] | null | null | null | from passlib.apps import custom_app_context as pwd_context
class PlayerStateError(Exception):
    """Raised when an operation is invalid for the current player state."""
    def __init__(self, msg=""):
        # Fix: forward the message to Exception.__init__ so str(exc) and
        # exc.args carry it; previously str(exc) was always empty.
        super(PlayerStateError, self).__init__(msg)
        # Kept for backward compatibility with callers reading .message.
        self.message = msg
class PlayerExistsError(PlayerStateError):
    """Raised when creating a player whose name is already registered."""
    pass
class PlayerService(object):
    """In-memory store of players keyed by numeric id, with name lookup."""

    def __init__(self):
        # player_id -> {"id", "name", "password_hash"}
        self._players = {}
        # name -> player_id reverse index
        self._usernames = {}
        self._next_id = 1

    def get_player(self, player_id):
        """Return {"id", "name"} for the player, or None if unknown."""
        record = self._players.get(player_id)
        if record is None:
            return None
        return {"id": int(record["id"]), "name": record["name"]}

    def get_player_id(self, name):
        """Return the id registered under *name*, or None."""
        return self._usernames.get(name)

    def get_player_by_name(self, name):
        """Look up a player by name; None when the name is unregistered."""
        found_id = self.get_player_id(name)
        return None if found_id is None else self.get_player(found_id)

    def create_player(self, name, password):
        """Register a new player and return its id.

        Raises PlayerExistsError when *name* is already taken.
        """
        if name in self._usernames:
            raise PlayerExistsError()
        new_id = self._next_id
        self._next_id += 1
        self._players[new_id] = {
            "id": new_id,
            "name": name,
            "password_hash": pwd_context.encrypt(password),
        }
        self._usernames[name] = new_id
        return new_id

    def auth_player(self, player_id, password):
        """Check *password* against the stored hash; False for unknown ids."""
        record = self._players.get(player_id)
        if record is None:
            return False
        return pwd_context.verify(password, record["password_hash"])

    def remove_player(self, player_id):
        """Delete a player together with its name-index entry."""
        del self._usernames[self._players[player_id]["name"]]
        del self._players[player_id]
| 23.256757 | 58 | 0.602557 |
86310421988810c8fb88b646100e7371c808c160 | 1,304 | py | Python | mud/migrations/0013_auto_20190715_2206.py | cspt2-build-week-abr/Backend | 2e349f3ff73db1cbe9bb4f13fceae4847fe47f2f | [
"MIT"
] | null | null | null | mud/migrations/0013_auto_20190715_2206.py | cspt2-build-week-abr/Backend | 2e349f3ff73db1cbe9bb4f13fceae4847fe47f2f | [
"MIT"
] | 8 | 2019-12-04T23:44:37.000Z | 2022-02-10T11:45:47.000Z | mud/migrations/0013_auto_20190715_2206.py | cspt2-build-week-abr/Backend | 2e349f3ff73db1cbe9bb4f13fceae4847fe47f2f | [
"MIT"
] | null | null | null | # Generated by Django 2.2.3 on 2019-07-16 03:06
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    # Schema changes for the `mud` app: rename areas.exits -> areas.adjacents,
    # drop areas.coords, and switch the primary keys of areas / pokeballs /
    # pokemon / users to random UUIDs.
    dependencies = [
        ('mud', '0012_auto_20190715_2202'),
    ]
    operations = [
        migrations.RenameField(
            model_name='areas',
            old_name='exits',
            new_name='adjacents',
        ),
        migrations.RemoveField(
            model_name='areas',
            name='coords',
        ),
        migrations.AlterField(
            model_name='areas',
            name='id',
            field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='pokeballs',
            name='id',
            field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='pokemon',
            name='id',
            field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='users',
            name='id',
            field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
        ),
    ]
| 29.636364 | 106 | 0.577454 |
affb1f3baa25f32e67a614f79b5ccf444b2bda95 | 314 | py | Python | symphony/cli/pyinventory/common/constant.py | fbcode/magma_old | 054ef8e079478bda36d2b13b8a88386c6dc94ef2 | [
"BSD-3-Clause"
] | null | null | null | symphony/cli/pyinventory/common/constant.py | fbcode/magma_old | 054ef8e079478bda36d2b13b8a88386c6dc94ef2 | [
"BSD-3-Clause"
] | 6 | 2021-03-31T19:59:59.000Z | 2022-01-22T12:56:47.000Z | symphony/cli/pyinventory/common/constant.py | fbcode/magma_old | 054ef8e079478bda36d2b13b8a88386c6dc94ef2 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2004-present Facebook All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
__version__ = "3.0.0"
# Result-set sizes used when searching for equipments / locations.
EQUIPMENTS_TO_SEARCH = 10
LOCATIONS_TO_SEARCH = 5
# Page size used when iterating over large query results.
PAGINATION_STEP = 1000
# Account role codes.  NOTE(review): presumably these mirror a backend role
# enum — confirm against the consuming API.
USER_ROLE = 0
SUPERUSER_ROLE = 3
| 24.153846 | 58 | 0.761146 |
320c3b168bc43387c8158b67d29eb063f7ca48f3 | 3,453 | py | Python | Split_data_five_CV.py | ShiuLab/Pathway_gene_prediction_in_tomato | a2488fdbc673df03b6065221e0a0a67c86f29230 | [
"MIT"
] | null | null | null | Split_data_five_CV.py | ShiuLab/Pathway_gene_prediction_in_tomato | a2488fdbc673df03b6065221e0a0a67c86f29230 | [
"MIT"
] | null | null | null | Split_data_five_CV.py | ShiuLab/Pathway_gene_prediction_in_tomato | a2488fdbc673df03b6065221e0a0a67c86f29230 | [
"MIT"
] | null | null | null | import sys,os
import random
import copy
from random import shuffle
# Split nonoverlapping pathway annotations into a held-out test set plus
# five cross-validation folds (one training/validation file pair per fold).
#
# Input : Sly_pathway_annotation_..._nonoverlapping.txt (pathway<TAB>gene)
# Output: Nonoverlapping_pathway_size.txt, Pathway_genes_for_testing.txt,
#         Pathway_genes_for_5_{training,validation}_set{1..5}.txt

# Read the annotation file into P: pathway ID -> list of member genes.
inp = open('Sly_pathway_annotation_20190117_with_expression_5_members_nonoverlapping.txt','r').readlines()
P = {} ###P[pathway] = [gene1,gene2,...]
for inl in inp:
    pa = inl.strip().split('\t')[0]
    gene = inl.split('\t')[1].strip()
    if pa not in P:
        P[pa] = []
    P[pa].append(gene)

# Record the size of every pathway.
size = open('Nonoverlapping_pathway_size.txt','w')
for pa in P:
    size.write('%s\t%s\n'%(pa,len(P[pa])))
size.close()

# Hold out 5 random genes from every pathway with more than 25 members as
# a fixed test set, and remove them from the working copy P_copy.
test = open('Pathway_genes_for_testing.txt','w')
T = {}
P_copy = copy.deepcopy(P)
for pa in P:
    if len(P[pa]) > 25:
        for t in random.sample(P[pa], 5):
            T.setdefault(pa, []).append(t)
            P_copy[pa].remove(t)
            test.write('%s\t%s\n'%(pa,t))
test.close()

# One training/validation file pair per fold. This replaces five blocks of
# copy-pasted per-fold code from the original version; the output file names
# and their contents are unchanged.
N_FOLDS = 5
training = [open('Pathway_genes_for_5_training_set%d.txt' % (k + 1), 'w')
            for k in range(N_FOLDS)]
validation = [open('Pathway_genes_for_5_validation_set%d.txt' % (k + 1), 'w')
              for k in range(N_FOLDS)]
for pa in P_copy:
    gene_list = list(P_copy[pa])
    shuffle(gene_list)
    # Base fold size, with the remainder given to the LAST (len % N_FOLDS)
    # folds — identical layout to the original aa[] cumulative offsets.
    base, residue = divmod(len(gene_list), N_FOLDS)
    fold_sizes = [base + (1 if k >= N_FOLDS - residue else 0)
                  for k in range(N_FOLDS)]
    start = 0
    for k in range(N_FOLDS):
        stop = start + fold_sizes[k]
        for n in range(start, stop):
            line = '%s\t%s\n' % (pa, gene_list[n])
            validation[k].write(line)    # fold k validates set k+1 ...
            for j in range(N_FOLDS):     # ... and trains every other set
                if j != k:
                    training[j].write(line)
        start = stop
for k in range(N_FOLDS):
    validation[k].close()
    training[k].close()
| 34.53 | 125 | 0.667825 |
b36d3a4de8125b26e6cf2dfa71a6349721ed2ae7 | 1,351 | py | Python | tests/workflowTests/runMHFitWorkflow.py | MarkTravers/magLabUtilities | e116c8cb627cd82c3b8ba651dd6979b66e568632 | [
"MIT"
] | null | null | null | tests/workflowTests/runMHFitWorkflow.py | MarkTravers/magLabUtilities | e116c8cb627cd82c3b8ba651dd6979b66e568632 | [
"MIT"
] | null | null | null | tests/workflowTests/runMHFitWorkflow.py | MarkTravers/magLabUtilities | e116c8cb627cd82c3b8ba651dd6979b66e568632 | [
"MIT"
] | null | null | null | #!python3
if __name__=='__main__':
    # Input data and tuning-history output locations.
    fp = './tests/workflowTests/datafiles/testLoops.xlsx'
    tuneHistoryFP = './tests/workflowTests/datafiles/tuneHistory00.txt'
    # Fit parameters: starting value and allowed [min, max] search range.
    parameterDefs = {
        'hCoercive': {'initialValue':605.0, 'limits':[0.0,10000.0]},
        'xInit': {'initialValue':61.25, 'limits':[60.0,90.0]},
        'mSat': {'initialValue':1.65e6, 'limits':[1.0e6,2.0e6]},
        'hCoop': {'initialValue':1190.0, 'limits':[100.0,10000.0]},
        'hAnh': {'initialValue':5200.0, 'limits':[100.0,10000.0]},
        'xcPow': {'initialValue':2.0, 'limits':[0.0,10.0]}
    }
    # Per-parameter neighbor offsets for the gradient-descent tuner:
    # one step below, at, and above the current value.
    gradientDescentConfig = {
        'hCoercive': {'localNeighborSteps':[-15.0, 0.0, 15.0]},
        'xInit': {'localNeighborSteps':[-0.25, 0.0, 0.25]},
        # Bug fix: the lower step was +0.001e6 (identical to the upper step),
        # so mSat could never be stepped downward. Made negative to match the
        # [-step, 0, +step] pattern used by every other parameter.
        'mSat': {'localNeighborSteps':[-0.001e6, 0.0, 0.001e6]},
        'hCoop': {'localNeighborSteps':[-15.0, 0.0, 15.0]},
        'hAnh': {'localNeighborSteps':[-15.0, 0.0, 15.0]},
        'xcPow': {'localNeighborSteps':[-0.1, 0.0, 0.1]}
    }
| 56.291667 | 93 | 0.415248 |
bbbf653f4cf86895e1ad4b93d88547fc15b57385 | 14,244 | py | Python | tmp.py | HwangToeMat/tmp | a4f48443b16b5e07a9cf95f54651ade8c7669134 | [
"Apache-2.0"
] | 10 | 2020-08-28T08:03:28.000Z | 2022-03-26T21:20:44.000Z | alphapose/utils/pPose_nms.py | HwangToeMat/PoseEstimation_Scoring-Your-Video | 16c49b00007135d9b274b6c1e23d6e6c942ec951 | [
"Apache-2.0"
] | null | null | null | alphapose/utils/pPose_nms.py | HwangToeMat/PoseEstimation_Scoring-Your-Video | 16c49b00007135d9b274b6c1e23d6e6c942ec951 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import os
import zipfile
import time
from multiprocessing.dummy import Pool as ThreadPool
import torch
import numpy as np
''' Constant Configuration '''
# Parameters of the parametric pose-NMS criterion (used by pose_nms and
# get_parametric_distance below).
delta1 = 1        # softening of the tanh score-similarity term
mu = 1.7          # weight of the spatial-distance term in the similarity
delta2 = 2.65     # scale of the exponential spatial-distance term
gamma = 22.48     # similarity threshold above which a pose is suppressed
scoreThreds = 0.3    # minimum best-keypoint score to keep a merged pose
matchThreds = 5      # min. PCK-matching keypoints to treat poses as duplicates
alpha = 0.1       # reference distance = alpha * longer side of the bbox
#pool = ThreadPool(4)  # thread pool for the (disabled) filter_result path
def pose_nms(bboxes, bbox_scores, bbox_ids, pose_preds, pose_scores, areaThres=0):
    '''
    Parametric Pose NMS algorithm
    bboxes: bbox locations list (n, 4)
    bbox_scores: bbox scores list (n, 1)
    bbox_ids: bbox tracking ids list (n, 1)
    pose_preds: pose locations list (n, kp_num, 2)
    pose_scores: pose scores list (n, kp_num, 1)
    areaThres: minimum scaled keypoint-bbox area for keeping a merged pose
    Returns a list of result dicts with keys:
    'box', 'keypoints', 'kp_score', 'proposal_score', 'idx'.
    '''
    #global ori_pose_preds, ori_pose_scores, ref_dists
    # Zero scores would break the score-weighted merge (division by the score
    # sum), so replace them with a tiny epsilon.
    pose_scores[pose_scores == 0] = 1e-5
    kp_nums = pose_preds.size()[1]
    final_result = []
    # Keep untouched copies: the NMS loop below destructively deletes rows.
    ori_bboxes = bboxes.clone()
    ori_bbox_scores = bbox_scores.clone()
    ori_bbox_ids = bbox_ids.clone()
    ori_pose_preds = pose_preds.clone()
    ori_pose_scores = pose_scores.clone()
    xmax = bboxes[:, 2]
    xmin = bboxes[:, 0]
    ymax = bboxes[:, 3]
    ymin = bboxes[:, 1]
    widths = xmax - xmin
    heights = ymax - ymin
    # Per-box reference distance: alpha times the longer box side.
    ref_dists = alpha * np.maximum(widths, heights)
    nsamples = bboxes.shape[0]
    # A pose's overall score is the mean of its keypoint scores.
    human_scores = pose_scores.mean(dim=1)
    human_ids = np.arange(nsamples)
    # Do pPose-NMS
    pick = []
    merge_ids = []
    while(human_scores.shape[0] != 0):
        # Pick the one with highest score
        pick_id = torch.argmax(human_scores)
        pick.append(human_ids[pick_id])
        # num_visPart = torch.sum(pose_scores[pick_id] > 0.2)
        # Get numbers of match keypoints by calling PCK_match
        ref_dist = ref_dists[human_ids[pick_id]]
        simi = get_parametric_distance(pick_id, pose_preds, pose_scores, ref_dist)
        num_match_keypoints = PCK_match(pose_preds[pick_id], pose_preds, ref_dist)
        # Delete humans who have more than matchThreds keypoints overlap and high similarity
        delete_ids = torch.from_numpy(np.arange(human_scores.shape[0]))[((simi > gamma) | (num_match_keypoints >= matchThreds))]
        if delete_ids.shape[0] == 0:
            # Nothing matched: at minimum drop the picked pose itself so the
            # loop is guaranteed to make progress.
            delete_ids = pick_id
        #else:
        #    delete_ids = torch.from_numpy(delete_ids)
        # NOTE(review): np.delete on torch tensors yields numpy-backed
        # results; later iterations mix torch ops with those results —
        # confirm this behaves with the torch/numpy versions in use.
        merge_ids.append(human_ids[delete_ids])
        pose_preds = np.delete(pose_preds, delete_ids, axis=0)
        pose_scores = np.delete(pose_scores, delete_ids, axis=0)
        human_ids = np.delete(human_ids, delete_ids)
        human_scores = np.delete(human_scores, delete_ids, axis=0)
        bbox_scores = np.delete(bbox_scores, delete_ids, axis=0)
        bbox_ids = np.delete(bbox_ids, delete_ids, axis=0)
    assert len(merge_ids) == len(pick)
    # Gather the surviving ("picked") poses from the untouched copies.
    preds_pick = ori_pose_preds[pick]
    scores_pick = ori_pose_scores[pick]
    bbox_scores_pick = ori_bbox_scores[pick]
    bboxes_pick = ori_bboxes[pick]
    bbox_ids_pick = ori_bbox_ids[pick]
    #final_result = pool.map(filter_result, zip(scores_pick, merge_ids, preds_pick, pick, bbox_scores_pick))
    #final_result = [item for item in final_result if item is not None]
    for j in range(len(pick)):
        ids = np.arange(kp_nums)
        max_score = torch.max(scores_pick[j, ids, 0])
        # Drop poses whose best keypoint is below the score threshold.
        if max_score < scoreThreds:
            continue
        # Merge poses
        merge_id = merge_ids[j]
        merge_pose, merge_score = p_merge_fast(
            preds_pick[j], ori_pose_preds[merge_id], ori_pose_scores[merge_id], ref_dists[pick[j]])
        max_score = torch.max(merge_score[ids])
        if max_score < scoreThreds:
            continue
        # Bounding extents of the merged keypoints, used for the area filter.
        xmax = max(merge_pose[:, 0])
        xmin = min(merge_pose[:, 0])
        ymax = max(merge_pose[:, 1])
        ymin = min(merge_pose[:, 1])
        bbox = bboxes_pick[j].cpu().tolist()
        if (1.5 ** 2 * (xmax - xmin) * (ymax - ymin) < areaThres):
            continue
        final_result.append({
            'box': [bbox[0], bbox[1], bbox[2]-bbox[0],bbox[3]-bbox[1]],
            'keypoints': merge_pose - 0.3,
            'kp_score': merge_score,
            'proposal_score': torch.mean(merge_score) + bbox_scores_pick[j] + 1.25 * max(merge_score),
            'idx' : ori_bbox_ids[merge_id].tolist()
        })
    return final_result
def filter_result(args):
    """Worker for the (commented-out) pool.map path in pose_nms: merge one
    picked pose with its cluster and return a result dict, or None if it
    fails the score/area filters.

    NOTE(review): this relies on module globals ori_pose_preds,
    ori_pose_scores and ref_dists that are never assigned at module level
    (the matching ``global`` statement in pose_nms is commented out), so
    calling it as-is would raise NameError — effectively dead code.
    """
    score_pick, merge_id, pred_pick, pick, bbox_score_pick = args
    global ori_pose_preds, ori_pose_scores, ref_dists
    kp_nums = ori_pose_preds.size()[1]
    ids = np.arange(kp_nums)
    max_score = torch.max(score_pick[ids, 0])
    if max_score < scoreThreds:
        return None
    # Merge poses
    merge_pose, merge_score = p_merge_fast(
        pred_pick, ori_pose_preds[merge_id], ori_pose_scores[merge_id], ref_dists[pick])
    max_score = torch.max(merge_score[ids])
    if max_score < scoreThreds:
        return None
    xmax = max(merge_pose[:, 0])
    xmin = min(merge_pose[:, 0])
    ymax = max(merge_pose[:, 1])
    ymin = min(merge_pose[:, 1])
    # Hard-coded minimum-area filter (pose_nms uses areaThres instead).
    if (1.5 ** 2 * (xmax - xmin) * (ymax - ymin) < 40 * 40.5):
        return None
    return {
        'keypoints': merge_pose - 0.3,
        'kp_score': merge_score,
        'proposal_score': torch.mean(merge_score) + bbox_score_pick + 1.25 * max(merge_score)
    }
def p_merge(ref_pose, cluster_preds, cluster_scores, ref_dist):
    """Merge a cluster of redundant poses into a single pose, weighting
    each joint location by its normalized confidence score.

    Args:
        ref_pose: reference pose -- [kp_num, 2]
        cluster_preds: redundant poses -- [n, kp_num, 2]
        cluster_scores: redundant poses score -- [n, kp_num, 1]
        ref_dist: reference scale (scalar, capped at 15)

    Returns:
        (final_pose [kp_num, 2], final_score [kp_num])
    """
    # Per-pose, per-joint Euclidean distance to the reference: [n, kp_num].
    joint_dist = torch.sqrt(
        torch.pow(ref_pose.unsqueeze(0) - cluster_preds, 2).sum(dim=2)
    )
    kp_num = ref_pose.size(0)
    ref_dist = min(ref_dist, 15)
    # Joints farther than ref_dist from the reference do not contribute.
    close_enough = (joint_dist <= ref_dist)
    final_pose = torch.zeros(kp_num, 2)
    final_score = torch.zeros(kp_num)
    # Promote single-pose inputs to a batch of one (in place, as before).
    if cluster_preds.dim() == 2:
        cluster_preds.unsqueeze_(0)
        cluster_scores.unsqueeze_(0)
    if close_enough.dim() == 1:
        close_enough.unsqueeze_(0)
    for j in range(kp_num):
        keep = close_enough[:, j]
        joint_scores = cluster_scores[keep, j]      # [k, 1]
        joint_locs = cluster_preds[keep, j]         # [k, 2]
        # Normalize the contributing scores, then merge by weighted sum.
        weights = (joint_scores / joint_scores.sum()).squeeze(-1)
        final_pose[j, 0] = torch.dot(joint_locs[:, 0], weights)
        final_pose[j, 1] = torch.dot(joint_locs[:, 1], weights)
        final_score[j] = torch.dot(joint_scores.squeeze(-1), weights)
    return final_pose, final_score
def p_merge_fast(ref_pose, cluster_preds, cluster_scores, ref_dist):
    """Vectorized score-weighted pose merging (no per-joint Python loop).

    Args:
        ref_pose: reference pose -- [kp_num, 2]
        cluster_preds: redundant poses -- [n, kp_num, 2]
        cluster_scores: redundant poses score -- [n, kp_num, 1]
        ref_dist: reference scale (scalar, capped at 15)

    Returns:
        (final_pose [kp_num, 2], final_score [kp_num, 1])
    """
    # Per-pose, per-joint Euclidean distance to the reference: [n, kp_num].
    joint_dist = torch.sqrt(
        torch.pow(ref_pose.unsqueeze(0) - cluster_preds, 2).sum(dim=2)
    )
    kp_num = ref_pose.size(0)
    ref_dist = min(ref_dist, 15)
    keep = (joint_dist <= ref_dist)
    final_pose = torch.zeros(kp_num, 2)
    final_score = torch.zeros(kp_num)
    # Promote single-pose inputs to a batch of one (in place, as before).
    if cluster_preds.dim() == 2:
        cluster_preds.unsqueeze_(0)
        cluster_scores.unsqueeze_(0)
    if keep.dim() == 1:
        keep.unsqueeze_(0)
    # Zero out the scores of joints that are too far from the reference,
    # then normalize the remaining scores across the cluster dimension.
    masked = cluster_scores * keep.float().unsqueeze(-1)
    weights = masked / masked.sum(dim=0)
    final_pose = (cluster_preds * weights.repeat(1, 1, 2)).sum(dim=0)
    final_score = (masked * weights).sum(dim=0)
    return final_pose, final_score
def get_parametric_distance(i, all_preds, keypoint_scores, ref_dist):
    """Similarity of pose ``i`` to every pose in ``all_preds``: a tanh
    score-correlation term (for near-coincident joints, dist <= 1) plus
    ``mu`` times an exponential spatial-proximity term.

    WARNING: mutates ``keypoint_scores`` in place via ``squeeze_`` /
    ``unsqueeze_`` — callers see the shape change (pose_nms passes its
    working pose_scores tensor here).
    """
    pick_preds = all_preds[i]
    pred_scores = keypoint_scores[i]
    # Per-pose, per-joint distance to the picked pose: [n, kp_nums].
    dist = torch.sqrt(torch.sum(
        torch.pow(pick_preds[np.newaxis, :] - all_preds, 2),
        dim=2
    ))
    # Only essentially-coincident joints get the score-similarity term.
    mask = (dist <= 1)
    kp_nums = all_preds.size()[1]
    # Define a keypoints distance
    score_dists = torch.zeros(all_preds.shape[0], kp_nums)
    keypoint_scores.squeeze_()
    if keypoint_scores.dim() == 1:
        keypoint_scores.unsqueeze_(0)
    if pred_scores.dim() == 1:
        pred_scores.unsqueeze_(1)
    # The predicted scores are repeated up to do broadcast
    pred_scores = pred_scores.repeat(1, all_preds.shape[0]).transpose(0, 1)
    score_dists[mask] = torch.tanh(pred_scores[mask] / delta1) * torch.tanh(keypoint_scores[mask] / delta1)
    point_dist = torch.exp((-1) * dist / delta2)
    final_dist = torch.sum(score_dists, dim=1) + mu * torch.sum(point_dist, dim=1)
    return final_dist
def PCK_match(pick_pred, all_preds, ref_dist):
    """Count, for every candidate pose, how many of its keypoints fall
    within ``ref_dist`` (capped at 7) of the picked pose's keypoints.

    Args:
        pick_pred: picked pose -- [kp_num, 2]
        all_preds: candidate poses -- [n, kp_num, 2]
        ref_dist: reference scale (scalar, capped at 7)

    Returns:
        tensor [n] of matching-keypoint counts.
    """
    ref_dist = min(ref_dist, 7)
    # Per-candidate, per-joint Euclidean distance to the picked pose.
    diff = pick_pred.unsqueeze(0) - all_preds
    joint_dist = diff.pow(2).sum(dim=2).sqrt()
    # A joint "matches" when its normalized distance is at most 1.
    return torch.sum(joint_dist / ref_dist <= 1, dim=1)
def write_json(all_results, outputpath, form=None, for_eval=False, js_name = 'result'):
    '''
    Write pose-estimation results to JSON.
    all_result: result dict of predictions
    outputpath: output directory
    form: None -> one flat COCO-style list; 'cmu' / 'open' -> per-image
          files in the CMU-Pose / OpenPose layouts
    for_eval: if True, use the trailing integer of the image file name as
              the numeric image_id (COCO evaluation convention)
    js_name: base name of the combined JSON file (flat layout only)
    '''
    json_results = []
    json_results_cmu = {}
    for im_res in all_results:
        im_name = im_res['imgname']
        for human in im_res['result']:
            keypoints = []
            result = {}
            if for_eval:
                result['image_id'] = int(os.path.basename(im_name).split('.')[0].split('_')[-1])
            else:
                result['image_id'] = os.path.basename(im_name)
            result['category_id'] = 1
            kp_preds = human['keypoints']
            kp_scores = human['kp_score']
            pro_scores = human['proposal_score']
            # Flatten keypoints to [x1, y1, s1, x2, y2, s2, ...].
            for n in range(kp_scores.shape[0]):
                keypoints.append(float(kp_preds[n, 0]))
                keypoints.append(float(kp_preds[n, 1]))
                keypoints.append(float(kp_scores[n]))
            result['keypoints'] = keypoints
            result['score'] = float(pro_scores)
            result['box'] = human['box']
            #pose track results by PoseFlow
            if 'idx' in human.keys():
                result['idx'] = human['idx']
            if form == 'cmu': # the form of CMU-Pose
                if result['image_id'] not in json_results_cmu.keys():
                    json_results_cmu[result['image_id']]={}
                    json_results_cmu[result['image_id']]['version']="AlphaPose v0.3"
                    json_results_cmu[result['image_id']]['bodies']=[]
                tmp={'joints':[]}
                # Append a synthesized joint as the midpoint of keypoints
                # 5 and 6 (flat triples at indices 15 and 18) — presumably
                # the neck.
                result['keypoints'].append((result['keypoints'][15]+result['keypoints'][18])/2)
                result['keypoints'].append((result['keypoints'][16]+result['keypoints'][19])/2)
                result['keypoints'].append((result['keypoints'][17]+result['keypoints'][20])/2)
                # Reorder the flat (x, y, s) triples into the target order.
                indexarr=[0,51,18,24,30,15,21,27,36,42,48,33,39,45,6,3,12,9]
                for i in indexarr:
                    tmp['joints'].append(result['keypoints'][i])
                    tmp['joints'].append(result['keypoints'][i+1])
                    tmp['joints'].append(result['keypoints'][i+2])
                json_results_cmu[result['image_id']]['bodies'].append(tmp)
            elif form == 'open': # the form of OpenPose
                if result['image_id'] not in json_results_cmu.keys():
                    json_results_cmu[result['image_id']]={}
                    json_results_cmu[result['image_id']]['version']="AlphaPose v0.3"
                    json_results_cmu[result['image_id']]['people']=[]
                tmp={'pose_keypoints_2d':[]}
                # Same synthesized midpoint joint and reordering as 'cmu'.
                result['keypoints'].append((result['keypoints'][15]+result['keypoints'][18])/2)
                result['keypoints'].append((result['keypoints'][16]+result['keypoints'][19])/2)
                result['keypoints'].append((result['keypoints'][17]+result['keypoints'][20])/2)
                indexarr=[0,51,18,24,30,15,21,27,36,42,48,33,39,45,6,3,12,9]
                for i in indexarr:
                    tmp['pose_keypoints_2d'].append(result['keypoints'][i])
                    tmp['pose_keypoints_2d'].append(result['keypoints'][i+1])
                    tmp['pose_keypoints_2d'].append(result['keypoints'][i+2])
                json_results_cmu[result['image_id']]['people'].append(tmp)
            else:
                json_results.append(result)
    if form == 'cmu': # the form of CMU-Pose
        # Combined file plus one file per image under sep-json/.
        with open(os.path.join(outputpath,'alphapose-results.json'), 'w') as json_file:
            json_file.write(json.dumps(json_results_cmu))
        if not os.path.exists(os.path.join(outputpath,'sep-json')):
            os.mkdir(os.path.join(outputpath,'sep-json'))
        for name in json_results_cmu.keys():
            with open(os.path.join(outputpath,'sep-json',name.split('.')[0]+'.json'),'w') as json_file:
                json_file.write(json.dumps(json_results_cmu[name]))
    elif form == 'open': # the form of OpenPose
        with open(os.path.join(outputpath,'alphapose-results.json'), 'w') as json_file:
            json_file.write(json.dumps(json_results_cmu))
        if not os.path.exists(os.path.join(outputpath,'sep-json')):
            os.mkdir(os.path.join(outputpath,'sep-json'))
        for name in json_results_cmu.keys():
            with open(os.path.join(outputpath,'sep-json',name.split('.')[0]+'.json'),'w') as json_file:
                json_file.write(json.dumps(json_results_cmu[name]))
    else:
        with open(os.path.join(outputpath, js_name + '.json'), 'w') as json_file:
            json_file.write(json.dumps(json_results))
| 37.782493 | 128 | 0.605097 |
8c01707306498fe60daba58888256a985f1f44da | 2,592 | py | Python | divio_cli/upload/boilerplate.py | bykof/divio-cli | 14a5df39e0e6d7e804e51578bd0db372db8a7596 | [
"BSD-3-Clause"
] | null | null | null | divio_cli/upload/boilerplate.py | bykof/divio-cli | 14a5df39e0e6d7e804e51578bd0db372db8a7596 | [
"BSD-3-Clause"
] | null | null | null | divio_cli/upload/boilerplate.py | bykof/divio-cli | 14a5df39e0e6d7e804e51578bd0db372db8a7596 | [
"BSD-3-Clause"
] | null | null | null | import glob
import os
import tarfile
import click
from .. import settings
from ..utils import get_bytes_io
from ..validators.boilerplate import validate_boilerplate
from ..validators.common import load_config
from .common import add_meta_files
# Always excluded from boilerplate archives, in addition to any
# user-configured "excluded" patterns from the boilerplate config file.
BOILERPLATE_EXCLUDE_DEFAULTS = ["boilerplate.json", ".git"]
def normalize_path(path):
    """Collapse redundant separators and '.'/'..' components in *path*."""
    normalized = os.path.normpath(path)
    return normalized
def get_boilerplate_files(boilerplate_path):
    """Collect the files that belong in the boilerplate archive.

    Reads the boilerplate config for user-defined "excluded" patterns,
    combines them with the built-in defaults, expands them via glob, and
    walks *boilerplate_path* skipping anything excluded.

    Returns:
        (excluded, matches): the set of normalized excluded paths and the
        list of file paths to include.
    """
    config = load_config(
        settings.BOILERPLATE_CONFIG_FILENAME, boilerplate_path
    )
    patterns = config.get("excluded", []) + BOILERPLATE_EXCLUDE_DEFAULTS
    # NOTE(review): the glob patterns expand relative to the current
    # working directory, not boilerplate_path — confirm callers always run
    # from inside the boilerplate directory.
    excluded = set()
    for pattern in patterns:
        excluded.update(glob.glob(normalize_path(pattern).rstrip("/")))
    matches = []
    for root, dirnames, filenames in os.walk(boilerplate_path, topdown=True):
        # Prune excluded directories in place so os.walk never descends
        # into them.
        dirnames[:] = [
            d for d in dirnames
            if normalize_path(os.path.join(root, d)) not in excluded
        ]
        if normalize_path(root) in excluded:  # root-level exclude
            continue
        for filename in filenames:
            filepath = os.path.join(root, filename)
            if normalize_path(filepath) not in excluded:
                matches.append(filepath)
    return excluded, matches
def upload_boilerplate(client, path=None, noinput=False):
    """Validate, package and upload the boilerplate at *path*.

    Args:
        client: API client exposing ``upload_boilerplate(fileobj)``.
        path: boilerplate directory (defaults to the current directory).
        noinput: skip the interactive confirmation prompt when True.

    Raises:
        click.ClickException: if the boilerplate fails validation (also
        raised by click's ``confirm(abort=True)`` when the user declines).
    """
    path = path or "."
    errors = validate_boilerplate(path)
    if errors:
        message = "The following errors happened during validation:"
        message = "{}\n - {}".format(message, "\n - ".join(errors))
        raise click.ClickException(message)

    excludes, files = get_boilerplate_files(path)

    if not noinput:
        # Bug fix: the string below has no format placeholder, so the
        # trailing ``.format(len(files))`` was a no-op and was removed.
        # The displayed text is unchanged.
        click.secho(
            "The following files will be included in your "
            "boilerplate and uploaded to the Divio Cloud:",
            fg="yellow",
        )
        click.echo(os.linesep.join(files))
        click.confirm(
            "Are you sure you want to continue and upload "
            "the preceding (#{}) files to the Divio Cloud?".format(len(files)),
            default=True,
            show_default=True,
            abort=True,
        )

    archive_obj = create_boilerplate_archive(path, files)
    return client.upload_boilerplate(archive_obj)
def create_boilerplate_archive(path, files):
    """Pack *files* plus the boilerplate meta files into an in-memory
    gzipped tarball and return the file object, rewound to the start."""
    buffer = get_bytes_io()
    with tarfile.open(mode="w:gz", fileobj=buffer) as archive:
        add_meta_files(archive, path, settings.BOILERPLATE_CONFIG_FILENAME)
        for filename in files:
            archive.add(filename)
    buffer.seek(0)
    return buffer
| 28.483516 | 79 | 0.643519 |
67e641daacd9c439999d369a141717c739c0aa52 | 5,368 | py | Python | common/pullrequest.py | TizenAPI-Bot/tizenfx-jenkins-scripts | 0017119bddd36246f906a7d9a15fc08d49fbddb0 | [
"Apache-2.0"
] | null | null | null | common/pullrequest.py | TizenAPI-Bot/tizenfx-jenkins-scripts | 0017119bddd36246f906a7d9a15fc08d49fbddb0 | [
"Apache-2.0"
] | null | null | null | common/pullrequest.py | TizenAPI-Bot/tizenfx-jenkins-scripts | 0017119bddd36246f906a7d9a15fc08d49fbddb0 | [
"Apache-2.0"
] | 2 | 2019-08-30T04:01:13.000Z | 2019-12-12T03:50:58.000Z | #!/usr/bin/env python3
#
# Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from time import sleep
from github import Github, GithubObject, GithubException
from common.buildlog import BuildLog
# Unified-diff hunk header, e.g. "@@ -10,4 +12,6 @@"; group 1 captures the
# old ("-") range, group 2 the new ("+") range.
DIFF_PATTERN = re.compile(r'^@@ \-([0-9,]+) \+([0-9,]+) @@')
class PullRequest:
    """Wrapper around a GitHub pull request (via PyGithub) that adds
    diff-position mapping and build-log reporting helpers."""
    def __init__(self, env):
        # env carries the CI configuration: token, repository and PR data.
        self.number = env.github_pr_number
        self.state = env.github_pr_state
        self.target_branch = env.github_pr_target_branch
        gh = Github(env.github_token)
        repo = gh.get_repo(env.github_repo)
        self._ghpr = repo.get_pull(self.number)
        self.latest_commit = self._ghpr.get_commits().reversed[0]
        self.changed_files = self._ghpr.get_files()
        self._map_difflines()
    def _map_difflines(self):
        """Build, per changed file, a map from new-file line number to the
        diff "position" that GitHub review comments require, plus the list
        of [start, length] new-file hunk ranges.
        (Note: the attribute name typo "paris" is kept for compatibility.)
        """
        self._line_to_position_map = {}
        self._file_diffhunk_paris = {}
        for f in self.changed_files:
            path = f.filename
            if f.patch is None:
                continue
            self._line_to_position_map[path] = {}
            diff_lines = []
            line_number = 0
            for position, line in enumerate(f.patch.split("\n")):
                m = DIFF_PATTERN.match(line)
                if m is not None:
                    # Hunk header: group(2) is the new-file "start,count".
                    hunkrange = m.group(2).split(',')
                    if len(hunkrange) == 1:
                        # Count defaults to 1 when omitted.
                        hunkrange.append(1)
                    diff_lines.append(list(map(int, hunkrange)))
                    line_number = int(hunkrange[0])
                    continue
                # NOTE(review): an empty patch line would raise IndexError
                # here — confirm patches never contain blank lines.
                elif line[0] == '-':
                    # Deleted line: exists only in the old file.
                    continue
                self._line_to_position_map[path][line_number] = position
                line_number += 1
            self._file_diffhunk_paris[path] = diff_lines
    def set_status(self, state,
                   target_url=GithubObject.NotSet,
                   description=GithubObject.NotSet,
                   context=GithubObject.NotSet):
        """Set a commit status on the PR's latest commit.
        Returns False (doing nothing) when the PR has no commits."""
        if self._ghpr.commits < 1:
            return False
        self.latest_commit.create_status(
            state, target_url, description, context)
        return True
    def set_labels(self, *labels):
        """Replace all labels on the PR."""
        self._ghpr.set_labels(*labels)
    def add_to_labels(self, *labels):
        """Add labels, printing a warning (not raising) on API errors."""
        try:
            self._ghpr.add_to_labels(*labels)
        except GithubException as err:
            print('Warning: ' + err.data['message'])
    def remove_from_labels(self, label):
        """Remove a label, printing a warning (not raising) on API errors."""
        try:
            self._ghpr.remove_from_labels(label)
        except GithubException as err:
            print('Warning: ' + err.data['message'])
    def exists_in_labels(self, label):
        """Return True if the PR currently carries *label*."""
        for lb in self._ghpr.labels:
            if lb.name == label:
                return True
        return False
    def get_labels(self):
        """Return the PR's labels."""
        return self._ghpr.get_labels()
    def create_review_comment(self, path, line_number, body):
        """Post an inline review comment at *line_number* of *path*.
        Skips duplicates (same path/position/body) and PRs without
        commits; returns True only when a comment was created."""
        position = self._line_to_position_map[path][line_number]
        for c in self._ghpr.get_comments():
            if c.path == path and c.position == position and c.body == body:
                return False
        if self._ghpr.commits < 1:
            return False
        self._ghpr.create_review_comment(
            body, self.latest_commit, path, position)
        return True
    def create_issue_comment(self, body):
        """Post a plain (non-review) comment on the PR."""
        self._ghpr.create_issue_comment(body)
    def report_warnings_as_review_comment(self, logfile):
        """Post each build warning that falls inside this PR's diff hunks
        as an inline review comment (stops after ~50, throttled)."""
        if not os.path.exists(logfile):
            return
        build_log = BuildLog(logfile)
        count = 0
        for f in self.changed_files:
            path = f.filename
            if f.patch is None:
                continue
            for line in self._file_diffhunk_paris[path]:
                for warn in build_log.warnings:
                    if not path.endswith(warn['file']):
                        continue
                    wcode = warn['code']
                    wline = warn['line']
                    wmsg = warn['message']
                    # Only comment on lines inside this hunk's new-file
                    # range [start, start + length).
                    if line[0] <= wline and wline < (line[0] + line[1]):
                        body = 'warning {}: {}'.format(wcode, wmsg)
                        self.create_review_comment(path, wline, body)
                        count += 1
                        if count > 50:
                            print('Too many comments! Skip the rest!')
                            return
                        sleep(0.5)  # throttle API calls
    def report_errors_as_issue_comment(self, logfile):
        """Summarize all build errors from *logfile* in one PR comment."""
        if not os.path.exists(logfile):
            return
        bl = BuildLog(logfile)
        if len(bl.errors) < 1:
            return
        body = '### Build Error:\n'
        for err in bl.errors:
            body += '> {}({}): {}: {}\n' \
                .format(err['file'], err['line'], err['code'], err['message'])
        self.create_issue_comment(body)
| 34.632258 | 78 | 0.567064 |
2814f05c5d7ae770c09c9f07d073b9d5e3aafc8d | 6,135 | py | Python | testing/test_model.py | Nicholas-Schaub/ome-types | fcfca3a9a66eee16071317aae0a4e3fb96316dca | [
"MIT"
] | null | null | null | testing/test_model.py | Nicholas-Schaub/ome-types | fcfca3a9a66eee16071317aae0a4e3fb96316dca | [
"MIT"
] | null | null | null | testing/test_model.py | Nicholas-Schaub/ome-types | fcfca3a9a66eee16071317aae0a4e3fb96316dca | [
"MIT"
] | null | null | null | import pickle
import re
from pathlib import Path
from unittest import mock
from xml.dom import minidom
from xml.etree import ElementTree
import pytest
import util
from pydantic import ValidationError
from xmlschema.validators.exceptions import XMLSchemaValidationError
from ome_types import from_tiff, from_xml, model, to_xml
from ome_types.schema import NS_OME, URI_OME, get_schema, to_xml_element
# Sample-file stems (see true_stem below) grouped by expected outcome.
# Stems expected to fail parsing into the model:
SHOULD_FAIL_READ = {
    # Some timestamps have negative years which datetime doesn't support.
    "timestampannotation",
}
# Stems expected to raise a schema-validation error on read:
SHOULD_RAISE_READ = {"bad"}
# Stems that read fine but are known not to round-trip losslessly:
SHOULD_FAIL_ROUNDTRIP = {
    # Order of elements in StructuredAnnotations and Union are jumbled.
    "timestampannotation-posix-only",
    "transformations-downgrade",
}
SKIP_ROUNDTRIP = {
    # These have XMLAnnotations with extra namespaces and mixed content, which
    # the automated round-trip test code doesn't properly verify yet. So even
    # though these files do appear to round-trip correctly when checked by eye,
    # we'll play it safe and skip them until the test is fixed.
    "spim",
    "xmlannotation-body-space",
    "xmlannotation-multi-value",
    "xmlannotation-svg",
}
def mark_xfail(fname):
    """Wrap *fname* in a pytest param marked strict-xfail, so an
    unexpected pass is reported as a failure worth investigating."""
    xfail = pytest.mark.xfail(
        strict=True, reason="Unexpected success. You fixed it!"
    )
    return pytest.param(fname, marks=xfail)
def mark_skip(fname):
    """Wrap *fname* in a pytest param that is always skipped."""
    skipped = pytest.mark.skip
    return pytest.param(fname, marks=skipped)
def true_stem(p):
    """Return the file name of *p* up to (not including) its first dot,
    e.g. Path('a/b.ome.xml') -> 'b'."""
    stem, _, _ = p.name.partition(".")
    return stem
# All sample documents shipped next to this test module.
all_xml = list((Path(__file__).parent / "data").glob("*.ome.xml"))
# Read tests: known-unparsable stems are marked strict-xfail.
xml_read = [mark_xfail(f) if true_stem(f) in SHOULD_FAIL_READ else f for f in all_xml]
# Round-trip tests: drop unreadable files, xfail known round-trip
# failures, and skip files the comparison can't verify yet.
xml_roundtrip = []
for f in all_xml:
    stem = true_stem(f)
    if stem in SHOULD_FAIL_READ | SHOULD_RAISE_READ:
        continue
    elif stem in SHOULD_FAIL_ROUNDTRIP:
        f = mark_xfail(f)
    elif stem in SKIP_ROUNDTRIP:
        f = mark_skip(f)
    xml_roundtrip.append(f)
@pytest.mark.parametrize("xml", xml_read, ids=true_stem)
def test_from_xml(xml, benchmark):
    """Each sample file either parses, or raises for the known-bad ones."""
    if true_stem(xml) in SHOULD_RAISE_READ:
        with pytest.raises(XMLSchemaValidationError):
            assert benchmark(from_xml, xml)
    else:
        assert benchmark(from_xml, xml)
def test_from_tiff(benchmark):
    """Test that OME metadata extractions from Tiff headers works."""
    tiff_path = Path(__file__).parent / "data" / "ome.tiff"
    ome = benchmark(from_tiff, tiff_path)
    assert len(ome.images) == 1
    image = ome.images[0]
    assert image.id == "Image:0"
    assert image.pixels.size_x == 6
    assert image.pixels.channels[0].samples_per_pixel == 1
@pytest.mark.parametrize("xml", xml_roundtrip, ids=true_stem)
def test_roundtrip(xml, benchmark):
    """Ensure we can losslessly round-trip XML through the model and back."""
    xml = str(xml)
    schema = get_schema(xml)
    def canonicalize(xml, strip_empty):
        """Normalize a document to a pretty-printed canonical form so the
        original and the re-serialized output can be compared as text."""
        d = schema.decode(xml, use_defaults=True)
        # Strip extra whitespace in the schemaLocation value.
        d["@xsi:schemaLocation"] = re.sub(r"\s+", " ", d["@xsi:schemaLocation"])
        root = schema.encode(d, path=NS_OME + "OME", use_defaults=True)
        # These are the tags that appear in the example files with empty
        # content. Since our round-trip will drop empty elements, we'll need to
        # strip them from the "original" documents before comparison.
        if strip_empty:
            for tag in ("Description", "LightPath", "Map"):
                for e in root.findall(f".//{NS_OME}{tag}[.='']..."):
                    e.remove(e.find(f"{NS_OME}{tag}"))
        # ET.canonicalize can't handle an empty namespace so we need to
        # re-register the OME namespace with an actual name before calling
        # tostring.
        ElementTree.register_namespace("ome", URI_OME)
        xml_out = ElementTree.tostring(root, "unicode")
        xml_out = util.canonicalize(xml_out, strip_text=True)
        xml_out = minidom.parseString(xml_out).toprettyxml(indent="  ")
        return xml_out
    original = canonicalize(xml, True)
    ome = from_xml(xml)
    rexml = benchmark(to_xml, ome)
    # Only the original document needs empty-element stripping.
    assert canonicalize(rexml, False) == original
def test_to_xml_with_kwargs():
    """Ensure kwargs are passed to ElementTree"""
    ome = from_xml(Path(__file__).parent / "data" / "example.ome.xml")
    # Patch tostring so we can inspect the kwargs to_xml forwards to it.
    with mock.patch("xml.etree.ElementTree.tostring") as mocked_et_tostring:
        element = to_xml_element(ome)
        # Use an ElementTree.tostring kwarg and assert that it was passed through
        to_xml(element, xml_declaration=True)
        assert mocked_et_tostring.call_args.xml_declaration
@pytest.mark.parametrize("xml", xml_read, ids=true_stem)
def test_serialization(xml):
    """Test pickle serialization and reserialization."""
    if true_stem(xml) in SHOULD_RAISE_READ:
        pytest.skip("Can't pickle unreadable xml")
    original = from_xml(xml)
    reloaded = pickle.loads(pickle.dumps(original))
    assert original == reloaded
def test_no_id():
    """Test that ids are optional, and auto-increment."""
    explicit = model.Instrument(id=20)
    assert explicit.id == "Instrument:20"
    implicit = model.Instrument()
    assert implicit.id == "Instrument:21"
    # Invalid ids are still rejected by validation.
    with pytest.raises(ValueError):
        model.Instrument(id="nonsense")
def test_required_missing():
    """Test subclasses with non-default arguments still work."""
    with pytest.raises(ValidationError) as excinfo:
        _ = model.BooleanAnnotation()
    message = str(excinfo.value)
    assert "1 validation error for BooleanAnnotation" in message
    assert "value\n field required" in message
    with pytest.raises(ValidationError) as excinfo:
        _ = model.Label()
    message = str(excinfo.value)
    assert "2 validation errors for Label" in message
    assert "x\n field required" in message
    assert "y\n field required" in message
def test_refs():
    """PlateRef elements resolve to the actual Plate objects."""
    data_file = Path(__file__).parent / "data" / "two-screens-two-plates-four-wells.ome.xml"
    ome = from_xml(data_file)
    first_screen = ome.screens[0]
    assert first_screen.plate_ref[0].ref is ome.plates[0]
def test_with_ome_ns():
    """The ome_ns sample document parses and contains experimenters."""
    sample = Path(__file__).parent / "data" / "ome_ns.ome.xml"
    parsed = from_xml(sample)
    assert parsed.experimenters
| 33.895028 | 86 | 0.68802 |
17916df8a0bb16544c7f00cc4d02074bf5554e1f | 10,368 | py | Python | python/pyarrow/__init__.py | mfcabrera/arrow | 7a9ba6178d243779cf964b27095b4c5024223cf0 | [
"Apache-2.0"
] | null | null | null | python/pyarrow/__init__.py | mfcabrera/arrow | 7a9ba6178d243779cf964b27095b4c5024223cf0 | [
"Apache-2.0"
] | null | null | null | python/pyarrow/__init__.py | mfcabrera/arrow | 7a9ba6178d243779cf964b27095b4c5024223cf0 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# flake8: noqa
import os as _os
import sys as _sys
try:
    # Installed package: the version was baked in at build time.
    from ._generated_version import version as __version__
except ImportError:
    # Package is not installed, parse git tag at runtime
    try:
        import setuptools_scm
        # Code duplicated from setup.py to avoid a dependency on each other
        def parse_git(root, **kwargs):
            """
            Parse function for setuptools_scm that ignores tags for non-C++
            subprojects, e.g. apache-arrow-js-XXX tags.
            """
            from setuptools_scm.git import parse
            kwargs['describe_command'] = \
                "git describe --dirty --tags --long --match 'apache-arrow-[0-9].*'"
            return parse(root, **kwargs)
        __version__ = setuptools_scm.get_version('../',
                                                 parse=parse_git)
    except ImportError:
        # Neither a generated version nor setuptools_scm is available.
        __version__ = None
import pyarrow.compat as compat
from pyarrow.lib import cpu_count, set_cpu_count
from pyarrow.lib import (null, bool_,
int8, int16, int32, int64,
uint8, uint16, uint32, uint64,
time32, time64, timestamp, date32, date64,
float16, float32, float64,
binary, string, utf8, decimal128,
list_, struct, union, dictionary, field,
type_for_alias,
DataType, DictionaryType, ListType, StructType,
UnionType, TimestampType, Time32Type, Time64Type,
FixedSizeBinaryType, Decimal128Type,
DictionaryMemo,
Field,
Schema,
schema,
Array, Tensor,
array, chunked_array, column,
from_numpy_dtype,
NullArray,
NumericArray, IntegerArray, FloatingPointArray,
BooleanArray,
Int8Array, UInt8Array,
Int16Array, UInt16Array,
Int32Array, UInt32Array,
Int64Array, UInt64Array,
ListArray, UnionArray,
BinaryArray, StringArray,
FixedSizeBinaryArray,
DictionaryArray,
Date32Array, Date64Array,
TimestampArray, Time32Array, Time64Array,
Decimal128Array, StructArray,
ArrayValue, Scalar, NA, _NULL as NULL,
BooleanValue,
Int8Value, Int16Value, Int32Value, Int64Value,
UInt8Value, UInt16Value, UInt32Value, UInt64Value,
HalfFloatValue, FloatValue, DoubleValue, ListValue,
BinaryValue, StringValue, FixedSizeBinaryValue,
DecimalValue, UnionValue, StructValue, DictionaryValue,
Date32Value, Date64Value,
Time32Value, Time64Value,
TimestampValue)
# Buffers, allocation
from pyarrow.lib import (Buffer, ResizableBuffer, foreign_buffer, py_buffer,
compress, decompress, allocate_buffer)
from pyarrow.lib import (MemoryPool, LoggingMemoryPool, ProxyMemoryPool,
total_allocated_bytes, set_memory_pool,
default_memory_pool, logging_memory_pool,
proxy_memory_pool, log_memory_allocations)
# I/O
from pyarrow.lib import (HdfsFile, NativeFile, PythonFile,
CompressedInputStream, CompressedOutputStream,
FixedSizeBufferWriter,
BufferReader, BufferOutputStream,
OSFile, MemoryMappedFile, memory_map,
create_memory_map, have_libhdfs, have_libhdfs3,
MockOutputStream, input_stream, output_stream)
from pyarrow.lib import (ChunkedArray, Column, RecordBatch, Table,
concat_tables)
# Exceptions
from pyarrow.lib import (ArrowException,
ArrowKeyError,
ArrowInvalid,
ArrowIOError,
ArrowMemoryError,
ArrowNotImplementedError,
ArrowTypeError,
ArrowSerializationError,
PlasmaObjectExists)
# Serialization
from pyarrow.lib import (deserialize_from, deserialize,
deserialize_components,
serialize, serialize_to, read_serialized,
SerializedPyObject, SerializationContext,
SerializationCallbackError,
DeserializationCallbackError)
from pyarrow.filesystem import FileSystem, LocalFileSystem
from pyarrow.hdfs import HadoopFileSystem
import pyarrow.hdfs as hdfs
from pyarrow.ipc import (Message, MessageReader,
RecordBatchFileReader, RecordBatchFileWriter,
RecordBatchStreamReader, RecordBatchStreamWriter,
read_message, read_record_batch, read_schema,
read_tensor, write_tensor,
get_record_batch_size, get_tensor_size,
open_stream,
open_file,
serialize_pandas, deserialize_pandas)
import pyarrow.ipc as ipc
def open_stream(source):
    """
    pyarrow.open_stream deprecated since 0.12, use pyarrow.ipc.open_stream
    """
    import warnings
    # Use DeprecationWarning (the default UserWarning mis-categorizes a
    # deprecation) and stacklevel=2 so the warning points at the caller
    # rather than at this wrapper.
    warnings.warn("pyarrow.open_stream is deprecated, please use "
                  "pyarrow.ipc.open_stream", DeprecationWarning, stacklevel=2)
    return ipc.open_stream(source)
def open_file(source):
    """
    pyarrow.open_file deprecated since 0.12, use pyarrow.ipc.open_file
    """
    import warnings
    # Use DeprecationWarning (the default UserWarning mis-categorizes a
    # deprecation) and stacklevel=2 so the warning points at the caller
    # rather than at this wrapper.
    warnings.warn("pyarrow.open_file is deprecated, please use "
                  "pyarrow.ipc.open_file", DeprecationWarning, stacklevel=2)
    return ipc.open_file(source)
localfs = LocalFileSystem.get_instance()
from pyarrow.serialization import (default_serialization_context,
register_default_serialization_handlers,
register_torch_serialization_handlers)
import pyarrow.types as types
# Entry point for starting the plasma store
def _plasma_store_entry_point():
    """Entry point for starting the plasma store.
    This can be used by invoking e.g.
    ``plasma_store -s /tmp/plasma -m 1000000000``
    from the command line and will start the plasma_store executable with the
    given arguments.
    """
    import pyarrow
    # The binary is shipped inside the installed pyarrow package directory.
    plasma_store_executable = _os.path.join(pyarrow.__path__[0],
                                            "plasma_store_server")
    # execv replaces the current Python process with the plasma_store_server
    # binary, forwarding the original command-line args; it never returns on
    # success.
    _os.execv(plasma_store_executable, _sys.argv)
# ----------------------------------------------------------------------
# Deprecations
from pyarrow.util import _deprecate_api # noqa
# ----------------------------------------------------------------------
# Returning absolute path to the pyarrow include directory (if bundled, e.g. in
# wheels)
def get_include():
    """
    Return absolute path to directory containing Arrow C++ include
    headers. Similar to numpy.get_include
    """
    # The headers are bundled under <package dir>/include (e.g. in wheels).
    package_dir = _os.path.dirname(__file__)
    return _os.path.join(package_dir, 'include')
def get_libraries():
    """
    Return list of library names to include in the `libraries` argument for C
    or Cython extensions using pyarrow
    """
    # Extensions must link against both the core and the Python glue library.
    library_names = ['arrow', 'arrow_python']
    return library_names
def get_library_dirs():
    """
    Return lists of directories likely to contain Arrow C++ libraries for
    linking C or Cython extensions using pyarrow

    The result always starts with the pyarrow package directory itself
    (where wheel-bundled shared libraries live); additional directories are
    discovered via pkg-config, the Windows conda layout, and ARROW_HOME.
    """
    package_cwd = _os.path.dirname(__file__)
    library_dirs = [package_cwd]
    # Search library paths via pkg-config. This is necessary if the user
    # installed libarrow and the other shared libraries manually and they
    # are not shipped inside the pyarrow package (see also ARROW-2976).
    from subprocess import call, PIPE, Popen
    pkg_config_executable = _os.environ.get('PKG_CONFIG', None) or 'pkg-config'
    for package in ["arrow", "plasma", "arrow_python"]:
        cmd = '{0} --exists {1}'.format(pkg_config_executable, package).split()
        try:
            if call(cmd) == 0:
                cmd = [pkg_config_executable, "--libs-only-L", package]
                proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
                out, _ = proc.communicate()
                library_dir = out.rstrip().decode('utf-8')[2:]  # strip "-L"
                # BUGFIX: pkg-config prints nothing when a package has no -L
                # flags; previously the resulting empty string was appended
                # as a bogus library directory.
                if library_dir and library_dir not in library_dirs:
                    library_dirs.append(library_dir)
        except FileNotFoundError:
            # pkg-config itself is not installed; skip this discovery path.
            pass
    if _sys.platform == 'win32':
        # TODO(wesm): Is this necessary, or does setuptools within a conda
        # installation add Library\lib to the linker path for MSVC?
        python_base_install = _os.path.dirname(_sys.executable)
        library_lib = _os.path.join(python_base_install, 'Library', 'lib')
        if _os.path.exists(_os.path.join(library_lib, 'arrow.lib')):
            library_dirs.append(library_lib)
    # ARROW-4074: Allow for ARROW_HOME to be set to some other directory
    if 'ARROW_HOME' in _os.environ:
        library_dirs.append(_os.path.join(_os.environ['ARROW_HOME'], 'lib'))
    return library_dirs
| 40.342412 | 83 | 0.590374 |
44913312710961a72011909459389f7525655e4d | 1,856 | py | Python | day09/django/app1/dateview/addTeachPlans.py | Vanessa-kriby/Python | 1fbef67852fb362712fc48fa5c3c29eac68fe202 | [
"Apache-2.0"
] | null | null | null | day09/django/app1/dateview/addTeachPlans.py | Vanessa-kriby/Python | 1fbef67852fb362712fc48fa5c3c29eac68fe202 | [
"Apache-2.0"
] | null | null | null | day09/django/app1/dateview/addTeachPlans.py | Vanessa-kriby/Python | 1fbef67852fb362712fc48fa5c3c29eac68fe202 | [
"Apache-2.0"
] | null | null | null | from app1.models import *
from app1.util.utils import *
def addTeachPlans(request):
    '''
    Bulk-create TeachPlan records from a POST JSON payload.

    Expected body:
        {"data": [{"dno": ..., "cno": ..., "tno": ..., "tpno": ...,
                   "credit": ..., "teach_date": ...,
                   "evaluation_method": ...}, ...]}

    e.g. POST http://127.0.0.1:8000/app1/addTeachPlans

    On success, returns the full TeachPlan listing (with joined department,
    course and teacher fields) via showJsonresult; on any exception, returns
    {"msg": <error>, "err_num": 1} via showJsonerror.

    NOTE(review): non-POST requests fall through and implicitly return None,
    which Django rejects -- the original docstring advertised a GET variant;
    confirm whether GET should also return the listing.
    '''
    try:
        if request.method == 'POST':
            teadata = json.loads(request.body)
            for item in teadata["data"]:
                result = TeachPlan()
                # Resolve foreign keys; a missing referenced row raises
                # DoesNotExist, which is reported via the except path below.
                result.department = Department.objects.get(dno=item["dno"])
                result.course = Course.objects.get(cno=item["cno"])
                result.teacher = Teacher.objects.get(tno=item["tno"])
                result.tpno = item["tpno"]
                result.credit = item["credit"]
                result.teach_date = item["teach_date"]
                result.evaluation_method = item["evaluation_method"]
                result.save()
            # Return the complete listing so the client can observe the
            # effect of the inserts.
            result = TeachPlan.objects.all().values(
                "tpno", "credit", "teach_date", "evaluation_method",
                "department__dno", "department__dname",
                "course__cno", "course__cname",
                "teacher__tno", "teacher__tname")
            return showJsonresult(result)
    except Exception as e:
        # Best-effort error reporting: surface the exception message to the
        # client instead of a 500 page.
        response = {}
        response['msg'] = str(e)
        response['err_num'] = 1
        return showJsonerror(response)
7562e2c5a7105132bfab47ddd95c12a11b1a40cf | 96,096 | py | Python | python/ray/data/tests/test_dataset.py | linyiyue/ray | 90d2456ec70270a1f894ec3ef6f3004533859e03 | [
"Apache-2.0"
] | null | null | null | python/ray/data/tests/test_dataset.py | linyiyue/ray | 90d2456ec70270a1f894ec3ef6f3004533859e03 | [
"Apache-2.0"
] | 68 | 2021-02-06T08:04:52.000Z | 2022-03-12T08:08:47.000Z | python/ray/data/tests/test_dataset.py | linyiyue/ray | 90d2456ec70270a1f894ec3ef6f3004533859e03 | [
"Apache-2.0"
] | null | null | null | import os
import random
import requests
import shutil
import time
from unittest.mock import patch
import math
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from fsspec.implementations.local import LocalFileSystem
from pytest_lazyfixture import lazy_fixture
import ray
from ray.tests.conftest import * # noqa
from ray.data.datasource import DummyOutputDatasource
from ray.data.datasource.csv_datasource import CSVDatasource
from ray.data.block import BlockAccessor
from ray.data.datasource.file_based_datasource import _unwrap_protocol
from ray.data.extensions.tensor_extension import (
TensorArray, TensorDtype, ArrowTensorType, ArrowTensorArray)
import ray.data.tests.util as util
from ray.data.tests.conftest import * # noqa
def maybe_pipeline(ds, enabled):
    """Return ``ds.pipeline(parallelism=1)`` when *enabled*, else *ds* as-is.

    Lets parametrized tests exercise both the plain-Dataset and the
    pipelined code paths with the same assertions.
    """
    if not enabled:
        return ds
    return ds.pipeline(parallelism=1)
@pytest.mark.parametrize("pipelined", [False, True])
def test_basic_actors(shutdown_only, pipelined):
    """map() with compute="actors" produces the same results as the default task path."""
    ray.init(num_cpus=2)
    ds = ray.data.range(5)
    ds = maybe_pipeline(ds, pipelined)
    assert sorted(ds.map(lambda x: x + 1,
                         compute="actors").take()) == [1, 2, 3, 4, 5]
@pytest.mark.parametrize("pipelined", [False, True])
def test_avoid_placement_group_capture(shutdown_only, pipelined):
    """Dataset ops still work when the driver task runs inside a placement
    group (per the name: internal tasks must not be captured by that PG)."""
    ray.init(num_cpus=2)
    @ray.remote
    def run():
        ds0 = ray.data.range(5)
        ds = maybe_pipeline(ds0, pipelined)
        assert sorted(ds.map(lambda x: x + 1).take()) == [1, 2, 3, 4, 5]
        ds = maybe_pipeline(ds0, pipelined)
        assert ds.count() == 5
        ds = maybe_pipeline(ds0, pipelined)
        assert sorted(ds.iter_rows()) == [0, 1, 2, 3, 4]
    # Run the whole workload from within a 1-CPU placement group.
    pg = ray.util.placement_group([{"CPU": 1}])
    ray.get(run.options(placement_group=pg).remote())
@pytest.mark.parametrize("pipelined", [False, True])
def test_equal_split(shutdown_only, pipelined):
    """split(equal=True) yields shards of identical size; equal=False may not."""
    ray.init(num_cpus=2)
    def range2x(n):
        # A source with 2*n rows: repeated pipeline when pipelined, else a
        # plain double-length range.
        if pipelined:
            return ray.data.range(n).repeat(2)
        else:
            return ray.data.range(2 * n)
    def counts(shards):
        # Count each shard's rows in parallel (num_cpus=0 so counting tasks
        # don't compete for the 2 CPUs).
        @ray.remote(num_cpus=0)
        def count(s):
            return s.count()
        return ray.get([count.remote(s) for s in shards])
    r1 = counts(range2x(10).split(3, equal=True))
    assert all(c == 6 for c in r1), r1
    r2 = counts(range2x(10).split(3, equal=False))
    assert all(c >= 6 for c in r2), r2
    assert not all(c == 6 for c in r2), r2
def test_callable_classes(shutdown_only):
    """Exercise stateful callable classes across map, flat_map, map_batches
    and filter, under both the "tasks" and "actors" compute strategies.

    Each callable counts its own invocations; the expected values pin down
    how instance state is (or is not) carried across calls per strategy.
    """
    ray.init(num_cpus=1)
    ds = ray.data.range(10)

    class MapCounter:
        # Returns the number of times this instance was called before.
        def __init__(self):
            self.num_reuses = 0

        def __call__(self, x):
            call_index = self.num_reuses
            self.num_reuses += 1
            return call_index

    # map
    task_reuse = ds.map(MapCounter, compute="tasks").take()
    assert sorted(task_reuse) == list(range(10)), task_reuse
    actor_reuse = ds.map(MapCounter, compute="actors").take()
    assert sorted(actor_reuse) == list(range(10, 20)), actor_reuse

    class ListCounter:
        # Same counter, but wraps the count in a list (flat_map/map_batches
        # require an iterable/batch return value).
        def __init__(self):
            self.num_reuses = 0

        def __call__(self, x):
            call_index = self.num_reuses
            self.num_reuses += 1
            return [call_index]

    # flat map
    task_reuse = ds.flat_map(ListCounter, compute="tasks").take()
    assert sorted(task_reuse) == list(range(10)), task_reuse
    actor_reuse = ds.flat_map(ListCounter, compute="actors").take()
    assert sorted(actor_reuse) == list(range(10, 20)), actor_reuse

    # map batches
    task_reuse = ds.map_batches(ListCounter, compute="tasks").take()
    assert sorted(task_reuse) == list(range(10)), task_reuse
    actor_reuse = ds.map_batches(ListCounter, compute="actors").take()
    assert sorted(actor_reuse) == list(range(10, 20)), actor_reuse

    class PredicateCounter:
        # Counter used as a predicate: False on the first call, True after.
        def __init__(self):
            self.num_reuses = 0

        def __call__(self, x):
            call_index = self.num_reuses
            self.num_reuses += 1
            return call_index > 0

    # filter
    task_reuse = ds.filter(PredicateCounter, compute="tasks").take()
    assert len(task_reuse) == 9, task_reuse
    actor_reuse = ds.filter(PredicateCounter, compute="actors").take()
    assert len(actor_reuse) == 10, actor_reuse
@pytest.mark.parametrize("pipelined", [False, True])
def test_basic(ray_start_regular_shared, pipelined):
    """Smoke test: map, count and row iteration on a small range dataset."""
    ds0 = ray.data.range(5)
    ds = maybe_pipeline(ds0, pipelined)
    assert sorted(ds.map(lambda x: x + 1).take()) == [1, 2, 3, 4, 5]
    ds = maybe_pipeline(ds0, pipelined)
    assert ds.count() == 5
    ds = maybe_pipeline(ds0, pipelined)
    assert sorted(ds.iter_rows()) == [0, 1, 2, 3, 4]
def test_zip(ray_start_regular_shared):
    """zip() pairs rows of two equal-length datasets; mismatched lengths raise."""
    ds1 = ray.data.range(5)
    ds2 = ray.data.range(5).map(lambda x: x + 1)
    ds = ds1.zip(ds2)
    assert ds.schema() == tuple
    assert ds.take() == [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)]
    # Zipping with a shorter dataset is rejected.
    with pytest.raises(ValueError):
        ds.zip(ray.data.range(3))
def test_zip_arrow(ray_start_regular_shared):
    """zip() on Arrow-format datasets merges columns; duplicate column names
    are disambiguated with _1, _2, ... suffixes."""
    ds1 = ray.data.range_arrow(5).map(lambda r: {"id": r["value"]})
    ds2 = ray.data.range_arrow(5).map(
        lambda r: {"a": r["value"] + 1, "b": r["value"] + 2})
    ds = ds1.zip(ds2)
    assert "{id: int64, a: int64, b: int64}" in str(ds)
    assert ds.count() == 5
    result = [r.as_pydict() for r in ds.take()]
    assert result[0] == {"id": 0, "a": 1, "b": 2}
    # Test duplicate column names.
    ds = ds1.zip(ds1).zip(ds1)
    assert ds.count() == 5
    assert "{id: int64, id_1: int64, id_2: int64}" in str(ds)
    result = [r.as_pydict() for r in ds.take()]
    assert result[0] == {"id": 0, "id_1": 0, "id_2": 0}
def test_batch_tensors(ray_start_regular_shared):
    """A dataset of torch tensors cannot be batched as pyarrow (ArrowInvalid)
    but can be batched as pandas."""
    import torch
    ds = ray.data.from_items([torch.tensor([0, 0]) for _ in range(40)])
    res = "Dataset(num_blocks=40, num_rows=40, schema=<class 'torch.Tensor'>)"
    assert str(ds) == res, str(ds)
    with pytest.raises(pa.lib.ArrowInvalid):
        next(ds.iter_batches(batch_format="pyarrow"))
    df = next(ds.iter_batches(batch_format="pandas"))
    # Each 2-element tensor becomes two integer-named columns.
    assert df.to_dict().keys() == {0, 1}
def test_tensors(ray_start_regular_shared):
    """Tensor datasets report the expected schema string after direct
    creation, transformation, pandas conversion, and conversion from other
    formats."""
    # Create directly.
    ds = ray.data.range_tensor(5, shape=(3, 5))
    assert str(ds) == ("Dataset(num_blocks=5, num_rows=5, "
                       "schema=<Tensor: shape=(None, 3, 5), dtype=int64>)")
    # Transform.
    ds = ds.map_batches(lambda t: np.expand_dims(t, 3))
    assert str(ds) == ("Dataset(num_blocks=5, num_rows=5, "
                       "schema=<Tensor: shape=(None, 3, 5, 1), dtype=int64>)")
    # Pandas conversion.
    res = ray.data.range_tensor(10).map_batches(
        lambda t: t + 2, batch_format="pandas").take(2)
    assert str(res) == "[ArrowRow({'0': 2}), ArrowRow({'0': 3})]", res
    # From other formats.
    ds = ray.data.range(10).map_batches(lambda x: np.array(x))
    assert str(ds) == ("Dataset(num_blocks=10, num_rows=10, "
                       "schema=<Tensor: shape=(None,), dtype=int64>)")
    ds = ray.data.range(10).map(lambda x: np.array(x))
    assert str(ds) == ("Dataset(num_blocks=10, num_rows=10, "
                       "schema=<Tensor: shape=(None,), dtype=int64>)")
    ds = ray.data.from_items([np.zeros(shape=(2, 2, 2)) for _ in range(4)])
    assert str(ds) == (
        "Dataset(num_blocks=4, num_rows=4, "
        "schema=<Tensor: shape=(None, 2, 2, 2), dtype=float64>)"), ds
def test_tensor_array_ops(ray_start_regular_shared):
    """Arithmetic, comparison and bitwise-logical ops on a TensorArray
    column match the same operations applied to the raw NumPy array."""
    outer_dim = 3
    inner_shape = (2, 2, 2)
    shape = (outer_dim, ) + inner_shape
    num_items = np.prod(np.array(shape))
    arr = np.arange(num_items).reshape(shape)
    df = pd.DataFrame({"one": [1, 2, 3], "two": TensorArray(arr)})
    def apply_arithmetic_ops(arr):
        return 2 * (arr + 1) / 3
    def apply_comparison_ops(arr):
        return arr % 2 == 0
    def apply_logical_ops(arr):
        return arr & (3 * arr) | (5 * arr)
    # Op tests, using NumPy as the groundtruth.
    np.testing.assert_equal(
        apply_arithmetic_ops(arr), apply_arithmetic_ops(df["two"]))
    np.testing.assert_equal(
        apply_comparison_ops(arr), apply_comparison_ops(df["two"]))
    np.testing.assert_equal(
        apply_logical_ops(arr), apply_logical_ops(df["two"]))
def test_tensor_array_reductions(ray_start_regular_shared):
    """Every reducer in TensorArray.SUPPORTED_REDUCERS agrees with the
    corresponding NumPy reduction along axis 0."""
    outer_dim = 3
    inner_shape = (2, 2, 2)
    shape = (outer_dim, ) + inner_shape
    num_items = np.prod(np.array(shape))
    arr = np.arange(num_items).reshape(shape)
    df = pd.DataFrame({"one": list(range(outer_dim)), "two": TensorArray(arr)})
    # Reduction tests, using NumPy as the groundtruth.
    for name, reducer in TensorArray.SUPPORTED_REDUCERS.items():
        np_kwargs = {}
        if name in ("std", "var"):
            # Pandas uses a ddof default of 1 while NumPy uses 0.
            # Give NumPy a ddof kwarg of 1 in order to ensure equivalent
            # standard deviation calculations.
            np_kwargs["ddof"] = 1
        np.testing.assert_equal(df["two"].agg(name),
                                reducer(arr, axis=0, **np_kwargs))
def test_arrow_tensor_array_getitem(ray_start_regular_shared):
    """Indexing, iteration, to_pylist, and slicing on an ArrowTensorArray all
    agree with the source NumPy array."""
    outer_dim = 3
    inner_shape = (2, 2, 2)
    shape = (outer_dim, ) + inner_shape
    num_items = np.prod(np.array(shape))
    arr = np.arange(num_items).reshape(shape)
    t_arr = ArrowTensorArray.from_numpy(arr)
    for idx in range(outer_dim):
        np.testing.assert_array_equal(t_arr[idx], arr[idx])
    # Test __iter__.
    for t_subarr, subarr in zip(t_arr, arr):
        np.testing.assert_array_equal(t_subarr, subarr)
    # Test to_pylist.
    np.testing.assert_array_equal(t_arr.to_pylist(), list(arr))
    # Test slicing and indexing.
    t_arr2 = t_arr[1:]
    np.testing.assert_array_equal(t_arr2.to_numpy(), arr[1:])
    for idx in range(1, outer_dim):
        np.testing.assert_array_equal(t_arr2[idx - 1], arr[idx])
def test_tensors_in_tables_from_pandas(ray_start_regular_shared):
    """A pandas column cast to TensorDtype survives conversion to a Dataset."""
    outer_dim = 3
    inner_shape = (2, 2, 2)
    shape = (outer_dim, ) + inner_shape
    num_items = np.prod(np.array(shape))
    arr = np.arange(num_items).reshape(shape)
    df = pd.DataFrame({"one": list(range(outer_dim)), "two": list(arr)})
    # Cast column to tensor extension dtype.
    df["two"] = df["two"].astype(TensorDtype())
    ds = ray.data.from_pandas([ray.put(df)])
    values = [[s["one"], s["two"]] for s in ds.take()]
    expected = list(zip(list(range(outer_dim)), arr))
    for v, e in zip(sorted(values), expected):
        np.testing.assert_equal(v, e)
def test_tensors_in_tables_pandas_roundtrip(ray_start_regular_shared):
    """DataFrame -> Dataset -> DataFrame round-trip preserves a TensorArray column."""
    outer_dim = 3
    inner_shape = (2, 2, 2)
    shape = (outer_dim, ) + inner_shape
    num_items = np.prod(np.array(shape))
    arr = np.arange(num_items).reshape(shape)
    df = pd.DataFrame({"one": list(range(outer_dim)), "two": TensorArray(arr)})
    ds = ray.data.from_pandas([ray.put(df)])
    ds_df = ray.get(ds.to_pandas())[0]
    assert ds_df.equals(df)
def test_tensors_in_tables_parquet_roundtrip(ray_start_regular_shared,
                                             tmp_path):
    """A TensorArray column survives a write_parquet/read_parquet round-trip."""
    outer_dim = 3
    inner_shape = (2, 2, 2)
    shape = (outer_dim, ) + inner_shape
    num_items = np.prod(np.array(shape))
    arr = np.arange(num_items).reshape(shape)
    df = pd.DataFrame({"one": list(range(outer_dim)), "two": TensorArray(arr)})
    ds = ray.data.from_pandas([ray.put(df)])
    ds.write_parquet(str(tmp_path))
    ds = ray.data.read_parquet(str(tmp_path))
    values = [[s["one"], s["two"]] for s in ds.take()]
    expected = list(zip(list(range(outer_dim)), arr))
    for v, e in zip(sorted(values), expected):
        np.testing.assert_equal(v, e)
def test_tensors_in_tables_parquet_with_schema(ray_start_regular_shared,
                                               tmp_path):
    """read_parquet honors an explicit schema containing an ArrowTensorType column."""
    outer_dim = 3
    inner_shape = (2, 2, 2)
    shape = (outer_dim, ) + inner_shape
    num_items = np.prod(np.array(shape))
    arr = np.arange(num_items).reshape(shape)
    df = pd.DataFrame({"one": list(range(outer_dim)), "two": TensorArray(arr)})
    ds = ray.data.from_pandas([ray.put(df)])
    ds.write_parquet(str(tmp_path))
    # Read back with an explicit schema for the tensor column.
    schema = pa.schema([
        ("one", pa.int32()),
        ("two", ArrowTensorType(inner_shape, pa.from_numpy_dtype(arr.dtype))),
    ])
    ds = ray.data.read_parquet(str(tmp_path), schema=schema)
    values = [[s["one"], s["two"]] for s in ds.take()]
    expected = list(zip(list(range(outer_dim)), arr))
    for v, e in zip(sorted(values), expected):
        np.testing.assert_equal(v, e)
def test_tensors_in_tables_parquet_pickle_manual_serde(
        ray_start_regular_shared, tmp_path):
    """Pickled tensor bytes stored in Parquet can be manually deserialized
    back into a TensorArray column via map_batches, either by astype() or by
    constructing a TensorArray directly."""
    import pickle
    outer_dim = 3
    inner_shape = (2, 2, 2)
    shape = (outer_dim, ) + inner_shape
    num_items = np.prod(np.array(shape))
    arr = np.arange(num_items).reshape(shape)
    df = pd.DataFrame({
        "one": list(range(outer_dim)),
        "two": [pickle.dumps(a) for a in arr]
    })
    ds = ray.data.from_pandas([ray.put(df)])
    ds.write_parquet(str(tmp_path))
    ds = ray.data.read_parquet(str(tmp_path))
    # Manually deserialize the tensor pickle bytes and cast to our tensor
    # extension type.
    def deser_mapper(batch: pd.DataFrame):
        batch["two"] = [pickle.loads(a) for a in batch["two"]]
        batch["two"] = batch["two"].astype(TensorDtype())
        return batch
    casted_ds = ds.map_batches(deser_mapper, batch_format="pandas")
    values = [[s["one"], s["two"]] for s in casted_ds.take()]
    expected = list(zip(list(range(outer_dim)), arr))
    for v, e in zip(sorted(values), expected):
        np.testing.assert_equal(v, e)
    # Manually deserialize the pickle tensor bytes and directly cast it to a
    # TensorArray.
    def deser_mapper_direct(batch: pd.DataFrame):
        batch["two"] = TensorArray([pickle.loads(a) for a in batch["two"]])
        return batch
    casted_ds = ds.map_batches(deser_mapper_direct, batch_format="pandas")
    values = [[s["one"], s["two"]] for s in casted_ds.take()]
    expected = list(zip(list(range(outer_dim)), arr))
    for v, e in zip(sorted(values), expected):
        np.testing.assert_equal(v, e)
def test_tensors_in_tables_parquet_bytes_manual_serde(ray_start_regular_shared,
                                                      tmp_path):
    """Raw tensor bytes stored in Parquet can be reassembled into an
    ArrowTensorArray column with a pyarrow-format map_batches."""
    outer_dim = 3
    inner_shape = (2, 2, 2)
    shape = (outer_dim, ) + inner_shape
    num_items = np.prod(np.array(shape))
    arr = np.arange(num_items).reshape(shape)
    df = pd.DataFrame({
        "one": list(range(outer_dim)),
        "two": [a.tobytes() for a in arr]
    })
    ds = ray.data.from_pandas([ray.put(df)])
    ds.write_parquet(str(tmp_path))
    ds = ray.data.read_parquet(str(tmp_path))
    tensor_col_name = "two"
    # Manually deserialize the tensor bytes and cast to a TensorArray.
    def np_deser_mapper(batch: pa.Table):
        # NOTE(Clark): We use NumPy to consolidate these potentially
        # non-contiguous buffers, and to do buffer bookkeeping in general.
        np_col = np.array([
            np.ndarray(inner_shape, buffer=buf.as_buffer(), dtype=arr.dtype)
            for buf in batch.column(tensor_col_name)
        ])
        return batch.set_column(
            batch._ensure_integer_index(tensor_col_name), tensor_col_name,
            ArrowTensorArray.from_numpy(np_col))
    ds = ds.map_batches(np_deser_mapper, batch_format="pyarrow")
    values = [[s["one"], s["two"]] for s in ds.take()]
    expected = list(zip(list(range(outer_dim)), arr))
    for v, e in zip(sorted(values), expected):
        np.testing.assert_equal(v, e)
def test_tensors_in_tables_parquet_bytes_manual_serde_udf(
        ray_start_regular_shared, tmp_path):
    """Same manual byte deserialization as above, but applied at read time
    via read_parquet's _block_udf hook, so the resulting dataset schema has
    the tensor extension type from the start."""
    outer_dim = 3
    inner_shape = (2, 2, 2)
    shape = (outer_dim, ) + inner_shape
    num_items = np.prod(np.array(shape))
    arr = np.arange(num_items).reshape(shape)
    tensor_col_name = "two"
    df = pd.DataFrame({
        "one": list(range(outer_dim)),
        tensor_col_name: [a.tobytes() for a in arr]
    })
    ds = ray.data.from_pandas([ray.put(df)])
    ds.write_parquet(str(tmp_path))
    # Manually deserialize the tensor bytes and cast to a TensorArray.
    def np_deser_udf(block: pa.Table):
        # NOTE(Clark): We use NumPy to consolidate these potentially
        # non-contiguous buffers, and to do buffer bookkeeping in general.
        np_col = np.array([
            np.ndarray(inner_shape, buffer=buf.as_buffer(), dtype=arr.dtype)
            for buf in block.column(tensor_col_name)
        ])
        return block.set_column(
            block._ensure_integer_index(tensor_col_name), tensor_col_name,
            ArrowTensorArray.from_numpy(np_col))
    ds = ray.data.read_parquet(str(tmp_path), _block_udf=np_deser_udf)
    assert isinstance(ds.schema().field_by_name(tensor_col_name).type,
                      ArrowTensorType)
    values = [[s["one"], s["two"]] for s in ds.take()]
    expected = list(zip(list(range(outer_dim)), arr))
    for v, e in zip(sorted(values), expected):
        np.testing.assert_equal(v, e)
def test_tensors_in_tables_parquet_bytes_manual_serde_col_schema(
        ray_start_regular_shared, tmp_path):
    """read_parquet's _tensor_column_schema handles byte deserialization
    automatically, and composes with a user _block_udf (which here bumps the
    tensor values by one)."""
    outer_dim = 3
    inner_shape = (2, 2, 2)
    shape = (outer_dim, ) + inner_shape
    num_items = np.prod(np.array(shape))
    arr = np.arange(num_items).reshape(shape)
    tensor_col_name = "two"
    df = pd.DataFrame({
        "one": list(range(outer_dim)),
        tensor_col_name: [a.tobytes() for a in arr]
    })
    ds = ray.data.from_pandas([ray.put(df)])
    ds.write_parquet(str(tmp_path))
    def _block_udf(block: pa.Table):
        df = block.to_pandas()
        df[tensor_col_name] += 1
        return pa.Table.from_pandas(df)
    ds = ray.data.read_parquet(
        str(tmp_path),
        _block_udf=_block_udf,
        _tensor_column_schema={tensor_col_name: (arr.dtype, inner_shape)})
    assert isinstance(ds.schema().field_by_name(tensor_col_name).type,
                      ArrowTensorType)
    values = [[s["one"], s["two"]] for s in ds.take()]
    # Values are arr + 1 because the block UDF ran on the decoded tensors.
    expected = list(zip(list(range(outer_dim)), arr + 1))
    for v, e in zip(sorted(values), expected):
        np.testing.assert_equal(v, e)
@pytest.mark.skip(
    reason=("Waiting for Arrow to support registering custom ExtensionType "
            "casting kernels. See "
            "https://issues.apache.org/jira/browse/ARROW-5890#"))
def test_tensors_in_tables_parquet_bytes_with_schema(ray_start_regular_shared,
                                                     tmp_path):
    """(Currently skipped.) Reading raw tensor bytes directly with an
    ArrowTensorType schema should decode the column without a manual UDF."""
    outer_dim = 3
    inner_shape = (2, 2, 2)
    shape = (outer_dim, ) + inner_shape
    num_items = np.prod(np.array(shape))
    arr = np.arange(num_items).reshape(shape)
    df = pd.DataFrame({
        "one": list(range(outer_dim)),
        "two": [a.tobytes() for a in arr]
    })
    ds = ray.data.from_pandas([ray.put(df)])
    ds.write_parquet(str(tmp_path))
    schema = pa.schema([
        ("one", pa.int32()),
        ("two", ArrowTensorType(inner_shape, pa.from_numpy_dtype(arr.dtype))),
    ])
    ds = ray.data.read_parquet(str(tmp_path), schema=schema)
    values = [[s["one"], s["two"]] for s in ds.take()]
    expected = list(zip(list(range(outer_dim)), arr))
    for v, e in zip(sorted(values), expected):
        np.testing.assert_equal(v, e)
@pytest.mark.skip(
    reason=("Waiting for pytorch to support tensor creation from objects that "
            "implement the __array__ interface. See "
            "https://github.com/pytorch/pytorch/issues/51156"))
@pytest.mark.parametrize("pipelined", [False, True])
def test_tensors_in_tables_to_torch(ray_start_regular_shared, pipelined):
    """(Currently skipped.) to_torch() on a dataset with TensorArray columns
    should yield batches whose concatenation matches the source frames."""
    import torch
    outer_dim = 3
    inner_shape = (2, 2, 2)
    shape = (outer_dim, ) + inner_shape
    num_items = np.prod(np.array(shape))
    arr = np.arange(num_items).reshape(shape)
    df1 = pd.DataFrame({
        "one": [1, 2, 3],
        "two": TensorArray(arr),
        "label": [1.0, 2.0, 3.0]
    })
    arr2 = np.arange(num_items, 2 * num_items).reshape(shape)
    df2 = pd.DataFrame({
        "one": [4, 5, 6],
        "two": TensorArray(arr2),
        "label": [4.0, 5.0, 6.0]
    })
    df = pd.concat([df1, df2])
    ds = ray.data.from_pandas([ray.put(df1), ray.put(df2)])
    ds = maybe_pipeline(ds, pipelined)
    torchd = ds.to_torch(label_column="label", batch_size=2)
    num_epochs = 2
    for _ in range(num_epochs):
        iterations = []
        for batch in iter(torchd):
            iterations.append(torch.cat((*batch[0], batch[1]), axis=1).numpy())
        combined_iterations = np.concatenate(iterations)
        assert np.array_equal(np.sort(df.values), np.sort(combined_iterations))
@pytest.mark.skip(
    reason=(
        "Waiting for Pandas DataFrame.values for extension arrays fix to be "
        "released. See https://github.com/pandas-dev/pandas/pull/43160"))
@pytest.mark.parametrize("pipelined", [False, True])
def test_tensors_in_tables_to_tf(ray_start_regular_shared, pipelined):
    """(Currently skipped.) to_tf() on a dataset with TensorArray columns
    should yield batches whose concatenation matches the source frames."""
    import tensorflow as tf
    outer_dim = 3
    inner_shape = (2, 2, 2)
    shape = (outer_dim, ) + inner_shape
    num_items = np.prod(np.array(shape))
    arr = np.arange(num_items).reshape(shape).astype(np.float)
    # TODO(Clark): Ensure that heterogeneous columns is properly supported
    # (tf.RaggedTensorSpec)
    df1 = pd.DataFrame({
        "one": TensorArray(arr),
        "two": TensorArray(arr),
        "label": TensorArray(arr),
    })
    arr2 = np.arange(num_items, 2 * num_items).reshape(shape).astype(np.float)
    df2 = pd.DataFrame({
        "one": TensorArray(arr2),
        "two": TensorArray(arr2),
        "label": TensorArray(arr2),
    })
    df = pd.concat([df1, df2])
    ds = ray.data.from_pandas([ray.put(df1), ray.put(df2)])
    ds = maybe_pipeline(ds, pipelined)
    tfd = ds.to_tf(
        label_column="label",
        output_signature=(tf.TensorSpec(
            shape=(None, 2, 2, 2, 2), dtype=tf.float32),
                          tf.TensorSpec(
                              shape=(None, 1, 2, 2, 2), dtype=tf.float32)))
    iterations = []
    for batch in tfd.as_numpy_iterator():
        iterations.append(np.concatenate((batch[0], batch[1]), axis=1))
    combined_iterations = np.concatenate(iterations)
    arr = np.array(
        [[np.asarray(v) for v in values] for values in df.to_numpy()])
    np.testing.assert_array_equal(arr, combined_iterations)
@pytest.mark.parametrize(
    "fs,data_path", [(None, lazy_fixture("local_path")),
                     (lazy_fixture("local_fs"), lazy_fixture("local_path")),
                     (lazy_fixture("s3_fs"), lazy_fixture("s3_path"))])
def test_numpy_roundtrip(ray_start_regular_shared, fs, data_path):
    """write_numpy/read_numpy round-trip across local and S3 filesystems."""
    ds = ray.data.range_tensor(10, parallelism=2)
    ds.write_numpy(data_path, filesystem=fs)
    ds = ray.data.read_numpy(data_path, filesystem=fs)
    # Row count is unknown (?) until the lazy read is executed.
    assert str(ds) == ("Dataset(num_blocks=2, num_rows=?, "
                       "schema=<Tensor: shape=(None, 1), dtype=int64>)")
    assert str(
        ds.take()) == ("[array([0]), array([1]), array([2]), "
                       "array([3]), array([4]), array([5]), array([6]), "
                       "array([7]), array([8]), array([9])]"), ds.take()
def test_numpy_read(ray_start_regular_shared, tmp_path):
    """read_numpy loads a .npy file written directly with np.save."""
    path = os.path.join(tmp_path, "test_np_dir")
    os.mkdir(path)
    np.save(
        os.path.join(path, "test.npy"), np.expand_dims(np.arange(0, 10), 1))
    ds = ray.data.read_numpy(path)
    # Row count is unknown (?) until the lazy read is executed.
    assert str(ds) == ("Dataset(num_blocks=1, num_rows=?, "
                       "schema=<Tensor: shape=(None, 1), dtype=int64>)")
    assert str(
        ds.take()) == ("[array([0]), array([1]), array([2]), "
                       "array([3]), array([4]), array([5]), array([6]), "
                       "array([7]), array([8]), array([9])]"), ds.take()
@pytest.mark.parametrize("fs,data_path,endpoint_url", [
    (None, lazy_fixture("local_path"), None),
    (lazy_fixture("local_fs"), lazy_fixture("local_path"), None),
    (lazy_fixture("s3_fs"), lazy_fixture("s3_path"), lazy_fixture("s3_server"))
])
def test_numpy_write(ray_start_regular_shared, fs, data_path, endpoint_url):
    """write_numpy emits one <uuid>_<block index>.npy file per block that
    np.load can read back, both locally and via S3."""
    ds = ray.data.range_tensor(10, parallelism=2)
    ds._set_uuid("data")
    ds.write_numpy(data_path, filesystem=fs)
    file_path1 = os.path.join(data_path, "data_000000.npy")
    file_path2 = os.path.join(data_path, "data_000001.npy")
    if endpoint_url is None:
        arr1 = np.load(file_path1)
        arr2 = np.load(file_path2)
    else:
        # S3 case: open the objects through s3fs before handing to np.load.
        from s3fs.core import S3FileSystem
        s3 = S3FileSystem(client_kwargs={"endpoint_url": endpoint_url})
        arr1 = np.load(s3.open(file_path1))
        arr2 = np.load(s3.open(file_path2))
    np.testing.assert_equal(np.concatenate((arr1, arr2)), ds.take())
def test_read_text(ray_start_regular_shared, tmp_path):
    """read_text yields one row per line across all files in a directory."""
    path = os.path.join(tmp_path, "test_text")
    os.mkdir(path)
    with open(os.path.join(path, "file1.txt"), "w") as f:
        f.write("hello\n")
        f.write("world")
    with open(os.path.join(path, "file2.txt"), "w") as f:
        f.write("goodbye")
    ds = ray.data.read_text(path)
    assert sorted(ds.take()) == ["goodbye", "hello", "world"]
@pytest.mark.parametrize("pipelined", [False, True])
def test_write_datasource(ray_start_regular_shared, pipelined):
    """write_datasource records successes/failures and writes all rows; a
    disabled sink surfaces the write error and bumps the failure count."""
    output = DummyOutputDatasource()
    ds0 = ray.data.range(10, parallelism=2)
    ds = maybe_pipeline(ds0, pipelined)
    ds.write_datasource(output)
    if pipelined:
        assert output.num_ok == 2
    else:
        assert output.num_ok == 1
    assert output.num_failed == 0
    assert ray.get(output.data_sink.get_rows_written.remote()) == 10
    ray.get(output.data_sink.set_enabled.remote(False))
    ds = maybe_pipeline(ds0, pipelined)
    with pytest.raises(ValueError):
        ds.write_datasource(output)
    # Counts from the first (successful) write are unchanged; only the
    # failure count increments.
    if pipelined:
        assert output.num_ok == 2
    else:
        assert output.num_ok == 1
    assert output.num_failed == 1
    assert ray.get(output.data_sink.get_rows_written.remote()) == 10
def test_empty_dataset(ray_start_regular_shared):
    """Empty datasets report zero count, no size, and an unknown schema."""
    ds = ray.data.range(0)
    assert ds.count() == 0
    assert ds.size_bytes() is None
    assert ds.schema() is None
    # Filtering everything out also yields an unknown schema.
    ds = ray.data.range(1)
    ds = ds.filter(lambda x: x > 1)
    assert str(ds) == \
        "Dataset(num_blocks=1, num_rows=0, schema=Unknown schema)"
def test_schema(ray_start_regular_shared):
    """str(ds) reflects block count, row count and schema through repartition,
    map and limit transformations."""
    ds = ray.data.range(10)
    ds2 = ray.data.range_arrow(10)
    ds3 = ds2.repartition(5)
    ds4 = ds3.map(lambda x: {"a": "hi", "b": 1.0}).limit(5).repartition(1)
    assert str(ds) == \
        "Dataset(num_blocks=10, num_rows=10, schema=<class 'int'>)"
    assert str(ds2) == \
        "Dataset(num_blocks=10, num_rows=10, schema={value: int64})"
    assert str(ds3) == \
        "Dataset(num_blocks=5, num_rows=10, schema={value: int64})"
    assert str(ds4) == \
        "Dataset(num_blocks=1, num_rows=5, schema={a: string, b: double})"
def test_lazy_loading_exponential_rampup(ray_start_regular_shared):
    """take() materializes blocks lazily, roughly doubling the number of
    computed blocks on each successive, larger read."""
    ds = ray.data.range(100, parallelism=20)
    assert len(ds._blocks._blocks) == 1
    # (rows requested, blocks expected to be computed afterwards)
    for limit, expected_blocks in [(10, 2), (20, 4), (30, 8), (50, 16),
                                   (100, 20)]:
        assert ds.take(limit) == list(range(limit))
        assert len(ds._blocks._blocks) == expected_blocks
def test_limit(ray_start_regular_shared):
    """limit(i) yields exactly the first i rows for every i in range."""
    ds = ray.data.range(100, parallelism=20)
    for count in range(100):
        assert ds.limit(count).take(200) == list(range(count))
def test_convert_types(ray_start_regular_shared):
    """map() can convert between simple and Arrow block types."""
    # Simple -> Arrow: dict records become ArrowRow rows.
    plain = ray.data.range(1)
    as_arrow = plain.map(lambda x: {"a": x})
    assert as_arrow.take() == [{"a": 0}]
    assert "ArrowRow" in as_arrow.map(lambda x: str(x)).take()[0]
    # Arrow -> simple, and Arrow -> Arrow with a tuple-valued column.
    arrow = ray.data.range_arrow(1)
    assert arrow.map(
        lambda x: "plain_{}".format(x["value"])).take() == ["plain_0"]
    assert arrow.map(
        lambda x: {"a": (x["value"], )}).take() == [{"a": (0, )}]
def test_from_items(ray_start_regular_shared):
    """from_items round-trips a plain Python list."""
    items = ["hello", "world"]
    assert ray.data.from_items(items).take() == items
def test_repartition(ray_start_regular_shared):
    """Repartitioning a simple dataset changes the block count while
    preserving the data (checked via sum())."""
    ds = ray.data.range(20, parallelism=10)
    assert ds.num_blocks() == 10
    assert ds.sum() == 190
    assert ds._block_sizes() == [2] * 10
    ds2 = ds.repartition(5)
    assert ds2.num_blocks() == 5
    assert ds2.sum() == 190
    # TODO: would be nice to re-distribute these more evenly
    # NOTE(review): the next line is a no-op expression statement (missing
    # `assert`) -- presumably a deliberately disabled assertion documenting
    # the current uneven distribution; confirm before enabling.
    ds2._block_sizes() == [10, 10, 0, 0, 0]
    ds3 = ds2.repartition(20)
    assert ds3.num_blocks() == 20
    assert ds3.sum() == 190
    # NOTE(review): no-op comparison as above; it also inspects ds2 where
    # ds3 may have been intended -- verify.
    ds2._block_sizes() == [2] * 10 + [0] * 10
    large = ray.data.range(10000, parallelism=10)
    large = large.repartition(20)
    assert large._block_sizes() == [500] * 20
def test_repartition_arrow(ray_start_regular_shared):
    """Repartitioning an Arrow dataset changes the block count while
    preserving the row count."""
    ds = ray.data.range_arrow(20, parallelism=10)
    assert ds.num_blocks() == 10
    assert ds.count() == 20
    assert ds._block_sizes() == [2] * 10
    ds2 = ds.repartition(5)
    assert ds2.num_blocks() == 5
    assert ds2.count() == 20
    # NOTE(review): no-op expression statement (missing `assert`);
    # presumably a disabled assertion -- confirm before enabling.
    ds2._block_sizes() == [10, 10, 0, 0, 0]
    ds3 = ds2.repartition(20)
    assert ds3.num_blocks() == 20
    assert ds3.count() == 20
    # NOTE(review): no-op comparison as above; also checks ds2 where ds3
    # may have been intended -- verify.
    ds2._block_sizes() == [2] * 10 + [0] * 10
    large = ray.data.range_arrow(10000, parallelism=10)
    large = large.repartition(20)
    assert large._block_sizes() == [500] * 20
def test_from_pandas(ray_start_regular_shared):
    """from_pandas preserves rows across multiple source DataFrames."""
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    ds = ray.data.from_pandas([ray.put(df1), ray.put(df2)])
    got = [(row["one"], row["two"]) for row in ds.take(6)]
    want = [(row.one, row.two)
            for _, row in pd.concat([df1, df2]).iterrows()]
    assert got == want
def test_from_numpy(ray_start_regular_shared):
    """from_numpy concatenates the source ndarrays in order."""
    first = np.expand_dims(np.arange(0, 4), 1)
    second = np.expand_dims(np.arange(4, 8), 1)
    ds = ray.data.from_numpy([ray.put(first), ray.put(second)])
    np.testing.assert_equal(
        np.concatenate((first, second)), np.array(ds.take(8)))
def test_from_arrow(ray_start_regular_shared):
    """from_arrow preserves rows across multiple source Tables."""
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    tables = [ray.put(pa.Table.from_pandas(frame)) for frame in (df1, df2)]
    ds = ray.data.from_arrow(tables)
    got = [(row["one"], row["two"]) for row in ds.take(6)]
    want = [(row.one, row.two)
            for _, row in pd.concat([df1, df2]).iterrows()]
    assert got == want
def test_to_pandas(ray_start_regular_shared):
    """to_pandas produces DataFrames equal to the source data."""
    num_rows = 5
    expected = pd.DataFrame({"value": list(range(num_rows))})
    ds = ray.data.range_arrow(num_rows)
    actual = pd.concat(ray.get(ds.to_pandas()), ignore_index=True)
    assert expected.equals(actual)
def test_to_numpy(ray_start_regular_shared):
    """to_numpy works for tensor, Arrow-table, and simple datasets."""
    column = np.expand_dims(np.arange(0, 10), 1)

    # Tensor Dataset
    tensor_ds = ray.data.range_tensor(10, parallelism=2)
    np.testing.assert_equal(
        np.concatenate(ray.get(tensor_ds.to_numpy())), column)

    # Table Dataset
    table_ds = ray.data.range_arrow(10)
    np.testing.assert_equal(
        np.concatenate(ray.get(table_ds.to_numpy())), column)

    # Simple Dataset
    simple_ds = ray.data.range(10)
    np.testing.assert_equal(
        np.concatenate(ray.get(simple_ds.to_numpy())), np.arange(0, 10))
def test_to_arrow(ray_start_regular_shared):
    """to_arrow covers both the zero-copy and the conversion path."""
    num_rows = 5

    def tables_to_df(ds):
        # Collect all Arrow tables and flatten them to one DataFrame.
        return pd.concat(
            [t.to_pandas() for t in ray.get(ds.to_arrow())],
            ignore_index=True)

    # Zero-copy: Arrow-backed dataset keeps its "value" column.
    expected = pd.DataFrame({"value": list(range(num_rows))})
    assert expected.equals(tables_to_df(ray.data.range_arrow(num_rows)))

    # Conversion: a simple dataset gets a default integer column name.
    expected = pd.DataFrame({0: list(range(num_rows))})
    assert expected.equals(tables_to_df(ray.data.range(num_rows)))
def test_get_blocks(ray_start_regular_shared):
    """get_blocks exposes one block ref per partition, covering all rows."""
    blocks = ray.data.range(10).get_blocks()
    assert len(blocks) == 10
    rows = []
    for block in ray.get(blocks):
        rows.extend(BlockAccessor.for_block(block).iter_rows())
    assert sorted(rows) == list(range(10)), rows
def test_pandas_roundtrip(ray_start_regular_shared, tmp_path):
    """from_pandas followed by to_pandas is lossless."""
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    ds = ray.data.from_pandas([ray.put(df1), ray.put(df2)])
    roundtripped = pd.concat(ray.get(ds.to_pandas()))
    assert pd.concat([df1, df2]).equals(roundtripped)
def test_fsspec_filesystem(ray_start_regular_shared, tmp_path):
    """Same as `test_parquet_read` but using a custom, fsspec filesystem.

    TODO (Alex): We should write a similar test with a mock PyArrow fs, but
    unfortunately pa.fs._MockFileSystem isn't serializable, so this may
    require some effort.
    """
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    paths = []
    for i, frame in enumerate((df1, df2), start=1):
        path = os.path.join(str(tmp_path), "test{}.parquet".format(i))
        pq.write_table(pa.Table.from_pandas(frame), path)
        paths.append(path)

    ds = ray.data.read_parquet(paths, filesystem=LocalFileSystem())

    # Test metadata-only parquet ops.
    assert len(ds._blocks._blocks) == 1
    assert ds.count() == 6

    out_path = os.path.join(tmp_path, "out")
    os.mkdir(out_path)
    ds._set_uuid("data")
    ds.write_parquet(out_path)

    written = pd.concat([
        pd.read_parquet(os.path.join(out_path, "data_000000.parquet")),
        pd.read_parquet(os.path.join(out_path, "data_000001.parquet")),
    ])
    assert written.equals(pd.concat([df1, df2]))
@pytest.mark.parametrize(
    "fs,data_path",
    [
        (None, lazy_fixture("local_path")),
        (lazy_fixture("local_fs"), lazy_fixture("local_path")),
        (lazy_fixture("s3_fs"), lazy_fixture("s3_path")),
        (lazy_fixture("s3_fs_with_space"), lazy_fixture("s3_path_with_space")
         )  # Path contains space.
    ])
def test_parquet_read(ray_start_regular_shared, fs, data_path):
    """read_parquet over local and S3 filesystems.

    Verifies that metadata-only operations (count, size_bytes, schema,
    input_files, str/repr) are served without reading data blocks, that
    take() then materializes the blocks, and that column projection works.
    """
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    table = pa.Table.from_pandas(df1)
    setup_data_path = _unwrap_protocol(data_path)
    path1 = os.path.join(setup_data_path, "test1.parquet")
    pq.write_table(table, path1, filesystem=fs)
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    table = pa.Table.from_pandas(df2)
    path2 = os.path.join(setup_data_path, "test2.parquet")
    pq.write_table(table, path2, filesystem=fs)
    ds = ray.data.read_parquet(data_path, filesystem=fs)
    # Test metadata-only parquet ops.
    # Only the initial (metadata) block has been computed so far.
    assert len(ds._blocks._blocks) == 1
    assert ds.count() == 6
    assert ds.size_bytes() > 0
    assert ds.schema() is not None
    input_files = ds.input_files()
    assert len(input_files) == 2, input_files
    assert "test1.parquet" in str(input_files)
    assert "test2.parquet" in str(input_files)
    assert str(ds) == \
        "Dataset(num_blocks=2, num_rows=6, " \
        "schema={one: int64, two: string})", ds
    assert repr(ds) == \
        "Dataset(num_blocks=2, num_rows=6, " \
        "schema={one: int64, two: string})", ds
    # str()/repr() above must not have materialized any data blocks.
    assert len(ds._blocks._blocks) == 1
    # Forces a data read.
    values = [[s["one"], s["two"]] for s in ds.take()]
    assert len(ds._blocks._blocks) == 2
    assert sorted(values) == [[1, "a"], [2, "b"], [3, "c"], [4, "e"], [5, "f"],
                              [6, "g"]]
    # Test column selection.
    ds = ray.data.read_parquet(data_path, columns=["one"], filesystem=fs)
    values = [s["one"] for s in ds.take()]
    assert sorted(values) == [1, 2, 3, 4, 5, 6]
    assert ds.schema().names == ["one"]
@pytest.mark.parametrize(
    "fs,data_path", [(None, lazy_fixture("local_path")),
                     (lazy_fixture("local_fs"), lazy_fixture("local_path")),
                     (lazy_fixture("s3_fs"), lazy_fixture("s3_path"))])
def test_parquet_read_partitioned(ray_start_regular_shared, fs, data_path):
    """read_parquet on a hive-partitioned dataset.

    The partition column ("one") comes back dictionary-encoded; metadata
    ops must not trigger a data read.
    """
    df = pd.DataFrame({
        "one": [1, 1, 1, 3, 3, 3],
        "two": ["a", "b", "c", "e", "f", "g"]
    })
    table = pa.Table.from_pandas(df)
    pq.write_to_dataset(
        table,
        root_path=_unwrap_protocol(data_path),
        partition_cols=["one"],
        filesystem=fs,
        use_legacy_dataset=False)
    ds = ray.data.read_parquet(data_path, filesystem=fs)
    # Test metadata-only parquet ops.
    assert len(ds._blocks._blocks) == 1
    assert ds.count() == 6
    assert ds.size_bytes() > 0
    assert ds.schema() is not None
    input_files = ds.input_files()
    assert len(input_files) == 2, input_files
    assert str(ds) == \
        "Dataset(num_blocks=2, num_rows=6, " \
        "schema={two: string, " \
        "one: dictionary<values=int32, indices=int32, ordered=0>})", ds
    assert repr(ds) == \
        "Dataset(num_blocks=2, num_rows=6, " \
        "schema={two: string, " \
        "one: dictionary<values=int32, indices=int32, ordered=0>})", ds
    # str()/repr() above must not have materialized any data blocks.
    assert len(ds._blocks._blocks) == 1
    # Forces a data read.
    values = [[s["one"], s["two"]] for s in ds.take()]
    assert len(ds._blocks._blocks) == 2
    assert sorted(values) == [[1, "a"], [1, "b"], [1, "c"], [3, "e"], [3, "f"],
                              [3, "g"]]
    # Test column selection.
    ds = ray.data.read_parquet(data_path, columns=["one"], filesystem=fs)
    values = [s["one"] for s in ds.take()]
    assert sorted(values) == [1, 1, 1, 3, 3, 3]
def test_parquet_read_partitioned_with_filter(ray_start_regular_shared,
                                              tmp_path):
    """Filters are applied when reading a hive-partitioned parquet dataset,
    regardless of how many read tasks (blocks) are used."""
    df = pd.DataFrame({
        "one": [1, 1, 1, 3, 3, 3],
        "two": ["a", "a", "b", "b", "c", "c"]
    })
    pq.write_to_dataset(
        pa.Table.from_pandas(df),
        root_path=str(tmp_path),
        partition_cols=["one"],
        use_legacy_dataset=False)

    # parallelism=1: 2 partitions, 1 empty partition, 1 block/read task.
    # parallelism=2: 2 block/read tasks, one of which ends up empty.
    for parallelism in (1, 2):
        ds = ray.data.read_parquet(
            str(tmp_path),
            parallelism=parallelism,
            filter=(pa.dataset.field("two") == "a"))
        rows = [[r["one"], r["two"]] for r in ds.take()]
        assert len(ds._blocks._blocks) == parallelism
        assert sorted(rows) == [[1, "a"], [1, "a"]]
def test_parquet_read_with_udf(ray_start_regular_shared, tmp_path):
    """read_parquet applies _block_udf to each block as it is read.

    The UDF increments the "one" column; checked at parallelism 1 and 2,
    and with a filter that leaves one read task's block empty.
    """
    one_data = list(range(6))
    df = pd.DataFrame({
        "one": one_data,
        "two": 2 * ["a"] + 2 * ["b"] + 2 * ["c"]
    })
    table = pa.Table.from_pandas(df)
    pq.write_to_dataset(
        table,
        root_path=str(tmp_path),
        partition_cols=["two"],
        use_legacy_dataset=False)

    def _block_udf(block: pa.Table):
        # Shift the "one" column by 1 on the read path.
        df = block.to_pandas()
        df["one"] += 1
        return pa.Table.from_pandas(df)

    # 1 block/read task
    ds = ray.data.read_parquet(
        str(tmp_path), parallelism=1, _block_udf=_block_udf)
    ones, twos = zip(*[[s["one"], s["two"]] for s in ds.take()])
    assert len(ds._blocks._blocks) == 1
    np.testing.assert_array_equal(sorted(ones), np.array(one_data) + 1)
    # 2 blocks/read tasks
    ds = ray.data.read_parquet(
        str(tmp_path), parallelism=2, _block_udf=_block_udf)
    ones, twos = zip(*[[s["one"], s["two"]] for s in ds.take()])
    assert len(ds._blocks._blocks) == 2
    np.testing.assert_array_equal(sorted(ones), np.array(one_data) + 1)
    # 2 blocks/read tasks, 1 empty block
    ds = ray.data.read_parquet(
        str(tmp_path),
        parallelism=2,
        filter=(pa.dataset.field("two") == "a"),
        _block_udf=_block_udf)
    ones, twos = zip(*[[s["one"], s["two"]] for s in ds.take()])
    assert len(ds._blocks._blocks) == 2
    np.testing.assert_array_equal(sorted(ones), np.array(one_data[:2]) + 1)
@pytest.mark.parametrize("fs,data_path,endpoint_url", [
    (None, lazy_fixture("local_path"), None),
    (lazy_fixture("local_fs"), lazy_fixture("local_path"), None),
    (lazy_fixture("s3_fs"), lazy_fixture("s3_path"), lazy_fixture("s3_server"))
])
def test_parquet_write(ray_start_regular_shared, fs, data_path, endpoint_url):
    """write_parquet writes one file per block into an existing directory."""
    storage_options = ({} if endpoint_url is None else dict(
        client_kwargs=dict(endpoint_url=endpoint_url)))
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    ds = ray.data.from_pandas([ray.put(df1), ray.put(df2)])
    path = os.path.join(data_path, "test_parquet_dir")
    if fs is None:
        os.mkdir(path)
    else:
        fs.create_dir(_unwrap_protocol(path))
    ds._set_uuid("data")
    ds.write_parquet(path, filesystem=fs)
    # One output file per input block, named data_<block index>.parquet.
    written = pd.concat([
        pd.read_parquet(
            os.path.join(path, "data_{:06d}.parquet".format(i)),
            storage_options=storage_options) for i in range(2)
    ])
    assert pd.concat([df1, df2]).equals(written)
    if fs is None:
        shutil.rmtree(path)
    else:
        fs.delete_dir(_unwrap_protocol(path))
@pytest.mark.parametrize("fs,data_path,endpoint_url", [
    (None, lazy_fixture("local_path"), None),
    (lazy_fixture("local_fs"), lazy_fixture("local_path"), None),
    (lazy_fixture("s3_fs"), lazy_fixture("s3_path"), lazy_fixture("s3_server"))
])
def test_parquet_write_create_dir(ray_start_regular_shared, fs, data_path,
                                  endpoint_url):
    """write_parquet creates the output directory when it is missing, and
    leaves pre-existing files in place when writing into it again."""
    if endpoint_url is None:
        storage_options = {}
    else:
        storage_options = dict(client_kwargs=dict(endpoint_url=endpoint_url))
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    df = pd.concat([df1, df2])
    ds = ray.data.from_pandas([ray.put(df1), ray.put(df2)])
    path = os.path.join(data_path, "test_parquet_dir")
    ds._set_uuid("data")
    # Note: the directory is NOT created beforehand here.
    ds.write_parquet(path, filesystem=fs)
    # Ensure that directory was created.
    if fs is None:
        assert os.path.isdir(path)
    else:
        assert fs.get_file_info(
            _unwrap_protocol(path)).type == pa.fs.FileType.Directory
    # Check that data was properly written to the directory.
    path1 = os.path.join(path, "data_000000.parquet")
    path2 = os.path.join(path, "data_000001.parquet")
    dfds = pd.concat([
        pd.read_parquet(path1, storage_options=storage_options),
        pd.read_parquet(path2, storage_options=storage_options)
    ])
    assert df.equals(dfds)
    # Ensure that directories that already exist are left alone and that the
    # attempted creation still succeeds.
    path3 = os.path.join(path, "data_0000002.parquet")
    path4 = os.path.join(path, "data_0000003.parquet")
    if fs is None:
        os.rename(path1, path3)
        os.rename(path2, path4)
    else:
        fs.move(_unwrap_protocol(path1), _unwrap_protocol(path3))
        fs.move(_unwrap_protocol(path2), _unwrap_protocol(path4))
    ds.write_parquet(path, filesystem=fs)
    # Check that the original Parquet files were left untouched and that the
    # new ones were added.
    dfds = pd.concat([
        pd.read_parquet(path1, storage_options=storage_options),
        pd.read_parquet(path2, storage_options=storage_options),
        pd.read_parquet(path3, storage_options=storage_options),
        pd.read_parquet(path4, storage_options=storage_options)
    ])
    assert pd.concat([df, df]).equals(dfds)
    if fs is None:
        shutil.rmtree(path)
    else:
        fs.delete_dir(_unwrap_protocol(path))
def test_parquet_write_with_udf(ray_start_regular_shared, tmp_path):
    """write_parquet applies _block_udf to each block before writing.

    The UDF increments the "one" column, so the files read back must equal
    the source data with "one" shifted by 1.
    """
    data_path = str(tmp_path)
    one_data = list(range(6))
    df1 = pd.DataFrame({"one": one_data[:3], "two": ["a", "b", "c"]})
    df2 = pd.DataFrame({"one": one_data[3:], "two": ["e", "f", "g"]})
    df = pd.concat([df1, df2])
    ds = ray.data.from_pandas([ray.put(df1), ray.put(df2)])

    def _block_udf(block: pa.Table):
        # Runs once per block on the write path.
        df = block.to_pandas()
        df["one"] += 1
        return pa.Table.from_pandas(df)

    # 2 write tasks
    ds._set_uuid("data")
    ds.write_parquet(data_path, _block_udf=_block_udf)
    path1 = os.path.join(data_path, "data_000000.parquet")
    path2 = os.path.join(data_path, "data_000001.parquet")
    dfds = pd.concat([pd.read_parquet(path1), pd.read_parquet(path2)])
    # Build the expected frame without mutating `df`: the original code
    # aliased it (`expected_df = df`) and then did `expected_df["one"] += 1`,
    # which modified the shared source frame in place.
    expected_df = df.assign(one=df["one"] + 1)
    assert expected_df.equals(dfds)
@pytest.mark.parametrize(
    "fs,data_path", [(None, lazy_fixture("local_path")),
                     (lazy_fixture("local_fs"), lazy_fixture("local_path")),
                     (lazy_fixture("s3_fs"), lazy_fixture("s3_path"))])
def test_parquet_roundtrip(ray_start_regular_shared, fs, data_path):
    """write_parquet then read_parquet is lossless, locally and on S3."""
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    ds = ray.data.from_pandas([ray.put(df1), ray.put(df2)])
    ds._set_uuid("data")
    path = os.path.join(data_path, "test_parquet_dir")
    if fs is None:
        os.mkdir(path)
    else:
        fs.create_dir(_unwrap_protocol(path))
    ds.write_parquet(path, filesystem=fs)
    ds2 = ray.data.read_parquet(path, parallelism=2, filesystem=fs)
    ds2df = pd.concat(ray.get(ds2.to_pandas()))
    assert pd.concat([df1, df2]).equals(ds2df)
    # Test metadata ops.
    for block, meta in zip(ds2._blocks, ds2._blocks.get_metadata()):
        # NOTE(review): the next line is a no-op expression statement
        # (missing `assert`); presumably a disabled sanity check -- confirm
        # the equality actually holds before enabling it.
        BlockAccessor.for_block(ray.get(block)).size_bytes() == meta.size_bytes
    if fs is None:
        shutil.rmtree(path)
    else:
        fs.delete_dir(_unwrap_protocol(path))
def test_convert_to_pyarrow(ray_start_regular_shared, tmp_path):
    """A simple dataset converts to Dask and round-trips through parquet."""
    ds = ray.data.range(100)
    assert ds.to_dask().sum().compute()[0] == 4950
    out_dir = os.path.join(tmp_path, "test_parquet_dir")
    os.mkdir(out_dir)
    ds.write_parquet(out_dir)
    assert ray.data.read_parquet(out_dir).count() == 100
def test_pyarrow(ray_start_regular_shared):
    """map / filter / flat_map operate on Arrow rows by column name."""
    ds = ray.data.range_arrow(5)
    shifted = ds.map(lambda row: {"b": row["value"] + 2})
    assert shifted.take() == [{"b": 2}, {"b": 3}, {"b": 4}, {"b": 5}, {"b": 6}]
    evens = shifted.filter(lambda row: row["b"] % 2 == 0)
    assert evens.take() == [{"b": 2}, {"b": 4}, {"b": 6}]
    fanned = ds.filter(lambda row: row["value"] == 0).flat_map(
        lambda row: [{"b": row["value"] + 2}, {"b": row["value"] + 20}])
    assert fanned.take() == [{"b": 2}, {"b": 20}]
def test_read_binary_files(ray_start_regular_shared):
    """read_binary_files yields each file's bytes as one record, in order."""
    with util.gen_bin_files(10) as (_, paths):
        ds = ray.data.read_binary_files(paths, parallelism=10)
        for i, item in enumerate(ds.iter_rows()):
            # Use a context manager so the handle is closed promptly; the
            # original `open(...).read()` leaked it until GC.
            with open(paths[i], "rb") as f:
                expected = f.read()
            assert expected == item
        # Test metadata ops.
        assert ds.count() == 10
        assert "bytes" in str(ds.schema()), ds
        assert "bytes" in str(ds), ds
def test_read_binary_files_with_fs(ray_start_regular_shared):
    """read_binary_files works with an explicit PyArrow filesystem."""
    with util.gen_bin_files(10) as (tempdir, paths):
        # All the paths are absolute, so we want the root file system.
        fs, _ = pa.fs.FileSystem.from_uri("/")
        ds = ray.data.read_binary_files(paths, filesystem=fs, parallelism=10)
        for i, item in enumerate(ds.iter_rows()):
            # Context manager avoids leaking the file handle (the original
            # `open(...).read()` left it to GC).
            with open(paths[i], "rb") as f:
                expected = f.read()
            assert expected == item
def test_read_binary_files_with_paths(ray_start_regular_shared):
    """With include_paths=True each record is a (path, bytes) pair."""
    with util.gen_bin_files(10) as (_, paths):
        ds = ray.data.read_binary_files(
            paths, include_paths=True, parallelism=10)
        for i, (path, item) in enumerate(ds.iter_rows()):
            assert path == paths[i]
            # Context manager avoids leaking the file handle (the original
            # `open(...).read()` left it to GC).
            with open(paths[i], "rb") as f:
                expected = f.read()
            assert expected == item
# TODO(Clark): Hitting S3 in CI is currently broken due to some AWS
# credentials issue, unskip this test once that's fixed or once ported to moto.
@pytest.mark.skip(reason="Shouldn't hit S3 in CI")
def test_read_binary_files_s3(ray_start_regular_shared):
    """Reading a binary object via s3:// matches an HTTPS fetch of it."""
    ds = ray.data.read_binary_files(["s3://anyscale-data/small-files/0.dat"])
    actual = ds.take(1).pop()
    expected = requests.get(
        "https://anyscale-data.s3.us-west-2.amazonaws.com/small-files/0.dat"
    ).content
    assert actual == expected
def test_iter_batches_basic(ray_start_regular_shared):
    """iter_batches: output formats, batch sizing, drop_last, prefetching.

    Uses four 3-row pandas blocks (12 rows total) so that the batch sizes
    below exercise block-aligned, block-spanning, and partial batches.
    """
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": [2, 3, 4]})
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": [5, 6, 7]})
    df3 = pd.DataFrame({"one": [7, 8, 9], "two": [8, 9, 10]})
    df4 = pd.DataFrame({"one": [10, 11, 12], "two": [11, 12, 13]})
    dfs = [df1, df2, df3, df4]
    ds = ray.data.from_pandas(
        [ray.put(df1), ray.put(df2),
         ray.put(df3), ray.put(df4)])
    # Default.
    for batch, df in zip(ds.iter_batches(batch_format="pandas"), dfs):
        assert isinstance(batch, pd.DataFrame)
        assert batch.equals(df)
    # pyarrow.Table format.
    for batch, df in zip(ds.iter_batches(batch_format="pyarrow"), dfs):
        assert isinstance(batch, pa.Table)
        assert batch.equals(pa.Table.from_pandas(df))
    # blocks format.
    for batch, df in zip(ds.iter_batches(batch_format="native"), dfs):
        assert batch.to_pandas().equals(df)
    # Batch size.
    batch_size = 2
    batches = list(
        ds.iter_batches(batch_size=batch_size, batch_format="pandas"))
    assert all(len(batch) == batch_size for batch in batches)
    assert (len(batches) == math.ceil(
        (len(df1) + len(df2) + len(df3) + len(df4)) / batch_size))
    assert pd.concat(
        batches, ignore_index=True).equals(pd.concat(dfs, ignore_index=True))
    # Batch size larger than block.
    batch_size = 4
    batches = list(
        ds.iter_batches(batch_size=batch_size, batch_format="pandas"))
    assert all(len(batch) == batch_size for batch in batches)
    assert (len(batches) == math.ceil(
        (len(df1) + len(df2) + len(df3) + len(df4)) / batch_size))
    assert pd.concat(
        batches, ignore_index=True).equals(pd.concat(dfs, ignore_index=True))
    # Batch size drop partial.
    batch_size = 5
    batches = list(
        ds.iter_batches(
            batch_size=batch_size, drop_last=True, batch_format="pandas"))
    assert all(len(batch) == batch_size for batch in batches)
    assert (len(batches) == (len(df1) + len(df2) + len(df3) + len(df4)) //
            batch_size)
    assert pd.concat(
        batches, ignore_index=True).equals(
            pd.concat(dfs, ignore_index=True)[:10])
    # Batch size don't drop partial.
    batch_size = 5
    batches = list(
        ds.iter_batches(
            batch_size=batch_size, drop_last=False, batch_format="pandas"))
    assert all(len(batch) == batch_size for batch in batches[:-1])
    assert (len(batches[-1]) == (len(df1) + len(df2) + len(df3) + len(df4)) %
            batch_size)
    assert (len(batches) == math.ceil(
        (len(df1) + len(df2) + len(df3) + len(df4)) / batch_size))
    assert pd.concat(
        batches, ignore_index=True).equals(pd.concat(dfs, ignore_index=True))
    # Prefetch.
    for batch, df in zip(
            ds.iter_batches(prefetch_blocks=1, batch_format="pandas"), dfs):
        assert isinstance(batch, pd.DataFrame)
        assert batch.equals(df)
    batch_size = 2
    batches = list(
        ds.iter_batches(
            prefetch_blocks=2, batch_size=batch_size, batch_format="pandas"))
    assert all(len(batch) == batch_size for batch in batches)
    assert (len(batches) == math.ceil(
        (len(df1) + len(df2) + len(df3) + len(df4)) / batch_size))
    assert pd.concat(
        batches, ignore_index=True).equals(pd.concat(dfs, ignore_index=True))
def test_iter_batches_grid(ray_start_regular_shared):
    """Randomized grid test for iter_batches slicing/combining semantics."""
    # Tests slicing, batch combining, and partial batch dropping logic over
    # a grid of dataset, batching, and dropping configurations.
    # Grid: num_blocks x num_rows_block_1 x ... x num_rows_block_N x
    # batch_size x drop_last
    seed = int(time.time())
    print(f"Seeding RNG for test_iter_batches_grid with: {seed}")
    random.seed(seed)
    # NOTE(review): the random draws below use np.random, which is NOT
    # seeded by random.seed -- the printed seed may not reproduce a
    # failing configuration; confirm and seed np.random too if intended.
    max_num_blocks = 20
    max_num_rows_per_block = 20
    num_blocks_samples = 3
    block_sizes_samples = 3
    batch_size_samples = 3
    for num_blocks in np.random.randint(
            1, max_num_blocks + 1, size=num_blocks_samples):
        block_sizes_list = [
            np.random.randint(1, max_num_rows_per_block + 1, size=num_blocks)
            for _ in range(block_sizes_samples)
        ]
        for block_sizes in block_sizes_list:
            # Create the dataset with the given block sizes.
            dfs = []
            running_size = 0
            for block_size in block_sizes:
                dfs.append(
                    pd.DataFrame({
                        "value": list(
                            range(running_size, running_size + block_size))
                    }))
                running_size += block_size
            num_rows = running_size
            ds = ray.data.from_pandas([ray.put(df) for df in dfs])
            for batch_size in np.random.randint(
                    1, num_rows + 1, size=batch_size_samples):
                for drop_last in (False, True):
                    batches = list(
                        ds.iter_batches(
                            batch_size=batch_size,
                            drop_last=drop_last,
                            batch_format="pandas"))
                    if num_rows % batch_size == 0 or not drop_last:
                        # Number of batches should be equal to
                        # num_rows / batch_size, rounded up.
                        assert len(batches) == math.ceil(num_rows / batch_size)
                        # Concatenated batches should equal the DataFrame
                        # representation of the entire dataset.
                        assert pd.concat(
                            batches, ignore_index=True).equals(
                                pd.concat(
                                    ray.get(ds.to_pandas()),
                                    ignore_index=True))
                    else:
                        # Number of batches should be equal to
                        # num_rows / batch_size, rounded down.
                        assert len(batches) == num_rows // batch_size
                        # Concatenated batches should equal the DataFrame
                        # representation of the dataset with the partial batch
                        # remainder sliced off.
                        assert pd.concat(
                            batches, ignore_index=True).equals(
                                pd.concat(
                                    ray.get(ds.to_pandas()), ignore_index=True)
                                [:batch_size * (num_rows // batch_size)])
                    if num_rows % batch_size == 0 or drop_last:
                        assert all(
                            len(batch) == batch_size for batch in batches)
                    else:
                        assert all(
                            len(batch) == batch_size for batch in batches[:-1])
                        assert len(batches[-1]) == num_rows % batch_size
def test_lazy_loading_iter_batches_exponential_rampup(
        ray_start_regular_shared):
    """iter_batches materializes blocks with an exponential ramp-up."""
    ds = ray.data.range(32, parallelism=8)
    for _, expected_blocks in zip(ds.iter_batches(),
                                  [1, 2, 4, 4, 8, 8, 8, 8]):
        assert len(ds._blocks._blocks) == expected_blocks
def test_map_batch(ray_start_regular_shared, tmp_path):
    """map_batches: input validation, pandas/pyarrow batch formats,
    batching over many blocks, and UDFs returning other block types."""
    # Test input validation
    ds = ray.data.range(5)
    with pytest.raises(ValueError):
        ds.map_batches(
            lambda x: x + 1, batch_format="pyarrow", batch_size=-1).take()

    # Test pandas
    df = pd.DataFrame({"one": [1, 2, 3], "two": [2, 3, 4]})
    table = pa.Table.from_pandas(df)
    pq.write_table(table, os.path.join(tmp_path, "test1.parquet"))
    ds = ray.data.read_parquet(str(tmp_path))
    ds_list = ds.map_batches(
        lambda df: df + 1, batch_size=1, batch_format="pandas").take()
    values = [s["one"] for s in ds_list]
    assert values == [2, 3, 4]
    values = [s["two"] for s in ds_list]
    assert values == [3, 4, 5]

    # Test Pyarrow. (The identity UDF's parameter was previously named
    # `pa`, shadowing the pyarrow module alias; renamed for clarity.)
    ds = ray.data.read_parquet(str(tmp_path))
    ds_list = ds.map_batches(
        lambda batch: batch, batch_size=1, batch_format="pyarrow").take()
    values = [s["one"] for s in ds_list]
    assert values == [1, 2, 3]
    values = [s["two"] for s in ds_list]
    assert values == [2, 3, 4]

    # Test batch
    size = 300
    ds = ray.data.range(size)
    ds_list = ds.map_batches(
        lambda df: df + 1, batch_size=17,
        batch_format="pandas").take(limit=size)
    for i in range(size):
        # The pandas column is "0", and it originally has rows from 0~299.
        # After the map batch, it should have 1~300.
        row = ds_list[i]
        assert row["0"] == i + 1
    assert ds.count() == 300

    # Test the lambda returns different types than the batch_format
    # pandas => list block
    ds = ray.data.read_parquet(str(tmp_path))
    ds_list = ds.map_batches(lambda df: [1], batch_size=1).take()
    assert ds_list == [1, 1, 1]
    assert ds.count() == 3

    # pyarrow => list block
    ds = ray.data.read_parquet(str(tmp_path))
    ds_list = ds.map_batches(
        lambda df: [1], batch_size=1, batch_format="pyarrow").take()
    assert ds_list == [1, 1, 1]
    assert ds.count() == 3

    # Test the wrong return value raises an exception. (The result was
    # previously bound to an unused variable; it is discarded here.)
    ds = ray.data.read_parquet(str(tmp_path))
    with pytest.raises(ValueError):
        ds.map_batches(
            lambda df: 1, batch_size=2, batch_format="pyarrow").take()
def test_union(ray_start_regular_shared):
    """union concatenates datasets; counts and sums add up accordingly."""
    base = ray.data.range(20, parallelism=10)

    # Test lazy union.
    unioned = base.union(base, base, base, base)
    assert unioned.num_blocks() == 50
    assert unioned.count() == 100
    assert unioned.sum() == 950

    doubled = unioned.union(unioned)
    assert doubled.count() == 200
    assert doubled.sum() == (950 * 2)

    # Test materialized union.
    items = ray.data.from_items([1, 2, 3, 4, 5])
    assert items.count() == 5
    items = items.union(items)
    assert items.count() == 10
    items = items.union(doubled)
    assert items.count() == 210
def test_split_at_indices(ray_start_regular_shared):
    """split_at_indices validates its input and splits at (possibly
    repeated or out-of-range) row indices."""
    ds = ray.data.range(10, parallelism=3)

    # Empty, negative, and non-monotonic index lists are rejected.
    for bad_indices in ([], [-1], [3, 1]):
        with pytest.raises(ValueError):
            ds.split_at_indices(bad_indices)

    cases = [
        ([5], [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]),
        ([2, 5], [[0, 1], [2, 3, 4], [5, 6, 7, 8, 9]]),
        ([2, 5, 5, 100], [[0, 1], [2, 3, 4], [], [5, 6, 7, 8, 9], []]),
        ([100], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], []]),
        ([0], [[], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]]),
    ]
    for indices, expected in cases:
        splits = ds.split_at_indices(indices)
        assert [split.take() for split in splits] == expected
def test_split(ray_start_regular_shared):
    """split(n) distributes blocks as evenly as possible, preserving data."""
    ds = ray.data.range(20, parallelism=10)
    assert ds.num_blocks() == 10
    assert ds.sum() == 190
    assert ds._block_sizes() == [2] * 10

    # Map from split count to the expected per-shard block counts.
    expected_block_counts = {
        5: [2] * 5,
        3: [4, 3, 3],
        1: [10],
        10: [1] * 10,
        11: [1] * 10 + [0],
    }
    for num_splits, block_counts in expected_block_counts.items():
        shards = ds.split(num_splits)
        assert block_counts == [len(shard._blocks) for shard in shards]
        assert 190 == sum(shard.sum() for shard in shards)
def test_split_hints(ray_start_regular_shared):
    """split(..., locality_hints) should co-locate blocks with the actors
    that live on the same node, while still splitting evenly."""

    @ray.remote
    class Actor(object):
        def __init__(self):
            pass

    def assert_split_assignment(block_node_ids, actor_node_ids,
                                expected_split_result):
        """Helper function to setup split hints test.

        Args:
            block_node_ids: a list of blocks with their locations. For
                example ["node1", "node2"] represents two blocks with
                "node1", "node2" as their location respectively.
            actor_node_ids: a list of actors with their locations. For
                example ["node1", "node2"] represents two actors with
                "node1", "node2" as their location respectively.
            expected_split_result: a list of allocation result, each entry
                in the list stores the block_index in the split dataset.
                For example, [[0, 1], [2]] represents the split result has
                two datasets, datasets[0] contains block 0 and 1; and
                datasets[1] contains block 2.
        """
        num_blocks = len(block_node_ids)
        ds = ray.data.range(num_blocks, parallelism=num_blocks)
        blocks = list(ds._blocks)
        assert len(block_node_ids) == len(blocks)
        actors = [Actor.remote() for i in range(len(actor_node_ids))]
        # Mock out both the object-location and actor-location lookups so
        # the test fully controls the placement the splitter sees.
        with patch("ray.experimental.get_object_locations") as location_mock:
            with patch("ray.state.actors") as state_mock:
                block_locations = {}
                for i, node_id in enumerate(block_node_ids):
                    if node_id:
                        block_locations[blocks[i]] = {"node_ids": [node_id]}
                location_mock.return_value = block_locations
                actor_state = {}
                for i, node_id in enumerate(actor_node_ids):
                    actor_state[actors[i]._actor_id.hex()] = {
                        "Address": {
                            "NodeID": node_id
                        }
                    }
                state_mock.return_value = actor_state
                datasets = ds.split(len(actors), locality_hints=actors)
                assert len(datasets) == len(actors)
                for i in range(len(actors)):
                    assert {blocks[j]
                            for j in expected_split_result[i]} == set(
                                datasets[i]._blocks)

    assert_split_assignment(["node2", "node1", "node1"], ["node1", "node2"],
                            [[1, 2], [0]])
    assert_split_assignment(["node1", "node1", "node1"], ["node1", "node2"],
                            [[2, 1], [0]])
    assert_split_assignment(["node2", "node2", None], ["node1", "node2"],
                            [[0, 2], [1]])
    assert_split_assignment(["node2", "node2", None], [None, None],
                            [[2, 1], [0]])
    assert_split_assignment(["n1", "n2", "n3", "n1", "n2"], ["n1", "n2"],
                            [[0, 2, 3], [1, 4]])
    assert_split_assignment(["n1", "n2"], ["n1", "n2", "n3"], [[0], [1], []])

    # perfect split:
    #
    # split 300 blocks
    #   with node_ids interleaving between "n0", "n1", "n2"
    #
    # to 3 actors
    #   with has node_id "n1", "n2", "n0"
    #
    # expect that block 1, 4, 7... are assigned to actor with node_id n1
    #             block 2, 5, 8... are assigned to actor with node_id n2
    #             block 0, 3, 6... are assigned to actor with node_id n0
    assert_split_assignment(
        ["n0", "n1", "n2"] * 100, ["n1", "n2", "n0"],
        [range(1, 300, 3),
         range(2, 300, 3),
         range(0, 300, 3)])

    # even split regardless of locality:
    #
    # split 301 blocks
    #   with block 0 to block 50 on "n0",
    #        block 51 to block 300 on "n1"
    #
    # to 3 actors
    #   with node_ids "n1", "n2", "n0"
    #
    # expect that block 200 to block 300 are assigned to actor with node_id n1
    #             block 100 to block 199 are assigned to actor with node_id n2
    #             block 0 to block 99 are assigned to actor with node_id n0
    assert_split_assignment(["n0"] * 50 + ["n1"] * 251, ["n1", "n2", "n0"], [
        range(200, 301),
        range(100, 200),
        list(range(0, 50)) + list(range(50, 100))
    ])
def test_from_dask(ray_start_regular_shared):
    """A Dask DataFrame converted to a Ray Dataset round-trips to pandas."""
    import dask.dataframe as dd
    source = pd.DataFrame({"one": list(range(100)), "two": list(range(100))})
    dask_df = dd.from_pandas(source, npartitions=10)
    dataset = ray.data.from_dask(dask_df)
    roundtripped = pd.concat(ray.get(dataset.to_pandas()))
    assert source.equals(roundtripped)
def test_to_dask(ray_start_regular_shared):
    """Dataset.to_dask produces a Dask DataFrame equal to the source data."""
    from ray.util.dask import ray_dask_get
    part_a = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    part_b = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    expected = pd.concat([part_a, part_b])
    dataset = ray.data.from_pandas([ray.put(part_a), ray.put(part_b)])
    dask_df = dataset.to_dask()
    # Compute once with the Dask-on-Ray scheduler passed explicitly...
    assert expected.equals(dask_df.compute(scheduler=ray_dask_get))
    # ...and once relying on the implicitly configured scheduler.
    assert expected.equals(dask_df.compute())
def test_from_modin(ray_start_regular_shared):
    """A Modin DataFrame converted to a Ray Dataset round-trips to pandas."""
    import modin.pandas as mopd
    source = pd.DataFrame({"one": list(range(100)), "two": list(range(100))})
    modin_frame = mopd.DataFrame(source)
    dataset = ray.data.from_modin(modin_frame)
    roundtripped = pd.concat(ray.get(dataset.to_pandas()))
    assert source.equals(roundtripped)
def test_to_modin(ray_start_regular_shared):
    """A Modin frame built via Dataset.to_modin matches one built directly."""
    import modin.pandas as mopd
    # Build one Modin frame straight from pandas and a second one through
    # a Ray Dataset created from the same pandas frame; they must agree.
    source = pd.DataFrame({"one": list(range(100)), "two": list(range(100))})
    direct = mopd.DataFrame(source)
    via_dataset = ray.data.from_pandas([source]).to_modin()
    assert direct.equals(via_dataset)
@pytest.mark.parametrize("pipelined", [False, True])
def test_to_tf(ray_start_regular_shared, pipelined):
    """Dataset.to_tf yields (features, label) batches that reassemble into
    the original frame, for both plain and pipelined datasets."""
    import tensorflow as tf
    df1 = pd.DataFrame({
        "one": [1, 2, 3],
        "two": [1.0, 2.0, 3.0],
        "label": [1.0, 2.0, 3.0]
    })
    df2 = pd.DataFrame({
        "one": [4, 5, 6],
        "two": [4.0, 5.0, 6.0],
        "label": [4.0, 5.0, 6.0]
    })
    df3 = pd.DataFrame({"one": [7, 8], "two": [7.0, 8.0], "label": [7.0, 8.0]})
    df = pd.concat([df1, df2, df3])
    ds = ray.data.from_pandas([ray.put(df1), ray.put(df2), ray.put(df3)])
    ds = maybe_pipeline(ds, pipelined)
    # "label" becomes the target tensor; the remaining two columns form the
    # (None, 2) feature tensor declared in the output signature.
    tfd = ds.to_tf(
        label_column="label",
        output_signature=(tf.TensorSpec(shape=(None, 2), dtype=tf.float32),
                          tf.TensorSpec(shape=(None), dtype=tf.float32)))
    iterations = []
    # Re-attach the label column to each feature batch, then compare the
    # concatenation of all batches against the original frame.
    for batch in tfd.as_numpy_iterator():
        iterations.append(
            np.concatenate((batch[0], batch[1].reshape(-1, 1)), axis=1))
    combined_iterations = np.concatenate(iterations)
    assert np.array_equal(df.values, combined_iterations)
def test_to_tf_feature_columns(ray_start_regular_shared):
    """Dataset.to_tf with feature_columns only emits the selected columns."""
    import tensorflow as tf
    df1 = pd.DataFrame({
        "one": [1, 2, 3],
        "two": [1.0, 2.0, 3.0],
        "label": [1.0, 2.0, 3.0]
    })
    df2 = pd.DataFrame({
        "one": [4, 5, 6],
        "two": [4.0, 5.0, 6.0],
        "label": [4.0, 5.0, 6.0]
    })
    df3 = pd.DataFrame({"one": [7, 8], "two": [7.0, 8.0], "label": [7.0, 8.0]})
    # Drop "two" from the expected frame since it is excluded below.
    df = pd.concat([df1, df2, df3]).drop("two", axis=1)
    ds = ray.data.from_pandas([ray.put(df1), ray.put(df2), ray.put(df3)])
    # Only "one" is selected as a feature, hence the (None, 1) signature.
    tfd = ds.to_tf(
        label_column="label",
        feature_columns=["one"],
        output_signature=(tf.TensorSpec(shape=(None, 1), dtype=tf.float32),
                          tf.TensorSpec(shape=(None), dtype=tf.float32)))
    iterations = []
    # Re-attach the label to each feature batch and compare the combined
    # batches against the expected (label-and-"one"-only) frame.
    for batch in tfd.as_numpy_iterator():
        iterations.append(
            np.concatenate((batch[0], batch[1].reshape(-1, 1)), axis=1))
    combined_iterations = np.concatenate(iterations)
    assert np.array_equal(df.values, combined_iterations)
@pytest.mark.parametrize("pipelined", [False, True])
def test_to_torch(ray_start_regular_shared, pipelined):
    """Dataset.to_torch yields batches whose concatenation matches the
    source frame, for both plain and pipelined datasets."""
    import torch
    df1 = pd.DataFrame({
        "one": [1, 2, 3],
        "two": [1.0, 2.0, 3.0],
        "label": [1.0, 2.0, 3.0]
    })
    df2 = pd.DataFrame({
        "one": [4, 5, 6],
        "two": [4.0, 5.0, 6.0],
        "label": [4.0, 5.0, 6.0]
    })
    df3 = pd.DataFrame({"one": [7, 8], "two": [7.0, 8.0], "label": [7.0, 8.0]})
    df = pd.concat([df1, df2, df3])
    ds = ray.data.from_pandas([ray.put(df1), ray.put(df2), ray.put(df3)])
    ds = maybe_pipeline(ds, pipelined)
    torchd = ds.to_torch(label_column="label", batch_size=3)
    # A non-pipelined dataset can be iterated more than once; iterate twice
    # to check re-iteration works.  Pipelines are single-epoch here.
    num_epochs = 1 if pipelined else 2
    for _ in range(num_epochs):
        iterations = []
        # batch[0] is a tuple of feature tensors; batch[1] is the label.
        for batch in iter(torchd):
            iterations.append(torch.cat((*batch[0], batch[1]), axis=1).numpy())
        combined_iterations = np.concatenate(iterations)
        # Row order is not guaranteed, so compare sorted values.
        assert np.array_equal(np.sort(df.values), np.sort(combined_iterations))
def test_to_torch_feature_columns(ray_start_regular_shared):
    """Dataset.to_torch with feature_columns only emits selected columns."""
    import torch
    df1 = pd.DataFrame({
        "one": [1, 2, 3],
        "two": [1.0, 2.0, 3.0],
        "label": [1.0, 2.0, 3.0]
    })
    df2 = pd.DataFrame({
        "one": [4, 5, 6],
        "two": [4.0, 5.0, 6.0],
        "label": [4.0, 5.0, 6.0]
    })
    df3 = pd.DataFrame({"one": [7, 8], "two": [7.0, 8.0], "label": [7.0, 8.0]})
    # Drop "two" from the expected frame since it is excluded below.
    df = pd.concat([df1, df2, df3]).drop("two", axis=1)
    ds = ray.data.from_pandas([ray.put(df1), ray.put(df2), ray.put(df3)])
    torchd = ds.to_torch(
        label_column="label", feature_columns=["one"], batch_size=3)
    iterations = []
    # batch[0] is a tuple of feature tensors; batch[1] is the label tensor.
    for batch in iter(torchd):
        iterations.append(torch.cat((*batch[0], batch[1]), axis=1).numpy())
    combined_iterations = np.concatenate(iterations)
    assert np.array_equal(df.values, combined_iterations)
@pytest.mark.parametrize("fs,data_path,endpoint_url", [
    (None, lazy_fixture("local_path"), None),
    (lazy_fixture("local_fs"), lazy_fixture("local_path"), None),
    (lazy_fixture("s3_fs"), lazy_fixture("s3_path"), lazy_fixture("s3_server"))
])
def test_json_read(ray_start_regular_shared, fs, data_path, endpoint_url):
    """read_json round-trips pandas frames written as JSON-lines.

    Covers a single file, multiple files with parallelism, whole
    directories, and mixed directory+file inputs, on the local
    filesystem and (via fixtures) S3.
    """
    if endpoint_url is None:
        storage_options = {}
    else:
        storage_options = dict(client_kwargs=dict(endpoint_url=endpoint_url))
    # Single file.
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    path1 = os.path.join(data_path, "test1.json")
    df1.to_json(
        path1, orient="records", lines=True, storage_options=storage_options)
    ds = ray.data.read_json(path1, filesystem=fs)
    dsdf = ray.get(ds.to_pandas())[0]
    assert df1.equals(dsdf)
    # Test metadata ops.
    assert ds.count() == 3
    assert ds.input_files() == [_unwrap_protocol(path1)]
    assert "{one: int64, two: string}" in str(ds), ds
    # Two files, parallelism=2.
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    path2 = os.path.join(data_path, "test2.json")
    df2.to_json(
        path2, orient="records", lines=True, storage_options=storage_options)
    ds = ray.data.read_json([path1, path2], parallelism=2, filesystem=fs)
    dsdf = pd.concat(ray.get(ds.to_pandas()))
    df = pd.concat([df1, df2])
    assert df.equals(dsdf)
    # Test metadata ops: each block's in-memory size must match its recorded
    # metadata.  (The comparison result was previously computed but
    # discarded, so this check never actually ran -- now asserted.)
    for block, meta in zip(ds._blocks, ds._blocks.get_metadata()):
        assert BlockAccessor.for_block(
            ray.get(block)).size_bytes() == meta.size_bytes
    # Three files, parallelism=2.
    df3 = pd.DataFrame({"one": [7, 8, 9], "two": ["h", "i", "j"]})
    path3 = os.path.join(data_path, "test3.json")
    df3.to_json(
        path3, orient="records", lines=True, storage_options=storage_options)
    ds = ray.data.read_json(
        [path1, path2, path3], parallelism=2, filesystem=fs)
    df = pd.concat([df1, df2, df3], ignore_index=True)
    dsdf = pd.concat(ray.get(ds.to_pandas()), ignore_index=True)
    assert df.equals(dsdf)
    # Directory, two files.
    path = os.path.join(data_path, "test_json_dir")
    if fs is None:
        os.mkdir(path)
    else:
        fs.create_dir(_unwrap_protocol(path))
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    path1 = os.path.join(path, "data0.json")
    df1.to_json(
        path1, orient="records", lines=True, storage_options=storage_options)
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    path2 = os.path.join(path, "data1.json")
    df2.to_json(
        path2, orient="records", lines=True, storage_options=storage_options)
    ds = ray.data.read_json(path, filesystem=fs)
    df = pd.concat([df1, df2])
    dsdf = pd.concat(ray.get(ds.to_pandas()))
    assert df.equals(dsdf)
    if fs is None:
        shutil.rmtree(path)
    else:
        fs.delete_dir(_unwrap_protocol(path))
    # Two directories, three files.
    path1 = os.path.join(data_path, "test_json_dir1")
    path2 = os.path.join(data_path, "test_json_dir2")
    if fs is None:
        os.mkdir(path1)
        os.mkdir(path2)
    else:
        fs.create_dir(_unwrap_protocol(path1))
        fs.create_dir(_unwrap_protocol(path2))
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    file_path1 = os.path.join(path1, "data0.json")
    df1.to_json(
        file_path1,
        orient="records",
        lines=True,
        storage_options=storage_options)
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    file_path2 = os.path.join(path2, "data1.json")
    df2.to_json(
        file_path2,
        orient="records",
        lines=True,
        storage_options=storage_options)
    df3 = pd.DataFrame({"one": [7, 8, 9], "two": ["h", "i", "j"]})
    file_path3 = os.path.join(path2, "data2.json")
    df3.to_json(
        file_path3,
        orient="records",
        lines=True,
        storage_options=storage_options)
    ds = ray.data.read_json([path1, path2], filesystem=fs)
    df = pd.concat([df1, df2, df3])
    dsdf = pd.concat(ray.get(ds.to_pandas()))
    assert df.equals(dsdf)
    if fs is None:
        shutil.rmtree(path1)
        shutil.rmtree(path2)
    else:
        fs.delete_dir(_unwrap_protocol(path1))
        fs.delete_dir(_unwrap_protocol(path2))
    # Directory and file, two files.
    dir_path = os.path.join(data_path, "test_json_dir")
    if fs is None:
        os.mkdir(dir_path)
    else:
        fs.create_dir(_unwrap_protocol(dir_path))
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    path1 = os.path.join(dir_path, "data0.json")
    df1.to_json(
        path1, orient="records", lines=True, storage_options=storage_options)
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    path2 = os.path.join(data_path, "data1.json")
    df2.to_json(
        path2, orient="records", lines=True, storage_options=storage_options)
    ds = ray.data.read_json([dir_path, path2], filesystem=fs)
    df = pd.concat([df1, df2])
    dsdf = pd.concat(ray.get(ds.to_pandas()))
    assert df.equals(dsdf)
    if fs is None:
        shutil.rmtree(dir_path)
    else:
        fs.delete_dir(_unwrap_protocol(dir_path))
def test_zipped_json_read(ray_start_regular_shared, tmp_path):
    """read_json transparently handles gzip-compressed JSON-lines files."""
    # Single file.
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    path1 = os.path.join(tmp_path, "test1.json.gz")
    df1.to_json(path1, compression="gzip", orient="records", lines=True)
    ds = ray.data.read_json(path1)
    assert df1.equals(ray.get(ds.to_pandas())[0])
    # Test metadata ops.
    assert ds.count() == 3
    assert ds.input_files() == [path1]
    # Two files, parallelism=2.
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    path2 = os.path.join(tmp_path, "test2.json.gz")
    df2.to_json(path2, compression="gzip", orient="records", lines=True)
    ds = ray.data.read_json([path1, path2], parallelism=2)
    dsdf = pd.concat(ray.get(ds.to_pandas()))
    assert pd.concat([df1, df2]).equals(dsdf)
    # Test metadata ops.
    # NOTE(review): the size_bytes() result is discarded, so this loop is
    # only a smoke check that size_bytes() runs without raising; it does
    # not compare against meta.size_bytes (which for compressed input may
    # differ from the in-memory block size -- TODO confirm intent).
    for block, meta in zip(ds._blocks, ds._blocks.get_metadata()):
        BlockAccessor.for_block(ray.get(block)).size_bytes()
    # Directory and file, two files.
    dir_path = os.path.join(tmp_path, "test_json_dir")
    os.mkdir(dir_path)
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    path1 = os.path.join(dir_path, "data0.json.gz")
    df1.to_json(path1, compression="gzip", orient="records", lines=True)
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    path2 = os.path.join(tmp_path, "data1.json.gz")
    df2.to_json(path2, compression="gzip", orient="records", lines=True)
    ds = ray.data.read_json([dir_path, path2])
    df = pd.concat([df1, df2])
    dsdf = pd.concat(ray.get(ds.to_pandas()))
    assert df.equals(dsdf)
    shutil.rmtree(dir_path)
@pytest.mark.parametrize("fs,data_path,endpoint_url", [
    (None, lazy_fixture("local_path"), None),
    (lazy_fixture("local_fs"), lazy_fixture("local_path"), None),
    (lazy_fixture("s3_fs"), lazy_fixture("s3_path"), lazy_fixture("s3_server"))
])
def test_json_write(ray_start_regular_shared, fs, data_path, endpoint_url):
    """write_json emits one <uuid>_<block-index>.json file per block."""
    if endpoint_url is None:
        storage_options = {}
    else:
        storage_options = dict(client_kwargs=dict(endpoint_url=endpoint_url))
    # Single block.
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    ds = ray.data.from_pandas([ray.put(df1)])
    # Pin the dataset UUID so the output filenames are deterministic.
    ds._set_uuid("data")
    ds.write_json(data_path, filesystem=fs)
    file_path = os.path.join(data_path, "data_000000.json")
    assert df1.equals(
        pd.read_json(
            file_path,
            orient="records",
            lines=True,
            storage_options=storage_options))
    # Two blocks.
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    ds = ray.data.from_pandas([ray.put(df1), ray.put(df2)])
    ds._set_uuid("data")
    ds.write_json(data_path, filesystem=fs)
    file_path2 = os.path.join(data_path, "data_000001.json")
    df = pd.concat([df1, df2])
    # Reading both written files back should reproduce the combined frame.
    ds_df = pd.concat([
        pd.read_json(
            file_path,
            orient="records",
            lines=True,
            storage_options=storage_options),
        pd.read_json(
            file_path2,
            orient="records",
            lines=True,
            storage_options=storage_options)
    ])
    assert df.equals(ds_df)
@pytest.mark.parametrize(
    "fs,data_path", [(None, lazy_fixture("local_path")),
                     (lazy_fixture("local_fs"), lazy_fixture("local_path")),
                     (lazy_fixture("s3_fs"), lazy_fixture("s3_path"))])
def test_json_roundtrip(ray_start_regular_shared, fs, data_path):
    """write_json followed by read_json must reproduce the original data."""
    # Single block.
    df = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    ds = ray.data.from_pandas([ray.put(df)])
    # Pin the dataset UUID so the output filenames are deterministic.
    ds._set_uuid("data")
    ds.write_json(data_path, filesystem=fs)
    file_path = os.path.join(data_path, "data_000000.json")
    ds2 = ray.data.read_json([file_path], filesystem=fs)
    ds2df = pd.concat(ray.get(ds2.to_pandas()))
    assert ds2df.equals(df)
    # Test metadata ops: each block's in-memory size must match its recorded
    # metadata.  (The comparison result was previously computed but
    # discarded, so this check never actually ran -- now asserted.)
    for block, meta in zip(ds2._blocks, ds2._blocks.get_metadata()):
        assert BlockAccessor.for_block(
            ray.get(block)).size_bytes() == meta.size_bytes
    if fs is None:
        os.remove(file_path)
    else:
        fs.delete_file(_unwrap_protocol(file_path))
    # Two blocks.
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    ds = ray.data.from_pandas([ray.put(df), ray.put(df2)])
    ds._set_uuid("data")
    ds.write_json(data_path, filesystem=fs)
    ds2 = ray.data.read_json(data_path, parallelism=2, filesystem=fs)
    ds2df = pd.concat(ray.get(ds2.to_pandas()))
    assert pd.concat([df, df2]).equals(ds2df)
    # Test metadata ops (same size check as above, now asserted).
    for block, meta in zip(ds2._blocks, ds2._blocks.get_metadata()):
        assert BlockAccessor.for_block(
            ray.get(block)).size_bytes() == meta.size_bytes
@pytest.mark.parametrize(
    "fs,data_path,endpoint_url",
    [(None, lazy_fixture("local_path"), None),
     (lazy_fixture("local_fs"), lazy_fixture("local_path"), None),
     (lazy_fixture("s3_fs"), lazy_fixture("s3_path"),
      lazy_fixture("s3_server")),
     (lazy_fixture("s3_fs_with_space"), lazy_fixture("s3_path_with_space"),
      lazy_fixture("s3_server"))])
def test_csv_read(ray_start_regular_shared, fs, data_path, endpoint_url):
    """read_csv round-trips pandas frames written as CSV.

    Covers a single file, multiple files with parallelism, whole
    directories, and mixed directory+file inputs, on the local
    filesystem and (via fixtures) S3 -- including an S3 path with a space.
    """
    if endpoint_url is None:
        storage_options = {}
    else:
        storage_options = dict(client_kwargs=dict(endpoint_url=endpoint_url))
    # Single file.
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    path1 = os.path.join(data_path, "test1.csv")
    df1.to_csv(path1, index=False, storage_options=storage_options)
    ds = ray.data.read_csv(path1, filesystem=fs)
    dsdf = ray.get(ds.to_pandas())[0]
    assert df1.equals(dsdf)
    # Test metadata ops.
    assert ds.count() == 3
    assert ds.input_files() == [_unwrap_protocol(path1)]
    assert "{one: int64, two: string}" in str(ds), ds
    # Two files, parallelism=2.
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    path2 = os.path.join(data_path, "test2.csv")
    df2.to_csv(path2, index=False, storage_options=storage_options)
    ds = ray.data.read_csv([path1, path2], parallelism=2, filesystem=fs)
    dsdf = pd.concat(ray.get(ds.to_pandas()))
    df = pd.concat([df1, df2])
    assert df.equals(dsdf)
    # Test metadata ops: each block's in-memory size must match its recorded
    # metadata.  (The comparison result was previously computed but
    # discarded, so this check never actually ran -- now asserted.)
    for block, meta in zip(ds._blocks, ds._blocks.get_metadata()):
        assert BlockAccessor.for_block(
            ray.get(block)).size_bytes() == meta.size_bytes
    # Three files, parallelism=2.
    df3 = pd.DataFrame({"one": [7, 8, 9], "two": ["h", "i", "j"]})
    path3 = os.path.join(data_path, "test3.csv")
    df3.to_csv(path3, index=False, storage_options=storage_options)
    ds = ray.data.read_csv([path1, path2, path3], parallelism=2, filesystem=fs)
    df = pd.concat([df1, df2, df3], ignore_index=True)
    dsdf = pd.concat(ray.get(ds.to_pandas()), ignore_index=True)
    assert df.equals(dsdf)
    # Directory, two files.
    path = os.path.join(data_path, "test_csv_dir")
    if fs is None:
        os.mkdir(path)
    else:
        fs.create_dir(_unwrap_protocol(path))
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    path1 = os.path.join(path, "data0.csv")
    df1.to_csv(path1, index=False, storage_options=storage_options)
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    path2 = os.path.join(path, "data1.csv")
    df2.to_csv(path2, index=False, storage_options=storage_options)
    ds = ray.data.read_csv(path, filesystem=fs)
    df = pd.concat([df1, df2])
    dsdf = pd.concat(ray.get(ds.to_pandas()))
    assert df.equals(dsdf)
    if fs is None:
        shutil.rmtree(path)
    else:
        fs.delete_dir(_unwrap_protocol(path))
    # Two directories, three files.
    path1 = os.path.join(data_path, "test_csv_dir1")
    path2 = os.path.join(data_path, "test_csv_dir2")
    if fs is None:
        os.mkdir(path1)
        os.mkdir(path2)
    else:
        fs.create_dir(_unwrap_protocol(path1))
        fs.create_dir(_unwrap_protocol(path2))
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    file_path1 = os.path.join(path1, "data0.csv")
    df1.to_csv(file_path1, index=False, storage_options=storage_options)
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    file_path2 = os.path.join(path2, "data1.csv")
    df2.to_csv(file_path2, index=False, storage_options=storage_options)
    df3 = pd.DataFrame({"one": [7, 8, 9], "two": ["h", "i", "j"]})
    file_path3 = os.path.join(path2, "data2.csv")
    df3.to_csv(file_path3, index=False, storage_options=storage_options)
    ds = ray.data.read_csv([path1, path2], filesystem=fs)
    df = pd.concat([df1, df2, df3])
    dsdf = pd.concat(ray.get(ds.to_pandas()))
    assert df.equals(dsdf)
    if fs is None:
        shutil.rmtree(path1)
        shutil.rmtree(path2)
    else:
        fs.delete_dir(_unwrap_protocol(path1))
        fs.delete_dir(_unwrap_protocol(path2))
    # Directory and file, two files.
    dir_path = os.path.join(data_path, "test_csv_dir")
    if fs is None:
        os.mkdir(dir_path)
    else:
        fs.create_dir(_unwrap_protocol(dir_path))
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    path1 = os.path.join(dir_path, "data0.csv")
    df1.to_csv(path1, index=False, storage_options=storage_options)
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    path2 = os.path.join(data_path, "data1.csv")
    df2.to_csv(path2, index=False, storage_options=storage_options)
    ds = ray.data.read_csv([dir_path, path2], filesystem=fs)
    df = pd.concat([df1, df2])
    dsdf = pd.concat(ray.get(ds.to_pandas()))
    assert df.equals(dsdf)
    if fs is None:
        shutil.rmtree(dir_path)
    else:
        fs.delete_dir(_unwrap_protocol(dir_path))
@pytest.mark.parametrize("fs,data_path,endpoint_url", [
    (None, lazy_fixture("local_path"), None),
    (lazy_fixture("local_fs"), lazy_fixture("local_path"), None),
    (lazy_fixture("s3_fs"), lazy_fixture("s3_path"), lazy_fixture("s3_server"))
])
def test_csv_write(ray_start_regular_shared, fs, data_path, endpoint_url):
    """write_csv emits one <uuid>_<block-index>.csv file per block."""
    if endpoint_url is None:
        storage_options = {}
    else:
        storage_options = dict(client_kwargs=dict(endpoint_url=endpoint_url))
    # Single block.
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    ds = ray.data.from_pandas([ray.put(df1)])
    # Pin the dataset UUID so the output filenames are deterministic.
    ds._set_uuid("data")
    ds.write_csv(data_path, filesystem=fs)
    file_path = os.path.join(data_path, "data_000000.csv")
    assert df1.equals(pd.read_csv(file_path, storage_options=storage_options))
    # Two blocks.
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    ds = ray.data.from_pandas([ray.put(df1), ray.put(df2)])
    ds._set_uuid("data")
    ds.write_csv(data_path, filesystem=fs)
    file_path2 = os.path.join(data_path, "data_000001.csv")
    df = pd.concat([df1, df2])
    # Reading both written files back should reproduce the combined frame.
    ds_df = pd.concat([
        pd.read_csv(file_path, storage_options=storage_options),
        pd.read_csv(file_path2, storage_options=storage_options)
    ])
    assert df.equals(ds_df)
@pytest.mark.parametrize(
    "fs,data_path", [(None, lazy_fixture("local_path")),
                     (lazy_fixture("local_fs"), lazy_fixture("local_path")),
                     (lazy_fixture("s3_fs"), lazy_fixture("s3_path"))])
def test_csv_roundtrip(ray_start_regular_shared, fs, data_path):
    """write_csv followed by read_csv must reproduce the original data."""
    # Single block.
    df = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    ds = ray.data.from_pandas([ray.put(df)])
    # Pin the dataset UUID so the output filenames are deterministic.
    ds._set_uuid("data")
    ds.write_csv(data_path, filesystem=fs)
    file_path = os.path.join(data_path, "data_000000.csv")
    ds2 = ray.data.read_csv([file_path], filesystem=fs)
    ds2df = pd.concat(ray.get(ds2.to_pandas()))
    assert ds2df.equals(df)
    # Test metadata ops: each block's in-memory size must match its recorded
    # metadata.  (The comparison result was previously computed but
    # discarded, so this check never actually ran -- now asserted.)
    for block, meta in zip(ds2._blocks, ds2._blocks.get_metadata()):
        assert BlockAccessor.for_block(
            ray.get(block)).size_bytes() == meta.size_bytes
    # Two blocks.
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    ds = ray.data.from_pandas([ray.put(df), ray.put(df2)])
    ds._set_uuid("data")
    ds.write_csv(data_path, filesystem=fs)
    ds2 = ray.data.read_csv(data_path, parallelism=2, filesystem=fs)
    ds2df = pd.concat(ray.get(ds2.to_pandas()))
    assert pd.concat([df, df2]).equals(ds2df)
    # Test metadata ops (same size check as above, now asserted).
    for block, meta in zip(ds2._blocks, ds2._blocks.get_metadata()):
        assert BlockAccessor.for_block(
            ray.get(block)).size_bytes() == meta.size_bytes
def test_sort_simple(ray_start_regular_shared):
    """Sort a shuffled dataset ascending, descending, and with a key fn."""
    num_items = 100
    parallelism = 4
    items = list(range(num_items))
    random.shuffle(items)
    ds = ray.data.from_items(items, parallelism=parallelism)
    ascending = list(range(num_items))
    descending = list(reversed(range(num_items)))
    assert ds.sort().take(num_items) == ascending
    assert ds.sort(descending=True).take(num_items) == descending
    # A negating key function is equivalent to a descending sort.
    assert ds.sort(key=lambda x: -x).take(num_items) == descending
@pytest.mark.parametrize("pipelined", [False, True])
def test_random_shuffle(shutdown_only, pipelined):
    """random_shuffle permutes rows; seeded shuffles are reproducible."""
    # NOTE: deliberately shadows the ``range`` builtin inside this test so
    # every dataset below is built the same way (optionally repeated as a
    # 2-epoch pipeline when ``pipelined`` is set).
    def range(n, parallelism=200):
        ds = ray.data.range(n, parallelism=parallelism)
        if pipelined:
            return ds.repeat(2)
        else:
            return ds
    # take(999) exceeds the row count, so it returns every row.
    # Unseeded shuffles of the same data should (almost surely) differ.
    r1 = range(100).random_shuffle().take(999)
    r2 = range(100).random_shuffle().take(999)
    assert r1 != r2, (r1, r2)
    r1 = range(100, parallelism=1).random_shuffle().take(999)
    r2 = range(100, parallelism=1).random_shuffle().take(999)
    assert r1 != r2, (r1, r2)
    r1 = range(100).random_shuffle(num_blocks=1).take(999)
    r2 = range(100).random_shuffle(num_blocks=1).take(999)
    assert r1 != r2, (r1, r2)
    # Equal seeds give equal orders; different seeds give different orders.
    r0 = range(100, parallelism=5).take(999)
    r1 = range(100, parallelism=5).random_shuffle(seed=0).take(999)
    r2 = range(100, parallelism=5).random_shuffle(seed=0).take(999)
    r3 = range(100, parallelism=5).random_shuffle(seed=12345).take(999)
    assert r1 == r2, (r1, r2)
    assert r1 != r0, (r1, r0)
    assert r1 != r3, (r1, r3)
    # Same seeded checks for Arrow-format datasets.
    r0 = ray.data.range_arrow(100, parallelism=5).take(999)
    r1 = ray.data.range_arrow(
        100, parallelism=5).random_shuffle(seed=0).take(999)
    r2 = ray.data.range_arrow(
        100, parallelism=5).random_shuffle(seed=0).take(999)
    assert r1 == r2, (r1, r2)
    assert r1 != r0, (r1, r0)
    # Test move: _move=True consumes the source blocks.
    ds = range(100, parallelism=2)
    r1 = ds.random_shuffle(_move=True).take(999)
    if pipelined:
        with pytest.raises(RuntimeError):
            ds = ds.map(lambda x: x).take(999)
    else:
        # Source dataset should be unusable if not pipelining.
        with pytest.raises(ValueError):
            ds = ds.map(lambda x: x).take(999)
    r2 = range(100).random_shuffle(_move=True).take(999)
    assert r1 != r2, (r1, r2)
def test_random_shuffle_spread(ray_start_cluster):
    """With _spread_resource_prefix, shuffle output lands on the matching
    "bar:*" nodes rather than all on the driver's node."""
    cluster = ray_start_cluster
    # max_direct_call_object_size=0 presumably forces all objects into the
    # object store so their node locations can be queried -- TODO confirm.
    cluster.add_node(
        resources={"foo": 100},
        _system_config={"max_direct_call_object_size": 0})
    cluster.add_node(resources={"bar:1": 100})
    cluster.add_node(resources={"bar:2": 100})
    # bar:3 has no CPUs, so no shuffle tasks should be scheduled there.
    cluster.add_node(resources={"bar:3": 100}, num_cpus=0)
    ray.init(cluster.address)
    @ray.remote
    def get_node_id():
        return ray.get_runtime_context().node_id.hex()
    node1_id = ray.get(get_node_id.options(resources={"bar:1": 1}).remote())
    node2_id = ray.get(get_node_id.options(resources={"bar:2": 1}).remote())
    ds = ray.data.range(
        100, parallelism=2).random_shuffle(_spread_resource_prefix="bar:")
    blocks = ds.get_blocks()
    ray.wait(blocks, num_returns=len(blocks), fetch_local=False)
    location_data = ray.experimental.get_object_locations(blocks)
    locations = []
    for block in blocks:
        locations.extend(location_data[block]["node_ids"])
    # Output blocks must live exactly on the two CPU-bearing "bar" nodes.
    assert set(locations) == {node1_id, node2_id}
def test_parquet_read_spread(ray_start_cluster, tmp_path):
    """With _spread_resource_prefix, parquet read tasks are spread across
    the matching "bar:*" nodes, one block per file."""
    cluster = ray_start_cluster
    # max_direct_call_object_size=0 presumably forces all objects into the
    # object store so their node locations can be queried -- TODO confirm.
    cluster.add_node(
        resources={"foo": 100},
        _system_config={"max_direct_call_object_size": 0})
    cluster.add_node(resources={"bar:1": 100})
    cluster.add_node(resources={"bar:2": 100})
    # bar:3 has no CPUs, so no read tasks should be scheduled there.
    cluster.add_node(resources={"bar:3": 100}, num_cpus=0)
    ray.init(cluster.address)
    @ray.remote
    def get_node_id():
        return ray.get_runtime_context().node_id.hex()
    node1_id = ray.get(get_node_id.options(resources={"bar:1": 1}).remote())
    node2_id = ray.get(get_node_id.options(resources={"bar:2": 1}).remote())
    data_path = str(tmp_path)
    df1 = pd.DataFrame({"one": list(range(100)), "two": list(range(100, 200))})
    path1 = os.path.join(data_path, "test1.parquet")
    df1.to_parquet(path1)
    df2 = pd.DataFrame({
        "one": list(range(300, 400)),
        "two": list(range(400, 500))
    })
    path2 = os.path.join(data_path, "test2.parquet")
    df2.to_parquet(path2)
    ds = ray.data.read_parquet(data_path, _spread_resource_prefix="bar:")
    # Force reads.
    blocks = ds.get_blocks()
    assert len(blocks) == 2
    ray.wait(blocks, num_returns=len(blocks), fetch_local=False)
    location_data = ray.experimental.get_object_locations(blocks)
    locations = []
    for block in blocks:
        locations.extend(location_data[block]["node_ids"])
    # Blocks must live exactly on the two CPU-bearing "bar" nodes.
    assert set(locations) == {node1_id, node2_id}
@pytest.mark.parametrize("num_items,parallelism", [(100, 1), (1000, 4)])
def test_sort_arrow(ray_start_regular, num_items, parallelism):
    """Sort an Arrow dataset by column, ascending and descending."""
    # Column "a" descends while "b" ascends, so sorting by one column
    # reverses the order implied by the other.
    a = list(reversed(range(num_items)))
    b = [f"{x:03}" for x in range(num_items)]
    # Partition the columns into `parallelism` shards of `shard` rows.
    # The while loop consumes all items (the final shard may be short),
    # so no trailing remainder handling is needed.
    shard = int(np.ceil(num_items / parallelism))
    offset = 0
    dfs = []
    while offset < num_items:
        dfs.append(
            pd.DataFrame({
                "a": a[offset:offset + shard],
                "b": b[offset:offset + shard]
            }))
        offset += shard
    ds = ray.data.from_pandas([ray.put(df) for df in dfs])
    def assert_sorted(sorted_ds, expected_rows):
        assert [tuple(row.values())
                for row in sorted_ds.iter_rows()] == list(expected_rows)
    assert_sorted(ds.sort(key="a"), zip(reversed(a), reversed(b)))
    assert_sorted(ds.sort(key="b"), zip(a, b))
    assert_sorted(ds.sort(key="a", descending=True), zip(a, b))
    assert_sorted(
        ds.sort(key=[("b", "descending")]), zip(reversed(a), reversed(b)))
def test_dataset_retry_exceptions(ray_start_regular, local_path):
    """Dataset read/write/map tasks retry on exceptions by default, and
    fail fast when retry_exceptions=False is passed."""
    # Remote counter so all copies of a datasource share failure state.
    @ray.remote
    class Counter:
        def __init__(self):
            self.value = 0
        def increment(self):
            self.value += 1
            return self.value
    # A CSV datasource whose first read/write call raises, and every
    # subsequent call succeeds -- i.e. it works only if Ray retries.
    class FlakyCSVDatasource(CSVDatasource):
        def __init__(self):
            self.counter = Counter.remote()
        def _read_file(self, f: "pa.NativeFile", path: str, **reader_args):
            count = self.counter.increment.remote()
            if ray.get(count) == 1:
                raise ValueError()
            else:
                return CSVDatasource._read_file(self, f, path, **reader_args)
        def _write_block(self, f: "pa.NativeFile", block: BlockAccessor,
                         **writer_args):
            count = self.counter.increment.remote()
            if ray.get(count) == 1:
                raise ValueError()
            else:
                CSVDatasource._write_block(self, f, block, **writer_args)
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    path1 = os.path.join(local_path, "test1.csv")
    df1.to_csv(path1, index=False, storage_options={})
    # Both the read and the write fail once each but succeed on retry.
    ds1 = ray.data.read_datasource(
        FlakyCSVDatasource(), parallelism=1, paths=path1)
    ds1.write_datasource(
        FlakyCSVDatasource(), path=local_path, dataset_uuid="data")
    assert df1.equals(
        pd.read_csv(
            os.path.join(local_path, "data_000000.csv"), storage_options={}))
    counter = Counter.remote()
    # Mapper that fails on its first invocation; the retried calls return
    # counter values 2, 3 and 4 (one per input row).
    def flaky_mapper(x):
        count = counter.increment.remote()
        if ray.get(count) == 1:
            raise ValueError()
        else:
            return ray.get(count)
    assert sorted(ds1.map(flaky_mapper).take()) == [2, 3, 4]
    # With retries disabled, the first failure propagates to the caller.
    with pytest.raises(ValueError):
        ray.data.read_datasource(
            FlakyCSVDatasource(),
            parallelism=1,
            paths=path1,
            ray_remote_args={
                "retry_exceptions": False
            }).take()
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`.
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| 37.174468 | 79 | 0.613459 |
f0c6931041b1acb38c9a7ad6d45b1bbdbb0d9435 | 2,139 | py | Python | easter_102.py | fpicot/adventofcode | a544c17bcd58600117c6503e43f63e9e1ce57759 | [
"MIT"
] | null | null | null | easter_102.py | fpicot/adventofcode | a544c17bcd58600117c6503e43f63e9e1ce57759 | [
"MIT"
] | null | null | null | easter_102.py | fpicot/adventofcode | a544c17bcd58600117c6503e43f63e9e1ce57759 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import re,weakref,itertools
_input = 'easter_10.input'
bots = []
inputbins = []
outputbins = {}
class robot:
    """A bot that holds up to two chips and forwards them when full.

    Every instance registers itself in the class-level ``bots`` registry
    (a WeakValueDictionary, so entries disappear once a bot is no longer
    referenced elsewhere).  When a second chip is loaded, the bot hands
    the lower chip to ``low_type``/``low_id`` and the higher chip to
    ``high_type``/``high_id`` -- each target being either another bot or
    a module-level ``outputbins`` entry.
    """
    bots = weakref.WeakValueDictionary()
    def __init__(self, name, low_type, low_id, high_type, high_id ):
        self.name = name
        self.low_type = low_type
        self.low_id = low_id
        self.high_type = high_type
        self.high_id = high_id
        # Chip slots; ``None`` means empty.  Chip values may legally be 0,
        # so emptiness must be tested with ``is None``, not truthiness
        # (the previous ``if not self.A`` silently overwrote a 0 chip).
        self.A = None
        self.B = None
        self.bots[self.name] = self
    def load_chip(self,value):
        if self.A is None:
            print("Bot {} is loading first chip ({})".format(self.name,value))
            self.A = value
        elif self.B is None:
            print("Bot {} is loading second chip ({})".format(self.name,value))
            self.B = value
            # A full bot immediately redistributes its chips.
            self.unload_chips()
        else:
            raise Exception("Bot {} is loading a third chip".format(self.name))
    def unload_chips(self):
        """Give the lower chip to the low target and the higher chip to the
        high target, then empty both slots."""
        print("Bot {} is giving chips {} and {}".format(self.name,self.A,self.B))
        low = min(self.A,self.B)
        high = max(self.A,self.B)
        if self.low_type == "bot":
            robot.bots[self.low_id].load_chip( low )
        else:
            outputbins[self.low_id] = low
        if self.high_type == "bot":
            robot.bots[self.high_id].load_chip( high )
        else:
            outputbins[self.high_id] = high
        self.A = None
        self.B = None
def read_file(input_file):
    """Parse the puzzle input, creating robots and queuing input chips.

    Lines come in two shapes:
      * ``bot N gives low to (bot|output) X and high to (bot|output) Y``
        -> instantiate a ``robot`` (which self-registers) and keep it
           alive in the module-level ``bots`` list.
      * ``value V goes to bot N`` -> append ``[N, V]`` to ``inputbins``.

    Raises an Exception for any line matching neither pattern.
    """
    # Raw strings so the \d escapes are regex syntax, not (invalid)
    # Python string escapes.
    _re_bot = re.compile(
        r'^bot (\d+) gives low to (output|bot) (\d+) and high to (output|bot) (\d+)$')
    _re_bin = re.compile(r'^value (\d+) goes to bot (\d+)$')
    with open(input_file) as f:
        for line in f:
            line = line.rstrip()
            bot = _re_bot.match(line)
            bin = _re_bin.match(line)
            if bot:
                bots.append( robot(bot[1],bot[2],bot[3],bot[4],bot[5]))
            elif bin:
                inputbins.append([bin[2],int(bin[1])])
            else:
                raise Exception ("Unable to interpret line : {}".format(line))
read_file(_input)
# Feed the starting chips to their bots, cycling until output bins 0, 1
# and 2 are all populated.  Each load may cascade further hand-offs via
# robot.load_chip/unload_chips.
for inputbin in itertools.cycle(inputbins):
    robot.bots[inputbin[0]].load_chip(inputbin[1])
    if (('0' in outputbins) and ('1' in outputbins) and ('2' in outputbins)):
        # Puzzle answer: product of the chips in the first three bins.
        print ('Resultat : {}'.format(outputbins['0']*outputbins['1']*outputbins['2']))
        quit()
| 28.905405 | 100 | 0.611968 |
a21d7ab90a5f93d6acfbbf54e831e6e142f7f37c | 21,803 | py | Python | openstack_dashboard/dashboards/project/images/images/tests.py | iwagner-inmar/horizon | c59343891fa5224c8650062299b478d4b8b951a8 | [
"Apache-2.0"
] | 1 | 2018-04-17T02:32:05.000Z | 2018-04-17T02:32:05.000Z | openstack_dashboard/dashboards/project/images/images/tests.py | Gandner/horizon | ee973271867a2298e470e547741f347175bf2def | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/dashboards/project/images/images/tests.py | Gandner/horizon | ee973271867a2298e470e547741f347175bf2def | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
from django.conf import settings
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.test.utils import override_settings
from django.urls import reverse
import mock
import six
from horizon import tables as horizon_tables
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.project.images.images import forms
from openstack_dashboard.dashboards.project.images.images import tables
IMAGES_INDEX_URL = reverse('horizon:project:images:index')
class CreateImageFormTests(test.ResetImageAPIVersionMixin, test.TestCase):
    """Validation and metadata tests for the project image-creation form."""
    @mock.patch.object(api.glance, 'image_list_detailed')
    def test_no_location_or_file(self, mock_image_list):
        """The form is invalid when neither an image URL nor a file is given."""
        mock_image_list.side_effect = [
            [self.images.list(), False, False],
            [self.images.list(), False, False]
        ]
        # The form queries kernel (aki) and ramdisk (ari) image choices.
        image_calls = [
            mock.call(test.IsA(dict), filters={'disk_format': 'aki'}),
            mock.call(test.IsA(dict), filters={'disk_format': 'ari'})
        ]
        post = {
            'name': u'Ubuntu 11.10',
            'source_type': u'file',
            'description': u'Login with admin/admin',
            'disk_format': u'qcow2',
            'architecture': u'x86-64',
            'minimum_disk': 15,
            'minimum_ram': 512,
            'is_public': 1}
        files = {}
        form = forms.CreateImageForm(post, files)
        self.assertFalse(form.is_valid())
        mock_image_list.assert_has_calls(image_calls)
    @override_settings(OPENSTACK_API_VERSIONS={'image': 1})
    def test_create_image_metadata_docker_v1(self):
        """With Glance v1, extra metadata is nested under 'properties'."""
        form_data = {
            'name': u'Docker image',
            'description': u'Docker image test',
            'source_type': u'url',
            'image_url': u'/',
            'disk_format': u'docker',
            'architecture': u'x86-64',
            'minimum_disk': 15,
            'minimum_ram': 512,
            'is_public': False,
            'protected': False,
            'is_copying': False
        }
        meta = forms.create_image_metadata(form_data)
        # A "docker" selection maps to raw disk format inside a docker
        # container format.
        self.assertEqual(meta['disk_format'], 'raw')
        self.assertEqual(meta['container_format'], 'docker')
        self.assertIn('properties', meta)
        self.assertNotIn('description', meta)
        self.assertNotIn('architecture', meta)
        self.assertEqual(meta['properties']['description'],
                         form_data['description'])
        self.assertEqual(meta['properties']['architecture'],
                         form_data['architecture'])
    def test_create_image_metadata_docker_v2(self):
        """With Glance v2 (the default), metadata stays at the top level."""
        form_data = {
            'name': u'Docker image',
            'description': u'Docker image test',
            'source_type': u'url',
            'image_url': u'/',
            'disk_format': u'docker',
            'architecture': u'x86-64',
            'minimum_disk': 15,
            'minimum_ram': 512,
            'is_public': False,
            'protected': False,
            'is_copying': False
        }
        meta = forms.create_image_metadata(form_data)
        self.assertEqual(meta['disk_format'], 'raw')
        self.assertEqual(meta['container_format'], 'docker')
        self.assertNotIn('properties', meta)
        self.assertEqual(meta['description'], form_data['description'])
        self.assertEqual(meta['architecture'], form_data['architecture'])
class UpdateImageFormTests(test.ResetImageAPIVersionMixin, test.TestCase):
    """Tests for ``forms.UpdateImageForm`` and the image update view."""

    def test_is_format_field_editable(self):
        """The disk_format field must not be rendered read-only."""
        form = forms.UpdateImageForm({})
        disk_format = form.fields['disk_format']
        self.assertFalse(disk_format.widget.attrs.get('readonly', False))

    @mock.patch.object(api.glance, 'image_get')
    def test_image_update(self, mock_image_get):
        """GET on the update view shows the current image attributes."""
        image = self.images.first()
        mock_image_get.return_value = image
        url = reverse('horizon:project:images:images:update',
                      args=[image.id])
        res = self.client.get(url)
        self.assertNoFormErrors(res)
        self.assertEqual(res.context['image'].disk_format,
                         image.disk_format)
        mock_image_get.assert_called_once_with(test.IsHttpRequest(),
                                               image.id)

    # NOTE: mock.patch decorators are applied bottom-up, so the bottom-most
    # patch (image_update) maps to the first mock argument.
    @override_settings(OPENSTACK_API_VERSIONS={'image': 1})
    @mock.patch.object(api.glance, 'image_get')
    @mock.patch.object(api.glance, 'image_update')
    def test_image_update_post_v1(self, mock_image_update, mock_image_get):
        """POST with glance v1 sends is_public and a 'properties' dict."""
        image = self.images.first()
        data = {
            'name': u'Ubuntu 11.10',
            'image_id': str(image.id),
            'description': u'Login with admin/admin',
            'source_type': u'url',
            'image_url': u'http://cloud-images.ubuntu.com/releases/'
                         u'oneiric/release/ubuntu-11.10-server-cloudimg'
                         u'-amd64-disk1.img',
            'disk_format': u'qcow2',
            'architecture': u'x86-64',
            'minimum_disk': 15,
            'minimum_ram': 512,
            'is_public': False,
            'protected': False,
            'method': 'UpdateImageForm'}
        mock_image_get.return_value = image
        mock_image_update.return_value = image
        url = reverse('horizon:project:images:images:update',
                      args=[image.id])
        res = self.client.post(url, data)
        self.assertNoFormErrors(res)
        # Successful update redirects away from the form.
        self.assertEqual(res.status_code, 302)
        mock_image_get.assert_called_once_with(test.IsHttpRequest(),
                                               str(image.id))
        mock_image_update.assert_called_once_with(
            test.IsHttpRequest(),
            image.id,
            is_public=data['is_public'],
            protected=data['protected'],
            disk_format=data['disk_format'],
            container_format="bare",
            name=data['name'],
            min_ram=data['minimum_ram'],
            min_disk=data['minimum_disk'],
            properties={
                'description': data['description'],
                'architecture': data['architecture']})

    @mock.patch.object(api.glance, 'image_get')
    @mock.patch.object(api.glance, 'image_update')
    def test_image_update_post_v2(self, mock_image_update, mock_image_get):
        """POST with glance v2 sends 'visibility' and flat metadata keys."""
        image = self.images.first()
        data = {
            'name': u'Ubuntu 11.10',
            'image_id': str(image.id),
            'description': u'Login with admin/admin',
            'source_type': u'url',
            'image_url': u'http://cloud-images.ubuntu.com/releases/'
                         u'oneiric/release/ubuntu-11.10-server-cloudimg'
                         u'-amd64-disk1.img',
            'disk_format': u'qcow2',
            'architecture': u'x86-64',
            'minimum_disk': 15,
            'minimum_ram': 512,
            'is_public': False,
            'protected': False,
            'method': 'UpdateImageForm'}
        mock_image_get.return_value = image
        mock_image_update.return_value = image
        url = reverse('horizon:project:images:images:update',
                      args=[image.id])
        res = self.client.post(url, data)
        self.assertNoFormErrors(res)
        self.assertEqual(res.status_code, 302)
        mock_image_get.assert_called_once_with(test.IsHttpRequest(),
                                               str(image.id))
        mock_image_update.assert_called_once_with(
            test.IsHttpRequest(),
            image.id,
            visibility='private',
            protected=data['protected'],
            disk_format=data['disk_format'],
            container_format="bare",
            name=data['name'],
            min_ram=data['minimum_ram'],
            min_disk=data['minimum_disk'],
            description=data['description'],
            architecture=data['architecture'])
class ImageViewTests(test.ResetImageAPIVersionMixin, test.TestCase):
    """Tests for the image create/detail/update views.

    The ``_test_*`` helpers carry their own ``mock.patch`` decorators; the
    ``test_*_v1`` variants pin glance to API v1 via ``override_settings``
    while the ``*_v2`` variants run against the default (v2) API.
    """

    @mock.patch.object(api.glance, 'image_list_detailed')
    def test_image_create_get(self, mock_image_list):
        """GET on the create view renders the create template."""
        mock_image_list.side_effect = [
            [self.images.list(), False, False],
            [self.images.list(), False, False]
        ]
        image_calls = [
            mock.call(test.IsHttpRequest(), filters={'disk_format': 'aki'}),
            mock.call(test.IsHttpRequest(), filters={'disk_format': 'ari'})
        ]
        url = reverse('horizon:project:images:images:create')
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'project/images/images/create.html')
        mock_image_list.assert_has_calls(image_calls)

    @override_settings(OPENSTACK_API_VERSIONS={'image': 1})
    def test_image_create_post_copy_from_v1(self):
        """URL source with copying enabled maps to the 'copy_from' API key."""
        data = {
            'source_type': u'url',
            'image_url': u'http://cloud-images.ubuntu.com/releases/'
                         u'oneiric/release/ubuntu-11.10-server-cloudimg'
                         u'-amd64-disk1.img',
            'is_copying': True}
        api_data = {'copy_from': data['image_url']}
        self._test_image_create(data, api_data)

    @override_settings(OPENSTACK_API_VERSIONS={'image': 1})
    def test_image_create_post_location_v1(self):
        """URL source without copying maps to the 'location' API key."""
        data = {
            'source_type': u'url',
            'image_url': u'http://cloud-images.ubuntu.com/releases/'
                         u'oneiric/release/ubuntu-11.10-server-cloudimg'
                         u'-amd64-disk1.img',
            'is_copying': False}
        api_data = {'location': data['image_url']}
        self._test_image_create(data, api_data)

    @override_settings(IMAGES_ALLOW_LOCATION=True)
    def test_image_create_post_location_v2(self):
        """v2 only allows URL sources when IMAGES_ALLOW_LOCATION is set."""
        data = {
            'source_type': u'url',
            'image_url': u'http://cloud-images.ubuntu.com/releases/'
                         u'oneiric/release/ubuntu-11.10-server-cloudimg'
                         u'-amd64-disk1.img'}
        api_data = {'location': data['image_url']}
        self._test_image_create(data, api_data)

    @override_settings(OPENSTACK_API_VERSIONS={'image': 1})
    def test_image_create_post_upload_v1(self):
        """Uploading a file passes the file object as the 'data' API key."""
        temp_file = tempfile.NamedTemporaryFile()
        temp_file.write(b'123')
        temp_file.flush()
        temp_file.seek(0)
        data = {'source_type': u'file',
                'image_file': temp_file}
        api_data = {'data': test.IsA(InMemoryUploadedFile)}
        self._test_image_create(data, api_data)

    def test_image_create_post_upload_v2(self):
        temp_file = tempfile.NamedTemporaryFile()
        temp_file.write(b'123')
        temp_file.flush()
        temp_file.seek(0)
        data = {'source_type': u'file',
                'image_file': temp_file}
        api_data = {'data': test.IsA(InMemoryUploadedFile)}
        self._test_image_create(data, api_data)

    @override_settings(OPENSTACK_API_VERSIONS={'image': 1})
    def test_image_create_post_with_kernel_ramdisk_v1(self):
        """Kernel/ramdisk ids in the form must not break image creation."""
        temp_file = tempfile.NamedTemporaryFile()
        temp_file.write(b'123')
        temp_file.flush()
        temp_file.seek(0)
        data = {
            'source_type': u'file',
            'image_file': temp_file,
            'kernel_id': '007e7d55-fe1e-4c5c-bf08-44b4a496482e',
            'ramdisk_id': '007e7d55-fe1e-4c5c-bf08-44b4a496482a'
        }
        api_data = {'data': test.IsA(InMemoryUploadedFile)}
        self._test_image_create(data, api_data)

    def test_image_create_post_with_kernel_ramdisk_v2(self):
        temp_file = tempfile.NamedTemporaryFile()
        temp_file.write(b'123')
        temp_file.flush()
        temp_file.seek(0)
        data = {
            'source_type': u'file',
            'image_file': temp_file,
            'kernel_id': '007e7d55-fe1e-4c5c-bf08-44b4a496482e',
            'ramdisk_id': '007e7d55-fe1e-4c5c-bf08-44b4a496482a'
        }
        api_data = {'data': test.IsA(InMemoryUploadedFile)}
        self._test_image_create(data, api_data)

    # Shared driver: POSTs the create form and verifies the exact kwargs
    # forwarded to glance. Patch decorators apply bottom-up, hence the
    # (mock_image_list, mock_image_create) argument order.
    @mock.patch.object(api.glance, 'image_create')
    @mock.patch.object(api.glance, 'image_list_detailed')
    def _test_image_create(self, extra_form_data, extra_api_data,
                           mock_image_list, mock_image_create):
        data = {
            'name': u'Ubuntu 11.10',
            'description': u'Login with admin/admin',
            'disk_format': u'qcow2',
            'architecture': u'x86-64',
            'minimum_disk': 15,
            'minimum_ram': 512,
            'is_public': True,
            'protected': False,
            'method': 'CreateImageForm'}
        data.update(extra_form_data)
        api_data = {'container_format': 'bare',
                    'disk_format': data['disk_format'],
                    'protected': False,
                    'min_disk': data['minimum_disk'],
                    'min_ram': data['minimum_ram'],
                    'name': data['name']}
        # v1 nests description/architecture into 'properties'; v2 sends
        # them as top-level keys and uses 'visibility' instead of is_public.
        if api.glance.VERSIONS.active < 2:
            api_data.update({'is_public': True,
                             'properties': {
                                 'description': data['description'],
                                 'architecture': data['architecture']}
                             })
        else:
            api_data.update({'visibility': 'public',
                             'description': data['description'],
                             'architecture': data['architecture']
                             })
        api_data.update(extra_api_data)
        mock_image_list.side_effect = [
            [self.images.list(), False, False],
            [self.images.list(), False, False]
        ]
        image_list_calls = [
            mock.call(test.IsHttpRequest(), filters={'disk_format': 'aki'}),
            mock.call(test.IsHttpRequest(), filters={'disk_format': 'ari'})
        ]
        mock_image_create.return_value = self.images.first()
        url = reverse('horizon:project:images:images:create')
        res = self.client.post(url, data)
        self.assertNoFormErrors(res)
        self.assertEqual(res.status_code, 302)
        mock_image_list.assert_has_calls(image_list_calls)
        mock_image_create.assert_called_once_with(test.IsHttpRequest(),
                                                  **api_data)

    @mock.patch.object(api.glance, 'image_get')
    def _test_image_detail_get(self, image, mock_image_get):
        """Shared driver for the detail view happy path."""
        mock_image_get.return_value = image
        res = self.client.get(reverse('horizon:project:images:images:detail',
                                      args=[image.id]))
        self.assertTemplateUsed(res,
                                'horizon/common/_detail.html')
        self.assertEqual(res.context['image'].name, image.name)
        self.assertEqual(res.context['image'].protected, image.protected)
        mock_image_get.assert_called_once_with(test.IsHttpRequest(),
                                               six.text_type(image.id))

    @override_settings(OPENSTACK_API_VERSIONS={'image': 1})
    def test_image_detail_get_v1(self):
        image = self.images.first()
        self._test_image_detail_get(image)

    def test_image_detail_get_v2(self):
        image = self.imagesV2.first()
        self._test_image_detail_get(image)

    @mock.patch.object(api.glance, 'image_get')
    def _test_image_detail_custom_props_get(self, image, mock_image_get):
        """Shared driver checking custom property rendering on detail."""
        mock_image_get.return_value = image
        res = self.client.get(reverse('horizon:project:images:images:detail',
                                      args=[image.id]))
        image_props = res.context['image_props']
        # Test description property not displayed
        image_keys = [prop[0] for prop in image_props]
        self.assertNotIn(('description'), image_keys)
        # Test custom properties are sorted
        self.assertLess(image_props.index(('bar', 'bar', 'bar val')),
                        image_props.index(('foo', 'foo', 'foo val')))
        # Test all custom properties appear in template
        self.assertContains(res, '<dt title="bar">bar</dt>')
        self.assertContains(res, '<dd>bar val</dd>')
        self.assertContains(res, '<dt title="foo">foo</dt>')
        self.assertContains(res, '<dd>foo val</dd>')
        mock_image_get.assert_called_once_with(test.IsHttpRequest(),
                                               six.text_type(image.id))

    @override_settings(OPENSTACK_API_VERSIONS={'image': 1})
    def test_image_detail_custom_props_get_v1(self):
        # Fixture index chosen for an image carrying custom properties.
        image = self.images.list()[8]
        self._test_image_detail_custom_props_get(image)

    def test_image_detail_custom_props_get_v2(self):
        image = self.imagesV2.list()[2]
        self._test_image_detail_custom_props_get(image)

    @mock.patch.object(api.glance, 'image_get')
    def _test_protected_image_detail_get(self, image, mock_image_get):
        """Shared driver for detail view of a protected image."""
        mock_image_get.return_value = image
        res = self.client.get(
            reverse('horizon:project:images:images:detail',
                    args=[image.id]))
        self.assertTemplateUsed(res,
                                'horizon/common/_detail.html')
        self.assertEqual(res.context['image'].protected, image.protected)
        mock_image_get.assert_called_once_with(test.IsHttpRequest(),
                                               six.text_type(image.id))

    @override_settings(OPENSTACK_API_VERSIONS={'image': 1})
    def test_protected_image_detail_get_v1(self):
        image = self.images.list()[2]
        self._test_protected_image_detail_get(image)

    def test_protected_image_detail_get_v2(self):
        image = self.imagesV2.list()[1]
        self._test_protected_image_detail_get(image)

    @mock.patch.object(api.glance, 'image_get')
    def test_image_detail_get_with_exception(self, mock_image_get):
        """A glance error on detail redirects back to the index page."""
        image = self.images.first()
        mock_image_get.side_effect = self.exceptions.glance
        url = reverse('horizon:project:images:images:detail',
                      args=[image.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, IMAGES_INDEX_URL)
        mock_image_get.assert_called_once_with(test.IsHttpRequest(),
                                               six.text_type(image.id))

    @mock.patch.object(api.glance, 'image_get')
    def test_image_update_get(self, mock_image_get):
        """GET on update pre-fills the form, incl. the is_public checkbox."""
        image = self.images.filter(is_public=True)[0]
        mock_image_get.return_value = image
        res = self.client.get(
            reverse('horizon:project:images:images:update',
                    args=[image.id]))
        self.assertTemplateUsed(res,
                                'project/images/images/_update.html')
        self.assertEqual(res.context['image'].name, image.name)
        # Bug 1076216 - is_public checkbox not being set correctly
        self.assertContains(res, "<input type='checkbox' id='id_public'"
                                 " name='public' checked='checked'>",
                            html=True,
                            msg_prefix="The is_public checkbox is not checked")
        mock_image_get.assert_called_once_with(test.IsHttpRequest(),
                                               six.text_type(image.id))
class OwnerFilterTests(test.TestCase):
    """Tests for the image owner filter action (``tables.OwnerFilter``)."""

    def setUp(self):
        super(OwnerFilterTests, self).setUp()
        self.table = mock.Mock(spec=horizon_tables.DataTable)
        self.table.request = self.request

    @override_settings(IMAGES_LIST_FILTER_TENANTS=[{'name': 'Official',
                                                    'tenant': 'officialtenant',
                                                    'icon': 'fa-check'}])
    def test_filter(self):
        """Each filter keyword selects the expected subset of images."""
        all_images = self.images.list()
        table = self.table
        self.filter_tenants = settings.IMAGES_LIST_FILTER_TENANTS
        filter_ = tables.OwnerFilter()
        images = filter_.filter(table, all_images, 'project')
        self.assertEqual(images, self._expected('project'))
        images = filter_.filter(table, all_images, 'public')
        self.assertEqual(images, self._expected('public'))
        images = filter_.filter(table, all_images, 'shared')
        self.assertEqual(images, self._expected('shared'))
        images = filter_.filter(table, all_images, 'officialtenant')
        self.assertEqual(images, self._expected('officialtenant'))

    def _expected(self, filter_string):
        """Reference implementation of the owner filter semantics."""
        my_tenant_id = self.request.user.tenant_id
        images = self.images.list()
        # BUG FIX: the original used ``special = map(...)``.  In Python 3
        # ``map`` returns a one-shot iterator, so the first ``in`` test in
        # the 'shared' branch exhausted it and every later membership test
        # silently saw an empty sequence.  Materialize it as a list.
        special = [t['tenant'] for t in self.filter_tenants]
        if filter_string == 'public':
            return [im for im in images if im.is_public]
        if filter_string == 'shared':
            return [im for im in images
                    if (not im.is_public and
                        im.owner != my_tenant_id and
                        im.owner not in special)]
        if filter_string == 'project':
            # 'project' means "owned by my own tenant".
            filter_string = my_tenant_id
        return [im for im in images if im.owner == filter_string]
| 38.933929 | 79 | 0.597303 |
1bbfa7c5bd1dec8409aa98a715160f8324bc4c58 | 3,459 | py | Python | nnetsauce/boosting/bst.py | Techtonique/nnetsauce | edd5344598877bd8017db0d8889de5a0f970d2ec | [
"BSD-3-Clause-Clear"
] | 4 | 2020-12-04T05:36:05.000Z | 2021-10-30T10:35:02.000Z | nnetsauce/boosting/bst.py | Techtonique/nnetsauce | edd5344598877bd8017db0d8889de5a0f970d2ec | [
"BSD-3-Clause-Clear"
] | 5 | 2020-09-25T04:26:04.000Z | 2022-02-26T12:07:29.000Z | nnetsauce/boosting/bst.py | Techtonique/nnetsauce | edd5344598877bd8017db0d8889de5a0f970d2ec | [
"BSD-3-Clause-Clear"
] | 2 | 2021-02-22T04:59:28.000Z | 2021-10-30T10:35:07.000Z | """Boosting model"""
# Authors: Thierry Moudiki
#
# License: BSD 3 Clear
from ..base import Base
class Boosting(Base):
    """Boosting ensemble model derived from class ``Base``.

    Parameters:

        obj: object
            base learner; any object that exposes ``fit`` (``obj.fit()``)
            and ``predict`` (``obj.predict()``) methods.

        n_estimators: int
            number of boosting rounds.

        learning_rate: float
            shrinkage factor applied at each boosting round.

        n_hidden_features: int
            number of nodes in the hidden layer.

        activation_name: str
            activation function: 'relu', 'tanh', 'sigmoid', 'prelu' or 'elu'.

        a: float
            hyperparameter used by the 'prelu' and 'elu' activations.

        nodes_sim: str
            simulation scheme for the hidden nodes: 'sobol', 'hammersley',
            'halton' or 'uniform'.

        bias: boolean
            whether the hidden layer includes a bias term (True) or not
            (False).

        dropout: float
            regularization parameter; (random) share of hidden nodes
            dropped out during training.

        direct_link: boolean
            whether the original predictors are fed into the model (True)
            alongside the hidden features, or not (False).

        n_clusters: int
            number of clusters for 'kmeans' or 'gmm' clustering (0 means
            no clustering).

        cluster_encode: bool
            how the cluster variable is encoded (default one-hot); when
            False, raw labels are used instead.

        type_clust: str
            clustering method: currently k-means ('kmeans') or Gaussian
            Mixture Model ('gmm').

        type_scaling: a tuple of 3 strings
            scaling methods for inputs, hidden layer and clustering
            respectively (when relevant); standardization ('std') or
            MinMax scaling ('minmax').

        col_sample: float
            share of covariates randomly chosen for training.

        row_sample: float
            share of rows chosen for training, by stratified bootstrapping.

        seed: int
            reproducibility seed, used when ``nodes_sim == 'uniform'``.

        backend: str
            "cpu" or "gpu" or "tpu"
    """

    # construct the object -----
    def __init__(
        self,
        obj,
        n_estimators=10,
        learning_rate=0.1,
        n_hidden_features=5,
        activation_name="relu",
        a=0.01,
        nodes_sim="sobol",
        bias=True,
        dropout=0,
        direct_link=True,
        n_clusters=2,
        cluster_encode=True,
        type_clust="kmeans",
        type_scaling=("std", "std", "std"),
        col_sample=1,
        row_sample=1,
        seed=123,
        backend="cpu",
    ):
        # Every shared hyperparameter is forwarded verbatim to the Base
        # constructor; only obj / n_estimators / learning_rate are
        # boosting-specific.
        base_kwargs = dict(
            n_hidden_features=n_hidden_features,
            activation_name=activation_name,
            a=a,
            nodes_sim=nodes_sim,
            bias=bias,
            dropout=dropout,
            direct_link=direct_link,
            n_clusters=n_clusters,
            cluster_encode=cluster_encode,
            type_clust=type_clust,
            type_scaling=type_scaling,
            col_sample=col_sample,
            row_sample=row_sample,
            seed=seed,
            backend=backend,
        )
        super().__init__(**base_kwargs)

        self.obj = obj
        self.n_estimators = n_estimators
        self.learning_rate = learning_rate
5ae94aa02d6adc4106aeb1eb2d06a4d9999e5852 | 1,625 | py | Python | hassio/host/control.py | InfernoEmbedded/hassio | a401bf0bb8d81d76924254d5b8c9c493ad343468 | [
"Apache-2.0"
] | null | null | null | hassio/host/control.py | InfernoEmbedded/hassio | a401bf0bb8d81d76924254d5b8c9c493ad343468 | [
"Apache-2.0"
] | null | null | null | hassio/host/control.py | InfernoEmbedded/hassio | a401bf0bb8d81d76924254d5b8c9c493ad343468 | [
"Apache-2.0"
] | null | null | null | """Power control for host."""
import logging
from ..coresys import CoreSysAttributes
from ..exceptions import HostNotSupportedError
_LOGGER = logging.getLogger(__name__)

# Flags identifying which D-Bus interface a control method depends on.
MANAGER = 'manager'
HOSTNAME = 'hostname'
class SystemControl(CoreSysAttributes):
    """Expose reboot, power-off and hostname controls of the host."""

    def __init__(self, coresys):
        """Keep a reference to the shared CoreSys instance."""
        self.coresys = coresys

    def _check_dbus(self, flag):
        """Ensure the D-Bus API required by *flag* is connected.

        Raises HostNotSupportedError when the needed connection is absent.
        """
        if flag == MANAGER:
            if self.sys_dbus.systemd.is_connected:
                return
        elif flag == HOSTNAME:
            if self.sys_dbus.hostname.is_connected:
                return
        _LOGGER.error("No %s D-Bus connection available", flag)
        raise HostNotSupportedError()

    async def reboot(self):
        """Shut the core down cleanly, then reboot the host via systemd."""
        self._check_dbus(MANAGER)

        _LOGGER.info("Initialize host reboot over systemd")
        try:
            await self.sys_core.shutdown()
        finally:
            # The reboot is issued even if the core shutdown raised.
            await self.sys_dbus.systemd.reboot()

    async def shutdown(self):
        """Shut the core down cleanly, then power off the host via systemd."""
        self._check_dbus(MANAGER)

        _LOGGER.info("Initialize host power off over systemd")
        try:
            await self.sys_core.shutdown()
        finally:
            # Power off even if the core shutdown raised.
            await self.sys_dbus.systemd.power_off()

    async def set_hostname(self, hostname):
        """Apply a new static hostname and refresh the cached host info."""
        self._check_dbus(HOSTNAME)

        _LOGGER.info("Set hostname %s", hostname)
        await self.sys_dbus.hostname.set_static_hostname(hostname)
        await self.sys_host.info.update()
| 28.508772 | 68 | 0.643692 |
62acef4b4d62945779f5929a7487e8b7fc877db4 | 666 | py | Python | Auth/migrations/0001_initial.py | m-krishnachaitanya/QCreator_Backend | 873716fb6c1a260aeb57b04197ad228145722933 | [
"MIT"
] | null | null | null | Auth/migrations/0001_initial.py | m-krishnachaitanya/QCreator_Backend | 873716fb6c1a260aeb57b04197ad228145722933 | [
"MIT"
] | null | null | null | Auth/migrations/0001_initial.py | m-krishnachaitanya/QCreator_Backend | 873716fb6c1a260aeb57b04197ad228145722933 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.8 on 2021-11-21 10:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``Login`` table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Login',
            fields=[
                # NOTE(review): AutoField with default=None is unusual —
                # the default is ignored for auto-incrementing PKs.
                ('userid', models.AutoField(default=None, primary_key=True, serialize=False)),
                ('username', models.CharField(max_length=50, unique=True)),
                # NOTE(review): stores the password as a plain CharField;
                # confirm hashing happens at the application layer.
                ('password', models.CharField(max_length=100)),
                ('email', models.CharField(max_length=100)),
                ('name', models.CharField(max_length=100)),
            ],
        ),
    ]
| 26.64 | 94 | 0.566066 |
d4eeadf0834bef53e7e4d41fd250c397fec4c2e5 | 6,296 | py | Python | onionfarm/onionrunner.py | j3ro3n/onionscan | 3176870f0f5f143322612d2770072e2d9d5b09c2 | [
"MIT"
] | null | null | null | onionfarm/onionrunner.py | j3ro3n/onionscan | 3176870f0f5f143322612d2770072e2d9d5b09c2 | [
"MIT"
] | null | null | null | onionfarm/onionrunner.py | j3ro3n/onionscan | 3176870f0f5f143322612d2770072e2d9d5b09c2 | [
"MIT"
] | null | null | null | from stem.control import Controller
from stem import Signal
from threading import Timer
from threading import Event
import codecs
import json
import os
import random
import subprocess
import sys
import time
from lib.helpers import get_master_list, get_tor_password
# Master set of onions seen so far this run: bytes entries loaded from disk
# plus str entries discovered while scanning (see add_new_onions()).
onions = []
# Working queue for the current session; popped by main(), re-filled by
# add_new_onions() and handle_timeout().
session_onions = []
# Cleared while a TOR identity switch is in progress; main() waits on it
# before popping the next onion.
identity_lock = Event()
identity_lock.set()
def get_onion_list():
    """Load the master onion list from disk.

    The path comes from ``get_master_list()`` (pyonionscan.cfg). Returns
    one entry per line, as bytes, because the file is read in binary mode.
    Exits the process when the file does not exist.
    """
    master_path = get_master_list()

    if not os.path.exists(master_path):
        print("[!] No onion master list. Download it!")
        sys.exit(0)

    with open(master_path, "rb") as handle:
        stored_onions = handle.read().splitlines()

    print(f"[*] Total onions for scanning: {len(stored_onions)}")
    return stored_onions
def store_onion(onion):
    """Append one onion address (a str) to the master list file."""
    master_path = get_master_list()

    print(f"[++] Storing {onion} in master list.")
    # codecs.open keeps the append binary-safe while encoding str input.
    with codecs.open(master_path, "ab", encoding="utf8") as handle:
        handle.write(onion + "\n")
def run_onionscan(onion):
    """Scan one onion with the external ``onionscan`` binary.

    A 5-minute watchdog timer invokes handle_timeout() to kill a stuck
    scan and rotate the TOR identity. Returns the scan's stdout (JSON
    report bytes), or None when the watchdog fired.
    """
    print(f"[*] Onionscanning {onion}")
    scanner = subprocess.Popen(
        ["onionscan", "-webport=0", "--jsonReport", "--simpleReport=false",
         onion],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    watchdog = Timer(300, handle_timeout, args=[scanner, onion])
    watchdog.start()

    # Blocks until the scanner exits (or the watchdog kills it).
    stdout = scanner.communicate()[0]

    if not watchdog.is_alive():
        # Timer already fired: the process was killed by handle_timeout().
        print("[!!!] Process timed out!")
        return None

    # Valid results arrived in time; disarm the watchdog.
    watchdog.cancel()
    return stdout
def handle_timeout(process, onion):
    """Watchdog callback for a stuck onionscan.

    Kills the scanner process, pauses the main loop while a fresh TOR
    identity is requested, then re-queues the onion for a later retry.

    Bug fix: the original declared ``global sessions_onions`` and
    ``global indentity_lock`` -- both misspelled, so the statements
    referred to nonexistent names instead of the real module globals.
    """
    global session_onions
    global identity_lock

    # Halt the main thread while we grab a new identity.
    identity_lock.clear()

    # Kill the onionscan process; it may already have exited.
    try:
        process.kill()
        print("[!!!] Killed the onionscan process.")
    except Exception:
        pass

    # Now we switch TOR identities to make sure we have a good connection.
    with Controller.from_port(port=9051) as torcontrol:
        tor_password = get_tor_password()
        # authenticate to our local TOR Controller
        torcontrol.authenticate(tor_password)
        # send the signal for a new identity
        torcontrol.signal(Signal.NEWNYM)
        # wait for the new identity to be initialized
        time.sleep(torcontrol.get_newnym_wait())
        print("[!!!] Switched TOR identities.")

    # Push the onion back on to the list for a retry this session.
    session_onions.append(onion)
    random.shuffle(session_onions)

    # Allow the main thread to resume executing.
    identity_lock.set()
    return
def process_results(onion, json_response):
    """Persist a scan report and harvest newly discovered onions.

    Writes the raw JSON bytes to ``onionscan_results/<onion>.json`` and
    feeds any linked/related onions found in the identifier report into
    add_new_onions().

    Cleanup: the original wrapped the decoded JSON in a redundant f-string
    and embedded a string literal inside an f-string for the output path;
    the three identical None-checks are now a single loop.
    """
    global onions
    global session_onions

    # create our output folder if necessary
    if not os.path.exists("onionscan_results"):
        os.mkdir("onionscan_results")

    # write out the raw JSON results of the scan
    with open(f"onionscan_results/{onion}.json", "wb") as fd:
        fd.write(json_response)

    # utf-8-sig strips a BOM if onionscan emitted one
    scan_result = json.loads(json_response.decode('utf-8-sig'))

    # look for additional .onion domains to add to our scan list
    report = scan_result['identifierReport']
    for key in ('linkedOnions', 'relatedOnionDomains', 'relatedOnionServices'):
        if report[key] is not None:
            add_new_onions(report[key])
    return
def add_new_onions(new_onion_list):
    """Merge freshly discovered onions into the global scan queues.

    Every previously unseen address ending in ``.onion`` is appended to
    the in-memory lists, persisted via store_onion(), and the session
    queue is reshuffled.
    """
    global onions
    global session_onions

    for candidate in new_onion_list:
        if candidate in onions or not candidate.endswith(".onion"):
            continue
        print(f"[++] Discovered new .onion => {candidate}")
        onions.append(candidate)
        session_onions.append(candidate)
        random.shuffle(session_onions)
        store_onion(candidate)
    return
def main():
    """Drive the scan: load the master list and process every onion.

    Bug fix: the original bound ``onions`` and ``session_onions`` as
    locals, shadowing the module-level globals that add_new_onions() and
    handle_timeout() mutate -- so onions discovered during a scan, and
    onions re-queued after a timeout, were silently never scanned.
    Declaring them ``global`` restores the intended shared state; because
    runtime-discovered entries are already str (file entries are bytes),
    decoding is now guarded as well.
    """
    global onions
    global session_onions

    # get a list of onions to process
    onions = get_onion_list()

    # randomize the list a bit
    random.shuffle(onions)
    session_onions = list(onions)

    count = 0

    while count < len(onions):
        # If the identity-switch event is cleared we will halt here,
        # otherwise we continue executing.
        identity_lock.wait()

        # grab a new onion to scan
        print(f"[*] Running {count:d} of {len(onions):d}.")
        onion = session_onions.pop()
        # entries loaded from disk are bytes; discovered/re-queued ones
        # are already str
        if isinstance(onion, bytes):
            onion = onion.decode('utf8')

        # test to see if we have already retrieved results for this onion
        if os.path.exists(f"onionscan_results/{onion}.json"):
            print(f"[!] Already retrieved {onion}. Skipping.")
            count += 1
            continue

        # run the onion scan
        result = run_onionscan(onion)

        # process the results
        if result is not None:
            if len(result):
                process_results(onion, result)

        count += 1
if __name__ == "__main__":
main() | 27.614035 | 117 | 0.661055 |
005dbe582a6dbbfed5229cd0756e0f852da66a77 | 4,601 | py | Python | picbiz/core/controllers/collect.py | scottyadean/picbiz | e14bb7456936ee43d8124279456f059affd0fa16 | [
"MIT"
] | null | null | null | picbiz/core/controllers/collect.py | scottyadean/picbiz | e14bb7456936ee43d8124279456f059affd0fa16 | [
"MIT"
] | null | null | null | picbiz/core/controllers/collect.py | scottyadean/picbiz | e14bb7456936ee43d8124279456f059affd0fa16 | [
"MIT"
] | null | null | null | import os
import json
import glob
import uuid;
import zipfile
from PIL import Image, ExifTags
from datetime import datetime
from django.conf import settings
from django.contrib.auth import authenticate, login, logout
from core.lib.controller import Controller, login_required
from core.lib.date_helpers import fix_date, get_date_from_ts, format_date
from core.lib.dict_helpers import index_by_dict
from core.lib.img_helpers import thumb_nail
from core.models.directory import Directory
from core.models.location import Location
from core.models.section import Section
from core.models.manifest import Manifest
class Collect():
actions = ['index', 'read', 'create', 'update', 'manifest' ]
@login_required
def router(req, **kwargs):
return Controller.route(Collect, Collect.actions, req, kwargs)
def index(req):
""" List the dir. to process """
current_dir = req.POST.get('current_dir', settings.UPLOAD_DIR)
dirs = Collect.get_dir_list(current_dir)
root = Collect.get_dir_list(settings.UPLOAD_ROOT, True)
dir_list = []
for d in dirs:
path = "{}{}".format(current_dir, d)
is_dir_in_db = Directory.objects.filter(**{'full_path':path}).values('id', 'name', 'status')
if len(is_dir_in_db) > 0 and is_dir_in_db[0]['status'] == 'done':
continue
status = is_dir_in_db[0]['status'] if len(is_dir_in_db) > 0 else "Not Processed"
dir_list.append( {'name':d, 'status':status, 'path': path } )
res = {'dir_list':dir_list, "total_dirs": len(dir_list), "root_path":settings.UPLOAD_ROOT, "root_dir":root, "current_dir":current_dir}
return Controller.render(req, res, 'collect/index.html')
def read(req):
if req.method != 'POST':
return Controller.goto('/collect/index')
path = req.POST.get('path')
data = {'status':'processing', 'name': os.path.basename(path), 'create_by': req.user.username }
mkdir, created = Directory.objects.get_or_create(full_path=path, defaults=data,)
sect = req.POST.get('section-select')
loc = req.POST.get('location-select')
sect_obj = Section.objects.filter(id=1).values('id', 'name')[0]
loc_obj = Location.objects.filter().values('id', 'name')[0]
ctx = {"path": path, "section_id":sect, "sect_obj":sect_obj, 'loc_id':loc, 'loc_obj':loc_obj, "mkdir": mkdir}
return Controller.render(req, ctx, 'collect/read.html')
def upload(req):
if req.method == 'POST':
fname = "{}{}".format(settings.UPLOAD_DIR, req.POST.get('name'))
with zipfile.ZipFile(req.FILES['images'],"r") as zip_ref:
zip_ref.extractall(fname)
return Collect.sort(req, name=fname)
else:
return Controller.render(req, {}, 'collect/upload.html')
def update(req):
"""
Update an a indexed image from the sort route.
"""
if req.method == "POST":
p = req.POST
m = Manifest.objects.get(id=p.get('id'))
m.subject = p.get('subject')
m.company_id = p.get('company_id')
m.location_id = p.get('location_id')
m.section_id = p.get('section_id')
m.date = p.get('date')
m.lat = p.get('lat')
m.lng = p.get('lng')
m.sequence = p.get('sequence')
m.import_status = 'sequence'
m.save()
return Controller.render_json({'success': True, 'params':req.POST})
def create(req):
post = req.POST;
dir = post.get('dir')
img_dir = "{}{}/*.jpg".format(settings.UPLOAD_DIR, dir)
out_dir = "{}{}/thumbs/".format(settings.UPLOAD_DIR, dir)
meta_data = thumb_nail(glob.glob(img_dir), out_dir, (700, 700))
for key, val in meta_data.items():
img = Manifest._format(post, val, 'init')
obj, created = Manifest.objects.get_or_create(directory_id=img['directory_id'], name=img['name'], defaults=img)
count = Manifest.objects.filter(directory_id=img['directory_id']).count()
return Controller.render_json({'success':True, 'count':count, 'directory_id':img['directory_id']})
def manifest(req):
imgs = Manifest.objects.filter(directory_id=req.GET.get('directory_id')).values(*Manifest.default_fields())
return Controller.render_json({'results':list(imgs)})
def get_dir_list(search_dir, reverse=False):
    """Return subdirectory names of ``search_dir`` sorted by mtime.

    Creates ``search_dir`` if it does not exist. With ``reverse=True`` the
    most recently modified directory comes first.

    Bug fix: the original called ``os.chdir(search_dir)`` so that the bare
    names from os.listdir() would resolve for isdir/getmtime; that leaked a
    process-global working-directory change to every caller. Join the
    directory path explicitly instead.
    """
    if not os.path.exists(search_dir):
        os.makedirs(search_dir)
    def full(name):
        return os.path.join(search_dir, name)
    dirs = [name for name in os.listdir(search_dir) if os.path.isdir(full(name))]
    dirs.sort(key=lambda name: os.path.getmtime(full(name)), reverse=reverse)
    return dirs
| 40.008696 | 138 | 0.660291 |
ce47b771f4fdb0c612745ca4b7c36695f3853f7c | 14 | py | Python | titanicazertyu/__init__.py | Adriengith/titanicazertyu | 5ad6569c08918df98c1ef4a59ffc1887b3fd9cee | [
"MIT"
] | 723 | 2018-01-08T04:55:42.000Z | 2022-03-27T14:30:53.000Z | titanicazertyu/__init__.py | Adriengith/titanicazertyu | 5ad6569c08918df98c1ef4a59ffc1887b3fd9cee | [
"MIT"
] | 84 | 2021-08-30T19:05:45.000Z | 2022-03-30T16:59:36.000Z | titanicazertyu/__init__.py | Adriengith/titanicazertyu | 5ad6569c08918df98c1ef4a59ffc1887b3fd9cee | [
"MIT"
] | 179 | 2018-01-08T08:16:32.000Z | 2022-03-20T02:49:44.000Z | print("hello") | 14 | 14 | 0.714286 |
89716e8b0ced822207ba181f78d1649c9594180c | 25,954 | py | Python | pkgs/numexpr-2.5-np110py27_0/lib/python2.7/site-packages/numexpr/necompiler.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | pkgs/numexpr-2.5-np110py27_0/lib/python2.7/site-packages/numexpr/necompiler.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | pkgs/numexpr-2.5-np110py27_0/lib/python2.7/site-packages/numexpr/necompiler.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | ###################################################################
# Numexpr - Fast numerical array expression evaluator for NumPy.
#
# License: MIT
# Author: See AUTHORS.txt
#
# See LICENSE.txt and LICENSES/*.txt for details about copyright and
# rights to use.
####################################################################
import __future__
import sys
import numpy
import threading
from numexpr import interpreter, expressions, use_vml, is_cpu_amd_intel
from numexpr.utils import CacheDict
# Declare a double type that does not exist in Python space
double = numpy.double

# Width-dependent integer aliases: Python 2 uses the native int/long pair,
# Python 3 substitutes explicit 32/64-bit NumPy types.
if sys.version_info[0] < 3:
    int_ = int
    long_ = long
else:
    int_ = numpy.int32
    long_ = numpy.int64

# Mappings between the single-character VM type codes, their "kind" names,
# and the corresponding Python/NumPy scalar types.
typecode_to_kind = {'b': 'bool', 'i': 'int', 'l': 'long', 'f': 'float',
                    'd': 'double', 'c': 'complex', 's': 'bytes', 'n': 'none'}
kind_to_typecode = {'bool': 'b', 'int': 'i', 'long': 'l', 'float': 'f',
                    'double': 'd', 'complex': 'c', 'bytes': 's', 'none': 'n'}
type_to_typecode = {bool: 'b', int_: 'i', long_: 'l', float: 'f',
                    double: 'd', complex: 'c', bytes: 's'}
type_to_kind = expressions.type_to_kind
kind_to_type = expressions.kind_to_type
default_type = kind_to_type[expressions.default_kind]

# Final additions for Python 3 (mainly for PyTables needs)
if sys.version_info[0] > 2:
    typecode_to_kind['s'] = 'str'
    kind_to_typecode['str'] = 's'
    type_to_typecode[str] = 's'

# NOTE(review): on Python 3 this is a live dict *view*, not a list; below it
# is only iterated, which a view supports.
scalar_constant_kinds = kind_to_typecode.keys()
class ASTNode(object):
    """Abstract Syntax Tree node.

    Members:
    astType -- type of node (op, constant, variable, raw, or alias)
    astKind -- the type of the result (bool, float, etc.)
    value -- value associated with this node.
             An opcode, numerical value, a variable name, etc.
    children -- the children below this node
    reg -- the register assigned to the result for this node.
    """
    # Attributes that participate in equality and hashing; ``reg`` is
    # deliberately excluded (equal subtrees may sit in different registers).
    cmpnames = ['astType', 'astKind', 'value', 'children']

    def __init__(self, astType='generic', astKind='unknown',
                 value=None, children=()):
        object.__init__(self)
        self.astType = astType
        self.astKind = astKind
        self.value = value
        self.children = tuple(children)
        self.reg = None

    def __eq__(self, other):
        # 'alias' nodes compare as the node they point to.
        if self.astType == 'alias':
            self = self.value
        if other.astType == 'alias':
            other = other.value
        if not isinstance(other, ASTNode):
            return False
        for name in self.cmpnames:
            if getattr(self, name) != getattr(other, name):
                return False
        return True

    def __hash__(self):
        if self.astType == 'alias':
            self = self.value
        return hash((self.astType, self.astKind, self.value, self.children))

    def __str__(self):
        return 'AST(%s, %s, %s, %s, %s)' % (self.astType, self.astKind,
                                            self.value, self.children, self.reg)

    def __repr__(self):
        return '<AST object at %s>' % id(self)

    def key(self):
        # Hashable identity used to share registers between duplicate leaves.
        return (self.astType, self.astKind, self.value, self.children)

    def typecode(self):
        return kind_to_typecode[self.astKind]

    def postorderWalk(self):
        # Post-order DFS: children first, then self.
        for c in self.children:
            for w in c.postorderWalk():
                yield w
        yield self

    def allOf(self, *astTypes):
        # Yield every node in the subtree whose astType is in astTypes.
        astTypes = set(astTypes)
        for w in self.postorderWalk():
            if w.astType in astTypes:
                yield w
def expressionToAST(ex):
    """Take an expression tree made out of expressions.ExpressionNode,
    and convert to an AST tree.

    This is necessary as ExpressionNode overrides many methods to act
    like a number.
    """
    converted_children = [expressionToAST(child) for child in ex.children]
    return ASTNode(ex.astType, ex.astKind, ex.value, converted_children)
def sigPerms(s):
    """Generate all possible signatures derived by upcasting the given
    signature.

    Each typecode may be replaced by any typecode to its right in the
    upcast order 'bilfdc'; 's' (string) is never cast; any other leading
    character stops the expansion and yields the signature unchanged.
    """
    codes = 'bilfdc'
    if not s:
        yield ''
        return
    head, tail = s[0], s[1:]
    if head in codes:
        for upcast in codes[codes.index(head):]:
            for rest in sigPerms(tail):
                yield upcast + rest
    elif head == 's':  # numbers shall not be cast to strings
        for rest in sigPerms(tail):
            yield 's' + rest
    else:
        yield s
def typeCompileAst(ast):
    """Assign appropriate types to each node in the AST.

    Will convert opcodes and functions to appropriate upcast version,
    and add "cast" ops if needed.
    """
    children = list(ast.children)
    if ast.astType == 'op':
        retsig = ast.typecode()
        basesig = ''.join(x.typecode() for x in list(ast.children))
        # Find some operation that will work on an acceptable casting of args.
        for sig in sigPerms(basesig):
            value = (ast.value + '_' + retsig + sig).encode('ascii')
            if value in interpreter.opcodes:
                break
        else:
            # No plain opcode matched; fall back to the function table.
            for sig in sigPerms(basesig):
                funcname = (ast.value + '_' + retsig + sig).encode('ascii')
                if funcname in interpreter.funccodes:
                    value = ('func_%sn' % (retsig + sig)).encode('ascii')
                    # Extra 'raw' child carries the function code operand.
                    children += [ASTNode('raw', 'none',
                                         interpreter.funccodes[funcname])]
                    break
            else:
                raise NotImplementedError(
                    "couldn't find matching opcode for '%s'"
                    % (ast.value + '_' + retsig + basesig))
        # First just cast constants, then cast variables if necessary:
        for i, (have, want) in enumerate(zip(basesig, sig)):
            if have != want:
                kind = typecode_to_kind[want]
                if children[i].astType == 'constant':
                    children[i] = ASTNode('constant', kind, children[i].value)
                else:
                    opname = "cast"
                    children[i] = ASTNode('op', kind, opname, [children[i]])
    else:
        value = ast.value
        children = ast.children
    return ASTNode(ast.astType, ast.astKind, value,
                   [typeCompileAst(c) for c in children])
class Register(object):
    """Abstraction for a register in the VM.

    Members:
    node -- the AST node this corresponds to
    temporary -- True if this isn't an input or output
    immediate -- not a register, but an immediate value
    n -- the physical register number.
         None if no number assigned yet.
    """

    def __init__(self, astnode, temporary=False):
        self.node = astnode
        self.temporary = temporary
        self.immediate = False
        self.n = None

    def __str__(self):
        kind = 'Temporary' if self.temporary else 'Register'
        return '%s(%s, %s, %s)' % (kind, self.node.astType,
                                   self.node.astKind, self.n,)

    def __repr__(self):
        # str() dispatches dynamically, so subclasses overriding __str__
        # (e.g. Immediate) keep a matching repr.
        return str(self)
class Immediate(Register):
    """Representation of an immediate (integer) operand, instead of
    a register.
    """

    def __init__(self, astnode):
        super(Immediate, self).__init__(astnode)
        self.immediate = True

    def __str__(self):
        return 'Immediate(%d)' % (self.node.value,)
def stringToExpression(s, types, context):
    """Given a string, convert it to a tree of ExpressionNode's.
    """
    # Swap in the requested numexpr context for the duration of the parse.
    old_ctx = expressions._context.get_current_context()
    try:
        expressions._context.set_new_context(context)
        # first compile to a code object to determine the names
        if context.get('truediv', False):
            flags = __future__.division.compiler_flag
        else:
            flags = 0
        c = compile(s, '<expr>', 'eval', flags)
        # make VariableNode's for the names
        names = {}
        for name in c.co_names:
            if name == "None":
                names[name] = None
            elif name == "True":
                names[name] = True
            elif name == "False":
                names[name] = False
            else:
                t = types.get(name, default_type)
                names[name] = expressions.VariableNode(name, type_to_kind[t])
        names.update(expressions.functions)
        # now build the expression
        # NOTE(review): eval() executes the user-supplied expression string;
        # callers must treat ``s`` as trusted input.
        ex = eval(c, names)
        if expressions.isConstant(ex):
            ex = expressions.ConstantNode(ex, expressions.getKind(ex))
        elif not isinstance(ex, expressions.ExpressionNode):
            raise TypeError("unsupported expression type: %s" % type(ex))
    finally:
        # Always restore the previous context, even if parsing failed.
        expressions._context.set_new_context(old_ctx)
    return ex
def isReduction(ast):
    """Return True when the root op is a reduction (sum/prod/min/max)."""
    # bytes.startswith accepts a tuple of prefixes directly.
    return ast.value.startswith((b'sum_', b'prod_', b'min_', b'max_'))
def getInputOrder(ast, input_order=None):
    """Derive the input order of the variables in an expression.

    When ``input_order`` is given it must name exactly the variables found
    in the expression and that order is used; otherwise variables are
    ordered alphabetically.
    """
    variables = {node.value: node for node in ast.allOf('variable')}
    found_names = set(variables)
    if input_order:
        if found_names != set(input_order):
            raise ValueError(
                "input names (%s) don't match those found in expression (%s)"
                % (input_order, found_names))
        ordered_names = input_order
    else:
        ordered_names = sorted(found_names)
    return [variables[name] for name in ordered_names]
def convertConstantToKind(x, kind):
    """Cast scalar ``x`` to the concrete type matching ``kind``.

    'float' is special-cased to return the NumPy float32 type; every other
    kind is converted through the kind_to_type table.
    """
    caster = numpy.float32 if kind == 'float' else kind_to_type[kind]
    return caster(x)
def getConstants(ast):
    """Collect the distinct constants of ``ast`` in a deterministic order.

    Returns a pair (constants_order, constants): the constant AST nodes
    sorted by (kind, value), and their values converted to the concrete
    scalar type of each node's kind.
    """
    const_map = {}
    for a in ast.allOf('constant'):
        const_map[(a.astKind, a.value)] = a
    # Bug fix: dict.keys() returns a view on Python 3, which has no .sort();
    # the old ``const_map.keys(); ordered_constants.sort()`` raised
    # AttributeError there. sorted() works on both Python 2 and 3.
    ordered_constants = sorted(const_map.keys())
    constants_order = [const_map[v] for v in ordered_constants]
    constants = [convertConstantToKind(a.value, a.astKind)
                 for a in constants_order]
    return constants_order, constants
def sortNodesByOrder(nodes, order):
    """Return ``nodes`` sorted by the position of their value in ``order``.

    ``order`` is a sequence of (_, value, _) triples; a node whose ``value``
    appears earlier in ``order`` sorts first.

    Bug fix: the original decorate-sort-undecorate compared the node objects
    themselves whenever two nodes mapped to the same position, which raises
    TypeError on Python 3 (nodes are unorderable); sorting with a key
    function avoids that and is also stable.
    """
    order_map = {v: i for i, (_, v, _) in enumerate(order)}
    return sorted(nodes, key=lambda node: order_map[node.value])
def assignLeafRegisters(inodes, registerMaker):
    """Assign new registers to each of the leaf nodes.

    Leaves with an identical key() share a single register, so duplicate
    constants/variables are materialized only once.
    """
    leafRegisters = {}
    for node in inodes:
        key = node.key()
        if key not in leafRegisters:
            leafRegisters[key] = registerMaker(node)
        node.reg = leafRegisters[key]
def assignBranchRegisters(inodes, registerMaker):
    """Assign temporary registers to each of the branch nodes.

    Unlike leaves, every branch (op) node always gets its own fresh
    temporary register.
    """
    for branch in inodes:
        branch.reg = registerMaker(branch, temporary=True)
def collapseDuplicateSubtrees(ast):
    """Common subexpression elimination.

    Rewrites every op node that is structurally equal to an earlier one
    into an 'alias' node pointing at the first occurrence, in place, and
    returns the list of alias nodes created.
    """
    seen = {}
    aliases = []
    for a in ast.allOf('op'):
        if a in seen:
            # A structurally identical subtree already exists: turn this
            # node into an alias of it and drop its children.
            target = seen[a]
            a.astType = 'alias'
            a.value = target
            a.children = ()
            aliases.append(a)
        else:
            seen[a] = a
    # Set values and registers so optimizeTemporariesAllocation
    # doesn't get confused
    for a in aliases:
        # Collapse alias chains so every alias points at a real node.
        while a.value.astType == 'alias':
            a.value = a.value.value
    return aliases
def optimizeTemporariesAllocation(ast):
    """Attempt to minimize the number of temporaries needed, by
    reusing old ones.

    Walks the tree in post-order, tracking for every temporary register the
    set of nodes that still need to read it; once a register has no
    remaining users it goes into a per-kind free pool and may be reassigned
    to a later node of the same kind.
    """
    nodes = [n for n in ast.postorderWalk() if n.reg.temporary]
    # Map: temporary register -> nodes that still consume it.
    users_of = dict((n.reg, set()) for n in nodes)
    # Improvement: removed the ``node_regs`` dict the original built here;
    # it was computed but never read anywhere in this function.

    # Include the root so its children's registers get real users recorded
    # even when the root itself is not temporary.
    if nodes and nodes[-1] is not ast:
        nodes_to_check = nodes + [ast]
    else:
        nodes_to_check = nodes
    for n in nodes_to_check:
        for c in n.children:
            if c.reg.temporary:
                users_of[c.reg].add(n)

    # Free pools of released temporaries, keyed by result kind.
    unused = dict([(tc, set()) for tc in scalar_constant_kinds])
    for n in nodes:
        for c in n.children:
            reg = c.reg
            if reg.temporary:
                users = users_of[reg]
                users.discard(n)
                if not users:
                    # Last consumer seen: the register can be recycled.
                    unused[reg.node.astKind].add(reg)
        if unused[n.astKind]:
            reg = unused[n.astKind].pop()
            users_of[reg] = users_of[n.reg]
            n.reg = reg
def setOrderedRegisterNumbers(order, start):
    """Given an order of nodes, assign register numbers.

    Numbers the nodes' registers consecutively from ``start`` and returns
    the next free register number.
    """
    for offset, node in enumerate(order):
        node.reg.n = start + offset
    return start + len(order)
def setRegisterNumbersForTemporaries(ast, start):
    """Assign register numbers for temporary registers, keeping track of
    aliases and handling immediate operands.

    Returns (next free register number, signature string of the temporaries
    in assignment order).
    """
    seen = 0
    signature = ''
    aliases = []
    for node in ast.postorderWalk():
        if node.astType == 'alias':
            # Work on the aliased node; patch the alias itself afterwards.
            aliases.append(node)
            node = node.value
        if node.reg.immediate:
            # Immediates are encoded inline in the program, not as registers.
            node.reg.n = node.value
            continue
        reg = node.reg
        if reg.n is None:
            reg.n = start + seen
            seen += 1
            signature += reg.node.typecode()
    for node in aliases:
        node.reg = node.value.reg
    return start + seen, signature
def convertASTtoThreeAddrForm(ast):
    """Convert an AST to a three address form.

    Three address form is (op, reg1, reg2, reg3), where reg1 is the
    destination of the result of the instruction.

    I suppose this should be called three register form, but three
    address form is found in compiler theory.
    """
    program = []
    for node in ast.allOf('op'):
        operand_regs = tuple(child.reg for child in node.children)
        program.append((node.value, node.reg) + operand_regs)
    return program
def compileThreeAddrForm(program):
    """Given a three address form of the program, compile it a string that
    the VM understands.
    """

    def nToChr(reg):
        # Encode a register number as one byte; 0xff marks "no operand".
        if reg is None:
            return b'\xff'
        elif reg.n < 0:
            raise ValueError("negative value for register number %s" % reg.n)
        else:
            if sys.version_info[0] < 3:
                return chr(reg.n)
            else:
                # int.to_bytes is not available in Python < 3.2
                #return reg.n.to_bytes(1, sys.byteorder)
                return bytes([reg.n])

    def quadrupleToString(opcode, store, a1=None, a2=None):
        # One VM instruction: opcode byte + destination + two operand bytes.
        cop = chr(interpreter.opcodes[opcode]).encode('ascii')
        cs = nToChr(store)
        ca1 = nToChr(a1)
        ca2 = nToChr(a2)
        return cop + cs + ca1 + ca2

    def toString(args):
        # Pad short instructions to a full quadruple; spill extra operands
        # into trailing 'noop' quadruples (three operands per noop).
        while len(args) < 4:
            args += (None,)
        opcode, store, a1, a2 = args[:4]
        s = quadrupleToString(opcode, store, a1, a2)
        l = [s]
        args = args[4:]
        while args:
            s = quadrupleToString(b'noop', *args[:3])
            l.append(s)
            args = args[3:]
        return b''.join(l)

    prog_str = b''.join([toString(t) for t in program])
    return prog_str
# Recognized keyword arguments for getContext(): (name, allowed values,
# default). 'truediv' == 'auto' is later resolved from the caller's
# __future__ imports.
context_info = [
    ('optimization', ('none', 'moderate', 'aggressive'), 'aggressive'),
    ('truediv', (False, True, 'auto'), 'auto')
]
def getContext(kwargs, frame_depth=1):
    """Validate ``kwargs`` against context_info and build a context dict.

    ``frame_depth`` is how many frames up the real caller sits; it is only
    used to inspect the caller's globals when truediv == 'auto'.
    Raises ValueError for out-of-range values or unknown keywords.
    """
    d = kwargs.copy()
    context = {}
    for name, allowed, default in context_info:
        value = d.pop(name, default)
        if value in allowed:
            context[name] = value
        else:
            raise ValueError("'%s' must be one of %s" % (name, allowed))

    if d:
        raise ValueError("Unknown keyword argument '%s'" % d.popitem()[0])
    if context['truediv'] == 'auto':
        # Mirror the caller's ``from __future__ import division`` setting.
        caller_globals = sys._getframe(frame_depth + 1).f_globals
        context['truediv'] = \
            caller_globals.get('division', None) == __future__.division

    return context
def precompile(ex, signature=(), context={}):
    """Compile the expression to an intermediate form.

    Returns (threeAddrProgram, signature, tempsig, constants, input_names).
    NOTE: the mutable default for ``context`` is kept for interface
    compatibility; it is only read here, never mutated.
    """
    types = dict(signature)
    input_order = [name for (name, type_) in signature]

    # Bug fix: the original tested ``isinstance(ex, (str, unicode))``, which
    # raises NameError on Python 3 where ``unicode`` does not exist (it is
    # never defined in this module). Short-circuiting keeps py2 behaviour.
    if isinstance(ex, str) or (sys.version_info[0] < 3 and
                               isinstance(ex, unicode)):
        ex = stringToExpression(ex, types, context)

    # the AST is like the expression, but the node objects don't have
    # any odd interpretations
    ast = expressionToAST(ex)

    if ex.astType != 'op':
        # Ensure the root is an op so there is a destination for the result.
        ast = ASTNode('op', value='copy', astKind=ex.astKind, children=(ast,))

    ast = typeCompileAst(ast)

    aliases = collapseDuplicateSubtrees(ast)

    assignLeafRegisters(ast.allOf('raw'), Immediate)
    assignLeafRegisters(ast.allOf('variable', 'constant'), Register)
    assignBranchRegisters(ast.allOf('op'), Register)

    # assign registers for aliases
    for a in aliases:
        a.reg = a.value.reg

    input_order = getInputOrder(ast, input_order)
    constants_order, constants = getConstants(ast)

    if isReduction(ast):
        ast.reg.temporary = False

    optimizeTemporariesAllocation(ast)

    # Register 0 always holds the output.
    ast.reg.temporary = False
    r_output = 0
    ast.reg.n = 0

    r_inputs = r_output + 1
    r_constants = setOrderedRegisterNumbers(input_order, r_inputs)
    r_temps = setOrderedRegisterNumbers(constants_order, r_constants)
    r_end, tempsig = setRegisterNumbersForTemporaries(ast, r_temps)

    threeAddrProgram = convertASTtoThreeAddrForm(ast)
    input_names = tuple([a.value for a in input_order])
    signature = ''.join(type_to_typecode[types.get(x, default_type)]
                        for x in input_names)
    return threeAddrProgram, signature, tempsig, constants, input_names
def NumExpr(ex, signature=(), **kwargs):
    """
    Compile an expression built using E.<variable> variables to a function.

    ex can also be specified as a string "2*a+3*b".

    The order of the input variables and their types can be specified using the
    signature parameter, which is a list of (name, type) pairs.

    Returns a `NumExpr` object containing the compiled function.
    """
    # NumExpr can be called either directly by the end-user, in which case
    # kwargs need to be sanitized by getContext, or by evaluate,
    # in which case kwargs are in already sanitized.
    # In that case frame_depth is wrong (it should be 2) but it doesn't matter
    # since it will not be used (because truediv='auto' has already been
    # translated to either True or False).
    context = getContext(kwargs, frame_depth=1)
    threeAddrProgram, inputsig, tempsig, constants, input_names = \
        precompile(ex, signature, context)
    # Serialize the three-address program to the byte string the VM runs.
    program = compileThreeAddrForm(threeAddrProgram)
    return interpreter.NumExpr(inputsig.encode('ascii'),
                               tempsig.encode('ascii'),
                               program, constants, input_names)
def disassemble(nex):
    """
    Given a NumExpr object, return a list which is the program disassembled.
    """
    # Reverse map: opcode byte value -> opcode name.
    rev_opcodes = {}
    for op in interpreter.opcodes:
        rev_opcodes[interpreter.opcodes[op]] = op
    # Register layout: r0 = output, then inputs, then constants, then temps.
    r_constants = 1 + len(nex.signature)
    r_temps = r_constants + len(nex.constants)

    def getArg(pc, offset):
        # Decode operand ``offset`` of the instruction at ``pc`` into a
        # human-readable register/constant/temporary label (or None).
        if sys.version_info[0] < 3:
            arg = ord(nex.program[pc + offset])
            op = rev_opcodes.get(ord(nex.program[pc]))
        else:
            arg = nex.program[pc + offset]
            op = rev_opcodes.get(nex.program[pc])
        try:
            # Per-operand typecode taken from the opcode's signature suffix.
            code = op.split(b'_')[1][offset - 1]
        except IndexError:
            return None
        if sys.version_info[0] > 2:
            # int.to_bytes is not available in Python < 3.2
            #code = code.to_bytes(1, sys.byteorder)
            code = bytes([code])
        if arg == 255:
            # 0xff marks "no operand" (see compileThreeAddrForm.nToChr).
            return None
        if code != b'n':
            if arg == 0:
                return b'r0'
            elif arg < r_constants:
                return ('r%d[%s]' % (arg, nex.input_names[arg - 1])).encode('ascii')
            elif arg < r_temps:
                return ('c%d[%s]' % (arg, nex.constants[arg - r_constants])).encode('ascii')
            else:
                return ('t%d' % (arg,)).encode('ascii')
        else:
            return arg

    source = []
    # Instructions are fixed-width quadruples: (opcode, dest, arg1, arg2).
    for pc in range(0, len(nex.program), 4):
        if sys.version_info[0] < 3:
            op = rev_opcodes.get(ord(nex.program[pc]))
        else:
            op = rev_opcodes.get(nex.program[pc])
        dest = getArg(pc, 1)
        arg1 = getArg(pc, 2)
        arg2 = getArg(pc, 3)
        source.append((op, dest, arg1, arg2))
    return source
def getType(a):
    """Map an ndarray's dtype onto the scalar type numexpr works with."""
    dtype = a.dtype
    kind, size = dtype.kind, dtype.itemsize
    if kind == 'b':
        return bool
    if kind in 'iu':
        # ``long`` is for integers of more than 32 bits; uint32 also needs
        # it because a signed 32-bit ``int`` cannot hold all of its values.
        if size > 4 or (kind == 'u' and size == 4):
            return long_
        return int_
    if kind == 'f':
        # ``double`` is for floats of more than 32 bits
        return double if size > 4 else float
    if kind == 'c':
        return complex
    if kind == 'S':
        return bytes
    raise ValueError("unknown type %s" % dtype.name)
def getExprNames(text, context):
    """Return (ordered variable names, whether the expression can use VML)."""
    ex = stringToExpression(text, {}, context)
    ast = expressionToAST(ex)
    input_order = getInputOrder(ast, None)
    #try to figure out if vml operations are used by expression
    if not use_vml:
        ex_uses_vml = False
    else:
        # VML is worth dispatching to only when at least one transcendental
        # (or div/pow/abs) op from this list appears in the tree.
        for node in ast.postorderWalk():
            if node.astType == 'op' \
                    and node.value in ['sin', 'cos', 'exp', 'log',
                                       'expm1', 'log1p',
                                       'pow', 'div',
                                       'sqrt', 'inv',
                                       'sinh', 'cosh', 'tanh',
                                       'arcsin', 'arccos', 'arctan',
                                       'arccosh', 'arcsinh', 'arctanh',
                                       'arctan2', 'abs']:
                ex_uses_vml = True
                break
        else:
            ex_uses_vml = False

    return [a.value for a in input_order], ex_uses_vml
# Dictionaries for caching variable names and compiled expressions
_names_cache = CacheDict(256)
_numexpr_cache = CacheDict(256)
# Lock serializing calls to evaluate() (the caches and caller-frame
# inspection in evaluate() are shared mutable state).
evaluate_lock = threading.Lock()
def evaluate(ex, local_dict=None, global_dict=None,
             out=None, order='K', casting='safe', **kwargs):
    """Evaluate a simple array expression element-wise, using the new iterator.

    ex is a string forming an expression, like "2*a+3*b". The values for "a"
    and "b" will by default be taken from the calling function's frame
    (through use of sys._getframe()). Alternatively, they can be specified
    using the 'local_dict' or 'global_dict' arguments.

    Parameters
    ----------

    local_dict : dictionary, optional
        A dictionary that replaces the local operands in current frame.

    global_dict : dictionary, optional
        A dictionary that replaces the global operands in current frame.

    out : NumPy array, optional
        An existing array where the outcome is going to be stored. Care is
        required so that this array has the same shape and type than the
        actual outcome of the computation. Useful for avoiding unnecessary
        new array allocations.

    order : {'C', 'F', 'A', or 'K'}, optional
        Controls the iteration order for operands. 'C' means C order, 'F'
        means Fortran order, 'A' means 'F' order if all the arrays are
        Fortran contiguous, 'C' order otherwise, and 'K' means as close to
        the order the array elements appear in memory as possible. For
        efficient computations, typically 'K'eep order (the default) is
        desired.

    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur when making a copy or
        buffering. Setting this to 'unsafe' is not recommended, as it can
        adversely affect accumulations.

          * 'no' means the data types should not be cast at all.
          * 'equiv' means only byte-order changes are allowed.
          * 'safe' means only casts which can preserve values are allowed.
          * 'same_kind' means only safe casts or casts within a kind,
            like float64 to float32, are allowed.
          * 'unsafe' means any data conversions may be done.
    """
    with evaluate_lock:
        # Bug fix: the original tested ``isinstance(ex, (str, unicode))``,
        # which raises NameError on Python 3 where ``unicode`` is undefined
        # (it is never aliased in this module). Short-circuiting keeps the
        # Python 2 behaviour intact.
        if not (isinstance(ex, str) or (sys.version_info[0] < 3 and
                                        isinstance(ex, unicode))):
            raise ValueError("must specify expression as a string")

        # Get the names for this expression
        context = getContext(kwargs, frame_depth=1)
        expr_key = (ex, tuple(sorted(context.items())))
        if expr_key not in _names_cache:
            _names_cache[expr_key] = getExprNames(ex, context)
        names, ex_uses_vml = _names_cache[expr_key]

        # Get the arguments based on the names (locals shadow globals).
        call_frame = sys._getframe(1)
        if local_dict is None:
            local_dict = call_frame.f_locals
        if global_dict is None:
            global_dict = call_frame.f_globals

        arguments = []
        for name in names:
            try:
                a = local_dict[name]
            except KeyError:
                a = global_dict[name]
            arguments.append(numpy.asarray(a))

        # Create a signature
        signature = [(name, getType(arg)) for (name, arg)
                     in zip(names, arguments)]

        # Look up numexpr if possible.
        numexpr_key = expr_key + (tuple(signature),)
        try:
            compiled_ex = _numexpr_cache[numexpr_key]
        except KeyError:
            compiled_ex = _numexpr_cache[numexpr_key] = \
                NumExpr(ex, signature, **context)
        kwargs = {'out': out, 'order': order, 'casting': casting,
                  'ex_uses_vml': ex_uses_vml}
        return compiled_ex(*arguments, **kwargs)
| 33.619171 | 92 | 0.583648 |
ada78b5165a53fed03aa03dadf79153c9b86fded | 8,729 | py | Python | soft_sort/ops.py | kinoute/google-research | 4a59cab927579ea9722e43252c695de5da4eb5e2 | [
"Apache-2.0"
] | 11 | 2020-01-29T07:25:04.000Z | 2022-03-05T16:01:21.000Z | soft_sort/ops.py | RubensZimbres/google-research | 562c7c6ef959cb3cb382b1b660ccc45e8f5289c4 | [
"Apache-2.0"
] | 13 | 2020-01-28T22:19:53.000Z | 2022-02-10T00:39:26.000Z | soft_sort/ops.py | RubensZimbres/google-research | 562c7c6ef959cb3cb382b1b660ccc45e8f5289c4 | [
"Apache-2.0"
] | 2 | 2020-05-07T17:43:37.000Z | 2020-12-06T14:43:30.000Z | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module defines the softranks and softsort operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from soft_sort import soft_quantilizer
DIRECTIONS = ('ASCENDING', 'DESCENDING')
def _preprocess(x, axis):
  """Reshapes the input data to make it rank 2 as required by SoftQuantilizer.

  The SoftQuantilizer expects an input tensor of rank 2, where the first
  dimension is the batch dimension and the soft sorting is applied on the
  second one.

  Args:
   x: Tensor<float> of any dimension.
   axis: (int) the axis to be turned into the second dimension.

  Returns:
   a Tensor<float>[batch, n] where n is the dimensions over the axis and batch
   the product of all other dimensions
  """
  permutation = list(range(x.shape.rank))
  permutation[-1], permutation[axis] = permutation[axis], permutation[-1]
  needs_transpose = permutation[axis] != permutation[-1]
  swapped = tf.transpose(x, permutation) if needs_transpose else x
  return tf.reshape(swapped, (-1, tf.shape(x)[axis]))
def _postprocess(x, shape, axis):
  """Applies the inverse transformation of _preprocess.

  Args:
   x: Tensor<float>[batch, n]
   shape: TensorShape of the desired output.
   axis: (int) the axis along which the original tensor was processed.

  Returns:
   A Tensor<float> with the shape given in argument.
  """
  target = list(shape)
  target[axis], target[-1] = target[-1], target[axis]
  unflattened = tf.reshape(x, target)
  # Transpose to get back to the original shape
  permutation = list(range(shape.rank))
  permutation[-1], permutation[axis] = permutation[axis], permutation[-1]
  if permutation[axis] == permutation[-1]:
    return unflattened
  return tf.transpose(unflattened, permutation)
def softsort(x, direction='ASCENDING', axis=-1, **kwargs):
  """Applies the softsort operator on input tensor x.

  This operator acts as differentiable alternative to tf.sort.

  Args:
   x: the input tensor. It can be either of shape [batch, n] or [n].
   direction: the direction 'ASCENDING' or 'DESCENDING'
   axis: the axis on which to operate the sort.
   **kwargs: see SoftQuantilizer for possible parameters.

  Returns:
   A tensor of the same shape as the input.
  """
  if direction not in DIRECTIONS:
    raise ValueError('`direction` should be one of {}'.format(DIRECTIONS))

  flattened = _preprocess(x, axis)
  sorter = soft_quantilizer.SoftQuantilizer(
      flattened, descending=(direction == 'DESCENDING'), **kwargs)

  # In case we are applying some quantization while sorting, the number of
  # outputs should be the number of targets.
  out_shape = x.shape.as_list()
  out_shape[axis] = sorter.target_weights.shape[1]
  return _postprocess(sorter.softsort, tf.TensorShape(out_shape), axis)
def softranks(x, direction='ASCENDING', axis=-1, zero_based=True, **kwargs):
  """A differentiable argsort-like operator that returns directly the ranks.

  Note that it behaves as the 'inverse' of the argsort operator since it
  returns soft ranks, i.e. real numbers that play the role of indices and
  quantify the relative standing (among all n entries) of each entry of x.

  Args:
   x: Tensor<float> of any shape.
   direction: (str) either 'ASCENDING' or 'DESCENDING', as in tf.sort.
   axis: (int) the axis along which to sort, as in tf.sort.
   zero_based: (bool) to return values in [0, n-1] or in [1, n].
   **kwargs: see SoftQuantilizer for possible parameters.

  Returns:
   A Tensor<float> of the same shape as the input containing the soft ranks.
  """
  if direction not in DIRECTIONS:
    raise ValueError('`direction` should be one of {}'.format(DIRECTIONS))

  flattened = _preprocess(x, axis)
  sorter = soft_quantilizer.SoftQuantilizer(
      flattened, descending=(direction == 'DESCENDING'), **kwargs)
  # The soft CDF gives each entry's fractional standing in [0, 1]; scale it
  # back to index space.
  ranks = sorter.softcdf * tf.cast(tf.shape(flattened)[1], dtype=x.dtype)
  if zero_based:
    ranks -= tf.cast(1.0, dtype=x.dtype)
  return _postprocess(ranks, x.shape, axis)
def softquantiles(x, quantiles, quantile_width=None, axis=-1, **kwargs):
  """Computes a (single) soft quantile via optimal transport.

  This operator takes advantage of the fact that an exhaustive softsort is not
  required to recover a single quantile. Instead, one can transport all
  input values in x onto only 3 weighted values. Target weights are adjusted so
  that those values in x that are transported to the middle value in the target
  vector y correspond to those concentrating around the quantile of interest.

  This idea generalizes to more quantiles, interleaving small weights on the
  quantile indices and bigger weights in between, corresponding to the gap from
  one desired quantile to the next one.

  Args:
   x: Tensor<float> of any shape.
   quantiles: list<float> the quantiles to be returned. It can also be a single
     float.
   quantile_width: (float) mass given to the bucket supposed to attract points
     whose value concentrate around the desired quantile value. Bigger width
     means that we allow the soft quantile to be a mixture of
     more points further away from the quantile. If None, the width is set at
     1/n where n is the number of values considered (the size along the
     'axis').
   axis: (int) the axis along which to compute the quantile.
   **kwargs: see SoftQuantilizer for possible extra parameters.

  Returns:
    A Tensor<float> similar to the input tensor, but the axis dimension is
    replaced by the number of quantiles specified in the quantiles list.
    Hence, if only a quantile is requested (quantiles is a float) only one
    value in that axis is returned. When several quantiles are requested, the
    tensor will have that many values in that axis.

  Raises:
    tf.errors.InvalidArgumentError when the quantiles and quantile width are
    not correct, namely quantiles are either not in sorted order or the
    quantile_width is too large.
  """
  if isinstance(quantiles, float):
    quantiles = [quantiles]
  quantiles = tf.constant(quantiles, tf.float32)

  # Preprocesses submitted quantiles to check that they satisfy elementary
  # constraints.
  valid_quantiles = tf.boolean_mask(
      quantiles, tf.logical_and(quantiles > 0.0, quantiles < 1.0))
  num_quantiles = tf.shape(valid_quantiles)[0]

  # Includes values on both ends of [0,1].
  extended_quantiles = tf.concat([[0.0], valid_quantiles, [1.0]], axis=0)

  # Builds filler_weights in between the target quantiles.
  filler_weights = extended_quantiles[1:] - extended_quantiles[:-1]
  if quantile_width is None:
    quantile_width = tf.reduce_min(
        tf.concat(
            [filler_weights, [1.0 / tf.cast(tf.shape(x)[axis], dtype=x.dtype)]],
            axis=0))

  # Takes into account quantile_width in the definition of weights
  shift = -tf.ones(tf.shape(filler_weights), dtype=x.dtype)
  shift = shift + 0.5 * (
      tf.one_hot(0, num_quantiles + 1) +
      tf.one_hot(num_quantiles, num_quantiles + 1))
  filler_weights = filler_weights + quantile_width * shift

  # All filler weights must stay non-negative, otherwise the submitted
  # quantiles/width combination is infeasible.
  assert_op = tf.Assert(tf.reduce_all(filler_weights >= 0.0), [filler_weights])
  with tf.control_dependencies([assert_op]):
    # Adds one more value to have tensors of the same shape to interleave them.
    quantile_weights = tf.ones(num_quantiles + 1) * quantile_width

    # Interleaves the filler_weights with the quantile weights.
    weights = tf.reshape(
        tf.stack([filler_weights, quantile_weights], axis=1), (-1,))[:-1]

    # Sends only the positive weights to the softsort operator.
    positive_weights = tf.boolean_mask(weights, weights > 0.0)
    result = softsort(
        x,
        direction='ASCENDING', axis=axis, target_weights=positive_weights,
        **kwargs)

    # Recovers the indices corresponding to the desired quantiles.
    odds = tf.math.floormod(tf.range(weights.shape[0], dtype=tf.float32), 2)
    positives = tf.cast(weights > 0.0, tf.float32)
    indices = tf.cast(tf.math.cumsum(positives) * odds, dtype=tf.int32)
    indices = tf.boolean_mask(indices, indices > 0) - 1
    result = tf.gather(result, indices, axis=axis)

    # In the specific case where we want a single quantile, squeezes the
    # quantile dimension.
    return tf.cond(tf.equal(tf.shape(result)[axis], 1),
                   lambda: tf.squeeze(result, axis=axis),
                   lambda: result)
| 38.96875 | 80 | 0.718066 |
3698305a217958c58a9a869ee8eea8af73bba961 | 17,380 | py | Python | tests/test_magicgui.py | GenevieveBuckley/magicgui | c8bd2fd866e19e39996716c0f5b98e8672a921a6 | [
"MIT"
] | 1 | 2020-06-01T06:02:25.000Z | 2020-06-01T06:02:25.000Z | tests/test_magicgui.py | GenevieveBuckley/magicgui | c8bd2fd866e19e39996716c0f5b98e8672a921a6 | [
"MIT"
] | 1 | 2020-06-29T12:06:23.000Z | 2020-07-01T03:25:17.000Z | tests/test_magicgui.py | GenevieveBuckley/magicgui | c8bd2fd866e19e39996716c0f5b98e8672a921a6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Tests for `magicgui` package."""
import inspect
from enum import Enum
import pytest
from qtpy.QtCore import Qt
from magicgui import magicgui, register_type, type_map, widgets
from magicgui.signature import MagicSignature
@pytest.fixture
def magic_func():
    """Test function decorated by magicgui."""

    # The decorated function doubles as a widget container: tests below
    # access func.a / func.b / func.c and still call func() directly.
    @magicgui(call_button="my_button", auto_call=True, labels=False)
    def func(a: str = "works", b: int = 3, c=7.1) -> str:
        return a + str(b)

    return func
def test_magicgui(magic_func):
    """Test basic magicgui functionality."""
    # Calling the FunctionGui runs the wrapped function with widget values.
    assert magic_func() == "works3"
    assert magic_func.a.value == "works"
    assert magic_func.b.value == 3
    assert magic_func.c.value == 7.1
    # Widget types are inferred from the parameter annotations/defaults.
    assert isinstance(magic_func.a, widgets.LineEdit)
    assert isinstance(magic_func.b, widgets.SpinBox)
    assert isinstance(magic_func.c, widgets.FloatSpinBox)
    magic_func.show()
    assert magic_func.visible
    a = magic_func.a  # save ref
    assert magic_func.index(a) == 0
    # we can delete widgets
    del magic_func.a
    with pytest.raises(AttributeError):
        getattr(magic_func, "a")
    # they disappear from the layout
    with pytest.raises(ValueError):
        magic_func.index(a)
def test_overriding_widget_type():
    """Test overriding the widget type of a parameter."""
    # a will now be a LineEdit instead of a spinbox
    @magicgui(a={"widget_type": "LineEdit"})
    def func(a: int = 1):
        pass
    assert isinstance(func.a, widgets.LineEdit)
    # LineEdit stores its value as a string even for an int parameter.
    assert func.a.value == "1"
def test_unrecognized_types():
    """Test that arg with an unrecognized type is hidden."""
    class Something:
        pass
    # don't know how to handle Something type
    @magicgui
    def func(arg: Something, b: int = 1):
        pass
    # Unknown types map to an EmptyWidget placeholder.
    assert isinstance(func.arg, widgets.EmptyWidget)
    with pytest.raises(TypeError) as e:
        func()
    assert "missing a required argument" in str(e)
def test_no_type_provided():
    """Test position args with unknown type."""
    @magicgui
    def func(a):
        pass
    assert isinstance(func.a, widgets.EmptyWidget)
    with pytest.raises(TypeError) as e:
        func()
    # The error message suggests binding a value for the untyped parameter.
    assert "missing a required argument" in str(e)
    assert "@magicgui(a={'bind': value})" in str(e)
def test_bind_out_of_order():
    """Test that binding a value before a non-default argument still gives message."""
    @magicgui(a={"bind": 10})
    def func(a, x):
        pass
    assert isinstance(func.a, widgets.EmptyWidget)
    with pytest.raises(TypeError) as e:
        func()
    # The suggestion should point at the still-unbound parameter ``x``.
    assert "missing a required argument" in str(e)
    assert "@magicgui(x={'bind': value})" in str(e)
def test_call_button():
    """Test that the call button has been added, and pressing it calls the function."""
    @magicgui(call_button="my_button", auto_call=True)
    def func(a: int, b: int = 3, c=7.1):
        assert a == 7
    assert hasattr(func, "_call_button")
    assert isinstance(func._call_button, widgets.PushButton)
    # auto_call=True: assigning the widget value triggers func (asserting a == 7).
    func.a.value = 7
def test_auto_call(qtbot, magic_func):
    """Test that changing a parameter calls the function."""
    # TODO: remove qtbot requirement so we can test other backends eventually.
    # changing the widget parameter calls the function
    with qtbot.waitSignal(magic_func.called, timeout=1000):
        magic_func.b.value = 6
    # changing the gui calls the function
    with qtbot.waitSignal(magic_func.called, timeout=1000):
        qtbot.keyClick(magic_func.a.native, Qt.Key_A, Qt.ControlModifier)
        qtbot.keyClick(magic_func.a.native, Qt.Key_Delete)
def test_dropdown_list_from_enum():
    """Test that enums properly populate the dropdown menu with options."""
    class Medium(Enum):
        Glass = 1.520
        Oil = 1.515
        Water = 1.333
        Air = 1.0003
    @magicgui
    def func(arg: Medium = Medium.Water):
        ...
    assert func.arg.value == Medium.Water
    assert isinstance(func.arg, widgets.ComboBox)
    # Every enum member becomes a choice, in declaration order.
    assert list(func.arg.choices) == list(Medium.__members__.values())
def test_dropdown_list_from_choices():
    """Test that providing the 'choices' argument with a list of strings works."""
    CHOICES = ["Oil", "Water", "Air"]
    @magicgui(arg={"choices": CHOICES})
    def func(arg="Water"):
        ...
    assert func.arg.value == "Water"
    assert isinstance(func.arg, widgets.ComboBox)
    assert list(func.arg.choices) == CHOICES
    with pytest.raises(ValueError):
        # the default value must be in the list
        @magicgui(arg={"choices": ["Oil", "Water", "Air"]})
        def func(arg="Silicone"):
            ...
def test_dropdown_list_from_callable():
    """Test that providing the 'choices' argument with a callable works."""
    CHOICES = ["Oil", "Water", "Air"]
    def get_choices(gui):
        return CHOICES
    @magicgui(arg={"choices": get_choices})
    def func(arg="Water"):
        ...
    assert func.arg.value == "Water"
    assert isinstance(func.arg, widgets.ComboBox)
    assert list(func.arg.choices) == CHOICES
    # reset_choices re-invokes the callable; must not raise.
    func.reset_choices()
def test_changing_widget_attr_fails(magic_func):
    """Test set_widget will either update or change an existing widget."""
    assert magic_func.a.value == "works"
    widget1 = magic_func.a
    assert isinstance(widget1, widgets.LineEdit)
    # changing it to a different type will destroy and create a new widget
    widget2 = widgets.create_widget(value=1, name="a")
    with pytest.raises(AttributeError):
        magic_func.a = widget2
    # The original widget is untouched after the failed assignment.
    assert magic_func.a == widget1
def test_multiple_gui_with_same_args():
    """Test that similarly named arguments are independent of one another."""
    @magicgui
    def example1(a=2):
        return a
    @magicgui
    def example2(a=5):
        return a
    # they get their initial values from the function sigs
    assert example1.a.value == 2
    assert example2.a.value == 5
    # settings one doesn't affect the other
    example1.a.value = 10
    assert example1.a.value == 10
    assert example2.a.value == 5
    # vice versa...
    example2.a.value = 4
    assert example1.a.value == 10
    assert example2.a.value == 4
    # calling the original equations updates the function defaults
    assert example1() == 10
    assert example2() == 4
def test_multiple_gui_instance_independence():
    """Test that multiple instance of the same decorated function are independent."""
    def example(a=2):
        return a
    w1 = magicgui(example)
    w2 = magicgui(example)
    # they get their initial values from the function sigs
    assert w1.a.value == 2
    assert w2.a.value == 2
    # settings one doesn't affect the other
    w1.a.value = 10
    assert w1.a.value == 10
    assert w2.a.value == 2
    # vice versa...
    w2.a.value = 4
    assert w1.a.value == 10
    assert w2.a.value == 4
    # all instances are independent
    assert example() == 2
    assert w1() == 10
    assert w2() == 4
def test_invisible_param():
    """Test that the visible option works."""
    @magicgui(a={"visible": False})
    def func(a: str = "string", b: int = 3, c=7.1) -> str:
        return "works"
    # The hidden widget still exists as an attribute; it just isn't shown.
    assert hasattr(func, "a")
    func.show()
    assert not func.a.visible
    assert func.b.visible
    assert func.c.visible
    func()
def test_bad_options():
    """Test that invalid parameter options raise TypeError."""
    # Parameter options must be dicts; a bare int is rejected at decoration time.
    with pytest.raises(TypeError):
        @magicgui(b=7)  # type: ignore
        def func(a="string", b=3, c=7.1):
            return "works"
# @pytest.mark.xfail(reason="MagicSignatures are slightly different")
def test_signature_repr():
    """Test that the gui makes a proper signature."""
    def func(a: str = "string", b: int = 3, c: float = 7.1):
        return locals()
    magic_func = magicgui(func)
    # the STRING signature representation should be the same as the original function
    assert str(inspect.signature(magic_func)) == str(inspect.signature(func))
    # however, the magic_func signature is an enhance MagicSignature object:
    assert isinstance(inspect.signature(magic_func), MagicSignature)
    assert isinstance(inspect.signature(func), inspect.Signature)
    # make sure it is up to date
    magic_func.b.value = 0
    assert (
        str(inspect.signature(magic_func))
        == "(a: str = 'string', b: int = 0, c: float = 7.1)"
    )
def test_set_choices_raises():
    """Test failures on setting choices."""
    @magicgui(mood={"choices": ["happy", "sad"]})
    def func(mood: str = "happy"):
        pass
    # choices must be assigned an iterable of options, not None or a scalar.
    with pytest.raises(TypeError):
        func.mood.choices = None
    with pytest.raises(TypeError):
        func.mood.choices = 1
def test_get_choices_raises():
    """Test failures on getting choices."""
    @magicgui(mood={"choices": [1, 2, 3]})
    def func(mood: int = 1, hi: str = "hello"):
        pass
    # A plain LineEdit-style widget has no ``choices`` attribute.
    with pytest.raises(AttributeError):
        func.hi.choices
    assert func.mood.choices == (1, 2, 3)
@pytest.mark.parametrize(
    "labels",
    [
        pytest.param(
            True, marks=pytest.mark.xfail(reason="indexing still wrong with labels")
        ),
        False,
    ],
    ids=["with-labels", "no-labels"],
)
def test_add_at_position(labels):
    """Test that adding widghet with position option puts widget in the right place."""
    def func(a=1, b=2, c=3):
        pass
    def get_layout_items(gui):
        # Collect widget names in native (Qt) layout order.
        lay = gui.native.layout()
        items = [lay.itemAt(i).widget()._magic_widget.name for i in range(lay.count())]
        if labels:
            items = list(filter(None, items))
        return items
    gui = magicgui(func, labels=labels)
    assert get_layout_items(gui) == ["a", "b", "c"]
    gui.insert(1, widgets.create_widget(name="new"))
    assert get_layout_items(gui) == ["a", "new", "b", "c"]
def test_original_function_works(magic_func):
    """Test that the decorated function is still operational."""
    # Calling with no args uses widget values; explicit args override them.
    assert magic_func() == "works3"
    assert magic_func("hi") == "hi3"
def test_show(magic_func):
    """Test that the show option works."""
    # assert not magic_func.visible
    magic_func.show()
    assert magic_func.visible
def test_register_types():
    """Test that we can register custom widget classes for certain types.

    The ``str``/``int`` registrations mutate the global type map, so they are
    removed in a ``finally`` block: previously a failing assertion skipped the
    cleanup ``del`` statements and leaked the overrides into unrelated tests.
    """
    # must provide a non-None choices or widget_type
    with pytest.raises(ValueError):
        register_type(str, choices=None)
    register_type(int, widget_type="LineEdit")
    # this works, but choices overrides widget_type, and warns the user
    with pytest.warns(UserWarning):
        register_type(str, choices=["works", "cool", "huh"], widget_type="LineEdit")
    class Main:
        pass
    class Sub(Main):
        pass
    class Main2:
        pass
    class Sub2(Main2):
        pass
    register_type(Main, choices=[1, 2, 3])
    register_type(Main2, widget_type="LineEdit")
    try:
        @magicgui
        def func(a: str = "works", b: int = 3, c: Sub = None, d: Sub2 = None):
            return a
        # Registration applies to subclasses too (Sub -> Main, Sub2 -> Main2).
        assert isinstance(func.a, widgets.ComboBox)
        assert isinstance(func.b, widgets.LineEdit)
        assert isinstance(func.c, widgets.ComboBox)
        assert isinstance(func.d, widgets.LineEdit)
    finally:
        # Always undo the builtin-type overrides, even when an assertion fails.
        del type_map._TYPE_DEFS[str]
        del type_map._TYPE_DEFS[int]
def test_register_return_callback():
    """Test that registering a return callback works."""
    def check_value(gui, value, rettype):
        # Callback fires after each call; only a return value of 1 is accepted.
        assert value == 1
    class Base:
        pass
    class Sub(Base):
        pass
    register_type(int, return_callback=check_value)
    register_type(Base, return_callback=check_value)
    @magicgui
    def func(a=1) -> int:
        return a
    func()
    # Returning anything other than 1 trips the callback's assertion.
    with pytest.raises(AssertionError):
        func(3)
    # Callbacks registered on a base class apply to subclass return annotations.
    @magicgui
    def func2(a=1) -> Sub:
        return a
    func2()
# @pytest.mark.skip(reason="need to rethink how to test this")
# def test_parent_changed(qtbot, magic_func):
#     """Test that setting MagicGui parent emits a signal."""
#     with qtbot.waitSignal(magic_func.parent_changed, timeout=1000):
#         magic_func.native.setParent(None)
def test_function_binding():
    """Each instance gets an independently-bound magicgui method."""
    class MyObject:
        def __init__(self, name):
            self.name = name
            self.counter = 0.0
        @magicgui(auto_call=True)
        def method(self, sigma: float = 1):
            self.counter = self.counter + sigma
            return self.name, self.counter
    a = MyObject("a")
    b = MyObject("b")
    # Counters accumulate per instance; state is not shared between a and b.
    assert a.method() == ("a", 1)
    assert b.method(sigma=4) == ("b", 4)
    assert a.method() == ("a", 2)
    assert b.method() == ("b", 5)
def test_call_count():
    """A FunctionGui tracks how often it was invoked and can reset the count."""
    @magicgui
    def func():
        pass
    assert func.call_count == 0
    # Invoke twice and confirm the counter followed along.
    for _ in range(2):
        func()
    assert func.call_count == 2
    func.reset_call_count()
    assert func.call_count == 0
def test_tooltips_from_numpydoc():
    """Test that numpydocs docstrings can be used for tooltips."""
    # Explicit tooltip options take precedence over docstring-derived ones.
    x_tooltip = "override tooltip"
    y_docstring = """A greeting, by default 'hi'. Notice how we miraculously pull
    the entirety of the docstring just like that"""
    @magicgui(x={"tooltip": x_tooltip}, z={"tooltip": None})
    def func(x: int, y: str = "hi", z=None):
        """Do a little thing.
        Parameters
        ----------
        x : int
            An integer for you to use
        y : str, optional
            A greeting, by default 'hi'. Notice how we miraculously pull
            the entirety of the docstring just like that
        z : Any, optional
            No tooltip for me please.
        """
    assert func.x.tooltip == x_tooltip
    assert func.y.tooltip == y_docstring
    assert not func.z.tooltip
def test_duplicated_and_missing_params_from_numpydoc():
    """Test that numpydocs docstrings can be used for tooltips."""
    # "x, y : int" documents two parameters at once; z is undocumented.
    @magicgui
    def func(x, y, z=None):
        """Do a little thing.
        Parameters
        ----------
        x, y : int
            Integers for you to use
        """
    assert func.x.tooltip == "Integers for you to use"
    assert func.y.tooltip == "Integers for you to use"
    assert not func.z.tooltip
def test_tooltips_from_google_doc():
    """Test that google docstrings can be used for tooltips."""
    x_docstring = "An integer for you to use"
    y_docstring = """A greeting. Notice how we miraculously pull
    the entirety of the docstring just like that"""
    @magicgui
    def func(x: int, y: str = "hi"):
        """Do a little thing.
        Args:
            x (int): An integer for you to use
            y (str, optional): A greeting. Notice how we miraculously pull
                the entirety of the docstring just like that
        """
    assert func.x.tooltip == x_docstring
    assert func.y.tooltip == y_docstring
def test_tooltips_from_rest_doc():
    """Test that google docstrings can be used for tooltips."""
    x_docstring = "An integer for you to use"
    y_docstring = """A greeting, by default 'hi'. Notice how we miraculously pull
    the entirety of the docstring just like that"""
    @magicgui
    def func(x: int, y: str = "hi", z=None):
        """Do a little thing.
        :param x: An integer for you to use
        :param y: A greeting, by default 'hi'. Notice how we miraculously pull
            the entirety of the docstring just like that
        :type x: int
        :type y: str
        """
    assert func.x.tooltip == x_docstring
    assert func.y.tooltip == y_docstring
def test_no_tooltips_from_numpydoc():
    """Test that ``tooltips=False`` hides all tooltips."""
    @magicgui(tooltips=False)
    def func(x: int, y: str = "hi"):
        """Do a little thing.
        Parameters
        ----------
        x : int
            An integer for you to use
        y : str, optional
            A greeting, by default 'hi'
        """
    assert not func.x.tooltip
    assert not func.y.tooltip
def test_only_some_tooltips_from_numpydoc():
    """Test that we can still show some tooltips with ``tooltips=False``."""
    # tooltips=False, means docstrings wont be parsed at all, but tooltips
    # can still be manually provided.
    @magicgui(tooltips=False, y={"tooltip": "Still want a tooltip"})
    def func(x: int, y: str = "hi"):
        """Do a little thing.
        Parameters
        ----------
        x : int
            An integer for you to use
        y : str, optional
            A greeting, by default 'hi'
        """
    assert not func.x.tooltip
    assert func.y.tooltip == "Still want a tooltip"
def test_magicgui_type_error():
    """Decorating a non-callable raises TypeError."""
    with pytest.raises(TypeError):
        magicgui("not a function")  # type: ignore
# Module-level decorated function used by test_magicgui_self_reference below.
@magicgui
def self_referencing_function(x: int = 1):
    """Function that refers to itself, and wants the FunctionGui instance."""
    return self_referencing_function
def test_magicgui_self_reference():
    """Test that self-referential magicguis work in global scopes."""
    assert isinstance(self_referencing_function(), widgets.FunctionGui)
def test_local_magicgui_self_reference():
    """Test that self-referential magicguis work in local scopes."""
    @magicgui
    def local_self_referencing_function(x: int = 1):
        """Function that refers to itself, and wants the FunctionGui instance."""
        return local_self_referencing_function
    assert isinstance(local_self_referencing_function(), widgets.FunctionGui)
| 27.241379 | 87 | 0.640564 |
9eed0d9c1c12fa457211f8e76066d0a77d46b808 | 5,214 | py | Python | core/calls/views/requests.py | Nikita-Filonov/lama_logger | 7b3f474ddf35685e6949ab00d7272d16c630295c | [
"Apache-2.0"
] | null | null | null | core/calls/views/requests.py | Nikita-Filonov/lama_logger | 7b3f474ddf35685e6949ab00d7272d16c630295c | [
"Apache-2.0"
] | null | null | null | core/calls/views/requests.py | Nikita-Filonov/lama_logger | 7b3f474ddf35685e6949ab00d7272d16c630295c | [
"Apache-2.0"
] | 1 | 2021-12-21T09:39:02.000Z | 2021-12-21T09:39:02.000Z | import json
from rest_framework import views, status
from rest_framework.authentication import TokenAuthentication
from rest_framework.decorators import api_view, authentication_classes, permission_classes, throttle_classes
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.throttling import UserRateThrottle
from core.calls.helpers.requests.authenticators import IntegrationTokenAuthentication
from core.calls.helpers.requests.filters import filter_request
from core.calls.models import Request
from core.calls.permissions.common import IsRequestAllowed
from core.calls.serializers.requests import RequestsSerializer, RequestSerializer
from core.projects.models import Project
from core.stats.tracks.requests import track_request, track_requests
from core.tracks.helpers.analyzers.analyze_request import analyze_request
from utils.exeptions import BadRequest, NotFound
from utils.helpers.common import delete_model
@api_view(['POST'])
@authentication_classes((IntegrationTokenAuthentication,))
@permission_classes((IsAuthenticated, IsRequestAllowed))
@throttle_classes((UserRateThrottle,))
def create_request(request, project_id):
    """Persist one logged request for ``project_id`` from the posted payload.

    The payload is analyzed first, then run through the project's filters;
    filtered-out requests are acknowledged with 204 and not stored.
    """
    # NOTE(review): Project.objects.get raises DoesNotExist for an unknown id,
    # which would surface as a 500 here — confirm IsRequestAllowed guarantees
    # the project exists before this point.
    project = Project.objects.get(id=project_id)
    payload = request.data.copy()
    # Analysis runs on a copy so filtering below sees the original payload keys.
    analyze_request(project_id, payload)
    should_create_request = filter_request(project, payload)
    if not should_create_request:
        # Project filters rejected the request: acknowledge without storing.
        return Response(status=status.HTTP_204_NO_CONTENT)
    context = {'user': request.user, 'project': project}
    serializer = RequestSerializer(data=request.data, context=context)
    if serializer.is_valid():
        created_request = serializer.save()
        track_request(project, created_request, 'create')
        payload = RequestsSerializer(created_request, many=False).data
        return Response(payload, status=status.HTTP_201_CREATED)
    raise BadRequest('Error happened while creating request', data=serializer.errors)
@api_view(['DELETE'])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated, IsRequestAllowed))
@throttle_classes((UserRateThrottle,))
def delete_all_requests(request, project_id):
    """Remove every stored Request belonging to the given project."""
    Request.objects.filter(project_id=project_id).delete()
    return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['GET'])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated, IsRequestAllowed))
@throttle_classes((UserRateThrottle,))
def get_requests_chain(request, project_id, node_id):
    """Return the requests that share ``node_id``, oldest first."""
    chain = Request.objects.filter(project_id=project_id, nodeId=node_id).order_by('created')
    return Response(RequestsSerializer(chain, many=True).data)
class RequestsApi(views.APIView, LimitOffsetPagination):
    """List (paginated, filtered) and bulk-delete a project's non-custom requests."""
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated, IsRequestAllowed]
    throttle_classes = [UserRateThrottle]
    def get(self, request, project_id):
        """Return a newest-first, limit/offset-paginated page of requests.

        The ``filters`` query parameter is a JSON object that is expanded
        directly into ``QuerySet.filter``.
        """
        # NOTE(review): client-supplied filter field names reach the ORM
        # unvalidated; an unknown field raises FieldError — confirm intended.
        filters = json.loads(request.query_params.get('filters', '{}'))
        requests = Request.objects.filter(**filters, project_id=project_id, isCustom=False).order_by('-created')
        results = self.paginate_queryset(requests, request, view=self)
        serializer = RequestsSerializer(results, many=True)
        return self.get_paginated_response(serializer.data)
    def delete(self, request, project_id):
        """Delete every request whose ``requestId`` is in the posted JSON list."""
        requests = request.data
        if not isinstance(requests, list):
            raise BadRequest('You should provide requests ids')
        project = Project.objects.get(id=project_id)
        requests = Request.objects.filter(project_id=project_id, requestId__in=requests)
        # Record the deletion in stats before the rows disappear.
        track_requests(project, requests, 'delete')
        requests.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class RequestApi(views.APIView):
    """Retrieve, partially update, or delete a single request by ``requestId``."""
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated, IsRequestAllowed]
    throttle_classes = [UserRateThrottle]
    def get(self, request, project_id, request_id):
        """Return the serialized request, or 404 if it does not exist."""
        # NOTE(review): the lookup is not scoped by project_id — a request from
        # another project would also match; confirm this is acceptable.
        try:
            db_request = Request.objects.get(requestId=request_id)
        except Request.DoesNotExist:
            raise NotFound('Request was not found on the server')
        return Response(RequestsSerializer(db_request, many=False).data)
    def patch(self, request, project_id, request_id):
        """Partially update a non-custom request and return the new state."""
        # NOTE(review): variable is named custom_request but the filter is
        # isCustom=False (only non-custom requests are patchable) — verify.
        try:
            custom_request = Request.objects.get(requestId=request_id, isCustom=False)
        except Request.DoesNotExist:
            raise NotFound('Request not found')
        serializer = RequestSerializer(custom_request, data=request.data, partial=True)
        if serializer.is_valid():
            custom_request = serializer.save()
            serializer = RequestsSerializer(custom_request, many=False)
            return Response(serializer.data)
        raise BadRequest(message='Error happened while updating request', data=serializer.errors)
    def delete(self, request, project_id, request_id):
        """Delete the request and confirm with a success message payload."""
        delete_model(Request, requestId=request_id)
        return Response({'message': 'Request was successfully deleted', 'level': 'success'})
| 41.712 | 112 | 0.764097 |
4a2643fbf94b7b96fdc691562a698cd996a25bd0 | 4,206 | py | Python | threat_connect/komand_threat_connect/actions/bulk_indicator_download/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | threat_connect/komand_threat_connect/actions/bulk_indicator_download/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | threat_connect/komand_threat_connect/actions/bulk_indicator_download/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
    """Names of the input parameters accepted by the bulk indicator download action."""
    ATTRIBUTE = "attribute"
    CONFIDENCE = "confidence"
    DATE_ADDED = "date_added"
    LAST_MODIFIED = "last_modified"
    OWNER = "owner"
    RATING = "rating"
    TAG = "tag"
    THREAT_ASSESS_CONFIDENCE = "threat_assess_confidence"
    THREAT_ASSESS_RATING = "threat_assess_rating"
    TYPE = "type"
class Output:
    """Names of the output fields produced by the bulk indicator download action."""
    BULK_INDICATORS = "bulk_indicators"
class BulkIndicatorDownloadInput(komand.Input):
    """JSON-schema-backed input definition for the bulk indicator download action.

    NOTE: this class comes from SDK-generated code; if the file is ever
    regenerated, re-apply the zero-argument ``super()`` fix below.
    """
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "attribute": {
      "type": "string",
      "title": "Attribute",
      "description": "Attribute type",
      "order": 2
    },
    "confidence": {
      "type": "integer",
      "title": "Confidence",
      "description": "Confidence value",
      "order": 3
    },
    "date_added": {
      "type": "string",
      "title": "Date Added",
      "displayType": "date",
      "description": "Date indicator added",
      "format": "date-time",
      "order": 4
    },
    "last_modified": {
      "type": "string",
      "title": "Last Modified",
      "displayType": "date",
      "description": "Last modified date",
      "format": "date-time",
      "order": 5
    },
    "owner": {
      "type": "string",
      "title": "Owner",
      "description": "Owner/Organization",
      "order": 1
    },
    "rating": {
      "type": "string",
      "title": "Rating",
      "description": "Indicator rating",
      "order": 6
    },
    "tag": {
      "type": "string",
      "title": "Tag",
      "description": "Single tag filter",
      "order": 7
    },
    "threat_assess_confidence": {
      "type": "integer",
      "title": "Threat Assess Confidence",
      "description": "Threat Assess Confidence filter",
      "order": 8
    },
    "threat_assess_rating": {
      "type": "string",
      "title": "Threat Assess Rating",
      "description": "Threat Assess Rating filter",
      "order": 9
    },
    "type": {
      "type": "string",
      "title": "Type",
      "description": "Indicator type",
      "order": 10
    }
  },
  "required": [
    "owner"
  ]
}
""")
    def __init__(self):
        # Zero-argument super(): the original ``super(self.__class__, self)``
        # recurses infinitely if this class is ever subclassed.
        super().__init__(self.schema)
class BulkIndicatorDownloadOutput(komand.Output):
    """JSON-schema-backed output definition for the bulk indicator download action.

    NOTE: this class comes from SDK-generated code; if the file is ever
    regenerated, re-apply the zero-argument ``super()`` fix below.
    """
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "bulk_indicators": {
      "type": "array",
      "title": "Bulk Indicators",
      "description": "Bulk indicators",
      "items": {
        "$ref": "#/definitions/bulk_indicator_output"
      },
      "order": 1
    }
  },
  "definitions": {
    "bulk_indicator_output": {
      "type": "object",
      "title": "bulk_indicator_output",
      "properties": {
        "confidence": {
          "type": "string",
          "title": "Confidence",
          "order": 7
        },
        "date_added": {
          "type": "string",
          "title": "Date Added",
          "displayType": "date",
          "format": "date-time",
          "order": 3
        },
        "id": {
          "type": "integer",
          "title": "ID",
          "order": 1
        },
        "last_modified": {
          "type": "string",
          "title": "Last Modified",
          "displayType": "date",
          "format": "date-time",
          "order": 4
        },
        "owner_name": {
          "type": "string",
          "title": "Owner Name",
          "order": 2
        },
        "rating": {
          "type": "string",
          "title": "Rating",
          "order": 5
        },
        "threat_assess_confidence": {
          "type": "string",
          "title": "Threat Assess Confidence",
          "order": 8
        },
        "threat_assess_rating": {
          "type": "string",
          "title": "Threat Assess Rating",
          "order": 6
        },
        "type": {
          "type": "string",
          "title": "Type",
          "order": 9
        },
        "weblink": {
          "type": "string",
          "title": "Weblink",
          "order": 10
        }
      }
    }
  }
}
""")
    def __init__(self):
        # Zero-argument super(): the original ``super(self.__class__, self)``
        # recurses infinitely if this class is ever subclassed.
        super().__init__(self.schema)
| 22.491979 | 57 | 0.485259 |
10fec8336add86ccd7a51ff59aefa0b7dcd54939 | 4,642 | py | Python | big_data_shenyang/expert/shenyang_expert_spider.py | littlesluttery/spider | 875187c3a01a79840dd1470878d7cb4678b183d7 | [
"Apache-2.0"
] | null | null | null | big_data_shenyang/expert/shenyang_expert_spider.py | littlesluttery/spider | 875187c3a01a79840dd1470878d7cb4678b183d7 | [
"Apache-2.0"
] | null | null | null | big_data_shenyang/expert/shenyang_expert_spider.py | littlesluttery/spider | 875187c3a01a79840dd1470878d7cb4678b183d7 | [
"Apache-2.0"
] | null | null | null | #-*- coding=utf-8 -*-
#@Time : 2020/10/9 12:52 PM
#@Author : 小邋遢
#@File : shenyang_expert_spider.py
#@Software : PyCharm
import json
import os
import re
from urllib.request import urlretrieve
import pymysql
import requests
from expert.config import *
from lxml import etree
import pandas as pd
def total_page_numbers():
    """Fetch the talent listing page and return the last page number as a string.

    Returns None when the listing page does not answer with HTTP 200.
    """
    listing_url = 'http://www.sykjtjpt.cn/h/talent/talentService'
    response = requests.get(listing_url, headers=HEADERS)
    if response.status_code != 200:
        return None
    tree = etree.HTML(response.text)
    # The second-to-last anchor of the pagination bar carries the final page number.
    return ''.join(tree.xpath("//div[@id='pagination']/li/a/text()")[-2])
def coonect_mysql():  # NOTE(review): name is a typo of "connect_mysql"; kept because every caller in this module uses it
    """Open a fresh MySQL connection from CONFIG and return (connection, cursor)."""
    db = pymysql.connect(**CONFIG)
    cursor = db.cursor()
    return db,cursor
def id_is(key_id):
    """Return 1 if an expert row with ``key_id`` already exists, else 0."""
    db, cursor = coonect_mysql()
    try:
        # Parameterized query instead of str.format: the previous version
        # interpolated key_id directly into the SQL (injection-prone) and
        # pulled a whole pandas DataFrame just to test existence.
        cursor.execute('select key_id from expert where key_id=%s', (key_id,))
        exists = cursor.fetchone() is not None
    finally:
        # The original never closed the connection, leaking one per call.
        cursor.close()
        db.close()
    if exists:
        print("该专家已经存在数据库....")
        return 1
    return 0
def save_to_msyql(results):
    """Insert one expert record unless its key_id is already stored.

    ``results`` is the dict built by parse_page_url; all values are inserted
    verbatim via a parameterized query.
    """
    key_id = results['key_id']
    if id_is(key_id) != 0:
        # Already present: nothing to insert.
        return
    db, cursor = coonect_mysql()
    try:
        # Concatenated pieces reproduce the original statement exactly.
        sql = ('insert into expert(inventor,key_id,gender,degrees,data_of_birth,'
               'expert_title,professional_field,workCompany,areas_of_expertise,'
               'talent_profile,photo) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)')
        cursor.execute(sql, (
            results['inventor'], key_id, results['gender'], results['degrees'],
            results['data_of_birth'], results['expert_title'],
            results['professional_field'], results['workCompany'],
            results['areas_of_expertise'], results['talent_profile'],
            results['photo'],
        ))
        db.commit()
        print("保存到数据库成功")
    except pymysql.MySQLError:
        # The original retried by calling itself recursively inside a bare
        # except, which loops forever on any deterministic failure and never
        # rolled back. Roll back and report once instead.
        db.rollback()
        print("保存到数据库失败......")
    finally:
        cursor.close()
        db.close()
def save_img(photo_url, id):
    """Download the expert photo to ./expert_photo/<id>.png (best effort).

    Failures are caught and logged so a broken image never aborts the crawl.
    """
    print("保存专家照片到本地.....")
    directory = os.path.join(os.getcwd(), "expert_photo")
    # Create the target directory on first use: the original crashed with
    # FileNotFoundError when ./expert_photo did not already exist.
    os.makedirs(directory, exist_ok=True)
    file_path = os.path.join(directory, "{}.png".format(id))
    try:
        urlretrieve(photo_url, file_path)
    except Exception:
        print("error of save expert photo")
def parse_page_url(i):
    """Fetch result page ``i`` of the expert listing and persist every expert on it."""
    data = {
        "pageNo": i,
        "pageSize": 12,
        "industrialField": "",
        "rank": "",
    }
    r = requests.post(url=DONGBEI_EXPERT_URL, headers=HEADERS, data=data)
    print(r.status_code)
    if r.status_code == 200:
        experts = json.loads(r.text)['page']['list']
        # Iterate the actual payload instead of a fixed range(12): the final
        # page usually carries fewer than pageSize entries, and the original
        # fixed-index loop raised IndexError there. This also stops the inner
        # index from shadowing the page-number parameter ``i``.
        for item in experts:
            key_id = item['expertId']
            # inventor:姓名
            inventor = item['name']
            # gender:性别
            gender = item['gender']
            # degrees:学历
            degrees = item['degrees']
            try:
                # data_of_birth:出生年月 (missing for some experts)
                data_of_birth = item['birthday']
            except KeyError:
                data_of_birth = None
            # expert_title:专家职称
            expert_title = item['rank']
            # professional_field:专业领域
            professional_field = item['industrialField']
            # workCompany:工作单位
            workCompany = item['unit']
            # areas_of_expertise:擅长领域
            areas_of_expertise = item['goodField']
            # talent_profile:人才简介
            talent_profile = item['intro']
            # photo:照片
            photo = item['imgPath']
            save_img(photo, key_id)
            results = {
                "inventor": inventor,
                "key_id": key_id,
                "gender": gender,
                "degrees": degrees,
                "data_of_birth": data_of_birth,
                "expert_title": expert_title,
                "professional_field": professional_field,
                "workCompany": workCompany,
                "areas_of_expertise": areas_of_expertise,
                "talent_profile": talent_profile,
                "photo": photo,
            }
            save_to_msyql(results)
    else:
        print(111)
        return None
def run():
    """Crawl every listing page discovered from the pagination bar."""
    # Total number of pages, as a string, or None on HTTP failure.
    total_page_number = total_page_numbers()
    print(total_page_number)
    if total_page_number is None:
        # Listing page unreachable: nothing to crawl (the original crashed
        # on int(None) here).
        return
    total = int(total_page_number)
    # range's upper bound is exclusive: include the final page. The original
    # range(1, total) silently skipped the last page of experts.
    for page in range(1, total + 1):
        parse_page_url(page)
if __name__ == '__main__':
run() | 29.0125 | 210 | 0.588324 |
b17d944c7e5787d6adede43bbb837e8e31e85f08 | 571 | py | Python | setup.py | SuryaThiru/ppscore | 59df800e32d4ef5fda4be2bdf4b3235db2a39fee | [
"MIT"
] | 1 | 2020-07-04T20:18:52.000Z | 2020-07-04T20:18:52.000Z | setup.py | SuryaThiru/ppscore | 59df800e32d4ef5fda4be2bdf4b3235db2a39fee | [
"MIT"
] | null | null | null | setup.py | SuryaThiru/ppscore | 59df800e32d4ef5fda4be2bdf4b3235db2a39fee | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Setup file for ppscore.
Use setup.cfg to configure your project.
This file was generated with PyScaffold 3.2.1.
PyScaffold helps you to put up the scaffold of your new Python project.
Learn more under: https://pyscaffold.org/
"""
import sys
from pkg_resources import VersionConflict, require
from setuptools import setup
# Fail fast with a clear message when setuptools is too old for PyScaffold.
try:
    require("setuptools>=38.3")
except VersionConflict:
    print("Error: version of setuptools is too old (<38.3)!")
    sys.exit(1)
if __name__ == "__main__":
    # All project configuration lives in setup.cfg; PyScaffold reads it here.
    setup(use_pyscaffold=True)
| 23.791667 | 75 | 0.702277 |
e1d4a42367a9970c07dc4821e4e15e11003793b0 | 19,729 | py | Python | stable_baselines/ppo1/pposgd_simple.py | hejujie/stable-baselines | 0dfb0e6e7ca7aa215c165bdf28c709d2e6aa3dc4 | [
"MIT"
] | null | null | null | stable_baselines/ppo1/pposgd_simple.py | hejujie/stable-baselines | 0dfb0e6e7ca7aa215c165bdf28c709d2e6aa3dc4 | [
"MIT"
] | null | null | null | stable_baselines/ppo1/pposgd_simple.py | hejujie/stable-baselines | 0dfb0e6e7ca7aa215c165bdf28c709d2e6aa3dc4 | [
"MIT"
] | null | null | null | from collections import deque
import time
import gym
import tensorflow as tf
import numpy as np
from mpi4py import MPI
from stable_baselines.common import Dataset, explained_variance, fmt_row, zipsame, ActorCriticRLModel, SetVerbosity, \
TensorboardWriter
from stable_baselines import logger
import stable_baselines.common.tf_util as tf_util
from stable_baselines.common.policies import ActorCriticPolicy
from stable_baselines.common.mpi_adam import MpiAdam
from stable_baselines.common.mpi_moments import mpi_moments
from stable_baselines.trpo_mpi.utils import traj_segment_generator, add_vtarg_and_adv, flatten_lists
from stable_baselines.a2c.utils import total_episode_reward_logger
class PPO1(ActorCriticRLModel):
    """
    Proximal Policy Optimization algorithm (MPI version).
    Paper: https://arxiv.org/abs/1707.06347
    :param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
    :param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...)
    :param timesteps_per_actorbatch: (int) timesteps per actor per update
    :param clip_param: (float) clipping parameter epsilon
    :param entcoeff: (float) the entropy loss weight
    :param optim_epochs: (float) the optimizer's number of epochs
    :param optim_stepsize: (float) the optimizer's stepsize
    :param optim_batchsize: (int) the optimizer's the batch size
    :param gamma: (float) discount factor
    :param lam: (float) advantage estimation
    :param adam_epsilon: (float) the epsilon value for the adam optimizer
    :param schedule: (str) The type of scheduler for the learning rate update ('linear', 'constant',
                     'double_linear_con', 'middle_drop' or 'double_middle_drop')
    :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
    :param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
    :param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
    :param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
    :param full_tensorboard_log: (bool) enable additional logging when using tensorboard
        WARNING: this logging can take a lot of space quickly
    :param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).
        If None (default), use random seed. Note that if you want completely deterministic
        results, you must set `n_cpu_tf_sess` to 1.
    :param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations
        If None, the number of cpu of the current machine will be used.
    """
    def __init__(self, policy, env, gamma=0.99, timesteps_per_actorbatch=256, clip_param=0.2, entcoeff=0.01,
                 optim_epochs=4, optim_stepsize=1e-3, optim_batchsize=64, lam=0.95, adam_epsilon=1e-5,
                 schedule='linear', verbose=0, tensorboard_log=None, _init_setup_model=True,
                 policy_kwargs=None, full_tensorboard_log=False, seed=None, n_cpu_tf_sess=1):
        super().__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=False,
                         _init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs,
                         seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)
        # Hyperparameters (see class docstring for their meaning).
        self.gamma = gamma
        self.timesteps_per_actorbatch = timesteps_per_actorbatch
        self.clip_param = clip_param
        self.entcoeff = entcoeff
        self.optim_epochs = optim_epochs
        self.optim_stepsize = optim_stepsize
        self.optim_batchsize = optim_batchsize
        self.lam = lam
        self.adam_epsilon = adam_epsilon
        self.schedule = schedule
        self.tensorboard_log = tensorboard_log
        self.full_tensorboard_log = full_tensorboard_log
        # Graph-related attributes; all populated by setup_model().
        self.graph = None
        self.sess = None
        self.policy_pi = None
        self.loss_names = None
        self.lossandgrad = None
        self.adam = None
        self.assign_old_eq_new = None
        self.compute_losses = None
        self.params = None
        self.step = None
        self.proba_step = None
        self.initial_state = None
        self.summary = None
        self.episode_reward = None
        if _init_setup_model:
            self.setup_model()
    def _get_pretrain_placeholders(self):
        """Return (obs placeholder, action placeholder, policy output) used by pretraining.

        For discrete action spaces the raw policy logits are returned;
        otherwise the deterministic action output is used.
        """
        policy = self.policy_pi
        action_ph = policy.pdtype.sample_placeholder([None])
        if isinstance(self.action_space, gym.spaces.Discrete):
            return policy.obs_ph, action_ph, policy.policy
        return policy.obs_ph, action_ph, policy.deterministic_action
    def setup_model(self):
        """Build the TF graph: new/old policy networks, the clipped PPO
        surrogate loss, TensorBoard summaries, and the MPI Adam optimizer."""
        with SetVerbosity(self.verbose):
            self.graph = tf.Graph()
            with self.graph.as_default():
                self.set_random_seed(self.seed)
                self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)
                n_steps = self.optim_batchsize // self.n_envs
                # Construct network for new policy
                self.policy_pi = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, n_steps,
                                             self.n_envs * n_steps, reuse=False, **self.policy_kwargs)
                # Network for old policy
                with tf.variable_scope("oldpi", reuse=False):
                    old_pi = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, n_steps,
                                         self.n_envs * n_steps, reuse=False, **self.policy_kwargs)
                with tf.variable_scope("loss", reuse=False):
                    # Target advantage function (if applicable)
                    atarg = tf.placeholder(dtype=tf.float32, shape=[None])
                    # Empirical return
                    ret = tf.placeholder(dtype=tf.float32, shape=[None])
                    # learning rate multiplier, updated with schedule
                    lrmult = tf.placeholder(name='lrmult', dtype=tf.float32, shape=[])
                    # Annealed clipping parameter epsilon
                    clip_param = self.clip_param * lrmult
                    obs_ph = self.policy_pi.obs_ph
                    action_ph = self.policy_pi.pdtype.sample_placeholder([None])
                    kloldnew = old_pi.proba_distribution.kl(self.policy_pi.proba_distribution)
                    ent = self.policy_pi.proba_distribution.entropy()
                    meankl = tf.reduce_mean(kloldnew)
                    meanent = tf.reduce_mean(ent)
                    pol_entpen = (-self.entcoeff) * meanent
                    # pnew / pold (probability ratio, computed in log space)
                    ratio = tf.exp(self.policy_pi.proba_distribution.logp(action_ph) -
                                   old_pi.proba_distribution.logp(action_ph))
                    # surrogate from conservative policy iteration
                    surr1 = ratio * atarg
                    surr2 = tf.clip_by_value(ratio, 1.0 - clip_param, 1.0 + clip_param) * atarg
                    # PPO's pessimistic surrogate (L^CLIP)
                    pol_surr = - tf.reduce_mean(tf.minimum(surr1, surr2))
                    vf_loss = tf.reduce_mean(tf.square(self.policy_pi.value_flat - ret))
                    total_loss = pol_surr + pol_entpen + vf_loss
                    losses = [pol_surr, pol_entpen, vf_loss, meankl, meanent]
                    self.loss_names = ["pol_surr", "pol_entpen", "vf_loss", "kl", "ent"]
                    tf.summary.scalar('entropy_loss', pol_entpen)
                    tf.summary.scalar('policy_gradient_loss', pol_surr)
                    tf.summary.scalar('value_function_loss', vf_loss)
                    tf.summary.scalar('approximate_kullback-leibler', meankl)
                    tf.summary.scalar('clip_factor', clip_param)
                    tf.summary.scalar('loss', total_loss)
                    self.params = tf_util.get_trainable_vars("model")
                    # Op that copies the current ("model") weights into the "oldpi" network.
                    self.assign_old_eq_new = tf_util.function(
                        [], [], updates=[tf.assign(oldv, newv) for (oldv, newv) in
                                         zipsame(tf_util.get_globals_vars("oldpi"), tf_util.get_globals_vars("model"))])
                with tf.variable_scope("Adam_mpi", reuse=False):
                    self.adam = MpiAdam(self.params, epsilon=self.adam_epsilon, sess=self.sess)
                with tf.variable_scope("input_info", reuse=False):
                    tf.summary.scalar('discounted_rewards', tf.reduce_mean(ret))
                    tf.summary.scalar('learning_rate', tf.reduce_mean(self.optim_stepsize))
                    tf.summary.scalar('advantage', tf.reduce_mean(atarg))
                    tf.summary.scalar('clip_range', tf.reduce_mean(self.clip_param))
                    if self.full_tensorboard_log:
                        tf.summary.histogram('discounted_rewards', ret)
                        tf.summary.histogram('learning_rate', self.optim_stepsize)
                        tf.summary.histogram('advantage', atarg)
                        tf.summary.histogram('clip_range', self.clip_param)
                    if tf_util.is_image(self.observation_space):
                        tf.summary.image('observation', obs_ph)
                    else:
                        tf.summary.histogram('observation', obs_ph)
                self.step = self.policy_pi.step
                self.proba_step = self.policy_pi.proba_step
                self.initial_state = self.policy_pi.initial_state
                tf_util.initialize(sess=self.sess)
                self.summary = tf.summary.merge_all()
                # Compiled functions: loss + flattened gradient (with summary), and losses only.
                self.lossandgrad = tf_util.function([obs_ph, old_pi.obs_ph, action_ph, atarg, ret, lrmult],
                                                    [self.summary, tf_util.flatgrad(total_loss, self.params)] + losses)
                self.compute_losses = tf_util.function([obs_ph, old_pi.obs_ph, action_ph, atarg, ret, lrmult],
                                                       losses)
    def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name="PPO1",
              reset_num_timesteps=True):
        """Run the PPO training loop: collect rollouts, compute GAE advantages,
        then perform several epochs of minibatch Adam updates per iteration."""
        new_tb_log = self._init_num_timesteps(reset_num_timesteps)
        with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
                as writer:
            self._setup_learn()
            assert issubclass(self.policy, ActorCriticPolicy), "Error: the input policy for the PPO1 model must be " \
                                                               "an instance of common.policies.ActorCriticPolicy."
            with self.sess.as_default():
                self.adam.sync()
                # Prepare for rollouts
                seg_gen = traj_segment_generator(self.policy_pi, self.env, self.timesteps_per_actorbatch)
                episodes_so_far = 0
                timesteps_so_far = 0
                iters_so_far = 0
                t_start = time.time()
                # rolling buffer for episode lengths
                lenbuffer = deque(maxlen=100)
                # rolling buffer for episode rewards
                rewbuffer = deque(maxlen=100)
                self.episode_reward = np.zeros((self.n_envs,))
                while True:
                    if callback is not None:
                        # Only stop training if return value is False, not when it is None. This is for backwards
                        # compatibility with callbacks that have no return statement.
                        if callback(locals(), globals()) is False:
                            break
                    if total_timesteps and timesteps_so_far >= total_timesteps:
                        break
                    # Anneal the learning-rate multiplier according to the schedule.
                    if self.schedule == 'constant':
                        cur_lrmult = 1.0
                    elif self.schedule == 'linear':
                        cur_lrmult = max(1.0 - float(timesteps_so_far) / total_timesteps, 0)
                    else:
                        raise NotImplementedError
                    logger.log("********** Iteration %i ************" % iters_so_far)
                    seg = seg_gen.__next__()
                    add_vtarg_and_adv(seg, self.gamma, self.lam)
                    # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
                    observations, actions = seg["observations"], seg["actions"]
                    atarg, tdlamret = seg["adv"], seg["tdlamret"]
                    # true_rew is the reward without discount
                    if writer is not None:
                        self.episode_reward = total_episode_reward_logger(self.episode_reward,
                                                                          seg["true_rewards"].reshape((self.n_envs, -1)),
                                                                          seg["dones"].reshape((self.n_envs, -1)),
                                                                          writer, self.num_timesteps)
                    # predicted value function before update
                    vpredbefore = seg["vpred"]
                    # standardized advantage function estimate
                    # NOTE(review): divides by atarg.std() without a zero guard;
                    # a constant-advantage batch would produce NaNs - confirm upstream behaviour.
                    atarg = (atarg - atarg.mean()) / atarg.std()
                    dataset = Dataset(dict(ob=observations, ac=actions, atarg=atarg, vtarg=tdlamret),
                                      shuffle=not self.policy.recurrent)
                    optim_batchsize = self.optim_batchsize or observations.shape[0]
                    # set old parameter values to new parameter values
                    self.assign_old_eq_new(sess=self.sess)
                    logger.log("Optimizing...")
                    logger.log(fmt_row(13, self.loss_names))
                    # Here we do a bunch of optimization epochs over the data
                    for k in range(self.optim_epochs):
                        # list of tuples, each of which gives the loss for a minibatch
                        losses = []
                        for i, batch in enumerate(dataset.iterate_once(optim_batchsize)):
                            steps = (self.num_timesteps +
                                     k * optim_batchsize +
                                     int(i * (optim_batchsize / len(dataset.data_map))))
                            if writer is not None:
                                # run loss backprop with summary, but once every 10 runs save the metadata
                                # (memory, compute time, ...)
                                if self.full_tensorboard_log and (1 + k) % 10 == 0:
                                    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                                    run_metadata = tf.RunMetadata()
                                    summary, grad, *newlosses = self.lossandgrad(batch["ob"], batch["ob"], batch["ac"],
                                                                                 batch["atarg"], batch["vtarg"],
                                                                                 cur_lrmult, sess=self.sess,
                                                                                 options=run_options,
                                                                                 run_metadata=run_metadata)
                                    writer.add_run_metadata(run_metadata, 'step%d' % steps)
                                else:
                                    summary, grad, *newlosses = self.lossandgrad(batch["ob"], batch["ob"], batch["ac"],
                                                                                 batch["atarg"], batch["vtarg"],
                                                                                 cur_lrmult, sess=self.sess)
                                writer.add_summary(summary, steps)
                            else:
                                _, grad, *newlosses = self.lossandgrad(batch["ob"], batch["ob"], batch["ac"],
                                                                       batch["atarg"], batch["vtarg"], cur_lrmult,
                                                                       sess=self.sess)
                            self.adam.update(grad, self.optim_stepsize * cur_lrmult)
                            losses.append(newlosses)
                        logger.log(fmt_row(13, np.mean(losses, axis=0)))
                    logger.log("Evaluating losses...")
                    losses = []
                    for batch in dataset.iterate_once(optim_batchsize):
                        newlosses = self.compute_losses(batch["ob"], batch["ob"], batch["ac"], batch["atarg"],
                                                        batch["vtarg"], cur_lrmult, sess=self.sess)
                        losses.append(newlosses)
                    mean_losses, _, _ = mpi_moments(losses, axis=0)
                    logger.log(fmt_row(13, mean_losses))
                    for (loss_val, name) in zipsame(mean_losses, self.loss_names):
                        logger.record_tabular("loss_" + name, loss_val)
                    logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret))
                    # local values
                    lrlocal = (seg["ep_lens"], seg["ep_rets"])
                    # list of tuples
                    listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal)
                    lens, rews = map(flatten_lists, zip(*listoflrpairs))
                    lenbuffer.extend(lens)
                    rewbuffer.extend(rews)
                    if len(lenbuffer) > 0:
                        logger.record_tabular("EpLenMean", np.mean(lenbuffer))
                        logger.record_tabular("EpRewMean", np.mean(rewbuffer))
                    logger.record_tabular("EpThisIter", len(lens))
                    episodes_so_far += len(lens)
                    current_it_timesteps = MPI.COMM_WORLD.allreduce(seg["total_timestep"])
                    timesteps_so_far += current_it_timesteps
                    self.num_timesteps += current_it_timesteps
                    iters_so_far += 1
                    logger.record_tabular("EpisodesSoFar", episodes_so_far)
                    logger.record_tabular("TimestepsSoFar", self.num_timesteps)
                    logger.record_tabular("TimeElapsed", time.time() - t_start)
                    # Only the MPI rank-0 worker prints the tabular log.
                    if self.verbose >= 1 and MPI.COMM_WORLD.Get_rank() == 0:
                        logger.dump_tabular()
        return self
    def save(self, save_path, cloudpickle=False):
        """Serialize the model's hyperparameters and trained weights to ``save_path``."""
        data = {
            "gamma": self.gamma,
            "timesteps_per_actorbatch": self.timesteps_per_actorbatch,
            "clip_param": self.clip_param,
            "entcoeff": self.entcoeff,
            "optim_epochs": self.optim_epochs,
            "optim_stepsize": self.optim_stepsize,
            "optim_batchsize": self.optim_batchsize,
            "lam": self.lam,
            "adam_epsilon": self.adam_epsilon,
            "schedule": self.schedule,
            "verbose": self.verbose,
            "policy": self.policy,
            "observation_space": self.observation_space,
            "action_space": self.action_space,
            "n_envs": self.n_envs,
            "n_cpu_tf_sess": self.n_cpu_tf_sess,
            "seed": self.seed,
            "_vectorize_action": self._vectorize_action,
            "policy_kwargs": self.policy_kwargs
        }
        params_to_save = self.get_parameters()
        self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)
| 53.321622 | 121 | 0.55426 |
145157d656ae3576c9963b294c93d7f82c40f7e4 | 1,466 | py | Python | noxfile.py | jeromekelleher/lshmm | 58e0c3395f222e756bb10a0063f5118b20176a01 | [
"MIT"
] | null | null | null | noxfile.py | jeromekelleher/lshmm | 58e0c3395f222e756bb10a0063f5118b20176a01 | [
"MIT"
] | null | null | null | noxfile.py | jeromekelleher/lshmm | 58e0c3395f222e756bb10a0063f5118b20176a01 | [
"MIT"
] | null | null | null | import nox
@nox.session
def format(session):
    """Reformat the code base in place with black and then isort."""
    session.install("-r", "requirements.dev.txt")
    for formatter in ("black", "isort"):
        session.run(formatter, ".")
@nox.session
def pydocstyle(session):
    """Check docstring conventions of the lshmm package."""
    dev_requirements = ("-r", "requirements.dev.txt")
    session.install(*dev_requirements)
    session.run("pydocstyle", "lshmm")
@nox.session
def lint(session):
    """Run pylint over the package and its test suite."""
    session.install("msprime")
    session.install("-r", "requirements.dev.txt")
    session.install(".")
    for target in ("lshmm", "tests"):
        session.run("pylint", target)
@nox.session
def test(session):
    """Run the pytest suite against an installed copy of the package."""
    for dependency in ("tskit", "msprime"):
        session.install(dependency)
    session.install("-r", "requirements.dev.txt")
    session.install(".")
    session.run("pytest")
@nox.session
def build_docs(session):
    """Regenerate the API reference and build the HTML documentation."""
    for dependency in ("tskit", "msprime"):
        session.install(dependency)
    session.install("-r", "docs/requirements.docs.txt")
    session.install(".")
    session.cd("docs")
    # Rebuild the auto-generated API reference from scratch.
    session.run("rm", "-rf", "api_reference", external=True)
    session.run("python3", "generate_api_reference.py")
    # Build the HTML site; -W turns Sphinx warnings into errors.
    session.run("rm", "-rf", "html", external=True)
    session.run("python3", "-m", "sphinx", "-W", "-b", "html", ".", "html")
@nox.session
def serve_docs(session):
    """Serve the built HTML documentation on a local HTTP server."""
    html_dir = "docs/html"
    session.cd(html_dir)
    session.run("python3", "-m", "http.server")
@nox.session
def pip_compile(session):
    """Regenerate pinned requirements with pip-compile, forwarding any CLI args."""
    session.install("pip-tools")
    extra_args = session.posargs
    session.run("pip-compile", *extra_args)
nox.options.sessions = ["format", "pydocstyle", "lint", "test", "build_docs"]
| 21.558824 | 77 | 0.641201 |
ef8e368b7084884a3edcf91ffa5f75b354ca5f67 | 610 | py | Python | expenses_tracker/expenses_tracker/profiles/migrations/0001_initial.py | BoyanPeychinov/python_web_basics | 2f892ac119f7fe3a5c03fc5e7b35670dc609a70f | [
"MIT"
] | 1 | 2021-07-20T12:16:34.000Z | 2021-07-20T12:16:34.000Z | expenses_tracker/expenses_tracker/profiles/migrations/0001_initial.py | BoyanPeychinov/python_web_basics | 2f892ac119f7fe3a5c03fc5e7b35670dc609a70f | [
"MIT"
] | null | null | null | expenses_tracker/expenses_tracker/profiles/migrations/0001_initial.py | BoyanPeychinov/python_web_basics | 2f892ac119f7fe3a5c03fc5e7b35670dc609a70f | [
"MIT"
] | null | null | null | # Generated by Django 3.2.4 on 2021-06-25 10:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the ``Profile`` table.

    NOTE: generated by Django (see file header); schema changes belong in
    new migrations, not edits to this one.
    """
    # First migration of the app, so there is nothing to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=15)),
                ('last_name', models.CharField(max_length=15)),
                ('budget', models.IntegerField()),
            ],
        ),
    ]
| 25.416667 | 117 | 0.570492 |
eb7d830cabbbf082d15541a78cb1f69d41e4ae67 | 2,603 | py | Python | data/creat_static_image_dataset.py | Forence1999/SmartWalker | 635410bf44234eead9fd1e2fe226eb8eafa9d27d | [
"MIT"
] | 2 | 2021-11-13T14:16:06.000Z | 2022-01-12T06:07:32.000Z | data/creat_static_image_dataset.py | Forence1999/SmartWalker | 635410bf44234eead9fd1e2fe226eb8eafa9d27d | [
"MIT"
] | null | null | null | data/creat_static_image_dataset.py | Forence1999/SmartWalker | 635410bf44234eead9fd1e2fe226eb8eafa9d27d | [
"MIT"
] | 3 | 2021-08-30T04:40:39.000Z | 2022-01-09T11:34:04.000Z | import sys, os
# import time
import numpy as np
import math
import matplotlib.pyplot as pyplot
import cv2 as cv
# Directory containing this script; used below to build data-file paths.
# (abspath is applied twice - the inner call already returns an absolute path.)
pwd = os.path.abspath(os.path.abspath(__file__))
father_path = os.path.abspath(os.path.dirname(pwd))
# sys.path.append(father_path)
# print(father_path)
def get_data(direction):
    """Parse a text file of bracketed, comma-separated rows into a float array.

    Each line is expected to look like ``[v0, v1, ..., vn]`` with the values
    separated by ", " and every row having the same length.

    Parameters
    ----------
    direction : str
        Path of the text file to read.

    Returns
    -------
    numpy.ndarray
        2-D float array with one row per line of the file.
    """
    rows = []
    # Context manager guarantees the handle is closed even if parsing fails
    # (the original opened/closed the file manually).
    with open(direction) as file:
        for line in file:
            line = line.strip("\n").strip('[').strip(']')
            rows.append(line.split(", "))
    # Let numpy do the str->float conversion in one vectorized step instead
    # of the original element-by-element double loop.
    return np.array(rows, dtype=float)
"""load the data"""
direction_ir_data = os.path.abspath(father_path + os.path.sep + "ir_data.txt")
ir_data = get_data(direction_ir_data)[:,1:769]
print("ir",ir_data.shape)
def normalization(img, binarizaion: bool = False):
    """Normalize an image array.

    With ``binarizaion`` set, values are thresholded to {0, 1}, the cutoff
    being the larger of ``mean + 1.4`` and 23; otherwise the array is
    rescaled linearly assuming a raw value range of [10, 40]. Scalar
    (0-d) inputs are returned as an unchanged copy.
    (The parameter keeps its original misspelling for caller compatibility.)
    """
    result = np.copy(img)
    if len(result.shape) == 0:
        # 0-d input: nothing to normalize.
        return result
    if not binarizaion:
        return (result - 10) / (40 - 10)
    cutoff = max(result.mean() + 1.4, 23)
    below = result < cutoff
    result[below] = 0
    result[~below] = 1
    return result
def filter(img, binarization: bool = False):
    """Optionally smooth a flattened 24x32 thermal frame.

    When ``binarization`` is set, the frame is reshaped to 24x32, smoothed
    once with a 2x2 mean kernel via ``cv.filter2D`` and flattened again;
    otherwise a plain copy of ``img`` is returned.
    (The name shadows the built-in ``filter`` but is kept for callers.)
    """
    smoothed = np.copy(img)
    if not binarization:
        return smoothed
    frame = smoothed.reshape((24, 32))
    # 2x2 box (mean) kernel. Alternative kernels kept from the original
    # for reference:
    #   np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]]) / 10
    #   np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])
    mean_kernel = np.ones((2, 2)) / 4
    frame = cv.filter2D(frame, -1, mean_kernel)
    return frame.flatten()
# Normalize every frame. filter() is called with binarization left at its
# default (False), so here it only returns the normalized frame as a copy.
for i in range(ir_data.shape[0]):
    ir_data[i,0:ir_data.shape[1]] = filter(normalization(ir_data[i,0:ir_data.shape[1]]))
# One label per frame; the whole file is assigned the single class below.
label = np.ones((ir_data.shape[0],1))
"""0:still, 1:forward, 2:left turn, 3:right turn, 4:yuandi left 5:yuandi right"""
intention_class = 5
label = label*intention_class
# Write processed frames and labels next to this script, embedding the
# sample count in the file names (e.g. s123_data.txt / s123_label.txt).
s_train_data_path = os.path.abspath(father_path + os.path.sep + "s"+str(ir_data.shape[0])+"_data.txt")
np.savetxt(s_train_data_path,ir_data,fmt="%.3f")
s_train_label_path = os.path.abspath(father_path + os.path.sep + "s"+str(ir_data.shape[0])+"_label.txt")
np.savetxt(s_train_label_path,label,fmt="%d")
| 31.361446 | 104 | 0.636957 |
0c67f3dd07904c5e00db77743d9c373faec42464 | 9,099 | py | Python | src/VAE/main.py | goeckslab/MarkerIntensityPredictor | 704e4ea782c6653cabb4b37a7b34fea4cd9fe595 | [
"MIT"
] | 3 | 2021-02-22T19:26:04.000Z | 2022-03-02T22:08:25.000Z | src/VAE/main.py | goeckslab/MarkerIntensityPredictor | 704e4ea782c6653cabb4b37a7b34fea4cd9fe595 | [
"MIT"
] | 1 | 2021-03-12T22:22:25.000Z | 2021-03-12T22:22:25.000Z | src/VAE/main.py | goeckslab/MarkerIntensityPredictor | 704e4ea782c6653cabb4b37a7b34fea4cd9fe595 | [
"MIT"
] | 1 | 2021-03-12T20:28:50.000Z | 2021-03-12T20:28:50.000Z | from keras import layers, regularizers
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from Shared.data_loader import DataLoader
from Shared.data import Data
import numpy as np
import sys
import pandas as pd
from pathlib import Path
import keras
from VAE.sampling import Sampling
from VAE.vae_model import VAE
import anndata as ad
import matplotlib.pyplot as plt
import pickle
from sklearn.metrics import r2_score
import umap
import tensorflow as tf
class VAutoEncoder:
    """Variational auto-encoder for marker-intensity data.

    Bundles data loading/normalisation, model construction and training,
    and the export of predictions, UMAP embeddings, R2 scores and
    correlation data to ``self.results_folder``.
    """
    data: Data
    # The defined encoder
    encoder: any
    # The defined decoder
    decoder: any
    # The ae
    vae: any
    # the training history of the AE
    history: any
    input_dim: int
    encoding_dim: int
    input_umap: any
    latent_umap: any
    r2_scores = pd.DataFrame(columns=["Marker", "Score"])
    encoded_data = pd.DataFrame()
    reconstructed_data = pd.DataFrame()
    args = None
    results_folder = Path("results", "vae")
    def __init__(self, args, folder: str = None):
        """Store CLI args and optionally nest the results folder one level deeper."""
        # Size of the latent space.
        self.encoding_dim = 5
        self.args = args
        if folder is not None:
            self.results_folder = Path(self.results_folder, folder)
    def normalize(self, data):
        """log10-transform, standardize, clip to [-5, 5] and min-max scale to [0, 1]."""
        # Input data contains some zeros which results in NaN (or Inf)
        # values when their log10 is computed. NaN (or Inf) are problematic
        # values for downstream analysis. Therefore, zeros are replaced by
        # a small value; see the following thread for related discussion.
        # https://www.researchgate.net/post/Log_transformation_of_values_that_include_0_zero_for_statistical_analyses2
        data[data == 0] = 1e-32
        data = np.log10(data)
        standard_scaler = StandardScaler()
        data = standard_scaler.fit_transform(data)
        data = data.clip(min=-5, max=5)
        min_max_scaler = MinMaxScaler(feature_range=(0, 1))
        data = min_max_scaler.fit_transform(data)
        return data
    def load_data(self):
        """Load inputs/markers from a file or a folder (per CLI args) into ``self.data``."""
        print("Loading data...")
        if self.args.file:
            inputs, markers = DataLoader.get_data(
                self.args.file, self.args.morph)
        elif self.args.dir:
            inputs, markers = DataLoader.load_folder_data(
                self.args.dir, self.args.morph)
        else:
            print("Please specify a directory or a file")
            sys.exit()
        self.data = Data(np.array(inputs), markers, self.normalize)
    def build_auto_encoder(self):
        """Build encoder/decoder networks, assemble the VAE and train it with early stopping."""
        # Build the encoder
        inputs_dim = self.data.inputs.shape[1]
        activity_regularizer = regularizers.l1_l2(10e-5)
        activation = tf.keras.layers.LeakyReLU()
        encoder_inputs = keras.Input(shape=(inputs_dim,))
        h1 = layers.Dense(inputs_dim, activation=activation, activity_regularizer=activity_regularizer)(encoder_inputs)
        h2 = layers.BatchNormalization()(h1)
        h3 = layers.Dropout(0.5)(h2)
        h4 = layers.Dense(inputs_dim / 2, activation=activation, activity_regularizer=activity_regularizer)(h3)
        h5 = layers.BatchNormalization()(h4)
        h6 = layers.Dropout(0.5)(h5)
        h7 = layers.Dense(inputs_dim / 3, activation=activation, activity_regularizer=activity_regularizer)(h6)
        h8 = layers.Dropout(0.5)(h7)
        h9 = layers.BatchNormalization()(h8)
        # The following variables are for the convenience of building the decoder.
        # last layer before flatten
        lbf = h9
        # shape before flatten.
        sbf = keras.backend.int_shape(lbf)[1:]
        # neurons count before latent dim
        nbl = np.prod(sbf)
        # Latent distribution parameters and the reparameterized sample.
        z_mean = layers.Dense(self.encoding_dim, name="z_mean")(lbf)
        z_log_var = layers.Dense(self.encoding_dim, name="z_log_var")(lbf)
        z = Sampling()([z_mean, z_log_var])
        self.encoder = keras.Model(encoder_inputs, [z_mean, z_log_var, z], name="encoder")
        self.encoder.summary()
        # Build the decoder
        decoder_inputs = keras.Input(shape=(self.encoding_dim,))
        h1 = layers.Dense(nbl, activation=activation)(decoder_inputs)
        h2 = layers.Dense(inputs_dim / 2, activation=activation)(h1)
        decoder_outputs = layers.Dense(inputs_dim)(h2)
        self.decoder = keras.Model(decoder_inputs, decoder_outputs, name="decoder")
        self.decoder.summary()
        # Visualize the model.
        # tf.keras.utils.plot_model(model, to_file="model.png")
        # Train the VAE
        # Create the VAR, compile, and run.
        callback = tf.keras.callbacks.EarlyStopping(monitor="reconstruction_loss",
                                                    mode="min", patience=5,
                                                    restore_best_weights=True)
        self.vae = VAE(self.encoder, self.decoder)
        self.vae.compile(optimizer="adam")
        self.history = self.vae.fit(self.data.X_train,
                                    validation_data=(self.data.X_val, self.data.X_val),
                                    epochs=500,
                                    callbacks=callback,
                                    batch_size=32,
                                    shuffle=True,
                                    verbose=1)
    def predict(self):
        """Encode/decode one validation cell and print the shapes and values (debug helper)."""
        # Make some predictions
        cell = self.data.X_val[0]
        cell = cell.reshape(1, cell.shape[0])
        mean, log_var, z = self.encoder.predict(cell)
        encoded_cell = z
        decoded_cell = self.decoder.predict(encoded_cell)
        var_cell = self.vae.predict(cell)
        print(f"Input shape:\t{cell.shape}")
        print(f"Encoded shape:\t{encoded_cell.shape}")
        print(f"Decoded shape:\t{decoded_cell.shape}")
        print(f"\nInput:\n{cell[0]}")
        print(f"\nEncoded:\n{encoded_cell[0]}")
        print(f"\nDecoded:\n{decoded_cell[0]}")
    def calculate_r2_score(self):
        """Compute a per-marker R2 between test inputs and VAE reconstructions."""
        recon_test = self.vae.predict(self.data.X_test)
        recon_test = pd.DataFrame(data=recon_test, columns=self.data.markers)
        input_data = pd.DataFrame(data=self.data.X_test, columns=self.data.markers)
        # self.plot_clusters(input_data, range(len(self.data.markers)))
        for marker in self.data.markers:
            input_marker = input_data[f"{marker}"]
            var_marker = recon_test[f"{marker}"]
            self.r2_scores = self.r2_scores.append(
                {
                    "Marker": marker,
                    "Score": r2_score(input_marker, var_marker)
                }, ignore_index=True
            )
        # self.plot_label_clusters(self.data.X_test, self.data.X_test)
    def plot_label_clusters(self, data, labels):
        """Scatter-plot the first two latent mean dimensions, colored by ``labels``."""
        # display a 2D plot of the digit classes in the latent space
        z_mean, _, _ = self.vae.encoder.predict(data)
        plt.figure(figsize=(12, 10))
        plt.scatter(z_mean[:, 0], z_mean[:, 1], c=labels)
        plt.colorbar()
        plt.xlabel("z[0]")
        plt.ylabel("z[1]")
        plt.show()
    def create_h5ad_object(self):
        """Write h5ad files with UMAP embeddings of the raw test data and the latent space."""
        # Input
        fit = umap.UMAP()
        self.input_umap = input_umap = fit.fit_transform(self.data.X_test)
        # latent space
        fit = umap.UMAP()
        mean, log_var, z = self.encoder.predict(self.data.X_test)
        self.latent_umap = fit.fit_transform(z)
        self.__create_h5ad("latent_markers", self.latent_umap, self.data.markers,
                           pd.DataFrame(columns=self.data.markers, data=self.data.X_test))
        self.__create_h5ad("input", input_umap, self.data.markers,
                           pd.DataFrame(columns=self.data.markers, data=self.data.X_test))
        return
    def __create_h5ad(self, file_name: str, umap, markers, df):
        """Assemble an AnnData object (with the given UMAP in obsm) and write it to disk."""
        obs = pd.DataFrame(data=df, index=df.index)
        var = pd.DataFrame(index=markers)
        obsm = {"X_umap": umap}
        uns = dict()
        adata = ad.AnnData(df.to_numpy(), var=var, obs=obs, uns=uns, obsm=obsm)
        adata.var_names_make_unique()
        adata.write(Path(f'{self.results_folder}/{file_name}.h5ad'))
    def create_test_predictions(self):
        """Populate ``encoded_data`` (latent z) and ``reconstructed_data`` for the test set."""
        mean, log_var, z = self.encoder.predict(self.data.X_test)
        self.encoded_data = pd.DataFrame(z)
        self.reconstructed_data = pd.DataFrame(columns=self.data.markers, data=self.decoder.predict(self.encoded_data))
    def create_correlation_data(self):
        """Write the marker correlation matrix of the full input data to CSV."""
        inputs = pd.DataFrame(columns=self.data.markers, data=self.data.inputs)
        corr = inputs.corr()
        corr.to_csv(Path(f'{self.results_folder}/correlation.csv'), index=False)
    def write_created_data_to_disk(self):
        """Persist training history, test data, encodings, reconstructions and R2 scores."""
        with open(f'{self.results_folder}/vae_history', 'wb') as file_pi:
            pickle.dump(self.history.history, file_pi)
        X_test = pd.DataFrame(columns=self.data.markers, data=self.data.X_test)
        X_test.to_csv(Path(f'{self.results_folder}/test_data.csv'), index=False)
        self.encoded_data.to_csv(Path(f'{self.results_folder}/vae_encoded_data.csv'), index=False)
        self.reconstructed_data.to_csv(Path(f'{self.results_folder}/reconstructed_data.csv'), index=False)
        self.r2_scores.to_csv(Path(f'{self.results_folder}/r2_scores.csv'), index=False)
| 38.555085 | 119 | 0.630839 |
33fe7b1ac73fcbb5edd066433d7f0987d10a567b | 4,705 | py | Python | src/providers/commoncrawl/FloraOn.py | AyanChoudhary/cccatalog | ca2b247fffc0f38ec6b73574f963dd94a9505c86 | [
"MIT"
] | 65 | 2018-05-25T00:47:18.000Z | 2021-11-30T05:58:43.000Z | src/providers/commoncrawl/FloraOn.py | AyanChoudhary/cccatalog | ca2b247fffc0f38ec6b73574f963dd94a9505c86 | [
"MIT"
] | 463 | 2018-05-01T14:35:42.000Z | 2021-06-11T20:32:50.000Z | src/providers/commoncrawl/FloraOn.py | AyanChoudhary/cccatalog | ca2b247fffc0f38ec6b73574f963dd94a9505c86 | [
"MIT"
] | 81 | 2018-05-05T20:33:12.000Z | 2021-04-28T02:23:10.000Z | """
Content Provider: Flora-On
ETL Process: Identify images of plant species that are available under a Creative
Commons license.
Output: TSV file containing images of artworks and their respective meta-data.
"""
from Provider import *
logging.basicConfig(format='%(asctime)s - %(name)s: [%(levelname)s - Flora-On] =======> %(message)s', level=logging.INFO)
class FloraOn(Provider):
    """Common Crawl provider for Flora-On: extracts CC-licensed plant images."""
    def __init__(self, _name, _domain, _cc_index):
        Provider.__init__(self, _name, _domain, _cc_index)
    def getMetaData(self, _html, _url):
        """
        Parameters
        ------------------
        _html: string
            The HTML page that was extracted from Common Crawls WARC file.
        _url: string
            The url for the webpage.
        Returns
        ------------------
        A tab separated string which contains the meta data that was extracted from the HTML.
        """
        soup = BeautifulSoup(_html, 'html.parser')
        otherMetaData = {}
        # NOTE: `license` shadows the built-in of the same name (kept as-is).
        license = None
        version = None
        imageURL = None
        extracted = []
        # Each thumbnail inside #fotochooser is one candidate image.
        photoList = soup.find('div', {'id': 'fotochooser'})
        if photoList:
            for photo in photoList.find_all('div', {'class': 'thumbnail'}):
                # Reset per-photo Provider fields before populating them.
                self.clearFields()
                self.watermarked = 't'
                # License link; skip the photo if no CC license is recognized.
                licenseInfo = photo.find('a', {'rel': 'license'})
                ccURL = urlparse(licenseInfo.attrs['href'].strip())
                license, version = self.getLicense(ccURL.netloc, ccURL.path, _url)
                if not license:
                    logging.warning('License not detected in url: {}'.format(_url))
                    continue
                self.license = license
                self.licenseVersion = version
                #get the image
                imageInfo = photo.find('img', {'class': 'image', 'src': True})
                if imageInfo:
                    self.url = self.validateContent('', imageInfo, 'src')
                    otherMetaData['image_alt_text'] = self.validateContent('', imageInfo, 'alt')
                if self.url:
                    # src is relative; prefix it with the provider domain.
                    self.url = '{}/{}'.format(self.domain.strip('%'), self.url)
                else:
                    logging.warning('Image not detected in url: {}'.format(_url))
                    continue
                # Image dimensions come from hidden form inputs, if present.
                imgWidth = photo.find('input', {'name': 'wid'})
                if imgWidth:
                    self.width = self.validateContent('', imgWidth, 'value')
                imgHeight = photo.find('input', {'name': 'hei'})
                if imgHeight:
                    self.height = self.validateContent('', imgHeight, 'value')
                # Species name serves as the title (page-level, same for all photos).
                titleInfo = soup.find('span', {'class': 'especie'})
                if titleInfo:
                    self.title = titleInfo.text.strip().lower()
                authorInfo = photo.find('input', {'name': 'aut'})
                self.creator = self.validateContent('', authorInfo, 'value')
                self.foreignLandingURL = _url
                self.provider = self.name
                self.source = 'commoncrawl'
                #get the details
                # NOTE(review): .find('div', {'id': 'fic-ecologia'}) is chained
                # without a None check; a page missing that div would raise.
                details = soup.find('div', {'id': 'fic-ecologia'}).find_all('div', {'class': 'fic-detalhe'})
                if details:
                    for detail in details:
                        key = detail.find('div', {'class': 'head'}).text.strip().lower().replace(' ', '_')
                        val = detail.find('div', {'class': 'content'}).text.strip()
                        otherMetaData[key] = val
                #related species
                species = soup.find('div', {'id': 'detalhes-especie'})
                if species:
                    key = species.find('span', {'class': 'showtooltip big'})
                    if key:
                        key = key.text.strip().lower().replace(' ', '_')
                        related = species.find_all('i')
                        val = ','.join([x.text.strip() for x in related if x.text.strip() != 'Download'])
                        otherMetaData[key] = val
                if otherMetaData:
                    self.metaData = otherMetaData
                extracted.extend(self.formatOutput)
        return extracted
| 38.565574 | 126 | 0.456961 |
730ff5b66f1e3a2d5a7ed1e4966d013c43fa193d | 2,507 | py | Python | gui/weight_entry_popup.py | Penaz91/fjournal | 0cf1634f67308f3491241d1bb250772ce4def2a0 | [
"MIT"
] | null | null | null | gui/weight_entry_popup.py | Penaz91/fjournal | 0cf1634f67308f3491241d1bb250772ce4def2a0 | [
"MIT"
] | null | null | null | gui/weight_entry_popup.py | Penaz91/fjournal | 0cf1634f67308f3491241d1bb250772ce4def2a0 | [
"MIT"
] | null | null | null | """
This file is part of the FJournal Project.
Copyright © 2019-2020, Daniele Penazzo. All Rights Reserved.
The use of this code is governed by the MIT license attached.
See the LICENSE file for the full license.
Created on: 2020-07-09
Author: Penaz
"""
from datetime import datetime
from tkinter import ttk
import tkinter as tk
from models import WeightEntry
from gui import Calendar
class WeightEntryPopup(ttk.Frame):
    """
    Defines a popup for entering weight
    """
    def __init__(self, master=None, date=None, session=None):
        """
        Constructor of the class

        ``date`` must support strftime (a date/datetime); ``session`` is the
        DB session used by confirm_entry.
        NOTE(review): the ``date=None`` default would crash on strftime -
        callers are expected to always pass a date.
        """
        super().__init__(master)
        self.master = master
        self.date = tk.StringVar()
        self.weight = tk.DoubleVar()
        self.date.set(date.strftime("%Y-%m-%d"))
        self.grid(row=0, column=0)
        self.session = session
        self.create_widgets()
    def create_widgets(self):
        """
        Creates the widgets for the popup
        """
        # Row 0: date label, entry and a "..." button opening the calendar.
        self.dateinputlbl = ttk.Label(self, text="Entry Date:")
        self.dateinputlbl.grid(row=0, column=0)
        self.dateinput = ttk.Entry(self, textvariable=self.date)
        self.dateinput.grid(row=0, column=1)
        self.dateinputbtn = ttk.Button(self, text="...")
        self.dateinputbtn["command"] = self.calendarPopup
        self.dateinputbtn.grid(row=0, column=2)
        # Row 1: weight label and entry.
        self.weightinputlbl = ttk.Label(self, text="Weight:")
        self.weightinputlbl.grid(row=1, column=0)
        self.weightinput = ttk.Entry(self, textvariable=self.weight)
        self.weightinput.grid(row=1, column=1)
        # Row 2: confirm button. NOTE: command is assigned twice (once in the
        # constructor, once via item assignment) - redundant but harmless.
        self.confirmbtn = ttk.Button(self, command=self.confirm_entry)
        self.confirmbtn["text"] = "Confirm"
        self.confirmbtn["command"] = self.confirm_entry
        self.confirmbtn.grid(row=2, column=0, columnspan=2)
    def calendarPopup(self):
        """
        Pops out a calendar date selector
        """
        child = tk.Toplevel()
        child.title("Date Picker")
        Calendar(child, self.selectDate)
    def selectDate(self, year, month, day):
        """
        Selects a date from the calendar popup
        """
        # Values are not zero-padded here; strptime("%Y-%m-%d") in
        # confirm_entry still accepts them.
        self.date.set("{}-{}-{}".format(year, month, day))
    def confirm_entry(self):
        """
        Creates the DB Entry
        """
        we = WeightEntry(
            date=datetime.strptime(self.date.get(), "%Y-%m-%d"),
            weight=self.weight.get()
        )
        self.session.add(we)
        self.session.commit()
        # Close the popup window after a successful commit.
        self.master.destroy()
| 30.950617 | 70 | 0.614679 |
048db1533cefbb23d6abe1c94c5e4aa988f95c99 | 56,451 | py | Python | imgaug/augmenters/contrast.py | russoale/imgaug | 8dd78f10ec5ae2c133e87850d50addb9d7c67354 | [
"MIT"
] | 1 | 2019-06-29T12:46:09.000Z | 2019-06-29T12:46:09.000Z | imgaug/augmenters/contrast.py | jiangxiluning/imgaug | 6c143483810629b7efee13afd8c93bc647b9df35 | [
"MIT"
] | null | null | null | imgaug/augmenters/contrast.py | jiangxiluning/imgaug | 6c143483810629b7efee13afd8c93bc647b9df35 | [
"MIT"
] | null | null | null | """
Augmenters that perform contrast changes.
Do not import directly from this file, as the categorization is not final.
Use instead ::
from imgaug import augmenters as iaa
and then e.g. ::
seq = iaa.Sequential([iaa.GammaContrast((0.5, 1.5))])
List of augmenters:
* GammaContrast
* SigmoidContrast
* LogContrast
* LinearContrast
* AllChannelsHistogramEqualization
* HistogramEqualization
* AllChannelsCLAHE
* CLAHE
"""
from __future__ import print_function, division, absolute_import
import numpy as np
import six.moves as sm
import skimage.exposure as ski_exposure
import cv2
import warnings
from . import meta
from . import color as color_lib
from .. import imgaug as ia
from .. import parameters as iap
from .. import dtypes as iadt
# TODO quite similar to the other adjust_contrast_*() functions, make DRY
def adjust_contrast_gamma(arr, gamma):
    """Adjust contrast by scaling each pixel value to ``255 * ((I_ij/255)**gamma)``.

    dtype support::

        * fully supported: ``uint8`` (fast ``cv2`` lookup-table path)
        * supported: ``uint16``, ``uint32``, ``uint64``
        * limited (values must be >=0): ``int8``, ``int16``, ``int32``, ``int64``,
          ``float16``, ``float32``, ``float64``
        * not supported: ``float128`` (error in scikit-image), ``bool``

        Normalization is done as ``I_ij/max`` with ``max`` the dtype's maximum
        value and is reversed afterwards (e.g. ``result*255`` for ``uint8``);
        integer-like outputs are floored, not rounded. scikit-image converts
        integers to ``float64`` internally, which could in principle be
        inaccurate for large 64-bit values (not observed in tests).

    Parameters
    ----------
    arr : numpy.ndarray
        Array whose contrast is to be adjusted. Dtype ``uint8`` is fastest.

    gamma : number
        Exponent of the adjustment. Higher values darken the image.

    Returns
    -------
    numpy.ndarray
        Array with adjusted contrast.

    """
    if arr.dtype.name != "uint8":
        return ski_exposure.adjust_gamma(arr, gamma)

    # cv2.LUT path: build a 256-entry table mapping every possible input
    # value to its gamma-adjusted output. (int8 should also work per the
    # cv2 docs, but LUT failed for CV_8S in practice, hence the uint8 gate.)
    min_value, _center, max_value = iadt.get_value_range_of_dtype(arr.dtype)
    dynamic_range = max_value - min_value
    levels = np.linspace(0, 1.0, num=dynamic_range + 1, dtype=np.float32)
    # 255 * ((I_ij/255)**gamma)
    # np.float32(.) also works when gamma is a numpy array of size 1
    table = min_value + (levels ** np.float32(gamma)) * dynamic_range
    table = np.clip(table, min_value, max_value).astype(arr.dtype)
    result = cv2.LUT(arr, table)
    # cv2.LUT drops a trailing singleton channel axis; restore it
    if arr.ndim == 3 and result.ndim == 2:
        result = result[..., np.newaxis]
    return result
# TODO quite similar to the other adjust_contrast_*() functions, make DRY
def adjust_contrast_sigmoid(arr, gain, cutoff):
    """Adjust contrast by scaling each pixel value to ``255 * 1/(1 + exp(gain*(cutoff - I_ij/255)))``.

    dtype support::

        * fully supported: ``uint8`` (fast ``cv2`` lookup-table path)
        * supported: ``uint16``, ``uint32``, ``uint64``
        * limited (values must be >=0): ``int8``, ``int16``, ``int32``, ``int64``,
          ``float16``, ``float32``, ``float64``
        * not supported: ``float128`` (error in scikit-image), ``bool``

        Normalization is done as ``I_ij/max`` with ``max`` the dtype's maximum
        value and is reversed afterwards (e.g. ``result*255`` for ``uint8``);
        integer-like outputs are floored, not rounded. scikit-image converts
        integers to ``float64`` internally, which could in principle be
        inaccurate for large 64-bit values (not observed in tests).

    Parameters
    ----------
    arr : numpy.ndarray
        Array whose contrast is to be adjusted. Dtype ``uint8`` is fastest.

    gain : number
        Multiplier of the sigmoid's argument. Higher values make the
        transition from dark to light pixels steeper.

    cutoff : number
        Horizontal shift of the sigmoid. Higher values delay the switch
        from dark to light, i.e. pixels remain darker.

    Returns
    -------
    numpy.ndarray
        Array with adjusted contrast.

    """
    if arr.dtype.name != "uint8":
        return ski_exposure.adjust_sigmoid(arr, cutoff=cutoff, gain=gain)

    # cv2.LUT path: build a 256-entry table mapping every possible input
    # value to its sigmoid-adjusted output. (int8 should also work per the
    # cv2 docs, but LUT failed for CV_8S in practice, hence the uint8 gate.)
    min_value, _center, max_value = iadt.get_value_range_of_dtype(arr.dtype)
    dynamic_range = max_value - min_value
    levels = np.linspace(0, 1.0, num=dynamic_range + 1, dtype=np.float32)
    # 255 * 1/(1 + exp(gain*(cutoff - I_ij/255)))
    # np.float32(.) also works when the input is a numpy array of size 1
    gain = np.float32(gain)
    cutoff = np.float32(cutoff)
    table = min_value + dynamic_range * 1 / (1 + np.exp(gain * (cutoff - levels)))
    table = np.clip(table, min_value, max_value).astype(arr.dtype)
    result = cv2.LUT(arr, table)
    # cv2.LUT drops a trailing singleton channel axis; restore it
    if arr.ndim == 3 and result.ndim == 2:
        result = result[..., np.newaxis]
    return result
# TODO quite similar to the other adjust_contrast_*() functions, make DRY
def adjust_contrast_log(arr, gain):
    """
    Adjust contrast by scaling each pixel value to ``255 * gain * log_2(1 + I_ij/255)``.
    dtype support::
        * ``uint8``: yes; fully tested (1) (2) (3)
        * ``uint16``: yes; tested (2) (3)
        * ``uint32``: yes; tested (2) (3)
        * ``uint64``: yes; tested (2) (3) (4)
        * ``int8``: limited; tested (2) (3) (5)
        * ``int16``: limited; tested (2) (3) (5)
        * ``int32``: limited; tested (2) (3) (5)
        * ``int64``: limited; tested (2) (3) (4) (5)
        * ``float16``: limited; tested (5)
        * ``float32``: limited; tested (5)
        * ``float64``: limited; tested (5)
        * ``float128``: no (6)
        * ``bool``: no (7)
        - (1) Handled by ``cv2``. Other dtypes are handled by ``skimage``.
        - (2) Normalization is done as ``I_ij/max``, where ``max`` is the maximum value of the
        dtype, e.g. 255 for ``uint8``. The normalization is reversed afterwards,
        e.g. ``result*255`` for ``uint8``.
        - (3) Integer-like values are not rounded after applying the contrast adjustment equation
        (before inverting the normalization to 0.0-1.0 space), i.e. projection from continuous
        space to discrete happens according to floor function.
        - (4) Note that scikit-image doc says that integers are converted to ``float64`` values before
        applying the contrast normalization method. This might lead to inaccuracies for large
        64bit integer values. Tests showed no indication of that happening though.
        - (5) Must not contain negative values. Values >=0 are fully supported.
        - (6) Leads to error in scikit-image.
        - (7) Does not make sense for contrast adjustments.
    Parameters
    ----------
    arr : numpy.ndarray
        Array for which to adjust the contrast. Dtype ``uint8`` is fastest.
    gain : number
        Multiplier for the logarithm result. Values around 1.0 lead to a contrast-adjusted
        images. Values above 1.0 quickly lead to partially broken images due to exceeding the
        datatype's value range.
    Returns
    -------
    numpy.ndarray
        Array with adjusted contrast.
    """
    # int8 is also possible according to docs
    # https://docs.opencv.org/3.0-beta/modules/core/doc/operations_on_arrays.html#cv2.LUT , but here it seemed
    # like `d` was 0 for CV_8S, causing that to fail
    if arr.dtype.name == "uint8":
        min_value, _center_value, max_value = iadt.get_value_range_of_dtype(arr.dtype)
        dynamic_range = max_value - min_value
        # one table entry per possible uint8 input value, normalized to [0.0, 1.0]
        value_range = np.linspace(0, 1.0, num=dynamic_range+1, dtype=np.float32)
        # 255 * gain * log_2(1 + I_ij/255)
        # (comment fixed: the previous one showed the sigmoid formula, copied by mistake)
        # using np.float32(.) here still works when the input is a numpy array of size 1
        gain = np.float32(gain)
        table = min_value + dynamic_range * gain * np.log2(1 + value_range)
        arr_aug = cv2.LUT(arr, np.clip(table, min_value, max_value).astype(arr.dtype))
        # cv2.LUT drops a trailing singleton channel axis; restore it
        if arr.ndim == 3 and arr_aug.ndim == 2:
            return arr_aug[..., np.newaxis]
        return arr_aug
    else:
        return ski_exposure.adjust_log(arr, gain=gain)
# TODO quite similar to the other adjust_contrast_*() functions, make DRY
def adjust_contrast_linear(arr, alpha):
    """Adjust contrast by scaling each pixel value to ``127 + alpha*(I_ij-127)``.

    dtype support::

        * fully supported: ``uint8`` (fast ``cv2`` lookup-table path)
        * supported: ``uint16``, ``uint32``, ``int8``, ``int16``, ``int32``,
          ``float16``, ``float32``, ``float64``
          (only tested for reasonable alphas up to roughly 100)
        * not supported: ``uint64``, ``int64``, ``float128`` (the non-uint8
          path converts to ``float64``, so these cannot be guaranteed),
          ``bool``

    Parameters
    ----------
    arr : numpy.ndarray
        Array whose contrast is to be adjusted. Dtype ``uint8`` is fastest.

    alpha : number
        Multiplier of the per-pixel distance to the dtype's center value
        (e.g. ``127`` for ``uint8``): >1.0 pronounces, 0.0 to 1.0 dampens
        and <0.0 inverts that distance.

    Returns
    -------
    numpy.ndarray
        Array with adjusted contrast.

    """
    if arr.dtype.name == "uint8":
        # cv2.LUT path; a 256-entry table covers every possible input value.
        # (int8 should also work per the cv2 docs, but LUT failed for CV_8S
        # in practice, hence the uint8 gate.)
        min_value, center_value, max_value = iadt.get_value_range_of_dtype(arr.dtype)
        levels = np.arange(0, 256, dtype=np.float32)
        # 127 + alpha*(I_ij-127)
        # np.float32(.) also works when alpha is a numpy array of size 1
        table = center_value + np.float32(alpha) * (levels - center_value)
        result = cv2.LUT(arr, np.clip(table, min_value, max_value).astype(arr.dtype))
        # cv2.LUT drops a trailing singleton channel axis; restore it
        if arr.ndim == 3 and result.ndim == 2:
            return result[..., np.newaxis]
        return result

    input_dtype = arr.dtype
    _min_value, center_value, _max_value = iadt.get_value_range_of_dtype(input_dtype)
    if input_dtype.kind in ["u", "i"]:
        center_value = int(center_value)
    adjusted = center_value + alpha * (arr.astype(np.float64) - center_value)
    return iadt.restore_dtypes_(adjusted, input_dtype)
def GammaContrast(gamma=1, per_channel=False, name=None, deterministic=False, random_state=None):
    """Adjust contrast by scaling each pixel value to ``255 * ((I_ij/255)**gamma)``.

    Values in the range ``gamma=(0.5, 2.0)`` seem to be sensible.

    dtype support::

        See :func:`imgaug.augmenters.contrast.adjust_contrast_gamma`.

    Parameters
    ----------
    gamma : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Exponent of the contrast adjustment; higher values darken the image.
        A number is used as-is for all images, a tuple ``(a, b)`` samples
        uniformly from ``[a, b]`` per image, a list samples one of its
        values per image, and a StochasticParameter is sampled per image.

    per_channel : bool or float, optional
        Whether to use the same value for all channels (False) or to sample
        a new value per channel (True). A float ``p`` means per-channel
        sampling for ``p`` percent of all images.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Returns
    -------
    _ContrastFuncWrapper
        Augmenter performing gamma contrast adjustment.

    """
    gamma_param = iap.handle_continuous_param(
        gamma, "gamma", value_range=None, tuple_to_uniform=True, list_to_choice=True)
    if name is None:
        name = ia.caller_name()
    return _ContrastFuncWrapper(
        adjust_contrast_gamma, [gamma_param], per_channel,
        dtypes_allowed=["uint8", "uint16", "uint32", "uint64",
                        "int8", "int16", "int32", "int64",
                        "float16", "float32", "float64"],
        dtypes_disallowed=["float96", "float128", "float256", "bool"],
        name=name,
        deterministic=deterministic,
        random_state=random_state
    )
def SigmoidContrast(gain=10, cutoff=0.5, per_channel=False, name=None, deterministic=False, random_state=None):
    """Adjust contrast by scaling each pixel value to ``255 * 1/(1 + exp(gain*(cutoff - I_ij/255)))``.

    Values in the range ``gain=(5, 20)`` and ``cutoff=(0.25, 0.75)`` seem to be sensible.

    dtype support::

        See :func:`imgaug.augmenters.contrast.adjust_contrast_sigmoid`.

    Parameters
    ----------
    gain : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Multiplier of the sigmoid's argument; higher values make the
        transition from dark to light pixels steeper. A number is used
        as-is for all images, a tuple ``(a, b)`` samples uniformly from
        ``[a, b]`` per image, a list samples one of its values per image,
        and a StochasticParameter is sampled per image.

    cutoff : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Horizontal shift of the sigmoid; higher values delay the switch
        from dark to light pixels, i.e. pixels remain darker. Sampled per
        image analogously to `gain`.

    per_channel : bool or float, optional
        Whether to use the same values for all channels (False) or to sample
        new values per channel (True). A float ``p`` means per-channel
        sampling for ``p`` percent of all images.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Returns
    -------
    _ContrastFuncWrapper
        Augmenter performing sigmoid contrast adjustment.

    """
    # TODO add inv parameter?
    gain_param = iap.handle_continuous_param(
        gain, "gain", value_range=(0, None), tuple_to_uniform=True, list_to_choice=True)
    cutoff_param = iap.handle_continuous_param(
        cutoff, "cutoff", value_range=(0, 1.0), tuple_to_uniform=True, list_to_choice=True)
    if name is None:
        name = ia.caller_name()
    return _ContrastFuncWrapper(
        adjust_contrast_sigmoid, [gain_param, cutoff_param], per_channel,
        dtypes_allowed=["uint8", "uint16", "uint32", "uint64",
                        "int8", "int16", "int32", "int64",
                        "float16", "float32", "float64"],
        dtypes_disallowed=["float96", "float128", "float256", "bool"],
        name=name,
        deterministic=deterministic,
        random_state=random_state
    )
def LogContrast(gain=1, per_channel=False, name=None, deterministic=False, random_state=None):
    """Adjust contrast by scaling each pixel value to ``255 * gain * log_2(1 + I_ij/255)``.

    dtype support::

        See :func:`imgaug.augmenters.contrast.adjust_contrast_log`.

    Parameters
    ----------
    gain : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Multiplier of the logarithm's result. Values around 1.0 lead to
        contrast-adjusted images; values above 1.0 quickly produce partially
        broken images as the dtype's value range is exceeded. A number is
        used as-is for all images, a tuple ``(a, b)`` samples uniformly from
        ``[a, b]`` per image, a list samples one of its values per image,
        and a StochasticParameter is sampled per image.

    per_channel : bool or float, optional
        Whether to use the same value for all channels (False) or to sample
        a new value per channel (True). A float ``p`` means per-channel
        sampling for ``p`` percent of all images.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Returns
    -------
    _ContrastFuncWrapper
        Augmenter performing logarithmic contrast adjustment.

    """
    # TODO add inv parameter?
    gain_param = iap.handle_continuous_param(
        gain, "gain", value_range=(0, None), tuple_to_uniform=True, list_to_choice=True)
    if name is None:
        name = ia.caller_name()
    return _ContrastFuncWrapper(
        adjust_contrast_log, [gain_param], per_channel,
        dtypes_allowed=["uint8", "uint16", "uint32", "uint64",
                        "int8", "int16", "int32", "int64",
                        "float16", "float32", "float64"],
        dtypes_disallowed=["float96", "float128", "float256", "bool"],
        name=name,
        deterministic=deterministic,
        random_state=random_state
    )
def LinearContrast(alpha=1, per_channel=False, name=None, deterministic=False, random_state=None):
    """Adjust contrast by scaling each pixel value to ``127 + alpha*(I_ij-127)``.

    dtype support::

        See :func:`imgaug.augmenters.contrast.adjust_contrast_linear`.

    Parameters
    ----------
    alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Multiplier of the per-pixel distance to the dtype's center value
        (e.g. ``127`` for ``uint8``): >1.0 pronounces, 0.0 to 1.0 dampens
        and <0.0 inverts that distance. A number is used as-is for all
        images, a tuple ``(a, b)`` samples uniformly from ``[a, b]`` per
        image, a list samples one of its values per image, and a
        StochasticParameter is sampled per image.

    per_channel : bool or float, optional
        Whether to use the same value for all channels (False) or to sample
        a new value per channel (True). A float ``p`` means per-channel
        sampling for ``p`` percent of all images.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Returns
    -------
    _ContrastFuncWrapper
        Augmenter adjusting contrast by linearly scaling the distance to 128.

    """
    alpha_param = iap.handle_continuous_param(
        alpha, "alpha", value_range=None, tuple_to_uniform=True, list_to_choice=True)
    if name is None:
        name = ia.caller_name()
    return _ContrastFuncWrapper(
        adjust_contrast_linear, [alpha_param], per_channel,
        dtypes_allowed=["uint8", "uint16", "uint32",
                        "int8", "int16", "int32",
                        "float16", "float32", "float64"],
        dtypes_disallowed=["uint64", "int64", "float96", "float128", "float256", "bool"],
        name=name,
        deterministic=deterministic,
        random_state=random_state
    )
# TODO maybe offer the other contrast augmenters also wrapped in this, similar to CLAHE and HistogramEqualization?
# this is essentially tested by tests for CLAHE
class _IntensityChannelBasedApplier(object):
    """Helper that applies an augmentation function to the intensity channel of images.

    Images with 3 or 4 channels are converted from `from_colorspace` to
    `to_colorspace` (one of HSV, HLS, Lab), the function is applied to the
    intensity-related channel of the converted image, and the result is
    converted back. For 4-channel images the 4th channel is assumed to be
    alpha, excluded from the conversion and re-attached afterwards.
    Single-channel images are passed to the function directly; any other
    channel count raises a warning and is processed channelwise.
    """
    RGB = color_lib.ChangeColorspace.RGB
    BGR = color_lib.ChangeColorspace.BGR
    HSV = color_lib.ChangeColorspace.HSV
    HLS = color_lib.ChangeColorspace.HLS
    Lab = color_lib.ChangeColorspace.Lab
    # index of the intensity-related channel per target colorspace
    # (V in HSV, L in HLS, L in Lab)
    _CHANNEL_MAPPING = {
        HSV: 2,
        HLS: 1,
        Lab: 0
    }
    def __init__(self, from_colorspace, to_colorspace, name):
        """Create the two colorspace converters (forward and inverse).

        Parameters
        ----------
        from_colorspace : str
            Colorspace of the input images (RGB, BGR, Lab, HLS or HSV).
        to_colorspace : str
            Target colorspace in which the function is applied
            (Lab, HLS or HSV only).
        name : str
            Name prefix for the child ChangeColorspace augmenters.
        """
        super(_IntensityChannelBasedApplier, self).__init__()
        # TODO maybe add CIE, Luv?
        ia.do_assert(from_colorspace in [self.RGB,
                                         self.BGR,
                                         self.Lab,
                                         self.HLS,
                                         self.HSV])
        ia.do_assert(to_colorspace in [self.Lab,
                                       self.HLS,
                                       self.HSV])
        self.change_colorspace = color_lib.ChangeColorspace(
            to_colorspace=to_colorspace,
            from_colorspace=from_colorspace,
            name="%s_IntensityChannelBasedApplier_ChangeColorspace" % (name,))
        self.change_colorspace_inv = color_lib.ChangeColorspace(
            to_colorspace=from_colorspace,
            from_colorspace=to_colorspace,
            name="%s_IntensityChannelBasedApplier_ChangeColorspaceInverse" % (name,))
    def apply(self, images, random_state, parents, hooks, func):
        """Apply `func` to the intensity channel of each image in `images`.

        Parameters
        ----------
        images : ndarray or list of ndarray
            Images to process; each must have a channel axis (``shape[2]``).
        random_state : numpy.random.RandomState
            Random state from which child states are derived.
        parents : list of imgaug.augmenters.meta.Augmenter
            Parent augmenters (used for warnings and child calls).
        hooks : imgaug.HooksImages or None
            Hooks forwarded to the child augmenters.
        func : callable
            Called with the list of ``(H, W, 1)`` intensity images and a
            random state; must return the augmented intensity images.

        Returns
        -------
        ndarray or list of ndarray
            Augmented images in the original colorspace; ndarray iff the
            input was an ndarray.
        """
        input_was_array = ia.is_np_array(images)
        rss = ia.derive_random_states(random_state, 3)
        # normalize images
        # (H, W, 1) will be used directly in AllChannelsCLAHE
        # (H, W, 3) will be converted to target colorspace in the next block
        # (H, W, 4) will be reduced to (H, W, 3) (remove 4th channel) and converted to target colorspace in next block
        # (H, W, <else>) will raise a warning and be treated channelwise by AllChannelsCLAHE
        images_normalized = []
        images_change_cs = []
        images_change_cs_indices = []
        for i, image in enumerate(images):
            nb_channels = image.shape[2]
            if nb_channels == 1:
                images_normalized.append(image)
            elif nb_channels == 3:
                # None is a placeholder, replaced after the colorspace conversion
                images_normalized.append(None)
                images_change_cs.append(image)
                images_change_cs_indices.append(i)
            elif nb_channels == 4:
                # assume that 4th channel is an alpha channel, e.g. in RGBA
                images_normalized.append(None)
                images_change_cs.append(image[..., 0:3])
                images_change_cs_indices.append(i)
            else:
                warnings.warn("Got image with %d channels in _IntensityChannelBasedApplier (parents: %s), "
                              "expected 0, 1, 3 or 4 channels." % (
                                  nb_channels, ", ".join(parent.name for parent in parents)))
                images_normalized.append(image)
        # convert colorspaces of normalized 3-channel images
        images_after_color_conversion = [None] * len(images_normalized)
        if len(images_change_cs) > 0:
            images_new_cs = self.change_colorspace._augment_images(images_change_cs, rss[0], parents + [self], hooks)
            for image_new_cs, target_idx in zip(images_new_cs, images_change_cs_indices):
                # extract the intensity channel as (H, W, 1) for func
                chan_idx = self._CHANNEL_MAPPING[self.change_colorspace.to_colorspace.value]
                images_normalized[target_idx] = image_new_cs[..., chan_idx:chan_idx+1]
                images_after_color_conversion[target_idx] = image_new_cs
        # apply CLAHE channelwise
        # images_aug = self.all_channel_clahe._augment_images(images_normalized, rss[1], parents + [self], hooks)
        images_aug = func(images_normalized, rss[1])
        # denormalize: write the augmented intensity channel back into the
        # converted images and collect what needs the inverse conversion
        result = []
        images_change_cs = []
        images_change_cs_indices = []
        for i, (image, image_conv, image_aug) in enumerate(zip(images, images_after_color_conversion, images_aug)):
            nb_channels = image.shape[2]
            if nb_channels in [3, 4]:
                chan_idx = self._CHANNEL_MAPPING[self.change_colorspace.to_colorspace.value]
                image_tmp = image_conv
                image_tmp[..., chan_idx:chan_idx+1] = image_aug
                # placeholder (3ch) or the preserved alpha channel (4ch);
                # filled/extended after the inverse conversion below
                result.append(None if nb_channels == 3 else image[..., 3:4])
                images_change_cs.append(image_tmp)
                images_change_cs_indices.append(i)
            else:
                result.append(image_aug)
        # invert colorspace conversion
        # NOTE(review): rss[0] is reused here (rss[2] is never used) --
        # presumably so forward and inverse conversion sample identically;
        # confirm that this is intentional.
        if len(images_change_cs) > 0:
            images_new_cs = self.change_colorspace_inv._augment_images(images_change_cs, rss[0], parents + [self],
                                                                       hooks)
            for image_new_cs, target_idx in zip(images_new_cs, images_change_cs_indices):
                if result[target_idx] is None:
                    result[target_idx] = image_new_cs
                else:  # input image had four channels, 4th channel is already in result
                    result[target_idx] = np.dstack((image_new_cs, result[target_idx]))
        # convert to array if necessary
        if input_was_array:
            result = np.array(result, dtype=result[0].dtype)
        return result
# TODO add parameter `tile_grid_size_percent`
class AllChannelsCLAHE(meta.Augmenter):
    """
    Contrast Limited Adaptive Histogram Equalization, applied to all channels of the input images.
    CLAHE performs histogram equilization within image patches, i.e. over local neighbourhoods.
    dtype support::
        * ``uint8``: yes; fully tested
        * ``uint16``: yes; tested
        * ``uint32``: no (1)
        * ``uint64``: no (2)
        * ``int8``: no (2)
        * ``int16``: no (2)
        * ``int32``: no (2)
        * ``int64``: no (2)
        * ``float16``: no (2)
        * ``float32``: no (2)
        * ``float64``: no (2)
        * ``float128``: no (1)
        * ``bool``: no (1)
        - (1) rejected by cv2
        - (2) results in error in cv2: ``cv2.error: OpenCV(3.4.2) (...)/clahe.cpp:351: error: (-215:Assertion failed)
          src.type() == (((0) & ((1 << 3) - 1)) + (((1)-1) << 3))
          || _src.type() == (((2) & ((1 << 3) - 1)) + (((1)-1) << 3)) in function 'apply'``
    Parameters
    ----------
    clip_limit : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        See ``imgaug.augmenters.contrast.CLAHE``.
    tile_grid_size_px : int or tuple of int or list of int or imgaug.parameters.StochasticParameter \
                        or tuple of tuple of int or tuple of list of int \
                        or tuple of imgaug.parameters.StochasticParameter, optional
        See ``imgaug.augmenters.contrast.CLAHE``.
    tile_grid_size_px_min : int, optional
        See ``imgaug.augmenters.contrast.CLAHE``.
    per_channel : bool or float, optional
        Whether to use the same values for all channels (False)
        or to sample new values for each channel (True).
        If this parameter is a float ``p``, then for ``p`` percent of all images
        `per_channel` will be treated as True, otherwise as False.
    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    """
    def __init__(self, clip_limit=40, tile_grid_size_px=8, tile_grid_size_px_min=3, per_channel=False, name=None,
                 deterministic=False, random_state=None):
        # Normalize user inputs into stochastic parameters; see the class
        # docstring for the accepted input forms.
        super(AllChannelsCLAHE, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
        self.clip_limit = iap.handle_continuous_param(clip_limit, "clip_limit", value_range=(0+1e-4, None),
                                                      tuple_to_uniform=True, list_to_choice=True)
        # NOTE(review): handle_discrete_kernel_size_param() appears to return
        # a (height, width) pair of parameters, with the width part None when
        # a single size governs both axes -- inferred from the usage in
        # _augment_images(); confirm against its definition.
        self.tile_grid_size_px = iap.handle_discrete_kernel_size_param(tile_grid_size_px, "tile_grid_size_px",
                                                                       value_range=(0, None),
                                                                       allow_floats=False)
        self.tile_grid_size_px_min = tile_grid_size_px_min
        self.per_channel = iap.handle_probability_param(per_channel, "per_channel")
    def _augment_images(self, images, random_state, parents, hooks):
        """Apply CLAHE independently to every channel of every image.

        Note that the input `images` sequence/array is modified in place.
        """
        iadt.gate_dtypes(images,
                         allowed=["uint8", "uint16"],
                         disallowed=["bool",
                                     "uint32", "uint64", "uint128", "uint256",
                                     "int8", "int16", "int32", "int64", "int128", "int256",
                                     "float16", "float32", "float64", "float96", "float128", "float256"],
                         augmenter=self)
        nb_images = len(images)
        nb_channels = meta.estimate_max_number_of_channels(images)
        # "single": one sampled size used for both kernel axes;
        # "two": height and width sampled independently
        mode = "single" if self.tile_grid_size_px[1] is None else "two"
        rss = ia.derive_random_states(random_state, 3 if mode == "single" else 4)
        per_channel = self.per_channel.draw_samples((nb_images,), random_state=rss[0])
        clip_limit = self.clip_limit.draw_samples((nb_images, nb_channels), random_state=rss[1])
        tile_grid_size_px_h = self.tile_grid_size_px[0].draw_samples((nb_images, nb_channels), random_state=rss[2])
        if mode == "single":
            tile_grid_size_px_w = tile_grid_size_px_h
        else:
            tile_grid_size_px_w = self.tile_grid_size_px[1].draw_samples((nb_images, nb_channels), random_state=rss[3])
        # enforce the configured minimum kernel size per axis
        tile_grid_size_px_w = np.maximum(tile_grid_size_px_w, self.tile_grid_size_px_min)
        tile_grid_size_px_h = np.maximum(tile_grid_size_px_h, self.tile_grid_size_px_min)
        gen = enumerate(zip(images, clip_limit, tile_grid_size_px_h, tile_grid_size_px_w, per_channel))
        for i, (image, clip_limit_i, tgs_px_h_i, tgs_px_w_i, per_channel_i) in gen:
            nb_channels = image.shape[2]
            # c_param indexes the per-channel samples; it only advances when
            # per_channel is active, so otherwise all channels share sample 0
            c_param = 0
            image_warped = []
            for c in sm.xrange(nb_channels):
                # a kernel size <= 1 on both axes means "no CLAHE" for this channel
                if tgs_px_w_i[c_param] > 1 or tgs_px_h_i[c_param] > 1:
                    clahe = cv2.createCLAHE(clipLimit=clip_limit_i[c_param],
                                            tileGridSize=(tgs_px_w_i[c_param], tgs_px_h_i[c_param]))
                    channel_warped = clahe.apply(image[..., c])
                    image_warped.append(channel_warped)
                else:
                    image_warped.append(image[..., c])
                if per_channel_i > 0.5:
                    c_param += 1
            # combine channels to one image
            image_warped = np.array(image_warped, dtype=image_warped[0].dtype)
            image_warped = image_warped.transpose((1, 2, 0))
            images[i] = image_warped
        return images
    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # contrast changes do not affect heatmaps
        return heatmaps
    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # contrast changes do not affect keypoint coordinates
        return keypoints_on_images
    def get_parameters(self):
        """Return the parameters of this augmenter (see Augmenter.get_parameters)."""
        return [self.clip_limit, self.tile_grid_size_px, self.tile_grid_size_px_min, self.per_channel]
class CLAHE(meta.Augmenter):
"""
Contrast Limited Adaptive Histogram Equalization.
This augmenter applies CLAHE to images, a form of histogram equalization that normalizes within local image
patches.
The augmenter transforms input images to a target colorspace (e.g. ``Lab``), extracts an intensity-related channel
from the converted images (e.g. ``L`` for ``Lab``), applies CLAHE to the channel and then converts the resulting
image back to the original colorspace.
Grayscale images (images without channel axis or with only one channel axis) are automatically handled,
`from_colorspace` does not have to be adjusted for them. For images with four channels (e.g. ``RGBA``), the fourth
channel is ignored in the colorspace conversion (e.g. from an ``RGBA`` image, only the ``RGB`` part is converted,
normalized, converted back and concatenated with the input ``A`` channel).
Images with unusual channel numbers (2, 5 or more than 5) are normalized channel-by-channel (same behaviour as
``AllChannelsCLAHE``, though a warning will be raised).
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: no (1)
* ``uint32``: no (1)
* ``uint64``: no (1)
* ``int8``: no (1)
* ``int16``: no (1)
* ``int32``: no (1)
* ``int64``: no (1)
* ``float16``: no (1)
* ``float32``: no (1)
* ``float64``: no (1)
* ``float128``: no (1)
* ``bool``: no (1)
- (1) This augmenter uses ChangeColorspace, which is currently limited to ``uint8``.
Parameters
----------
clip_limit : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Clipping limit. Higher values result in stronger contrast. OpenCV uses a default of ``40``, though
values around ``5`` seem to already produce decent contrast.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value from the range ``[a, b]`` will be used per image.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then a value will be sampled per image from that parameter.
tile_grid_size_px : int or tuple of int or list of int or imgaug.parameters.StochasticParameter \
or tuple of tuple of int or tuple of list of int \
or tuple of imgaug.parameters.StochasticParameter, optional
Kernel size, i.e. size of each local neighbourhood in pixels.
* If an int, then that value will be used for all images for both kernel height and width.
* If a tuple ``(a, b)``, then a value from the discrete range ``[a..b]`` will be sampled per
image.
* If a list, then a random value will be sampled from that list per image and used for both
kernel height and width.
* If a StochasticParameter, then a value will be sampled per image from that parameter per
image and used for both kernel height and width.
* If a tuple of tuple of int given as ``((a, b), (c, d))``, then two values will be sampled
independently from the discrete ranges ``[a..b]`` and ``[c..d]`` per image and used as
the kernel height and width.
* If a tuple of lists of int, then two values will be sampled independently per image, one
from the first list and one from the second, and used as the kernel height and width.
            * If a tuple of StochasticParameter, then two values will be sampled independently per image,
one from the first parameter and one from the second, and used as the kernel height and
width.
tile_grid_size_px_min : int, optional
Minimum kernel size in px, per axis. If the sampling results in a value lower than this minimum,
it will be clipped to this value.
from_colorspace : {"RGB", "BGR", "HSV", "HLS", "Lab"}, optional
Colorspace of the input images.
If any input image has only one or zero channels, this setting will be ignored and it will be assumed that
the input is grayscale.
If a fourth channel is present in an input image, it will be removed before the colorspace conversion and
later re-added.
See also ``imgaug.augmenters.color.ChangeColorspace`` for details.
to_colorspace : {"Lab", "HLS", "HSV"}, optional
Colorspace in which to perform CLAHE. For ``Lab``, CLAHE will only be applied to the first channel (``L``),
for ``HLS`` to the second (``L``) and for ``HSV`` to the third (``V``).
To apply CLAHE to all channels of an input image (without colorspace conversion),
see ``imgaug.augmenters.contrast.AllChannelsCLAHE``.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.CLAHE()
Creates a standard CLAHE augmenter.
>>> aug = iaa.CLAHE(clip_limit=(1, 50))
Creates a CLAHE augmenter with a clip limit uniformly sampled from ``[1..50]``, where ``1`` is rather low contrast
and ``50`` is rather high contrast.
>>> aug = iaa.CLAHE(tile_grid_size_px=(3, 21))
Creates a CLAHE augmenter with kernel sizes of ``SxS``, where ``S`` is uniformly sampled from ``[3..21]``.
Sampling happens once per image.
>>> aug = iaa.CLAHE(tile_grid_size_px=iap.Discretize(iap.Normal(loc=7, scale=2)), tile_grid_size_px_min=3)
Creates a CLAHE augmenter with kernel sizes of ``SxS``, where ``S`` is sampled from ``N(7, 2)``, but does not go
below ``3``.
>>> aug = iaa.CLAHE(tile_grid_size_px=((3, 21), [3, 5, 7]))
Creates a CLAHE augmenter with kernel sizes of ``HxW``, where ``H`` is uniformly sampled from ``[3..21]`` and
``W`` is randomly picked from the list ``[3, 5, 7]``.
>>> aug = iaa.CLAHE(from_colorspace=iaa.CLAHE.BGR, to_colorspace=iaa.CLAHE.HSV)
Creates a CLAHE augmenter that converts images from BGR colorspace to HSV colorspace and then applies the local
histogram equalization to the ``V`` channel of the images (before converting back to ``BGR``). Alternatively,
``Lab`` (default) or ``HLS`` can be used as the target colorspace. Grayscale images (no channels / one channel)
are never converted and are instead directly normalized (i.e. `from_colorspace` does not have to be changed for
them).
"""
RGB = _IntensityChannelBasedApplier.RGB
BGR = _IntensityChannelBasedApplier.BGR
HSV = _IntensityChannelBasedApplier.HSV
HLS = _IntensityChannelBasedApplier.HLS
Lab = _IntensityChannelBasedApplier.Lab
    def __init__(self, clip_limit=40, tile_grid_size_px=8, tile_grid_size_px_min=3,
                 from_colorspace=color_lib.ChangeColorspace.RGB, to_colorspace=color_lib.ChangeColorspace.Lab,
                 name=None, deterministic=False, random_state=None):
        # See the class docstring for parameter semantics.
        super(CLAHE, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
        # The actual CLAHE computation is delegated to AllChannelsCLAHE; this
        # class only handles the colorspace normalization around it.
        self.all_channel_clahe = AllChannelsCLAHE(clip_limit=clip_limit,
                                                  tile_grid_size_px=tile_grid_size_px,
                                                  tile_grid_size_px_min=tile_grid_size_px_min,
                                                  name="%s_AllChannelsCLAHE" % (name,))
        # Converts images to `to_colorspace`, extracts the intensity channel,
        # applies the child augmenter to it and converts back.
        self.intensity_channel_based_applier = _IntensityChannelBasedApplier(from_colorspace, to_colorspace, name=name)
    def _augment_images(self, images, random_state, parents, hooks):
        # CLAHE via ChangeColorspace only supports uint8 (see class docstring);
        # reject every other dtype up front with a clear error message.
        iadt.gate_dtypes(images,
                         allowed=["uint8"],
                         disallowed=["bool",
                                     "uint16", "uint32", "uint64", "uint128", "uint256",
                                     "int8", "int16", "int32", "int64", "int128", "int256",
                                     "float16", "float32", "float64", "float96", "float128", "float256"],
                         augmenter=self)

        def _augment_all_channels_clahe(images_normalized, random_state_derived):
            # Callback invoked by the applier with the extracted intensity channels.
            return self.all_channel_clahe._augment_images(images_normalized, random_state_derived, parents + [self],
                                                          hooks)

        return self.intensity_channel_based_applier.apply(images, random_state, parents + [self], hooks,
                                                          _augment_all_channels_clahe)
    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # Contrast changes do not affect heatmaps.
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # Contrast changes do not move keypoints.
        return keypoints_on_images

    def get_parameters(self):
        return [self.all_channel_clahe.clip_limit,
                self.all_channel_clahe.tile_grid_size_px,
                self.all_channel_clahe.tile_grid_size_px_min,
                self.intensity_channel_based_applier.change_colorspace.from_colorspace,  # from_colorspace is always str
                self.intensity_channel_based_applier.change_colorspace.to_colorspace.value]
class AllChannelsHistogramEqualization(meta.Augmenter):
    """Apply plain histogram equalization to every channel of each input image.

    dtype support::

        * ``uint8``: yes; fully tested
        * all other dtypes (``bool``, other ``uint*``/``int*``, ``float*``): no;
          ``cv2.equalizeHist`` requires ``CV_8UC1`` input and rejects or errors
          on anything else.

    Parameters
    ----------
    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    """

    def __init__(self, name=None, deterministic=False, random_state=None):
        super(AllChannelsHistogramEqualization, self).__init__(name=name, deterministic=deterministic,
                                                               random_state=random_state)

    def _augment_images(self, images, random_state, parents, hooks):
        # cv2.equalizeHist() only accepts uint8; refuse everything else early.
        iadt.gate_dtypes(images,
                         allowed=["uint8"],
                         disallowed=["bool",
                                     "uint16", "uint32", "uint64", "uint128", "uint256",
                                     "int8", "int16", "int32", "int64", "int128", "int256",
                                     "float16", "float32", "float64", "float96", "float128", "float256"],
                         augmenter=self)

        for idx, image in enumerate(images):
            # Equalize each channel independently, then reassemble to HxWxC.
            equalized_channels = [cv2.equalizeHist(image[..., channel])
                                  for channel in sm.xrange(image.shape[2])]
            images[idx] = np.stack(equalized_channels, axis=-1)
        return images

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # Contrast normalization leaves heatmaps untouched.
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # Contrast normalization does not move keypoints.
        return keypoints_on_images

    def get_parameters(self):
        # This augmenter has no stochastic parameters.
        return []
class HistogramEqualization(meta.Augmenter):
    """Apply histogram equalization to an intensity channel of each image.

    Similar to ``imgaug.augmenters.contrast.CLAHE``: input images are converted
    from `from_colorspace` to `to_colorspace`, the intensity-related channel of
    the converted image (``L`` for ``Lab``, ``L`` for ``HLS``, ``V`` for
    ``HSV``) is histogram-equalized, and the result is converted back.

    Grayscale inputs (no channel axis or a single channel) are handled
    automatically and `from_colorspace` does not have to be adjusted for them.
    For four-channel inputs (e.g. RGBA) the fourth channel is excluded from the
    colorspace conversion and re-attached afterwards. Images with unusual
    channel counts (2, 5 or more) are normalized channel-by-channel, like
    ``AllChannelsHistogramEqualization`` (a warning is raised).

    dtype support::

        * ``uint8``: yes; fully tested
        * all other dtypes: no; this augmenter relies on
          ``AllChannelsHistogramEqualization``, which only supports ``uint8``.

    Parameters
    ----------
    from_colorspace : {"RGB", "BGR", "HSV", "HLS", "Lab"}, optional
        Colorspace of the input images. Ignored for grayscale inputs.
        See ``imgaug.augmenters.color.ChangeColorspace`` for details.

    to_colorspace : {"Lab", "HLS", "HSV"}, optional
        Colorspace in which the equalization is performed. To equalize all
        channels of an image without any colorspace conversion, see
        ``imgaug.augmenters.contrast.AllChannelsHistogramEqualization``.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.HistogramEqualization()

    Standard histogram equalization augmenter.

    >>> aug = iaa.HistogramEqualization(from_colorspace=iaa.HistogramEqualization.BGR,
    >>>                                 to_colorspace=iaa.HistogramEqualization.HSV)

    Converts BGR inputs to HSV, equalizes the ``V`` channel and converts back
    to BGR. Alternatively ``Lab`` (default) or ``HLS`` can be the target
    colorspace. Grayscale images are never converted and are normalized
    directly (`from_colorspace` does not have to be changed for them).

    """

    # Colorspace constants, re-exported for convenient access on the class.
    RGB = _IntensityChannelBasedApplier.RGB
    BGR = _IntensityChannelBasedApplier.BGR
    HSV = _IntensityChannelBasedApplier.HSV
    HLS = _IntensityChannelBasedApplier.HLS
    Lab = _IntensityChannelBasedApplier.Lab

    def __init__(self, from_colorspace=color_lib.ChangeColorspace.RGB, to_colorspace=color_lib.ChangeColorspace.Lab,
                 name=None, deterministic=False, random_state=None):
        super(HistogramEqualization, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
        # The equalization itself is delegated to this child augmenter ...
        self.all_channel_histogram_equalization = AllChannelsHistogramEqualization(
            name="%s_AllChannelsHistogramEqualization" % (name,))
        # ... while this applier handles the colorspace conversion and the
        # intensity-channel extraction around it.
        self.intensity_channel_based_applier = _IntensityChannelBasedApplier(from_colorspace, to_colorspace, name=name)

    def _augment_images(self, images, random_state, parents, hooks):
        # Only uint8 survives the cv2-based equalization; reject the rest early.
        iadt.gate_dtypes(images,
                         allowed=["uint8"],
                         disallowed=["bool",
                                     "uint16", "uint32", "uint64", "uint128", "uint256",
                                     "int8", "int16", "int32", "int64", "int128", "int256",
                                     "float16", "float32", "float64", "float96", "float128", "float256"],
                         augmenter=self)

        def _equalize_intensity_channels(images_normalized, random_state_derived):
            # Callback receiving the extracted intensity channels.
            return self.all_channel_histogram_equalization._augment_images(
                images_normalized, random_state_derived, parents + [self], hooks)

        return self.intensity_channel_based_applier.apply(images, random_state, parents + [self], hooks,
                                                          _equalize_intensity_channels)

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # Heatmaps are unaffected by contrast changes.
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # Keypoints are unaffected by contrast changes.
        return keypoints_on_images

    def get_parameters(self):
        return [self.intensity_channel_based_applier.change_colorspace.from_colorspace,  # always a plain str
                self.intensity_channel_based_applier.change_colorspace.to_colorspace.value]
class _ContrastFuncWrapper(meta.Augmenter):
    # Internal helper: wraps a plain contrast-adjustment function `func` into an
    # Augmenter. `params1d` is a list of StochasticParameters, one per extra
    # argument of `func`; `per_channel` controls whether samples are drawn once
    # per image or once per image channel.
    def __init__(self, func, params1d, per_channel, dtypes_allowed=None, dtypes_disallowed=None,
                 name=None, deterministic=False, random_state=None):
        super(_ContrastFuncWrapper, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
        self.func = func
        self.params1d = params1d
        self.per_channel = iap.handle_probability_param(per_channel, "per_channel")
        # dtype gating is optional; when `dtypes_allowed` is None, no check runs.
        self.dtypes_allowed = dtypes_allowed
        self.dtypes_disallowed = dtypes_disallowed

    def _augment_images(self, images, random_state, parents, hooks):
        if self.dtypes_allowed is not None:
            iadt.gate_dtypes(images,
                             allowed=self.dtypes_allowed,
                             disallowed=self.dtypes_disallowed,
                             augmenter=self)

        nb_images = len(images)
        # One random state for the per_channel coin flips + one per image.
        # NOTE: the derivation/draw order below defines the random stream;
        # do not reorder these calls.
        rss = ia.derive_random_states(random_state, 1+nb_images)
        per_channel = self.per_channel.draw_samples((nb_images,), random_state=rss[0])

        result = images
        for i, (image, per_channel_i, rs) in enumerate(zip(images, per_channel, rss[1:])):
            # One sample per channel (per-channel mode) or a single sample.
            nb_channels = 1 if per_channel_i <= 0.5 else image.shape[2]
            samples_i = [param.draw_samples((nb_channels,), random_state=rs) for param in self.params1d]
            if per_channel_i > 0.5:
                input_dtype = image.dtype
                # Accumulate per-channel results in float64, cast back at the end.
                image_aug = image.astype(np.float64)
                for c in sm.xrange(nb_channels):
                    samples_i_c = [sample_i[c] for sample_i in samples_i]
                    args = tuple([image[..., c]] + samples_i_c)
                    image_aug[..., c] = self.func(*args)
                image_aug = image_aug.astype(input_dtype)
            else:
                # don't use something like samples_i[...][0] here, because that returns python scalars and is slightly
                # less accurate than keeping the numpy values
                args = tuple([image] + samples_i)
                image_aug = self.func(*args)
            result[i] = image_aug
        return result

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # Contrast changes do not affect heatmaps.
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # Contrast changes do not move keypoints.
        return keypoints_on_images

    def get_parameters(self):
        return self.params1d
# TODO delete this or maybe move it somewhere else
"""
class _PreserveDtype(object):
def __init__(self, func, adjust_value_range=False):
self.func = func
self.adjust_value_range = adjust_value_range
def __call__(self, *args, **kwargs):
image = args[0]
input_dtype = image.dtype
image_aug = self.func(image, *args[1:], **kwargs)
if input_dtype.type == np.uint8:
if self.adjust_value_range:
image_aug = image_aug * 255
image_aug = meta.clip_augmented_image_(image_aug, 0, 255)
image_aug = meta.restore_augmented_image_dtype_(image_aug, input_dtype)
return image_aug
"""
| 45.96987 | 120 | 0.626473 |
9186a018296125130da1f849461e995611e87b9c | 2,519 | py | Python | default_routes_service/default_routes_service/utils/utilization.py | kkkkv/tgnms | a3b8fd8a69b647a614f9856933f05e50a4affadf | [
"MIT"
] | 12 | 2021-04-06T06:27:18.000Z | 2022-03-18T10:52:29.000Z | default_routes_service/default_routes_service/utils/utilization.py | kkkkv/tgnms | a3b8fd8a69b647a614f9856933f05e50a4affadf | [
"MIT"
] | 6 | 2022-01-04T13:32:16.000Z | 2022-03-28T21:13:59.000Z | default_routes_service/default_routes_service/utils/utilization.py | kkkkv/tgnms | a3b8fd8a69b647a614f9856933f05e50a4affadf | [
"MIT"
] | 7 | 2021-09-27T13:14:42.000Z | 2022-03-28T16:24:15.000Z | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
from collections import defaultdict
from datetime import datetime
from typing import DefaultDict, List, Optional, Tuple
from aiomysql.sa.result import RowProxy
def to_tuple(routes: Optional[List[List]]) -> Optional[Tuple[Tuple]]:
    """Return a hashable (tuple-of-tuples) copy of *routes*; ``None`` passes through."""
    if routes is None:
        return None
    return tuple(map(tuple, routes))  # type: ignore
def to_list(routes: Optional[Tuple[Tuple]]) -> Optional[List[List]]:
    """Inverse of :func:`to_tuple`: rebuild nested lists; ``None`` passes through."""
    return None if routes is None else [list(hop) for hop in routes]
def compute_routes_utilization(
    raw_routes_data: List[RowProxy], start_dt: datetime, end_dt: datetime
) -> DefaultDict[str, List]:
    """Calculate routes utilization.

    Process raw_routes_data to calculate the percentage of time each route
    takes for each node.

    Rows must provide ``node_name``, ``routes`` and ``last_updated`` and are
    assumed to be ordered by ``last_updated`` per node — TODO confirm against
    the query that produces them. NOTE(review): any additional rows with
    ``last_updated < start_dt`` beyond the first would contribute negative
    durations; verify the query returns at most one pre-window row per node.
    """
    # Group raw routes data by node name
    node_routes_changes = defaultdict(list)  # type: ignore
    for row in raw_routes_data:
        node_routes_changes[row.node_name].append((row.routes, row.last_updated))

    total_time_window: float = (end_dt - start_dt).total_seconds()

    routes_utilization: DefaultDict[str, List] = defaultdict(list)
    for node_name, routes_changes in node_routes_changes.items():
        # Seconds each distinct route set was active, keyed by its hashable
        # form (None = routes unknown before the first in-window change).
        routes_duration: DefaultDict = defaultdict(float)
        first_routes, first_last_updated = routes_changes[0]
        # If the first change predates the window, that route set was already
        # active at start_dt; otherwise the initial routes are unknown (None).
        prev_routes = first_routes if first_last_updated < start_dt else None
        prev_last_updated = start_dt
        for routes, last_updated in routes_changes:
            # Calculate duration of previous route
            duration = last_updated - prev_last_updated
            routes_duration[to_tuple(prev_routes)] += duration.total_seconds()

            # Record the routes and last_updated for next iteration
            prev_routes, prev_last_updated = routes, last_updated

        # Calculate the duration from last routes change to end_dt
        duration = end_dt - prev_last_updated
        routes_duration[to_tuple(prev_routes)] += duration.total_seconds()

        # Calculate routes utilization for all routes
        for routes, duration in routes_duration.items():
            routes_utilization[node_name].append(
                {
                    "routes": to_list(routes),
                    # Share of the [start_dt, end_dt] window, rounded to 3 dp.
                    "percentage": round(duration / total_time_window * 100, 3),
                }
            )
    return routes_utilization
| 36.507246 | 81 | 0.689956 |
fa2cf399dd90a83c374bf308276c06e0f65c9eda | 1,879 | py | Python | markets/migrations/0001_initial.py | aknirmal90/arbitrage | e43c91be80c6f8ef4070d30a3a897c2e113ae136 | [
"MIT"
] | 3 | 2017-12-14T12:41:01.000Z | 2021-05-17T11:51:53.000Z | markets/migrations/0001_initial.py | aknirmal90/arbitrage | e43c91be80c6f8ef4070d30a3a897c2e113ae136 | [
"MIT"
] | 2 | 2020-06-05T17:55:14.000Z | 2021-06-10T19:48:10.000Z | markets/migrations/0001_initial.py | aknirmal90/arbitrage | e43c91be80c6f8ef4070d30a3a897c2e113ae136 | [
"MIT"
] | 2 | 2018-07-11T03:23:51.000Z | 2021-07-16T17:42:21.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-02 19:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Exchange and Market tables
    # and enforces uniqueness of (exchange, mkt_id) pairs.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Exchange',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('exch_balance_enabled', models.BooleanField()),
                ('exch_code', models.CharField(max_length=10)),
                ('exch_fee', models.DecimalField(decimal_places=6, max_digits=14)),
                ('exch_id', models.IntegerField()),
                ('exch_name', models.CharField(max_length=80)),
                ('exch_trade_enabled', models.BooleanField()),
                ('exch_url', models.CharField(max_length=160)),
                ('is_user_enabled', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='Market',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mkt_id', models.IntegerField()),
                ('mkt_name', models.CharField(max_length=20)),
                ('exchmkt_id', models.IntegerField()),
                ('currency_one', models.CharField(blank=True, max_length=10, null=True)),
                ('currency_two', models.CharField(blank=True, max_length=10, null=True)),
                # Deleting an Exchange cascades to its Markets.
                ('exchange', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='markets.Exchange')),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='market',
            unique_together=set([('exchange', 'mkt_id')]),
        ),
    ]
| 39.145833 | 116 | 0.580628 |
fd9286649dd22084fc8caacfa09f1a8a6714d46e | 4,846 | py | Python | warp/yul/parse.py | swapnilraj/warp | 2fb1fa105fc5c46b2e53790fb0a2f7165b4133a1 | [
"Apache-2.0"
] | null | null | null | warp/yul/parse.py | swapnilraj/warp | 2fb1fa105fc5c46b2e53790fb0a2f7165b4133a1 | [
"Apache-2.0"
] | null | null | null | warp/yul/parse.py | swapnilraj/warp | 2fb1fa105fc5c46b2e53790fb0a2f7165b4133a1 | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
import yul.yul_ast as ast
from yul.WarpException import WarpException, warp_assert
from yul.utils import camelize, remove_prefix, is_statement
_node_to_parser = {}
def register_parser(parser):
    """Register *parser* in the node-type dispatch table and return a wrapper
    that checks the node type before delegating.

    The parser's name must be ``parse_<snake_case_node_type>``; the matching
    AST node type is ``"Yul" + CamelCase(node_type)``.
    """
    name = parser.__name__
    assert name.startswith("parse_"), (
        "A parsing function should start with 'parse_' "
        "and end with the node type written in snake case"
    )
    node_type = "Yul" + camelize(remove_prefix(name, "parse_"))
    _node_to_parser[node_type] = parser

    def checked(yul_ast):
        # Guard against being handed the wrong node kind.
        warp_assert(
            yul_ast["nodeType"] == node_type,
            f'Expected {node_type}, got {yul_ast["nodeType"]}',
        )
        return parser(yul_ast)

    return checked
def parse_node(yul_ast) -> ast.Node:
    # Dispatch `yul_ast` to the parser registered for its "nodeType".
    node_type = yul_ast["nodeType"]
    parser = _node_to_parser.get(node_type)
    if parser is None:
        raise WarpException(f"Don't know how to handle {node_type}")
    else:
        return parser(yul_ast)


def parse_expression(yul_ast) -> ast.Expression:
    # Expressions are identifiers, literals or function calls.
    if yul_ast["nodeType"] == "YulIdentifier":
        return parse_identifier(yul_ast)
    elif yul_ast["nodeType"] == "YulLiteral":
        return parse_literal(yul_ast)
    else:
        return parse_function_call(yul_ast)


def parse_statement(yul_ast) -> ast.Statement:
    # Parse an arbitrary node and verify the result really is a statement.
    node = parse_node(yul_ast)
    warp_assert(
        is_statement(node),
        f"Expected yul_ast.Statement, got {type(node)}",
    )
    return node
@register_parser
def parse_typed_name(yul_ast) -> ast.TypedName:
    name_ = yul_ast["name"]
    type_ = yul_ast["type"]
    # Yul omits the type for the default 256-bit word; normalize it here.
    if not type_:
        type_ = "Uint256"
    return ast.TypedName(name_, type_)
def read_int(x: str) -> int:
    """Parse a decimal or ``0x``-prefixed hexadecimal integer literal."""
    base = 16 if x.startswith("0x") else 10
    return int(x, base)
@register_parser
def parse_literal(yul_ast) -> ast.Literal:
    """Parse a Yul literal (number or bool) into an AST literal.

    Raises
    ------
    WarpException
        For string literals (unsupported) and for unknown literal kinds.
    """
    kind = yul_ast["kind"]
    if kind == "number":
        return ast.Literal(read_int(yul_ast["value"]))
    elif kind == "bool":
        return ast.Literal(yul_ast["value"] == "true")
    elif kind == "string":
        raise WarpException("string literals are not supported yet")
    else:
        # Was `assert False`, which is silently stripped under `python -O`;
        # raise explicitly so malformed input always fails loudly.
        raise WarpException(f"Invalid Literal node of kind {kind!r}")
@register_parser
def parse_identifier(yul_ast) -> ast.Identifier:
    return ast.Identifier(yul_ast["name"])


@register_parser
def parse_assignment(yul_ast) -> ast.Assignment:
    # Multiple variables may be assigned at once: `a, b := f()`.
    var_names = [parse_identifier(var) for var in yul_ast["variableNames"]]
    value = parse_expression(yul_ast["value"])
    return ast.Assignment(var_names, value)


@register_parser
def parse_function_call(yul_ast) -> ast.FunctionCall:
    fun_name = parse_identifier(yul_ast["functionName"])
    args = [parse_expression(x) for x in yul_ast["arguments"]]
    return ast.FunctionCall(fun_name, args)


@register_parser
def parse_expression_statement(yul_ast) -> ast.ExpressionStatement:
    return ast.ExpressionStatement(parse_expression(yul_ast["expression"]))


@register_parser
def parse_variable_declaration(yul_ast) -> ast.VariableDeclaration:
    variables = [parse_typed_name(x) for x in yul_ast.get("variables", [])]
    # Declarations without an initializer carry a null `value`.
    value = parse_expression(yul_ast["value"]) if yul_ast["value"] else None
    return ast.VariableDeclaration(variables, value)


@register_parser
def parse_block(yul_ast) -> ast.Block:
    statements = [parse_statement(x) for x in yul_ast["statements"]]
    return ast.Block(statements=statements)


@register_parser
def parse_function_definition(yul_ast) -> ast.FunctionDefinition:
    fun_name = yul_ast["name"]
    params = [parse_typed_name(x) for x in yul_ast.get("parameters", [])]
    returns = [parse_typed_name(x) for x in yul_ast.get("returnVariables", [])]
    body = parse_block(yul_ast["body"])
    return ast.FunctionDefinition(fun_name, params, returns, body)
@register_parser
def parse_if(yul_ast) -> ast.If:
    # Yul `if` has no else-branch.
    condition = parse_expression(yul_ast["condition"])
    body = parse_block(yul_ast["body"])
    return ast.If(condition, body)


@register_parser
def parse_case(yul_ast) -> ast.Case:
    # The default case is encoded with the literal string "default" -> None.
    return ast.Case(
        parse_literal(yul_ast["value"]) if yul_ast["value"] != "default" else None,
        parse_block(yul_ast["body"]),
    )


@register_parser
def parse_switch(yul_ast) -> ast.Switch:
    return ast.Switch(
        parse_expression(yul_ast["expression"]),
        [parse_case(x) for x in yul_ast["cases"]],
    )


@register_parser
def parse_for_loop(yul_ast) -> ast.ForLoop:
    return ast.ForLoop(
        parse_block(yul_ast["pre"]),
        parse_expression(yul_ast["condition"]),
        parse_block(yul_ast["post"]),
        parse_block(yul_ast["body"]),
    )


@register_parser
def parse_break(yul_ast) -> ast.Break:
    return ast.Break()


@register_parser
def parse_continue(yul_ast) -> ast.Continue:
    return ast.Continue()


@register_parser
def parse_leave(yul_ast) -> ast.Leave:
    # `leave` is represented by a singleton in the AST module.
    return ast.LEAVE
| 27.691429 | 83 | 0.693562 |
d392195340f10c608cd30f7ff76a5d79a6f8a271 | 15,550 | py | Python | finance/api/tests.py | dynamicguy/treeio | 4f674898cff2331711639a9b5f6812c874a2cb25 | [
"MIT"
] | null | null | null | finance/api/tests.py | dynamicguy/treeio | 4f674898cff2331711639a9b5f6812c874a2cb25 | [
"MIT"
] | null | null | null | finance/api/tests.py | dynamicguy/treeio | 4f674898cff2331711639a9b5f6812c874a2cb25 | [
"MIT"
] | 1 | 2019-02-03T03:54:06.000Z | 2019-02-03T03:54:06.000Z | # encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
#-*- coding: utf-8 -*-
import json
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User as DjangoUser
from treeio.core.models import User, Group, Perspective, ModuleSetting, Object
from treeio.finance.models import Transaction, Liability, Category, Account, Equity, Asset, Currency, Tax
from treeio.identities.models import Contact, ContactType
class FinanceAPITest(TestCase):
    "Finance api tests"

    # Credentials used both to create the test user in setUp() and to build
    # the Authorization header below.
    username = "api_test"
    password = "api_password"
    prepared = False

    # Pre-computed HTTP Basic auth header for "api_test:api_password".
    authentication_headers = {"CONTENT_TYPE": "application/json",
                              "HTTP_AUTHORIZATION": "Basic YXBpX3Rlc3Q6YXBpX3Bhc3N3b3Jk"}

    content_type = 'application/json'
    def setUp(self):
        "Initial Setup"

        # Fixtures are built once per test instance; `prepared` is an
        # instance-level flag, so this block runs for every test method.
        if not self.prepared:
            # Clean up first
            Object.objects.all().delete()
            User.objects.all().delete()

            # Create objects
            try:
                self.group = Group.objects.get(name='test')
            except Group.DoesNotExist:
                Group.objects.all().delete()
                self.group = Group(name='test')
                self.group.save()

            # (Re)create the API test user; fall back to a fresh user if the
            # existing one lacks a profile.
            try:
                self.user = DjangoUser.objects.get(username=self.username)
                self.user.set_password(self.password)
                try:
                    self.profile = self.user.get_profile()
                except Exception:
                    User.objects.all().delete()
                    self.user = DjangoUser(username=self.username, password='')
                    self.user.set_password(self.password)
                    self.user.save()
            except DjangoUser.DoesNotExist:
                User.objects.all().delete()
                self.user = DjangoUser(username=self.username, password='')
                self.user.set_password(self.password)
                self.user.save()

            try:
                perspective = Perspective.objects.get(name='default')
            except Perspective.DoesNotExist:
                Perspective.objects.all().delete()
                perspective = Perspective(name='default')
                perspective.set_default_user()
                perspective.save()
            ModuleSetting.set('default_perspective', perspective.id)

            # Minimal finance object graph exercised by the endpoint tests.
            self.contact_type = ContactType(name='test')
            self.contact_type.set_default_user()
            self.contact_type.save()

            self.contact = Contact(name='test', contact_type=self.contact_type)
            self.contact.set_default_user()
            self.contact.save()

            self.category = Category(name='test')
            self.category.set_default_user()
            self.category.save()

            self.equity = Equity(issue_price=10, sell_price=10, issuer=self.contact, owner=self.contact)
            self.equity.set_default_user()
            self.equity.save()

            self.asset = Asset(name='test', owner=self.contact)
            self.asset.set_default_user()
            self.asset.save()

            self.tax = Tax(name='test', rate=10)
            self.tax.set_default_user()
            self.tax.save()

            # GBP is marked default so Account/Liability/Transaction below can
            # reference it as their currency.
            self.currency = Currency(code="GBP",
                                     name="Pounds",
                                     symbol="L",
                                     is_default=True)
            self.currency.set_default_user()
            self.currency.save()

            self.account = Account(name='test', owner=self.contact, balance_currency=self.currency)
            self.account.set_default_user()
            self.account.save()

            self.liability = Liability(name='test',
                                       source=self.contact,
                                       target=self.contact,
                                       account=self.account,
                                       value=10,
                                       value_currency=self.currency)
            self.liability.set_default_user()
            self.liability.save()

            self.transaction = Transaction(name='test', account=self.account, source=self.contact,
                                           target=self.contact, value=10, value_currency=self.currency)
            self.transaction.set_default_user()
            self.transaction.save()

            self.client = Client()

            self.prepared = True
    def test_unauthenticated_access(self):
        "Test index page at /api/finance/currencies"
        response = self.client.get('/api/finance/currencies')
        # Redirects as unauthenticated
        self.assertEquals(response.status_code, 401)

    def test_get_currencies_list(self):
        """ Test index page api/finance/currencies """
        response = self.client.get(path=reverse('api_finance_currencies'), **self.authentication_headers)
        self.assertEquals(response.status_code, 200)

    def test_get_currency(self):
        # Fetch a single currency by id.
        response = self.client.get(path=reverse('api_finance_currencies', kwargs={'object_ptr': self.currency.id}), **self.authentication_headers)
        self.assertEquals(response.status_code, 200)

    def test_update_currency(self):
        # PUT new field values and verify they are echoed back in the response.
        updates = {"code": "RUB", "name": "api RUB", "factor": "10.00", "is_active": True}
        response = self.client.put(path=reverse('api_finance_currencies', kwargs={'object_ptr': self.currency.id}),
                                   content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
        self.assertEquals(response.status_code, 200)

        data = json.loads(response.content)
        self.assertEquals(data['code'], updates['code'])
        self.assertEquals(data['name'], updates['name'])
        self.assertEquals(data['factor'], updates['factor'])
        self.assertEquals(data['is_active'], updates['is_active'])
    # --- Taxes endpoint ---

    def test_get_taxes_list(self):
        """ Test index page api/finance/taxes """
        response = self.client.get(path=reverse('api_finance_taxes'), **self.authentication_headers)
        self.assertEquals(response.status_code, 200)

    def test_get_tax(self):
        response = self.client.get(path=reverse('api_finance_taxes', kwargs={'object_ptr': self.tax.id}), **self.authentication_headers)
        self.assertEquals(response.status_code, 200)

    def test_update_tax(self):
        updates = {"name": "API TEST TAX", "rate": "20.00", "compound": False}
        response = self.client.put(path=reverse('api_finance_taxes', kwargs={'object_ptr': self.tax.id}),
                                   content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
        self.assertEquals(response.status_code, 200)

        data = json.loads(response.content)
        self.assertEquals(data['name'], updates['name'])
        self.assertEquals(data['rate'], updates['rate'])
        self.assertEquals(data['compound'], updates['compound'])

    # --- Categories endpoint ---

    def test_get_categories_list(self):
        """ Test index page api/finance/categories """
        response = self.client.get(path=reverse('api_finance_categories'), **self.authentication_headers)
        self.assertEquals(response.status_code, 200)

    def test_get_category(self):
        response = self.client.get(path=reverse('api_finance_categories', kwargs={'object_ptr': self.category.id}), **self.authentication_headers)
        self.assertEquals(response.status_code, 200)

    def test_update_category(self):
        updates = {"name": "Api category", "details": "api details"}
        response = self.client.put(path=reverse('api_finance_categories', kwargs={'object_ptr': self.category.id}),
                                   content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
        self.assertEquals(response.status_code, 200)

        data = json.loads(response.content)
        self.assertEquals(data['name'], updates['name'])
        self.assertEquals(data['details'], updates['details'])
    def test_get_assets_list(self):
        """ Test index page api/finance/assets """
        response = self.client.get(path=reverse('api_finance_assets'), **self.authentication_headers)
        self.assertEquals(response.status_code, 200)

    def test_get_asset(self):
        # Fetch a single asset by id.
        response = self.client.get(path=reverse('api_finance_assets', kwargs={'object_ptr': self.asset.id}), **self.authentication_headers)
        self.assertEquals(response.status_code, 200)
def test_update_asset(self):
updates = { "current_value": "20.0", "owner": self.contact.id, "asset_type": "fixed", "name": "Api name",
"initial_value": '40.0'}
response = self.client.put(path=reverse('api_finance_assets', kwargs={'object_ptr': self.asset.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
print response.content
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['name'], updates['name'])
self.assertEquals(data['owner']['id'], updates['owner'])
self.assertEquals(data['asset_type'], updates['asset_type'])
self.assertEquals(data['initial_value'], updates['initial_value'])
self.assertEquals(data['current_value'], updates['current_value'])
def test_get_accounts_list(self):
    """ Test index page api/finance/accounts """
    # Listing endpoint should return HTTP 200 for an authenticated client.
    response = self.client.get(path=reverse('api_finance_accounts'), **self.authentication_headers)
    self.assertEquals(response.status_code, 200)
def test_get_account(self):
    # GET api/finance/accounts/<id> for a single account; expect HTTP 200.
    response = self.client.get(path=reverse('api_finance_accounts', kwargs={'object_ptr': self.account.id}), **self.authentication_headers)
    self.assertEquals(response.status_code, 200)
def test_update_account(self):
    # PUT new field values to api/finance/accounts/<id> and verify the
    # JSON response echoes them back.
    updates = { "owner": self.user.id, "balance_display": 40.0, "name": "api test name", "balance_currency": self.currency.id }
    response = self.client.put(path=reverse('api_finance_accounts', kwargs={'object_ptr': self.account.id}),
                               content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
    self.assertEquals(response.status_code, 200)
    data = json.loads(response.content)
    self.assertEquals(data['name'], updates['name'])
    self.assertEquals(data['owner']['id'], updates['owner'])
    self.assertEquals(data['balance_display'], updates['balance_display'])
    self.assertEquals(data['balance_currency']['id'], updates['balance_currency'])
def test_get_equities_list(self):
    """ Test index page api/finance/equities"""
    # Listing endpoint should return HTTP 200 for an authenticated client.
    response = self.client.get(path=reverse('api_finance_equities'), **self.authentication_headers)
    self.assertEquals(response.status_code, 200)
def test_get_equity(self):
    # GET api/finance/equities/<id> for a single equity; expect HTTP 200.
    response = self.client.get(path=reverse('api_finance_equities', kwargs={'object_ptr': self.equity.id}), **self.authentication_headers)
    self.assertEquals(response.status_code, 200)
def test_update_equity(self):
    """PUT new field values to api/finance/equities/<id> and verify the echo.

    Renamed from the duplicated `test_update_account`: the second definition
    shadowed the accounts-update test above, so unittest never ran it. This
    method actually exercises the equities endpoint, hence the new name.
    """
    updates = { "issue_price": "100.0", "equity_type": "warrant", "sell_price": "50.0", "amount": 100,
                "purchase_date": "2011-06-06", "owner": self.contact.id, "issuer": self.contact.id }
    response = self.client.put(path=reverse('api_finance_equities', kwargs={'object_ptr': self.equity.id}),
                               content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
    self.assertEquals(response.status_code, 200)
    data = json.loads(response.content)
    self.assertEquals(data['issue_price'], updates['issue_price'])
    self.assertEquals(data['equity_type'], updates['equity_type'])
    self.assertEquals(data['sell_price'], updates['sell_price'])
    self.assertEquals(data['amount'], updates['amount'])
    self.assertEquals(data['purchase_date'], updates['purchase_date'])
    self.assertEquals(data['owner']['id'], updates['owner'])
    # The issuer assertion was duplicated in the original; kept once.
    self.assertEquals(data['issuer']['id'], updates['issuer'])
def test_get_liabilities_list(self):
    """ Test index page api/finance/liabilities"""
    # Listing endpoint should return HTTP 200 for an authenticated client.
    response = self.client.get(path=reverse('api_finance_liabilities'), **self.authentication_headers)
    self.assertEquals(response.status_code, 200)
def test_get_liability(self):
    # GET api/finance/liabilities/<id> for a single liability; expect HTTP 200.
    response = self.client.get(path=reverse('api_finance_liabilities', kwargs={'object_ptr': self.liability.id}), **self.authentication_headers)
    self.assertEquals(response.status_code, 200)
def test_update_liability(self):
    # PUT new field values to api/finance/liabilities/<id> and verify the
    # JSON response echoes them back.
    updates = { "account": self.account.id, "target": self.contact.id, "value_display": "20.0",
                "name": "api test name", "value_currency": self.currency.id}
    response = self.client.put(path=reverse('api_finance_liabilities', kwargs={'object_ptr': self.liability.id}),
                               content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
    self.assertEquals(response.status_code, 200)
    data = json.loads(response.content)
    self.assertEquals(data['name'], updates['name'])
    self.assertEquals(data['target']['id'], updates['target'])
    self.assertEquals(data['account']['id'], updates['account'])
    self.assertEquals(data['value_display'], updates['value_display'])
    self.assertEquals(data['value_currency']['id'], updates['value_currency'])
def test_get_transactions_list(self):
    """ Test index page api/finance/transactions"""
    # Listing endpoint should return HTTP 200 for an authenticated client.
    response = self.client.get(path=reverse('api_finance_transactions'), **self.authentication_headers)
    self.assertEquals(response.status_code, 200)
def test_get_transaction(self):
    # GET api/finance/transactions/<id> for a single transaction; expect HTTP 200.
    response = self.client.get(path=reverse('api_finance_transactions', kwargs={'object_ptr': self.transaction.id}), **self.authentication_headers)
    self.assertEquals(response.status_code, 200)
def test_update_transaction(self):
    """PUT new field values to api/finance/transactions/<id> and verify the echo."""
    # The original dict literal listed the "account" key twice; a duplicate key
    # silently keeps only the last value, so it is listed once here.
    updates = { "value_display": "1000.0", "account": self.account.id, "name": "api test name",
                "value_currency": self.currency.id, "datetime": "2011-03-21 11:04:42",
                "target": self.contact.id, "source": self.contact.id }
    response = self.client.put(path=reverse('api_finance_transactions', kwargs={'object_ptr': self.transaction.id}),
                               content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
    # Removed leftover debug statement `print response.content`.
    self.assertEquals(response.status_code, 200)
    data = json.loads(response.content)
    self.assertEquals(data['name'], updates['name'])
    self.assertEquals(data['value_display'], updates['value_display'])
    self.assertEquals(data['account']['id'], updates['account'])
    self.assertEquals(data['value_currency']['id'], updates['value_currency'])
    self.assertEquals(data['datetime'], updates['datetime'])
    self.assertEquals(data['target']['id'], updates['target'])
    # The account assertion was duplicated in the original; kept once.
    self.assertEquals(data['source']['id'], updates['source'])
| 50.983607 | 151 | 0.639614 |
465ba94ccf62128c53596135f99f3b8c735683d3 | 5,629 | py | Python | docs/scripts/tests/conftest.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 1 | 2022-03-09T08:11:10.000Z | 2022-03-09T08:11:10.000Z | docs/scripts/tests/conftest.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 2 | 2022-03-28T13:18:55.000Z | 2022-03-28T13:18:57.000Z | docs/scripts/tests/conftest.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
""" Configuration for tests.
Tests for documentation utilize pytest test framework for tests execution
and reports generation.
Documentation generation tests process Doxygen log to generate test per
documentation source file (.hpp, .md, etc. files). Source files
with errors can be skipped (--doxygen-skip) or excluded temporary
(--doxygen-xfail).
Usage:
pytest --doxygen doxygen.log --html doc-generation.html test_doc-generation.py
"""
import pytest
from utils.log import LogParser
def pytest_addoption(parser):
    """Register the documentation-validation command line options on *parser*."""
    # Log inputs to validate.
    parser.addoption('--doxygen', help='Doxygen log path to run tests for')
    parser.addoption('--sphinx', help='Sphinx log path to run tests for')
    # Path prefixes stripped from file names found in each log.
    for tool in ('doxygen', 'sphinx'):
        parser.addoption(
            '--%s-strip' % tool,
            default='tmp_docs/',
            help='Path to strip from paths found in %s log' % tool)
    # Repeatable list-valued options (warning filters and mute lists).
    for flag, text in (
            ('--suppress-warnings', 'A list of warning patterns to suppress'),
            ('--doxygen-xfail', 'A file with relative paths to a files with known failures'),
            ('--doxygen-skip', 'A file with relative paths to a files to exclude from validation')):
        parser.addoption(flag, action='append', default=[], help=text)
    # Opt-in link checks for external documentation components.
    for name, component in (('omz', 'omz docs'),
                            ('wb', 'workbench docs'),
                            ('pot', 'pot docs'),
                            ('gst', 'gst docs'),
                            ('ovms', 'ovms')):
        parser.addoption(
            '--include_' + name,
            action="store_true",
            default=False,
            help='Include link check for ' + component)
def read_lists(configs):
    """Collect the unique stripped lines from every readable file in *configs*.

    Files that cannot be opened are silently skipped. The order of the
    returned list is unspecified (it comes from a set).
    """
    collected = set()
    for path in configs:
        try:
            with open(path, 'r', encoding='utf-8') as handle:
                collected.update(line.strip() for line in handle)
        except OSError:
            # Missing/unreadable mute list: treat as empty.
            pass
    return list(collected)
def pytest_generate_tests(metafunc):
    """ Generate tests depending on command line options
    """
    # Components excluded from link checking by default; each --include_*
    # flag removes the matching name(s) so their links are validated.
    exclude_links = {'open_model_zoo', 'workbench', 'pot', 'gst', 'omz', 'ovms'}
    if metafunc.config.getoption('include_omz'):
        exclude_links.remove('open_model_zoo')
        exclude_links.remove('omz')
    if metafunc.config.getoption('include_wb'):
        exclude_links.remove('workbench')
    if metafunc.config.getoption('include_pot'):
        exclude_links.remove('pot')
    if metafunc.config.getoption('include_gst'):
        exclude_links.remove('gst')
    if metafunc.config.getoption('include_ovms'):
        exclude_links.remove('ovms')

    # warnings to ignore (user-supplied patterns)
    suppress_warnings = read_lists(metafunc.config.getoption('suppress_warnings'))
    # Additionally suppress unresolved-reference warnings for every excluded
    # component, in both Doxygen and Sphinx phrasing.
    for link in exclude_links:
        doxy_ref_pattern = "unable to resolve reference to '{}".format(link)
        sphinx_ref_pattern = "toctree contains reference to nonexisting document '{}".format(link)
        sphinx_ref_pattern2 = "unknown document: {}".format(link)
        suppress_warnings.append(doxy_ref_pattern)
        suppress_warnings.append(sphinx_ref_pattern)
        suppress_warnings.append(sphinx_ref_pattern2)

    # read doxygen log
    doxy_parser = LogParser(metafunc.config.getoption('doxygen'),
                            strip=metafunc.config.getoption('doxygen_strip'),
                            xfail_list=metafunc.config.getoption('doxygen_xfail'),
                            suppress_warnings=suppress_warnings)
    doxy_parser.parse()
    doxygen_warnings = doxy_parser.filter()

    # read sphinx log
    # NOTE(review): the sphinx parser reuses the *doxygen* xfail list --
    # confirm that is intended rather than a copy/paste slip.
    sphinx_parser = LogParser(metafunc.config.getoption('sphinx'),
                              strip=metafunc.config.getoption('sphinx_strip'),
                              xfail_list=metafunc.config.getoption('doxygen_xfail'),
                              suppress_warnings=suppress_warnings
                              )
    sphinx_parser.parse()
    sphinx_warnings = sphinx_parser.filter()

    # Merge both logs into one {file: warnings} mapping.
    all_warnings = dict()
    all_warnings.update(doxygen_warnings)
    all_warnings.update(sphinx_warnings)

    # Drop files that live inside excluded component directories.
    filtered_keys = filter(lambda line: not any([line.startswith(repo) for repo in exclude_links]), all_warnings)
    files_with_errors = {key: all_warnings[key] for key in filtered_keys}

    # read mute lists: xfail entries mark expected failures, skip entries
    # exclude files from validation entirely.
    marks = dict()
    marks.update(
        (name, pytest.mark.xfail)
        for name in read_lists(metafunc.config.getoption('doxygen_xfail')))
    marks.update(
        (name, pytest.mark.skip)
        for name in read_lists(metafunc.config.getoption('doxygen_skip')))

    # generate one parametrized test per documentation source file
    if 'doxygen_errors' in metafunc.fixturenames:
        metafunc.parametrize(
            'doxygen_errors', [
                pytest.param(errors, marks=marks[file])
                if file in marks else errors for file, errors in files_with_errors.items()
            ],
            ids=list(files_with_errors.keys()))
| 35.853503 | 113 | 0.645052 |
f125e8c829f3e9627bb55ce4ac1fe9e9c32c46cd | 998 | py | Python | EPI/envs/hopper_task.py | zwxxxuan/EPI | 1c48994afc97518a4d8e6df7f6cabaa8792bb425 | [
"MIT"
] | 28 | 2019-02-22T05:00:10.000Z | 2021-09-23T05:12:59.000Z | EPI/envs/hopper_task.py | zwxxxuan/EPI | 1c48994afc97518a4d8e6df7f6cabaa8792bb425 | [
"MIT"
] | 3 | 2019-06-04T05:38:42.000Z | 2019-10-27T03:51:56.000Z | EPI/envs/hopper_task.py | zwxxxuan/EPI | 1c48994afc97518a4d8e6df7f6cabaa8792bb425 | [
"MIT"
] | 1 | 2020-09-04T02:10:09.000Z | 2020-09-04T02:10:09.000Z | import numpy as np
from .hopper_avg import HopperAvgEnv
import EPI
class HopperTaskEnv(HopperAvgEnv):
    """Hopper environment whose observation is augmented with an environment
    embedding (length EPI.EMBEDDING_DIMENSION) produced by an interaction
    policy at reset time.
    """

    def __init__(self, reset):
        # epi_reset: if True, the model is reset again after the interaction
        # rollout so the task episode starts from a fresh state.
        self.epi_reset = reset
        # Embedding vector appended to every observation; zeros until the
        # first reset computes a real one.
        self.env_vec = np.zeros(EPI.EMBEDDING_DIMENSION)
        super(HopperTaskEnv, self).__init__()
        self.interactive_policy = None

    def load_interaction_policy(self, p):
        """Attach the policy whose do_interaction(env) yields the embedding."""
        self.interactive_policy = p

    def reset_model(self):
        # change_env/raw_reset_model come from HopperAvgEnv; presumably
        # change_env re-samples the environment parameters -- confirm there.
        self.change_env()
        self.raw_reset_model()
        # Run the interaction policy to infer this environment's embedding.
        self.env_vec = self.interactive_policy.do_interaction(self)
        if self.epi_reset:
            self.raw_reset_model()
        return self._get_obs()

    def _get_obs(self):
        # Standard hopper observation plus the inferred embedding.
        return np.concatenate([
            self.model.data.qpos.flat[1:],
            np.clip(self.model.data.qvel.flat, -10, 10),
            self.env_vec,
        ])

    def get_raw_obs(self):
        # Observation without the embedding (used during interaction).
        return np.concatenate([
            self.model.data.qpos.flat[1:],
            np.clip(self.model.data.qvel.flat, -10, 10),
        ])
| 27.722222 | 67 | 0.617234 |
76764e61b395393352ad5717cbb028bfcc1dc1c0 | 15,813 | py | Python | certbot/tests/log_test.py | magikid/certbot | f5a88ade54cc34ae216959a84a658986324ea69c | [
"Apache-2.0"
] | 5 | 2021-01-26T08:47:29.000Z | 2021-01-30T00:42:12.000Z | certbot/tests/log_test.py | magikid/certbot | f5a88ade54cc34ae216959a84a658986324ea69c | [
"Apache-2.0"
] | 1 | 2021-01-25T14:47:33.000Z | 2021-01-25T15:00:46.000Z | certbot/tests/log_test.py | magikid/certbot | f5a88ade54cc34ae216959a84a658986324ea69c | [
"Apache-2.0"
] | 1 | 2020-10-28T05:49:43.000Z | 2020-10-28T05:49:43.000Z | """Tests for certbot._internal.log."""
import logging
import logging.handlers
import sys
import time
import unittest
try:
import mock
except ImportError: # pragma: no cover
from unittest import mock
import six
from acme import messages
from certbot import errors
from certbot import util
from certbot._internal import constants
from certbot.compat import filesystem
from certbot.compat import os
from certbot.tests import util as test_util
class PreArgParseSetupTest(unittest.TestCase):
    """Tests for certbot._internal.log.pre_arg_parse_setup."""

    @classmethod
    def _call(cls, *args, **kwargs):  # pylint: disable=unused-argument
        from certbot._internal.log import pre_arg_parse_setup
        return pre_arg_parse_setup()

    @mock.patch('certbot._internal.log.sys')
    @mock.patch('certbot._internal.log.pre_arg_parse_except_hook')
    @mock.patch('certbot._internal.log.logging.getLogger')
    @mock.patch('certbot._internal.log.util.atexit_register')
    def test_it(self, mock_register, mock_get, mock_except_hook, mock_sys):
        mock_sys.argv = ['--debug']
        mock_sys.version_info = sys.version_info
        self._call()

        mock_root_logger = mock_get()
        # Root logger is opened wide; individual handlers do level filtering.
        mock_root_logger.setLevel.assert_called_once_with(logging.DEBUG)
        self.assertEqual(mock_root_logger.addHandler.call_count, 2)

        # Exactly one of the two handlers must be a MemoryHandler whose
        # target is itself a StreamHandler; the other is a plain StreamHandler.
        # NOTE(review): `Optional` in the type comment below is never
        # imported; harmless at runtime, but static checkers will flag it.
        memory_handler = None  # type: Optional[logging.handlers.MemoryHandler]
        for call in mock_root_logger.addHandler.call_args_list:
            handler = call[0][0]
            if memory_handler is None and isinstance(handler, logging.handlers.MemoryHandler):
                memory_handler = handler
                target = memory_handler.target  # type: ignore
            else:
                self.assertTrue(isinstance(handler, logging.StreamHandler))
        self.assertTrue(
            isinstance(target, logging.StreamHandler))
        mock_register.assert_called_once_with(logging.shutdown)

        # The installed excepthook must forward to the pre-parse hook with
        # debug=True (picked up from '--debug' in argv).
        mock_sys.excepthook(1, 2, 3)
        mock_except_hook.assert_called_once_with(
            memory_handler, 1, 2, 3, debug=True, log_path=mock.ANY)
class PostArgParseSetupTest(test_util.ConfigTestCase):
    """Tests for certbot._internal.log.post_arg_parse_setup."""

    @classmethod
    def _call(cls, *args, **kwargs):
        from certbot._internal.log import post_arg_parse_setup
        return post_arg_parse_setup(*args, **kwargs)

    def setUp(self):
        super(PostArgParseSetupTest, self).setUp()
        self.config.debug = False
        self.config.max_log_backups = 1000
        self.config.quiet = False
        self.config.verbose_count = constants.CLI_DEFAULTS['verbose_count']
        self.devnull = open(os.devnull, 'w')

        # Build the pre-arg-parse handler pair that post_arg_parse_setup is
        # expected to replace: a colored stream handler plus a memory handler
        # buffering into a temp file.
        from certbot._internal.log import ColoredStreamHandler
        self.stream_handler = ColoredStreamHandler(six.StringIO())
        from certbot._internal.log import MemoryHandler, TempHandler
        self.temp_handler = TempHandler()
        self.temp_path = self.temp_handler.path
        self.memory_handler = MemoryHandler(self.temp_handler)
        self.root_logger = mock.MagicMock(
            handlers=[self.memory_handler, self.stream_handler])

    def tearDown(self):
        self.memory_handler.close()
        self.stream_handler.close()
        self.temp_handler.close()
        self.devnull.close()
        super(PostArgParseSetupTest, self).tearDown()

    def test_common(self):
        with mock.patch('certbot._internal.log.logging.getLogger') as mock_get_logger:
            mock_get_logger.return_value = self.root_logger
            except_hook_path = 'certbot._internal.log.post_arg_parse_except_hook'
            with mock.patch(except_hook_path) as mock_except_hook:
                with mock.patch('certbot._internal.log.sys') as mock_sys:
                    mock_sys.version_info = sys.version_info
                    self._call(self.config)

        # The buffering memory handler must be swapped for a real log file,
        # and the temporary buffer file removed.
        self.root_logger.removeHandler.assert_called_once_with(
            self.memory_handler)
        self.assertTrue(self.root_logger.addHandler.called)
        self.assertTrue(os.path.exists(os.path.join(
            self.config.logs_dir, 'letsencrypt.log')))
        self.assertFalse(os.path.exists(self.temp_path))
        mock_sys.excepthook(1, 2, 3)
        mock_except_hook.assert_called_once_with(
            1, 2, 3, debug=self.config.debug, log_path=self.config.logs_dir)

        # Console verbosity depends on --quiet / -v flags.
        level = self.stream_handler.level
        if self.config.quiet:
            self.assertEqual(level, constants.QUIET_LOGGING_LEVEL)
        else:
            self.assertEqual(level, -self.config.verbose_count * 10)

    def test_debug(self):
        self.config.debug = True
        self.test_common()

    def test_quiet(self):
        self.config.quiet = True
        self.test_common()
class SetupLogFileHandlerTest(test_util.ConfigTestCase):
    """Tests for certbot._internal.log.setup_log_file_handler."""

    @classmethod
    def _call(cls, *args, **kwargs):
        from certbot._internal.log import setup_log_file_handler
        return setup_log_file_handler(*args, **kwargs)

    def setUp(self):
        super(SetupLogFileHandlerTest, self).setUp()
        self.config.max_log_backups = 42

    # NOTE(review): this patch target points at certbot._internal.main while
    # every other patch in this class targets certbot._internal.log --
    # confirm it is intended.
    @mock.patch('certbot._internal.main.logging.handlers.RotatingFileHandler')
    def test_failure(self, mock_handler):
        # An unwritable logs dir must surface as a user-facing error that
        # mentions the --logs-dir flag.
        mock_handler.side_effect = IOError
        try:
            self._call(self.config, 'test.log', '%(message)s')
        except errors.Error as err:
            self.assertTrue('--logs-dir' in str(err))
        else:  # pragma: no cover
            self.fail('Error not raised.')

    def test_success_with_rollover(self):
        self._test_success_common(should_rollover=True)

    def test_success_without_rollover(self):
        # max_log_backups == 0 disables rotation entirely.
        self.config.max_log_backups = 0
        self._test_success_common(should_rollover=False)

    def _test_success_common(self, should_rollover):
        log_file = 'test.log'
        handler, log_path = self._call(self.config, log_file, '%(message)s')
        handler.close()

        self.assertEqual(handler.level, logging.DEBUG)
        self.assertEqual(handler.formatter.converter, time.localtime)
        expected_path = os.path.join(self.config.logs_dir, log_file)
        self.assertEqual(log_path, expected_path)
        # '.1' backup exists only when rotation happened.
        backup_path = os.path.join(self.config.logs_dir, log_file + '.1')
        self.assertEqual(os.path.exists(backup_path), should_rollover)

    @mock.patch('certbot._internal.log.logging.handlers.RotatingFileHandler')
    def test_max_log_backups_used(self, mock_handler):
        # The configured backup count must be forwarded to the handler.
        self._call(self.config, 'test.log', '%(message)s')
        backup_count = mock_handler.call_args[1]['backupCount']
        self.assertEqual(self.config.max_log_backups, backup_count)
class ColoredStreamHandlerTest(unittest.TestCase):
    """Tests for certbot._internal.log.ColoredStreamHandler"""

    def setUp(self):
        self.stream = six.StringIO()
        # Pretend to be a TTY so the handler applies ANSI colors.
        self.stream.isatty = lambda: True
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.DEBUG)

        from certbot._internal.log import ColoredStreamHandler
        self.handler = ColoredStreamHandler(self.stream)
        self.logger.addHandler(self.handler)

    def tearDown(self):
        self.handler.close()

    def test_format(self):
        # Below red_level, the message is emitted uncolored.
        msg = 'I did a thing'
        self.logger.debug(msg)
        self.assertEqual(self.stream.getvalue(), '{0}\n'.format(msg))

    def test_format_and_red_level(self):
        # At or above red_level, the message is wrapped in red SGR codes.
        msg = 'I did another thing'
        self.handler.red_level = logging.DEBUG
        self.logger.debug(msg)
        self.assertEqual(self.stream.getvalue(),
                         '{0}{1}{2}\n'.format(util.ANSI_SGR_RED,
                                              msg,
                                              util.ANSI_SGR_RESET))
class MemoryHandlerTest(unittest.TestCase):
    """Tests for certbot._internal.log.MemoryHandler"""

    def setUp(self):
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.DEBUG)
        self.msg = 'hi there'
        self.stream = six.StringIO()

        self.stream_handler = logging.StreamHandler(self.stream)
        from certbot._internal.log import MemoryHandler
        self.handler = MemoryHandler(self.stream_handler)
        self.logger.addHandler(self.handler)

    def tearDown(self):
        self.handler.close()
        self.stream_handler.close()

    def test_flush(self):
        # Buffered records reach the target only on a forced flush.
        self._test_log_debug()
        self.handler.flush(force=True)
        self.assertEqual(self.stream.getvalue(), self.msg + '\n')

    def test_not_flushed(self):
        # By default, logging.ERROR messages and higher are flushed
        # -- but MemoryHandler.flush() without force=True must emit nothing,
        # hence the empty-string assertion below.
        self.logger.critical(self.msg)
        self.handler.flush()
        self.assertEqual(self.stream.getvalue(), '')

    def test_target_reset(self):
        # After setTarget, a forced flush writes to the new target only.
        self._test_log_debug()

        new_stream = six.StringIO()
        new_stream_handler = logging.StreamHandler(new_stream)
        self.handler.setTarget(new_stream_handler)
        self.handler.flush(force=True)
        self.assertEqual(self.stream.getvalue(), '')
        self.assertEqual(new_stream.getvalue(), self.msg + '\n')
        new_stream_handler.close()

    def _test_log_debug(self):
        # Helper: buffer one DEBUG record.
        self.logger.debug(self.msg)
class TempHandlerTest(unittest.TestCase):
    """Tests for certbot._internal.log.TempHandler."""

    def setUp(self):
        self.closed = False
        from certbot._internal.log import TempHandler
        self.handler = TempHandler()

    def tearDown(self):
        self.handler.close()

    def test_permissions(self):
        # Log files may contain sensitive data: owner read/write only.
        self.assertTrue(filesystem.check_permissions(self.handler.path, 0o600))

    def test_delete(self):
        # With no records emitted, the temp file is deleted on close.
        self.handler.close()
        self.assertFalse(os.path.exists(self.handler.path))

    def test_no_delete(self):
        # Once a record has been emitted, the file must survive close().
        self.handler.emit(mock.MagicMock())
        self.handler.close()
        self.assertTrue(os.path.exists(self.handler.path))
        os.remove(self.handler.path)
class PreArgParseExceptHookTest(unittest.TestCase):
    """Tests for certbot._internal.log.pre_arg_parse_except_hook."""

    @classmethod
    def _call(cls, *args, **kwargs):
        from certbot._internal.log import pre_arg_parse_except_hook
        return pre_arg_parse_except_hook(*args, **kwargs)

    @mock.patch('certbot._internal.log.post_arg_parse_except_hook')
    def test_it(self, mock_post_arg_parse_except_hook):
        # The pre-parse hook must delegate to the post-parse hook unchanged
        # and force the buffered log records out to disk.
        memory_handler = mock.MagicMock()
        args = ('some', 'args',)
        kwargs = {'some': 'kwargs'}
        self._call(memory_handler, *args, **kwargs)

        mock_post_arg_parse_except_hook.assert_called_once_with(
            *args, **kwargs)
        memory_handler.flush.assert_called_once_with(force=True)
class PostArgParseExceptHookTest(unittest.TestCase):
    """Tests for certbot._internal.log.post_arg_parse_except_hook."""

    @classmethod
    def _call(cls, *args, **kwargs):
        from certbot._internal.log import post_arg_parse_except_hook
        return post_arg_parse_except_hook(*args, **kwargs)

    def setUp(self):
        self.error_msg = 'test error message'
        self.log_path = 'foo.log'

    def test_base_exception(self):
        # Non-Exception BaseExceptions are logged loudly with a logfile hint.
        exc_type = BaseException
        mock_logger, output = self._test_common(exc_type, debug=False)
        self._assert_exception_logged(mock_logger.error, exc_type)
        self._assert_logfile_output(output)

    def test_debug(self):
        # With --debug, even ordinary errors get the full traceback treatment.
        exc_type = ValueError
        mock_logger, output = self._test_common(exc_type, debug=True)
        self._assert_exception_logged(mock_logger.error, exc_type)
        self._assert_logfile_output(output)

    def test_custom_error(self):
        # Certbot's own errors are shown quietly (message only, debug log).
        exc_type = errors.PluginError
        mock_logger, output = self._test_common(exc_type, debug=False)
        self._assert_exception_logged(mock_logger.debug, exc_type)
        self._assert_quiet_output(mock_logger, output)

    def test_acme_error(self):
        # Get an arbitrary error code
        acme_code = next(six.iterkeys(messages.ERROR_CODES))

        def get_acme_error(msg):
            """Wraps ACME errors so the constructor takes only a msg."""
            return messages.Error.with_code(acme_code, detail=msg)

        mock_logger, output = self._test_common(get_acme_error, debug=False)
        self._assert_exception_logged(mock_logger.debug, messages.Error)
        self._assert_quiet_output(mock_logger, output)
        # The ACME error prefix must be stripped from user-facing output.
        self.assertFalse(messages.ERROR_PREFIX in output)

    def test_other_error(self):
        exc_type = ValueError
        mock_logger, output = self._test_common(exc_type, debug=False)
        self._assert_exception_logged(mock_logger.debug, exc_type)
        self._assert_quiet_output(mock_logger, output)

    def test_keyboardinterrupt(self):
        # Ctrl-C produces a friendly exit message, not a traceback.
        exc_type = KeyboardInterrupt
        mock_logger, output = self._test_common(exc_type, debug=False)
        mock_logger.error.assert_called_once_with('Exiting due to user request.')

    def _test_common(self, error_type, debug):
        """Returns the mocked logger and stderr output."""
        mock_err = six.StringIO()

        def write_err(*args, **unused_kwargs):
            """Write error to mock_err."""
            mock_err.write(args[0])

        # Raise for real so a genuine exc_info triple is available.
        try:
            raise error_type(self.error_msg)
        except BaseException:
            exc_info = sys.exc_info()
            with mock.patch('certbot._internal.log.logger') as mock_logger:
                mock_logger.error.side_effect = write_err
                with mock.patch('certbot._internal.log.sys.stderr', mock_err):
                    try:
                        # The hook is expected to terminate via SystemExit;
                        # its message is captured for the assertions below.
                        self._call(
                            *exc_info, debug=debug, log_path=self.log_path)
                    except SystemExit as exit_err:
                        mock_err.write(str(exit_err))
                    else:  # pragma: no cover
                        self.fail('SystemExit not raised.')

        output = mock_err.getvalue()
        return mock_logger, output

    def _assert_exception_logged(self, log_func, exc_type):
        # The traceback must have been passed via the exc_info kwarg.
        self.assertTrue(log_func.called)
        call_kwargs = log_func.call_args[1]
        self.assertTrue('exc_info' in call_kwargs)
        actual_exc_info = call_kwargs['exc_info']
        expected_exc_info = (exc_type, mock.ANY, mock.ANY)
        self.assertEqual(actual_exc_info, expected_exc_info)

    def _assert_logfile_output(self, output):
        self.assertTrue('Please see the logfile' in output)
        self.assertTrue(self.log_path in output)

    def _assert_quiet_output(self, mock_logger, output):
        # Quiet path: no exception() call, details at debug, message shown.
        self.assertFalse(mock_logger.exception.called)
        self.assertTrue(mock_logger.debug.called)
        self.assertTrue(self.error_msg in output)
class ExitWithLogPathTest(test_util.TempDirTestCase):
    """Tests for certbot._internal.log.exit_with_log_path."""

    @classmethod
    def _call(cls, *args, **kwargs):
        from certbot._internal.log import exit_with_log_path
        return exit_with_log_path(*args, **kwargs)

    def test_log_file(self):
        # A file path is reported as a single logfile.
        log_file = os.path.join(self.tempdir, 'test.log')
        open(log_file, 'w').close()
        err_str = self._test_common(log_file)
        self.assertTrue('logfiles' not in err_str)
        self.assertTrue(log_file in err_str)

    def test_log_dir(self):
        # A directory path is reported as containing multiple logfiles.
        err_str = self._test_common(self.tempdir)
        self.assertTrue('logfiles' in err_str)
        self.assertTrue(self.tempdir in err_str)

    # pylint: disable=inconsistent-return-statements
    def _test_common(self, *args, **kwargs):
        # Capture and return the SystemExit message raised by the call.
        try:
            self._call(*args, **kwargs)
        except SystemExit as err:
            return str(err)
        self.fail('SystemExit was not raised.')  # pragma: no cover
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()  # pragma: no cover
| 37.119718 | 94 | 0.672738 |
b0dccc118f9589f4e54db652090113e151145cec | 9,069 | py | Python | src/ml/jqxxsz/8.Regression/lego/lego.py | Wuwenxu/code-camp-python | acca993fbadfb97854cb664da7181ea5ef2b1c1b | [
"Apache-2.0"
] | 4 | 2020-01-19T08:06:00.000Z | 2022-03-22T06:19:29.000Z | src/ml/jqxxsz/8.Regression/lego/lego.py | Wuwenxu/code-camp-python | acca993fbadfb97854cb664da7181ea5ef2b1c1b | [
"Apache-2.0"
] | null | null | null | src/ml/jqxxsz/8.Regression/lego/lego.py | Wuwenxu/code-camp-python | acca993fbadfb97854cb664da7181ea5ef2b1c1b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : getData.py
@Time : 2019/07/22 09:56:16
@Author : xiao ming
@Version : 1.0
@Contact : xiaoming3526@gmail.com
@Desc : 乐高玩具套件html文件数据获取
@github : https://github.com/aimi-cn/AILearners
'''
# here put the import lib
from bs4 import BeautifulSoup
import numpy as np
import random
'''
@description: 从页面读取数据,生成retX和retY列表
@param: retX - 数据X
retY - 数据Y
inFile - HTML文件
yr - 年份
numPce - 乐高部件数目
origPrc - 原价
@return:
'''
def scrapePage(retX, retY, inFile, yr, numPce, origPrc):
    """Parse one saved eBay HTML page and append sold-listing samples.

    Parameters:
        retX - feature rows, each [year, pieces, is_new_flag, original_price]
        retY - target list of observed selling prices
        inFile - path to the saved HTML file
        yr - release year of the set
        numPce - number of parts in the set
        origPrc - original retail price
    """
    # 打开并读取HTML文件
    with open(inFile, encoding='utf-8') as f:
        html = f.read()
    # Explicit parser keeps results consistent across machines and silences
    # the BeautifulSoup "no parser was explicitly specified" warning.
    soup = BeautifulSoup(html, 'html.parser')
    # Listings appear as numbered tables: <table r="1">, <table r="2">, ...
    # (The original re-fetched the same table at the top of each iteration;
    # that redundant lookup is removed -- behavior is unchanged.)
    i = 1
    currentRow = soup.find_all('table', r = "%d" % i)
    while len(currentRow) != 0:
        title = currentRow[0].find_all('a')[1].text
        lwrTitle = title.lower()
        # 查找是否有全新标签 ("new" / "nisb" in the title means brand new)
        if (lwrTitle.find('new') > -1) or (lwrTitle.find('nisb') > -1):
            newFlag = 1.0
        else:
            newFlag = 0.0
        # 查找是否已经标志出售,我们只收集已出售的数据
        soldUnicde = currentRow[0].find_all('td')[3].find_all('span')
        if len(soldUnicde) == 0:
            print("商品 #%d 没有出售" % i)
        else:
            # 解析页面获取当前价格 (strip currency formatting before float())
            soldPrice = currentRow[0].find_all('td')[4]
            priceStr = soldPrice.text
            priceStr = priceStr.replace('$','')
            priceStr = priceStr.replace(',','')
            if len(soldPrice) > 1:
                priceStr = priceStr.replace('Free shipping', '')
            sellingPrice = float(priceStr)
            # 去掉不完整的套装价格 (listings below half retail are likely partial sets)
            if sellingPrice > origPrc * 0.5:
                print("%d\t%d\t%d\t%f\t%f" % (yr, numPce, newFlag, origPrc, sellingPrice))
                retX.append([yr, numPce, newFlag, origPrc])
                retY.append(sellingPrice)
        i += 1
        currentRow = soup.find_all('table', r = "%d" % i)
'''
@description: 依次读取六种乐高套装的数据,并生成数据矩阵
@param {type}
@return:
'''
def setDataCollect(retX, retY):
    """Scrape all six saved LEGO set pages, appending samples to retX/retY.

    Each entry is (html file name, release year, piece count, original price).
    """
    base = 'D:/python/AILearners/data/ml/jqxxsz/8.Regression/lego/'
    lego_sets = (
        ('lego8288.html', 2006, 800, 49.99),     # 2006年的乐高8288,部件数目800,原价49.99
        ('lego10030.html', 2002, 3096, 269.99),  # 2002年的乐高10030,部件数目3096,原价269.99
        ('lego10179.html', 2007, 5195, 499.99),  # 2007年的乐高10179,部件数目5195,原价499.99
        ('lego10181.html', 2007, 3428, 199.99),  # 2007年的乐高10181,部件数目3428,原价199.99
        ('lego10189.html', 2008, 5922, 299.99),  # 2008年的乐高10189,部件数目5922,原价299.99
        ('lego10196.html', 2009, 3263, 249.99),  # 2009年的乐高10196,部件数目3263,原价249.99
    )
    for fname, year, pieces, price in lego_sets:
        scrapePage(retX, retY, base + fname, year, pieces, price)
'''
@description: 数据标准化
@param: xMat - x数据集
yMat - y数据集
@return: inxMat - 标准化后的x数据集
inyMat - 标准化后的y数据集
'''
def regularize(xMat, yMat):
    """Standardize features and center targets.

    NOTE: following the original Machine-Learning-in-Action code, features
    are divided by the column *variance*, not the standard deviation.

    Parameters:
        xMat - np.mat of features, one row per sample
        yMat - np.mat of targets
    Returns:
        inxMat - standardized copy of xMat (zero mean, divided by variance)
        inyMat - mean-centered copy of yMat
    """
    inxMat = xMat.copy()                # 数据拷贝
    inyMat = yMat.copy()
    yMean = np.mean(yMat, 0)            # column mean of targets
    inyMat = yMat - yMean               # center targets
    inMeans = np.mean(inxMat, 0)        # column means of features
    inVar = np.var(inxMat, 0)           # column variances of features
    # Removed leftover debug print statements from the original.
    inxMat = (inxMat - inMeans) / inVar
    return inxMat, inyMat
'''
@description: 计算平方误差
@param: yArr - 预测值
yHatArr - 真实值
@return: 平方误差
'''
def rssError(yArr, yHatArr):
    """Return the residual sum of squares between predictions and truth."""
    return np.square(yArr - yHatArr).sum()
'''
@description: 计算回归系数w
@param: xArr - x数据集
yArr - y数据集
@return: ws - 回归系数
'''
def standRegres(xArr, yArr):
    """Ordinary least squares: return ws = (X^T X)^-1 X^T y as a column matrix.

    Returns None (after printing a message) when X^T X is singular.
    """
    X = np.mat(xArr)
    y = np.mat(yArr).T
    gram = X.T * X
    # A zero determinant means the normal-equation matrix cannot be inverted.
    if np.linalg.det(gram) == 0.0:
        print("矩阵为奇异矩阵,不能转置")
        return
    return gram.I * (X.T * y)
'''
@description: 使用简单的线性回归
@param: None
@return: None
'''
def useStandRegres():
    """Fit plain OLS on the scraped LEGO data and print the learned formula."""
    lgX = []
    lgY = []
    setDataCollect(lgX, lgY)
    data_num, features_num = np.shape(lgX)
    # Prepend a constant-1 column so ws[0] acts as the intercept term.
    lgX1 = np.mat(np.ones((data_num, features_num + 1)))
    lgX1[:, 1:5] = np.mat(lgX)
    ws = standRegres(lgX1, lgY)
    print('%f%+f*年份%+f*部件数量%+f*是否为全新%+f*原价' % (ws[0],ws[1],ws[2],ws[3],ws[4]))
'''
@description: 岭回归
@param: xMat - x数据集
yMat - y数据集
lam - 缩减系数
@return: ws - 回归系数
'''
def ridgeRegres(xMat, yMat, lam = 0.2):
    """Ridge regression weights: ws = (X^T X + lam*I)^-1 X^T y.

    Returns None (after printing a message) when the penalized matrix is
    singular (only possible with lam == 0 and degenerate X).
    """
    penalized = xMat.T * xMat + np.eye(np.shape(xMat)[1]) * lam
    if np.linalg.det(penalized) == 0.0:
        print("矩阵为奇异矩阵,不能转置")
        return
    return penalized.I * (xMat.T * yMat)
'''
@description: 交叉验证岭回归
@param: xArr - x数据集
yArr - y数据集
numVal - 交叉验证次数
@return: wMat - 回归系数矩阵
'''
def crossValidation(xArr, yArr, numVal = 10):
    """Run numVal rounds of 90/10 cross-validated ridge regression and print
    the best model rewritten in terms of the original (unstandardized)
    features.
    """
    m = len(yArr)                                           # number of samples
    indexList = list(range(m))                              # shuffleable index list
    errorMat = np.zeros((numVal,30))                        # one error per (round, lambda)
    for i in range(numVal):                                 # cross-validation rounds
        trainX = []; trainY = []                            # training split
        testX = []; testY = []                              # test split
        random.shuffle(indexList)                           # randomize the split
        for j in range(m):                                  # 90% train / 10% test
            if j < m * 0.9:
                trainX.append(xArr[indexList[j]])
                trainY.append(yArr[indexList[j]])
            else:
                testX.append(xArr[indexList[j]])
                testY.append(yArr[indexList[j]])
        wMat = ridgeTest(trainX, trainY)                    # 30 ridge fits, one per lambda
        for k in range(30):                                 # evaluate each lambda
            matTestX = np.mat(testX); matTrainX = np.mat(trainX)
            meanTrain = np.mean(matTrainX,0)                # standardize the test set
            varTrain = np.var(matTrainX,0)                  # ...using TRAINING statistics
            matTestX = (matTestX - meanTrain) / varTrain
            yEst = matTestX * np.mat(wMat[k,:]).T + np.mean(trainY)   # predict
            errorMat[i, k] = rssError(yEst.T.A, np.array(testY))      # record squared error
    meanErrors = np.mean(errorMat,0)                        # mean error per lambda across rounds
    minMean = float(min(meanErrors))
    # NOTE(review): `wMat` here holds weights from the LAST round only, while
    # the error average spans all rounds -- confirm this mismatch is intended.
    bestWeights = wMat[np.nonzero(meanErrors == minMean)]
    xMat = np.mat(xArr); yMat = np.mat(yArr).T
    meanX = np.mean(xMat,0); varX = np.var(xMat,0)
    unReg = bestWeights / varX                              # undo the feature standardization
    print('%f%+f*年份%+f*部件数量%+f*是否为全新%+f*原价' % ((-1 * np.sum(np.multiply(meanX,unReg)) + np.mean(yMat)), unReg[0,0], unReg[0,1], unReg[0,2], unReg[0,3]))
'''
@description: 岭回归测试
@param: xMat - x数据集
yMat - y数据集
@return: wMat - 回归系数矩阵
'''
def ridgeTest(xArr, yArr):
    """Fit ridge regression for 30 lambda values: exp(-10), exp(-9), ..., exp(19).

    Features are standardized (mean-centered, divided by the column variance)
    and targets are mean-centered before fitting.

    Returns a (30, n_features) array; row k holds the weights for lambda = exp(k - 10).
    """
    X = np.mat(xArr)
    y = np.mat(yArr).T
    # Center targets and standardize features.
    y = y - np.mean(y, axis = 0)
    xMeans = np.mean(X, axis = 0)
    xVar = np.var(X, axis = 0)
    X = (X - xMeans) / xVar
    numTestPts = 30
    wMat = np.zeros((numTestPts, np.shape(X)[1]))
    for k in range(numTestPts):
        # lambda sweeps exponentially from a tiny to a huge penalty.
        wMat[k, :] = ridgeRegres(X, y, np.exp(k - 10)).T
    return wMat
if __name__ == '__main__':
    # Earlier experiments kept for reference:
    # lgX = []
    # lgY = []
    # setDataCollect(lgX, lgY)
    #useStandRegres()
    # lgX = []
    # lgY = []
    # setDataCollect(lgX, lgY)
    # crossValidation(lgX, lgY)
    # Collect the Lego data set and print the 30-row ridge weight matrix.
    lgX = []
    lgY = []
    setDataCollect(lgX, lgY)
    print(ridgeTest(lgX, lgY))
b9e0d727881e7e6eb4ef85f7df8f9d6fba8070c6 | 4,822 | py | Python | app/question/views.py | abhisuri97/penn-club-ratings | b6d32312c6addebedc9d04431a922ec631e4cb28 | [
"MIT"
] | 30 | 2017-11-28T15:22:43.000Z | 2022-03-27T02:00:04.000Z | app/question/views.py | abhisuri97/penn-club-ratings | b6d32312c6addebedc9d04431a922ec631e4cb28 | [
"MIT"
] | null | null | null | app/question/views.py | abhisuri97/penn-club-ratings | b6d32312c6addebedc9d04431a922ec631e4cb28 | [
"MIT"
] | 15 | 2017-12-26T12:28:55.000Z | 2021-03-01T09:41:35.000Z | from flask import abort, flash, redirect, render_template, url_for, request
from flask_login import login_required, current_user
from flask_rq import get_queue
from ..email import send_email
from .forms import NewQuestionForm
from . import question
from .. import db
from ..decorators import admin_required
from ..models import Question, Answer, Role
from ..helpers import bool
# All stuff dealing with adding, editing, and removing questions
@question.route('/new-question', methods=['GET', 'POST'])
@login_required
@admin_required
def new_question():
    """Create a new question (admin only).

    GET renders the creation form; POST validates it, persists a new
    Question row, and re-renders the form with a success flash.
    """
    form = NewQuestionForm()
    if form.validate_on_submit():
        question = Question(
            content=form.content.data,
            description=form.description.data,
            icon_name=form.icon_name.data,
            short_name=form.short_name.data,
            type=form.type.data,
            # `bool` is the project helper from ..helpers (imported above),
            # which converts the form's string value into a real boolean.
            free_response=bool(form.free_response.data))
        db.session.add(question)
        db.session.commit()
        flash('Question {} successfully created'.format(question.content),
              'form-success')
    return render_template('question/new_question.html', form=form)
@question.route('/questions')
@login_required
@admin_required
def questions():
    """View all questions (admin only)."""
    questions = Question.query.all()
    return render_template('question/questions.html', questions=questions)
@question.route('/<int:question_id>')
@question.route('/<int:question_id>/info')
@login_required
@admin_required
def question_info(question_id):
    """View a question; 404 when no question has the given id."""
    question = Question.query.filter_by(id=question_id).first()
    if question is None:
        abort(404)
    return render_template('question/manage_question.html', question=question)
@question.route(
    '/<int:question_id>/change-question-details', methods=['GET', 'POST'])
@login_required
@admin_required
def change_question_details(question_id):
    """Edit an existing question (admin only); 404 when it does not exist."""
    question = Question.query.filter_by(id=question_id).first()
    if question is None:
        abort(404)
    form = NewQuestionForm()
    if form.validate_on_submit():
        # POST: copy the validated form fields onto the question and save.
        question.content = form.content.data
        question.description = form.description.data
        question.type = form.type.data
        question.icon_name = form.icon_name.data
        question.short_name = form.short_name.data
        question.free_response = bool(form.free_response.data)
        db.session.add(question)
        db.session.commit()
        flash('Question successfully edited', 'form-success')
    # Pre-populate the form with the (possibly just-updated) question data.
    form.content.data = question.content
    form.type.data = question.type
    form.icon_name.data = question.icon_name
    form.short_name.data = question.short_name
    form.description.data = question.description
    form.free_response.data = str(question.free_response)
    return render_template(
        'question/manage_question.html', question=question, form=form)
@question.route('/<int:question_id>/delete')
@login_required
@admin_required
def delete_question_request(question_id):
    """Render the confirmation page for deleting a question; 404 if absent."""
    question = Question.query.filter_by(id=question_id).first()
    if question is None:
        abort(404)
    return render_template('question/manage_question.html', question=question)
@question.route('/<int:question_id>/_delete')
@login_required
@admin_required
def delete_question(question_id):
    """Delete a question (admin only).

    Aborts with 404 when no question has the given id — previously a
    missing question fell through to ``db.session.delete(None)`` and
    crashed, unlike the sibling views which all abort(404).
    """
    question = Question.query.filter_by(id=question_id).first()
    if question is None:
        abort(404)
    db.session.delete(question)
    db.session.commit()
    flash('Successfully deleted question %s.' % question.content, 'success')
    return redirect(url_for('question.questions'))
@question.route('/answer/<int:answer_id>/_delete')
@login_required
@admin_required
def delete_answer(answer_id):
    """Delete an answer and redirect back to its club page.

    Aborts with 404 when no answer has the given id — previously a
    missing answer crashed on ``answer.club.id``.
    """
    answer = Answer.query.filter_by(id=answer_id).first()
    if answer is None:
        abort(404)
    # Capture the club id before the row (and relationship) is deleted.
    club = answer.club.id
    db.session.delete(answer)
    db.session.commit()
    flash('Successfully deleted answer', 'success')
    return redirect(url_for('club.club_info', club_id=club))
@question.route('/answer/<int:answer_id>/flag')
@login_required
def flag_answer(answer_id):
    """Report an answer: e-mail every administrator a deletion link.

    NOTE(review): no None check — a bad answer_id crashes on
    ``answer.id`` below; consider abort(404) like the other views.
    """
    answer = Answer.query.filter_by(id=answer_id).first()
    # _external=True so the link works from inside an e-mail client.
    link = url_for(
        'question.delete_answer', answer_id=answer.id, _external=True)
    for r in Role.query.filter_by(name='Administrator').all():
        for a in r.users:
            # Deliver asynchronously through the RQ worker queue.
            get_queue().enqueue(
                send_email,
                recipient=a.email,
                subject='A new answer report was issued by {}'.format(
                    current_user.first_name),
                template='question/email/flag',
                answer=answer,
                link=link)
    flash('Successfully submitted report', 'success')
    return redirect(url_for('club.club_info', club_id=answer.club.id))
| 34.198582 | 78 | 0.695355 |
ed5d559f740fc1bbebc5fed9ef8be5aa6575a3a6 | 1,916 | py | Python | spinta/datasets/commands/link.py | atviriduomenys/spinta | 77a10e201f8cdc63143fce7996fd0898acb1ff58 | [
"MIT"
] | 2 | 2019-03-14T06:41:14.000Z | 2019-03-26T11:48:14.000Z | spinta/datasets/commands/link.py | sirex/spinta | 77a10e201f8cdc63143fce7996fd0898acb1ff58 | [
"MIT"
] | 44 | 2019-04-05T15:52:45.000Z | 2022-03-30T07:41:33.000Z | spinta/datasets/commands/link.py | sirex/spinta | 77a10e201f8cdc63143fce7996fd0898acb1ff58 | [
"MIT"
] | 1 | 2019-04-01T09:54:27.000Z | 2019-04-01T09:54:27.000Z | from typing import List
from spinta import commands
from spinta.components import Context
from spinta.core.access import link_access_param
from spinta.datasets.components import Dataset, Resource, Entity, Attribute
from spinta.exceptions import MissingReference
@commands.link.register(Context, Dataset)
def link(context: Context, dataset: Dataset):
    """Link a Dataset: resolve its access level, then link every resource."""
    link_access_param(dataset, (dataset.manifest,))
    for resource in dataset.resources.values():
        commands.link(context, resource)
@commands.link.register(Context, Resource)
def link(context: Context, resource: Resource):
    """Link a Resource: resolve its access level from the parent dataset."""
    link_access_param(resource, (resource.dataset,))
@commands.link.register(Context, Entity)
def link(context: Context, entity: Entity):
    """Link an Entity: resolve its dataset/resource names into objects.

    ``entity.dataset`` and ``entity.resource`` arrive as name strings;
    after linking they hold the matching Dataset/Resource objects (or
    None when no name was given).  MissingReference is raised when a
    name is not found, and the entity's model is registered on the
    resolved resource.
    """
    datasets = entity.model.manifest.datasets
    if entity.dataset:
        if entity.dataset not in datasets:
            raise MissingReference(
                entity,
                param='dataset',
                ref=entity.dataset,
            )
        # XXX: https://gitlab.com/atviriduomenys/spinta/-/issues/44
        dataset: str = entity.dataset
        entity.dataset = datasets[dataset]
        resources = entity.dataset.resources
        if entity.resource:
            if entity.resource not in resources:
                raise MissingReference(
                    entity,
                    param='resource',
                    ref=entity.resource,
                )
            # XXX: https://gitlab.com/atviriduomenys/spinta/-/issues/44
            resource: str = entity.resource
            entity.resource = resources[resource]
            # Each model may only be registered on its resource once.
            assert entity.model.name not in entity.resource.models
            entity.resource.models[entity.model.name] = entity.model
        else:
            entity.resource = None
    else:
        entity.dataset = None
@commands.link.register(Context, Attribute)
def link(context: Context, attribute: Attribute):
    """Link an Attribute: nothing to resolve."""
    pass
| 32.474576 | 75 | 0.653445 |
8a87e3e987e0d2fa8d36cced7b80e5f9cbb3a162 | 3,981 | py | Python | app/utils/yaml_parser.py | usriva2405/visual-relationship-detection-api | 8e6ef885fb5cd79021ea6b1d4b498650aec8e30d | [
"BSD-3-Clause"
] | 3 | 2020-01-29T10:19:25.000Z | 2021-04-16T08:28:05.000Z | app/utils/yaml_parser.py | usriva2405/visual-relationship-detection-api | 8e6ef885fb5cd79021ea6b1d4b498650aec8e30d | [
"BSD-3-Clause"
] | 16 | 2020-01-29T10:25:40.000Z | 2022-01-13T02:08:05.000Z | app/utils/yaml_parser.py | usriva2405/visual-relationship-detection-api | 8e6ef885fb5cd79021ea6b1d4b498650aec8e30d | [
"BSD-3-Clause"
] | 2 | 2021-03-10T13:26:45.000Z | 2021-11-15T04:46:10.000Z | # config_loader.py
import yaml
import os
import re
from dotenv import load_dotenv
from pathlib import Path
import logging
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
class Config:
    """Interact with configuration variables.

    Values are read from ``conf/config.yaml`` (relative to the current
    working directory), keyed first by the environment name taken from
    the ``env`` environment variable (default ``DEV``).  A config value
    of the form ``${NAME}`` is resolved from the process environment;
    ``.env`` is loaded via python-dotenv when the class is defined.
    """

    env_path = Path('.') / '.env'
    load_dotenv(dotenv_path=env_path)

    __configFilePath = (os.path.join(os.getcwd(), 'conf/config.yaml'))
    __configParser = yaml.load(open(__configFilePath), Loader=yaml.SafeLoader)

    @classmethod
    def __getenv(cls):
        """Return the current environment name (e.g. DEV or PROD)."""
        env = cls.__getenvvar("env")
        # Fall back to DEV when the variable is unset or empty.
        # (The original used `env is ''`, an identity comparison with a
        # string literal, which is unreliable and a SyntaxWarning.)
        return env if env else 'DEV'

    @classmethod
    def __getenvvar(cls, key):
        """Return environment variable *key*, or None when unset/empty."""
        value = os.getenv(key)
        return value if value else None

    @classmethod
    def get_config_val(cls, key, *args, **kwargs):
        """Get a value from config.yaml for the active environment.

        Nested keys are passed as keyword arguments ``key_1depth``,
        ``key_2depth`` and ``key_3depth``; lookup stops at the first
        depth that is None.  Returns the value as a string, the resolved
        environment variable for ``${NAME}`` values, or None on any
        lookup failure.
        """
        env = cls.__getenv()
        config_value = None
        value = None
        depth_keys = (
            kwargs.get('key_1depth', None),
            kwargs.get('key_2depth', None),
            kwargs.get('key_3depth', None),
        )
        try:
            # Walk down the nested mapping, stopping at the first unset depth.
            node = cls.__configParser[env][key]
            for depth_key in depth_keys:
                if depth_key is None:
                    break
                node = node[depth_key]
            config_value = str(node)
        except Exception as e:
            logger.error(e)
            logger.error('invalid key structure passed for retrieving value from config.yaml')
            config_value = None
            value = None
        try:
            if config_value is not None:
                # check if value is an environment variable reference
                m = re.search(r'^\${([A-Za-z_-]+)}', config_value)
                if m:
                    value = cls.__getenvvar(m.group(1))
                else:
                    value = config_value
            else:
                value = None
        except Exception as e:
            logger.error(e)
            value = None
        logger.info("config value for key {0} : {1}".format(key, value))
        return value
| 35.864865 | 108 | 0.556644 |
273a3c353a5eaac5e62dcafd0fcb1d83f450c3ba | 9,181 | py | Python | moac_tester/backends/mock/main.py | wanpixiaozi/moac-tester | 33d4633833d4b984161f0019dcde977a97474437 | [
"MIT"
] | 1 | 2018-10-16T07:08:17.000Z | 2018-10-16T07:08:17.000Z | moac_tester/backends/mock/main.py | wanpixiaozi/moac-tester | 33d4633833d4b984161f0019dcde977a97474437 | [
"MIT"
] | null | null | null | moac_tester/backends/mock/main.py | wanpixiaozi/moac-tester | 33d4633833d4b984161f0019dcde977a97474437 | [
"MIT"
] | null | null | null | import itertools
import copy
from cytoolz.dicttoolz import (
dissoc,
assoc,
)
from cytoolz.functoolz import (
compose,
partial,
)
from eth_utils import (
decode_hex,
int_to_big_endian,
denoms,
keccak,
to_canonical_address,
to_tuple,
is_integer,
)
from moac_tester.backends.base import (
BaseChainBackend,
)
from moac_tester.exceptions import (
BlockNotFound,
TransactionNotFound,
)
from moac_tester.utils.accounts import (
private_key_to_address,
)
from moac_tester.utils.encoding import (
zpad,
)
from .factory import (
fake_rlp_hash,
make_genesis_block,
make_block_from_parent,
create_transaction,
make_receipt,
)
from .serializers import (
serialize_block,
serialize_full_transaction,
serialize_transaction_as_hash,
serialize_receipt,
)
def _generate_dummy_address(idx):
    """Build a deterministic 20-byte dummy account address for index *idx*.

    The address is the 5-byte prefix 0xabbacadaba followed by *idx*
    zero-padded to 15 big-endian bytes.
    """
    return to_canonical_address(
        decode_hex('0xabbacadaba') + zpad(int_to_big_endian(idx), 15)
    )
def _get_default_account_data():
    """Return a fresh default account state: 1M ether, no code or storage."""
    return {
        'balance': 1000000 * denoms.ether,
        'code': b'',
        'nonce': 0,
        'storage': {},
    }
@to_tuple
def get_default_alloc(num_accounts=10):
    """Yield (address, default-account-data) pairs for *num_accounts* dummy accounts."""
    for idx in range(num_accounts):
        yield (
            _generate_dummy_address(idx),
            _get_default_account_data(),
        )
class MockBackend(BaseChainBackend):
    """In-memory mock chain backend: fakes blocks, accounts and
    transactions without running an EVM.  State lives entirely in the
    attributes below and can be snapshotted/restored."""
    alloc = None
    blocks = None
    block = None
    receipts = None
    fork_blocks = None
    def __init__(self, alloc=None, genesis_block=None):
        """Initialise chain state from *alloc* and *genesis_block* (defaults supplied)."""
        if alloc is None:
            alloc = get_default_alloc()
        if genesis_block is None:
            genesis_block = make_genesis_block()
        self.fork_blocks = {}
        # Deep copies so reset_to_genesis() can restore pristine state later.
        self.genesis_alloc = copy.deepcopy(alloc)
        self.genesis_block = copy.deepcopy(genesis_block)
        self.reset_to_genesis()
    #
    # Snapshot API
    #
    def take_snapshot(self):
        """Return a deep copy of the full mutable chain state."""
        return copy.deepcopy({
            'alloc': self.alloc,
            'blocks': self.blocks,
            'block': self.block,
            'receipts': self.receipts,
        })
    def revert_to_snapshot(self, snapshot):
        """Restore the state previously captured by take_snapshot()."""
        self.alloc = snapshot['alloc']
        self.blocks = snapshot['blocks']
        self.block = snapshot['block']
        self.receipts = snapshot['receipts']
    def reset_to_genesis(self):
        """Discard all chain state and mine the first block from genesis."""
        self.alloc = self.genesis_alloc
        self.blocks = []
        self.block = self.genesis_block
        self.receipts = {}
        self.fork_blocks = {}
        self.mine_blocks()
    @property
    def account_state_lookup(self):
        # alloc is a tuple of (address, account_data) pairs; expose it as a dict.
        return dict(self.alloc)
    #
    # Meta
    #
    def time_travel(self, timestamp):
        """Set the pending block's timestamp."""
        self.block['timestamp'] = timestamp
    #
    # Mining
    #
    @to_tuple
    def mine_blocks(self, num_blocks=1, coinbase=None):
        """Mine *num_blocks* blocks, yielding each mined block hash.

        NOTE(review): *coinbase* is accepted for API compatibility but
        is unused in this mock implementation.
        """
        for _ in range(num_blocks):
            block_to_mine = dissoc(self.block, 'hash')
            block_hash = fake_rlp_hash(block_to_mine)
            mined_block = assoc(block_to_mine, 'hash', block_hash)
            # Stamp every pending transaction with the mined block's number/hash.
            assign_block_info = compose(
                partial(assoc, key='block_number', value=mined_block['number']),
                partial(assoc, key='block_hash', value=mined_block['hash']),
            )
            mined_block['transactions'] = tuple(
                assign_block_info(transaction)
                for transaction
                in mined_block['transactions']
            )
            self.blocks.append(mined_block)
            # Start a fresh pending block on top of the one just mined.
            self.block = make_block_from_parent(mined_block)
            yield block_hash
    #
    # Accounts
    #
    def get_accounts(self):
        """Return the tuple of known account addresses."""
        return tuple(account for account, _ in self.alloc)
    def add_account(self, private_key):
        """Register the address derived from *private_key* with default account state."""
        account = private_key_to_address(private_key)
        self.alloc = self.alloc + (
            (account, _get_default_account_data()),
        )
    #
    # Chain data
    #
    def get_block_by_number(self, block_number, full_transactions=False):
        """Return the serialized block for an integer number or one of
        "latest"/"pending"/"earliest"; raises BlockNotFound for an
        out-of-range integer."""
        if full_transactions:
            transaction_serializer = serialize_full_transaction
        else:
            transaction_serializer = serialize_transaction_as_hash
        if block_number == self.block['number']:
            block = self.block
        elif block_number == "latest":
            try:
                block = self.blocks[-1]
            except IndexError:
                # No mined blocks yet: fall back to the pending block.
                block = self.block
        elif block_number == "pending":
            block = self.block
        elif block_number == "earliest":
            try:
                block = self.blocks[0]
            except IndexError:
                block = self.block
        elif is_integer(block_number):
            try:
                block = self.blocks[block_number]
            except IndexError:
                raise BlockNotFound("No block found for #{0}".format(block_number))
        else:
            raise Exception(
                "Invariant. Unrecognized block number format: {0}".format(block_number)
            )
        return serialize_block(
            block,
            transaction_serializer=transaction_serializer,
            is_pending=(block['number'] == self.block['number']),
        )
    def get_block_by_hash(self, block_hash, full_transactions=False):
        """Return the serialized block whose hash is *block_hash* (pending included)."""
        if full_transactions:
            transaction_serializer = serialize_full_transaction
        else:
            transaction_serializer = serialize_transaction_as_hash
        # Search newest-first, starting with the pending block.
        for block in itertools.chain([self.block], reversed(self.blocks)):
            if block['hash'] == block_hash:
                block = block
                break
        else:
            raise BlockNotFound("No block found for hash: {0}".format(block_hash))
        return serialize_block(
            block,
            transaction_serializer=transaction_serializer,
            is_pending=(block['number'] == self.block['number']),
        )
    def _get_transaction_by_hash(self, transaction_hash):
        """Return (transaction, containing block, index-from-end) or raise TransactionNotFound."""
        for block in itertools.chain([self.block], reversed(self.blocks)):
            for transaction_index, transaction in enumerate(reversed(block['transactions'])):
                if transaction['hash'] == transaction_hash:
                    return transaction, block, transaction_index
        else:
            raise TransactionNotFound(
                "No transaction found for hash: {0}".format(transaction_hash)
            )
    def get_transaction_by_hash(self, transaction_hash):
        """Return the fully serialized transaction for *transaction_hash*."""
        transaction, block, transaction_index = self._get_transaction_by_hash(transaction_hash)
        return serialize_full_transaction(
            transaction,
            block,
            transaction_index,
            is_pending=(block['number'] == self.block['number']),
        )
    def get_transaction_receipt(self, transaction_hash):
        """Return the serialized receipt for *transaction_hash*, or raise TransactionNotFound."""
        try:
            receipt = self.receipts[transaction_hash]
        except KeyError:
            raise TransactionNotFound(
                "No transaction found for hash: {0}".format(transaction_hash)
            )
        _, block, transaction_index = self._get_transaction_by_hash(transaction_hash)
        return serialize_receipt(
            receipt,
            block,
            transaction_index,
            is_pending=(block['number'] == self.block['number']),
        )
    #
    # Account state
    #
    def get_nonce(self, account, block_number=None):
        # block_number is accepted for API compatibility; state is not historic.
        try:
            return self.account_state_lookup[account]['nonce']
        except KeyError:
            return 0
    def get_balance(self, account, block_number=None):
        try:
            return self.account_state_lookup[account]['balance']
        except KeyError:
            return 0
    def get_code(self, account, block_number=None):
        try:
            return self.account_state_lookup[account]['code']
        except KeyError:
            return b''
    #
    # Transactions
    #
    def send_raw_transaction(self, raw_transaction):
        """Fake-submit raw bytes: hash them and send a minimal transaction dict."""
        transaction_hash = keccak(raw_transaction)
        transaction = {
            'from': _generate_dummy_address(0),
            'hash': transaction_hash,
            'gas': 21000,
        }
        return self.send_transaction(transaction)
    def send_transaction(self, transaction):
        """Append *transaction* to the pending block, record its receipt,
        and return its hash."""
        full_transaction = create_transaction(
            transaction,
            self.block,
            len(self.block['transactions']) + 1,
            is_pending=True,
        )
        self.receipts[full_transaction['hash']] = make_receipt(
            full_transaction,
            self.block,
            len(self.block['transactions']),
        )
        self.block['transactions'].append(full_transaction)
        self.block['gas_used'] += self.receipts[full_transaction['hash']]['gas_used']
        return full_transaction['hash']
    def send_signed_transaction(self, signed_transaction):
        """Strip the signature fields (r, s, v) and submit as a plain transaction."""
        transaction = dissoc(signed_transaction, 'r', 's', 'v')
        return self.send_transaction(transaction)
    def estimate_gas(self, transaction):
        raise NotImplementedError("Must be implemented by subclasses")
    def call(self, transaction, block_number="latest"):
        raise NotImplementedError("Must be implemented by subclasses")
| 29.146032 | 95 | 0.606906 |
2306f92081f047393a8b6aa54551654a8c66d486 | 79,789 | py | Python | galgebra/ga.py | meuns/galgebra | 3fea69ff4c4ca8f8afea083b697ef9d5112824b9 | [
"BSD-3-Clause"
] | 1 | 2016-05-08T08:13:10.000Z | 2016-05-08T08:13:10.000Z | galgebra/ga.py | meuns/galgebra | 3fea69ff4c4ca8f8afea083b697ef9d5112824b9 | [
"BSD-3-Clause"
] | 1 | 2019-11-21T18:59:22.000Z | 2019-11-26T08:37:26.000Z | galgebra/ga.py | meuns/galgebra | 3fea69ff4c4ca8f8afea083b697ef9d5112824b9 | [
"BSD-3-Clause"
] | null | null | null | """
Geometric Algebra (inherits Metric)
"""
import operator
import copy
from collections import OrderedDict
from itertools import combinations
import functools
from functools import reduce
from sympy import (
diff, Rational, Symbol, S, Mul, Add,
expand, simplify, eye, trigsimp,
symbols, sqrt, numbers, Function
)
from . import printer
from . import metric
from . import mv
from . import lt
from . import utils
half = Rational(1, 2)
one = S(1)
zero = S(0)
def all_same(items):
    """Return True when every entry of *items* equals the first (vacuously True for empty)."""
    if not items:
        return True
    first = items[0]
    return all(item == first for item in items)
def is_bases_product(w):
    """Return True when sympy expression *w* is a product of basis symbols.

    args_cnc() splits *w* into (commutative, non-commutative) factors;
    *w* is a bases product when the non-commutative part is either two
    factors, or a single factor squared.
    """
    nc_w = w.args_cnc()
    nc = nc_w[1]
    return len(nc) == 2 or len(nc) == 1 and nc[0].is_Pow and nc[0].exp == 2
class lazy_dict(dict):
    """A dictionary that computes values for missing keys on demand.

    Indexing with an unknown key calls ``self.f_value(key)`` and stores
    the result, so each key is evaluated at most once.

    Parameters
    ----------
    d :
        Initial contents, forwarded to the :class:`dict` constructor.
    f_value : function
        Called with a missing key to produce its value.
    """
    def __init__(self, d, f_value):
        dict.__init__(self, d)
        self.f_value = f_value

    def __missing__(self, key):
        # Compute once, cache, and return.
        computed = self.f_value(key)
        self[key] = computed
        return computed

    def __repr__(self):
        cls_name = type(self).__qualname__
        return '{}({}, f_value={!r})'.format(cls_name, dict.__repr__(self), self.f_value)

    def _repr_pretty_(self, p, cycle):
        # ipython pretty-printing support
        opener = type(self).__qualname__ + '('
        with p.group(len(opener), opener, ')'):
            p.type_pprinters[dict](self, p, cycle)
            p.text(',')
            p.breakable()
            p.text('f_value={}'.format(self.f_value))
def update_and_substitute(expr1, expr2, mul_dict):
    """
    Linear expand expr1 and expr2 to get (summation convention)::

        expr1 = coefs1[i] * bases1[i]
        expr2 = coefs2[j] * bases2[j]

    where ``coefs1`` and ``coefs2`` are lists of commutative expressions and
    ``bases1`` and ``bases2`` are lists of bases for the geometric algebra.

    Then evaluate::

        expr = coefs1[i] * coefs2[j] * mul_dict[bases1[i], bases2[j]]

    where ``mul_dict[bases1[i], bases2[j]]`` contains the appropriate
    product of ``bases1[i]*bases2[j]`` as a linear combination of scalars and
    bases of the geometric algebra.
    """
    # If either factor is scalar/commutative the product needs no table lookup.
    if (isinstance(expr1, numbers.Number) or expr1.is_commutative) \
            or (isinstance(expr2, numbers.Number) or expr2.is_commutative):
        return expr1 * expr2
    (coefs1, bases1) = metric.linear_expand(expr1)
    (coefs2, bases2) = metric.linear_expand(expr2)
    expr = S(0)
    for (coef1, base1) in zip(coefs1, bases1):
        for (coef2, base2) in zip(coefs2, bases2):
            # Special cases where base1 and/or base2 is scalar (== 1)
            if base1 == 1 and base2 == 1:
                expr += coef1 * coef2
            elif base1 == 1:
                expr += coef1 * coef2 * base2
            elif base2 == 1:
                expr += coef1 * coef2 * base1
            else:
                # Look up the expanded product of the two basis elements.
                key = (base1, base2)
                expr += coef1 * coef2 * mul_dict[key]
    return expr
def nc_subs(expr, base_keys, base_values=None):
    """
    See if expr contains nc keys in base_keys and substitute corresponding
    value in base_values for nc key.  This was written since standard
    sympy subs was very slow in performing this operation for non-commutative
    keys for long lists of keys.

    If *base_values* is None, *base_keys* is taken as a list of
    (key, value) pairs and unzipped.
    """
    if base_values is None:
        [base_keys, base_values] = list(zip(*base_keys))
    # A fully commutative expression contains no nc keys — nothing to do.
    if expr.is_commutative:
        return expr
    if isinstance(expr, Add):
        args = expr.args
    else:
        args = [expr]
    s = zero
    for term in args:
        if term.is_commutative:
            s += term
        else:
            # Split the term into commutative coefficient and nc "key" part.
            c, nc = term.args_cnc(split_1=False)
            key = Mul._from_args(nc)
            coef = Mul._from_args(c)
            if key in base_keys:
                base = base_values[base_keys.index(key)]
                s += coef * base
            else:
                # Unknown nc factor: keep the term unchanged.
                s += term
    return s
class Ga(metric.Metric):
r"""
The vector space (basis, metric, derivatives of basis vectors) is
defined by the base class :class:`~galgebra.metric.Metric`.
The instanciating the class :class:`Ga` constructs the geometric algebra of
the vector space defined by the metric.
The construction includes the multivector bases, multiplication
tables or functions for the geometric (``*``), inner (``|``), outer (``^``)
products, plus the left (``<``) and right (``>``) contractions. The
geometric derivative operator and any required connections for the
derivative are also calculated.
Except for the geometric product in the case of a non-orthogonal
set of basis vectors all products and connections (if needed) are
calculated when needed and place in dictionaries (lists of tuples)
to be used when needed. This greatly speeds up evaluations of
multivector expressions over previous versions of this code since
the products of multivector bases and connection are not calculated
unless they are actually needed in the current calculation.
Only instantiate the :class:`Ga` class via the :class:`~galgebra.mv.Mv` class or any use
of enhanced printing (text or latex) will cause the bases and multiplication
table entries to be incorrectly labeled .
.. rubric:: Inherited from Metric class
.. autosummary::
~galgebra.metric.Metric.g
~galgebra.metric.Metric.g_inv
~galgebra.metric.Metric.norm
~galgebra.metric.Metric.coords
~galgebra.metric.Metric.is_ortho
~galgebra.metric.Metric.connect_flg
~galgebra.metric.Metric.basis
~galgebra.metric.Metric.r_symbols
~galgebra.metric.Metric.n
~galgebra.metric.Metric.n_range
~galgebra.metric.Metric.de
.. rubric:: Basis, basis bases, and basis blades data structures
.. attribute:: indexes
Index list for multivector bases and blades by grade (tuple of tuples). Tuple
so that indexes can be used to index dictionaries.
.. attribute:: bases
List of bases (non-commutative sympy symbols) by grade.
Only created for non-orthogonal basis vectors.
.. attribute:: blades
List of basis blades (non-commutative sympy symbols) by grade.
For orthogonal basis vectors the same as bases.
.. attribute:: coord_vec
Linear combination of coordinates and basis vectors. For
example in orthogonal 3D :math:`x*e_x+y*e_y+z*e_z`.
.. attribute:: blades_to_indexes_dict
Map basis blades to index tuples (dictionary).
.. attribute:: indexes_to_blades_dict
Map index tuples to basis blades (dictionary).
.. attribute:: bases_to_indexes_dict
Map basis bases to index tuples (dictionary).
.. attribute:: indexes_to_bases_dict
Map index tuples to basis bases (dictionary).
.. rubric:: Multiplication tables data structures
Keys in all multiplication tables (``*``, ``^``, ``|``, ``<``, ``>``) are always ``symbol1*symbol2``.
The correct operation is known by the context (name) of the relevant list or dictionary). These dictionaries are
lazy, meaning they may be empty until an attempt is made to index them.
.. attribute:: mul_table_dict
Geometric products of basis blades as a :class:`lazy_dict`, ``{base1*base2: Expansion of base1*base2,...}``
.. attribute:: wedge_table_dict
Outer products of basis blades as a :class:`lazy_dict`, ``{base1*base2: Expansion of base1^base2,...}`
.. attribute:: dot_table_dict
Hestenes inner products of basis blades as a :class:`lazy_dict`, ``{base1*base2: Expansion of base1|base2,...}``
.. attribute:: left_contract_table_dict
Left contraction of basis blades as a :class:`lazy_dict`, ``{base1*base2: Expansion of base1<base2,...}``
.. attribute:: right_contract_table_dict
Right contraction of basis blades as a :class:`lazy_dict`, ``{base1*base2: Expansion of base1>base2,...}``
.. rubric:: Reciprocal basis data structures
.. attribute:: r_symbols
Reciprocal basis vector symbols (list of non-commutative sympy variables)
.. attribute:: r_basis
List of reciprocal basis vectors expanded as linear combination of basis vector symbols.
.. attribute:: r_basis_dict
Dictionary to map reciprocal basis symbols to reciprocal basis expanded in terms of basis symbols
``{reciprocal basis symbol: linear combination of basis symbols, ...}``
.. attribute:: r_basis_mv
List of reciprocal basis vectors in terms of basis multivectors (elements of list can be used in
multivector expressions.)
.. rubric:: Derivative data structures
.. attribute:: de
Derivatives of basis functions. Two dimensional list. First entry is differentiating coordinate index.
Second entry is basis vector index. Quantities are linear combinations of basis vector symbols.
.. attribute:: Pdop_identity
Partial differential operator identity (operates on multivector function to return function).
.. attribute:: Pdiffs
Dictionary of partial differential operators (operates on multivector functions) for each coordinate
:math:`\{x: \partial_{x}, ...\}`
.. attribute:: sPds
Dictionary of scalar partial differential operators (operates on scalar functions) for each coordinate
:math:`\{x: \partial_{x}, ...\}`
.. attribute:: grad
Geometric derivative operator from left. ``grad*F`` returns multivector
derivative, ``F*grad`` returns differential operator.
.. attribute:: rgrad
Geometric derivative operator from right. ``rgrad*F`` returns differential
operator, ``F*rgrad`` returns multivector derivative.
.. Sphinx adds all the other members below this docstring
.. rubric:: Other members
.. attribute:: dot_mode
Controls the behavior of :meth:`dot`
======= ======================
value ``dot`` aliases
======= ======================
``'|'`` :meth:`hestenes_dot`
``'<'`` :meth:`left_contract`
``'>'`` :meth:`right_contract`
======= ======================
"""
dual_mode_value = 'I+'
dual_mode_lst = ['+I', 'I+', '-I', 'I-', '+Iinv', 'Iinv+', '-Iinv', 'Iinv-']
restore = False
a = []
presets = {'o3d': 'x,y,z:[1,1,1]:[1,1,0]',
'cyl3d': 'r,theta,z:[1,r**2,1]:[1,1,0]:norm=True',
'sph3d': 'r,theta,phi:[1,X[0]**2,X[0]**2*cos(X[1])**2]:[1,1,0]:norm=True',
'para3d': 'u,v,z:[u**2+v**2,u**2+v**2,1]:[1,1,0]:norm=True'}
    @staticmethod
    def dual_mode(mode='I+'):
        """
        Sets mode of multivector dual function for all geometric algebras
        in users program.

        If Ga.dual_mode(mode) not called the default mode is ``'I+'``.

        =====  ============
        mode   return value
        =====  ============
        +I     I*self
        -I     -I*self
        I+     self*I
        I-     -self*I
        +Iinv  Iinv*self
        -Iinv  -Iinv*self
        Iinv+  self*Iinv
        Iinv-  -self*Iinv
        =====  ============

        Raises ValueError when *mode* is not one of the table entries.
        """
        if mode not in Ga.dual_mode_lst:
            raise ValueError('mode = ' + mode + ' not allowed for Ga.dual_mode.')
        Ga.dual_mode_value = mode
        return
    @staticmethod
    def com(A, B):
        """Return the commutator product (A*B - B*A)/2 of two multivectors."""
        return half * (A * B - B * A)
    @staticmethod
    def build(*args, **kwargs):
        """
        Static method to instantiate geometric algebra and return geometric
        algebra, basis vectors, and grad operator as a tuple.

        NOTE(review): despite the wording above, the code below returns
        only ``(GA, e_1, ..., e_n)`` — the grad operator is not included.
        Arguments are forwarded unchanged to the Ga constructor.
        """
        GA = Ga(*args, **kwargs)
        basis = list(GA.mv())
        return tuple([GA] + basis)
    @staticmethod
    def preset(setting, root='e', debug=False):
        """Instantiate a Ga from one of the entries in ``Ga.presets``.

        The preset string has the form
        'coords:g:simp_flags(:kwarg=value;...)' and its pieces are
        evaluated with eval() (safe here: input comes only from the
        class-level ``presets`` table).  *root* names the basis-vector
        root symbol.  Raises ValueError for an unknown *setting*.
        """
        if setting not in Ga.presets:
            raise ValueError(str(setting) + 'not in Ga.presets.')
        set_lst = Ga.presets[setting].split(':')
        X = symbols(set_lst[0], real=True)
        g = eval(set_lst[1])
        simps = eval(set_lst[2])
        args = [root]
        kwargs = {'g': g, 'coords': X, 'debug': debug, 'I': True, 'gsym': False}
        # Optional trailing section: ';'-separated extra keyword arguments.
        if len(set_lst) > 3:
            args_lst = set_lst[-1].split(';')
            for arg in args_lst:
                [name, value] = arg.split('=')
                kwargs[name] = eval(value)
        # Ga.set_simp — presumably inherited from Metric; not visible here.
        Ga.set_simp(*simps)
        return Ga(*args, **kwargs)
    def __eq__(self, ga):
        """Two algebras compare equal when they have the same name.

        NOTE(review): assumes *ga* has a ``name`` attribute (i.e. is a
        Ga/Metric) — comparing with anything else raises AttributeError.
        Defining ``__eq__`` without ``__hash__`` also makes Ga unhashable.
        """
        if self.name == ga.name:
            return True
        return False
    def __init__(self, bases, **kwargs):
        """Construct the geometric algebra over the vector space defined
        by *bases* and the Metric keyword arguments (see class docstring)."""
        # Each time a geometric algebra is initialized, the printer must be
        # restored to simple text mode (not enhanced text or latex printing)
        # so that when 'str' is used to create symbol names the names are
        # not mangled.
        kwargs = metric.test_init_slots(metric.Metric.init_slots, **kwargs)
        self.wedge_print = kwargs['wedge']
        if printer.GaLatexPrinter.latex_flg:
            printer.GaLatexPrinter.restore()
            Ga.restore = True
        metric.Metric.__init__(self, bases, **kwargs)
        self.par_coords = None
        self._build_bases(kwargs.get('sign_and_indexes', None))
        self.dot_mode = '|'
        self._build_basis_product_tables()
        if self.coords is not None:
            self.coords = list(self.coords)
        self.e = mv.Mv(self._all_blades_lst[-1], ga=self) # Pseudo-scalar for geometric algebra
        self.e_sq = simplify(expand((self.e*self.e).scalar()))
        if self.coords is not None:
            self.coord_vec = sum([coord * base for (coord, base) in zip(self.coords, self.basis)])
            self._build_reciprocal_basis(self.gsym)
            self.Pdop_identity = mv.Pdop({},ga=self) # Identity Pdop = 1
            self.Pdiffs = {}
            self.sPds = {}
            for x in self.coords: # Partial derivative operator for each coordinate
                self.Pdiffs[x] = mv.Pdop({x:1}, ga=self)
                self.sPds[x] = mv.Sdop([(S(1), self.Pdiffs[x])], ga=self)
            self._build_grads()
        else:
            self.r_basis_mv = None
        if self.connect_flg:
            self._build_connection()
        self._lt_flg = False  # cache for `self.lt`
        # Calculate normalized pseudo scalar (I**2 = +/-1)
        self.sing_flg = False
        if self.e_sq.is_number:
            if self.e_sq == S(0):
                self.sing_flg = True
                print('!!!!If I**2 = 0, I cannot be normalized!!!!')
                #raise ValueError('!!!!If I**2 = 0, I cannot be normalized!!!!')
            if self.e_sq > S(0):
                self.i = self.e/sqrt(self.e_sq)
                self.i_inv = self.i
            else:  # I**2 = -1
                self.i = self.e/sqrt(-self.e_sq)
                self.i_inv = -self.i
        else:
            # e_sq is symbolic: the user-supplied Isq flag decides the sign.
            if self.Isq == '+':  # I**2 = 1
                self.i = self.e/sqrt(self.e_sq)
                self.i_inv = self.i
            else:  # I**2 = -1
                self.i = self.e/sqrt(-self.e_sq)
                self.i_inv = -self.i
        if Ga.restore:  # restore printer to appropriate enhanced mode after ga is instantiated
            printer.GaLatexPrinter.redirect()
        if self.debug:
            print('Exit Ga.__init__()')
        self.a = []  # List of dummy vectors for Mlt calculations
        self._agrads = {}  # cache of gradient operator with respect to vector a
        self.dslot = -1  # args slot for derivative, -1 for coordinates
        self.XOX = self.mv('XOX','vector')  # Versor test vector
def make_grad(self, a, cmpflg=False):
    """Return the gradient operator with respect to the vector ``a``.

    If ``a`` is a list/tuple, build (and cache) a gradient for each entry
    and return ``None``.  Results are cached in ``self._agrads``.

    NOTE(review): the cache is keyed on ``a`` only, so a second call with a
    different ``cmpflg`` returns the first cached operator — confirm intended.
    """
    if isinstance(a, (list, tuple)):
        for ai in a:
            self.make_grad(ai)
        return
    if a in self._agrads:  # cached from a previous call
        return self._agrads[a]
    if isinstance(a, mv.Mv):
        ai = a.get_coefs(1)  # coordinate symbols of the vector a
    else:
        ai = a
    coefs = []
    pdiffs = []
    for (base, coord) in zip(self.r_basis_mv, ai):
        coefs.append(base)
        pdiffs.append(mv.Pdop({coord: 1}, ga=self))
    self._agrads[a] = mv.Dop(coefs, pdiffs, ga=self, cmpflg=cmpflg)
    self.a.append(a)
    return self._agrads[a]
def __str__(self):
    """Display a geometric algebra by its name."""
    return self.name
def E(self):
    """Return the unnormalized pseudoscalar e_1^...^e_n of the algebra."""
    return self.e
def I(self):
    """Return the normalized pseudoscalar (I**2 = +/-1) of the algebra."""
    return self.i
def I_inv(self):
    """Return the inverse of the normalized pseudoscalar."""
    return self.i_inv
@property
def mv_I(self):
    """Backwards-compatible default pseudoscalar.

    Note: this returns the *unnormalized* pseudoscalar ``E()``, not ``I()``.
    """
    return self.E()
@property
def mv_x(self):
    """Backwards-compatible testing vector; a fresh Mv on each access."""
    return Mv('XxXx', 'vector', ga=self)
def X(self):
    """Return the coordinate vector sum_i x^i e_i as a multivector."""
    return self.mv(sum(coord * base for (coord, base) in zip(self.coords, self.basis)))
def sdop(self, coefs, pdiffs=None):
    """Instantiate and return a scalar differential operator for this algebra."""
    return mv.Sdop(coefs, pdiffs, ga=self)
def mv(self, root=None, *args, **kwargs):
    """
    Instantiate and return a multivector for this, 'self',
    geometric algebra.

    ``root=None`` returns the list of basis vectors.  A non-string root is
    forwarded directly to ``mv.Mv``.  A space-separated string root yields a
    tuple of multivectors, optionally paired element-wise with a
    space-separated type string in ``args[0]``.

    Fixes an IndexError in the original: a space-separated root with no
    ``args`` (e.g. ``ga.mv('a b')``) no longer crashes.
    """
    if root is None:  # Return ga basis and compute grad and rgrad
        return self.mv_basis
    kwargs['ga'] = self
    if not utils.isstr(root):
        return mv.Mv(root, *args, **kwargs)
    if ' ' in root:  # space-separated list of multivector roots
        root_lst = root.split(' ')
        if len(args) > 0 and utils.isstr(args[0]) and ' ' in args[0]:
            # one multivector type per root
            mvtype_lst = args[0].split(' ')
            if len(root_lst) != len(mvtype_lst):
                raise ValueError('In Ga.mv() for multiple multivectors and ' +
                                 'multivector types incompatible args ' +
                                 str(root_lst) + ' and ' + str(mvtype_lst))
            mv_lst = []
            for (rt, mv_type) in zip(root_lst, mvtype_lst):
                mv_lst.append(mv.Mv(rt, mv_type, *args[1:], **kwargs))
            return tuple(mv_lst)
        # single (or no) multivector type shared by all roots
        return tuple([mv.Mv(rt, *args, **kwargs) for rt in root_lst])
    return mv.Mv(root, *args, **kwargs)
def mvr(self, norm=True):
    r"""
    Returns tuple of reciprocal basis vectors. If norm=True or
    basis vectors are orthogonal the reciprocal basis is normalized
    in the sense that

    .. math:: e_{i}\cdot e^{j} = \delta_{i}^{j}.

    If the basis is not orthogonal and norm=False then

    .. math:: e_{i}\cdot e^{j} = I^{2}\delta_{i}^{j}.
    """
    # Reciprocal basis is built lazily if the constructor skipped it.
    if self.r_basis_mv is None:
        self._build_reciprocal_basis(self.gsym)
    if norm and not self.is_ortho:
        # divide out E_n**2 to normalize (see _build_reciprocal_basis)
        return tuple([self.r_basis_mv[i] / self.e_sq for i in self.n_range])
    else:
        return tuple(self.r_basis_mv)
def bases_dict(self, prefix=None):
    """Return a dict mapping generated names to basis-blade multivectors.

    Each key is ``prefix`` (default ``'e'``) followed by the digits found
    in the blade's printed name, e.g. ``'e12'`` for ``e_1^e_2``.  The
    scalar blade is omitted (it has no name).  Handy for populating a
    namespace with the variables of a given layout:

    >>> locals().update(ga.bases_dict())
    """
    if prefix is None:
        prefix = 'e'
    bl = self._all_mv_blades_lst[1:]  # skip the unnamed scalar blade
    var_names = [prefix + ''.join(c for c in str(b) if c.isdigit()) for b in bl]
    return dict(zip(var_names, bl))
def _build_grads(self):
    """Construct the gradient ``self.grad`` and right-gradient
    ``self.rgrad`` differential operators from the reciprocal basis."""
    if not self.is_ortho:
        # non-orthogonal reciprocal vectors carry a factor E_n**2 that
        # must be divided out
        r_basis = [x / self.e_sq for x in self.r_basis_mv]
    else:
        r_basis = self.r_basis_mv
    if self.norm:
        # NOTE(review): this rebuilds from self.r_basis_mv directly,
        # discarding the e_sq rescaling above for a non-orthogonal
        # basis — confirm intended.
        r_basis = [x / e_norm for (x, e_norm) in zip(self.r_basis_mv, self.e_norm)]
    pdx = [self.Pdiffs[x] for x in self.coords]
    self.grad = mv.Dop(r_basis, pdx, ga=self)
    self.rgrad = mv.Dop(r_basis, pdx, ga=self, cmpflg=True)
def grads(self):
    """Return the pair ``(grad, rgrad)`` of gradient operators.

    Raises ``ValueError`` when the algebra was built without coordinates,
    since the operators only exist in that case.
    """
    if self.coords is None:
        raise ValueError("Ga must have been initialized with coords to compute grads")
    return self.grad, self.rgrad
def dop(self, *args, **kwargs):
    """Instantiate and return a multivector differential operator for
    this geometric algebra."""
    kwargs['ga'] = self
    return mv.Dop(*args, **kwargs)
def lt(self, *args, **kwargs):
    """Instantiate and return a linear transformation on this
    geometric algebra."""
    if not self._lt_flg:
        # one-time setup of the symbols Lt needs, cached on the algebra
        self._lt_flg = True
        self.lt_coords, self.lt_x = lt.Lt.setup(ga=self)
    kwargs['ga'] = self
    return lt.Lt(*args, **kwargs)
def sm(self, *args, **kwargs):
    """Instantiate and return a submanifold of this geometric algebra.

    See :class:`Sm` for the accepted instantiation inputs.
    """
    kwargs['ga'] = self
    return Sm(*args, **kwargs)
def parametric(self, coords):
    """Register parametric functions for each coordinate.

    ``coords`` must be a list with one parametric expression per
    coordinate of the algebra; they are stored in ``self.par_coords``
    keyed by the coordinate symbols.

    Raises TypeError if ``coords`` is not a list and ValueError if its
    length does not match the number of coordinates.
    """
    if not isinstance(coords, list):
        raise TypeError('In Ga.parametric coords = ' + str(coords) +
                        ' is not a list.')
    if len(coords) != self.n:
        raise ValueError('In Ga.parametric number of parametric functions' +
                         ' not equal to number of coordinates.')
    self.par_coords = dict(zip(self.coords, coords))
    return
def basis_vectors(self):
    """Return the basis vectors of the algebra as a tuple."""
    return tuple(self.basis)
def _build_basis_base_symbol(self, base_index):
    """Build the non-commutative symbol used for the `base_rep` of the
    basis-vector index tuple; S(1) for the empty (scalar) tuple."""
    if not base_index:
        return S(1)
    name = '*'.join(str(self.basis[i]) for i in base_index)
    return Symbol(name, commutative=False)
def _build_basis_blade_symbol(self, base_index):
    """ Build a symbol used for the `blade_rep` from the given tuple.

    With ``wedge_print`` the name is e.g. ``e_1^e_2``; otherwise the
    basis vector names are merged into a single subscripted name,
    e.g. ``e_12``, which requires all basis vectors to share one root.
    """
    if not base_index:
        return S(1)  # empty tuple is the scalar blade
    if self.wedge_print:
        symbol_str = '^'.join([str(self.basis[i]) for i in base_index])
    else:
        sub_str = []
        root_str = []
        for i in base_index:
            basis_vec_str = str(self.basis[i])
            # each basis vector name must look like '<root>_<subscript>'
            split_lst = basis_vec_str.split('_')
            if len(split_lst) != 2:
                raise ValueError('!!!!Incompatible basis vector '+basis_vec_str+' for wedge_print = False!!!!')
            else:
                sub_str.append(split_lst[1])
                root_str.append(split_lst[0])
        if all_same(root_str):
            symbol_str = root_str[0] + '_' + ''.join(sub_str)
        else:
            raise ValueError('!!!!No unique root symbol for wedge_print = False!!!!')
    return Symbol(symbol_str, commutative=False)
def build_cobases(self, coindexes=None):
    """
    Cobases for building Poincare duality, this is useful for defining
    wedge and vee without using I nor any metric.

    ``coindexes`` is a sequence of per-grade index tuples (same shape as
    ``self.indexes``); it populates ``self.coblades_lst`` and
    ``self.coblades_inv_lst``.
    """
    # TODO: check this can be used with another GA than 3D PGA...
    if coindexes is None:
        raise NotImplementedError('!!!!We should provide a default implementation!!!!')
    else:
        self.coindexes = coindexes
        self.coindexes_lst = [index for cograde_index in coindexes for index in cograde_index]
    n = self.n
    self.coblades_lst = []
    for cograde_index in self.coindexes:
        # k = grade of this group (all tuples in a group share a length)
        k = len(cograde_index[0]) if len(cograde_index) > 0 else 0
        for cobase_index in cograde_index:
            # sign flip only for odd grade n-1 coblades
            coblade_sign = -1 if k == n - 1 and k % 2 == 1 else 1
            coblade = coblade_sign * self._build_basis_blade_symbol(cobase_index)
            self.coblades_lst.append(coblade)
    self.coblades_inv_lst = []
    for grade_index in self.indexes:
        k = len(grade_index[0]) if len(grade_index) > 0 else 0
        for base_index in grade_index:
            # inverse coblades flip sign only at grade 1
            coblade_inv_sign = -1 if k == 1 and k % 2 == 1 else 1
            coblade_inv = coblade_inv_sign * self._build_basis_blade_symbol(base_index)
            self.coblades_inv_lst.append(coblade_inv)
    self.coblades_inv_lst = list(reversed(self.coblades_inv_lst))
def _build_bases(self, sign_and_indexes=None):
    r"""
    The bases for the multivector (geometric) algebra are formed from
    all combinations of the bases of the vector space and the scalars.

    Each base is represented as a non-commutative symbol of the form

    .. math:: e_{i_{1}}e_{i_{2}}...e_{i_{r}}

    where :math:`0 < i_{1} < i_{2} < ... < i_{r}` and :math:`0 < r \le n` the
    dimension of the vector space and :math:`0 < i_{j} \le n`. The total
    number of all symbols of this form plus the scalars is :math:`2^{n}`.
    Any multivector can be represented as a linear combination
    of these bases and the scalars.

    If the basis vectors are not orthogonal a second set of symbols
    is required given by -

    .. math:: e_{i_{1}}\wedge e_{i_{2}}\wedge ...\wedge e_{i_{r}}.

    These are called the blade basis for the geometric algebra and
    any multivector can also be represented by a linear combination
    of these blades and the scalars. The number of basis vectors
    that are in the symbol for the blade is called the grade of the
    blade.

    Representing the multivector as a linear combination of blades
    gives a blade decomposition of the multivector.

    There is a linear mapping from bases to blades and blades to
    bases so that one can easily convert from one representation to
    another. For the case of an orthogonal set of basis vectors the
    bases and blades are identical.
    """
    # index list for multivector bases and blades by grade
    if sign_and_indexes is None:
        basis_indexes = tuple(self.n_range)
        self.indexes = []
        self._all_indexes_lst = []
        for i in range(len(basis_indexes) + 1):
            base_tuple = tuple(combinations(basis_indexes, i))
            self.indexes.append(base_tuple)
            self._all_indexes_lst += list(base_tuple)
        self.indexes = tuple(self.indexes)
    else:
        # caller supplied (signs, indexes): adopt the given ordering
        self.indexes = sign_and_indexes[1]
        self._all_indexes_lst = [index for grade_index in self.indexes for index in grade_index]
    # list of non-commutative symbols for multivector bases and blades
    # by grade and as a flattened list
    self.blades = []
    self._all_blades_lst = []
    for grade_index in self.indexes:
        blades = []
        super_scripts = []  # NOTE(review): unused here; superscripts are built further below
        for base_index in grade_index:
            blade_symbol = self._build_basis_blade_symbol(base_index)
            blades.append(blade_symbol)
            self._all_blades_lst.append(blade_symbol)
        self.blades.append(blades)
    # maps between blade symbols and (sign, index-tuple) pairs
    self.blades_to_indexes = []
    self.indexes_to_blades = []
    if sign_and_indexes is None:
        for (index, blade) in zip(self._all_indexes_lst, self._all_blades_lst):
            self.blades_to_indexes.append((blade, (1, index)))
            self.indexes_to_blades.append((index, blade))
    else:
        # map custom-signed blades back onto the default (combinations) ordering
        basis_indexes = tuple(self.n_range)
        default_indexes_lst = []
        for i in range(len(basis_indexes) + 1):
            base_tuple = tuple(combinations(basis_indexes, i))
            default_indexes_lst += list(base_tuple)
        signs_lst = [sign for grade_sign in sign_and_indexes[0] for sign in grade_sign]
        for (default_index, sign, blade) in zip(default_indexes_lst, signs_lst, self._all_blades_lst):
            self.blades_to_indexes.append((blade, (sign, default_index)))
            self.indexes_to_blades.append((default_index, sign * blade))
    self.blades_to_indexes_dict = OrderedDict(self.blades_to_indexes)
    self.indexes_to_blades_dict = OrderedDict(self.indexes_to_blades)
    # map each blade symbol to its grade
    self.blades_to_grades_dict = {}
    for igrade, grade in enumerate(self.blades):
        for blade in grade:
            self.blades_to_grades_dict[blade] = igrade
    if not self.is_ortho:
        # a non-orthogonal basis needs a distinct set of base symbols
        self.bases = []
        self._all_bases_lst = []
        for grade_index in self.indexes:
            bases = []
            for base_index in grade_index:
                base_symbol = self._build_basis_base_symbol(base_index)
                bases.append(base_symbol)
                self._all_bases_lst.append(base_symbol)
            self.bases.append(bases)
        self.bases_to_indexes = []
        self.indexes_to_bases = []
        for (index, base) in zip(self._all_indexes_lst, self._all_bases_lst):
            self.bases_to_indexes.append((base, index))
            self.indexes_to_bases.append((index, base))
        self.bases_to_indexes_dict = OrderedDict(self.bases_to_indexes)
        self.indexes_to_bases_dict = OrderedDict(self.indexes_to_bases)
        self.bases_to_grades_dict = {}
        for igrade, grade in enumerate(self.bases):
            for base in grade:
                self.bases_to_grades_dict[base] = igrade
    # superscripts used for printing blades
    if self.coords is None:
        base0 = str(self.basis[0])
        if '_' in base0:
            sub_index = base0.index('_')
            self.basis_super_scripts = [str(base)[sub_index + 1:] for base in self.basis]
        else:
            self.basis_super_scripts = [str(i + 1) for i in self.n_range]
    else:
        self.basis_super_scripts = [str(coord) for coord in self.coords]
    self.blade_super_scripts = []
    for grade_index in self.indexes:
        super_scripts = []
        for base_index in grade_index:
            super_scripts.append(''.join([self.basis_super_scripts[i]
                                          for i in base_index]))
        self.blade_super_scripts.append(super_scripts)
    if self.debug:
        printer.oprint('indexes', self.indexes, 'list(indexes)', self._all_indexes_lst,
                       'blades', self.blades, 'list(blades)', self._all_blades_lst,
                       'blades_to_indexes_dict', self.blades_to_indexes_dict,
                       'indexes_to_blades_dict', self.indexes_to_blades_dict,
                       'blades_to_grades_dict', self.blades_to_grades_dict,
                       'blade_super_scripts', self.blade_super_scripts)
        if not self.is_ortho:
            printer.oprint('bases', self.bases, 'list(bases)', self._all_bases_lst,
                           'bases_to_indexes_dict', self.bases_to_indexes_dict,
                           'indexes_to_bases_dict', self.indexes_to_bases_dict,
                           'bases_to_grades_dict', self.bases_to_grades_dict)
    # create the Mv wrappers
    self._all_mv_blades_lst = [
        mv.Mv(obj, ga=self)
        for obj in self._all_blades_lst
    ]
    self.mv_basis = [
        mv.Mv(obj, ga=self)
        for obj in self.basis
    ]
    # TODO[gh-64]: For compatibility with old behavior, the public
    # properties do not include the scalar. We should consider making the
    # breaking change such that they do.
    self.indexes_lst = self._all_indexes_lst[1:]
    self.blades_lst = self._all_blades_lst[1:]
    self.mv_blades_lst = self._all_mv_blades_lst[1:]
    if not self.is_ortho:
        self.bases_lst = self._all_bases_lst[1:]
def _build_basis_product_tables(self):
    """
    For the different products of geometric algebra bases/blades
    initialize auto-updating of bases/blades product lists. For
    orthogonal bases all basis product lists are generated on the
    fly using functions and the base and blade representations
    are identical. For a non-orthogonal basis the multiplication
    table for the geometric product is pre-calculated for base pairs.
    The tables for all other products (including the geometric
    product) are calculated on the fly and updated and are for blade
    pairs.

    All tables are of the form::

        [(blade1*blade2, f(blade1, blade1)), ...]
    """
    self.mul_table_dict = lazy_dict({}, f_value=self.geometric_product_basis_blades)  # Geometric product (*) of blades
    if not self.is_ortho:
        self._build_non_orthogonal_mul_table()  # Fully populated geometric product (*) multiplication table
        self._build_base_blade_conversions()  # Generates conversion dictionaries between bases and blades
    self.wedge_table_dict = lazy_dict({}, f_value=self.wedge_product_basis_blades)  # Outer product (^)
    # All three (|,<,>) types of contractions use the same generation function
    # self.dot_product_basis_blades. The type of dictionary entry generated depend
    # on self.dot_mode = '|', '<', or '>' as set in self.dot.
    if self.is_ortho:
        dot_product_basis_blades = self.dot_product_basis_blades
    else:
        dot_product_basis_blades = self.non_orthogonal_dot_product_basis_blades
    self.dot_table_dict = lazy_dict({}, f_value=functools.partial(dot_product_basis_blades, mode='|'))
    self.left_contract_table_dict = lazy_dict({}, f_value=functools.partial(dot_product_basis_blades, mode='<'))
    self.right_contract_table_dict = lazy_dict({}, f_value=functools.partial(dot_product_basis_blades, mode='>'))
    if self.debug:
        print('Exit _build_basis_product_tables.\n')
    return
def _build_connection(self):
    """Initialize the empty caches used for connection (curvilinear) terms."""
    # Partial derivatives of multivector bases multiplied (*,^,|,<,>)
    # on left and right (True and False) by reciprocal basis vectors.
    self.connect = {(op, side): []
                    for op in ('*', '^', '|', '<', '>')
                    for side in (True, False)}
    # Partial derivatives of multivector bases
    self._dbases = {}
    return
######## Functions for Calculation products of blades/bases ########
#******************** Geometric Product (*) ***********************#
def geometric_product_basis_blades(self, blade12):
    """Geometric (*) product of a pair of basis blades ``(blade1, blade2)``.

    For an orthogonal basis the combined index list is insertion-sorted
    while tracking the permutation sign, and each repeated index
    contracts to the diagonal metric coefficient ``g[i, i]``.  For a
    non-orthogonal basis the blades are converted to base
    representation, multiplied via the precomputed base table, and
    converted back to blade representation.
    """
    # geometric (*) product for orthogonal basis
    if self.is_ortho:
        (blade1, blade2) = blade12
        sign1, index1 = self.blades_to_indexes_dict[blade1]
        sign2, index2 = self.blades_to_indexes_dict[blade2]
        blade_index = list(index1 + index2)
        repeats = []
        sgn = sign1 * sign2
        for i in range(1, len(blade_index)):
            # insertion sort; every adjacent transposition flips the sign
            save = blade_index[i]
            j = i
            while j > 0 and blade_index[j - 1] > save:
                sgn = -sgn
                blade_index[j] = blade_index[j - 1]
                j -= 1
            blade_index[j] = save
            if blade_index[j] == blade_index[j - 1]:
                repeats.append(save)
        result = S(sgn)
        for i in repeats:
            # remove both copies of the repeated index: e_i*e_i = g[i,i]
            blade_index.remove(i)
            blade_index.remove(i)
            result *= self.g[i, i]
        if len(blade_index) > 0:
            result *= self.indexes_to_blades_dict[tuple(blade_index)]
        return result
    else:
        (blade1, blade2) = blade12
        base1 = self.blade_to_base_rep(blade1)
        base2 = self.blade_to_base_rep(blade2)
        base12 = expand(base1 * base2)
        # substitute precomputed base*base products
        base12 = nc_subs(base12, self.basic_mul_keys, self.basic_mul_values)
        return self.base_to_blade_rep(base12)
def reduce_basis(self, blst):
    """
    Repetitively applies reduce_basis_loop to blst
    product representation until normal form is
    realized for non-orthogonal basis.

    Returns ``(coefs, index_lists)`` such that the geometric product of
    the basis vectors indexed by ``blst`` equals
    ``sum(coef * base(index))`` over the pairs.
    """
    blst = list(blst)
    if blst == []:  # blst represents scalar
        blst_coef = [1]
        blst_expand = [[]]
        return blst_coef, blst_expand
    blst_expand = [blst]
    blst_coef = [1]
    blst_flg = [False]
    # reduce until all blst revise flgs are True
    while not reduce(operator.and_, blst_flg):
        for i in range(len(blst_flg)):
            if not blst_flg[i]:  # keep revising if revise flg is False
                tmp = Ga.reduce_basis_loop(self.g, blst_expand[i])
                if isinstance(tmp, bool):
                    blst_flg[i] = tmp  # revision of blst_expand[i] complete
                elif len(tmp) == 3:  # blst_expand[i] contracted
                    # repeated index contracted to metric coefficient tmp[0]
                    blst_coef[i] = tmp[0] * blst_coef[i]
                    blst_expand[i] = tmp[1]
                    blst_flg[i] = tmp[2]
                else:  # blst_expand[i] revised
                    # adjacent swap: negate this term and append the
                    # contraction term produced by the swap
                    blst_coef[i] = -blst_coef[i]
                    # if revision force one more pass in case revision
                    # causes repeated index previous to revised pair of
                    # indexes
                    blst_flg[i] = False
                    blst_expand[i] = tmp[3]
                    blst_coef.append(-blst_coef[i] * tmp[0])
                    blst_expand.append(tmp[1])
                    blst_flg.append(tmp[2])
    # merge duplicate index lists, summing their coefficients
    new_blst_coef = []
    new_blst_expand = []
    for (coef, xpand) in zip(blst_coef, blst_expand):
        if xpand in new_blst_expand:
            i = new_blst_expand.index(xpand)
            new_blst_coef[i] += coef
        else:
            new_blst_expand.append(xpand)
            new_blst_coef.append(coef)
    return new_blst_coef, new_blst_expand
@staticmethod
def reduce_basis_loop(g, blst):
    r"""
    blst is a list of integers :math:`[i_{1},...,i_{r}]` representing the geometric
    product of r basis vectors :math:`a_{{i_1}}*...*a_{{i_r}}`. :meth:`reduce_basis_loop`
    searches along the list :math:`[i_{1},...,i_{r}]` until it finds :math:`i_{j} = i_{j+1}`
    and in this case contracts the list, or if :math:`i_{j} > i_{j+1}` it revises
    the list (:math:`\sim i_{j}` means remove :math:`i_{j}` from the list)

    * Case 1: If :math:`i_{j} = i_{j+1}`, return
      :math:`a_{i_{j}}^2` and
      :math:`[i_{1},..,\sim i_{j},\sim i_{j+1},...,i_{r}]`

    * Case 2: If :math:`i_{j} > i_{j+1}`, return
      :math:`a_{i_{j}}.a_{i_{j+1}}`,
      :math:`[i_{1},..,\sim i_{j},\sim i_{j+1},...,i_{r}]`, and
      :math:`[i_{1},..,i_{j+1},i_{j},...,i_{r}]`

    The return value is either ``True`` (already in normal order), a
    3-tuple ``(coef, blst, done_flag)`` (contraction), or a 4-tuple
    ``(coef, contracted_blst, done_flag, swapped_blst)`` (revision);
    the caller dispatches on the length.
    """
    nblst = len(blst)  # number of basis vectors
    if nblst <= 1:
        return True  # a scalar or vector is already reduced
    for jstep in range(1, nblst):
        istep = jstep - 1
        if blst[istep] == blst[jstep]:  # basis vector index is repeated
            i = blst[istep]  # save basis vector index
            if len(blst) > 2:
                blst = blst[:istep] + blst[jstep + 1:]  # contract blst
            else:
                blst = []
            if len(blst) <= 1 or jstep == nblst - 1:
                blst_flg = True  # revision of blst is complete
            else:
                blst_flg = False  # more revision needed
            return g[i, i], blst, blst_flg
        if blst[istep] > blst[jstep]:  # blst not in normal order
            blst1 = blst[:istep] + blst[jstep + 1:]  # contract blst
            a1 = 2 * g[blst[jstep], blst[istep]]  # coef of contraction
            blst = blst[:istep] + [blst[jstep]] + [blst[istep]] + blst[jstep + 1:]  # revise blst
            if len(blst1) <= 1:
                blst1_flg = True  # revision of blst is complete
            else:
                blst1_flg = False  # more revision needed
            return a1, blst1, blst1_flg, blst
    return True  # revision complete, blst in normal order
#******************* Outer/wedge (^) product **********************#
@staticmethod
def blade_reduce(lst):
sgn = 1
for i in range(1, len(lst)):
save = lst[i]
j = i
while j > 0 and lst[j - 1] > save:
sgn = -sgn
lst[j] = lst[j - 1]
j -= 1
lst[j] = save
if lst[j] == lst[j - 1]:
return 0, None
return sgn, lst
def wedge_product_basis_blades(self, blade12):  # blade12 = blade1*blade2
    """Outer (^) product of a pair of basis blades.

    Valid for both orthogonal and non-orthogonal bases, since the wedge
    of blades depends only on the index sets.
    """
    blade1, blade2 = blade12
    sign1, index1 = self.blades_to_indexes_dict[blade1]
    sign2, index2 = self.blades_to_indexes_dict[blade2]
    combined = list(index1 + index2)
    if len(combined) > self.n:
        # more factors than dimensions forces a repeated index
        return 0
    sgn, wedge12 = Ga.blade_reduce(combined)
    if sgn == 0:
        return 0  # repeated basis vector annihilates the wedge
    return sgn * sign1 * sign2 * self.indexes_to_blades_dict[tuple(wedge12)]
#****** Dot (|) product, left (<) and right (>) contractions ******#
def dot_product_basis_blades(self, blade12, mode):
    """Dot (|), left (<) and right (>) contraction of two basis blades
    for an *orthogonal* basis.

    The target grade is fixed from the operand grades by ``mode``; the
    combined index list is then bubble-sorted with sign tracking while
    repeated indexes contract to metric coefficients.  The result is
    zero unless the final blade has exactly the target grade.
    """
    # dot (|), left (<), and right (>) products
    # dot product for orthogonal basis
    (blade1, blade2) = blade12
    sign1, index1 = self.blades_to_indexes_dict[blade1]
    sign2, index2 = self.blades_to_indexes_dict[blade2]
    index = list(index1 + index2)
    grade1 = len(index1)
    grade2 = len(index2)
    if mode == '|':
        grade = abs(grade1 - grade2)
    elif mode == '<':
        grade = grade2 - grade1
        if grade < 0:
            return 0
    elif mode == '>':
        grade = grade1 - grade2
        if grade < 0:
            return 0
    n = len(index)
    sgn = sign1 * sign2
    result = 1
    ordered = False
    while n > grade:
        ordered = True
        i2 = 1
        while i2 < n:
            i1 = i2 - 1
            index1 = index[i1]
            index2 = index[i2]
            if index1 == index2:
                # repeated index contracts to g[i,i] and shortens the list
                n -= 2
                if n < grade:
                    return 0
                result *= self.g[index1, index1]
                index = index[:i1] + index[i2 + 1:]
            elif index1 > index2:
                # adjacent swap toward normal order flips the sign
                ordered = False
                index[i1] = index2
                index[i2] = index1
                sgn = -sgn
                i2 += 1
            else:
                i2 += 1
        if ordered:
            break  # sorted with no contraction possible this pass
    if n > grade:
        return 0  # could not reduce down to the target grade
    else:
        if index == []:
            return sgn * result
        else:
            return sgn * result * self.indexes_to_blades_dict[tuple(index)]
def non_orthogonal_dot_product_basis_blades(self, blade12, mode):  # blade12 = (blade1,blade2)
    """Dot (|), left (<) and right (>) contraction of two basis blades
    for a *non-orthogonal* basis.

    The full geometric product is computed via the base representation
    and the grade component required by ``mode`` is extracted.
    """
    # dot product of basis blades if basis vectors are non-orthogonal
    # inner (|), left (<), and right (>) products of basis blades
    # blade12 is the sympy product of two basis blades
    (blade1, blade2) = blade12
    # Need base rep for blades since that is all we can multiply
    base1 = self.blade_expansion_dict[blade1]
    base2 = self.blade_expansion_dict[blade2]
    # geometric product of basis blades
    base12 = self.mul(base1, base2)
    # blade rep of geometric product
    blade12 = self.base_to_blade_rep(base12)
    # decompose geometric product by grades
    grade_dict = self.grade_decomposition(blade12)
    # grades of input blades
    grade1 = self.blades_to_grades_dict[blade1]
    grade2 = self.blades_to_grades_dict[blade2]
    if mode == '|':
        grade_dot = abs(grade2 - grade1)
        if grade_dot in grade_dict:
            return grade_dict[grade_dot]
        else:
            return zero
    elif mode == '<':
        grade_contract = grade2 - grade1
        if grade_contract in grade_dict:
            return grade_dict[grade_contract]
        else:
            return zero
    elif mode == '>':
        grade_contract = grade1 - grade2
        if grade_contract in grade_dict:
            return grade_dict[grade_contract]
        else:
            return zero
    else:
        raise ValueError('"' + str(mode) + '" not allowed '
                         'dot mode in non_orthogonal_dot_basis')
############# Non-Orthogonal Tables and Dictionaries ###############
def _build_non_orthogonal_mul_table(self):
    """Precompute the geometric-product table ``base1*base2 -> expansion``
    for every pair of base symbols of a non-orthogonal basis."""
    keys = []
    values = []
    table = []
    for b1 in self._all_bases_lst:
        for b2 in self._all_bases_lst:
            key = b1 * b2
            value = self.non_orthogonal_bases_products((b1, b2))
            table.append((key, value))
            keys.append(key)
            values.append(value)
    self.basic_mul_keys = keys
    self.basic_mul_values = values
    self.basic_mul_table = table
    self.basic_mul_table_dict = OrderedDict(table)
    if self.debug:
        print('basic_mul_table =\n', self.basic_mul_table)
    return
def non_orthogonal_bases_products(self, base12):  # base12 = (base1,base2)
    """Geometric product of two base symbols for non-orthogonal basis
    vectors, returned as a sympy expression (scalar part plus a linear
    combination of base symbols)."""
    # geometric product of bases for non-orthogonal basis vectors
    (base1, base2) = base12
    # concatenate the two index tuples and reduce to normal form
    index = self.bases_to_indexes_dict[base1] + self.bases_to_indexes_dict[base2]
    (coefs, indexes) = self.reduce_basis(index)
    s = 0
    if [] in indexes:  # extract scalar part from multivector expansion
        iscalar = indexes.index([])
        s += coefs[iscalar]
        del indexes[iscalar]
        del coefs[iscalar]
    for (coef, index) in zip(coefs, indexes):
        s += coef * self.indexes_to_bases_dict[tuple(index)]
    return s
def _build_base_blade_conversions(self):
    """Build the dictionaries that convert between the blade and base
    representations of a non-orthogonal algebra
    (``blade_expansion_dict`` and ``base_expansion_dict``)."""
    blade_expansion = []
    blade_index = []
    # expand blade basis in terms of base basis
    for blade in self._all_blades_lst:
        sign, index = self.blades_to_indexes_dict[blade]
        grade = len(index)
        if grade <= 1:
            # scalars and vectors are identical in both representations
            blade_expansion.append(blade)
            blade_index.append(index)
        else:
            # build recursively: a ^ A_r from the already-expanded A_r
            a = self.indexes_to_blades_dict[(index[0],)]
            Aexpand = blade_expansion[blade_index.index(index[1:])]
            # Formula for outer (^) product of a vector and grade-r multivector
            # a^A_{r} = (a*A + (-1)^{r}*A*a)/2
            # The following evaluation takes the most time for setup; it is due to
            # the substitution required for the multiplications
            a_W_A = half * (self.basic_mul(a, Aexpand) - ((-1) ** grade) * self.basic_mul(Aexpand, a))
            blade_index.append(index)
            blade_expansion.append(expand(a_W_A))
    self.blade_expansion = blade_expansion
    self.blade_expansion_dict = OrderedDict(list(zip(self._all_blades_lst, blade_expansion)))
    if self.debug:
        print('blade_expansion_dict =', self.blade_expansion_dict)
    # expand base basis in terms of blade basis
    base_expand = []
    for (base, blade, index) in zip(self._all_bases_lst, self._all_blades_lst, self._all_indexes_lst):
        grade = len(index)
        if grade <= 1:
            base_expand.append((base, base))
        else:  # back substitution of tridiagonal system
            tmp = self.blade_expansion_dict[blade]
            tmp = tmp.subs(base, -blade)
            tmp = -tmp.subs(base_expand)
            base_expand.append((base, expand(tmp)))
    self.base_expand = base_expand
    self.base_expansion_dict = OrderedDict(base_expand)
    if self.debug:
        print('base_expansion_dict =', self.base_expansion_dict)
    return
def base_to_blade_rep(self, A):
    """Convert A from base to blade representation; a no-op for an
    orthogonal basis where both representations coincide."""
    if self.is_ortho:
        return A
    return nc_subs(expand(A), self.base_expand)
def blade_to_base_rep(self, A):
    """Convert A from blade to base representation; a no-op for an
    orthogonal basis where both representations coincide."""
    if self.is_ortho:
        return A
    return nc_subs(expand(A), self._all_blades_lst, self.blade_expansion)
###### Products (*,^,|,<,>) for multivector representations ########
def basic_mul(self, A, B):  # geometric product (*) of base representations
    """Geometric product of two expressions in *base* representation.

    This is the only multiplicative operation that assumes its operands
    are already in base representation.
    """
    product = nc_subs(expand(A * B), self.basic_mul_keys, self.basic_mul_values)
    return expand(product)
def Mul(self, A, B, mode='*'):
    """Unified product dispatcher: '*' geometric, '^' outer, and
    '|', '<', '>' contractions."""
    if mode == '*':
        return self.mul(A, B)
    if mode == '^':
        return self.wedge(A, B)
    return self._dot(A, B, mode=mode)
def mul(self, A, B):
    """Geometric (*) product of blade-representation expressions A and B."""
    if A == 0 or B == 0:
        return 0
    return update_and_substitute(A, B, self.mul_table_dict)
def wedge(self, A, B):
    """Outer (^) product of A and B, both assumed to be in blade
    representation; identical for orthogonal and non-orthogonal bases."""
    if A == 0 or B == 0:
        return 0
    return update_and_substitute(A, B, self.wedge_table_dict)
def _dot(self, A, B, mode):
    """Shared implementation of the three contraction products,
    dispatched on ``mode``: '|' (Hestenes dot), '<' (left contraction),
    '>' (right contraction)."""
    if A == 0 or B == 0:
        return 0
    if mode == '|':  # Hestenes dot product
        # Hestenes dot discards the scalar parts of both operands
        A = self.remove_scalar_part(A)
        B = self.remove_scalar_part(B)
        return update_and_substitute(A, B, self.dot_table_dict)
    elif mode == '<' or mode == '>':
        r"""
        Let :math:`A = a + A'` and :math:`B = b + B'` where :math:`a` and
        :math:`b` are the scalar parts of :math:`A` and :math:`B`, and
        :math:`A'` and :math:`B'` are the remaining parts of :math:`A` and
        :math:`B`. Then we have:

        .. math::
            (a+A') \rfloor (b+B') &= a(b+B') + A' \rfloor B' \\
            (a+A') \lfloor (b+B') &= b(a+A') + A' \lfloor B'

        We use these relations to reduce :math:`A \rfloor B` (``A<B``) and
        :math:`A \lfloor B` (``A>B``).
        """
        (a, Ap) = self.split_multivector(A)  # Ap = A'
        (b, Bp) = self.split_multivector(B)  # Bp = B'
        if mode == '<':  # Left contraction
            if Ap != 0 and Bp != 0:  # Neither nc part of A or B is zero
                prod = update_and_substitute(Ap, Bp, self.left_contract_table_dict)
                return prod + a * B
            else:  # Ap or Bp is zero
                return a * B
        elif mode == '>':  # Right contraction
            if Ap != 0 and Bp != 0:  # Neither nc part of A or B is zero
                prod = update_and_substitute(Ap, Bp, self.right_contract_table_dict)
                return prod + b * A
            else:  # Ap or Bp is zero
                return b * A
    else:
        raise ValueError('"' + str(mode) + '" not a legal mode in dot')
def hestenes_dot(self, A, B):
    r"""Compute the Hestenes dot product, :math:`A \bullet B`."""
    return self._dot(A, B, mode='|')
def left_contract(self, A, B):
    r"""Compute the left contraction, :math:`A \rfloor B`."""
    return self._dot(A, B, mode='<')
def right_contract(self, A, B):
    r"""Compute the right contraction, :math:`A \lfloor B`."""
    return self._dot(A, B, mode='>')
def dot(self, A, B):
    r"""
    Inner product ``|``, ``<``, or ``>``.

    The :attr:`dot_mode` attribute determines which of these is used.
    """
    return self._dot(A, B, mode=self.dot_mode)
######################## Helper Functions ##########################
def grade_decomposition(self, A):
    """
    Returns dictionary with grades as keys of grades of A. For example
    if A is a rotor the dictionary keys would be 0 and 2. For a vector
    the single key would be 1. Note A can be input as a multivector or
    a multivector object (sympy expression). If A is a multivector the
    dictionary entries are multivectors. If A is a sympy expression
    (in this case a linear combination of non-commutative symbols) the
    dictionary entries are sympy expressions.
    """
    if isinstance(A, mv.Mv):
        # normalize to blade representation and work on the sympy object
        A.blade_rep()
        A.characterise_Mv()
        Aobj = expand(A.obj)
    else:
        Aobj = A
    coefs, blades = metric.linear_expand(Aobj)
    grade_dict = {}
    for coef, blade in zip(coefs, blades):
        if blade == one:
            grade = 0
            term = coef
        else:
            grade = self.blades_to_grades_dict[blade]
            term = coef * blade
        if grade in grade_dict:
            grade_dict[grade] += term
        else:
            grade_dict[grade] = term
    if isinstance(A, mv.Mv):
        # wrap each graded part back up as a multivector
        for grade in grade_dict:
            grade_dict[grade] = self.mv(grade_dict[grade])
    return grade_dict
def split_multivector(self, A):
    """
    Split multivector :math:`A` into commutative part :math:`a` and
    non-commutative part :math:`A'` so that :math:`A = a+A'`.

    (The original duplicated the Symbol branch and the generic branch;
    they were identical and have been merged.)
    """
    if isinstance(A, mv.Mv):
        return self.split_multivector(A.obj)
    A = expand(A)
    if isinstance(A, Add):
        a = sum(x for x in A.args if x.is_commutative)
        Ap = sum(x for x in A.args if not x.is_commutative)
        return (a, Ap)
    # Symbol or any other sympy expression
    if A.is_commutative:
        return (A, 0)
    return (0, A)
def remove_scalar_part(self, A):
    """
    Return non-commutative part (sympy object) of ``A.obj``.

    (The original duplicated the Symbol branch and the generic branch;
    they were identical and have been merged.)
    """
    if isinstance(A, mv.Mv):
        return self.remove_scalar_part(A.obj)
    if isinstance(A, Add):
        A = expand(A)
        return sum(x for x in A.args if not x.is_commutative)
    # Symbol or any other sympy expression
    return 0 if A.is_commutative else A
def scalar_part(self, A):
    """Return the commutative (scalar) part of the multivector or sympy
    expression ``A``.

    (The original duplicated the Symbol branch and the generic branch —
    they were identical and have been merged — and carried a dead
    triple-quoted string of commented-out code, now removed.)
    """
    if isinstance(A, mv.Mv):
        return self.scalar_part(A.obj)
    A = expand(A)
    if isinstance(A, Add):
        return sum(x for x in A.args if x.is_commutative)
    # Symbol or any other sympy expression
    return A if A.is_commutative else 0
def grades(self, A):  # Return list of grades present in A
    """Return the sorted list of grades present in the sympy
    multivector expression ``A``."""
    A = self.base_to_blade_rep(A)
    A = expand(A)
    # collect the distinct blade symbols appearing in A
    blades = []
    if isinstance(A, Add):
        args = A.args
    else:
        args = [A]
    for term in args:
        # args_cnc() splits a term into (commutative, non-commutative) factors
        blade = term.args_cnc()[1]
        l_blade = len(blade)
        if l_blade > 0:
            if blade[0] not in blades:
                blades.append(blade[0])
        else:
            # purely commutative term contributes the scalar blade
            if one not in blades:
                blades.append(one)
    grade_lst = []
    if one in blades:
        grade_lst.append(0)
    for blade in blades:
        if blade != one:
            grade = self.blades_to_grades_dict[blade]
            if grade not in grade_lst:
                grade_lst.append(grade)
    grade_lst.sort()
    return(grade_lst)
    def reverse(self, A):
        """
        Return the reverse of the sympy multivector expression ``A``.

        Terms are bucketed by grade; each grade-``r`` bucket picks up the
        reversion sign :math:`(-1)^{r(r-1)/2}`.
        """
        A = expand(A)
        blades = {}
        if isinstance(A, Add):
            args = A.args
        else:
            if A.is_commutative:
                return A
            else:
                args = [A]
        for term in args:
            if term.is_commutative:
                # Scalar terms accumulate in the grade-0 bucket.
                if 0 in blades:
                    blades[0] += term
                else:
                    blades[0] = term
            else:
                _c, nc = term.args_cnc()
                blade = nc[0]
                grade = self.blades_to_grades_dict[blade]
                if grade in blades:
                    blades[grade] += term
                else:
                    blades[grade] = term
        s = zero
        for grade in blades:
            # Reversion sign: +1 when r(r-1)/2 is even, -1 otherwise.
            if (grade * (grade - 1)) / 2 % 2 == 0:
                s += blades[grade]
            else:
                s -= blades[grade]
        return s
def get_grade(self, A, r): # Return grade r of A, <A>_{r}
if r == 0:
return self.scalar_part(A)
coefs, bases = metric.linear_expand(A)
s = zero
for (coef, base) in zip(coefs, bases):
if base != one and self.blades_to_grades_dict[base] == r:
s += coef * base
return s
def even_odd(self, A, even=True): # Return even or odd part of A
A = expand(A)
if A.is_commutative and even:
return A
if isinstance(A, Add):
args = A.args
else:
args = [A]
s = zero
for term in args:
if term.is_commutative:
if even:
s += term
else:
c, nc = term.args_cnc(split_1=False)
blade = nc[0]
grade = self.blades_to_grades_dict[blade]
if even and grade % 2 == 0:
s += Mul._from_args(c) * blade
elif not even and grade % 2 == 1:
s += Mul._from_args(c) * blade
return s
##################### Multivector derivatives ######################
    def _build_reciprocal_basis(self,gsym):
        r"""
        Calculate reciprocal basis vectors :math:`e^{j}` where

        .. math:: e^{j}\cdot e_{k} = \delta_{k}^{j}

        and :math:`\delta_{k}^{j}` is the kronecker delta.  We use the formula
        from Doran and Lasenby 4.94:

        .. math:: e^{j} = (-1)^{j-1}e_{1} \wedge ...e_{j-1} \wedge e_{j+1} \wedge ... \wedge e_{n}*E_{n}^{-1}

        where :math:`E_{n} = e_{1}\wedge ...\wedge e_{n}`.

        For non-orthogonal basis :math:`e^{j}` is not normalized and must be
        divided by :math:`E_{n}^2` (``self.e_sq``) in any relevant calculations.

        If ``gsym`` is not None then :math:`E_{n}^2` is not evaluated, but is represented
        as :math:`E_{n}^2 = (-1)^{n*(n-1)/2}\operatorname{det}(g)` where
        :math:`\operatorname{det}(g)` the determinant
        of the metric tensor can be general scalar function of the coordinates.
        """
        if self.debug:
            print('Enter _build_reciprocal_basis.\n')
        if self.is_ortho:
            # Orthogonal metric: e^{i} = e_{i}/g_{ii} directly.
            self.r_basis = [self.basis[i] / self.g[i, i] for i in self.n_range]
        else:
            if gsym is not None:
                # Define name of metric tensor determinant as sympy symbol
                if printer.GaLatexPrinter.latex_flg:
                    det_str = r'\det\left ( ' + gsym + r'\right ) '
                else:
                    det_str = 'det(' + gsym + ')'
                # Define square of pseudo-scalar in terms of metric tensor
                # determinant
                n = self.n
                if self.coords is None:  # Metric tensor is constant
                    self.e_sq = (-1) ** (n*(n - 1)/2) * Symbol(det_str,real=True)
                else:  # Metric tensor is function of coordinates
                    n = len(self.coords)
                    self.e_sq = (-1) ** (n*(n - 1)/2) * Function(det_str,real=True)(*self.coords)
            else:
                # Evaluate E_n**2 explicitly from the pseudo-scalar.
                self.e_sq = simplify((self.e * self.e).obj)
            if self.debug:
                print('E**2 =', self.e_sq)
        duals = list(self.blades[self.n - 1])
        # After reverse, the j-th of them is exactly e_{1}^...e_{j-1}^e_{j+1}^...^e_{n}
        duals.reverse()
        sgn = 1
        self.r_basis = []
        for dual in duals:
            dual_base_rep = self.blade_to_base_rep(dual)
            # {E_n}^{-1} = \frac{E_n}{{E_n}^{2}}
            # r_basis_j = sgn * duals[j] * E_n so it's not normalized, missing a factor of {E_n}^{-2}
            """
            print('blades list =',self._all_blades_lst)
            print('debug =',expand(self.base_to_blade_rep(self.mul(sgn * dual_base_rep, self.e.obj))))
            print('collect arg =',expand(self.base_to_blade_rep(self.mul(sgn * dual_base_rep, self.e.obj))))
            """
            r_basis_j = metric.collect(expand(self.base_to_blade_rep(self.mul(sgn * dual_base_rep, self.e.obj))), self._all_blades_lst)
            self.r_basis.append(r_basis_j)
            # sgn = (-1)**{j-1}
            sgn = -sgn
        if self.debug:
            printer.oprint('E', self.e, 'E**2', self.e_sq, 'unnormalized reciprocal basis =\n', self.r_basis)
            print('reciprocal basis test =')
            for ei in self.basis:
                for ej in self.r_basis:
                    ei_dot_ej = self.hestenes_dot(ei, ej)
                    if ei_dot_ej == zero:
                        print('e_{i}|e_{j} = ' + str(ei_dot_ej))
                    else:
                        print('e_{i}|e_{j} = ' + str(expand(ei_dot_ej / self.e_sq)))
        # Dictionary to represent reciprocal basis vectors as expansions
        # in terms of basis vectors.
        self.r_basis_dict = {}
        self.r_basis_mv = []
        for (r_symbol, r_base) in zip(self.r_symbols, self.r_basis):
            self.r_basis_dict[r_symbol] = r_base
            self.r_basis_mv.append(mv.Mv(r_base, ga=self))
        # Replace reciprocal basis vectors with expansion in terms of
        # basis vectors in derivatives of basis vectors
        if self.connect_flg:
            for x_i in self.n_range:
                for jb in self.n_range:
                    if not self.is_ortho:
                        self.de[x_i][jb] = metric.Simp.apply(self.de[x_i][jb].subs(self.r_basis_dict) / self.e_sq)
                    else:
                        self.de[x_i][jb] = metric.Simp.apply(self.de[x_i][jb].subs(self.r_basis_dict))
        g_inv = eye(self.n)
        # Calculate inverse of metric tensor, g^{ij}
        for i in self.n_range:
            rx_i = self.r_symbols[i]
            for j in self.n_range:
                rx_j = self.r_symbols[j]
                if j >= i:
                    g_inv[i, j] = self.hestenes_dot(self.r_basis_dict[rx_i], self.r_basis_dict[rx_j])
                    if not self.is_ortho:
                        g_inv[i, j] /= self.e_sq**2
                else:
                    # g^{ij} is symmetric; reuse the upper-triangular entry.
                    g_inv[i, j] = g_inv[j, i]
        self.g_inv = simplify(g_inv)
        if self.debug:
            print('reciprocal basis dictionary =\n', self.r_basis_dict)
        # True is for left derivative and False is for right derivative
        self.deriv = {('*', True): [], ('^', True): [], ('|', True): [],
                      ('<', True): [], ('>', True): [], ('*', False): [],
                      ('^', False): [], ('|', False): [], ('<', False): [],
                      ('>', False): []}
        return
def er_blade(self, er, blade, mode='*', left=True):
r"""
Product (``*``, ``^``, ``|``, ``<``, ``>``) of reciprocal basis vector
'er' and basis
blade 'blade' needed for application of derivatives to
multivectors. left is 'True' means 'er' is multiplying 'blade'
on the left, 'False' is for 'er' multiplying 'blade' on the
right. Symbolically for left geometric product:
.. math:: e^{j}*(e_{i_{1}}\wedge ...\wedge e_{i_{r}})
"""
if mode == '*':
base = self.blade_to_base_rep(blade)
if left:
return self.base_to_blade_rep(self.mul(er, base))
else:
return self.base_to_blade_rep(self.mul(base, er))
elif mode == '^':
if left:
return self.wedge(er, blade)
else:
return self.wedge(blade, er)
else:
if left:
return self._dot(er, blade, mode=mode)
else:
return self._dot(blade, er, mode=mode)
    def blade_derivation(self, blade, ib):
        """
        Calculate derivatives of basis blade 'blade' using derivative of
        basis vectors calculated by metric. 'ib' is the index of the
        coordinate the derivation is with respect to or the coordinate
        symbol. These are required for the calculation of the geometric
        derivatives in curvilinear coordinates or for more general
        manifolds.

        'blade_derivation' caches the results in a dictionary, ``self._dbases``,
        so that the derivation for a given blade and coordinate is never
        calculated more than once.

        Note that the return value is not a multivector, but a linear
        combination of basis blade symbols.
        """
        if isinstance(ib, int):
            coord = self.coords[ib]
        else:
            # 'ib' was given as a coordinate symbol: recover its index.
            coord = ib
            ib = self.coords.index(coord)
        key = (coord, blade)
        if key in self._dbases:
            return self._dbases[key]
        index = self.blades_to_indexes_dict[blade]
        grade = len(index)
        # Apply the product rule over the wedge factors of the blade.
        if grade == 1:
            db = self.de[ib][index[0]]
        elif grade == 2:
            db = self.wedge(self.de[ib][index[0]], self.basis[index[1]]) + \
                self.wedge(self.basis[index[0]], self.de[ib][index[1]])
        else:
            db = self.wedge(self.de[ib][index[0]], self.indexes_to_blades[index[1:]]) + \
                self.wedge(self.indexes_to_blades[index[:-1]], self.de[ib][index[-1]])
            for i in range(1, grade - 1):
                db += self.wedge(self.wedge(self.indexes_to_blades[index[:i]], self.de[ib][index[i]]),
                                 self.indexes_to_blades[index[i + 1:]])
        self._dbases[key] = db
        return db
def pdop(self,*args):
return mv.Pdop(args,ga=self)
    def pDiff(self, A, coord):
        """
        Compute partial derivative of multivector function 'A' with
        respect to coordinate 'coord'.

        'coord' may also be a list of per-coordinate differentiation orders,
        in which case the derivatives are applied repeatedly in coordinate
        order.  Returns a multivector.
        """
        if isinstance(coord, list):
            # Perform multiple partial differentiation where coord is
            # a list of differentiation orders for each coordinate and
            # the coordinate is determined by the list index. If the
            # element in the list is zero no differentiation is to be
            # performed for that coordinate index.
            dA = copy.copy(A)  # Make copy of A
            for i in self.n_range:
                x = self.coords[i]
                xn = coord[i]
                if xn > 0:  # Differentiate with respect to coordinate x
                    for _j in range(xn):  # xn > 1 multiple differentiation
                        dA = self.pDiff(dA, x)
            return dA
        # Simple partial differentiation, once with respect to a single
        # variable, but including case of non-constant basis vectors
        dA = self.mv(expand(diff(A.obj, coord)))
        if self.connect_flg and self.dslot == -1 and not A.is_scalar():  # Basis blades are function of coordinates
            B = self.remove_scalar_part(A)
            if B != zero:
                if isinstance(B, Add):
                    args = B.args
                else:
                    args = [B]
                for term in args:
                    if not term.is_commutative:
                        # Add coefficient * d(blade)/d(coord) for each
                        # non-commutative term (product rule contribution).
                        c, nc = term.args_cnc(split_1=False)
                        x = self.blade_derivation(nc[0], coord)
                        if x != zero:
                            if len(c) == 1:
                                dA += c[0] * x
                            elif len(c) == 0:
                                dA += x
                            else:
                                dA += reduce(operator.mul, c, one) * x
        return dA
    def grad_sqr(self, A, grad_sqr_mode, mode, left):
        r"""
        Calculate :math:`(grad *_{1} grad) *_{2} A` or :math:`A *_{2} (grad *_{1} grad)`
        where ``grad_sqr_mode`` = :math:`*_{1}` = ``*``, ``^``, or ``|`` and
        ``mode`` = :math:`*_{2}` = ``*``, ``^``, or ``|``.
        """
        (Sop, Bop) = Ga.DopFop[(grad_sqr_mode, mode)]
        # NOTE(review): the bare print() calls in this method look like
        # leftover debug output — confirm before removing.
        print('(Sop, Bop) =', Sop, Bop)
        print('grad_sqr:A =', A)
        s = zero
        if Sop is False and Bop is False:
            return s
        # First derivatives of A with respect to every coordinate.
        dA_i = []
        for coord_i in self.coords:
            dA_i.append(self.pDiff(A, coord_i))
        print('dA_i =', dA_i)
        if Sop:
            # Scalar contribution: g^{ij} d_i d_j A plus the scalar
            # connection term when basis vectors vary with the coordinates.
            for i in self.n_range:
                coord_i = self.coords[i]
                if self.connect_flg:
                    s += self.grad_sq_scl_connect[coord_i] * dA_i[i]
                for j in self.n_range:
                    d2A_j = self.pDiff(dA_i[i], self.coords[j])
                    s += self.g_inv[i, j] * d2A_j
        if Bop and self.connect_flg:
            # Multivector connection contribution, applied on the side
            # selected by 'left' with the product selected by 'mode'.
            for i in self.n_range:
                coord_i = self.coords[i]
                print('mode =', mode)
                print('dA_i[i] =', dA_i[i])
                if left:
                    if mode == '|':
                        s += self.dot(self.grad_sq_mv_connect[coord_i], dA_i[i])
                    if mode == '^':
                        s += self.wedge(self.grad_sq_mv_connect[coord_i], dA_i[i])
                    if mode == '*':
                        s += self.mul(self.grad_sq_mv_connect[coord_i], dA_i[i])
                else:
                    if mode == '|':
                        s += self.dot(dA_i[i], self.grad_sq_mv_connect[coord_i])
                    if mode == '^':
                        s += self.wedge(dA_i[i], self.grad_sq_mv_connect[coord_i])
                    if mode == '*':
                        s += self.mul(dA_i[i], self.grad_sq_mv_connect[coord_i])
        return s
    def connection(self, rbase, key_base, mode, left):
        """
        Compute required multivector connections of the form
        (Einstein summation convention) :math:`e^{j}*(D_{j}e_{i_{1}...i_{r}})`
        and :math:`(D_{j}e_{i_{1}...i_{r}})*e^{j}` where :math:`*` could be
        ``*``, ``^``, ``|``, ``<``, or ``>`` depending upon the mode, and
        :math:`e^{j}` are reciprocal basis vectors.
        """
        mode_key = (mode, left)
        # NOTE(review): 'keys' and 'key' below are purely local bookkeeping;
        # the cache in self.connect is never consulted before recomputing, so
        # C is recomputed and appended on every call — confirm intent.
        keys = [i for i, j in self.connect[mode_key]]
        if left:
            key = rbase * key_base
        else:
            key = key_base * rbase
        if key not in keys:
            keys.append(key)
        C = zero
        # Sum e^{j} * D_{j}(blade) (or the right-sided variant) over all
        # coordinates j.
        for ib in self.n_range:
            x = self.blade_derivation(key_base, ib)
            if self.norm:
                x /= self.e_norm[ib]
            C += self.er_blade(self.r_basis[ib], x, mode, left)
        # Update connection dictionaries
        self.connect[mode_key].append((key, C))
        return C
    def ReciprocalFrame(self, basis, mode='norm'):
        """
        Return the reciprocal frame of the vectors in ``basis`` as a tuple.

        With ``mode='norm'`` each reciprocal vector is divided by
        :math:`E^2` (the squared pseudo-scalar of the frame); otherwise the
        unnormalized vectors are returned with :math:`E^2` appended as the
        last element of the tuple.
        """
        dim = len(basis)
        indexes = tuple(range(dim))
        index = [()]
        # Build index tuples for the two highest grades (dim-1 and dim).
        for i in indexes[-2:]:
            index.append(tuple(combinations(indexes, i + 1)))
        MFbasis = []
        for igrade in index[-2:]:
            grade = []
            for iblade in igrade:
                # Wedge together the selected basis vectors to form a blade.
                blade = self.mv('1', 'scalar')
                for ibasis in iblade:
                    blade ^= basis[ibasis]
                blade = blade.trigsimp()
                grade.append(blade)
            MFbasis.append(grade)
        E = MFbasis[-1][0]
        E_sq = trigsimp((E * E).scalar())
        duals = copy.copy(MFbasis[-2])
        duals.reverse()
        sgn = 1
        rbasis = []
        # Reciprocal vectors are (+/-) dual * E with alternating sign.
        for dual in duals:
            recpv = (sgn * dual * E).trigsimp()
            rbasis.append(recpv)
            sgn = -sgn
        if mode != 'norm':
            rbasis.append(E_sq)
        else:
            for i in range(dim):
                rbasis[i] = rbasis[i] / E_sq
        return tuple(rbasis)
def Mlt(self,*args,**kwargs):
return lt.Mlt(args[0], self, *args[1:], **kwargs)
class Sm(Ga):
    """
    Submanifold is a geometric algebra defined on a submanifold of a
    base geometric algebra defined on a manifold. The submanifold is
    defined by a mapping from the coordinates of the base manifold to
    the coordinates of the submanifold. The inputs required to define
    the submanifold are:

    Parameters
    ----------
    u :
        (``args[0]``) The coordinate map defining the submanifold
        which is a list of functions of coordinates of the base
        manifold in terms of the coordinates of the submanifold.
        for example if the manifold is a unit sphere then -
        ``u = [sin(u)*cos(v),sin(u)*sin(v),cos(u)]``.

        Alternatively (``args[0]``) is a parametric vector function
        of the basis vectors of the base manifold. The
        coefficients of the bases are functions of the coordinates
        (``args[1]``). In this case we would call the submanifold
        a "vector" manifold and additional characteristics of the
        manifold can be calculated since we have given an explicit
        embedding of the manifold in the base manifold.

    coords :
        (``args[1]``) The coordinate list for the submanifold, for
        example ``[u, v]``.

    Notes
    -----
    See 'init_slots' for possible other inputs. The 'Ga' member function
    'sm' can be used to instantiate the submanifold via (o3d is the base
    manifold)::

        coords = (u,v) = symbols('u,v',real=True)
        sm_example = o3d.sm([sin(u)*cos(v),sin(u)*sin(v),cos(u)],coords)

        (eu,ev) = sm_example.mv()
        sm_grad = sm_example.grad
    """

    # Allowed keyword arguments: (default value, description).
    init_slots = {'debug': (False, 'True for debug output'),
                  'root': ('e', 'Root symbol for basis vectors'),
                  'name': (None, 'Name of submanifold'),
                  'norm': (False, 'Normalize basis if True'),
                  'ga': (None, 'Base Geometric Algebra')}

    def __init__(self, *args, **kwargs):
        """
        Build the submanifold algebra from a coordinate map (or vector
        embedding) ``args[0]`` and a coordinate list ``args[1]``; the base
        algebra must be supplied via the ``ga`` keyword.
        """
        # Temporarily restore the plain printer while the submanifold is
        # being instantiated; it is redirected again before Ga.__init__.
        if printer.GaLatexPrinter.latex_flg:
            printer.GaLatexPrinter.restore()
            Ga.restore = True

        kwargs = metric.test_init_slots(Sm.init_slots, **kwargs)
        u = args[0]  # Coordinate map or vector embedding to define submanifold
        coords = args[1]  # List of coordinates
        ga = kwargs['ga']  # base geometric algebra
        if ga is None:
            raise ValueError('Base geometric algebra must be specified for submanifold.')

        g_base = ga.g_raw
        n_base = ga.n
        n_sub = len(coords)

        # Construct names of basis vectors
        root = kwargs['root']
        """
        basis_str = ''
        for x in coords:
            basis_str += root + '_' + str(x) + ' '
        basis_str = basis_str[:-1]
        """

        #print 'u =', u

        if isinstance(u,mv.Mv): #Define vector manifold
            self.ebasis = []
            for coord in coords:
                #Partial derivation of vector function to get basis vectors
                self.ebasis.append(u.diff(coord))
            #print 'sm ebasis =', self.ebasis
            self.g = []
            for b1 in self.ebasis:
                #Metric tensor from dot products of basis vectors
                tmp = []
                for b2 in self.ebasis:
                    tmp.append(b1 | b2)
                self.g.append(tmp)
            # NOTE(review): in this branch the local name `g` is never bound,
            # yet `Ga.__init__(..., g=g, ...)` below uses it — this path
            # looks like it would raise NameError.  Confirm (perhaps
            # `g=self.g` was intended).

        else:
            if len(u) != n_base:
                raise ValueError('In submanifold dimension of base manifold' +
                                 ' not equal to dimension of mapping.')
            # Jacobian dx/du of the coordinate map.
            dxdu = []
            for x_i in u:
                tmp = []
                for u_j in coords:
                    tmp.append(diff(x_i, u_j))
                dxdu.append(tmp)

            #print 'dxdu =', dxdu

            sub_pairs = list(zip(ga.coords, u))

            # Construct induced metric tensor from the coordinate map:
            # g_ij = (dx^k/du^i)(dx^l/du^j) g_base_kl, pulled back onto the
            # submanifold coordinates.
            g = eye(n_sub)
            n_range = list(range(n_sub))
            for i in n_range:
                for j in n_range:
                    s = zero
                    for k in ga.n_range:
                        for l in ga.n_range:
                            s += dxdu[k][i] * dxdu[l][j] * g_base[k, l].subs(sub_pairs)
                    g[i, j] = trigsimp(s)

        norm = kwargs['norm']
        debug = kwargs['debug']

        if Ga.restore:  # restore printer to appropriate enhanced mode after sm is instantiated
            printer.GaLatexPrinter.redirect()

        Ga.__init__(self, root, g=g, coords=coords, norm=norm, debug=debug)

        if isinstance(u,mv.Mv): #Construct additional functions for vector manifold
            #self.r_basis_mv under construction
            pass

        self.ga = ga
        self.u = u

        if debug:
            print('Exit Sm.__init__()')

    def vpds(self):
        """
        Construct and return the left and right vector derivative operators
        (``self.vpd``, ``self.rvpd``) built from the reciprocal basis.
        """
        if not self.is_ortho:
            r_basis = [x / self.e_sq for x in self.r_basis_mv]
        else:
            r_basis = self.r_basis_mv
        if self.norm:
            # NOTE(review): this normalization rebuilds r_basis from
            # self.r_basis_mv, discarding the e_sq scaling applied above for
            # non-orthogonal bases — confirm whether that is intended.
            r_basis = [x / e_norm for (x, e_norm) in zip(self.r_basis_mv, self.e_norm)]

        pdx = [self.Pdiffs[x] for x in self.coords]

        self.vpd = mv.Dop(r_basis, pdx, ga=self)
        self.rvpd = mv.Dop(r_basis, pdx, ga=self, cmpflg=True)
        return self.vpd, self.rvpd
| 37.284579 | 139 | 0.544411 |
8176062410f4649ea9d2cc86c51c6d81bb223963 | 7,579 | py | Python | app/adjutorium-covid19-public/model_training/train_mortality_model.py | loramf/mlforhealthlabpub | aa5a42a4814cf69c8223f27c21324ee39d43c404 | [
"BSD-3-Clause"
] | 171 | 2021-02-12T10:23:19.000Z | 2022-03-29T01:58:52.000Z | app/adjutorium-covid19-public/model_training/train_mortality_model.py | loramf/mlforhealthlabpub | aa5a42a4814cf69c8223f27c21324ee39d43c404 | [
"BSD-3-Clause"
] | 4 | 2021-06-01T08:18:33.000Z | 2022-02-20T13:37:30.000Z | app/adjutorium-covid19-public/model_training/train_mortality_model.py | loramf/mlforhealthlabpub | aa5a42a4814cf69c8223f27c21324ee39d43c404 | [
"BSD-3-Clause"
] | 93 | 2021-02-10T03:21:59.000Z | 2022-03-30T19:10:37.000Z |
import numpy as np
import pandas as pd
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from xgboost import XGBClassifier
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
import pickle
import scipy.stats
import sys
import warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
from data.data_processing import *
def monotonize(curve):
    """
    Return a copy of ``curve`` made non-decreasing by replacing each value
    with the running maximum of all values up to and including it
    (i.e. the cumulative maximum).

    The previous implementation recomputed ``np.max(curve[:i])`` at every
    index (O(n^2)); this version keeps a running maximum (O(n)) while
    producing identical values.  Handles empty and single-element input.
    """
    new_curve = []
    running_max = None
    for value in curve:
        # np.maximum keeps the numeric semantics of the original code.
        running_max = value if running_max is None else np.maximum(running_max, value)
        new_curve.append(running_max)
    return new_curve
# def prepare_CHESS_data(data_collection_date, feature_groups):
#
# CHESS_data, feature_names = get_data(curr_date=data_collection_date)
#
# for u in range(len(feature_groups)):
#
# if u==0:
#
# feature_set = feature_names[feature_groups[u]]
#
# else:
#
# feature_set = feature_set + feature_names[feature_groups[u]]
#
#
# imputer = IterativeImputer(max_iter=10, random_state=0)
#
# imputer.fit(CHESS_data[feature_set])
#
# X = np.float32(imputer.transform(CHESS_data[feature_set]))
#
# return CHESS_data, X, feature_set, feature_names, imputer
def prepare_CHESS_data(data_collection_date, feature_groups, imputer=None):
    """
    Load the CHESS dataset and build an imputed feature matrix.

    Parameters:
        data_collection_date: passed to ``get_data`` as ``curr_date``.
        feature_groups: keys into the ``feature_names`` dict returned by
            ``get_data``; the corresponding feature lists are concatenated.
        imputer: an already-fitted imputer to reuse; when None a new
            ``IterativeImputer`` is fitted on the selected columns.

    Returns:
        (CHESS_data, X, feature_set, feature_names, imputer, aux_data)
        where ``X`` is the float32 imputed matrix for ``feature_set``.
    """
    CHESS_data, feature_names, aux_data = get_data(curr_date=data_collection_date)

    # Concatenate the feature lists of every requested group.  Unlike the
    # previous index-based loop, this also works (empty selection) when
    # feature_groups is empty instead of raising UnboundLocalError.
    feature_set = []
    for group in feature_groups:
        feature_set += feature_names[group]

    if imputer is None:
        imputer = IterativeImputer(max_iter=10, random_state=0)
        imputer.fit(CHESS_data[feature_set])

    X = np.float32(imputer.transform(CHESS_data[feature_set]))

    return CHESS_data, X, feature_set, feature_names, imputer, aux_data
class adjutorium_mortality_model:
    """
    Bootstrap ensemble of per-horizon outcome classifiers on CHESS data.

    For each of ``n_bootstraps`` resamples and each prediction horizon
    (1..number_of_days), a calibrated gradient-boosting classifier is fitted
    to predict whether ``outcome`` occurs within that horizon.  ``predict``
    returns, per sample, the averaged monotonized risk curve over horizons.
    """

    def __init__(self, number_of_days=7, n_bootstraps=100, outcome="Dead", surrogate_outcome="Discharged",
                 outcome_time="Follow up time", data_collection_date="2020-04-04",
                 feature_groups=["Personal info", "Comorbidity info"], train_frac=.8):
        # NOTE(review): feature_groups uses a mutable (list) default
        # argument; it is only read here, but a None default would be safer.
        self.horizons = list(range(1, number_of_days + 1))
        self.n_samples = n_bootstraps
        self.outcome = outcome
        self.surrogate_out = surrogate_outcome
        self.outcome_time = outcome_time
        self.train_frac = train_frac
        # One list of per-horizon calibrated models per bootstrap round.
        self.sampled_models = []
        self.data, self.X, self.feature_set, self.feature_names, self.imputer, _ = prepare_CHESS_data(
            data_collection_date=data_collection_date,
            feature_groups=feature_groups)

    def train(self):
        """Fit ``n_samples`` bootstrap model lists, one calibrated model per horizon."""
        for _ in range(self.n_samples):
            models_ = []
            print("Model number: %s" % str(_+1))
            for horizon in self.horizons:
                print("Prediction horizon: %s days" % str(horizon))
                # Include patients whose outcome is observed or whose
                # follow-up reaches the horizon (to avoid censoring bias).
                if self.surrogate_out is not None:
                    included = list(((self.data[self.outcome]==1) | (self.data[self.surrogate_out]==1) | (self.data[self.outcome_time] >= horizon)))
                else:
                    included = list(((self.data[self.outcome]==1) | (self.data[self.outcome_time] >= horizon)))
                train_size = int(np.floor(self.train_frac * np.sum(included)))
                train_indexes = np.random.choice(np.sum(included), train_size, replace=False)
                test_indexes = list(set(list(range(np.sum(included)))) - set(list(train_indexes)))
                X_ = self.X[included, :]
                # Label: outcome occurred within the horizon.
                Y_ = np.array(((self.data.loc[included, self.outcome]==1) & (self.data.loc[included, self.outcome_time]<=horizon)) * 1)
                X_train = X_[train_indexes, :]
                Y_train = Y_[train_indexes]
                X_test = X_[test_indexes, :]
                Y_test = Y_[test_indexes]
                # replace this with AutoPrognosis
                base_model = GradientBoostingClassifier(n_estimators=150)
                base_model.fit(X_train, Y_train)
                # Calibrate probabilities on the held-out split.
                base_model_ = CalibratedClassifierCV(base_model, cv='prefit')
                base_model_.fit(X_test, Y_test)
                models_.append(base_model_)
            self.sampled_models.append(models_)

    def predict(self, X):
        """Return a list of averaged monotonized risk curves, one per row of ``X``."""
        preds = []
        if len(X.shape)==1:
            X = X.reshape((1, -1))
        for v in range(X.shape[0]):
            # One curve per bootstrap model; average over the ensemble.
            surv_curv = [monotonize([self.sampled_models[u][k].predict_proba(X[v, :].reshape((1,-1)))[:, 1][0] for k in range(len(self.horizons))]) for u in range(len(self.sampled_models))]
            preds.append(np.mean(surv_curv, axis=0))
        return preds
def predict_batch(model, X):
    """
    Vectorised risk-curve prediction for a batch of samples.

    For every bootstrap member of ``model`` the per-horizon positive-class
    probabilities are stacked into an (N, n_horizons) matrix, each row is
    monotonized, and the per-model curves are averaged.  Returns an
    (N, n_horizons) array.
    """
    if len(X.shape) == 1:
        X = X.reshape((1, -1))

    per_model_curves = []
    for bootstrap_models in model.sampled_models:
        # (N, n_horizons): column k holds P(event by horizon k) per sample.
        probs = np.stack(
            [clf.predict_proba(X)[:, 1] for clf in bootstrap_models], axis=1
        )
        # Monotonize each sample's curve along the horizon axis.
        monotone_rows = [np.array(monotonize(row)) for row in probs]
        per_model_curves.append(np.stack(monotone_rows, axis=0))

    # (N, n_horizons, n_models) -> average over the ensemble axis.
    stacked = np.stack(per_model_curves, axis=-1)
    return np.mean(stacked, axis=-1)
if __name__ == "__main__":
    # NOTE(review): this list is defined but never passed to the model
    # constructors below, so all three models fall back to the class default
    # feature groups — confirm whether feature_groups=feature_groups was
    # intended for the death/discharge models.
    feature_groups = ["Personal info", "Ethnicity info", "Comorbidity info", "Lab test info",
                      "Hospitalization info", "Complications info", "Interventions info"]

    # Build bootstrap ensembles for the three outcomes of interest.
    death_model = adjutorium_mortality_model(number_of_days=14, n_bootstraps=20, data_collection_date="14/4/2020")
    discharge_model = adjutorium_mortality_model(number_of_days=14, n_bootstraps=20, data_collection_date="14/4/2020", outcome="Discharged", surrogate_outcome="Dead")
    ICU_model = adjutorium_mortality_model(number_of_days=14, n_bootstraps=20, outcome="ICU Admission",
                                           surrogate_outcome=None, outcome_time="Time to ICU", data_collection_date="14/4/2020",
                                           feature_groups=["Personal info", "Comorbidity info"], train_frac=.8)

    death_model.train()
    discharge_model.train()
    ICU_model.train()

    # Serialize the fitted models for later use by the serving application.
    with open('adjutorium_mortality', 'wb') as handle:
        pickle.dump(death_model, handle)

    with open('adjutorium_discharge', 'wb') as handle:
        pickle.dump(discharge_model, handle)

    with open('adjutorium_icu', 'wb') as handle:
        pickle.dump(ICU_model, handle)
6564a62e4bae5ad350811ce424784e3e035d2525 | 3,227 | py | Python | tg_apicore/settings.py | thorgate/tg-apicore | 5d93ae89efe6537ef0b762dfbc4eafdfdab383cb | [
"ISC"
] | 14 | 2018-02-22T14:27:48.000Z | 2020-05-16T12:51:10.000Z | tg_apicore/settings.py | thorgate/tg-apicore | 5d93ae89efe6537ef0b762dfbc4eafdfdab383cb | [
"ISC"
] | 4 | 2018-03-22T10:41:00.000Z | 2021-11-15T18:24:18.000Z | tg_apicore/settings.py | thorgate/tg-apicore | 5d93ae89efe6537ef0b762dfbc4eafdfdab383cb | [
"ISC"
] | null | null | null | from django.conf import settings
from rest_framework.settings import api_settings
# Default DRF / JSON:API configuration that ``patch_django_settings`` merges
# into the project's Django settings without overriding explicit values.
DEFAULTS = {
    'REST_FRAMEWORK': {
        'EXCEPTION_HANDLER': 'rest_framework_json_api.exceptions.exception_handler',
        'DEFAULT_PAGINATION_CLASS': 'tg_apicore.pagination.CursorPagination',
        'PAGE_SIZE': 50,
        'DEFAULT_PARSER_CLASSES': (
            'tg_apicore.parsers.JSONParser',
            'rest_framework_json_api.parsers.JSONParser',
            'rest_framework.parsers.FormParser',
            'rest_framework.parsers.MultiPartParser'
        ),
        'DEFAULT_RENDERER_CLASSES': (
            'tg_apicore.renderers.JSONRenderer',
            'rest_framework_json_api.renderers.JSONRenderer',
            'rest_framework.renderers.BrowsableAPIRenderer',
        ),
        'DEFAULT_METADATA_CLASS': 'rest_framework_json_api.metadata.JSONAPIMetadata',
        'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',
        'SCHEMA_COERCE_METHOD_NAMES': {},
        'TEST_REQUEST_DEFAULT_FORMAT': 'json',
        'TEST_REQUEST_RENDERER_CLASSES': (
            'tg_apicore.renderers.JSONRenderer',
            'rest_framework.renderers.MultiPartRenderer',
        ),
    },
    'JSON_API_FORMAT_TYPES': 'underscore',
}
INVALID_DRF_CONFIG_MSG = """You must define %(name)s setting in REST_FRAMEWORK settings!
e.g in your settings.py:
REST_FRAMEWORK = {
# other settings...
%(example)s,
}
"""
INVALID_DJANGO_CONFIG_MSG = """You must define %(name)s setting in Django settings!
e.g in your settings.py:
# other settings...
%(example)s
"""
def patch_django_settings():
    """
    Merge ``DEFAULTS`` into the Django settings module.

    Missing top-level settings are added outright; for dict-valued settings
    only the missing keys are filled in, so explicit project configuration
    always wins.  A project can opt out entirely by setting
    ``TG_APICORE_PATCH_DRF_SETTINGS = False``.
    """
    if not getattr(settings, 'TG_APICORE_PATCH_DRF_SETTINGS', True):
        return

    for name, default in DEFAULTS.items():
        existing = getattr(settings, name, None)
        if existing is None:
            setattr(settings, name, default)
        elif isinstance(existing, dict) and isinstance(default, dict):
            for key, value in default.items():
                existing.setdefault(key, value)
def invalid_setting_error(name, example_config, msg_template):
    """Render ``msg_template`` with the setting ``name`` and an ``example`` snippet."""
    substitutions = {
        'name': name,
        'example': example_config,
    }
    return msg_template % substitutions
def invalid_drf_setting_error(name, example_config):
    """Error text for a setting missing from the ``REST_FRAMEWORK`` dict."""
    return invalid_setting_error(name, example_config, msg_template=INVALID_DRF_CONFIG_MSG)
def invalid_django_setting_error(name, example_config):
    """Error text for a setting missing from the top-level Django settings."""
    return invalid_setting_error(name, example_config, msg_template=INVALID_DJANGO_CONFIG_MSG)
def verify_settings():
    """
    Sanity-check the version-related REST framework settings and ensure
    ``settings.API_VERSION_LATEST`` is populated.

    NOTE(review): validation relies on ``assert`` statements, which are
    stripped when Python runs with ``-O`` — confirm whether explicit raises
    would be preferable.
    """
    assert api_settings.ALLOWED_VERSIONS is not None, \
        invalid_drf_setting_error('ALLOWED_VERSIONS', "'ALLOWED_VERSIONS': ('2018-01-01',)")
    assert len(api_settings.ALLOWED_VERSIONS) >= 1
    assert get_latest_version() in api_settings.ALLOWED_VERSIONS, \
        "Value of API_VERSION_LATEST setting is not among REST_FRAMEWORK's ALLOWED_VERSIONS"
    # If the API_VERSION_LATEST setting isn't defined, do it now to make it easier to access via Django settings.
    if not hasattr(settings, 'API_VERSION_LATEST'):
        settings.API_VERSION_LATEST = get_latest_version()
def get_latest_version() -> str:
    """Return ``API_VERSION_LATEST`` if configured (truthy), else the last entry of ``ALLOWED_VERSIONS``."""
    configured = getattr(settings, 'API_VERSION_LATEST', None)
    if configured:
        return configured
    return api_settings.ALLOWED_VERSIONS[-1]
| 31.028846 | 113 | 0.692284 |
a947b20d69892581bac146608df2ad54ada6d532 | 31,371 | py | Python | pymatgen/io/abinit/abitimer.py | wangyusu/pymatgen | a90af2fe71eff15134ca33c6e58f07caba425ae9 | [
"MIT"
] | 2 | 2020-01-28T19:19:15.000Z | 2020-03-30T18:10:32.000Z | pymatgen/io/abinit/abitimer.py | wangyusu/pymatgen | a90af2fe71eff15134ca33c6e58f07caba425ae9 | [
"MIT"
] | 3 | 2021-08-03T17:59:02.000Z | 2021-08-12T00:43:59.000Z | pymatgen/io/abinit/abitimer.py | wangyusu/pymatgen | a90af2fe71eff15134ca33c6e58f07caba425ae9 | [
"MIT"
] | 13 | 2015-03-05T09:42:11.000Z | 2018-08-28T15:22:53.000Z | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides objects for extracting timing data from the ABINIT output files
It also provides tools to analye and to visualize the parallel efficiency.
"""
import collections
import logging
import os
import sys
import numpy as np
from monty.string import is_string, list_strings
from pymatgen.util.num import minloc
from pymatgen.util.plotting import add_fig_kwargs, get_ax_fig_plt
logger = logging.getLogger(__name__)
def alternate(*iterables):
    """
    Interleave the given iterables element-wise, truncating at the shortest:
    [a[0], b[0], ... , a[1], b[1], ..., a[n], b[n] ...]

    >>> alternate([1,4], [2,5], [3,6])
    [1, 2, 3, 4, 5, 6]
    """
    return [item for group in zip(*iterables) for item in group]
class AbinitTimerParserError(Exception):
    """Errors raised by :class:`AbinitTimerParser` while reading timer sections."""
class AbinitTimerParser(collections.abc.Iterable):
"""
Responsible for parsing a list of output files, extracting the timing results
and analyzing the results.
Assume the Abinit output files have been produced with `timopt -1`.
Example:
parser = AbinitTimerParser()
parser.parse(list_of_files)
To analyze all *.abo files withing top, use:
parser, paths, okfiles = AbinitTimerParser.walk(top=".", ext=".abo")
"""
# The markers enclosing the data.
BEGIN_TAG = "-<BEGIN_TIMER"
END_TAG = "-<END_TIMER>"
Error = AbinitTimerParserError
# DEFAULT_MPI_RANK = "0"
@classmethod
def walk(cls, top=".", ext=".abo"):
"""
Scan directory tree starting from top, look for files with extension `ext` and
parse timing data.
Return: (parser, paths, okfiles)
where `parser` is the new object, `paths` is the list of files found and `okfiles`
is the list of files that have been parsed successfully.
(okfiles == paths) if all files have been parsed.
"""
paths = []
for root, dirs, files in os.walk(top):
for f in files:
if f.endswith(ext):
paths.append(os.path.join(root, f))
parser = cls()
okfiles = parser.parse(paths)
return parser, paths, okfiles
    def __init__(self):
        """Initialize an empty parser; call :meth:`parse` to load files."""
        # List of files that have been parsed successfully.
        self._filenames = []

        # timers[filename][mpi_rank] maps each parsed file to the timer
        # extracted for each MPI rank found in that file.
        self._timers = collections.OrderedDict()
def __iter__(self):
return self._timers.__iter__()
    def __len__(self):
        """Number of files with parsed timer data."""
        return len(self._timers)
    @property
    def filenames(self):
        """List of files that have been parsed successfully."""
        return self._filenames
def parse(self, filenames):
"""
Read and parse a filename or a list of filenames.
Files that cannot be opened are ignored. A single filename may also be given.
Return: list of successfully read files.
"""
filenames = list_strings(filenames)
read_ok = []
for fname in filenames:
try:
fh = open(fname) # pylint: disable=R1732
except IOError:
logger.warning("Cannot open file %s" % fname)
continue
try:
self._read(fh, fname)
read_ok.append(fname)
except self.Error as e:
logger.warning("exception while parsing file %s:\n%s" % (fname, str(e)))
continue
finally:
fh.close()
# Add read_ok to the list of files that have been parsed.
self._filenames.extend(read_ok)
return read_ok
def _read(self, fh, fname):
"""Parse the TIMER section"""
if fname in self._timers:
raise self.Error("Cannot overwrite timer associated to: %s " % fname)
def parse_line(line):
"""Parse single line."""
name, vals = line[:25], line[25:].split()
try:
ctime, cfract, wtime, wfract, ncalls, gflops = vals
except ValueError:
# v8.3 Added two columns at the end [Speedup, Efficacity]
ctime, cfract, wtime, wfract, ncalls, gflops, speedup, eff = vals
return AbinitTimerSection(name, ctime, cfract, wtime, wfract, ncalls, gflops)
sections, info, cpu_time, wall_time = None, None, None, None
data = {}
inside, has_timer = 0, False
for line in fh:
# print(line.strip())
if line.startswith(self.BEGIN_TAG):
has_timer = True
sections = []
info = {}
inside = 1
line = line[len(self.BEGIN_TAG) :].strip()[:-1]
info["fname"] = fname
for tok in line.split(","):
key, val = [s.strip() for s in tok.split("=")]
info[key] = val
elif line.startswith(self.END_TAG):
inside = 0
timer = AbinitTimer(sections, info, cpu_time, wall_time)
mpi_rank = info["mpi_rank"]
data[mpi_rank] = timer
elif inside:
inside += 1
line = line[1:].strip()
if inside == 2:
d = dict()
for tok in line.split(","):
key, val = [s.strip() for s in tok.split("=")]
d[key] = float(val)
cpu_time, wall_time = d["cpu_time"], d["wall_time"]
elif inside > 5:
sections.append(parse_line(line))
else:
try:
parse_line(line)
except Exception:
parser_failed = True
if not parser_failed:
raise self.Error("line should be empty: " + str(inside) + line)
if not has_timer:
raise self.Error("%s: No timer section found" % fname)
# Add it to the dict
self._timers[fname] = data
def timers(self, filename=None, mpi_rank="0"):
"""
Return the list of timers associated to the given `filename` and MPI rank mpi_rank.
"""
if filename is not None:
return [self._timers[filename][mpi_rank]]
return [self._timers[filename][mpi_rank] for filename in self._filenames]
    def section_names(self, ordkey="wall_time"):
        """
        Return the names of sections ordered by ordkey.
        For the time being, the values are taken from the first timer only.
        """
        section_names = []

        # FIXME this is not trivial — only the first timer is consulted; the
        # commented-out code below is an abandoned attempt at intersecting
        # the section names of all timers.
        for idx, timer in enumerate(self.timers()):
            if idx == 0:
                section_names = [s.name for s in timer.order_sections(ordkey)]
            # check = section_names
            # else:
            #     new_set = set([s.name for s in timer.order_sections(ordkey)])
            #     section_names.intersection_update(new_set)
            #     check = check.union(new_set)

        # if check != section_names:
        #     print("sections", section_names)
        #     print("check", check)

        return section_names
def get_sections(self, section_name):
    """
    Return the list of sections stored in self.timers() given `section_name`
    A fake section is returned if the timer does not have section_name.
    """
    found = []
    for timer in self.timers():
        # First section matching the name, or None if the timer lacks it.
        match = next((s for s in timer.sections if s.name == section_name), None)
        found.append(match if match is not None else AbinitTimerSection.fake())
    return found
def pefficiency(self):
    """
    Analyze the parallel efficiency.

    Return: :class:`ParallelEfficiency` object.
    """
    timers = self.timers()
    # Number of CPUs employed in each calculation.
    ncpus = [timer.ncpus for timer in timers]
    # Find the minimum number of cpus used and its index in timers.
    min_idx = minloc(ncpus)
    min_ncpus = ncpus[min_idx]
    # Reference timer
    ref_t = timers[min_idx]
    # Compute the parallel efficiency (total and section efficiency)
    peff = {}
    # NOTE(review): ctime_peff is computed from wall_time and wtime_peff from
    # cpu_time, yet they are stored under "cpu_time"/"wall_time" respectively
    # below -- looks swapped; confirm the intended mapping.
    ctime_peff = [(min_ncpus * ref_t.wall_time) / (t.wall_time * ncp) for (t, ncp) in zip(timers, ncpus)]
    wtime_peff = [(min_ncpus * ref_t.cpu_time) / (t.cpu_time * ncp) for (t, ncp) in zip(timers, ncpus)]
    n = len(timers)
    peff["total"] = {}
    peff["total"]["cpu_time"] = ctime_peff
    peff["total"]["wall_time"] = wtime_peff
    # The "total" fractions are 100% by definition.
    peff["total"]["cpu_fract"] = n * [100]
    peff["total"]["wall_fract"] = n * [100]
    for sect_name in self.section_names():
        # print(sect_name)
        ref_sect = ref_t.get_section(sect_name)
        sects = [t.get_section(sect_name) for t in timers]
        try:
            ctime_peff = [(min_ncpus * ref_sect.cpu_time) / (s.cpu_time * ncp) for (s, ncp) in zip(sects, ncpus)]
            wtime_peff = [(min_ncpus * ref_sect.wall_time) / (s.wall_time * ncp) for (s, ncp) in zip(sects, ncpus)]
        except ZeroDivisionError:
            # -1 is used as a sentinel for sections with zero timing in a run.
            ctime_peff = n * [-1]
            wtime_peff = n * [-1]
        assert sect_name not in peff
        peff[sect_name] = {}
        peff[sect_name]["cpu_time"] = ctime_peff
        peff[sect_name]["wall_time"] = wtime_peff
        peff[sect_name]["cpu_fract"] = [s.cpu_fract for s in sects]
        peff[sect_name]["wall_fract"] = [s.wall_fract for s in sects]
    return ParallelEfficiency(self._filenames, min_idx, peff)
def summarize(self, **kwargs):
    """
    Return pandas DataFrame with the most important results stored in the timers.

    The frame has one row per timer plus derived columns:
    ``tot_ncpus`` (mpi_nprocs * omp_nthreads) and ``peff`` (parallel
    efficiency normalized to the run with the fewest total CPUs).
    """
    import pandas as pd

    colnames = [
        "fname",
        "wall_time",
        "cpu_time",
        "mpi_nprocs",
        "omp_nthreads",
        "mpi_rank",
    ]
    # DataFrame.append was removed in pandas 2.0: collect rows first and
    # build the frame in one shot.
    rows = [{k: getattr(timer, k) for k in colnames} for timer in self.timers()]
    frame = pd.DataFrame(rows, columns=colnames)
    frame["tot_ncpus"] = frame["mpi_nprocs"] * frame["omp_nthreads"]
    # Compute parallel efficiency (use the run with min number of cpus to normalize).
    i = frame["tot_ncpus"].values.argmin()
    ref_wtime = frame.iloc[i]["wall_time"]
    ref_ncpus = frame.iloc[i]["tot_ncpus"]
    frame["peff"] = (ref_ncpus * ref_wtime) / (frame["wall_time"] * frame["tot_ncpus"])
    return frame
@add_fig_kwargs
def plot_efficiency(self, key="wall_time", what="good+bad", nmax=5, ax=None, **kwargs):
    """
    Plot the parallel efficiency.

    Args:
        key: Parallel efficiency is computed using the wall_time.
        what: Specifies what to plot: `good` for sections with good parallel efficiency.
            `bad` for sections with bad efficiency. Options can be concatenated with `+`.
        nmax: Maximum number of entries in plot
        ax: matplotlib :class:`Axes` or None if a new figure should be created.

    ================  ====================================================
    kwargs            Meaning
    ================  ====================================================
    linewidth         matplotlib linewidth. Default: 2.0
    markersize        matplotlib markersize. Default: 10
    ================  ====================================================

    Returns:
        `matplotlib` figure
    """
    ax, fig, plt = get_ax_fig_plt(ax=ax)
    # Styling kwargs are popped so they are not forwarded elsewhere.
    lw = kwargs.pop("linewidth", 2.0)
    msize = kwargs.pop("markersize", 10)
    what = what.split("+")
    timers = self.timers()
    peff = self.pefficiency()
    n = len(timers)
    xx = np.arange(n)
    # ax.set_color_cycle(['g', 'b', 'c', 'm', 'y', 'k'])
    ax.set_prop_cycle(color=["g", "b", "c", "m", "y", "k"])
    lines, legend_entries = [], []
    # Plot sections with good efficiency.
    if "good" in what:
        good = peff.good_sections(key=key, nmax=nmax)
        for g in good:
            # print(g, peff[g])
            yy = peff[g][key]
            (line,) = ax.plot(xx, yy, "-->", linewidth=lw, markersize=msize)
            lines.append(line)
            legend_entries.append(g)
    # Plot sections with bad efficiency.
    if "bad" in what:
        bad = peff.bad_sections(key=key, nmax=nmax)
        for b in bad:
            # print(b, peff[b])
            yy = peff[b][key]
            (line,) = ax.plot(xx, yy, "-.<", linewidth=lw, markersize=msize)
            lines.append(line)
            legend_entries.append(b)
    # Add total if not already done
    if "total" not in legend_entries:
        yy = peff["total"][key]
        (total_line,) = ax.plot(xx, yy, "r", linewidth=lw, markersize=msize)
        lines.append(total_line)
        legend_entries.append("total")
    ax.legend(lines, legend_entries, loc="best", shadow=True)
    # ax.set_title(title)
    ax.set_xlabel("Total_NCPUs")
    ax.set_ylabel("Efficiency")
    ax.grid(True)
    # Set xticks and labels.
    labels = ["MPI=%d, OMP=%d" % (t.mpi_nprocs, t.omp_nthreads) for t in timers]
    ax.set_xticks(xx)
    ax.set_xticklabels(labels, fontdict=None, minor=False, rotation=15)
    return fig
@add_fig_kwargs
def plot_pie(self, key="wall_time", minfract=0.05, **kwargs):
    """
    Plot pie charts of the different timers.

    Args:
        key: Keyword used to extract data from timers.
        minfract: Don't show sections whose relative weight is less that minfract.

    Returns:
        `matplotlib` figure
    """
    timers = self.timers()
    n = len(timers)
    # Make square figures and axes
    import matplotlib.pyplot as plt
    from matplotlib.gridspec import GridSpec
    fig = plt.gcf()
    # One subplot per timer, stacked vertically in a single column.
    gspec = GridSpec(n, 1)
    for idx, timer in enumerate(timers):
        ax = plt.subplot(gspec[idx, 0])
        ax.set_title(str(timer))
        # Each timer draws its own pie onto the provided axis.
        timer.pie(ax=ax, key=key, minfract=minfract, show=False)
    return fig
@add_fig_kwargs
def plot_stacked_hist(self, key="wall_time", nmax=5, ax=None, **kwargs):
    """
    Plot stacked histogram of the different timers.

    Args:
        key: Keyword used to extract data from the timers. Only the first `nmax`
            sections with largest value are shown.
        nmax: Maximum number of sections to show. Other entries are grouped together
            in the `others` section.
        ax: matplotlib :class:`Axes` or None if a new figure should be created.

    Returns:
        `matplotlib` figure
    """
    ax, fig, plt = get_ax_fig_plt(ax=ax)
    mpi_rank = "0"
    timers = self.timers(mpi_rank=mpi_rank)
    n = len(timers)
    names, values = [], []
    rest = np.zeros(n)
    # Sections are already ordered by `key`, so the first nmax are the largest.
    for idx, sname in enumerate(self.section_names(ordkey=key)):
        sections = self.get_sections(sname)
        svals = np.asarray([s.__dict__[key] for s in sections])
        if idx < nmax:
            names.append(sname)
            values.append(svals)
        else:
            # Everything beyond the first nmax is accumulated into one bar.
            rest += svals
    # NOTE(review): the "others" bar is appended even when no section
    # overflowed nmax (it is then all zeros) -- confirm this is intended.
    names.append("others (nmax=%d)" % nmax)
    values.append(rest)
    # The dataset is stored in values. Now create the stacked histogram.
    ind = np.arange(n)  # the locations for the groups
    width = 0.35  # the width of the bars
    colors = nmax * ["r", "g", "b", "c", "k", "y", "m"]
    bars = []
    bottom = np.zeros(n)
    for idx, vals in enumerate(values):
        color = colors[idx]
        bar_ = ax.bar(ind, vals, width, color=color, bottom=bottom)
        bars.append(bar_)
        bottom += vals
    ax.set_ylabel(key)
    ax.set_title("Stacked histogram with the %d most important sections" % nmax)
    ticks = ind + width / 2.0
    labels = ["MPI=%d, OMP=%d" % (t.mpi_nprocs, t.omp_nthreads) for t in timers]
    ax.set_xticks(ticks)
    ax.set_xticklabels(labels, rotation=15)
    # Add legend.
    ax.legend([bar_[0] for bar_ in bars], names, loc="best")
    return fig
def plot_all(self, show=True, **kwargs):
    """
    Call all plot methods provided by the parser.

    Returns the list of produced matplotlib figures, in a fixed order.
    """
    return [
        self.plot_stacked_hist(show=show),
        self.plot_efficiency(show=show),
        self.plot_pie(show=show),
    ]
class ParallelEfficiency(dict):
    """
    Store results concerning the parallel efficiency of the job.

    Maps section name -> {"cpu_time": [...], "wall_time": [...],
    "cpu_fract": [...], "wall_fract": [...]}, one list entry per run.
    """

    def __init__(self, filenames, ref_idx, *args, **kwargs):
        """
        Args:
            filenames: List of filenames
            ref_idx: Index of the Reference time (calculation done with the smallest number of cpus)
        """
        self.update(*args, **kwargs)
        self.filenames = filenames
        self._ref_idx = ref_idx

    def _order_by_peff(self, key, criterion, reverse=True):
        # Aggregation function used to rank sections across runs.
        # NOTE(review): stored on self as a side effect -- a local would do.
        self.estimator = {
            "min": min,
            "max": max,
            "mean": lambda items: sum(items) / len(items),
        }[criterion]
        data = []
        for (sect_name, peff) in self.items():
            # Ignore values where we had a division by zero.
            if all(v != -1 for v in peff[key]):
                values = peff[key][:]
                # print(sect_name, values)
                if len(values) > 1:
                    # Drop the reference run: its efficiency is 1 by construction.
                    ref_value = values.pop(self._ref_idx)
                    assert ref_value == 1.0
                data.append((sect_name, self.estimator(values)))
        data.sort(key=lambda t: t[1], reverse=reverse)
        return tuple(sect_name for (sect_name, e) in data)

    def totable(self, stop=None, reverse=True):
        """
        Return table (list of lists) with timing results.

        Args:
            stop: Include results up to stop. None for all
            reverse: Put items with highest wall_time in first positions if True.
        """
        osects = self._order_by_peff("wall_time", criterion="mean", reverse=reverse)
        if stop is not None:
            osects = osects[:stop]
        n = len(self.filenames)
        # `alternate` (defined elsewhere in this module) interleaves the two lists.
        table = [["AbinitTimerSection"] + alternate(self.filenames, n * ["%"])]
        for sect_name in osects:
            peff = self[sect_name]["wall_time"]
            fract = self[sect_name]["wall_fract"]
            vals = alternate(peff, fract)
            table.append([sect_name] + ["%.2f" % val for val in vals])
        return table

    def good_sections(self, key="wall_time", criterion="mean", nmax=5):
        """
        Return first `nmax` sections with best value of key `key` using criterion `criterion`.
        """
        good_sections = self._order_by_peff(key, criterion=criterion)
        return good_sections[:nmax]

    def bad_sections(self, key="wall_time", criterion="mean", nmax=5):
        """
        Return first `nmax` sections with worst value of key `key` using criterion `criterion`.
        """
        bad_sections = self._order_by_peff(key, criterion=criterion, reverse=False)
        return bad_sections[:nmax]
class AbinitTimerSection:
    """Record with the timing results associated to a section of code."""

    # Field names split by type; FIELDS fixes the canonical output order.
    STR_FIELDS = ["name"]
    NUMERIC_FIELDS = [
        "wall_time",
        "wall_fract",
        "cpu_time",
        "cpu_fract",
        "ncalls",
        "gflops",
    ]
    FIELDS = tuple(STR_FIELDS + NUMERIC_FIELDS)

    @classmethod
    def fake(cls):
        """Return a fake section. Mainly used to fill missing entries if needed."""
        # Use cls so subclasses get an instance of their own type.
        return cls("fake", 0.0, 0.0, 0.0, 0.0, -1, 0.0)

    def __init__(self, name, cpu_time, cpu_fract, wall_time, wall_fract, ncalls, gflops):
        """
        Args:
            name: Name of the sections.
            cpu_time: CPU time in seconds.
            cpu_fract: Percentage of CPU time.
            wall_time: Wall-time in seconds.
            wall_fract: Percentage of wall-time.
            ncalls: Number of calls
            gflops: Gigaflops.
        """
        self.name = name.strip()
        self.cpu_time = float(cpu_time)
        self.cpu_fract = float(cpu_fract)
        self.wall_time = float(wall_time)
        self.wall_fract = float(wall_fract)
        self.ncalls = int(ncalls)
        self.gflops = float(gflops)

    def to_tuple(self):
        """Convert object to tuple (field order given by FIELDS)."""
        return tuple(self.__dict__[at] for at in AbinitTimerSection.FIELDS)

    def to_dict(self):
        """Convert object to dictionary."""
        return {at: self.__dict__[at] for at in AbinitTimerSection.FIELDS}

    def to_csvline(self, with_header=False):
        """Return a string with data in CSV format. Add header if `with_header`"""
        string = ""
        if with_header:
            string += "# " + " ".join(at for at in AbinitTimerSection.FIELDS) + "\n"
        string += ", ".join(str(v) for v in self.to_tuple()) + "\n"
        return string

    def __str__(self):
        """String representation."""
        # Stringify every value explicitly: all fields except `name` are
        # numeric, so the original `a + " = " + self.__dict__[a]` raised
        # TypeError (str + float concatenation).
        return ",".join("%s = %s" % (a, self.__dict__[a]) for a in AbinitTimerSection.FIELDS)
class AbinitTimer:
    """Container class storing the timing results of a single run."""

    def __init__(self, sections, info, cpu_time, wall_time):
        """
        Args:
            sections: List of sections
            info: Dictionary with extra info.
            cpu_time: Cpu-time in seconds.
            wall_time: Wall-time in seconds.
        """
        # Store sections and names
        self.sections = tuple(sections)
        self.section_names = tuple(s.name for s in self.sections)
        self.info = info
        self.cpu_time = float(cpu_time)
        self.wall_time = float(wall_time)
        self.mpi_nprocs = int(info["mpi_nprocs"])
        self.omp_nthreads = int(info["omp_nthreads"])
        self.mpi_rank = info["mpi_rank"].strip()
        self.fname = info["fname"].strip()

    def __str__(self):
        string = "file=%s, wall_time=%.1f, mpi_nprocs=%d, omp_nthreads=%d" % (
            self.fname,
            self.wall_time,
            self.mpi_nprocs,
            self.omp_nthreads,
        )
        # string += ", rank = " + self.mpi_rank
        return string

    @property
    def ncpus(self):
        """Total number of CPUs employed."""
        return self.mpi_nprocs * self.omp_nthreads

    def get_section(self, section_name):
        """Return section associated to `section_name`.

        Raises ValueError if the name is unknown (the original wrapped the
        lookup in a no-op try/except that simply re-raised).
        """
        idx = self.section_names.index(section_name)
        sect = self.sections[idx]
        assert sect.name == section_name
        return sect

    def to_csv(self, fileobj=sys.stdout):
        """Write data on file fileobj using CSV format.

        `fileobj` may be a path (the file is opened and closed here) or an
        open file-like object.
        """
        openclose = is_string(fileobj)
        if openclose:
            fileobj = open(fileobj, "w")  # pylint: disable=R1732
        try:
            for idx, section in enumerate(self.sections):
                fileobj.write(section.to_csvline(with_header=(idx == 0)))
            fileobj.flush()
        finally:
            # Close only files we opened ourselves; previously an exception
            # while writing leaked the handle.
            if openclose:
                fileobj.close()

    def to_table(self, sort_key="wall_time", stop=None):
        """Return a table (list of lists) with timer data"""
        table = [
            list(AbinitTimerSection.FIELDS),
        ]
        ord_sections = self.order_sections(sort_key)
        if stop is not None:
            ord_sections = ord_sections[:stop]
        for osect in ord_sections:
            row = [str(item) for item in osect.to_tuple()]
            table.append(row)
        return table

    # Maintain old API
    totable = to_table

    def get_dataframe(self, sort_key="wall_time", **kwargs):
        """
        Return a pandas DataFrame with entries sorted according to `sort_key`.
        """
        import pandas as pd

        # DataFrame.append was removed in pandas 2.0: build all rows first.
        rows = [osect.to_dict() for osect in self.order_sections(sort_key)]
        frame = pd.DataFrame(rows, columns=list(AbinitTimerSection.FIELDS))
        # Monkey patch: attach run metadata directly to the frame object.
        frame.info = self.info
        frame.cpu_time = self.cpu_time
        frame.wall_time = self.wall_time
        frame.mpi_nprocs = self.mpi_nprocs
        frame.omp_nthreads = self.omp_nthreads
        frame.mpi_rank = self.mpi_rank
        frame.fname = self.fname
        return frame

    def get_values(self, keys):
        """
        Return a list of values associated to a particular list of keys.

        With a single string key, returns one list (one value per section);
        with a list of keys, returns one such list per key.
        """
        if is_string(keys):
            return [s.__dict__[keys] for s in self.sections]
        return [[s.__dict__[k] for s in self.sections] for k in keys]

    def names_and_values(self, key, minval=None, minfract=None, sorted=True):
        """
        Select the entries whose value[key] is >= minval or whose fraction[key] is >= minfract
        Return the names of the sections and the corresponding values.
        """
        values = self.get_values(key)
        names = self.get_values("name")
        new_names, new_values = [], []
        other_val = 0.0
        if minval is not None:
            assert minfract is None
            for n, v in zip(names, values):
                if v >= minval:
                    new_names.append(n)
                    new_values.append(v)
                else:
                    other_val += v
            # Everything below the threshold is lumped into one entry.
            new_names.append("below minval " + str(minval))
            new_values.append(other_val)
        elif minfract is not None:
            assert minval is None
            total = self.sum_sections(key)
            for n, v in zip(names, values):
                if v / total >= minfract:
                    new_names.append(n)
                    new_values.append(v)
                else:
                    other_val += v
            new_names.append("below minfract " + str(minfract))
            new_values.append(other_val)
        else:
            # all values
            new_names, new_values = names, values
        if sorted:
            # Sort new_values and rearrange new_names.
            nandv = list(zip(new_names, new_values))
            nandv.sort(key=lambda t: t[1])
            new_names, new_values = [n[0] for n in nandv], [n[1] for n in nandv]
        return new_names, new_values

    def _reduce_sections(self, keys, operator):
        # Apply `operator` (e.g. sum) to the per-section values of `keys`.
        return operator(self.get_values(keys))

    def sum_sections(self, keys):
        """Sum value of keys."""
        return self._reduce_sections(keys, sum)

    def order_sections(self, key, reverse=True):
        """Sort sections according to the value of key."""
        return sorted(self.sections, key=lambda s: s.__dict__[key], reverse=reverse)

    @add_fig_kwargs
    def cpuwall_histogram(self, ax=None, **kwargs):
        """
        Plot histogram with cpu- and wall-time on axis `ax`.

        Args:
            ax: matplotlib :class:`Axes` or None if a new figure should be created.

        Returns: `matplotlib` figure
        """
        ax, fig, plt = get_ax_fig_plt(ax=ax)
        nk = len(self.sections)
        ind = np.arange(nk)  # the x locations for the groups
        width = 0.35  # the width of the bars
        cpu_times = self.get_values("cpu_time")
        rects1 = plt.bar(ind, cpu_times, width, color="r")
        wall_times = self.get_values("wall_time")
        rects2 = plt.bar(ind + width, wall_times, width, color="y")
        # Add ylabel and title
        ax.set_ylabel("Time (s)")
        # plt.title('CPU-time and Wall-time for the different sections of the code')
        ticks = self.get_values("name")
        ax.set_xticks(ind + width, ticks)
        ax.legend((rects1[0], rects2[0]), ("CPU", "Wall"), loc="best")
        return fig

    @add_fig_kwargs
    def pie(self, key="wall_time", minfract=0.05, ax=None, **kwargs):
        """
        Plot pie chart for this timer.

        Args:
            key: Keyword used to extract data from the timer.
            minfract: Don't show sections whose relative weight is less that minfract.
            ax: matplotlib :class:`Axes` or None if a new figure should be created.

        Returns: `matplotlib` figure
        """
        ax, fig, plt = get_ax_fig_plt(ax=ax)
        # Set aspect ratio to be equal so that pie is drawn as a circle.
        ax.axis("equal")
        # Don't show section whose value is less that minfract
        labels, vals = self.names_and_values(key, minfract=minfract)
        ax.pie(vals, explode=None, labels=labels, autopct="%1.1f%%", shadow=True)
        return fig

    @add_fig_kwargs
    def scatter_hist(self, ax=None, **kwargs):
        """
        Scatter plot of cpu-time vs wall-time with marginal histograms.

        Args:
            ax: matplotlib :class:`Axes` or None if a new figure should be created.

        Returns: `matplotlib` figure
        """
        from mpl_toolkits.axes_grid1 import make_axes_locatable

        ax, fig, plt = get_ax_fig_plt(ax=ax)
        x = np.asarray(self.get_values("cpu_time"))
        y = np.asarray(self.get_values("wall_time"))
        # the scatter plot:
        axScatter = plt.subplot(1, 1, 1)
        axScatter.scatter(x, y)
        axScatter.set_aspect("auto")
        # create new axes on the right and on the top of the current axes
        # The first argument of the new_vertical(new_horizontal) method is
        # the height (width) of the axes to be created in inches.
        divider = make_axes_locatable(axScatter)
        axHistx = divider.append_axes("top", 1.2, pad=0.1, sharex=axScatter)
        axHisty = divider.append_axes("right", 1.2, pad=0.1, sharey=axScatter)
        # make some labels invisible
        plt.setp(axHistx.get_xticklabels() + axHisty.get_yticklabels(), visible=False)
        # now determine nice limits by hand:
        binwidth = 0.25
        xymax = np.max([np.max(np.fabs(x)), np.max(np.fabs(y))])
        lim = (int(xymax / binwidth) + 1) * binwidth
        bins = np.arange(-lim, lim + binwidth, binwidth)
        axHistx.hist(x, bins=bins)
        axHisty.hist(y, bins=bins, orientation="horizontal")
        # the xaxis of axHistx and yaxis of axHisty are shared with axScatter,
        # thus there is no need to manually adjust the xlim and ylim of these axis.
        # axHistx.axis["bottom"].major_ticklabels.set_visible(False)
        for tl in axHistx.get_xticklabels():
            tl.set_visible(False)
        axHistx.set_yticks([0, 50, 100])
        # axHisty.axis["left"].major_ticklabels.set_visible(False)
        for tl in axHisty.get_yticklabels():
            tl.set_visible(False)
        axHisty.set_xticks([0, 50, 100])
        # plt.draw()
        return fig
| 33.480256 | 119 | 0.56361 |
89779aba5139033dcc49a36732b27dadc2c8910d | 1,606 | py | Python | src/repositories/comando.py | laurosn/flask-api-starter-kit | 09a5b67bf48abeb427beae9bccae323aea16d9ae | [
"MIT"
] | null | null | null | src/repositories/comando.py | laurosn/flask-api-starter-kit | 09a5b67bf48abeb427beae9bccae323aea16d9ae | [
"MIT"
] | null | null | null | src/repositories/comando.py | laurosn/flask-api-starter-kit | 09a5b67bf48abeb427beae9bccae323aea16d9ae | [
"MIT"
] | null | null | null | """ Defines the Comando repository """
from models.comando import Comando
from models import db
class ComandoRepository:
    """ The repository for the comando model """

    @staticmethod
    def get(id):
        """ Query a comando by id; returns the comando or None """
        return Comando.query.filter_by(id=id).one_or_none()

    @staticmethod
    def get_by_sistema(id):
        """ Query all comandos belonging to the given sistema """
        return Comando.query.filter_by(sistema_id=id).all()

    def update(self, id, name, parametros, retorno, sistema_id):
        """ Update a comando; returns the saved comando, or None if not found """
        comando = self.get(id)
        if comando is None:
            # Guard against a missing record (mirrors delete()); previously
            # this raised AttributeError when the id did not exist.
            return None
        comando.name = name
        comando.parametros = parametros
        comando.retorno = retorno
        comando.sistema_id = sistema_id
        return comando.save()

    @staticmethod
    def create(id, name, parametros, retorno, sistema_id):
        """ Create a new comando """
        comando = Comando(id=id, name=name, parametros=parametros, retorno=retorno, sistema_id=sistema_id)
        return comando.save()

    @staticmethod
    def create_all(comandos, sistema_id):
        """ Bulk-create comandos for a sistema from a list of dicts
        (keys: name, parametros, retorno) in a single commit """
        comandos_entries = []
        for comando in comandos:
            new_comando = Comando(name=comando['name'], parametros=comando['parametros'], retorno=comando['retorno'], sistema_id=sistema_id)
            comandos_entries.append(new_comando)
        db.session.add_all(comandos_entries)
        db.session.commit()

    def delete(self, id):
        """ Delete a comando; returns the deleted comando, or None if not found """
        comando = self.get(id)
        if comando:
            comando.delete()
        return comando
| 29.740741 | 140 | 0.628892 |
0d95f9daa53fbe359850b7fcb1e9e12fed7d3ddb | 5,265 | py | Python | terminal/gm.py | jghibiki/DungeonBuilder | af0879fa89b2e73f2bc256dc9e5311b815fe2220 | [
"MIT"
] | 3 | 2016-12-08T20:09:57.000Z | 2021-05-21T13:59:21.000Z | terminal/gm.py | jghibiki/Cursed | af0879fa89b2e73f2bc256dc9e5311b815fe2220 | [
"MIT"
] | 26 | 2016-12-09T20:30:31.000Z | 2017-05-31T20:57:21.000Z | terminal/gm.py | jghibiki/DungeonBuilder | af0879fa89b2e73f2bc256dc9e5311b815fe2220 | [
"MIT"
] | null | null | null | from interactive import InteractiveModule, UserModule
from viewer import ViewerConstants
from screen import Screen
from viewport import Viewport
from colon_line import ColonLine
from state import State
from client import Client
import log
import curses
log = log.logger
class GM(InteractiveModule, UserModule):
def __init__(self):
super(GM, self).__init__()
def _handle_combo(self, viewer, buf):
pass
def _handle_help(self, viewer, buf):
pass
def _handle(self, viewer, ch):
screen = viewer.get_submodule(Screen)
viewport = viewer.get_submodule(Viewport)
state = viewer.get_submodule(State)
wsad = state.get_state("direction_scheme")
wsad = True if wsad is not None and wsad is True else False
if True: #state.get_state("ignore_direction_keys") != "on":
if not wsad:
if ch == ord("j"):
self.down(viewer)
elif ch == ord("k"):
self.up(viewer)
elif ch == ord("h"):
self.left(viewer)
elif ch == ord("l"):
self.right(viewer)
elif ch == ord("J"):
self.vp_down(viewer)
elif ch == ord("K"):
self.vp_up(viewer)
elif ch == ord("H"):
self.vp_left(viewer)
elif ch == ord("L"):
self.vp_right(viewer)
else:
if ch == ord("s"):
self.down(viewer)
elif ch == ord("w"):
self.up(viewer)
elif ch == ord("a"):
self.left(viewer)
elif ch == ord("d"):
self.right(viewer)
elif ch == ord("S"):
self.vp_down(viewer)
elif ch == ord("W"):
self.vp_up(viewer)
elif ch == ord("A"):
self.vp_left(viewer)
elif ch == ord("D"):
self.vp_right(viewer)
if ch == ord("N"):
self.edit_note(viewer)
# some simple utilities
# TODO: Move these utilities to a dev module
elif ch == ord("p"):
import curses
try:
for i in range(0, curses.COLORS*curses.COLORS):
viewer.screen.addstr(str(i), curses.color_pair(i))
except:
pass
elif ch == ord("P"):
for i in range(0, 1000):
self.default_screen.addch(i)
def up(self, viewer):
vp = viewer.get_submodule(Viewport)
vp.cursor_up()
def down(self, viewer):
vp = viewer.get_submodule(Viewport)
vp.cursor_down()
def left(self, viewer):
vp = viewer.get_submodule(Viewport)
vp.cursor_left()
def right(self, viewer):
vp = viewer.get_submodule(Viewport)
vp.cursor_right()
def vp_down(self, viewer):
vp = viewer.get_submodule(Viewport)
screen = viewer.get_submodule(Screen)
cl = viewer.get_submodule(ColonLine)
vp.down()
cl.mark_dirty()
screen.fix_cursor()
def vp_up(self, viewer):
vp = viewer.get_submodule(Viewport)
screen = viewer.get_submodule(Screen)
cl = viewer.get_submodule(ColonLine)
vp.up()
cl.mark_dirty()
screen.fix_cursor()
def vp_right(self, viewer):
vp = viewer.get_submodule(Viewport)
screen = viewer.get_submodule(Screen)
cl = viewer.get_submodule(ColonLine)
vp.right()
cl.mark_dirty()
screen.fix_cursor()
def vp_left(self, viewer):
vp = viewer.get_submodule(Viewport)
screen = viewer.get_submodule(Screen)
cl = viewer.get_submodule(ColonLine)
vp.left()
cl.mark_dirty()
screen.fix_cursor()
def edit_note(self, viewer):
import sys, tempfile, os
import subprocess
vp = viewer.get_submodule(Viewport)
screen = viewer.get_submodule(Screen)
idx = vp.get_feature_idx(
vp.cursor_y,
vp.cursor_x)
if idx:
feature = vp.get_feature(idx)
notes = feature["notes"]
EDITOR = os.environ.get('EDITOR','vim')
with tempfile.NamedTemporaryFile(suffix=".md") as tf:
text = notes.encode("UTF-8")
tf.write(text)
tf.flush()
subprocess.call([EDITOR, tf.name])
# do the parsing with `tf` using regular File operations.
# for instance:
tf.seek(0)
text = tf.read().decode("UTF-8")
# fix cursor after opening editor
curses.curs_set(1)
curses.curs_set(0)
# TODO: add a way to upload edited note to server
feature["notes"] = text
client = viewer.get_submodule(Client)
feature_dict = feature
client.make_request("/map/update/", payload=feature_dict)
viewer._draw(force=True) # force redraw after closing vim
| 28.005319 | 73 | 0.518898 |
f247aa2e87778ee498d4bd7c0e61ceefb6e3f57c | 707 | py | Python | util/get_photos.py | jsundram/haydnenthusiasts.org | 70ae58983cf0f840e43b80ae656cd348361e5716 | [
"MIT"
] | null | null | null | util/get_photos.py | jsundram/haydnenthusiasts.org | 70ae58983cf0f840e43b80ae656cd348361e5716 | [
"MIT"
] | null | null | null | util/get_photos.py | jsundram/haydnenthusiasts.org | 70ae58983cf0f840e43b80ae656cd348361e5716 | [
"MIT"
] | null | null | null | import json
import requests
import os
from PIL import Image
directory = '../static/assets/event_photos/'
with open('../static/data.json') as f:
data = json.load(f)
for event in data['events']:
photo_url = event['photo']
# name only; Get rid of query string if present
name = photo_url.split('/')[-1].split('?')[0]
to_write = directory + name
if not os.path.exists(to_write):
print("downloading %s" % name)
r = requests.get(photo_url)
with open(to_write, 'wb') as f:
f.write(r.content)
im = Image.open(to_write)
if im.size != (400, 300):
print("%s (date: %s) needs resizing: (%s)" % (name, event['date'], im.size))
| 27.192308 | 88 | 0.595474 |
7f459f7f23b17cb4c3d29d23fcf5f8fbc5648ce1 | 1,175 | py | Python | ur_cc_app/__init__.py | Sim4n6/UR-CodingChallenge | 287876f76a551e8b5adf92a5218fb7e40d79a825 | [
"MIT"
] | null | null | null | ur_cc_app/__init__.py | Sim4n6/UR-CodingChallenge | 287876f76a551e8b5adf92a5218fb7e40d79a825 | [
"MIT"
] | 52 | 2020-01-03T17:28:43.000Z | 2022-03-01T17:08:41.000Z | ur_cc_app/__init__.py | Sim4n6/UR-CodingChallenge | 287876f76a551e8b5adf92a5218fb7e40d79a825 | [
"MIT"
] | null | null | null | from .config import config_choices
import os
from flask import Blueprint, Flask
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from flask_simple_geoip import SimpleGeoIP
# Flask extension singletons, created unbound here and attached to the app
# inside create_app() via init_app() (application-factory pattern).
db = SQLAlchemy()
ma = Marshmallow()
# Initialize the extension Geo IP location ext
simple_geoip = SimpleGeoIP()
def create_app():
    """Application factory: build, configure and return the Flask app.

    The config class is chosen via the FLASK_CONFIG environment variable
    (falling back to "Development"); extensions are bound with init_app()
    and blueprints are registered from the sub-packages.
    """
    app = Flask(__name__, template_folder="templates", static_folder="static")
    # load the environment configs using FLASK_CONFIG otherwise load Development.
    app.config.from_object(config_choices[os.getenv("FLASK_CONFIG") or "Development"])
    # print(app.config)
    db.init_app(app)
    ma.init_app(app)
    simple_geoip.init_app(app)
    # Bootstrap registers itself on the app; the local name is not used again.
    bootstrap = Bootstrap(app)
    # register blueprints (imported here to avoid circular imports at module load)
    from ur_cc_app.main import main_routes
    app.register_blueprint(main_routes.main_bp)
    from ur_cc_app.api import api_routes
    app.register_blueprint(api_routes.api_bp)
    from ur_cc_app.errors import errors_routes
    app.register_blueprint(errors_routes.error_bp)
    from ur_cc_app.auth import auth_routes
    app.register_blueprint(auth_routes.auth_bp)
    return app
a2a92e6ddaa1e32825eb2b40eb21ff1bac62a9d6 | 39,717 | py | Python | vispy/color/colormap.py | mkkb/vispy | 8540f8d96fe3af84ba80bde6d6bf55484eaa8e3a | [
"BSD-3-Clause"
] | null | null | null | vispy/color/colormap.py | mkkb/vispy | 8540f8d96fe3af84ba80bde6d6bf55484eaa8e3a | [
"BSD-3-Clause"
] | null | null | null | vispy/color/colormap.py | mkkb/vispy | 8540f8d96fe3af84ba80bde6d6bf55484eaa8e3a | [
"BSD-3-Clause"
] | 1 | 2019-03-18T19:35:17.000Z | 2019-03-18T19:35:17.000Z | # -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division # just to be safe...
import inspect
import numpy as np
from .color_array import ColorArray
from ..ext.six import string_types
from ..ext.cubehelix import cubehelix
from ..ext.husl import husl_to_rgb
###############################################################################
# Color maps
# Utility functions for interpolation in NumPy.
def _vector_or_scalar(x, type='row'):
"""Convert an object to either a scalar or a row or column vector."""
if isinstance(x, (list, tuple)):
x = np.array(x)
if isinstance(x, np.ndarray):
assert x.ndim == 1
if type == 'column':
x = x[:, None]
return x
def _vector(x, type='row'):
"""Convert an object to a row or column vector."""
if isinstance(x, (list, tuple)):
x = np.array(x, dtype=np.float32)
elif not isinstance(x, np.ndarray):
x = np.array([x], dtype=np.float32)
assert x.ndim == 1
if type == 'column':
x = x[:, None]
return x
def _find_controls(x, controls=None, clip=None):
x_controls = np.clip(np.searchsorted(controls, x) - 1, 0, clip)
return x_controls.astype(np.int32)
# Normalization
def _normalize(x, cmin=None, cmax=None, clip=True):
"""Normalize an array from the range [cmin, cmax] to [0,1],
with optional clipping."""
if not isinstance(x, np.ndarray):
x = np.array(x)
if cmin is None:
cmin = x.min()
if cmax is None:
cmax = x.max()
if cmin == cmax:
return .5 * np.ones(x.shape)
else:
cmin, cmax = float(cmin), float(cmax)
y = (x - cmin) * 1. / (cmax - cmin)
if clip:
y = np.clip(y, 0., 1.)
return y
# Interpolation functions in NumPy.
def _mix_simple(a, b, x):
"""Mix b (with proportion x) with a."""
x = np.clip(x, 0.0, 1.0)
return (1.0 - x)*a + x*b
def _interpolate_multi(colors, x, controls):
    """For every x in [0, 1], return the two control colors bounding it and
    the relative position of x inside that bin.

    Returns (left_colors, right_colors, x_rel[:, None]) ready for
    _mix_simple.
    """
    x = x.ravel()
    n = len(colors)
    # For each element in x, the control index of its bin's left boundary.
    x_step = _find_controls(x, controls, n-2)
    # The length of each bin.
    controls_length = np.diff(controls).astype(np.float32)
    # Prevent division by zero error.
    controls_length[controls_length == 0.] = 1.
    # Like x, but relative to each bin.
    _to_clip = x - controls[x_step]
    _to_clip /= controls_length[x_step]
    x_rel = np.clip(_to_clip, 0., 1.)
    return (colors[x_step],
            colors[x_step + 1],
            x_rel[:, None])
def mix(colors, x, controls=None):
    """Linearly interpolate `colors` at positions `x` using `controls`."""
    left, right, frac = _interpolate_multi(colors, x, controls)
    return _mix_simple(left, right, frac)
def smoothstep(edge0, edge1, x):
    """Perform smooth Hermite interpolation between 0 and 1 when
    edge0 < x < edge1."""
    # Scale and saturate x to the [0, 1] range, then apply the cubic.
    t = np.clip((x - edge0) / (edge1 - edge0), 0.0, 1.0)
    return t * t * (3 - 2 * t)
def step(colors, x, controls=None):
    """Step interpolation from a set of colors. x belongs in [0, 1].

    Each x is mapped to the color of the bin it falls into; `controls`
    must span [0, 1] and define len(colors) bins.

    (The original placed this docstring after the first statement, so it
    never registered as the function's docstring.)
    """
    assert (controls[0], controls[-1]) == (0., 1.)
    ncolors = len(colors)
    assert ncolors == len(controls) - 1
    assert ncolors >= 2
    x = x.ravel()
    x_step = _find_controls(x, controls, ncolors - 1)
    return colors[x_step, ...]
# GLSL interpolation functions.
def _glsl_mix(controls=None):
"""Generate a GLSL template function from a given interpolation patterns
and control points."""
assert (controls[0], controls[-1]) == (0., 1.)
ncolors = len(controls)
assert ncolors >= 2
if ncolors == 2:
s = " return mix($color_0, $color_1, t);\n"
else:
s = ""
for i in range(ncolors-1):
if i == 0:
ifs = 'if (t < %.6f)' % (controls[i+1])
elif i == (ncolors-2):
ifs = 'else'
else:
ifs = 'else if (t < %.6f)' % (controls[i+1])
adj_t = '(t - %s) / %s' % (controls[i],
controls[i+1] - controls[i])
s += ("%s {\n return mix($color_%d, $color_%d, %s);\n} " %
(ifs, i, i+1, adj_t))
return "vec4 colormap(float t) {\n%s\n}" % s
def _glsl_step(controls=None):
assert (controls[0], controls[-1]) == (0., 1.)
ncolors = len(controls) - 1
assert ncolors >= 2
s = ""
for i in range(ncolors-1):
if i == 0:
ifs = 'if (t < %.6f)' % (controls[i+1])
elif i == (ncolors-2):
ifs = 'else'
else:
ifs = 'else if (t < %.6f)' % (controls[i+1])
s += """%s {\n return $color_%d;\n} """ % (ifs, i)
return """vec4 colormap(float t) {\n%s\n}""" % s
# Mini GLSL template system for colors.
def _process_glsl_template(template, colors):
"""Replace $color_i by color #i in the GLSL template."""
for i in range(len(colors) - 1, -1, -1):
color = colors[i]
assert len(color) == 4
vec4_color = 'vec4(%.3f, %.3f, %.3f, %.3f)' % tuple(color)
template = template.replace('$color_%d' % i, vec4_color)
return template
class BaseColormap(object):
    """Class representing a colormap:

    t in [0, 1] --> rgba_color

    Parameters
    ----------
    colors : list of lists, tuples, or ndarrays
        The control colors used by the colormap (shape = (ncolors, 4)).

    Notes
    -----
    Must be overridden. Child classes need to implement:

    glsl_map : string
        The GLSL function for the colormap. Use $color_0 to refer
        to the first color in `colors`, and so on. These are vec4 vectors.
    map(item) : function
        Takes a (N, 1) vector of values in [0, 1], and returns a rgba array
        of size (N, 4).
    """

    # Control colors used by the colormap.
    colors = None

    # GLSL string with a function implementing the color map.
    glsl_map = None

    def __init__(self, colors=None):
        # Ensure the colors are arrays.
        if colors is not None:
            self.colors = colors
        if not isinstance(self.colors, ColorArray):
            self.colors = ColorArray(self.colors)
        # Process the GLSL map function by replacing $color_i by the
        # corresponding control color, rendered as a GLSL vec4 literal.
        if len(self.colors) > 0:
            self.glsl_map = _process_glsl_template(self.glsl_map,
                                                   self.colors.rgba)

    def map(self, item):
        """Return a rgba array for the requested items.

        This function must be overridden by child classes.

        This function doesn't need to implement argument checking on `item`.
        It can always assume that `item` is a (N, 1) array of values between
        0 and 1.

        Parameters
        ----------
        item : ndarray
            An array of values in [0,1].

        Returns
        -------
        rgba : ndarray
            An array with rgba values, with one color per item. The shape
            should be ``item.shape + (4,)``.

        Notes
        -----
        Users are expected to use a colormap with ``__getitem__()`` rather
        than ``map()`` (which implements a lower-level API).
        """
        raise NotImplementedError()

    def __getitem__(self, item):
        # Multi-dimensional indexing is not supported: a colormap is only
        # indexed by scalar positions or a 1D vector of positions.
        if isinstance(item, tuple):
            raise ValueError('ColorArray indexing is only allowed along '
                             'the first dimension.')
        # Ensure item is either a scalar or a column vector.
        item = _vector(item, type='column')
        # Clip the values in [0, 1].
        item = np.clip(item, 0., 1.)
        colors = self.map(item)
        return ColorArray(colors)

    def __setitem__(self, item, value):
        # Colormaps are immutable through item assignment.
        raise RuntimeError("It is not possible to set items to "
                           "BaseColormap instances.")

    def _repr_html_(self):
        # Rich display hook (IPython/Jupyter): render the colormap as a
        # thin horizontal color bar of n sampled cells.
        n = 100
        html = ("""
        <style>
        table.vispy_colormap {
        height: 30px;
        border: 0;
        margin: 0;
        padding: 0;
        }
        table.vispy_colormap td {
        width: 3px;
        border: 0;
        margin: 0;
        padding: 0;
        }
        </style>
        <table class="vispy_colormap">
        """ +
        '\n'.join([(("""<td style="background-color: %s;"
        title="%s"></td>""") % (color, color))
        for color in self[np.linspace(0., 1., n)].hex]) +
        """
        </table>
        """)
        return html
def _default_controls(ncolors):
"""Generate linearly spaced control points from a set of colors."""
return np.linspace(0., 1., ncolors)
# List the parameters of every supported interpolation mode.
# For each mode:
#   'ncontrols' maps a color count to the required number of control points,
#   'glsl_map' generates the GLSL colormap function from the controls,
#   'map' is the Python-side mapping function.
_interpolation_info = {
    'linear': {
        'ncontrols': lambda ncolors: ncolors, # take ncolors as argument
        'glsl_map': _glsl_mix, # take 'controls' as argument
        'map': mix,
    },
    'zero': {
        # One bin per color requires one extra control point (bin edges).
        'ncontrols': lambda ncolors: (ncolors+1),
        'glsl_map': _glsl_step,
        'map': step,
    }
}
class Colormap(BaseColormap):
    """A colormap defining several control colors and an interpolation scheme.

    Parameters
    ----------
    colors : list of colors | ColorArray
        The list of control colors. If not a ``ColorArray``, a new
        ``ColorArray`` instance is created from this list. See the
        documentation of ``ColorArray``.
    controls : array-like
        The list of control points for the given colors. It should be
        an increasing list of floating-point number between 0.0 and 1.0.
        The first control point must be 0.0. The last control point must be
        1.0. The number of control points depends on the interpolation scheme.
    interpolation : str
        The interpolation mode of the colormap. Default: 'linear'. Can also
        be 'zero'.
        If 'linear', ncontrols = ncolors (one color per control point).
        If 'zero', ncontrols = ncolors+1 (one color per bin).

    Examples
    --------
    Here is a basic example:

        >>> from vispy.color import Colormap
        >>> cm = Colormap(['r', 'g', 'b'])
        >>> cm[0.], cm[0.5], cm[np.linspace(0., 1., 100)]
    """

    def __init__(self, colors, controls=None, interpolation='linear'):
        # Setting interpolation (property setter below) also configures the
        # GLSL generator, the control-count rule and the Python map function.
        self.interpolation = interpolation
        ncontrols = self._ncontrols(len(colors))
        # Default controls.
        if controls is None:
            controls = _default_controls(ncontrols)
        assert len(controls) == ncontrols
        self._controls = np.array(controls, dtype=np.float32)
        self.glsl_map = self._glsl_map_generator(self._controls)
        super(Colormap, self).__init__(colors)

    @property
    def interpolation(self):
        """The interpolation mode of the colormap"""
        return self._interpolation

    @interpolation.setter
    def interpolation(self, val):
        if val not in _interpolation_info:
            raise ValueError('The interpolation mode can only be one of: ' +
                             ', '.join(sorted(_interpolation_info.keys())))
        # Get the information of the interpolation mode.
        info = _interpolation_info[val]
        # Get the function that generates the GLSL map, as a function of the
        # controls array.
        self._glsl_map_generator = info['glsl_map']
        # Number of controls as a function of the number of colors.
        self._ncontrols = info['ncontrols']
        # Python map function.
        self._map_function = info['map']
        self._interpolation = val

    def map(self, x):
        """The Python mapping function from the [0,1] interval to a
        list of rgba colors

        Parameters
        ----------
        x : array-like
            The values to map.

        Returns
        -------
        colors : list
            List of rgba colors.
        """
        return self._map_function(self.colors.rgba, x, self._controls)
class CubeHelixColormap(Colormap):
    def __init__(self, start=0.5, rot=1, gamma=1.0, reverse=True, nlev=32,
                 minSat=1.2, maxSat=1.2, minLight=0., maxLight=1., **kwargs):
        """Cube helix colormap

        A full implementation of Dave Green's "cubehelix" for Matplotlib.
        Based on the FORTRAN 77 code provided in
        D.A. Green, 2011, BASI, 39, 289.

        http://adsabs.harvard.edu/abs/2011arXiv1108.5083G

        User can adjust all parameters of the cubehelix algorithm.
        This enables much greater flexibility in choosing color maps, while
        always ensuring the color map scales in intensity from black
        to white. A few simple examples:

        Default color map settings produce the standard "cubehelix".

        Create color map in only blues by setting rot=0 and start=0.

        Create reverse (white to black) backwards through the rainbow once
        by setting rot=1 and reverse=True.

        Parameters
        ----------
        start : scalar, optional
            Sets the starting position in the color space. 0=blue, 1=red,
            2=green. Defaults to 0.5.
        rot : scalar, optional
            The number of rotations through the rainbow. Can be positive
            or negative, indicating direction of rainbow. Negative values
            correspond to Blue->Red direction. Defaults to 1.
        gamma : scalar, optional
            The gamma correction for intensity. Defaults to 1.0
        reverse : boolean, optional
            Set to True to reverse the color map. Will go from black to
            white. Good for density plots where shade~density. Defaults to
            True.
        nlev : scalar, optional
            Defines the number of discrete levels to render colors at.
            Defaults to 32.
        sat : scalar, optional
            The saturation intensity factor. Defaults to 1.2
            NOTE: this was formerly known as "hue" parameter
        minSat : scalar, optional
            Sets the minimum-level saturation. Defaults to 1.2
        maxSat : scalar, optional
            Sets the maximum-level saturation. Defaults to 1.2
        startHue : scalar, optional
            Sets the starting color, ranging from [0, 360], as in
            D3 version by @mbostock
            NOTE: overrides values in start parameter
        endHue : scalar, optional
            Sets the ending color, ranging from [0, 360], as in
            D3 version by @mbostock
            NOTE: overrides values in rot parameter
        minLight : scalar, optional
            Sets the minimum lightness value. Defaults to 0.
        maxLight : scalar, optional
            Sets the maximum lightness value. Defaults to 1.
        """
        # cubehelix() returns the list of control colors for this map;
        # extra kwargs (sat, startHue, endHue, ...) are passed through.
        super(CubeHelixColormap, self).__init__(
            cubehelix(start=start, rot=rot, gamma=gamma, reverse=reverse,
                      nlev=nlev, minSat=minSat, maxSat=maxSat,
                      minLight=minLight, maxLight=maxLight, **kwargs))
class _Fire(BaseColormap):
    """White -> yellow -> red colormap with a quadratic blend."""

    colors = [(1.0, 1.0, 1.0, 1.0),
              (1.0, 1.0, 0.0, 1.0),
              (1.0, 0.0, 0.0, 1.0)]

    glsl_map = """
    vec4 fire(float t) {
        return mix(mix($color_0, $color_1, t),
                   mix($color_1, $color_2, t*t), t);
    }
    """

    def map(self, t):
        # Blend of two mixes, mirroring the GLSL implementation above.
        a, b, d = self.colors.rgba
        c = _mix_simple(a, b, t)
        e = _mix_simple(b, d, t**2)
        return _mix_simple(c, e, t)
class _Grays(BaseColormap):
    """Linear grayscale colormap (black -> white)."""

    glsl_map = """
    vec4 grays(float t) {
        return vec4(t, t, t, 1.0);
    }
    """

    def map(self, t):
        # Replicate t into the r, g, b channels; alpha fixed at 1.
        if isinstance(t, np.ndarray):
            return np.hstack([t, t, t, np.ones(t.shape)]).astype(np.float32)
        else:
            return np.array([t, t, t, 1.0], dtype=np.float32)
class _Ice(BaseColormap):
    """Blue -> white colormap (blue channel fixed at 1)."""

    glsl_map = """
    vec4 ice(float t) {
        return vec4(t, t, 1.0, 1.0);
    }
    """

    def map(self, t):
        # r and g ramp with t; b and alpha fixed at 1.
        if isinstance(t, np.ndarray):
            return np.hstack([t, t, np.ones(t.shape),
                              np.ones(t.shape)]).astype(np.float32)
        else:
            return np.array([t, t, 1.0, 1.0], dtype=np.float32)
class _Hot(BaseColormap):
    """Colormap smoothstepping each RGB channel between two control colors."""

    colors = [(0., .33, .66, 1.0),
              (.33, .66, 1., 1.0)]

    glsl_map = """
    vec4 hot(float t) {
        return vec4(smoothstep($color_0.rgb, $color_1.rgb, vec3(t, t, t)),
                    1.0);
    }
    """

    def map(self, t):
        rgba = self.colors.rgba
        # Per-channel Hermite ramp between the two control colors; alpha 1.
        smoothed = smoothstep(rgba[0, :3], rgba[1, :3], t)
        return np.hstack((smoothed, np.ones((len(t), 1))))
class _Winter(BaseColormap):
    """Blue -> green colormap with a sqrt-eased ramp."""

    colors = [(0.0, 0.0, 1.0, 1.0),
              (0.0, 1.0, 0.5, 1.0)]

    glsl_map = """
    vec4 winter(float t) {
        return mix($color_0, $color_1, sqrt(t));
    }
    """

    def map(self, t):
        # sqrt(t) biases the blend toward the end color early on.
        return _mix_simple(self.colors.rgba[0],
                           self.colors.rgba[1],
                           np.sqrt(t))
class _SingleHue(Colormap):
    """A colormap which is solely defined by the given hue and value.

    Given the color hue and value, this color map increases the saturation
    of a color. The start color is almost white but still contains a hint of
    the given color, and at the end the color is fully saturated.

    Parameters
    ----------
    hue : scalar, optional
        The hue refers to a "true" color, without any shading or tinting.
        Must be in the range [0, 360]. Defaults to 200 (blue).
    saturation_range : array-like, optional
        The saturation represents how "pure" a color is. Less saturation means
        more white light mixed in the color. A fully saturated color means
        the pure color defined by the hue. No saturation means completely
        white. This colormap changes the saturation, and with this parameter
        you can specify the lower and upper bound. Default is (0.1, 0.8).
    value : scalar, optional
        The value defines the "brightness" of a color: a value of 0.0 means
        completely black while a value of 1.0 means the color defined by the
        hue without shading. Must be in the range [0, 1.0]. The default value
        is 1.0.

    Notes
    -----
    For more information about the hue values see the `wikipedia page`_.

    .. _wikipedia page: https://en.wikipedia.org/wiki/Hue
    """

    # Fix: the default is now an immutable tuple (was a mutable list, the
    # classic shared-mutable-default-argument pitfall). The docstring also
    # claimed [0.2, 0.8] while the actual default is (0.1, 0.8).
    def __init__(self, hue=200, saturation_range=(0.1, 0.8), value=1.0):
        colors = ColorArray([
            (hue, saturation_range[0], value),
            (hue, saturation_range[1], value)
        ], color_space='hsv')
        super(_SingleHue, self).__init__(colors)
class _HSL(Colormap):
    """A colormap which is defined by n evenly spaced points in
    a circular color space.

    This means that we change the hue value while keeping the
    saturation and value constant.

    Parameters
    ----------
    ncolors : int, optional
        The number of colors to generate.
    hue_start : int, optional
        The hue start value. Must be in the range [0, 360], the default is 0.
    saturation : float, optional
        The saturation component of the colors to generate. The default is
        fully saturated (1.0). Must be in the range [0, 1.0].
    value : float, optional
        The value (brightness) component of the colors to generate. Must
        be in the range [0, 1.0], and the default is 1.0
    controls : array-like, optional
        The list of control points for the colors to generate. It should be
        an increasing list of floating-point number between 0.0 and 1.0.
        The first control point must be 0.0. The last control point must be
        1.0. The number of control points depends on the interpolation scheme.
    interpolation : str, optional
        The interpolation mode of the colormap. Default: 'linear'. Can also
        be 'zero'.
        If 'linear', ncontrols = ncolors (one color per control point).
        If 'zero', ncontrols = ncolors+1 (one color per bin).
    """

    def __init__(self, ncolors=6, hue_start=0, saturation=1.0, value=1.0,
                 controls=None, interpolation='linear'):
        # ncolors evenly spaced hues around the circle; the endpoint is
        # dropped because 0 and 360 are the same hue. Rotate by hue_start.
        hues = np.linspace(0, 360, ncolors + 1)[:-1]
        hues += hue_start
        hues %= 360
        colors = ColorArray([(hue, saturation, value) for hue in hues],
                            color_space='hsv')
        super(_HSL, self).__init__(colors, controls=controls,
                                   interpolation=interpolation)
class _HUSL(Colormap):
    """A colormap which is defined by n evenly spaced points in
    the HUSL hue space.

    Parameters
    ----------
    ncolors : int, optional
        The number of colors to generate.
    hue_start : int, optional
        The hue start value. Must be in the range [0, 360], the default is 0.
    saturation : float, optional
        The saturation component of the colors to generate. The default is
        fully saturated (1.0). Must be in the range [0, 1.0].
    value : float, optional
        The value component of the colors to generate or "brightness". Must
        be in the range [0, 1.0], and the default is 0.7.
    controls : array-like, optional
        The list of control points for the colors to generate. It should be
        an increasing list of floating-point number between 0.0 and 1.0.
        The first control point must be 0.0. The last control point must be
        1.0. The number of control points depends on the interpolation scheme.
    interpolation : str, optional
        The interpolation mode of the colormap. Default: 'linear'. Can also
        be 'zero'.
        If 'linear', ncontrols = ncolors (one color per control point).
        If 'zero', ncontrols = ncolors+1 (one color per bin).

    Notes
    -----
    For more information about HUSL colors see http://husl-colors.org
    """

    def __init__(self, ncolors=6, hue_start=0, saturation=1.0, value=0.7,
                 controls=None, interpolation='linear'):
        # Evenly spaced hues, rotated by hue_start (see _HSL).
        hues = np.linspace(0, 360, ncolors + 1)[:-1]
        hues += hue_start
        hues %= 360
        # Scale the [0, 1] parameters to the 0-99 range before the HUSL
        # conversion (matches the scaling used by _Diverging below).
        saturation *= 99
        value *= 99
        colors = ColorArray(
            [husl_to_rgb(hue, saturation, value) for hue in hues],
        )
        super(_HUSL, self).__init__(colors, controls=controls,
                                    interpolation=interpolation)
class _Diverging(Colormap):
    """A diverging colormap interpolating between two HUSL hues through a
    light (or dark) midpoint.

    Parameters
    ----------
    h_pos : scalar, optional
        Hue of the positive end, in [0, 360]. Defaults to 20.
    h_neg : scalar, optional
        Hue of the negative end, in [0, 360]. Defaults to 250.
    saturation : float, optional
        Saturation of the end colors, in [0, 1.0]. Defaults to 1.0.
    value : float, optional
        Value (brightness) of the end colors, in [0, 1.0]. Defaults to 0.7.
    center : str, optional
        Either "light" or "dark"; selects the midpoint color. Defaults
        to "light".
    """

    def __init__(self, h_pos=20, h_neg=250, saturation=1.0, value=0.7,
                 center="light"):
        # Scale the [0, 1] parameters to the 0-99 range before the HUSL
        # conversion (matches the scaling used by _HUSL above).
        saturation *= 99
        value *= 99
        start = husl_to_rgb(h_neg, saturation, value)
        # Near-black or near-white midpoint depending on `center`.
        mid = ((0.133, 0.133, 0.133) if center == "dark" else
               (0.92, 0.92, 0.92))
        end = husl_to_rgb(h_pos, saturation, value)
        colors = ColorArray([start, mid, end])
        super(_Diverging, self).__init__(colors)
class _RedYellowBlueCyan(Colormap):
    """A colormap which goes red-yellow positive and blue-cyan negative

    Parameters
    ----------
    limits : array-like, optional
        The limits for the fully transparent, opaque red, and yellow points.
    """

    def __init__(self, limits=(0.33, 0.66, 1.0)):
        limits = np.array(limits, float).ravel()
        if len(limits) != 3:
            raise ValueError('limits must have 3 values')
        if (np.diff(limits) < 0).any() or (limits <= 0).any():
            raise ValueError('limits must be strictly increasing and positive')
        # Mirror the limits around zero, then rescale from [-max, max]
        # to [0, 1] to form the colormap control points.
        controls = np.array([-limits[2], -limits[1], -limits[0],
                             limits[0], limits[1], limits[2]])
        controls = ((controls / limits[2]) + 1) / 2.
        # Cyan -> blue on the negative side, red -> yellow on the positive
        # side; alpha fades to 0 toward the center.
        colors = [(0., 1., 1., 1.), (0., 0., 1., 1.), (0., 0., 1., 0.),
                  (1., 0., 0., 0.), (1., 0., 0., 1.), (1., 1., 0., 1.)]
        colors = ColorArray(colors)
        super(_RedYellowBlueCyan, self).__init__(
            colors, controls=controls, interpolation='linear')
# https://github.com/matplotlib/matplotlib/pull/4707/files#diff-893cf0348279e9f4570488a7a297ab1eR774 # noqa
# Taken from original Viridis colormap data in matplotlib implementation
# Sampled 128 points from the raw data-set of 256 samples.
# Sub sampled to 128 points since 256 points causes VisPy to freeze.
#
# Issue #1331 https://github.com/vispy/vispy/issues/1331 explains that the
# 128 viridis sample size
# fails on some GPUs but lowering to 64 samples allows more GPUs to use
# viridis. The 64 samples are linearly interpolated anyhow and yield smooth
# colormaps. To get 64 samples
# the original Viridis colormap data is sampled with a stride of 4 ie [::4].
#
# HACK: Ideally, all 256 points should be included, with VisPy generating
# a 1D texture lookup for ColorMap, rather than branching code.
_viridis_data = [[0.267004, 0.004874, 0.329415],
[0.268510, 0.009605, 0.335427],
[0.269944, 0.014625, 0.341379],
[0.271305, 0.019942, 0.347269],
[0.272594, 0.025563, 0.353093],
[0.273809, 0.031497, 0.358853],
[0.274952, 0.037752, 0.364543],
[0.276022, 0.044167, 0.370164],
[0.277018, 0.050344, 0.375715],
[0.277941, 0.056324, 0.381191],
[0.278791, 0.062145, 0.386592],
[0.279566, 0.067836, 0.391917],
[0.280267, 0.073417, 0.397163],
[0.280894, 0.078907, 0.402329],
[0.281446, 0.084320, 0.407414],
[0.281924, 0.089666, 0.412415],
[0.282327, 0.094955, 0.417331],
[0.282656, 0.100196, 0.422160],
[0.282910, 0.105393, 0.426902],
[0.283091, 0.110553, 0.431554],
[0.283197, 0.115680, 0.436115],
[0.283229, 0.120777, 0.440584],
[0.283187, 0.125848, 0.444960],
[0.283072, 0.130895, 0.449241],
[0.282884, 0.135920, 0.453427],
[0.282623, 0.140926, 0.457517],
[0.282290, 0.145912, 0.461510],
[0.281887, 0.150881, 0.465405],
[0.281412, 0.155834, 0.469201],
[0.280868, 0.160771, 0.472899],
[0.280255, 0.165693, 0.476498],
[0.279574, 0.170599, 0.479997],
[0.278826, 0.175490, 0.483397],
[0.278012, 0.180367, 0.486697],
[0.277134, 0.185228, 0.489898],
[0.276194, 0.190074, 0.493001],
[0.275191, 0.194905, 0.496005],
[0.274128, 0.199721, 0.498911],
[0.273006, 0.204520, 0.501721],
[0.271828, 0.209303, 0.504434],
[0.270595, 0.214069, 0.507052],
[0.269308, 0.218818, 0.509577],
[0.267968, 0.223549, 0.512008],
[0.266580, 0.228262, 0.514349],
[0.265145, 0.232956, 0.516599],
[0.263663, 0.237631, 0.518762],
[0.262138, 0.242286, 0.520837],
[0.260571, 0.246922, 0.522828],
[0.258965, 0.251537, 0.524736],
[0.257322, 0.256130, 0.526563],
[0.255645, 0.260703, 0.528312],
[0.253935, 0.265254, 0.529983],
[0.252194, 0.269783, 0.531579],
[0.250425, 0.274290, 0.533103],
[0.248629, 0.278775, 0.534556],
[0.246811, 0.283237, 0.535941],
[0.244972, 0.287675, 0.537260],
[0.243113, 0.292092, 0.538516],
[0.241237, 0.296485, 0.539709],
[0.239346, 0.300855, 0.540844],
[0.237441, 0.305202, 0.541921],
[0.235526, 0.309527, 0.542944],
[0.233603, 0.313828, 0.543914],
[0.231674, 0.318106, 0.544834],
[0.229739, 0.322361, 0.545706],
[0.227802, 0.326594, 0.546532],
[0.225863, 0.330805, 0.547314],
[0.223925, 0.334994, 0.548053],
[0.221989, 0.339161, 0.548752],
[0.220057, 0.343307, 0.549413],
[0.218130, 0.347432, 0.550038],
[0.216210, 0.351535, 0.550627],
[0.214298, 0.355619, 0.551184],
[0.212395, 0.359683, 0.551710],
[0.210503, 0.363727, 0.552206],
[0.208623, 0.367752, 0.552675],
[0.206756, 0.371758, 0.553117],
[0.204903, 0.375746, 0.553533],
[0.203063, 0.379716, 0.553925],
[0.201239, 0.383670, 0.554294],
[0.199430, 0.387607, 0.554642],
[0.197636, 0.391528, 0.554969],
[0.195860, 0.395433, 0.555276],
[0.194100, 0.399323, 0.555565],
[0.192357, 0.403199, 0.555836],
[0.190631, 0.407061, 0.556089],
[0.188923, 0.410910, 0.556326],
[0.187231, 0.414746, 0.556547],
[0.185556, 0.418570, 0.556753],
[0.183898, 0.422383, 0.556944],
[0.182256, 0.426184, 0.557120],
[0.180629, 0.429975, 0.557282],
[0.179019, 0.433756, 0.557430],
[0.177423, 0.437527, 0.557565],
[0.175841, 0.441290, 0.557685],
[0.174274, 0.445044, 0.557792],
[0.172719, 0.448791, 0.557885],
[0.171176, 0.452530, 0.557965],
[0.169646, 0.456262, 0.558030],
[0.168126, 0.459988, 0.558082],
[0.166617, 0.463708, 0.558119],
[0.165117, 0.467423, 0.558141],
[0.163625, 0.471133, 0.558148],
[0.162142, 0.474838, 0.558140],
[0.160665, 0.478540, 0.558115],
[0.159194, 0.482237, 0.558073],
[0.157729, 0.485932, 0.558013],
[0.156270, 0.489624, 0.557936],
[0.154815, 0.493313, 0.557840],
[0.153364, 0.497000, 0.557724],
[0.151918, 0.500685, 0.557587],
[0.150476, 0.504369, 0.557430],
[0.149039, 0.508051, 0.557250],
[0.147607, 0.511733, 0.557049],
[0.146180, 0.515413, 0.556823],
[0.144759, 0.519093, 0.556572],
[0.143343, 0.522773, 0.556295],
[0.141935, 0.526453, 0.555991],
[0.140536, 0.530132, 0.555659],
[0.139147, 0.533812, 0.555298],
[0.137770, 0.537492, 0.554906],
[0.136408, 0.541173, 0.554483],
[0.135066, 0.544853, 0.554029],
[0.133743, 0.548535, 0.553541],
[0.132444, 0.552216, 0.553018],
[0.131172, 0.555899, 0.552459],
[0.129933, 0.559582, 0.551864],
[0.128729, 0.563265, 0.551229],
[0.127568, 0.566949, 0.550556],
[0.126453, 0.570633, 0.549841],
[0.125394, 0.574318, 0.549086],
[0.124395, 0.578002, 0.548287],
[0.123463, 0.581687, 0.547445],
[0.122606, 0.585371, 0.546557],
[0.121831, 0.589055, 0.545623],
[0.121148, 0.592739, 0.544641],
[0.120565, 0.596422, 0.543611],
[0.120092, 0.600104, 0.542530],
[0.119738, 0.603785, 0.541400],
[0.119512, 0.607464, 0.540218],
[0.119423, 0.611141, 0.538982],
[0.119483, 0.614817, 0.537692],
[0.119699, 0.618490, 0.536347],
[0.120081, 0.622161, 0.534946],
[0.120638, 0.625828, 0.533488],
[0.121380, 0.629492, 0.531973],
[0.122312, 0.633153, 0.530398],
[0.123444, 0.636809, 0.528763],
[0.124780, 0.640461, 0.527068],
[0.126326, 0.644107, 0.525311],
[0.128087, 0.647749, 0.523491],
[0.130067, 0.651384, 0.521608],
[0.132268, 0.655014, 0.519661],
[0.134692, 0.658636, 0.517649],
[0.137339, 0.662252, 0.515571],
[0.140210, 0.665859, 0.513427],
[0.143303, 0.669459, 0.511215],
[0.146616, 0.673050, 0.508936],
[0.150148, 0.676631, 0.506589],
[0.153894, 0.680203, 0.504172],
[0.157851, 0.683765, 0.501686],
[0.162016, 0.687316, 0.499129],
[0.166383, 0.690856, 0.496502],
[0.170948, 0.694384, 0.493803],
[0.175707, 0.697900, 0.491033],
[0.180653, 0.701402, 0.488189],
[0.185783, 0.704891, 0.485273],
[0.191090, 0.708366, 0.482284],
[0.196571, 0.711827, 0.479221],
[0.202219, 0.715272, 0.476084],
[0.208030, 0.718701, 0.472873],
[0.214000, 0.722114, 0.469588],
[0.220124, 0.725509, 0.466226],
[0.226397, 0.728888, 0.462789],
[0.232815, 0.732247, 0.459277],
[0.239374, 0.735588, 0.455688],
[0.246070, 0.738910, 0.452024],
[0.252899, 0.742211, 0.448284],
[0.259857, 0.745492, 0.444467],
[0.266941, 0.748751, 0.440573],
[0.274149, 0.751988, 0.436601],
[0.281477, 0.755203, 0.432552],
[0.288921, 0.758394, 0.428426],
[0.296479, 0.761561, 0.424223],
[0.304148, 0.764704, 0.419943],
[0.311925, 0.767822, 0.415586],
[0.319809, 0.770914, 0.411152],
[0.327796, 0.773980, 0.406640],
[0.335885, 0.777018, 0.402049],
[0.344074, 0.780029, 0.397381],
[0.352360, 0.783011, 0.392636],
[0.360741, 0.785964, 0.387814],
[0.369214, 0.788888, 0.382914],
[0.377779, 0.791781, 0.377939],
[0.386433, 0.794644, 0.372886],
[0.395174, 0.797475, 0.367757],
[0.404001, 0.800275, 0.362552],
[0.412913, 0.803041, 0.357269],
[0.421908, 0.805774, 0.351910],
[0.430983, 0.808473, 0.346476],
[0.440137, 0.811138, 0.340967],
[0.449368, 0.813768, 0.335384],
[0.458674, 0.816363, 0.329727],
[0.468053, 0.818921, 0.323998],
[0.477504, 0.821444, 0.318195],
[0.487026, 0.823929, 0.312321],
[0.496615, 0.826376, 0.306377],
[0.506271, 0.828786, 0.300362],
[0.515992, 0.831158, 0.294279],
[0.525776, 0.833491, 0.288127],
[0.535621, 0.835785, 0.281908],
[0.545524, 0.838039, 0.275626],
[0.555484, 0.840254, 0.269281],
[0.565498, 0.842430, 0.262877],
[0.575563, 0.844566, 0.256415],
[0.585678, 0.846661, 0.249897],
[0.595839, 0.848717, 0.243329],
[0.606045, 0.850733, 0.236712],
[0.616293, 0.852709, 0.230052],
[0.626579, 0.854645, 0.223353],
[0.636902, 0.856542, 0.216620],
[0.647257, 0.858400, 0.209861],
[0.657642, 0.860219, 0.203082],
[0.668054, 0.861999, 0.196293],
[0.678489, 0.863742, 0.189503],
[0.688944, 0.865448, 0.182725],
[0.699415, 0.867117, 0.175971],
[0.709898, 0.868751, 0.169257],
[0.720391, 0.870350, 0.162603],
[0.730889, 0.871916, 0.156029],
[0.741388, 0.873449, 0.149561],
[0.751884, 0.874951, 0.143228],
[0.762373, 0.876424, 0.137064],
[0.772852, 0.877868, 0.131109],
[0.783315, 0.879285, 0.125405],
[0.793760, 0.880678, 0.120005],
[0.804182, 0.882046, 0.114965],
[0.814576, 0.883393, 0.110347],
[0.824940, 0.884720, 0.106217],
[0.835270, 0.886029, 0.102646],
[0.845561, 0.887322, 0.099702],
[0.855810, 0.888601, 0.097452],
[0.866013, 0.889868, 0.095953],
[0.876168, 0.891125, 0.095250],
[0.886271, 0.892374, 0.095374],
[0.896320, 0.893616, 0.096335],
[0.906311, 0.894855, 0.098125],
[0.916242, 0.896091, 0.100717],
[0.926106, 0.897330, 0.104071],
[0.935904, 0.898570, 0.108131],
[0.945636, 0.899815, 0.112838],
[0.955300, 0.901065, 0.118128],
[0.964894, 0.902323, 0.123941],
[0.974417, 0.903590, 0.130215],
[0.983868, 0.904867, 0.136897],
[0.993248, 0.906157, 0.143936]]
# Registry of named colormaps: values are either ready-made colormap
# instances or colormap classes (instantiated lazily by get_colormap()
# with user-supplied arguments).
_colormaps = dict(
    # Some colormap presets
    autumn=Colormap([(1., 0., 0., 1.), (1., 1., 0., 1.)]),
    blues=Colormap([(1., 1., 1., 1.), (0., 0., 1., 1.)]),
    cool=Colormap([(0., 1., 1., 1.), (1., 0., 1., 1.)]),
    greens=Colormap([(1., 1., 1., 1.), (0., 1., 0., 1.)]),
    reds=Colormap([(1., 1., 1., 1.), (1., 0., 0., 1.)]),
    spring=Colormap([(1., 0., 1., 1.), (1., 1., 0., 1.)]),
    summer=Colormap([(0., .5, .4, 1.), (1., 1., .4, 1.)]),
    fire=_Fire(),
    grays=_Grays(),
    hot=_Hot(),
    ice=_Ice(),
    winter=_Winter(),
    light_blues=_SingleHue(),
    orange=_SingleHue(hue=35),
    # The viridis table is subsampled ([::4]) for GPU compatibility; see
    # the comment above _viridis_data.
    viridis=Colormap(ColorArray(_viridis_data[::4])),
    # Diverging presets
    coolwarm=Colormap(ColorArray(
        [
            (226, 0.59, 0.92), (222, 0.44, 0.99), (218, 0.26, 0.97),
            (30, 0.01, 0.87),
            (20, 0.3, 0.96), (15, 0.5, 0.95), (8, 0.66, 0.86)
        ],
        color_space="hsv"
    )),
    PuGr=_Diverging(145, 280, 0.85, 0.30),
    GrBu=_Diverging(255, 133, 0.75, 0.6),
    GrBu_d=_Diverging(255, 133, 0.75, 0.6, "dark"),
    RdBu=_Diverging(220, 20, 0.75, 0.5),
    # Configurable colormaps (classes, instantiated on demand)
    cubehelix=CubeHelixColormap,
    single_hue=_SingleHue,
    hsl=_HSL,
    husl=_HUSL,
    diverging=_Diverging,
    RdYeBuCy=_RedYellowBlueCyan,
)
def get_colormap(name, *args, **kwargs):
    """Obtain a colormap

    Some colormaps can have additional configuration parameters. Refer to
    their corresponding documentation for more information.

    Parameters
    ----------
    name : str | Colormap
        Colormap name. Can also be a Colormap for pass-through.

    Examples
    --------
    >>> get_colormap('autumn')
    >>> get_colormap('single_hue', hue=10)
    """
    if isinstance(name, BaseColormap):
        cmap = name
    elif not isinstance(name, string_types):
        raise TypeError('colormap must be a Colormap or string name')
    elif name not in _colormaps:
        raise KeyError('colormap name %s not found' % name)
    else:
        cmap = _colormaps[name]

    # Configurable colormaps are registered as classes; instantiate here.
    if inspect.isclass(cmap):
        cmap = cmap(*args, **kwargs)
    return cmap
def get_colormaps():
    """Return a copy of the colormap registry.

    Despite the historical name, this is a dict mapping colormap names to
    colormap instances (or classes, for the configurable colormaps).
    """
    return _colormaps.copy()
| 37.861773 | 108 | 0.532543 |
7a1ebe162f06b2597b3f259e65253818a4eb6453 | 4,397 | py | Python | system_metrics_collector/test/base_metrics_test.py | ros-tooling/system_metrics_collector | 59acb990b3007210c11138ca22d34bb4a892cc05 | [
"Apache-2.0"
] | 19 | 2019-10-30T23:57:46.000Z | 2021-06-11T09:24:20.000Z | system_metrics_collector/test/base_metrics_test.py | ros-tooling/system_metrics_collector | 59acb990b3007210c11138ca22d34bb4a892cc05 | [
"Apache-2.0"
] | 192 | 2019-11-04T17:32:06.000Z | 2021-06-09T17:08:20.000Z | system_metrics_collector/test/base_metrics_test.py | ros-tooling/system_metrics_collector | 59acb990b3007210c11138ca22d34bb4a892cc05 | [
"Apache-2.0"
] | 9 | 2019-12-10T13:02:39.000Z | 2022-01-11T20:20:16.000Z | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from pathlib import Path
from threading import Lock
from typing import Iterable
from typing import Set
import unittest
from ament_index_python.packages import get_package_share_directory
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
from lifecycle_msgs.msg import State
import rclpy
from rclpy.task import Future
import retrying
import ros2lifecycle.api
import ros2node.api
from statistics_msgs.msg import MetricsMessage
def include_python_launch_file(package: str, launchfile: str) -> IncludeLaunchDescription:
    """Build an IncludeLaunchDescription for a Python launch file located in
    the given package's share directory."""
    package_dir = Path(get_package_share_directory(package))
    launchfile_path = str(package_dir / launchfile)
    return IncludeLaunchDescription(PythonLaunchDescriptionSource(launchfile_path))
class TestMetricsBase(unittest.TestCase):
    """Base class of assertions about system-metrics nodes, topics and
    statistic publication, for launch-based integration tests."""

    @classmethod
    def setUpClass(cls):
        # One shared rclpy context and node for all tests in the class.
        rclpy.init()
        cls.node = rclpy.create_node('test_system_metrics_nodes')

    @classmethod
    def tearDownClass(cls):
        cls.node.destroy_node()
        rclpy.shutdown()

    # Retried with exponential backoff: the nodes under test may still be
    # starting up when the assertion first runs.
    @retrying.retry(
        stop_max_attempt_number=10,
        wait_exponential_multiplier=1000,
        wait_exponential_max=10000)
    def _test_nodes_exist(self, expected_nodes: Set[str]):
        # Assert every expected node name is visible on the ROS graph.
        node_names = ros2node.api.get_node_names(node=self.node)
        full_names = {n.full_name for n in node_names}
        self.assertTrue(expected_nodes.issubset(full_names))

    @retrying.retry(
        stop_max_attempt_number=10,
        wait_exponential_multiplier=1000,
        wait_exponential_max=10000)
    def _test_lifecycle_nodes_exist(self, expected_nodes: Set[str]) -> None:
        # Same as above, but only considering lifecycle-managed nodes.
        node_names = ros2lifecycle.api.get_node_names(node=self.node)
        full_names = {n.full_name for n in node_names}
        self.assertTrue(expected_nodes.issubset(full_names))

    def _test_lifecycle_nodes_active(self, expected_lifecycle_nodes: Iterable[str]) -> None:
        # Assert every listed lifecycle node is in the ACTIVE primary state.
        states = ros2lifecycle.api.call_get_states(
            node=self.node,
            node_names=expected_lifecycle_nodes)
        self.assertTrue(all(s.id == State.PRIMARY_STATE_ACTIVE for s in states.values()))

    def _test_topic_exists(self, topic_name: str) -> None:
        # Assert the topic exists and only carries MetricsMessage.
        topics_and_types = self.node.get_topic_names_and_types()
        found = False
        for name, types in topics_and_types:
            if name == topic_name:
                found = True
                assert all(t == 'statistics_msgs/msg/MetricsMessage' for t in types)
        self.assertTrue(found, f'No topic named {topic_name}')

    def _test_statistic_publication(self, topic_name: str, expected_nodes: Iterable[str]):
        # Subscribe to the statistics topic and wait until every expected
        # node has published at least `expected_messages_per_node` messages.
        future = Future()
        message_counter = Counter()
        lock = Lock()
        # arbitrary choice, just tells if it's working for a little while
        expected_messages_per_node = 3
        # generous timeout; stats are published periodically, so this should
        # normally complete well before it expires
        timeout_sec = 180

        def message_callback(msg):
            node_name = '/' + msg.measurement_source_name
            with lock:
                message_counter[node_name] += 1
                if all(
                    message_counter[node] >= expected_messages_per_node
                    for node in expected_nodes
                ):
                    print('Successfully received all expected messages')
                    future.set_result(True)

        sub = self.node.create_subscription(
            MetricsMessage, topic_name, message_callback, qos_profile=10)
        rclpy.spin_until_future_complete(self.node, future, timeout_sec=timeout_sec)
        self.assertTrue(future.done(), f'Timed out, received message count: {message_counter}')
        self.node.destroy_subscription(sub)
| 40.33945 | 95 | 0.710939 |
26c09b34b56db7f4dd497357ea42bafa8669694d | 2,496 | py | Python | ZimbraBuild/support/gobuild/helpers/maven.py | fciubotaru/z-pec | 82335600341c6fb1bb8a471fd751243a90bc4d57 | [
"MIT"
] | 5 | 2019-03-26T07:51:56.000Z | 2021-08-30T07:26:05.000Z | ZimbraBuild/support/gobuild/helpers/maven.py | fciubotaru/z-pec | 82335600341c6fb1bb8a471fd751243a90bc4d57 | [
"MIT"
] | 1 | 2015-08-18T19:03:32.000Z | 2015-08-18T19:03:32.000Z | ZimbraBuild/support/gobuild/helpers/maven.py | fciubotaru/z-pec | 82335600341c6fb1bb8a471fd751243a90bc4d57 | [
"MIT"
] | 13 | 2015-03-11T00:26:35.000Z | 2020-07-26T16:25:18.000Z | # Copyright 2008 VMware, Inc. All rights reserved. -- VMware Confidential
"""
Helpers for maven-based targets.
"""
import os
import helpers.target
class MavenHelper:
    """
    Helper class for targets that build with maven.
    """

    def _Command(self, hosttype, target, mavenversion='2.0.8',
                 mavenoptions=None, **systemproperties):
        """
        Return the command line (a string) that invokes maven with the
        standard gobuild flags.

        Parameters
        ----------
        hosttype : str
            Build host type; a 'windows*' host uses mvn.bat from TCROOT,
            everything else uses the unix toolchain maven.
        target : str or None
            Maven goal appended to the command line (omitted if falsy).
        mavenversion : str
            Toolchain maven version to invoke. Defaults to '2.0.8'.
        mavenoptions : dict or None
            Extra raw maven command-line options; a value of None emits
            the option without an '=value' part.
        **systemproperties
            -D system properties which override the gobuild defaults below.
        """
        def q(s):
            # Wrap a value in double quotes for the shell.
            return '"%s"' % s

        # Fix: default to None instead of {} to avoid the shared
        # mutable-default-argument pitfall.
        if mavenoptions is None:
            mavenoptions = {}

        # Standard gobuild -D properties; the %(...) placeholders are
        # expanded later by gobuild itself.
        defaults = {
            'GOBUILD_OFFICIAL_BUILD': '1',
            'GOBUILD_AUTO_COMPONENTS': 'false',
            'OBJDIR': q('%(buildtype)'),
            'RELTYPE': q('%(releasetype)'),
            'BUILD_NUMBER': q('%(buildnumber)'),
            'PRODUCT_BUILD_NUMBER': q('%(productbuildnumber)'),
            'CHANGE_NUMBER': q('%(changenumber)'),
            'BRANCH_NAME': q('%(branch)'),
            'PUBLISH_DIR': q('%(buildroot)/publish'),
            'BUILDLOG_DIR': q('%(buildroot)/logs'),
            'REMOTE_COPY_SCRIPT': q('%(gobuildc) %(buildid)'),
        }

        # Add a GOBUILD_*_ROOT flag for every component we depend on.
        if hasattr(self, 'GetComponentDependencies'):
            for d in self.GetComponentDependencies():
                d = d.replace('-', '_')
                defaults['GOBUILD_%s_ROOT' % d.upper()] = \
                    '%%(gobuild_component_%s_root)' % d

        # Override the defaults above with the systemproperties passed in by
        # the client.
        defaults.update(systemproperties)

        # Choose the maven binary for the host type.
        if hosttype.startswith('windows'):
            tcroot = os.environ.get('TCROOT', 'C:/TCROOT-not-set')
            mavencmd = '%s/noarch/apache-maven-%s/bin/mvn.bat' % (tcroot, mavenversion)
        else:
            mavencmd = '/build/toolchain/noarch/apache-maven-%s/bin/mvn' % mavenversion

        # Create the command line to invoke maven. Keys are sorted so the
        # generated command line is deterministic.
        options = ''
        for k in sorted(defaults.keys()):
            v = defaults[k]
            options += ' -D' + str(k)
            if v is not None:
                options += '=' + str(v)
        for k in sorted(mavenoptions.keys()):
            v = mavenoptions[k]
            options += ' ' + str(k)
            if v is not None:
                options += '=' + str(v)
        cmd = '%s %s' % (mavencmd, options)
        if target:
            cmd += ' ' + str(target)
        return cmd
| 33.28 | 99 | 0.526843 |
14c9c6956e2ca318196214f3b0ea7fd29b02440a | 1,126 | py | Python | btpp/migrations/0001_initial.py | yvesjordan06/hiro_django | 24ccfdfaf9258bd254d031563190a45a0d837a02 | [
"MIT"
] | null | null | null | btpp/migrations/0001_initial.py | yvesjordan06/hiro_django | 24ccfdfaf9258bd254d031563190a45a0d837a02 | [
"MIT"
] | null | null | null | btpp/migrations/0001_initial.py | yvesjordan06/hiro_django | 24ccfdfaf9258bd254d031563190a45a0d837a02 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.7 on 2019-11-22 18:41
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the ``btpp`` app.

    Creates the ``Tache`` and ``Annonce`` models; an ``Annonce`` carries a
    publication timestamp and a many-to-many link to its taches.

    Auto-generated by Django; do not edit the operations by hand — create a
    follow-up migration instead.
    """
    # First migration of the app: no prior migration to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Tache',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('intitule', models.CharField(max_length=200)),
                ('description', models.CharField(max_length=500)),
            ],
        ),
        migrations.CreateModel(
            name='Annonce',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('intitule', models.CharField(max_length=200)),
                ('description', models.CharField(max_length=500)),
                # default=timezone.now stores the creation time when unset
                ('date_post', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date published')),
                ('taches', models.ManyToManyField(to='btpp.Tache')),
            ],
        ),
    ]
| 33.117647 | 118 | 0.581705 |
47ba48f7e200884893f11a83809fea11ff24f651 | 1,334 | py | Python | setup.py | zcoinofficial/boltzmann | 3c04a7609eecf0e2aa0e859ebe9625a0054124cd | [
"MIT"
] | 3 | 2018-05-13T12:48:55.000Z | 2019-11-13T04:17:28.000Z | setup.py | zcoinofficial/boltzmann | 3c04a7609eecf0e2aa0e859ebe9625a0054124cd | [
"MIT"
] | null | null | null | setup.py | zcoinofficial/boltzmann | 3c04a7609eecf0e2aa0e859ebe9625a0054124cd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup, find_packages
from setuptools.command.build_ext import build_ext as _build_ext
class build_ext(_build_ext):
    """build_ext that defers the numpy include-dir lookup to build time,
    so importing setup.py does not require numpy to be installed yet."""

    def finalize_options(self):
        _build_ext.finalize_options(self)
        # Prevent numpy from thinking it is still in its setup process.
        # Use the builtins module rather than the __builtins__ name: the
        # latter is only guaranteed to be a module in __main__ and may be
        # a plain dict here, which would make attribute assignment fail.
        import builtins
        builtins.__NUMPY_SETUP__ = False
        import numpy
        self.include_dirs.append(numpy.get_include())
# Package metadata gathered in one place and handed to setup() below.
_metadata = dict(
    name='boltzmann',
    packages=find_packages(),
    version='0.0.1',
    description='A python script computing the entropy of Bitcoin transactions and the linkability of their inputs and outputs',
    author='laurentmt',
    author_email='llll@lll.com',
    maintainer='laurentmt',
    url='https://www.github.com/LaurentMT/boltzmann',
    download_url='https://www.github.com/LaurentMT/boltzmann/tarball/0.0.1',
    keywords=['bitcoin', 'privacy'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.3',
        'Topic :: Security',
    ],
    # numpy's include dirs are resolved lazily by the custom build_ext
    cmdclass={'build_ext': build_ext},
    install_requires=[
        'numpy >= 1.8.0',
        'sortedcontainers',
        'python-bitcoinrpc',
        'mpmath',
        'sympy',
    ],
)

setup(**_metadata)
84525c07c2ea00740eb7547cbabc6c29bf693d6f | 19,059 | py | Python | legacy/modTurb.py | andreagalle/pigcasso | e9b60fb595ba6bd2c402a5b4b16665d4d41fa748 | [
"MIT"
] | null | null | null | legacy/modTurb.py | andreagalle/pigcasso | e9b60fb595ba6bd2c402a5b4b16665d4d41fa748 | [
"MIT"
] | 4 | 2020-06-13T09:17:15.000Z | 2020-06-17T16:26:03.000Z | legacy/modTurb.py | andreagalle/pigcasso | e9b60fb595ba6bd2c402a5b4b16665d4d41fa748 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 7 08:08:12 2016
@author: a.grebert
"""
import vtk
import draft as fct
import numpy as np
import string
import os, sys, shutil
import matplotlib.pyplot as plt
import modUtils as util
import numpy.linalg as la
def get1DSpectrum(myArray, Dz):
    """Compute a one-dimensional power spectrum of ``myArray``.

    The signal is autocorrelated (full mode, non-negative lags kept), then
    the squared magnitude of the real FFT of the autocorrelation is
    returned together with the corresponding spatial frequencies.

    Parameters
    ----------
    myArray : 1D array of floats
        Signal to analyse (e.g. a fluctuating velocity component).
    Dz : float
        Uniform sample spacing of ``myArray``.

    Returns
    -------
    [kz, Euu, Ruu] : list
        kz  -- spatial frequencies, sorted ascending,
        Euu -- power spectrum |rfft(Ruu)|**2 ordered like kz,
        Ruu -- autocorrelation of the signal for lags >= 0.
    """
    Ruu = np.correlate(myArray, myArray, mode='full')
    # keep the non-negative lags only (the autocorrelation is symmetric);
    # integer division so the index is valid under Python 3 as well
    Ruu = Ruu[Ruu.size // 2:]
    Euu = np.abs(np.fft.rfft(Ruu))**2
    kz = np.fft.rfftfreq(Ruu.size, Dz)
    # rfftfreq already yields ascending frequencies; sort defensively
    idx = np.argsort(kz)
    return [kz[idx], Euu[idx], Ruu]
def getTurbulenceTriangle(data_outVTK, wall_outVTK, cord_choice, x_perc, Uinf=None, Rhoinf=None):
    """
    Compute anisotropy-invariant ("turbulence/Lumley triangle") quantities
    of the Reynolds stress tensor along the wall-normal line at relative
    chord position x_perc.

    Parameters
    ----------
    data_outVTK : VTK output object from a VTK reader
        2D field data; must carry RHO_AVG, MU_LAM_AVG, U_RMS and U_REY.
    wall_outVTK : VTK output object from a VTK reader
        This field is supposed to describe a curve with 1D cell type, like in getLineTangentialVector().
    cord_choice : integer, 0, 1 or 2
        Gives the axis that is going to be used to define the point where the velocity
        profile is extracted. Convention :
        - 0 = x axis
        - 1 = y axis
        - 2 = z axis
    x_perc : float between 0 and 1
        Gives the percentage of the relative cord along the axis defined with cord_choice.
    Uinf : float
        (optional) Free stream velocity
    Rhoinf : float
        (optional) Free stream density

    Returns
    -------
    ori : tuple(3)
        The point coordinates of the point at the wall where we extract the data.
    y_coo : vector of floats
        dimensional wall-normal coordinates at x_perc.
    yp : vector of floats
        dimensionless (viscous units) wall normal coordinates at x_perc.
    Ei, Ni : vectors of floats
        anisotropy-invariant map coordinates, Ni = sqrt(-IIb/3) and
        Ei = (|IIIb|/2)**(1/3).
    IIIb, IIb : vectors of floats
        third and second invariants of the anisotropy tensor b_ij
        (diagonal contributions only, see note in the loop below).
    """
    # function display
    # NOTE(review): banner says getRMSAtPosition although this function is
    # getTurbulenceTriangle (runtime string left untouched).
    print '---- DAEPy::getRMSAtPosition ----'
    # test if the field RHO_AVG is present
    if data_outVTK.GetPointData().HasArray('RHO_AVG')!=1:
        raise ValueError("Error : field RHO_AVG not present")
    # test if the field MU_LAM_AVG is present
    if data_outVTK.GetPointData().HasArray('MU_LAM_AVG')!=1:
        raise ValueError("Error : field MU_LAM_AVG not present")
    # test if the field U_RMS is present
    if data_outVTK.GetPointData().HasArray('U_RMS')!=1:
        raise ValueError("Error : field U_RMS not present")
    # test if the field U_REY is present
    if data_outVTK.GetPointData().HasArray('U_REY')!=1:
        raise ValueError("Error : field U_REY not present")
    # get the vector used to slice the field data
    [ori, vec_tan] = fct.getLineTangentialVector(wall_outVTK, x_perc, cord_choice)
    # slice the 2D field data using the tangential vector to the wall
    data1D = fct.getSlice(data_outVTK, ori, vec_tan)
    # extract the tangential velocity
    [Vcoords, Urms, Urey, Rho, Mu] = fct.getArrayFromPointData(data1D, ['U_RMS','U_REY','RHO_AVG','MU_LAM_AVG'])
    # define a new scalar coordinates along the line orthogonal to the wall
    y_coo = np.array(fct.getScalarCoord(Vcoords, 1))
    id_sort = y_coo.argsort()
    # sort cordinates along the line orthogonal to the wall
    y_coo = y_coo[id_sort]
    Rho = Rho[id_sort]
    Mu = Mu[id_sort]
    [_, Cf, Utau, _, _] = fct.getSkinFrictionAtPosition(data_outVTK, wall_outVTK, cord_choice, x_perc, Uinf, Rhoinf)
    # sort the RMS and REY quantities according to the coordinates y_coo. Normalisation using Utau is also performed
    Urms = (Urms[id_sort]**2).T
    Urey = (Urey[id_sort]).T
    # wall-normal coordinate in viscous units (wall values of rho and mu)
    yp = y_coo*Utau*Rho[0]/Mu[0]
    IIb = np.zeros((len(yp)))
    IIIb = np.zeros((len(yp)))
    Ni = np.zeros((len(yp)))
    Ei = np.zeros((len(yp)))
    for idy in range(len(yp)):
        # k is the sum of the three normal stresses at this wall distance
        k = (Urms[0,idy]+Urms[1,idy]+Urms[2,idy])
        # anisotropy tensor b_ij built from RMS (diagonal) and REY
        # (off-diagonal) components; assumes U_REY components are ordered
        # (Ryz, Rzx, Rxy) -- TODO confirm against the solver output
        bij = np.matrix(([[Urms[0,idy]/k-1/3., Urey[2,idy]/k,Urey[1,idy]/k],\
                    [Urey[2,idy]/k, Urms[1,idy]/k-1/3., Urey[0,idy]/k],\
                    [Urey[1,idy]/k, Urey[0,idy]/k, Urms[2,idy]/k-1/3.]]))
        # NOTE(review): only the diagonal of bij feeds the invariants below,
        # so the off-diagonal (shear) contributions are ignored -- confirm
        # that is the intended definition of IIb/IIIb here.
        IIb[idy] = -(bij[0,0]**2+bij[1,1]**2+bij[2,2]**2)/2.
        IIIb[idy] = (bij[0,0]**3+bij[1,1]**3+bij[2,2]**3)/3.
        Ni[idy] = np.sqrt(-1./3.*IIb[idy])
        Ei[idy] = np.power(np.abs(IIIb[idy])*0.5,1./3.)
    # function output
    print ''
    return [ori, y_coo, yp, Ei, Ni, IIIb, IIb]
def getStressAtPosition(data_outVTK, wall_outVTK, cord_choice, x_perc, Uinf=None, Rhoinf=None):
    """
    Extract turbulent and viscous shear-stress profiles along the
    wall-normal line at relative chord position x_perc.

    Returns
    -------
    [ori, yp, Rey, dudy, Tw] : list
        ori  -- wall point where the profile is extracted,
        yp   -- wall-normal coordinate in viscous (plus) units,
        Rey  -- -rho times the third U_REY component (presumably <u'v'>
                per the (Ryz, Rzx, Rxy) ordering -- TODO confirm),
        dudy -- (mu + mu_sgs) * dUt/dy profile,
        Tw   -- wall shear stress data on the sliced wall line.
    """
    # function display
    # NOTE(review): banner says getRMSAtPosition although this function is
    # getStressAtPosition (runtime string left untouched).
    print '---- DAEPy::getRMSAtPosition ----'
    # test if the field RHO_AVG is present
    if data_outVTK.GetPointData().HasArray('RHO_AVG')!=1:
        raise ValueError("Error : field RHO_AVG not present")
    # test if the field MU_LAM_AVG is present
    if data_outVTK.GetPointData().HasArray('MU_LAM_AVG')!=1:
        raise ValueError("Error : field MU_LAM_AVG not present")
    # test if the field U_RMS is present
    if data_outVTK.GetPointData().HasArray('U_RMS')!=1:
        raise ValueError("Error : field U_RMS not present")
    # test if the field U_REY is present
    if data_outVTK.GetPointData().HasArray('U_REY')!=1:
        raise ValueError("Error : field U_REY not present")
    # test if the field TauWallAvg is present
    if wall_outVTK.GetPointData().HasArray('TauWallAvg')!=1:
        raise ValueError("Error : field TauWallAvg not present")
    # get the vector used to slice the field data
    [ori, vec_tan] = fct.getLineTangentialVector(wall_outVTK, x_perc, cord_choice)
    # slice the 2D field data using the tangential vector to the wall
    data1D = fct.getSlice(data_outVTK, ori, vec_tan)
    wall1D = fct.getSlice(wall_outVTK, ori, vec_tan)
    # extract the shear stress
    [_, Tw] = fct.getArrayFromPointData(wall1D, ['TauWallAvg'])
    # extract the tangential velocity
    [Vcoords, U, Urms, Urey, Rho, Mu, MuSGS] = fct.getArrayFromPointData(data1D, ['U_AVG','U_RMS','U_REY','RHO_AVG','MU_LAM_AVG','MU_SGS_AVG'])
    # project the mean velocity on the wall-tangential direction
    Ut = np.sum(U*vec_tan, axis=1)
    # define a new scalar coordinates along the line orthogonal to the wall
    y_coo = np.array(fct.getScalarCoord(Vcoords, 1))
    id_sort = y_coo.argsort()
    # sort cordinates along the line orthogonal to the wall
    y_coo = y_coo[id_sort]
    Rho = Rho[id_sort]
    Mu = Mu[id_sort]
    MuSGS = MuSGS[id_sort]
    Ut = Ut[id_sort]
    U = U[id_sort]
    Urms = Urms[id_sort]**2
    Urey = Urey[id_sort]
    # friction velocity from the magnitude of the wall shear stress vector
    Utau = np.sqrt(np.sqrt(Tw[0,0]**2+Tw[0,1]**2+Tw[0,2]**2)/Rho[0])
    yp = y_coo*Utau*Rho[0]/Mu[0]
    Rey = -Rho*Urey.T[2]
    dy = np.gradient(y_coo,edge_order=2)
    # NOTE(review): the second positional argument of np.gradient is a
    # spacing/coordinate argument; passing the per-point spacing array dy
    # is interpreted as a coordinate array by modern numpy -- confirm this
    # produces the intended dUt/dy.
    dudy = np.gradient(Ut,dy,edge_order=2)
    dudy = (MuSGS+Mu)*dudy
    # print Mu
    # data_outVTK = computeVarGradient(data_outVTK,'U_AVG')
    # data1D = fct.getSlice(data_outVTK, ori, vec_tan)
    # [Vcoords, GradU] = fct.getArrayFromPointData(data1D, ['GRAD_U_AVG'])
    # y_coo = np.array(fct.getScalarCoord(Vcoords, 1))
    # id_sort = y_coo.argsort()
    # GradU = GradU[id_sort].T
    # dudy = Mu*GradU[1]
    # function output
    print ''
    return [ori, yp, Rey, dudy, Tw]
def computeVarGradient(data_outVTK, var):
    """Append the gradient of point-data field *var* to the dataset.

    The gradient is stored under the name 'GRAD_<var>' and the filtered
    VTK output object is returned.
    """
    if data_outVTK.GetPointData().HasArray(var) != 1:
        raise ValueError("Error : field %s not present" % var)
    grad_filter = vtk.vtkGradientFilter()
    grad_filter.SetInputData(data_outVTK)
    grad_filter.SetInputArrayToProcess(0, 0, 0, 0, var)
    grad_filter.SetResultArrayName('GRAD_%s' % var)
    grad_filter.Update()
    return grad_filter.GetOutput()
def getPressureGradientAtPosition(data_outVTK, wall_outVTK, cord_choice, x_perc, y_perc, var, Uinf=None, Rhoinf=None):
    """
    Compute the normalised gradient of *var* (a Clauser-like parameter
    beta = (deltaS/|tau_w|) * dvar/dx_i) at one chordwise station,
    sampled at the relative wall-normal position y_perc.

    Requires that computeVarGradient() was already applied so that the
    'GRAD_<var>' point-data field exists.

    Returns [ori, BetaX, BetaY, P, scaling]; note that ori[1] is replaced
    by the y coordinate of the sampled point before returning.
    """
    # test if the gradient field is present
    # NOTE(review): the error message mentions U_REY but the check is on
    # GRAD_<var> (runtime string left untouched).
    if data_outVTK.GetPointData().HasArray('GRAD_%s'%var)!=1:
        raise ValueError("Error : field U_REY not present")
    # test if the field TauWallAvg is present
    if wall_outVTK.GetPointData().HasArray('TauWallAvg')!=1:
        raise ValueError("Error : field TauWallAvg not present")
    # get the vector used to slice the field data
    [ori, vec_tan] = fct.getLineTangentialVector(wall_outVTK, x_perc, cord_choice)
    # slice the 2D field data using the tangential vector to the wall
    wall1D = fct.getSlice(wall_outVTK, ori, vec_tan)
    # extract the shear stress
    [_, Tw] = fct.getArrayFromPointData(wall1D, ['TauWallAvg'])
    data1D = fct.getSlice(data_outVTK, ori, vec_tan)
    [Vcoords, GradP, P] = fct.getArrayFromPointData(data1D, ['GRAD_%s'%var,var])
    # define a new scalar coordinates along the line orthogonal to the wall
    y_coo = np.array(fct.getScalarCoord(Vcoords, 1))
    id_sort = y_coo.argsort()
    y_coo = y_coo[id_sort]
    GradP = GradP[id_sort].T
    P = P[id_sort]
    # deltaS is presumably the displacement thickness -- confirm against
    # getDeltaAtPosition()
    [_, _, deltaS, _] = fct.getDeltaAtPosition(data_outVTK, wall_outVTK, cord_choice, x_perc, Uinf, Rhoinf)
    magTw = np.sqrt(Tw[0,0]**2+Tw[0,1]**2+Tw[0,2]**2)
    # index of the point closest to the requested relative height
    y_pos = util.find_nearest(y_coo, y_perc*(y_coo[-1]-y_coo[0]))
    scaling = (deltaS/magTw)
    BetaX = (deltaS/magTw)*GradP[0][y_pos]
    BetaY = (deltaS/magTw)*GradP[1][y_pos]
    # report the actual sampling height in the returned wall point
    ori[1] = y_coo[y_pos]
    return [ori, BetaX, BetaY, P[y_pos], scaling]
def getPressureGradientBetweenPosition(data_outVTK, wall_outVTK, cord_choice, x_p0, x_p1, Npts, y_perc, var, Uinf=None, Rhoinf=None):
    """Sample the normalised pressure-gradient parameter at Npts equally
    spaced chordwise stations between x_p0 and x_p1.

    Returns [pos, Beta.T, P, scaling]: the sampled points, the two beta
    components (shape (2, Npts)), the field value and the scaling factor
    at each station.
    """
    # function display
    print('---- DAEPy::getPressureGradientBetweenPosition ----')
    # make sure the GRAD_<var> field is available for the per-station calls
    data_outVTK = computeVarGradient(data_outVTK, var)
    pos = np.zeros((Npts, 3))
    Beta = np.zeros((Npts, 2))
    P = np.zeros(Npts)
    scaling = np.zeros(Npts)
    step = (x_p1 - x_p0) / (Npts - 1)
    for station in range(Npts):
        x_here = x_p0 + step * station
        [pos[station, :], Beta[station, 0], Beta[station, 1],
         P[station], scaling[station]] = getPressureGradientAtPosition(
            data_outVTK, wall_outVTK, cord_choice, x_here, y_perc,
            var, Uinf, Rhoinf)
    return [pos, Beta.T, P, scaling]
def getViscosity(data_outVTK, wall_outVTK, cord_choice, x_perc, Uinf=None, Rhoinf=None):
    """Extract the laminar and subgrid-scale dynamic viscosity profiles
    along the wall-normal line at relative chord position x_perc.

    Returns [ori, y_coo, yp, Mu, MuSGS]: the wall point, the dimensional
    wall-normal coordinate, the same coordinate in viscous (plus) units,
    and the sorted viscosity profiles.
    """
    # function display
    print('---- DAEPy::getViscosity ----')
    # both viscosity fields must be present in the dataset
    for required in ('MU_LAM_AVG', 'MU_SGS_AVG'):
        if data_outVTK.GetPointData().HasArray(required) != 1:
            raise ValueError("Error : field %s not present" % required)
    # slice along the wall-normal direction at the requested station
    [ori, vec_tan] = fct.getLineTangentialVector(wall_outVTK, x_perc, cord_choice)
    data1D = fct.getSlice(data_outVTK, ori, vec_tan)
    [Vcoords, Mu, MuSGS, Rho] = fct.getArrayFromPointData(
        data1D, ['MU_LAM_AVG', 'MU_SGS_AVG', 'RHO_AVG'])
    # scalar wall-normal coordinate, sorted away from the wall; reorder
    # the extracted profiles consistently
    y_coo = np.array(fct.getScalarCoord(Vcoords, 1))
    order = y_coo.argsort()
    y_coo = y_coo[order]
    Mu = Mu[order]
    MuSGS = MuSGS[order]
    Rho = Rho[order]
    # friction velocity for the viscous scaling of y
    [_, Cf, Utau, _, _] = fct.getSkinFrictionAtPosition(
        data_outVTK, wall_outVTK, cord_choice, x_perc, Uinf, Rhoinf)
    yp = y_coo * Utau * Rho[0] / Mu[0]
    return [ori, y_coo, yp, Mu, MuSGS]
def getTurbBudget(data_outVTK, wall_outVTK, cord_choice, x_perc, Uinf=None, Rhoinf=None):
    """
    Compute a (subsampled) turbulence production-like budget term along
    the wall-normal line at relative chord position x_perc.

    Returns
    -------
    [ori, yp, Pr, scaling] : list
        ori     -- wall point where the profile is extracted,
        yp      -- subsampled viscous-unit wall-normal coordinates,
        Pr      -- rho * U_REY[2] * d(Urms[0]/rho)/dy on the subsampled
                   points (NOTE(review): confirm this is the intended
                   production term -- the differentiated quantity is
                   <u'u'>/rho, not a mean velocity),
        scaling -- rho_w**2 * u_tau**4 / mu_w viscous scaling.
    """
    # function display
    print '---- DAEPy::getTurbBudget ----'
    # test if the field RHO_AVG is present
    if data_outVTK.GetPointData().HasArray('RHO_AVG')!=1:
        raise ValueError("Error : field RHO_AVG not present")
    # test if the field RHO_RMS is present
    if data_outVTK.GetPointData().HasArray('RHO_RMS')!=1:
        raise ValueError("Error : field RHO_RMS not present")
    # test if the field U_AVG is present
    if data_outVTK.GetPointData().HasArray('U_AVG')!=1:
        raise ValueError("Error : field U_AVG not present")
    # test if the field U_RMS is present
    if data_outVTK.GetPointData().HasArray('U_RMS')!=1:
        raise ValueError("Error : field U_RMS not present")
    # test if the field U_REY is present
    if data_outVTK.GetPointData().HasArray('U_REY')!=1:
        raise ValueError("Error : field U_REY not present")
    # test if the field TauWallAvg is present
    if wall_outVTK.GetPointData().HasArray('TauWallAvg')!=1:
        raise ValueError("Error : field TauWallAvg not present")
    # get the vector used to slice the field data
    [ori, vec_tan] = fct.getLineTangentialVector(wall_outVTK, x_perc, cord_choice)
    # slice the 2D field data using the tangential vector to the wall
    # data1D = fct.getSlice(data_outVTK, ori, vec_tan)
    wall1D = fct.getSlice(wall_outVTK, ori, vec_tan)
    # extract the shear stress
    [_, Tw] = fct.getArrayFromPointData(wall1D, ['TauWallAvg'])
    [Vcoords, U, Urms, Urey, Rho, Rhorms, Mu] = fct.getArrayFromPointData(data_outVTK, ['U_AVG','U_RMS','U_REY','RHO_AVG','RHO_RMS','MU_LAM_AVG'])
    # subsampling window: first `end` points, every `nstep`-th point
    end = 200 ; nstep = 3
    # define a new scalar coordinates along the line orthogonal to the wall
    y_coo = np.array(fct.getScalarCoord(Vcoords, 1))
    id_sort = y_coo.argsort()
    y_coo = y_coo[id_sort]
    Urms = (Urms[id_sort]**2).T
    Urey = Urey[id_sort].T
    U = U[id_sort].T
    Rho = Rho[id_sort]
    Rhorms = Rhorms[id_sort]
    Mu = Mu[id_sort]
    # friction velocity from the wall shear stress magnitude
    Utau = np.sqrt(np.sqrt(Tw[0,0]**2+Tw[0,1]**2+Tw[0,2]**2)/Rho[0])
    yp = y_coo*Utau*Rho[0]/Mu[0]
    # vec = (Rho*U[0]+Rhorms*Urms[0])/Rho
    vec = Urms[0]/Rho
    dy = np.gradient(y_coo[0:end:nstep],edge_order=2)
    # dy = np.ediff1d(yp)
    # NOTE(review): as in getStressAtPosition, the per-point spacing array
    # dy is passed as np.gradient's spacing argument -- confirm intended.
    dudy = np.gradient(vec[0:end:nstep],dy,edge_order=2)
    Pr = Rho[0:end:nstep]*Urey[2][0:end:nstep]*dudy
    scaling = Rho[0]**2*Utau**4/Mu[0]
    return [ori, yp[0:end:nstep], Pr, scaling]
def getTurbulenceSpectrum(data_outVTK, cord_choice, x_perc):
    """
    Plot 1D spectra of the streamwise-velocity, 'pressure' and density
    fluctuations along the z direction of the supplied dataset.

    The instantaneous fields are de-meaned, autocorrelated and transformed
    by get1DSpectrum(); the normalised spectra are displayed on log-log
    axes together with a -5/3 reference slope. Returns None (plotting
    side effect only; plt.show() blocks until the window is closed).

    NOTE(review): the variable plotted as the pressure spectrum is read
    from field 'T' (temperature?) -- confirm the intended field name.
    """
    # function display
    print('---- DAEPy::getTurbulenceSpectrum ----')
    # required instantaneous/statistic fields
    if data_outVTK.GetPointData().HasArray('U_AVG')!=1:
        raise ValueError("Error : field U_AVG not present")
    if data_outVTK.GetPointData().HasArray('U_RMS')!=1:
        raise ValueError("Error : field U_RMS not present")
    if data_outVTK.GetPointData().HasArray('U_REY')!=1:
        raise ValueError("Error : field U_REY not present")
    # extract instantaneous and averaged fields
    [Vcoords, U, Uavg, Urms, Urey, Rho, Rhoavg, P] = fct.getArrayFromPointData(data_outVTK, ['U','U_AVG','U_RMS','U_REY','RHO','RHO_AVG','T'])
    # scalar coordinate along z, sorted ascending; reorder data to match
    y_coo = np.array(fct.getScalarCoord(Vcoords, 2))
    id_sort = y_coo.argsort()
    y_coo = y_coo[id_sort]
    Urms = (Urms[id_sort]**2).T
    Urey = Urey[id_sort].T
    U = U[id_sort].T
    Uavg = Uavg[id_sort].T
    Rho = Rho[id_sort]
    Rhoavg = Rhoavg[id_sort]
    P = P[id_sort]
    # fluctuations about the spatial mean
    UArray = U[0] - U[0].mean()
    RhoArray = Rho - Rho.mean()
    PArray = P - P.mean()
    # mean sample spacing along z (assumed uniform)
    Dz = np.ediff1d(y_coo).mean()
    [kp, Epp, Rpp] = get1DSpectrum(PArray, Dz)
    [kr, Err, Rrr] = get1DSpectrum(RhoArray, Dz)
    [ku, Euu, Ruu] = get1DSpectrum(UArray, Dz)
    # normalised spectra with a -5/3 reference slope
    fig = plt.figure(figsize=(10, 6))
    ax = fig.add_subplot(111)
    ax.plot(ku, Euu/Euu[1], 'b-')
    ax.plot(kp, Epp/Epp[1], 'r-')
    ax.plot(kr, Err/Err[1], 'k-')
    ax.plot(ku, 10*(ku/1000)**(-5./3.), 'k--')
    ax.set_xscale('log')
    ax.set_yscale('log')
    plt.show()
    return
def getTurbulenceTrianglePiroz(yp, Urms):
    """Compute anisotropy-invariant map quantities for a diagonal
    Reynolds-stress tensor (normal stresses only).

    Parameters
    ----------
    yp : 1D array
        Wall-normal coordinates (only its length is used here).
    Urms : array, shape (3, len(yp))
        Normal Reynolds stresses at each wall-normal position.

    Returns
    -------
    [Ei, Ni, IIIb, IIb] : list of 1D arrays
        Invariant-map coordinates Ei = (|IIIb|/2)**(1/3) and
        Ni = sqrt(-IIb/3), plus the third and second invariants of the
        anisotropy tensor b_ij = Urms_i/k - 1/3 (k = sum of the stresses).
    """
    npts = len(yp)
    IIb = np.zeros(npts)
    IIIb = np.zeros(npts)
    Ni = np.zeros(npts)
    Ei = np.zeros(npts)
    for idy in range(npts):
        k = Urms[0, idy] + Urms[1, idy] + Urms[2, idy]
        # with a diagonal tensor only the diagonal anisotropy entries
        # contribute to the invariants, so no matrix is needed
        b = [Urms[i, idy] / k - 1 / 3. for i in range(3)]
        IIb[idy] = -(b[0]**2 + b[1]**2 + b[2]**2) / 2.
        IIIb[idy] = (b[0]**3 + b[1]**3 + b[2]**3) / 3.
        Ni[idy] = np.sqrt(-1. / 3. * IIb[idy])
        Ei[idy] = np.power(np.abs(IIIb[idy]) * 0.5, 1. / 3.)
    # function output (kept for parity with the other DAEPy helpers)
    print('')
    return [Ei, Ni, IIIb, IIb]
| 32.30339 | 166 | 0.629676 |
09408f262ec3e3c104c388ff1c5ff7ff7f689cfd | 20,682 | py | Python | qubes_storage_zfs/zfs_encrypted.py | ayakael/qubes-storage-zfs | 5376af64b3b04b43aa87bfc5e420250c2b75d7ff | [
"BSD-3-Clause"
] | 1 | 2021-06-27T21:39:52.000Z | 2021-06-27T21:39:52.000Z | qubes_storage_zfs/zfs_encrypted.py | ayakael/qubes-storage-zfs | 5376af64b3b04b43aa87bfc5e420250c2b75d7ff | [
"BSD-3-Clause"
] | null | null | null | qubes_storage_zfs/zfs_encrypted.py | ayakael/qubes-storage-zfs | 5376af64b3b04b43aa87bfc5e420250c2b75d7ff | [
"BSD-3-Clause"
] | null | null | null | """
Pool backed by encrypted ZFS zvols on top of an existing zfs_zvol pool.
For a breakdown of how the encryption scheme works, see:
https://blog.heckel.io/2017/01/08/zfs-encryption-openzfs-zfs-on-linux/
"""
import asyncio
import libzfs_core
import logging
import os
import subprocess
import time
import qubes
import qubes.storage
import qubes_storage_zfs.zfs as qzfs
# TODO something that checks the unload_timeout
import functools
def locked(method):
    """Decorator running given Volume's coroutine under a lock.

    Serializes concurrent invocations of the decorated coroutine method on
    the same object via a lazily created per-instance asyncio.Lock.

    Needs to be added after wrapping with @asyncio.coroutine, for example:

    >>>@locked
    >>>@asyncio.coroutine
    >>>def start(self):
    >>>    pass

    NOTE(review): @asyncio.coroutine and yield-from coroutines were
    removed in Python 3.11 -- confirm the targeted interpreter version.
    """
    @asyncio.coroutine
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        # note that we use '_zfs_enc_lock' here and not '_lock' to prevent
        # clashing with inherited locks from the parent.
        if not hasattr(self, '_zfs_enc_lock'):
            # lazy creation is safe on a single-threaded event loop: no
            # await happens between the check and the assignment
            self._zfs_enc_lock = asyncio.Lock()
        with (yield from self._zfs_enc_lock): # pylint: disable=protected-access
            return (yield from method(self, *args, **kwargs))
    return wrapper
class ZFSQEncryptedPool(qubes.storage.Pool):
"""ZFS pool for encrypted datasets inside an existing
ZFSQPool(a.k.a. zfs_zvol)
"""
driver = "zfs_encrypted"
app_reference = None
def __repr__(self):
return "<{} at {:#x} name={!r} underlying={!r}>".format(
type(self).__name__,
id(self),
self.name,
self.zpool_name
)
async def _ask_password(self, receiving_cmd):
"""
Call out to QRexec qubes.AskPassword and passes the resulting stdout
to :receiving_cmd: when successful.
"""
if not self.app_reference:
# TODO this sucks, is there an easier way to get a reference to the
# global 'app' qubes.Qubes() instance?
self.log.warning("had no REFERENCE ETC to APP VARIABLE!!")
self.app_reference = qubes.Qubes()
pw_vm = self.app_reference.domains[self.ask_password_domain]
if not pw_vm:
raise qubes.storage.StoragePoolException(
"unable to find handle for ask_password_domain={}".format(
self.ask_password_domain
)
)
pw_pipe_in, pw_pipe_out = os.pipe()
try:
# TODO how do we pass $1 to this stuff? we can pass **kwargs to
# asyncio.create_subprocess_exec, but we can't influence command
# await pw_vm.run_service_for_stdio(
# TODO THIS used to work, now it fucking broke.
# 'qubes.AskPassword',
# autostart=True, gui=True,
# user='user',
# input=self.name.encode()+b'\n', # context for the prompt
# stdout=pw_pipe_out)
# TODO instead for now:
environ = os.environ.copy()
environ["QREXEC_REMOTE_DOMAIN"] = "dom0"
environ["DISPLAY"] = ":0"
proc = await asyncio.create_subprocess_exec(
*['sudo','-Eiu','user', '/etc/qubes-rpc/qubes.AskPassword'],
stdout=pw_pipe_out,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
close_fds=True,
env=environ,
)
proc.stdin.write(self.name.encode()+b'\n')
await proc.stdin.drain()
# TODO flush aka drain+write_eof() apparently, wtf python
proc.stdin.write_eof()
await proc.wait()
except subprocess.CalledProcessError as e:
# TODO os.close(pw_pipe_in pw_pipe_out)
os.close(pw_pipe_in)
os.close(pw_pipe_out)
self.log.warning(
"zfs ask_password: exception while trying to get pw: {}".format(
e
)
)
raise e
environ = os.environ.copy()
environ["LC_ALL"] = "C.utf8"
p = await asyncio.create_subprocess_exec(
*receiving_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=pw_pipe_in,
close_fds=True,
env=environ
)
zfsout, zfserr = await p.communicate()
self.log.warning("ZFS key consumer: ".format(
p.returncode, zfsout, zfserr))
os.close(pw_pipe_in)
os.close(pw_pipe_out)
# TODO make sure zfs get keystat foo == 'available'
# and zfs get encryptionroot foo == foo
return (p, zfsout, zfserr)
async def ask_password(self, receiving_cmd, retries=5):
"""
Wrapper around self._ask_password which retries a number of times
"""
attempts = retries + 1
for attempt in range(1, 1 + attempts):
try:
self.log.warning('attempting to ask for pw: {}'.format(
receiving_cmd))
(p, stdout, stderr) = await self._ask_password(receiving_cmd)
if p.returncode != 0:
# like if the user clicked cancel
raise qubes.storage.StoragePoolException(stderr)
return (p, stdout, stderr)
except Exception as e:
self.log.warning(
"zfs ask_password failed, attempt {}/{}: {}".format(
attempt, attempts, e))
if attempt == attempts:
# out of retries:
raise qubes.storage.StoragePoolException(e)
    def __init__(self, name, zpool_name, ask_password_domain='dom0',
                 unload_timeout=1200, **kwargs):
        """
        Initializes a new encrypted pool inside an existing zfs_zvol pool.
        When unload_timeout is non-zero, the pool's encryption key is
        unloaded after that many seconds without any volume in use.

        :param name: Name of this encrypted pool.
        :type name: str
        :param zpool_name: Namespace of the existing parent zfs_zvol pool.
        :type zpool_name: str
        :param ask_password_domain: Domain to direct QRexec qubes.AskPassword
                                    calls to for retrieving the encryption
                                    passphrase.
        :type ask_password_domain: str
        :param unload_timeout: The number of seconds after which the key
                               protecting this pool will be unloaded when there
                               are no more volumes in use (0 disables).
        :type unload_timeout: int
        :param `**kwargs`: Accepted for driver compatibility; unused here.
        :raises :class:`qubes.storage.StoragePoolException`:
            ask_password_domain is empty
        :raises :class:`qubes.storage.StoragePoolException`:
            Supposed parent pool doesn't exist

        TODO is initialization order guaranteed by included_in(), e.g. can we
        be sure the underlying zpool will always be initialized by the time
        we get called ?
        """
        self.name = name
        self.pool = self
        self.log = logging.getLogger("qubes.storage.{}.{}".format(
            self.driver, self.name))
        self.unload_timeout = int(unload_timeout)
        assert self.unload_timeout >= 0
        self.ask_password_domain = ask_password_domain
        if self.ask_password_domain == '':
            raise qubes.storage.StoragePoolException(
                "ask_password_domain is empty"
            )
        self.zpool_name = zpool_name
        # TODO look up pool in qubes.app
        #self.underlying = qubes.app.get(zfs_parent, None)
        #if not self.underlying:
        #    raise qubes.storage.StoragePoolException(
        #        "zfs_encrypted: unable to look up parent qvm-pool {}".format(
        #            zfs_parent
        #        )
        #    )
        # TODO validate name
        # fail early when the parent dataset namespace is missing
        if not libzfs_core.lzc_exists(self.zpool_name.encode()):
            raise qubes.storage.StoragePoolException(
                "zfs_encrypted: underlying namespace {!r} does \
                not exist".format(self.zpool_name))
        # Here we configure the prefixes for the datasets we will be making.
        # We get a parent namespace from the underlying, and add to it like:
        # {underlying ns}/encryption/{this pool name}/
        self.name = name
        self.encryption_ns = b"/".join([self.zpool_name.encode(),
                                        b"encryption"])
        # zfs_ns must be a string:
        self.zfs_ns = "/".join([self.encryption_ns.decode(), self.name])
        # Keep this around to make sure something doesn't overwrite it:
        self.zfs_ns_safety_valve = self.zfs_ns.encode()
        # Track which volumes are in use, and when the set of
        # used volumes was last modified.
        # (The idea being that we can unload-key the encryption key
        # when the encrypted pool has been unused for some time):
        self.used_volumes = set()
        self.used_volumes_last_empty = time.clock_gettime(
            time.CLOCK_MONOTONIC_RAW)
        # background task that enforces the key-unload timeout
        self._await_timeout_instance = asyncio.get_event_loop().create_task(
            self.await_timeout()
        )
    async def await_timeout(self):
        """
        This runs as an (eternal) background task that will periodically
        wake up and check if we should attempt to unload the encryption keys
        for this encrypted pool.

        The key is unloaded once the pool has had no volumes in use for at
        least self.unload_timeout seconds; a timeout of 0 disables
        unloading but keeps the task alive so new settings are picked up.
        """
        # at initialization time, we can always wait at least one period:
        self.log.warning(
            "await_timeout is locked and loaded. unload_timeout={}".format(
                self.unload_timeout
            )
        )
        countdown = self.unload_timeout
        while True:
            self.log.warning(
                "going to await_timeout, sleep {} sec".format(countdown))
            await asyncio.sleep(countdown)
            now = time.clock_gettime(time.CLOCK_MONOTONIC_RAW)
            # seconds since the used-volume set last became empty
            elapsed = now - self.used_volumes_last_empty
            if self.unload_timeout > 0:
                countdown = self.unload_timeout - elapsed
            else:
                # When no timeout is configured, we keep this task alive in case
                # the user decides to change their timeout settings.
                # we look for new settings every so often:
                countdown = 60
            # action kicks in when timeout is reached:
            if countdown < 1:
                # reset countdown:
                countdown = self.unload_timeout
                if not self.used_volumes:
                    self.log.warning(
                        'should zfs unload-key {} unless we are already \
                        unloaded'.format(self.zfs_ns))
                    try:
                        libzfs_core.lzc_unload_key(self.zfs_ns.encode())
                        self.log.warning("UNLOADED key for {}.".format(
                            self.zfs_ns))
                    except libzfs_core.exceptions.EncryptionKeyNotLoaded:
                        # already unloaded: nothing to do
                        pass
                    except libzfs_core.exceptions.ZFSError as e:
                        self.log.warning(
                            "key unloading failed for {}: {}".format(
                                self.zfs_ns, e
                            )
                        )
                        # try again:
                        countdown = 20
async def ensure_key_is_loaded(self):
keystatus = qzfs.run_command(
["zfs", "list", "-H",
"-o", 'keystatus', self.zfs_ns]
)
self.log.warning("track volume start keystatus {}".format(keystatus))
if keystatus.strip() == b'unavailable':
await self.ask_password(["zfs", "load-key", self.zfs_ns])
# TODO ideally I guess here we would wait for udevd to kick in...
await asyncio.sleep(1)
if keystatus.strip() == b'-':
self.log.warning("zfs track volume start err why is keystatus '-' ?")
@locked
async def track_volume_start(self, vm):
"""
Register a volume (not a VM!) as used, for the purpose
of unloading the encryption key after a timeout
when no volumes are in use.
It will also prompt the user for their passphrase when trying
to start a volume in a group whose key is currently not loaded.
"""
self.used_volumes.add(vm)
self.log.warning('track_volume_start add {} => {}'.format(
vm, self.used_volumes))
await self.ensure_key_is_loaded()
@locked
async def track_volume_stop(self, vm):
"""Register a volume (not a VM!) as NOT used anymore,
for the purpose of unloading the encryption key after a timeout
when no volumes are in use.
"""
self.used_volumes.discard(vm)
self.used_volumes_last_empty = time.clock_gettime(
time.CLOCK_MONOTONIC_RAW)
def init_volume(self, appvm, volume_config):
self.app_reference = appvm.app
vm_name = appvm.name
if not hasattr(volume_config, 'zfs_ns'):
volume_config["zfs_ns"] = self.zfs_ns
volume_config["pool"] = self
return ZFSQEncryptedVolume(
vm_name=vm_name,
encrypted_pool=self,
**volume_config
)
def included_in(self, app):
"""
Returns the parent pool if found, otherwise raises KeyError.
This function also moonlights as our method to retrieve a handle for
'app' which we record and re-use when asking for passwords.
"""
self.app_reference = app
found = app.pools[self.zpool_name]
return found
    def destroy(self):
        """
        Tear down this pool. Currently does nothing. TODO

        For now we only log a reminder; the user is expected to run
        ``zfs destroy <zpool>/encrypted/<name>`` manually.
        """
        # Sketch of what a real implementation would need to do:
        # zfs umount foo/bar/baz
        # zfs key -u foo/bar
        self.log.warning(
            "zfs_encrypted:destroy(): TODO implement, please do this \
yourself with zfs destroy {}/encrypted/{}".format(
                self.zpool_name, self.name
            )
        )
@property
def config(self):
return {
"name": self.name,
"zpool_name": self.zpool_name,
"zfs_ns": self.zfs_ns,
"driver": self.driver,
"unload_timeout": str(self.unload_timeout),
}
async def setup(self):
"""
Install a new encrypted Pool.
"""
# TODO at the moment this is set on pool initialization
# by the underlying zpool for ALL zpools, we should be nice
# and only enable this for pools that actually need it, by recursively
# walking zfs_parent until the first element (the pool),
# basically we can split by '/' and take the first element:
# "sudo", "zpool", "set", "feature@encryption=enable",
# self.zfs_ns.split('/',1)[0]
# General namespace for encrypted VMs:
if not libzfs_core.lzc_exists(self.encryption_ns):
await qzfs.qubes_cmd_coro(
["create",
self.encryption_ns,
[] # empty set of options TODO
])
# Namespace for this pool.
# It will be encrypted, and the datasets and zvols inside
# will inheir the encryption key.)
assert self.zfs_ns.encode() == self.zfs_ns_safety_valve
if libzfs_core.lzc_exists(self.zfs_ns.encode()):
raise qubes.storage.StoragePoolException(
"our ns already exists. TODO does this leave a \
broken qvm-pool floating around?"
)
(p, stdout, stderr) = await self.ask_password(
[ # <- cmd list
"zfs",
"create",
"-o", "encryption=aes-256-gcm",
"-o", "keylocation=prompt",
"-o", "keyformat=passphrase",
# TODO "pbkdf2iters=1000000",
# ^-- minimum is 100k, default 350k
# TODO DEFAULT_DATASET_PROPS
self.zfs_ns
]
)
self.log.warning("ask_password create {}/{}/{}".format(
p, stdout, stderr))
if p.returncode != 0:
self.log.warning("failed to recv password / create encrypted ns")
raise qubes.storage.StoragePoolException(
"failed to create dataset with enc password: {}".format(
stderr
))
self.log.warning("encrypted ns {} made!".format(self.zfs_ns))
# zfs mount -l our_ns
# ok so now we need to basically do the same as
# super().setup() does after it has created the zpool.
# TODO for now we copy-paste, but this should really move \
# to something inheritable.
for namespace in [b"import", b"vm"]:
try:
#qubes_cmd_coro(["create", ])
# TODO here we would like to set 'refreservation=auto'
ds_name = b"/".join([self.zfs_ns.encode(), namespace])
libzfs_core.lzc_create(
ds_name,
ds_type="zfs",
props=qzfs.DEFAULT_DATASET_PROPS,
)
except libzfs_core.exceptions.FilesystemExists:
raise qubes.storage.StoragePoolException(
"ZFS dataset for {}/{} already exists".format(
self.zfs_ns, namespace
)
)
except libzfs_core.exceptions.ZFSError as e:
raise qubes.storage.StoragePoolException(
"ZFS dataset {}/{} could not be created: {!r}".format(
self.zfs_ns, namespace.encode(), e)
)
class ZFSQEncryptedVolume(qzfs.ZFSQzvol):
    """
    Storage volume contained inside an encrypted ZFS dataset pool.

    Wraps `qzfs.ZFSQzvol`, additionally telling the owning encrypted pool
    when the volume is in use so the pool can keep the encryption key
    loaded (and unload it after a period of inactivity).
    """

    def __init__(self, vm_name, name, encrypted_pool, **kwargs):
        self.name = name
        self.encrypted_pool = encrypted_pool
        self.pool = self.encrypted_pool
        # BUG FIX: the original used hasattr() on the kwargs dict, which is
        # always False for keys (and would have hit an UnboundLocalError on
        # `zfs_ns` otherwise); check membership so a caller-supplied zfs_ns
        # is respected and self.zfs_ns is always defined.
        if 'zfs_ns' not in kwargs:
            kwargs["zfs_ns"] = encrypted_pool.zfs_ns
        self.zfs_ns = kwargs["zfs_ns"]
        super().__init__(
            vm_name=vm_name,
            name=name,
            # like it's passed to qubes.storage.Volume.__init__(),
            # lord knows what that does to it
            **kwargs)

    @locked
    async def create(self):
        """
        Installs a new volume by initializing a zvol inside an
        encrypted dataset.

        create() in a volume and setup() in a pool do the same.
        """
        self.log.warning("zfs_encrypted:create() {}".format(self.vid))
        # Prevent encryption key from timing out while we initialize:
        await self.encrypted_pool.track_volume_start(self.vid)
        # TODO should verify that encryption=on and that key is loaded.
        try:
            self.log.warning("zfs_encrypted: about to call super({})".format(
                self.vid))
            await super().create()
            self.log.warning("zfs_encrypted:create(): worked: {}".format(
                libzfs_core.lzc_exists(self.vid.encode())))
        finally:
            # Stop tracking whether creation succeeded or failed. This
            # coroutine is @locked, so a subsequent start() will add it
            # again. If we don't relinquish the tracking here, creation of
            # a new VM would permanently disable the unload_timeout.
            await self.encrypted_pool.track_volume_stop(self.vid)

    @locked
    async def start(self):
        self.log.warning('zfs encrypted vol start {}'.format(
            self.vid))
        # Called before the parent to allow their start to act on the dataset:
        await self.encrypted_pool.track_volume_start(self.vid)
        try:
            await super().start()
        except Exception:
            # If that failed due to for instance abort_if_import_in_progress(),
            # the import volume should still be active,
            # which means we can safely:
            await self.encrypted_pool.track_volume_stop(self.vid)
            # Bare raise preserves the original traceback:
            raise

    @locked
    async def stop(self):
        self.log.warning("ZFS STOP {}".format(self.vid))
        try:
            await super().stop()
        finally:
            # Called after the parent to allow their shutdown before
            # unloading the key. BUG FIX: the original additionally called
            # track_volume_stop() from an `except` clause, invoking it
            # twice on failure; the `finally` alone covers both paths.
            await self.encrypted_pool.track_volume_stop(self.vid)
| 39.469466 | 81 | 0.569674 |
a6f7a227bed3c266973756c319d41aa607039766 | 2,483 | py | Python | src/modeling/deeplab.py | ShotaroKataoka/work_detection_PLOSONE | b8016e0003e11f6eb01355f52804c790a26a7741 | [
"MIT"
] | null | null | null | src/modeling/deeplab.py | ShotaroKataoka/work_detection_PLOSONE | b8016e0003e11f6eb01355f52804c790a26a7741 | [
"MIT"
] | null | null | null | src/modeling/deeplab.py | ShotaroKataoka/work_detection_PLOSONE | b8016e0003e11f6eb01355f52804c790a26a7741 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
from modeling.aspp import build_aspp
from modeling.decoder import build_decoder
from modeling.backbone import build_backbone
class DeepLab(nn.Module):
    """DeepLab v3+ semantic segmentation network.

    Combines a feature-extraction backbone, an ASPP module and a decoder,
    upsampling the decoder output back to the input resolution.

    Args:
        backbone: backbone name understood by ``build_backbone``
            (e.g. 'resnet', 'drn', 'mobilenet').
        output_stride: ratio of input to backbone-output resolution
            (forced to 8 for 'drn' backbones).
        num_classes: number of segmentation classes.
        sync_bn: use ``SynchronizedBatchNorm2d`` instead of
            ``nn.BatchNorm2d``.
        freeze_bn: put all batch-norm layers into eval mode after build.
    """

    def __init__(self, backbone='resnet', output_stride=16, num_classes=21,
                 sync_bn=True, freeze_bn=False):
        super(DeepLab, self).__init__()
        if backbone == 'drn':
            # DRN backbones only support an output stride of 8.
            output_stride = 8

        # Choose the batch-norm implementation (idiomatic truthiness
        # check instead of `== True`).
        if sync_bn:
            BatchNorm = SynchronizedBatchNorm2d
        else:
            BatchNorm = nn.BatchNorm2d

        self.backbone = build_backbone(backbone, output_stride, BatchNorm)
        self.aspp = build_aspp(backbone, output_stride, BatchNorm)
        self.decoder = build_decoder(num_classes, backbone, BatchNorm)

        if freeze_bn:
            self.freeze_bn()

    def forward(self, input):
        """Run a forward pass; returns per-class logits at input resolution."""
        x, low_level_feat = self.backbone(input)
        x = self.aspp(x)
        x = self.decoder(x, low_level_feat)
        # Upsample the logits back to the spatial size of the input.
        x = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True)
        return x

    def freeze_bn(self):
        """Put every batch-norm layer into eval mode (freeze running stats)."""
        for m in self.modules():
            if isinstance(m, SynchronizedBatchNorm2d):
                m.eval()
            elif isinstance(m, nn.BatchNorm2d):
                m.eval()

    def _trainable_params(self, modules):
        """Yield trainable conv/batch-norm parameters from ``modules``.

        Shared implementation for `get_1x_lr_params` and
        `get_10x_lr_params`, which previously duplicated this loop.
        The `or` chain is kept (rather than an isinstance tuple) so the
        Conv2d check short-circuits first.
        """
        for module in modules:
            for m in module.named_modules():
                if isinstance(m[1], nn.Conv2d) or isinstance(m[1], SynchronizedBatchNorm2d) \
                        or isinstance(m[1], nn.BatchNorm2d):
                    for p in m[1].parameters():
                        if p.requires_grad:
                            yield p

    def get_1x_lr_params(self):
        """Backbone parameters, trained at the base learning rate."""
        yield from self._trainable_params([self.backbone])

    def get_10x_lr_params(self):
        """ASPP/decoder parameters, trained at 10x the base learning rate."""
        yield from self._trainable_params([self.aspp, self.decoder])
if __name__ == "__main__":
    # Quick smoke test: build a model and print its architecture.
    model = DeepLab(backbone='mobilenet', output_stride=16)
    print(model)
    # Commented-out manual forward-pass check (513x513 is the standard
    # DeepLab crop size):
    #model.eval()
    #input = torch.rand(1, 3, 513, 513)
    #output = model(input)
    # print(output.size())
| 34.013699 | 93 | 0.590012 |
e4facc0353090058a6d061bf97db79a1f5a5a558 | 235,535 | py | Python | vectorbt/portfolio/base.py | alexis-rodriguez/vectorbt | 820edb8a2bf0408fe004198ffadd0a244199534a | [
"Apache-2.0"
] | null | null | null | vectorbt/portfolio/base.py | alexis-rodriguez/vectorbt | 820edb8a2bf0408fe004198ffadd0a244199534a | [
"Apache-2.0"
] | null | null | null | vectorbt/portfolio/base.py | alexis-rodriguez/vectorbt | 820edb8a2bf0408fe004198ffadd0a244199534a | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 Oleg Polakow. All rights reserved.
# This code is licensed under Apache 2.0 with Commons Clause license (see LICENSE.md for details)
"""Base class for modeling portfolio and measuring its performance.
Provides the class `vectorbt.portfolio.base.Portfolio` for modeling portfolio performance
and calculating various risk and performance metrics. It uses Numba-compiled
functions from `vectorbt.portfolio.nb` for most computations and record classes based on
`vectorbt.records.base.Records` for evaluating events such as orders, logs, trades, positions, and drawdowns.
The job of the `Portfolio` class is to create a series of positions allocated
against a cash component, produce an equity curve, incorporate basic transaction costs
and produce a set of statistics about its performance. In particular, it outputs
position/profit metrics and drawdown information.
Run for the examples below:
```python-repl
>>> import numpy as np
>>> import pandas as pd
>>> from datetime import datetime
>>> import talib
>>> from numba import njit
>>> import vectorbt as vbt
>>> from vectorbt.utils.colors import adjust_opacity
>>> from vectorbt.utils.enum import map_enum_fields
>>> from vectorbt.base.reshape_fns import broadcast, flex_select_auto_nb, to_2d_array
>>> from vectorbt.portfolio.enums import SizeType, Direction, NoOrder, OrderStatus, OrderSide
>>> from vectorbt.portfolio import nb
```
## Workflow
`Portfolio` class does quite a few things to simulate your strategy.
**Preparation** phase (in the particular class method):
* Receives a set of inputs, such as signal arrays and other parameters
* Resolves parameter defaults by searching for them in the global settings
* Brings input arrays to a single shape
* Does some basic validation of inputs and converts Pandas objects to NumPy arrays
* Passes everything to a Numba-compiled simulation function
**Simulation** phase (in the particular simulation function using Numba):
* The simulation function traverses the broadcasted shape element by element, row by row (time dimension),
column by column (asset dimension)
* For each asset and timestamp (= element):
* Gets all available information related to this element and executes the logic
* Generates an order or skips the element altogether
* If an order has been issued, processes the order and fills/ignores/rejects it
* If the order has been filled, registers the result by appending it to the order records
* Updates the current state such as the cash and asset balances
**Construction** phase (in the particular class method):
* Receives the returned order records and initializes a new `Portfolio` object
**Analysis** phase (in the `Portfolio` object)
* Offers a broad range of risk & performance metrics based on order records
## Simulation modes
There are three main simulation modes.
### From orders
`Portfolio.from_orders` is the most straightforward and the fastest out of all simulation modes.
An order is a simple instruction that contains size, price, fees, and other information
(see `vectorbt.portfolio.enums.Order` for details about what information a typical order requires).
Instead of creating a `vectorbt.portfolio.enums.Order` tuple for each asset and timestamp (which may
waste a lot of memory) and appending it to a (potentially huge) list for processing, `Portfolio.from_orders`
takes each of those information pieces as an array, broadcasts them against each other, and creates a
`vectorbt.portfolio.enums.Order` tuple out of each element for us.
Thanks to broadcasting, we can pass any of the information as a 2-dim array, as a 1-dim array
per column or row, and as a constant. And we don't even need to provide every piece of information -
vectorbt fills the missing data with default constants, without wasting memory.
Here's an example:
```python-repl
>>> size = pd.Series([1, -1, 1, -1]) # per row
>>> price = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [4, 3, 2, 1]}) # per element
>>> direction = ['longonly', 'shortonly'] # per column
>>> fees = 0.01 # per frame
>>> pf = vbt.Portfolio.from_orders(price, size, direction=direction, fees=fees)
>>> pf.orders.records_readable
Order Id Column Timestamp Size Price Fees Side
0 0 a 0 1.0 1.0 0.01 Buy
1 1 a 1 1.0 2.0 0.02 Sell
2 2 a 2 1.0 3.0 0.03 Buy
3 3 a 3 1.0 4.0 0.04 Sell
4 4 b 0 1.0 4.0 0.04 Sell
5 5 b 1 1.0 3.0 0.03 Buy
6 6 b 2 1.0 2.0 0.02 Sell
7 7 b 3 1.0 1.0 0.01 Buy
```
This method is particularly useful in situations where you don't need any further logic
apart from filling orders at predefined timestamps. If you want to issue orders depending
upon the previous performance, the current state, or other custom conditions, head over to
`Portfolio.from_signals` or `Portfolio.from_order_func`.
### From signals
`Portfolio.from_signals` is centered around signals. It adds an abstraction layer on top of `Portfolio.from_orders`
to automate some signaling processes. For example, by default, it won't let us execute another entry signal
if we are already in the position. It also implements stop loss and take profit orders for exiting positions.
Nevertheless, this method behaves similarly to `Portfolio.from_orders` and accepts most of its arguments;
in fact, by setting `accumulate=True`, it behaves quite similarly to `Portfolio.from_orders`.
Let's replicate the example above using signals:
```python-repl
>>> entries = pd.Series([True, False, True, False])
>>> exits = pd.Series([False, True, False, True])
>>> pf = vbt.Portfolio.from_signals(price, entries, exits, size=1, direction=direction, fees=fees)
>>> pf.orders.records_readable
Order Id Column Timestamp Size Price Fees Side
0 0 a 0 1.0 1.0 0.01 Buy
1 1 a 1 1.0 2.0 0.02 Sell
2 2 a 2 1.0 3.0 0.03 Buy
3 3 a 3 1.0 4.0 0.04 Sell
4 4 b 0 1.0 4.0 0.04 Sell
5 5 b 1 1.0 3.0 0.03 Buy
6 6 b 2 1.0 2.0 0.02 Sell
7 7 b 3 1.0 1.0 0.01 Buy
```
In a nutshell: this method automates some procedures that otherwise would be only possible by using
`Portfolio.from_order_func` while following the same broadcasting principles as `Portfolio.from_orders` -
the best of both worlds, given you can express your strategy as a sequence of signals. But as soon as
your strategy requires any signal to depend upon more complex conditions or to generate multiple orders at once,
it's best to run your custom signaling logic using `Portfolio.from_order_func`.
### From order function
`Portfolio.from_order_func` is the most powerful form of simulation. Instead of pulling information
from predefined arrays, it lets us define an arbitrary logic through callbacks. There are multiple
kinds of callbacks, each called at some point while the simulation function traverses the shape.
For example, apart from the main callback that returns an order (`order_func_nb`), there is a callback
that does preprocessing on the entire group of columns at once. For more details on the general procedure
and the callback zoo, see `vectorbt.portfolio.nb.simulate_nb`.
Let's replicate our example using an order function:
```python-repl
>>> @njit
... def order_func_nb(c, size, direction, fees):
... return nb.order_nb(
... price=c.close[c.i, c.col],
... size=size[c.i],
... direction=direction[c.col],
... fees=fees
... )
>>> direction_num = map_enum_fields(direction, Direction)
>>> pf = vbt.Portfolio.from_order_func(
... price,
... order_func_nb,
... np.asarray(size), np.asarray(direction_num), fees
... )
>>> pf.orders.records_readable
Order Id Column Timestamp Size Price Fees Side
0 0 a 0 1.0 1.0 0.01 Buy
1 1 a 1 1.0 2.0 0.02 Sell
2 2 a 2 1.0 3.0 0.03 Buy
3 3 a 3 1.0 4.0 0.04 Sell
4 4 b 0 1.0 4.0 0.04 Sell
5 5 b 1 1.0 3.0 0.03 Buy
6 6 b 2 1.0 2.0 0.02 Sell
7 7 b 3 1.0 1.0 0.01 Buy
```
There is an even more flexible version available - `vectorbt.portfolio.nb.flex_simulate_nb` (activated by
passing `flexible=True` to `Portfolio.from_order_func`) - that allows creating multiple orders per symbol and bar.
This method has many advantages:
* Realistic simulation as it follows the event-driven approach - less risk of exposure to the look-ahead bias
* Provides a lot of useful information during the runtime, such as the current position's PnL
* Enables putting all logic including custom indicators into a single place, and running it as the data
comes in, in a memory-friendly manner
But there are drawbacks too:
* Doesn't broadcast arrays - needs to be done by the user prior to the execution
* Requires at least a basic knowledge of NumPy and Numba
* Requires at least an intermediate knowledge of both to optimize for efficiency
## Example
To showcase the features of `Portfolio`, run the following example: it checks candlestick data of 6 major
cryptocurrencies in 2020 against every single pattern found in TA-Lib, and translates them into orders.
```python-repl
>>> # Fetch price history
>>> symbols = ['BTC-USD', 'ETH-USD', 'XRP-USD', 'BNB-USD', 'BCH-USD', 'LTC-USD']
>>> start = '2020-01-01 UTC' # crypto is UTC
>>> end = '2020-09-01 UTC'
>>> # OHLCV by column
>>> ohlcv = vbt.YFData.download(symbols, start=start, end=end).concat()
>>> ohlcv['Open']
symbol BTC-USD ETH-USD XRP-USD BNB-USD \\
Date
2020-01-01 00:00:00+00:00 7194.892090 129.630661 0.192912 13.730962
2020-01-02 00:00:00+00:00 7202.551270 130.820038 0.192708 13.698126
2020-01-03 00:00:00+00:00 6984.428711 127.411263 0.187948 13.035329
... ... ... ... ...
2020-08-30 00:00:00+00:00 11508.713867 399.616699 0.274568 23.009060
2020-08-31 00:00:00+00:00 11713.306641 428.509003 0.283065 23.647858
2020-09-01 00:00:00+00:00 11679.316406 434.874451 0.281612 23.185047
symbol BCH-USD LTC-USD
Date
2020-01-01 00:00:00+00:00 204.671295 41.326534
2020-01-02 00:00:00+00:00 204.354538 42.018085
2020-01-03 00:00:00+00:00 196.007690 39.863129
... ... ...
2020-08-30 00:00:00+00:00 268.842865 57.207737
2020-08-31 00:00:00+00:00 279.280426 62.844059
2020-09-01 00:00:00+00:00 274.480865 61.105076
[244 rows x 6 columns]
>>> # Run every single pattern recognition indicator and combine the results
>>> result = pd.DataFrame.vbt.empty_like(ohlcv['Open'], fill_value=0.)
>>> for pattern in talib.get_function_groups()['Pattern Recognition']:
... PRecognizer = vbt.IndicatorFactory.from_talib(pattern)
... pr = PRecognizer.run(ohlcv['Open'], ohlcv['High'], ohlcv['Low'], ohlcv['Close'])
... result = result + pr.integer
>>> # Don't look into the future
>>> result = result.vbt.fshift(1)
>>> # Treat each number as order value in USD
>>> size = result / ohlcv['Open']
>>> # Simulate portfolio
>>> pf = vbt.Portfolio.from_orders(
... ohlcv['Close'], size, price=ohlcv['Open'],
... init_cash='autoalign', fees=0.001, slippage=0.001)
>>> # Visualize portfolio value
>>> pf.value().vbt.plot()
```

## Broadcasting
`Portfolio` is very flexible towards inputs:
* Accepts both Series and DataFrames as inputs
* Broadcasts inputs to the same shape using vectorbt's own broadcasting rules
* Many inputs (such as `fees`) can be passed as a single value, value per column/row, or as a matrix
* Implements flexible indexing wherever possible to save memory
### Flexible indexing
Instead of expensive broadcasting, most methods keep the original shape and do indexing in a smart way.
A nice feature of this is that it has almost no memory footprint and can broadcast in
any direction indefinitely.
For example, let's broadcast three inputs and select the last element using both approaches:
```python-repl
>>> # Classic way
>>> a = np.array([1, 2, 3])
>>> b = np.array([[4], [5], [6]])
>>> c = np.array(10)
>>> a_, b_, c_ = broadcast(a, b, c)
>>> a_
array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
>>> a_[2, 2]
3
>>> b_
array([[4, 4, 4],
[5, 5, 5],
[6, 6, 6]])
>>> b_[2, 2]
6
>>> c_
array([[10, 10, 10],
[10, 10, 10],
[10, 10, 10]])
>>> c_[2, 2]
10
>>> # Flexible indexing being done during simulation
>>> flex_select_auto_nb(a, 2, 2)
3
>>> flex_select_auto_nb(b, 2, 2)
6
>>> flex_select_auto_nb(c, 2, 2)
10
```
## Defaults
If you look at the arguments of each class method, you will notice that most of them default to None.
None has a special meaning in vectorbt: it's a command to pull the default value from the global settings config
- `vectorbt._settings.settings`. The branch for the `Portfolio` can be found under the key 'portfolio'.
For example, the default size used in `Portfolio.from_signals` and `Portfolio.from_orders` is `np.inf`:
```python-repl
>>> vbt.settings.portfolio['size']
inf
```
## Grouping
One of the key features of `Portfolio` is the ability to group columns. Groups can be specified by
`group_by`, which can be anything from positions or names of column levels, to a NumPy array with
actual groups. Groups can be formed to share capital between columns (make sure to pass `cash_sharing=True`)
or to compute metrics for a combined portfolio of multiple independent columns.
For example, let's divide our portfolio into two groups sharing the same cash balance:
```python-repl
>>> # Simulate combined portfolio
>>> group_by = pd.Index([
... 'first', 'first', 'first',
... 'second', 'second', 'second'
... ], name='group')
>>> comb_pf = vbt.Portfolio.from_orders(
... ohlcv['Close'], size, price=ohlcv['Open'],
... init_cash='autoalign', fees=0.001, slippage=0.001,
... group_by=group_by, cash_sharing=True)
>>> # Get total profit per group
>>> comb_pf.total_profit()
group
first 26221.571200
second 10141.952674
Name: total_profit, dtype: float64
```
Not only can we analyze each group, but also each column in the group:
```python-repl
>>> # Get total profit per column
>>> comb_pf.total_profit(group_by=False)
symbol
BTC-USD 5792.120252
ETH-USD 16380.039692
XRP-USD 4049.411256
BNB-USD 6081.253551
BCH-USD 400.573418
LTC-USD 3660.125705
Name: total_profit, dtype: float64
```
In the same way, we can introduce new grouping to the method itself:
```python-repl
>>> # Get total profit per group
>>> pf.total_profit(group_by=group_by)
group
first 26221.571200
second 10141.952674
Name: total_profit, dtype: float64
```
!!! note
If cash sharing is enabled, grouping can be disabled but cannot be modified.
## Indexing
Like any other class subclassing `vectorbt.base.array_wrapper.Wrapping`, we can do pandas indexing
on a `Portfolio` instance, which forwards indexing operation to each object with columns:
```python-repl
>>> pf['BTC-USD']
<vectorbt.portfolio.base.Portfolio at 0x7fac7517ac88>
>>> pf['BTC-USD'].total_profit()
5792.120252189081
```
Combined portfolio is indexed by group:
```python-repl
>>> comb_pf['first']
<vectorbt.portfolio.base.Portfolio at 0x7fac5756b828>
>>> comb_pf['first'].total_profit()
26221.57120014546
```
!!! note
Changing index (time axis) is not supported. The object should be treated as a Series
rather than a DataFrame; for example, use `pf.iloc[0]` instead of `pf.iloc[:, 0]`.
Indexing behavior depends solely upon `vectorbt.base.array_wrapper.ArrayWrapper`.
For example, if `group_select` is enabled indexing will be performed on groups,
otherwise on single columns. You can pass wrapper arguments with `wrapper_kwargs`.
## Logging
To collect more information on how a specific order was processed or to be able to track the whole
simulation from the beginning to the end, we can turn on logging:
```python-repl
>>> # Simulate portfolio with logging
>>> pf = vbt.Portfolio.from_orders(
... ohlcv['Close'], size, price=ohlcv['Open'],
... init_cash='autoalign', fees=0.001, slippage=0.001, log=True)
>>> pf.logs.records
id group col idx cash position debt free_cash val_price \\
0 0 0 0 0 inf 0.000000 0.0 inf 7194.892090
1 1 0 0 1 inf 0.000000 0.0 inf 7202.551270
2 2 0 0 2 inf 0.000000 0.0 inf 6984.428711
... ... ... ... ... ... ... ... ... ...
1461 1461 5 5 241 inf 272.389644 0.0 inf 57.207737
1462 1462 5 5 242 inf 274.137659 0.0 inf 62.844059
1463 1463 5 5 243 inf 282.093860 0.0 inf 61.105076
value ... new_free_cash new_val_price new_value res_size \\
0 inf ... inf 7194.892090 inf NaN
1 inf ... inf 7202.551270 inf NaN
2 inf ... inf 6984.428711 inf NaN
... ... ... ... ... ... ...
1461 inf ... inf 57.207737 inf 1.748015
1462 inf ... inf 62.844059 inf 7.956202
1463 inf ... inf 61.105076 inf 1.636525
res_price res_fees res_side res_status res_status_info order_id
0 NaN NaN -1 1 0 -1
1 NaN NaN -1 1 5 -1
2 NaN NaN -1 1 5 -1
... ... ... ... ... ... ...
1461 57.264945 0.1001 0 0 -1 1070
1462 62.906903 0.5005 0 0 -1 1071
1463 61.043971 0.0999 1 0 -1 1072
[1464 rows x 37 columns]
```
Just as orders, logs are also records and thus can be easily analyzed:
```python-repl
>>> pf.logs.res_status.value_counts()
symbol BTC-USD ETH-USD XRP-USD BNB-USD BCH-USD LTC-USD
Filled 184 172 177 178 177 185
Ignored 60 72 67 66 67 59
```
Logging can also be turned on just for one order, row, or column, since as many other
variables it's specified per order and can broadcast automatically.
!!! note
Logging can slow down simulation.
## Caching
`Portfolio` heavily relies upon caching. If a method or a property requires heavy computation,
it's wrapped with `vectorbt.utils.decorators.cached_method` and `vectorbt.utils.decorators.cached_property`
respectively. Caching can be disabled globally via `caching` in `vectorbt._settings.settings`.
!!! note
Because of caching, class is meant to be immutable and all properties are read-only.
To change any attribute, use the `copy` method and pass the attribute as keyword argument.
Alternatively, we can precisely point at attributes and methods that should or shouldn't
be cached. For example, we can blacklist the entire `Portfolio` class except a few most called
methods such as `Portfolio.cash_flow` and `Portfolio.asset_flow`:
```python-repl
>>> vbt.settings.caching['blacklist'].append(
... vbt.CacheCondition(base_cls='Portfolio')
... )
>>> vbt.settings.caching['whitelist'].extend([
... vbt.CacheCondition(base_cls='Portfolio', func='cash_flow'),
... vbt.CacheCondition(base_cls='Portfolio', func='asset_flow')
... ])
```
Define rules for one instance of `Portfolio`:
```python-repl
>>> vbt.settings.caching['blacklist'].append(
... vbt.CacheCondition(instance=pf)
... )
>>> vbt.settings.caching['whitelist'].extend([
... vbt.CacheCondition(instance=pf, func='cash_flow'),
... vbt.CacheCondition(instance=pf, func='asset_flow')
... ])
```
See `vectorbt.utils.decorators.should_cache` for caching rules.
To reset caching:
```python-repl
>>> vbt.settings.caching.reset()
```
## Performance and memory
If you're running out of memory when working with large arrays, make sure to disable caching
and then store most important time series manually. For example, if you're interested in Sharpe
ratio or other metrics based on returns, run and save `Portfolio.returns` in a variable and then use the
`vectorbt.returns.accessors.ReturnsAccessor` to analyze them. Do not use methods akin to
`Portfolio.sharpe_ratio` because they will re-calculate returns each time.
Alternatively, you can track portfolio value and returns using `Portfolio.from_order_func` and its callbacks
(preferably in `post_segment_func_nb`):
```python-repl
>>> pf_baseline = vbt.Portfolio.from_orders(
... ohlcv['Close'], size, price=ohlcv['Open'],
... init_cash='autoalign', fees=0.001, slippage=0.001, freq='d')
>>> pf_baseline.sharpe_ratio()
symbol
BTC-USD 1.743437
ETH-USD 2.800903
XRP-USD 1.607904
BNB-USD 1.805373
BCH-USD 0.269392
LTC-USD 1.040494
Name: sharpe_ratio, dtype: float64
>>> @njit
... def order_func_nb(c, size, price, fees, slippage):
... return nb.order_nb(
... size=nb.get_elem_nb(c, size),
... price=nb.get_elem_nb(c, price),
... fees=nb.get_elem_nb(c, fees),
... slippage=nb.get_elem_nb(c, slippage),
... )
>>> @njit
... def post_segment_func_nb(c, returns_out):
... returns_out[c.i, c.group] = c.last_return[c.group]
>>> returns_out = np.empty_like(ohlcv['Close'], dtype=np.float_)
>>> pf = vbt.Portfolio.from_order_func(
... ohlcv['Close'],
... order_func_nb,
... np.asarray(size),
... np.asarray(ohlcv['Open']),
... np.asarray(0.001),
... np.asarray(0.001),
... post_segment_func_nb=post_segment_func_nb,
... post_segment_args=(returns_out,),
... init_cash=pf_baseline.init_cash
... )
>>> returns = pf.wrapper.wrap(returns_out)
>>> del pf
>>> returns.vbt.returns(freq='d').sharpe_ratio()
symbol
BTC-USD -2.261443
ETH-USD 0.059538
XRP-USD 2.159093
BNB-USD 1.555386
BCH-USD 0.784214
LTC-USD 1.460077
Name: sharpe_ratio, dtype: float64
```
The only drawback of this approach is that you cannot use `init_cash='auto'` or `init_cash='autoalign'`
because then, during the simulation, the portfolio value is `np.inf` and the returns are `np.nan`.
## Saving and loading
Like any other class subclassing `vectorbt.utils.config.Pickleable`, we can save a `Portfolio`
instance to the disk with `Portfolio.save` and load it with `Portfolio.load`:
```python-repl
>>> pf = vbt.Portfolio.from_orders(
... ohlcv['Close'], size, price=ohlcv['Open'],
... init_cash='autoalign', fees=0.001, slippage=0.001, freq='d')
>>> pf.sharpe_ratio()
symbol
BTC-USD 1.743437
ETH-USD 2.800903
XRP-USD 1.607904
BNB-USD 1.805373
BCH-USD 0.269392
LTC-USD 1.040494
Name: sharpe_ratio, dtype: float64
>>> pf.save('my_pf')
>>> pf = vbt.Portfolio.load('my_pf')
>>> pf.sharpe_ratio()
symbol
BTC-USD 1.743437
ETH-USD 2.800903
XRP-USD 1.607904
BNB-USD 1.805373
BCH-USD 0.269392
LTC-USD 1.040494
Name: sharpe_ratio, dtype: float64
```
!!! note
Save files won't include neither cached results nor global defaults. For example,
passing `fillna_close` as None will also use None when the portfolio is loaded from disk.
Make sure to either pass all arguments explicitly or to also save the `vectorbt._settings.settings` config.
## Stats
!!! hint
See `vectorbt.generic.stats_builder.StatsBuilderMixin.stats` and `Portfolio.metrics`.
Let's simulate a portfolio with two columns:
```python-repl
>>> close = vbt.YFData.download(
... "BTC-USD",
... start='2020-01-01 UTC',
... end='2020-09-01 UTC'
... ).get('Close')
>>> pf = vbt.Portfolio.from_random_signals(close, n=[10, 20], seed=42)
>>> pf.wrapper.columns
Int64Index([10, 20], dtype='int64', name='rand_n')
```
### Column, group, and tag selection
To return the statistics for a particular column/group, use the `column` argument:
```python-repl
>>> pf.stats(column=10)
UserWarning: Metric 'sharpe_ratio' requires frequency to be set
UserWarning: Metric 'calmar_ratio' requires frequency to be set
UserWarning: Metric 'omega_ratio' requires frequency to be set
UserWarning: Metric 'sortino_ratio' requires frequency to be set
Start 2020-01-01 00:00:00+00:00
End 2020-09-01 00:00:00+00:00
Period 244
Start Value 100.0
End Value 106.721585
Total Return [%] 6.721585
Benchmark Return [%] 66.252621
Max Gross Exposure [%] 100.0
Total Fees Paid 0.0
Max Drawdown [%] 22.190944
Max Drawdown Duration 101.0
Total Trades 10
Total Closed Trades 10
Total Open Trades 0
Open Trade PnL 0.0
Win Rate [%] 60.0
Best Trade [%] 15.31962
Worst Trade [%] -9.904223
Avg Winning Trade [%] 4.671959
Avg Losing Trade [%] -4.851205
Avg Winning Trade Duration 11.333333
Avg Losing Trade Duration 14.25
Profit Factor 1.347457
Expectancy 0.672158
Name: 10, dtype: object
```
If vectorbt couldn't parse the frequency of `close`:
1) it won't return any duration in time units,
2) it won't return any metric that requires annualization, and
3) it will throw a bunch of warnings (you can silence those by passing `silence_warnings=True`)
We can provide the frequency as part of the settings dict:
```python-repl
>>> pf.stats(column=10, settings=dict(freq='d'))
UserWarning: Changing the frequency will create a copy of this object.
Consider setting the frequency upon object creation to re-use existing cache.
Start 2020-01-01 00:00:00+00:00
End 2020-09-01 00:00:00+00:00
Period 244 days 00:00:00
Start Value 100.0
End Value 106.721585
Total Return [%] 6.721585
Benchmark Return [%] 66.252621
Max Gross Exposure [%] 100.0
Total Fees Paid 0.0
Max Drawdown [%] 22.190944
Max Drawdown Duration 101 days 00:00:00
Total Trades 10
Total Closed Trades 10
Total Open Trades 0
Open Trade PnL 0.0
Win Rate [%] 60.0
Best Trade [%] 15.31962
Worst Trade [%] -9.904223
Avg Winning Trade [%] 4.671959
Avg Losing Trade [%] -4.851205
Avg Winning Trade Duration 11 days 08:00:00
Avg Losing Trade Duration 14 days 06:00:00
Profit Factor 1.347457
Expectancy 0.672158
Sharpe Ratio 0.445231
Calmar Ratio 0.460573
Omega Ratio 1.099192
Sortino Ratio 0.706986
Name: 10, dtype: object
```
But in this case, our portfolio will be copied to set the new frequency and we wouldn't be
able to re-use its cached attributes. Let's define the frequency upon the simulation instead:
```python-repl
>>> pf = vbt.Portfolio.from_random_signals(close, n=[10, 20], seed=42, freq='d')
```
We can change the grouping of the portfolio on the fly. Let's form a single group:
```python-repl
>>> pf.stats(group_by=True)
Start 2020-01-01 00:00:00+00:00
End 2020-09-01 00:00:00+00:00
Period 244 days 00:00:00
Start Value 200.0
End Value 277.49299
Total Return [%] 38.746495
Benchmark Return [%] 66.252621
Max Gross Exposure [%] 100.0
Total Fees Paid 0.0
Max Drawdown [%] 14.219327
Max Drawdown Duration 86 days 00:00:00
Total Trades 30
Total Closed Trades 30
Total Open Trades 0
Open Trade PnL 0.0
Win Rate [%] 66.666667
Best Trade [%] 18.332559
Worst Trade [%] -9.904223
Avg Winning Trade [%] 5.754788
Avg Losing Trade [%] -4.718907
Avg Winning Trade Duration 7 days 19:12:00
Avg Losing Trade Duration 8 days 07:12:00
Profit Factor 2.427948
Expectancy 2.5831
Sharpe Ratio 1.57907
Calmar Ratio 4.445448
Omega Ratio 1.334032
Sortino Ratio 2.59669
Name: group, dtype: object
```
We can see how the initial cash has changed from $100 to $200, indicating that both columns now
contribute to the performance.
### Aggregation
If the portfolio consists of multiple columns/groups and no column/group has been selected,
each metric is aggregated across all columns/groups based on `agg_func`, which is `np.mean` by default.
```python-repl
>>> pf.stats()
UserWarning: Object has multiple columns. Aggregating using <function mean at 0x7fc77152bb70>.
Pass column to select a single column/group.
Start 2020-01-01 00:00:00+00:00
End 2020-09-01 00:00:00+00:00
Period 244 days 00:00:00
Start Value 100.0
End Value 138.746495
Total Return [%] 38.746495
Benchmark Return [%] 66.252621
Max Gross Exposure [%] 100.0
Total Fees Paid 0.0
Max Drawdown [%] 20.35869
Max Drawdown Duration 93 days 00:00:00
Total Trades 15.0
Total Closed Trades 15.0
Total Open Trades 0.0
Open Trade PnL 0.0
Win Rate [%] 65.0
Best Trade [%] 16.82609
Worst Trade [%] -9.701273
Avg Winning Trade [%] 5.445408
Avg Losing Trade [%] -4.740956
Avg Winning Trade Duration 8 days 19:25:42.857142857
Avg Losing Trade Duration 9 days 07:00:00
Profit Factor 2.186957
Expectancy 2.105364
Sharpe Ratio 1.165695
Calmar Ratio 3.541079
Omega Ratio 1.331624
Sortino Ratio 2.084565
Name: agg_func_mean, dtype: object
```
Here, the Sharpe ratio of 0.445231 (column=10) and 1.88616 (column=20) lead to the average of 1.16569.
We can also return a DataFrame with statistics per column/group by passing `agg_func=None`:
```python-repl
>>> pf.stats(agg_func=None)
Start End Period ... Sortino Ratio
rand_n ...
10 2020-01-01 00:00:00+00:00 2020-09-01 00:00:00+00:00 244 days ... 0.706986
20 2020-01-01 00:00:00+00:00 2020-09-01 00:00:00+00:00 244 days ... 3.462144
[2 rows x 25 columns]
```
### Metric selection
To select metrics, use the `metrics` argument (see `Portfolio.metrics` for supported metrics):
```python-repl
>>> pf.stats(metrics=['sharpe_ratio', 'sortino_ratio'], column=10)
Sharpe Ratio 0.445231
Sortino Ratio 0.706986
Name: 10, dtype: float64
```
We can also select specific tags (see any metric from `Portfolio.metrics` that has the `tag` key):
```python-repl
>>> pf.stats(column=10, tags=['trades'])
Total Trades 10
Total Open Trades 0
Open Trade PnL 0
Long Trades [%] 100
Win Rate [%] 60
Best Trade [%] 15.3196
Worst Trade [%] -9.90422
Avg Winning Trade [%] 4.67196
Avg Winning Trade Duration 11 days 08:00:00
Avg Losing Trade [%] -4.8512
Avg Losing Trade Duration 14 days 06:00:00
Profit Factor 1.34746
Expectancy 0.672158
Name: 10, dtype: object
```
Or provide a boolean expression:
```python-repl
>>> pf.stats(column=10, tags='trades and open and not closed')
Total Open Trades 0.0
Open Trade PnL 0.0
Name: 10, dtype: float64
```
The reason why we included "not closed" along with "open" is because some metrics such as the win rate
have both tags attached since they are based upon both open and closed trades/positions
(to see this, pass `settings=dict(incl_open=True)` and `tags='trades and open'`).
### Passing parameters
We can use `settings` to pass parameters used across multiple metrics.
For example, let's pass required and risk-free return to all return metrics:
```python-repl
>>> pf.stats(column=10, settings=dict(required_return=0.1, risk_free=0.01))
Start 2020-01-01 00:00:00+00:00
End 2020-09-01 00:00:00+00:00
Period 244 days 00:00:00
Start Value 100.0
End Value 106.721585
Total Return [%] 6.721585
Benchmark Return [%] 66.252621
Max Gross Exposure [%] 100.0
Total Fees Paid 0.0
Max Drawdown [%] 22.190944
Max Drawdown Duration 101 days 00:00:00
Total Trades 10
Total Closed Trades 10
Total Open Trades 0
Open Trade PnL 0.0
Win Rate [%] 60.0
Best Trade [%] 15.31962
Worst Trade [%] -9.904223
Avg Winning Trade [%] 4.671959
Avg Losing Trade [%] -4.851205
Avg Winning Trade Duration 11 days 08:00:00
Avg Losing Trade Duration 14 days 06:00:00
Profit Factor 1.347457
Expectancy 0.672158
Sharpe Ratio -9.504742 << here
Calmar Ratio 0.460573 << here
Omega Ratio 0.233279 << here
Sortino Ratio -18.763407 << here
Name: 10, dtype: object
```
Passing any argument inside of `settings` either overrides an existing default, or acts as
an optional argument that is passed to the calculation function upon resolution (see below).
Both `required_return` and `risk_free` can be found in the signature of the 4 ratio methods,
so vectorbt knows exactly that it has to pass them.
Let's imagine that the signature of `vectorbt.returns.accessors.ReturnsAccessor.sharpe_ratio`
doesn't list those arguments: vectorbt would simply call this method without passing those two arguments.
In such case, we have two options:
1) Set parameters globally using `settings` and set `pass_{arg}=True` individually using `metric_settings`:
```python-repl
>>> pf.stats(
... column=10,
... settings=dict(required_return=0.1, risk_free=0.01),
... metric_settings=dict(
... sharpe_ratio=dict(pass_risk_free=True),
... omega_ratio=dict(pass_required_return=True, pass_risk_free=True),
... sortino_ratio=dict(pass_required_return=True)
... )
... )
```
2) Set parameters individually using `metric_settings`:
```python-repl
>>> pf.stats(
... column=10,
... metric_settings=dict(
... sharpe_ratio=dict(risk_free=0.01),
... omega_ratio=dict(required_return=0.1, risk_free=0.01),
... sortino_ratio=dict(required_return=0.1)
... )
... )
```
### Custom metrics
To calculate a custom metric, we need to provide at least two things: short name and a settings
dict with the title and calculation function (see arguments in `vectorbt.generic.stats_builder.StatsBuilderMixin`):
```python-repl
>>> max_winning_streak = (
... 'max_winning_streak',
... dict(
... title='Max Winning Streak',
... calc_func=lambda trades: trades.winning_streak.max(),
... resolve_trades=True
... )
... )
>>> pf.stats(metrics=max_winning_streak, column=10)
Max Winning Streak 3.0
Name: 10, dtype: float64
```
You might wonder how vectorbt knows which arguments to pass to `calc_func`?
In the example above, the calculation function expects one argument: `trades`.
To automatically pass any such argument, vectorbt searches for it in the current settings.
As `trades` cannot be found, it either throws an error or tries to resolve this argument if
`resolve_{arg}=True` was passed. Argument resolution is the process of searching for property/method with
the same name (also with prefix `get_`) in the attributes of the current portfolio, automatically passing the
current settings such as `group_by` if they are present in the method's signature
(a similar resolution procedure), and calling the method/property. The result of the resolution
process is then passed as `arg` (or `trades` in our example).
Here's an example without resolution of arguments:
```python-repl
>>> max_winning_streak = (
... 'max_winning_streak',
... dict(
... title='Max Winning Streak',
... calc_func=lambda self, group_by:
... self.get_trades(group_by=group_by).winning_streak.max()
... )
... )
>>> pf.stats(metrics=max_winning_streak, column=10)
Max Winning Streak 3.0
Name: 10, dtype: float64
```
And here's an example without resolution of the calculation function:
```python-repl
>>> max_winning_streak = (
... 'max_winning_streak',
... dict(
... title='Max Winning Streak',
... calc_func=lambda self, settings:
... self.get_trades(group_by=settings['group_by']).winning_streak.max(),
... resolve_calc_func=False
... )
... )
>>> pf.stats(metrics=max_winning_streak, column=10)
Max Winning Streak 3.0
Name: 10, dtype: float64
```
Since `max_winning_streak` method can be expressed as a path from this portfolio, we can simply write:
```python-repl
>>> max_winning_streak = (
... 'max_winning_streak',
... dict(
... title='Max Winning Streak',
... calc_func='trades.winning_streak.max'
... )
... )
```
In this case, we don't have to pass `resolve_trades=True` any more as vectorbt does it automatically.
Another advantage is that vectorbt can access the signature of the last method in the path
(`vectorbt.records.mapped_array.MappedArray.max` in our case) and resolve its arguments.
To switch between entry trades, exit trades, and positions, use the `trades_type` setting.
Additionally, you can pass `incl_open=True` to also include open trades.
```python-repl
>>> pf.stats(column=10, settings=dict(trades_type='positions', incl_open=True))
Start 2020-01-01 00:00:00+00:00
End 2020-09-01 00:00:00+00:00
Period 244 days 00:00:00
Start Value 100.0
End Value 106.721585
Total Return [%] 6.721585
Benchmark Return [%] 66.252621
Max Gross Exposure [%] 100.0
Total Fees Paid 0.0
Max Drawdown [%] 22.190944
Max Drawdown Duration 100 days 00:00:00
Total Trades 10
Total Closed Trades 10
Total Open Trades 0
Open Trade PnL 0.0
Win Rate [%] 60.0
Best Trade [%] 15.31962
Worst Trade [%] -9.904223
Avg Winning Trade [%] 4.671959
Avg Losing Trade [%] -4.851205
Avg Winning Trade Duration 11 days 08:00:00
Avg Losing Trade Duration 14 days 06:00:00
Profit Factor 1.347457
Expectancy 0.672158
Sharpe Ratio 0.445231
Calmar Ratio 0.460573
Omega Ratio 1.099192
Sortino Ratio 0.706986
Name: 10, dtype: object
```
Any default metric setting or even global setting can be overridden by the user using metric-specific
keyword arguments. Here, we override the global aggregation function for `max_dd_duration`:
```python-repl
>>> pf.stats(agg_func=lambda sr: sr.mean(),
... metric_settings=dict(
... max_dd_duration=dict(agg_func=lambda sr: sr.max())
... )
... )
UserWarning: Object has multiple columns. Aggregating using <function <lambda> at 0x7fbf6e77b268>.
Pass column to select a single column/group.
Start 2020-01-01 00:00:00+00:00
End 2020-09-01 00:00:00+00:00
Period 244 days 00:00:00
Start Value 100.0
End Value 138.746495
Total Return [%] 38.746495
Benchmark Return [%] 66.252621
Max Gross Exposure [%] 100.0
Total Fees Paid 0.0
Max Drawdown [%] 20.35869
Max Drawdown Duration 101 days 00:00:00 << here
Total Trades 15.0
Total Closed Trades 15.0
Total Open Trades 0.0
Open Trade PnL 0.0
Win Rate [%] 65.0
Best Trade [%] 16.82609
Worst Trade [%] -9.701273
Avg Winning Trade [%] 5.445408
Avg Losing Trade [%] -4.740956
Avg Winning Trade Duration 8 days 19:25:42.857142857
Avg Losing Trade Duration 9 days 07:00:00
Profit Factor 2.186957
Expectancy 2.105364
Sharpe Ratio 1.165695
Calmar Ratio 3.541079
Omega Ratio 1.331624
Sortino Ratio 2.084565
Name: agg_func_<lambda>, dtype: object
```
Let's create a simple metric that returns a passed value to demonstrate how vectorbt overrides settings,
from least to most important:
```python-repl
>>> # vbt.settings.portfolio.stats
>>> vbt.settings.portfolio.stats['settings']['my_arg'] = 100
>>> my_arg_metric = ('my_arg_metric', dict(title='My Arg', calc_func=lambda my_arg: my_arg))
>>> pf.stats(my_arg_metric, column=10)
My Arg 100
Name: 10, dtype: int64
>>> # settings >>> vbt.settings.portfolio.stats
>>> pf.stats(my_arg_metric, column=10, settings=dict(my_arg=200))
My Arg 200
Name: 10, dtype: int64
>>> # metric settings >>> settings
>>> my_arg_metric = ('my_arg_metric', dict(title='My Arg', my_arg=300, calc_func=lambda my_arg: my_arg))
>>> pf.stats(my_arg_metric, column=10, settings=dict(my_arg=200))
My Arg 300
Name: 10, dtype: int64
>>> # metric_settings >>> metric settings
>>> pf.stats(my_arg_metric, column=10, settings=dict(my_arg=200),
... metric_settings=dict(my_arg_metric=dict(my_arg=400)))
My Arg 400
Name: 10, dtype: int64
```
Here's an example of a parametrized metric. Let's get the number of trades with PnL over some amount:
```python-repl
>>> trade_min_pnl_cnt = (
... 'trade_min_pnl_cnt',
... dict(
... title=vbt.Sub('Trades with PnL over $$${min_pnl}'),
... calc_func=lambda trades, min_pnl: trades.apply_mask(
... trades.pnl.values >= min_pnl).count(),
... resolve_trades=True
... )
... )
>>> pf.stats(
... metrics=trade_min_pnl_cnt, column=10,
... metric_settings=dict(trade_min_pnl_cnt=dict(min_pnl=0)))
Trades with PnL over $0 6
Name: stats, dtype: int64
>>> pf.stats(
... metrics=trade_min_pnl_cnt, column=10,
... metric_settings=dict(trade_min_pnl_cnt=dict(min_pnl=10)))
Trades with PnL over $10 1
Name: stats, dtype: int64
```
If the same metric name was encountered more than once, vectorbt automatically appends an
underscore and its position, so we can pass keyword arguments to each metric separately:
```python-repl
>>> pf.stats(
... metrics=[
... trade_min_pnl_cnt,
... trade_min_pnl_cnt,
... trade_min_pnl_cnt
... ],
... column=10,
... metric_settings=dict(
... trade_min_pnl_cnt_0=dict(min_pnl=0),
... trade_min_pnl_cnt_1=dict(min_pnl=10),
... trade_min_pnl_cnt_2=dict(min_pnl=20))
... )
Trades with PnL over $0 6
Trades with PnL over $10 1
Trades with PnL over $20 0
Name: stats, dtype: int64
```
To add a custom metric to the list of all metrics, we have three options.
The first option is to change the `Portfolio.metrics` dict in-place (this will append to the end):
```python-repl
>>> pf.metrics['max_winning_streak'] = max_winning_streak[1]
>>> pf.stats(column=10)
Start 2020-01-01 00:00:00+00:00
End 2020-09-01 00:00:00+00:00
Period 244 days 00:00:00
Start Value 100.0
End Value 106.721585
Total Return [%] 6.721585
Benchmark Return [%] 66.252621
Max Gross Exposure [%] 100.0
Total Fees Paid 0.0
Max Drawdown [%] 22.190944
Max Drawdown Duration 101 days 00:00:00
Total Trades 10
Total Closed Trades 10
Total Open Trades 0
Open Trade PnL 0.0
Win Rate [%] 60.0
Best Trade [%] 15.31962
Worst Trade [%] -9.904223
Avg Winning Trade [%] 4.671959
Avg Losing Trade [%] -4.851205
Avg Winning Trade Duration 11 days 08:00:00
Avg Losing Trade Duration 14 days 06:00:00
Profit Factor 1.347457
Expectancy 0.672158
Sharpe Ratio 0.445231
Calmar Ratio 0.460573
Omega Ratio 1.099192
Sortino Ratio 0.706986
Max Winning Streak 3.0 << here
Name: 10, dtype: object
```
Since `Portfolio.metrics` is of type `vectorbt.utils.config.Config`, we can reset it at any time
to get default metrics:
```python-repl
>>> pf.metrics.reset()
```
The second option is to copy `Portfolio.metrics`, append our metric, and pass as `metrics` argument:
```python-repl
>>> my_metrics = list(pf.metrics.items()) + [max_winning_streak]
>>> pf.stats(metrics=my_metrics, column=10)
```
The third option is to set `metrics` globally under `portfolio.stats` in `vectorbt._settings.settings`.
```python-repl
>>> vbt.settings.portfolio['stats']['metrics'] = my_metrics
>>> pf.stats(column=10)
```
## Returns stats
We can compute the stats solely based on the portfolio's returns using `Portfolio.returns_stats`,
which calls `vectorbt.returns.accessors.ReturnsAccessor.stats`.
```python-repl
>>> pf.returns_stats(column=10)
Start 2020-01-01 00:00:00+00:00
End 2020-09-01 00:00:00+00:00
Period 244 days 00:00:00
Total Return [%] 6.721585
Benchmark Return [%] 66.252621
Annualized Return [%] 10.22056
Annualized Volatility [%] 36.683518
Max Drawdown [%] 22.190944
Max Drawdown Duration 100 days 00:00:00
Sharpe Ratio 0.445231
Calmar Ratio 0.460573
Omega Ratio 1.099192
Sortino Ratio 0.706986
Skew 1.328259
Kurtosis 10.80246
Tail Ratio 1.057913
Common Sense Ratio 1.166037
Value at Risk -0.031011
Alpha -0.075109
Beta 0.220351
Name: 10, dtype: object
```
Most metrics defined in `vectorbt.returns.accessors.ReturnsAccessor` are also available
as attributes of `Portfolio`:
```python-repl
>>> pf.sharpe_ratio()
randnx_n
10 0.445231
20 1.886158
Name: sharpe_ratio, dtype: float64
```
Moreover, we can access quantstats functions using `vectorbt.returns.qs_adapter.QSAdapter`:
```python-repl
>>> pf.qs.sharpe()
randnx_n
10 0.445231
20 1.886158
dtype: float64
>>> pf[10].qs.plot_snapshot()
```

## Plots
!!! hint
See `vectorbt.generic.plots_builder.PlotsBuilderMixin.plots`.
The features implemented in this method are very similar to `Portfolio.stats`.
See also the examples under `Portfolio.stats`.
Plot portfolio of a random strategy:
```python-repl
>>> pf.plot(column=10)
```

You can choose any of the subplots in `Portfolio.subplots`, in any order, and
control their appearance using keyword arguments:
```python-repl
>>> pf.plot(
... subplots=['drawdowns', 'underwater'],
... column=10,
... subplot_settings=dict(
... drawdowns=dict(top_n=3),
... underwater=dict(
... trace_kwargs=dict(
... line=dict(color='#FF6F00'),
... fillcolor=adjust_opacity('#FF6F00', 0.3)
... )
... )
... )
... )
```

To create a new subplot, a preferred way is to pass a plotting function:
```python-repl
>>> def plot_order_size(pf, size, column=None, add_trace_kwargs=None, fig=None):
... size = pf.select_one_from_obj(size, pf.wrapper.regroup(False), column=column)
... size.rename('Order Size').vbt.barplot(
... add_trace_kwargs=add_trace_kwargs, fig=fig)
>>> order_size = pf.orders.size.to_pd(fill_value=0.)
>>> pf.plot(subplots=[
... 'orders',
... ('order_size', dict(
... title='Order Size',
... yaxis_kwargs=dict(title='Order size'),
... check_is_not_grouped=True,
... plot_func=plot_order_size
... ))
... ],
... column=10,
... subplot_settings=dict(
... order_size=dict(
... size=order_size
... )
... )
... )
```
Alternatively, you can create a placeholder and overwrite it manually later:
```python-repl
>>> fig = pf.plot(subplots=[
... 'orders',
... ('order_size', dict(
... title='Order Size',
... yaxis_kwargs=dict(title='Order size'),
... check_is_not_grouped=True
... )) # placeholder
... ], column=10)
>>> order_size[10].rename('Order Size').vbt.barplot(
... add_trace_kwargs=dict(row=2, col=1),
... fig=fig
... )
```

If a plotting function can in any way be accessed from the current portfolio, you can pass
the path to this function (see `vectorbt.utils.attr.deep_getattr` for the path format).
You can additionally use templates to make some parameters to depend upon passed keyword arguments:
```python-repl
>>> subplots = [
... ('cumulative_returns', dict(
... title='Cumulative Returns',
... yaxis_kwargs=dict(title='Cumulative returns'),
... plot_func='returns.vbt.returns.cumulative.vbt.plot',
... pass_add_trace_kwargs=True
... )),
... ('rolling_drawdown', dict(
... title='Rolling Drawdown',
... yaxis_kwargs=dict(title='Rolling drawdown'),
... plot_func=[
... 'returns.vbt.returns', # returns accessor
... (
... 'rolling_max_drawdown', # function name
... (vbt.Rep('window'),)), # positional arguments
... 'vbt.plot' # plotting function
... ],
... pass_add_trace_kwargs=True,
... trace_names=[vbt.Sub('rolling_drawdown(${window})')], # add window to the trace name
... ))
... ]
>>> pf.plot(
... subplots,
... column=10,
... subplot_settings=dict(
... rolling_drawdown=dict(
... template_mapping=dict(
... window=10
... )
... )
... )
... )
```
You can also replace templates across all subplots by using the global template mapping:
```python-repl
>>> pf.plot(subplots, column=10, template_mapping=dict(window=10))
```

"""
import warnings
import numpy as np
import pandas as pd
from vectorbt import _typing as tp
from vectorbt.utils import checks
from vectorbt.utils.decorators import cached_property, cached_method
from vectorbt.utils.enum import map_enum_fields
from vectorbt.utils.config import merge_dicts, Config
from vectorbt.utils.template import RepEval, deep_substitute
from vectorbt.utils.random import set_seed
from vectorbt.utils.colors import adjust_opacity
from vectorbt.utils.figure import get_domain
from vectorbt.base.reshape_fns import to_1d_array, to_2d_array, broadcast, broadcast_to, to_pd_array
from vectorbt.base.array_wrapper import ArrayWrapper, Wrapping
from vectorbt.generic.stats_builder import StatsBuilderMixin
from vectorbt.generic.plots_builder import PlotsBuilderMixin
from vectorbt.generic.drawdowns import Drawdowns
from vectorbt.signals.generators import RANDNX, RPROBNX
from vectorbt.returns.accessors import ReturnsAccessor
from vectorbt.returns import nb as returns_nb
from vectorbt.portfolio import nb
from vectorbt.portfolio.orders import Orders
from vectorbt.portfolio.trades import Trades, EntryTrades, ExitTrades, Positions
from vectorbt.portfolio.logs import Logs
from vectorbt.portfolio.enums import *
from vectorbt.portfolio.decorators import attach_returns_acc_methods
try:
import quantstats as qs
except ImportError:
QSAdapterT = tp.Any
else:
from vectorbt.returns.qs_adapter import QSAdapter as QSAdapterT
# Mapping of object names to docstrings, consumed by pdoc when building the API docs.
__pdoc__ = {}

# Config of `vectorbt.returns.accessors.ReturnsAccessor` methods that get attached
# to `Portfolio` via the `attach_returns_acc_methods` decorator further below.
# NOTE(review): `source_name` presumably overrides the accessor method name to look
# up (e.g. 'daily_returns' -> `ReturnsAccessor.daily`), while an empty dict keeps
# the key as-is — confirm against `attach_returns_acc_methods`.
returns_acc_config = Config(
    {
        'daily_returns': dict(source_name='daily'),
        'annual_returns': dict(source_name='annual'),
        'cumulative_returns': dict(source_name='cumulative'),
        'annualized_return': dict(source_name='annualized'),
        'annualized_volatility': dict(),
        'calmar_ratio': dict(),
        'omega_ratio': dict(),
        'sharpe_ratio': dict(),
        'deflated_sharpe_ratio': dict(),
        'downside_risk': dict(),
        'sortino_ratio': dict(),
        'information_ratio': dict(),
        'beta': dict(),
        'alpha': dict(),
        'tail_ratio': dict(),
        'value_at_risk': dict(),
        'cond_value_at_risk': dict(),
        'capture': dict(),
        'up_capture': dict(),
        'down_capture': dict(),
        'drawdown': dict(),
        'max_drawdown': dict()
    },
    readonly=True,  # shared module-level config; protect against accidental mutation
    as_attrs=False
)
"""_"""

__pdoc__['returns_acc_config'] = f"""Config of returns accessor methods to be added to `Portfolio`.
```json
{returns_acc_config.to_doc()}
```
"""
# Type variable bound to `Portfolio` so that methods returning a new portfolio
# (e.g. `indexing_func`, class methods with `from_` prefix) preserve the subclass type.
PortfolioT = tp.TypeVar("PortfolioT", bound="Portfolio")


class MetaPortfolio(type(StatsBuilderMixin), type(PlotsBuilderMixin)):
    """Metaclass combining the metaclasses of `StatsBuilderMixin` and `PlotsBuilderMixin`
    so that `Portfolio` can inherit from both without a metaclass conflict."""
    pass
@attach_returns_acc_methods(returns_acc_config)
class Portfolio(Wrapping, StatsBuilderMixin, PlotsBuilderMixin, metaclass=MetaPortfolio):
"""Class for modeling portfolio and measuring its performance.
Args:
wrapper (ArrayWrapper): Array wrapper.
See `vectorbt.base.array_wrapper.ArrayWrapper`.
close (array_like): Last asset price at each time step.
order_records (array_like): A structured NumPy array of order records.
log_records (array_like): A structured NumPy array of log records.
init_cash (InitCashMode, float or array_like of float): Initial capital.
cash_sharing (bool): Whether to share cash within the same group.
call_seq (array_like of int): Sequence of calls per row and group. Defaults to None.
fillna_close (bool): Whether to forward and backward fill NaN values in `close`.
Applied after the simulation to avoid NaNs in asset value.
See `Portfolio.get_filled_close`.
trades_type (str or int): Default `vectorbt.portfolio.trades.Trades` to use across `Portfolio`.
See `vectorbt.portfolio.enums.TradesType`.
For defaults, see `portfolio` in `vectorbt._settings.settings`.
!!! note
Use class methods with `from_` prefix to build a portfolio.
The `__init__` method is reserved for indexing purposes.
!!! note
This class is meant to be immutable. To change any attribute, use `Portfolio.replace`."""
def __init__(self,
             wrapper: ArrayWrapper,
             close: tp.ArrayLike,
             order_records: tp.RecordArray,
             log_records: tp.RecordArray,
             init_cash: tp.ArrayLike,
             cash_sharing: bool,
             call_seq: tp.Optional[tp.Array2d] = None,
             fillna_close: tp.Optional[bool] = None,
             trades_type: tp.Optional[tp.Union[int, str]] = None) -> None:
    # Forward the arguments *as passed* (possibly None) to `Wrapping` so they become
    # part of the instance config used to re-create this object (e.g. by
    # `Portfolio.replace` and when loading from disk). Defaults are resolved only
    # for the values stored on this instance below — see the note on `fillna_close`
    # in the module docstring.
    Wrapping.__init__(
        self,
        wrapper,
        close=close,
        order_records=order_records,
        log_records=log_records,
        init_cash=init_cash,
        cash_sharing=cash_sharing,
        call_seq=call_seq,
        fillna_close=fillna_close,
        trades_type=trades_type
    )
    StatsBuilderMixin.__init__(self)
    PlotsBuilderMixin.__init__(self)

    # Get defaults: resolve None arguments from the global `portfolio` settings
    from vectorbt._settings import settings
    portfolio_cfg = settings['portfolio']

    if fillna_close is None:
        fillna_close = portfolio_cfg['fillna_close']
    if trades_type is None:
        trades_type = portfolio_cfg['trades_type']
    if isinstance(trades_type, str):
        # Accept a field name such as 'positions' and map it to its enum value
        trades_type = map_enum_fields(trades_type, TradesType)

    # Store passed arguments (with defaults resolved) on the instance.
    # `close` is broadcast against the ungrouped wrapper shape.
    self._close = broadcast_to(close, wrapper.dummy(group_by=False))
    self._order_records = order_records
    self._log_records = log_records
    self._init_cash = init_cash
    self._cash_sharing = cash_sharing
    self._call_seq = call_seq
    self._fillna_close = fillna_close
    self._trades_type = trades_type
def indexing_func(self: PortfolioT, pd_indexing_func: tp.PandasIndexingFunc, **kwargs) -> PortfolioT:
    """Perform indexing on `Portfolio`.

    Selects columns/groups via `pd_indexing_func` and returns a new portfolio with
    the close price, order/log records, initial cash, and call sequence sliced
    to the selection."""
    # Resolve the new wrapper together with the selected group and column indices
    # (column-only selection: rows cannot be sliced).
    new_wrapper, _, group_idxs, col_idxs = \
        self.wrapper.indexing_func_meta(pd_indexing_func, column_only_select=True, **kwargs)

    # Slice the column-wise data.
    sliced_close = new_wrapper.wrap(to_2d_array(self.close)[:, col_idxs], group_by=False)
    sliced_orders = self.orders.get_by_col_idxs(col_idxs)
    sliced_logs = self.logs.get_by_col_idxs(col_idxs)

    # A scalar initial cash (per the docstring, e.g. an `InitCashMode` value) applies
    # to any selection as-is; an array must be sliced per group when cash is shared,
    # otherwise per column.
    if isinstance(self._init_cash, int):
        sliced_init_cash = self._init_cash
    else:
        cash_idxs = group_idxs if self.cash_sharing else col_idxs
        sliced_init_cash = to_1d_array(self._init_cash)[cash_idxs]

    # The call sequence is optional; slice it by column only when present.
    sliced_call_seq = self.call_seq.values[:, col_idxs] if self.call_seq is not None else None

    return self.replace(
        wrapper=new_wrapper,
        close=sliced_close,
        order_records=sliced_orders,
        log_records=sliced_logs,
        init_cash=sliced_init_cash,
        call_seq=sliced_call_seq
    )
# ############# Class methods ############# #
@classmethod
def from_orders(cls: tp.Type[PortfolioT],
close: tp.ArrayLike,
size: tp.Optional[tp.ArrayLike] = None,
size_type: tp.Optional[tp.ArrayLike] = None,
direction: tp.Optional[tp.ArrayLike] = None,
price: tp.Optional[tp.ArrayLike] = None,
fees: tp.Optional[tp.ArrayLike] = None,
fixed_fees: tp.Optional[tp.ArrayLike] = None,
slippage: tp.Optional[tp.ArrayLike] = None,
min_size: tp.Optional[tp.ArrayLike] = None,
max_size: tp.Optional[tp.ArrayLike] = None,
reject_prob: tp.Optional[tp.ArrayLike] = None,
lock_cash: tp.Optional[tp.ArrayLike] = None,
allow_partial: tp.Optional[tp.ArrayLike] = None,
raise_reject: tp.Optional[tp.ArrayLike] = None,
log: tp.Optional[tp.ArrayLike] = None,
val_price: tp.Optional[tp.ArrayLike] = None,
init_cash: tp.Optional[tp.ArrayLike] = None,
cash_sharing: tp.Optional[bool] = None,
call_seq: tp.Optional[tp.ArrayLike] = None,
ffill_val_price: tp.Optional[bool] = None,
update_value: tp.Optional[bool] = None,
max_orders: tp.Optional[int] = None,
max_logs: tp.Optional[int] = None,
seed: tp.Optional[int] = None,
group_by: tp.GroupByLike = None,
broadcast_kwargs: tp.KwargsLike = None,
wrapper_kwargs: tp.KwargsLike = None,
freq: tp.Optional[tp.FrequencyLike] = None,
attach_call_seq: tp.Optional[bool] = None,
**kwargs) -> PortfolioT:
"""Simulate portfolio from orders - size, price, fees, and other information.
Args:
close (array_like): Last asset price at each time step.
Will broadcast.
Used for calculating unrealized PnL and portfolio value.
size (float or array_like): Size to order.
See `vectorbt.portfolio.enums.Order.size`. Will broadcast.
size_type (SizeType or array_like): See `vectorbt.portfolio.enums.SizeType`.
See `vectorbt.portfolio.enums.Order.size_type`. Will broadcast.
!!! note
`SizeType.Percent` does not support position reversal. Switch to a single direction.
!!! warning
Be cautious using `SizeType.Percent` with `call_seq` set to 'auto'.
To execute sell orders before buy orders, the value of each order in the group
needs to be approximated in advance. But since `SizeType.Percent` depends
upon the cash balance, which cannot be calculated in advance since it may change
after each order, this can yield a non-optimal call sequence.
direction (Direction or array_like): See `vectorbt.portfolio.enums.Direction`.
See `vectorbt.portfolio.enums.Order.direction`. Will broadcast.
price (array_like of float): Order price.
See `vectorbt.portfolio.enums.Order.price`. Defaults to `np.inf`. Will broadcast.
!!! note
Make sure to use the same timestamp for all order prices in the group with cash sharing
and `call_seq` set to `CallSeqType.Auto`.
fees (float or array_like): Fees in percentage of the order value.
See `vectorbt.portfolio.enums.Order.fees`. Will broadcast.
fixed_fees (float or array_like): Fixed amount of fees to pay per order.
See `vectorbt.portfolio.enums.Order.fixed_fees`. Will broadcast.
slippage (float or array_like): Slippage in percentage of price.
See `vectorbt.portfolio.enums.Order.slippage`. Will broadcast.
min_size (float or array_like): Minimum size for an order to be accepted.
See `vectorbt.portfolio.enums.Order.min_size`. Will broadcast.
max_size (float or array_like): Maximum size for an order.
See `vectorbt.portfolio.enums.Order.max_size`. Will broadcast.
Will be partially filled if exceeded.
reject_prob (float or array_like): Order rejection probability.
See `vectorbt.portfolio.enums.Order.reject_prob`. Will broadcast.
lock_cash (bool or array_like): Whether to lock cash when shorting.
See `vectorbt.portfolio.enums.Order.lock_cash`. Will broadcast.
allow_partial (bool or array_like): Whether to allow partial fills.
See `vectorbt.portfolio.enums.Order.allow_partial`. Will broadcast.
Does not apply when size is `np.inf`.
raise_reject (bool or array_like): Whether to raise an exception if order gets rejected.
See `vectorbt.portfolio.enums.Order.raise_reject`. Will broadcast.
log (bool or array_like): Whether to log orders.
See `vectorbt.portfolio.enums.Order.log`. Will broadcast.
val_price (array_like of float): Asset valuation price.
Will broadcast.
* Any `-np.inf` element is replaced by the latest valuation price (the previous `close` or
the latest known valuation price if `ffill_val_price`).
* Any `np.inf` element is replaced by the current order price.
Used at the time of decision making to calculate value of each asset in the group,
for example, to convert target value into target amount.
!!! note
In contrast to `Portfolio.from_order_func`, order price is known beforehand (kind of),
thus `val_price` is set to the current order price (using `np.inf`) by default.
To valuate using previous close, set it in the settings to `-np.inf`.
!!! note
Make sure to use timestamp for `val_price` that comes before timestamps of
all orders in the group with cash sharing (previous `close` for example),
otherwise you're cheating yourself.
init_cash (InitCashMode, float or array_like of float): Initial capital.
By default, will broadcast to the number of columns.
If cash sharing is enabled, will broadcast to the number of groups.
See `vectorbt.portfolio.enums.InitCashMode` to find optimal initial cash.
!!! note
Mode `InitCashMode.AutoAlign` is applied after the portfolio is initialized
to set the same initial cash for all columns/groups. Changing grouping
will change the initial cash, so be aware when indexing.
cash_sharing (bool): Whether to share cash within the same group.
If `group_by` is None, `group_by` becomes True to form a single group with cash sharing.
!!! warning
Introduces cross-asset dependencies.
This method presumes that in a group of assets that share the same capital all
orders will be executed within the same tick and retain their price regardless
of their position in the queue, even though they depend upon each other and thus
cannot be executed in parallel.
call_seq (CallSeqType or array_like): Default sequence of calls per row and group.
Each value in this sequence should indicate the position of column in the group to
call next. Processing of `call_seq` goes always from left to right.
For example, `[2, 0, 1]` would first call column 'c', then 'a', and finally 'b'.
* Use `vectorbt.portfolio.enums.CallSeqType` to select a sequence type.
* Set to array to specify custom sequence. Will not broadcast.
If `CallSeqType.Auto` selected, rearranges calls dynamically based on order value.
Calculates value of all orders per row and group, and sorts them by this value.
Sell orders will be executed first to release funds for buy orders.
!!! warning
`CallSeqType.Auto` should be used with caution:
* It not only presumes that order prices are known beforehand, but also that
orders can be executed in arbitrary order and still retain their price.
In reality, this is hardly the case: after processing one asset, some time
has passed and the price for other assets might have already changed.
* Even if you're able to specify a slippage large enough to compensate for
this behavior, slippage itself should depend upon execution order.
This method doesn't let you do that.
* If one order is rejected, it still may execute next orders and possibly
leave them without required funds.
For more control, use `Portfolio.from_order_func`.
ffill_val_price (bool): Whether to track valuation price only if it's known.
Otherwise, unknown `close` will lead to NaN in valuation price at the next timestamp.
update_value (bool): Whether to update group value after each filled order.
max_orders (int): Size of the order records array.
Defaults to the number of elements in the broadcasted shape.
Set to a lower number if you run out of memory.
max_logs (int): Size of the log records array.
Defaults to the number of elements in the broadcasted shape if any of the `log` is True,
otherwise to 1.
Set to a lower number if you run out of memory.
seed (int): Seed to be set for both `call_seq` and at the beginning of the simulation.
group_by (any): Group columns. See `vectorbt.base.column_grouper.ColumnGrouper`.
broadcast_kwargs (dict): Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`.
wrapper_kwargs (dict): Keyword arguments passed to `vectorbt.base.array_wrapper.ArrayWrapper`.
freq (any): Index frequency in case it cannot be parsed from `close`.
attach_call_seq (bool): Whether to pass `call_seq` to the constructor.
Makes sense if you want to analyze some metrics in the simulation order.
Otherwise, just takes memory.
**kwargs: Keyword arguments passed to the `__init__` method.
All broadcastable arguments will broadcast using `vectorbt.base.reshape_fns.broadcast`
but keep original shape to utilize flexible indexing and to save memory.
For defaults, see `portfolio` in `vectorbt._settings.settings`.
!!! note
When `call_seq` is not `CallSeqType.Auto`, at each timestamp, processing of the assets in
a group goes strictly in order defined in `call_seq`. This order can't be changed dynamically.
This has one big implication for this particular method: the last asset in the call stack
cannot be processed until other assets are processed. This is the reason why rebalancing
cannot work properly in this setting: one has to specify percentages for all assets beforehand
and then tweak the processing order to sell to-be-sold assets first in order to release funds
for to-be-bought assets. This can be automatically done by using `CallSeqType.Auto`.
!!! hint
All broadcastable arguments can be set per frame, series, row, column, or element.
## Example
* Buy 10 units each tick:
```python-repl
>>> close = pd.Series([1, 2, 3, 4, 5])
>>> pf = vbt.Portfolio.from_orders(close, 10)
>>> pf.assets()
0 10.0
1 20.0
2 30.0
3 40.0
4 40.0
dtype: float64
>>> pf.cash()
0 90.0
1 70.0
2 40.0
3 0.0
4 0.0
dtype: float64
```
* Reverse each position by first closing it:
```python-repl
>>> size = [1, 0, -1, 0, 1]
>>> pf = vbt.Portfolio.from_orders(close, size, size_type='targetpercent')
>>> pf.assets()
0 100.000000
1 0.000000
2 -66.666667
3 0.000000
4 26.666667
dtype: float64
>>> pf.cash()
0 0.000000
1 200.000000
2 400.000000
3 133.333333
4 0.000000
dtype: float64
```
* Equal-weighted portfolio as in `vectorbt.portfolio.nb.simulate_nb` example
(it's more compact but has less control over execution):
```python-repl
>>> np.random.seed(42)
>>> close = pd.DataFrame(np.random.uniform(1, 10, size=(5, 3)))
>>> size = pd.Series(np.full(5, 1/3)) # each column 33.3%
>>> size[1::2] = np.nan # skip every second tick
>>> pf = vbt.Portfolio.from_orders(
... close, # acts both as reference and order price here
... size,
... size_type='targetpercent',
... call_seq='auto', # first sell then buy
... group_by=True, # one group
... cash_sharing=True, # assets share the same cash
... fees=0.001, fixed_fees=1., slippage=0.001 # costs
... )
>>> pf.asset_value(group_by=False).vbt.plot()
```

"""
# Get defaults
from vectorbt._settings import settings
portfolio_cfg = settings['portfolio']
if size is None:
size = portfolio_cfg['size']
if size_type is None:
size_type = portfolio_cfg['size_type']
size_type = map_enum_fields(size_type, SizeType)
if direction is None:
direction = portfolio_cfg['order_direction']
direction = map_enum_fields(direction, Direction)
if price is None:
price = np.inf
if size is None:
size = portfolio_cfg['size']
if fees is None:
fees = portfolio_cfg['fees']
if fixed_fees is None:
fixed_fees = portfolio_cfg['fixed_fees']
if slippage is None:
slippage = portfolio_cfg['slippage']
if min_size is None:
min_size = portfolio_cfg['min_size']
if max_size is None:
max_size = portfolio_cfg['max_size']
if reject_prob is None:
reject_prob = portfolio_cfg['reject_prob']
if lock_cash is None:
lock_cash = portfolio_cfg['lock_cash']
if allow_partial is None:
allow_partial = portfolio_cfg['allow_partial']
if raise_reject is None:
raise_reject = portfolio_cfg['raise_reject']
if log is None:
log = portfolio_cfg['log']
if val_price is None:
val_price = portfolio_cfg['val_price']
if init_cash is None:
init_cash = portfolio_cfg['init_cash']
if isinstance(init_cash, str):
init_cash = map_enum_fields(init_cash, InitCashMode)
if isinstance(init_cash, int) and init_cash in InitCashMode:
init_cash_mode = init_cash
init_cash = np.inf
else:
init_cash_mode = None
if cash_sharing is None:
cash_sharing = portfolio_cfg['cash_sharing']
if cash_sharing and group_by is None:
group_by = True
if call_seq is None:
call_seq = portfolio_cfg['call_seq']
auto_call_seq = False
if isinstance(call_seq, str):
call_seq = map_enum_fields(call_seq, CallSeqType)
if isinstance(call_seq, int):
if call_seq == CallSeqType.Auto:
call_seq = CallSeqType.Default
auto_call_seq = True
if ffill_val_price is None:
ffill_val_price = portfolio_cfg['ffill_val_price']
if update_value is None:
update_value = portfolio_cfg['update_value']
if seed is None:
seed = portfolio_cfg['seed']
if seed is not None:
set_seed(seed)
if freq is None:
freq = portfolio_cfg['freq']
if attach_call_seq is None:
attach_call_seq = portfolio_cfg['attach_call_seq']
if broadcast_kwargs is None:
broadcast_kwargs = {}
if wrapper_kwargs is None:
wrapper_kwargs = {}
if not wrapper_kwargs.get('group_select', True) and cash_sharing:
raise ValueError("group_select cannot be disabled if cash_sharing=True")
# Prepare the simulation
# Only close is broadcast, others can remain unchanged thanks to flexible indexing
broadcastable_args = (
size,
price,
size_type,
direction,
fees,
fixed_fees,
slippage,
min_size,
max_size,
reject_prob,
lock_cash,
allow_partial,
raise_reject,
log,
val_price,
close
)
keep_raw = [True] * len(broadcastable_args)
keep_raw[-1] = False
broadcast_kwargs = merge_dicts(dict(
keep_raw=keep_raw,
require_kwargs=dict(requirements='W')
), broadcast_kwargs)
broadcasted_args = broadcast(*broadcastable_args, **broadcast_kwargs)
close = broadcasted_args[-1]
if not checks.is_pandas(close):
close = pd.Series(close) if close.ndim == 1 else pd.DataFrame(close)
target_shape_2d = (close.shape[0], close.shape[1] if close.ndim > 1 else 1)
wrapper = ArrayWrapper.from_obj(close, freq=freq, group_by=group_by, **wrapper_kwargs)
cs_group_lens = wrapper.grouper.get_group_lens(group_by=None if cash_sharing else False)
init_cash = np.require(np.broadcast_to(init_cash, (len(cs_group_lens),)), dtype=np.float_)
group_lens = wrapper.grouper.get_group_lens(group_by=group_by)
if checks.is_any_array(call_seq):
call_seq = nb.require_call_seq(broadcast(call_seq, to_shape=target_shape_2d, to_pd=False))
else:
call_seq = nb.build_call_seq(target_shape_2d, group_lens, call_seq_type=call_seq)
if max_orders is None:
max_orders = target_shape_2d[0] * target_shape_2d[1]
if max_logs is None:
max_logs = target_shape_2d[0] * target_shape_2d[1]
if not np.any(log):
max_logs = 1
# Perform the simulation
order_records, log_records = nb.simulate_from_orders_nb(
target_shape_2d,
cs_group_lens, # group only if cash sharing is enabled to speed up
init_cash,
call_seq,
*map(np.asarray, broadcasted_args),
auto_call_seq,
ffill_val_price,
update_value,
max_orders,
max_logs,
close.ndim == 2
)
# Create an instance
return cls(
wrapper,
close,
order_records,
log_records,
init_cash if init_cash_mode is None else init_cash_mode,
cash_sharing,
call_seq=call_seq if attach_call_seq else None,
**kwargs
)
@classmethod
def from_signals(cls: tp.Type[PortfolioT],
close: tp.ArrayLike,
entries: tp.Optional[tp.ArrayLike] = None,
exits: tp.Optional[tp.ArrayLike] = None,
short_entries: tp.Optional[tp.ArrayLike] = None,
short_exits: tp.Optional[tp.ArrayLike] = None,
signal_func_nb: nb.SignalFuncT = nb.no_signal_func_nb,
signal_args: tp.ArgsLike = (),
size: tp.Optional[tp.ArrayLike] = None,
size_type: tp.Optional[tp.ArrayLike] = None,
price: tp.Optional[tp.ArrayLike] = None,
fees: tp.Optional[tp.ArrayLike] = None,
fixed_fees: tp.Optional[tp.ArrayLike] = None,
slippage: tp.Optional[tp.ArrayLike] = None,
min_size: tp.Optional[tp.ArrayLike] = None,
max_size: tp.Optional[tp.ArrayLike] = None,
reject_prob: tp.Optional[tp.ArrayLike] = None,
lock_cash: tp.Optional[tp.ArrayLike] = None,
allow_partial: tp.Optional[tp.ArrayLike] = None,
raise_reject: tp.Optional[tp.ArrayLike] = None,
log: tp.Optional[tp.ArrayLike] = None,
accumulate: tp.Optional[tp.ArrayLike] = None,
upon_long_conflict: tp.Optional[tp.ArrayLike] = None,
upon_short_conflict: tp.Optional[tp.ArrayLike] = None,
upon_dir_conflict: tp.Optional[tp.ArrayLike] = None,
upon_opposite_entry: tp.Optional[tp.ArrayLike] = None,
direction: tp.Optional[tp.ArrayLike] = None,
val_price: tp.Optional[tp.ArrayLike] = None,
open: tp.Optional[tp.ArrayLike] = None,
high: tp.Optional[tp.ArrayLike] = None,
low: tp.Optional[tp.ArrayLike] = None,
sl_stop: tp.Optional[tp.ArrayLike] = None,
sl_trail: tp.Optional[tp.ArrayLike] = None,
tp_stop: tp.Optional[tp.ArrayLike] = None,
stop_entry_price: tp.Optional[tp.ArrayLike] = None,
stop_exit_price: tp.Optional[tp.ArrayLike] = None,
upon_stop_exit: tp.Optional[tp.ArrayLike] = None,
upon_stop_update: tp.Optional[tp.ArrayLike] = None,
adjust_sl_func_nb: nb.AdjustSLFuncT = nb.no_adjust_sl_func_nb,
adjust_sl_args: tp.Args = (),
adjust_tp_func_nb: nb.AdjustTPFuncT = nb.no_adjust_tp_func_nb,
adjust_tp_args: tp.Args = (),
use_stops: tp.Optional[bool] = None,
init_cash: tp.Optional[tp.ArrayLike] = None,
cash_sharing: tp.Optional[bool] = None,
call_seq: tp.Optional[tp.ArrayLike] = None,
ffill_val_price: tp.Optional[bool] = None,
update_value: tp.Optional[bool] = None,
max_orders: tp.Optional[int] = None,
max_logs: tp.Optional[int] = None,
seed: tp.Optional[int] = None,
group_by: tp.GroupByLike = None,
broadcast_named_args: tp.KwargsLike = None,
broadcast_kwargs: tp.KwargsLike = None,
template_mapping: tp.Optional[tp.Mapping] = None,
wrapper_kwargs: tp.KwargsLike = None,
freq: tp.Optional[tp.FrequencyLike] = None,
attach_call_seq: tp.Optional[bool] = None,
**kwargs) -> PortfolioT:
"""Simulate portfolio from entry and exit signals.
See `vectorbt.portfolio.nb.simulate_from_signal_func_nb`.
You have three options to provide signals:
* `entries` and `exits`: The direction of each pair of signals is taken from `direction` argument.
Best to use when the direction doesn't change throughout time.
Uses `vectorbt.portfolio.nb.dir_enex_signal_func_nb` as `signal_func_nb`.
!!! hint
`entries` and `exits` can be easily translated to direction-aware signals:
* (True, True, 'longonly') -> True, True, False, False
* (True, True, 'shortonly') -> False, False, True, True
* (True, True, 'both') -> True, False, True, False
* `entries` (acting as long), `exits` (acting as long), `short_entries`, and `short_exits`:
The direction is already built into the arrays. Best to use when the direction changes frequently
(for example, if you have one indicator providing long signals and one providing short signals).
Uses `vectorbt.portfolio.nb.ls_enex_signal_func_nb` as `signal_func_nb`.
* `signal_func_nb` and `signal_args`: Custom signal function that returns direction-aware signals.
Best to use when signals should be placed dynamically based on custom conditions.
Args:
close (array_like): See `Portfolio.from_orders`.
entries (array_like of bool): Boolean array of entry signals.
Defaults to True if all other signal arrays are not set, otherwise False. Will broadcast.
* If `short_entries` and `short_exits` are not set: Acts as a long signal if `direction`
is `all` or `longonly`, otherwise short.
* If `short_entries` or `short_exits` are set: Acts as `long_entries`.
exits (array_like of bool): Boolean array of exit signals.
Defaults to False. Will broadcast.
* If `short_entries` and `short_exits` are not set: Acts as a short signal if `direction`
is `all` or `longonly`, otherwise long.
* If `short_entries` or `short_exits` are set: Acts as `long_exits`.
short_entries (array_like of bool): Boolean array of short entry signals.
Defaults to False. Will broadcast.
short_exits (array_like of bool): Boolean array of short exit signals.
Defaults to False. Will broadcast.
signal_func_nb (callable): Function called to generate signals.
Should accept `vectorbt.portfolio.enums.SignalContext` and `*signal_args`.
Should return long entry signal, long exit signal, short entry signal, and short exit signal.
!!! note
Stop signal has priority: `signal_func_nb` is executed only if there is no stop signal.
signal_args (tuple): Packed arguments passed to `signal_func_nb`.
Defaults to `()`.
size (float or array_like): See `Portfolio.from_orders`.
!!! note
Negative size is not allowed. You should express direction using signals.
size_type (SizeType or array_like): See `Portfolio.from_orders`.
Only `SizeType.Amount`, `SizeType.Value`, and `SizeType.Percent` are supported.
Other modes such as target percentage are not compatible with signals since
their logic may contradict the direction of the signal.
!!! note
`SizeType.Percent` does not support position reversal. Switch to a single
direction or use `vectorbt.portfolio.enums.OppositeEntryMode.Close` to close the position first.
See warning in `Portfolio.from_orders`.
price (array_like of float): See `Portfolio.from_orders`.
fees (float or array_like): See `Portfolio.from_orders`.
fixed_fees (float or array_like): See `Portfolio.from_orders`.
slippage (float or array_like): See `Portfolio.from_orders`.
min_size (float or array_like): See `Portfolio.from_orders`.
max_size (float or array_like): See `Portfolio.from_orders`.
Will be partially filled if exceeded. You might not be able to properly close
the position if accumulation is enabled and `max_size` is too low.
reject_prob (float or array_like): See `Portfolio.from_orders`.
lock_cash (bool or array_like): See `Portfolio.from_orders`.
allow_partial (bool or array_like): See `Portfolio.from_orders`.
raise_reject (bool or array_like): See `Portfolio.from_orders`.
log (bool or array_like): See `Portfolio.from_orders`.
accumulate (bool, AccumulationMode or array_like): See `vectorbt.portfolio.enums.AccumulationMode`.
If True, becomes 'both'. If False, becomes 'disabled'. Will broadcast.
When enabled, `Portfolio.from_signals` behaves similarly to `Portfolio.from_orders`.
upon_long_conflict (ConflictMode or array_like): Conflict mode for long signals.
See `vectorbt.portfolio.enums.ConflictMode`. Will broadcast.
upon_short_conflict (ConflictMode or array_like): Conflict mode for short signals.
See `vectorbt.portfolio.enums.ConflictMode`. Will broadcast.
upon_dir_conflict (DirectionConflictMode or array_like): See `vectorbt.portfolio.enums.DirectionConflictMode`. Will broadcast.
upon_opposite_entry (OppositeEntryMode or array_like): See `vectorbt.portfolio.enums.OppositeEntryMode`. Will broadcast.
direction (Direction or array_like): See `Portfolio.from_orders`.
Takes only effect if `short_entries` and `short_exits` are not set.
val_price (array_like of float): See `Portfolio.from_orders`.
open (array_like of float): First asset price at each time step.
Defaults to `np.nan`, which gets replaced by `close`. Will broadcast.
Used solely for stop signals.
high (array_like of float): Highest asset price at each time step.
Defaults to `np.nan`, which gets replaced by the maximum out of `open` and `close`. Will broadcast.
Used solely for stop signals.
low (array_like of float): Lowest asset price at each time step.
Defaults to `np.nan`, which gets replaced by the minimum out of `open` and `close`. Will broadcast.
Used solely for stop signals.
sl_stop (array_like of float): Stop loss.
Will broadcast.
A percentage below/above the acquisition price for long/short position.
Note that 0.01 = 1%.
sl_trail (array_like of bool): Whether `sl_stop` should be trailing.
Will broadcast.
tp_stop (array_like of float): Take profit.
Will broadcast.
A percentage above/below the acquisition price for long/short position.
Note that 0.01 = 1%.
stop_entry_price (StopEntryPrice or array_like): See `vectorbt.portfolio.enums.StopEntryPrice`.
Will broadcast.
If provided on per-element basis, gets applied upon entry.
stop_exit_price (StopExitPrice or array_like): See `vectorbt.portfolio.enums.StopExitPrice`.
Will broadcast.
If provided on per-element basis, gets applied upon exit.
upon_stop_exit (StopExitMode or array_like): See `vectorbt.portfolio.enums.StopExitMode`.
Will broadcast.
If provided on per-element basis, gets applied upon exit.
upon_stop_update (StopUpdateMode or array_like): See `vectorbt.portfolio.enums.StopUpdateMode`.
Will broadcast.
Only has effect if accumulation is enabled.
If provided on per-element basis, gets applied upon repeated entry.
adjust_sl_func_nb (callable): Function to adjust stop loss.
Defaults to `vectorbt.portfolio.nb.no_adjust_sl_func_nb`.
Called for each element before each row.
Should accept `vectorbt.portfolio.enums.AdjustSLContext` and `*adjust_sl_args`.
Should return a tuple of a new stop value and trailing flag.
adjust_sl_args (tuple): Packed arguments passed to `adjust_sl_func_nb`.
Defaults to `()`.
adjust_tp_func_nb (callable): Function to adjust take profit.
Defaults to `vectorbt.portfolio.nb.no_adjust_tp_func_nb`.
Called for each element before each row.
Should accept `vectorbt.portfolio.enums.AdjustTPContext` and `*adjust_tp_args`.
of the stop, and `*adjust_tp_args`. Should return a new stop value.
adjust_tp_args (tuple): Packed arguments passed to `adjust_tp_func_nb`.
Defaults to `()`.
use_stops (bool): Whether to use stops.
Defaults to None, which becomes True if any of the stops are not NaN or
any of the adjustment functions are custom.
Disable this to make simulation a bit faster for simple use cases.
init_cash (InitCashMode, float or array_like of float): See `Portfolio.from_orders`.
cash_sharing (bool): See `Portfolio.from_orders`.
call_seq (CallSeqType or array_like): See `Portfolio.from_orders`.
ffill_val_price (bool): See `Portfolio.from_orders`.
update_value (bool): See `Portfolio.from_orders`.
max_orders (int): See `Portfolio.from_orders`.
max_logs (int): See `Portfolio.from_orders`.
seed (int): See `Portfolio.from_orders`.
group_by (any): See `Portfolio.from_orders`.
broadcast_named_args (dict): Dictionary with named arguments to broadcast.
You can then pass argument names to the functions and this method will substitute
them by their corresponding broadcasted objects.
broadcast_kwargs (dict): See `Portfolio.from_orders`.
template_mapping (mapping): Mapping to replace templates in arguments.
wrapper_kwargs (dict): See `Portfolio.from_orders`.
freq (any): See `Portfolio.from_orders`.
attach_call_seq (bool): See `Portfolio.from_orders`.
**kwargs: Keyword arguments passed to the `__init__` method.
All broadcastable arguments will broadcast using `vectorbt.base.reshape_fns.broadcast`
but keep original shape to utilize flexible indexing and to save memory.
For defaults, see `portfolio` in `vectorbt._settings.settings`.
!!! note
Stop signal has priority - it's executed before other signals within the same bar.
That is, if a stop signal is present, no other signals are generated and executed
since there is a limit of one order per symbol and bar.
!!! hint
If you generated signals using close price, don't forget to shift your signals by one tick
forward, for example, with `signals.vbt.fshift(1)`. In general, make sure to use a price
that comes after the signal.
Also see notes and hints for `Portfolio.from_orders`.
## Example
* By default, if all signal arrays are None, `entries` becomes True,
which opens a position at the very first tick and does nothing else:
```python-repl
>>> close = pd.Series([1, 2, 3, 4, 5])
>>> pf = vbt.Portfolio.from_signals(close, size=1)
>>> pf.asset_flow()
0 1.0
1 0.0
2 0.0
3 0.0
4 0.0
dtype: float64
```
* Entry opens long, exit closes long:
```python-repl
>>> pf = vbt.Portfolio.from_signals(
... close,
... entries=pd.Series([True, True, True, False, False]),
... exits=pd.Series([False, False, True, True, True]),
... size=1,
... direction='longonly'
... )
>>> pf.asset_flow()
0 1.0
1 0.0
2 0.0
3 -1.0
4 0.0
dtype: float64
>>> # Using direction-aware arrays instead of `direction`
>>> pf = vbt.Portfolio.from_signals(
... close,
... entries=pd.Series([True, True, True, False, False]), # long_entries
... exits=pd.Series([False, False, True, True, True]), # long_exits
... short_entries=False,
... short_exits=False,
... size=1
... )
>>> pf.asset_flow()
0 1.0
1 0.0
2 0.0
3 -1.0
4 0.0
dtype: float64
```
Notice how both `short_entries` and `short_exits` are provided as constants - as any other
broadcastable argument, they are treated as arrays where each element is False.
* Entry opens short, exit closes short:
```python-repl
>>> pf = vbt.Portfolio.from_signals(
... close,
... entries=pd.Series([True, True, True, False, False]),
... exits=pd.Series([False, False, True, True, True]),
... size=1,
... direction='shortonly'
... )
>>> pf.asset_flow()
0 -1.0
1 0.0
2 0.0
3 1.0
4 0.0
dtype: float64
>>> # Using direction-aware arrays instead of `direction`
>>> pf = vbt.Portfolio.from_signals(
... close,
... entries=False, # long_entries
... exits=False, # long_exits
... short_entries=pd.Series([True, True, True, False, False]),
... short_exits=pd.Series([False, False, True, True, True]),
... size=1
... )
>>> pf.asset_flow()
0 -1.0
1 0.0
2 0.0
3 1.0
4 0.0
dtype: float64
```
* Entry opens long and closes short, exit closes long and opens short:
```python-repl
>>> pf = vbt.Portfolio.from_signals(
... close,
... entries=pd.Series([True, True, True, False, False]),
... exits=pd.Series([False, False, True, True, True]),
... size=1,
... direction='both'
... )
>>> pf.asset_flow()
0 1.0
1 0.0
2 0.0
3 -2.0
4 0.0
dtype: float64
>>> # Using direction-aware arrays instead of `direction`
>>> pf = vbt.Portfolio.from_signals(
... close,
... entries=pd.Series([True, True, True, False, False]), # long_entries
... exits=False, # long_exits
... short_entries=pd.Series([False, False, True, True, True]),
... short_exits=False,
... size=1
... )
>>> pf.asset_flow()
0 1.0
1 0.0
2 0.0
3 -2.0
4 0.0
dtype: float64
```
* More complex signal combinations are best expressed using direction-aware arrays.
For example, ignore opposite signals as long as the current position is open:
```python-repl
>>> pf = vbt.Portfolio.from_signals(
... close,
... entries =pd.Series([True, False, False, False, False]), # long_entries
... exits =pd.Series([False, False, True, False, False]), # long_exits
... short_entries=pd.Series([False, True, False, True, False]),
... short_exits =pd.Series([False, False, False, False, True]),
... size=1,
... upon_opposite_entry='ignore'
... )
>>> pf.asset_flow()
0 1.0
1 0.0
2 -1.0
3 -1.0
4 1.0
dtype: float64
```
* First opposite signal closes the position, second one opens a new position:
```python-repl
>>> pf = vbt.Portfolio.from_signals(
... close,
... entries=pd.Series([True, True, True, False, False]),
... exits=pd.Series([False, False, True, True, True]),
... size=1,
... direction='both',
... upon_opposite_entry='close'
... )
>>> pf.asset_flow()
0 1.0
1 0.0
2 0.0
3 -1.0
4 -1.0
dtype: float64
```
* If both long entry and exit signals are True (a signal conflict), choose exit:
```python-repl
>>> pf = vbt.Portfolio.from_signals(
... close,
... entries=pd.Series([True, True, True, False, False]),
... exits=pd.Series([False, False, True, True, True]),
... size=1.,
... direction='longonly',
... upon_long_conflict='exit')
>>> pf.asset_flow()
0 1.0
1 0.0
2 -1.0
3 0.0
4 0.0
dtype: float64
```
* If both long entry and short entry signal are True (a direction conflict), choose short:
```python-repl
>>> pf = vbt.Portfolio.from_signals(
... close,
... entries=pd.Series([True, True, True, False, False]),
... exits=pd.Series([False, False, True, True, True]),
... size=1.,
... direction='both',
... upon_dir_conflict='short')
>>> pf.asset_flow()
0 1.0
1 0.0
2 -2.0
3 0.0
4 0.0
dtype: float64
```
!!! note
Remember that when direction is set to 'both', entries become `long_entries` and exits become
`short_entries`, so this becomes a conflict of directions rather than signals.
* If there are both signal and direction conflicts:
```python-repl
>>> pf = vbt.Portfolio.from_signals(
... close,
... entries=True, # long_entries
... exits=True, # long_exits
... short_entries=True,
... short_exits=True,
... size=1,
... upon_long_conflict='entry',
... upon_short_conflict='entry',
... upon_dir_conflict='short'
... )
>>> pf.asset_flow()
0 -1.0
1 0.0
2 0.0
3 0.0
4 0.0
dtype: float64
```
* Turn on accumulation of signals. Entry means long order, exit means short order
(acts similar to `from_orders`):
```python-repl
>>> pf = vbt.Portfolio.from_signals(
... close,
... entries=pd.Series([True, True, True, False, False]),
... exits=pd.Series([False, False, True, True, True]),
... size=1.,
... direction='both',
... accumulate=True)
>>> pf.asset_flow()
0 1.0
1 1.0
2 0.0
3 -1.0
4 -1.0
dtype: float64
```
* Allow increasing a position (of any direction), deny decreasing a position:
```python-repl
>>> pf = vbt.Portfolio.from_signals(
... close,
... entries=pd.Series([True, True, True, False, False]),
... exits=pd.Series([False, False, True, True, True]),
... size=1.,
... direction='both',
... accumulate='addonly')
>>> pf.asset_flow()
0 1.0 << open a long position
1 1.0 << add to the position
2 0.0
3 -3.0 << close and open a short position
4 -1.0 << add to the position
dtype: float64
```
* Testing multiple parameters (via broadcasting):
```python-repl
>>> pf = vbt.Portfolio.from_signals(
... close,
... entries=pd.Series([True, True, True, False, False]),
... exits=pd.Series([False, False, True, True, True]),
... direction=[list(Direction)],
... broadcast_kwargs=dict(columns_from=Direction._fields))
>>> pf.asset_flow()
Long Short All
0 100.0 -100.0 100.0
1 0.0 0.0 0.0
2 0.0 0.0 0.0
3 -100.0 50.0 -200.0
4 0.0 0.0 0.0
```
* Set risk/reward ratio by passing trailing stop loss and take profit thresholds:
```python-repl
>>> close = pd.Series([10, 11, 12, 11, 10, 9])
>>> entries = pd.Series([True, False, False, False, False, False])
>>> exits = pd.Series([False, False, False, False, False, True])
>>> pf = vbt.Portfolio.from_signals(
... close, entries, exits,
... sl_stop=0.1, sl_trail=True, tp_stop=0.2) # take profit hit
>>> pf.asset_flow()
0 10.0
1 0.0
2 -10.0
3 0.0
4 0.0
5 0.0
dtype: float64
>>> pf = vbt.Portfolio.from_signals(
... close, entries, exits,
... sl_stop=0.1, sl_trail=True, tp_stop=0.3) # stop loss hit
>>> pf.asset_flow()
0 10.0
1 0.0
2 0.0
3 0.0
4 -10.0
5 0.0
dtype: float64
>>> pf = vbt.Portfolio.from_signals(
... close, entries, exits,
... sl_stop=np.inf, sl_trail=True, tp_stop=np.inf) # nothing hit, exit as usual
>>> pf.asset_flow()
0 10.0
1 0.0
2 0.0
3 0.0
4 0.0
5 -10.0
dtype: float64
```
!!! note
When the stop price is hit, the stop signal invalidates any other signal defined for this bar.
Thus, make sure that your signaling logic happens at the very end of the bar
(for example, by using the closing price), otherwise you may expose yourself to a look-ahead bias.
See `vectorbt.portfolio.enums.StopExitPrice` for more details.
* We can implement our own stop loss or take profit, or adjust the existing one at each time step.
Let's implement [stepped stop-loss](https://www.freqtrade.io/en/stable/strategy-advanced/#stepped-stoploss):
```python-repl
>>> @njit
... def adjust_sl_func_nb(c):
... current_profit = (c.val_price_now - c.init_price) / c.init_price
... if current_profit >= 0.40:
... return 0.25, True
... elif current_profit >= 0.25:
... return 0.15, True
... elif current_profit >= 0.20:
... return 0.07, True
... return c.curr_stop, c.curr_trail
>>> close = pd.Series([10, 11, 12, 11, 10])
>>> pf = vbt.Portfolio.from_signals(close, adjust_sl_func_nb=adjust_sl_func_nb)
>>> pf.asset_flow()
0 10.0
1 0.0
2 0.0
3 -10.0 # 7% from 12 hit
4 11.0
dtype: float64
```
* Sometimes there is a need to provide or transform signals dynamically. For this, we can implement
a custom signal function `signal_func_nb`. For example, let's implement a signal function that
takes two numerical arrays - long and short one - and transforms them into 4 direction-aware boolean
arrays that vectorbt understands:
```python-repl
>>> @njit
... def signal_func_nb(c, long_num_arr, short_num_arr):
... long_num = nb.get_elem_nb(c, long_num_arr)
... short_num = nb.get_elem_nb(c, short_num_arr)
... is_long_entry = long_num > 0
... is_long_exit = long_num < 0
... is_short_entry = short_num > 0
... is_short_exit = short_num < 0
... return is_long_entry, is_long_exit, is_short_entry, is_short_exit
>>> pf = vbt.Portfolio.from_signals(
... pd.Series([1, 2, 3, 4, 5]),
... signal_func_nb=signal_func_nb,
... signal_args=(vbt.Rep('long_num_arr'), vbt.Rep('short_num_arr')),
... broadcast_named_args=dict(
... long_num_arr=pd.Series([1, 0, -1, 0, 0]),
... short_num_arr=pd.Series([0, 1, 0, 1, -1])
... ),
... size=1,
... upon_opposite_entry='ignore'
... )
>>> pf.asset_flow()
0 1.0
1 0.0
2 -1.0
3 -1.0
4 1.0
dtype: float64
```
Passing both arrays as `broadcast_named_args` broadcasts them internally as any other array,
so we don't have to worry about their dimensions every time we change our data.
"""
# Get defaults
from vectorbt._settings import settings
portfolio_cfg = settings['portfolio']
ls_mode = short_entries is not None or short_exits is not None
signal_func_mode = signal_func_nb is not nb.no_signal_func_nb
if (entries is not None or exits is not None or ls_mode) and signal_func_mode:
raise ValueError("Either any of the signal arrays or signal_func_nb should be set, not both")
if entries is None:
if exits is None and not ls_mode:
entries = True
else:
entries = False
if exits is None:
exits = False
if short_entries is None:
short_entries = False
if short_exits is None:
short_exits = False
if signal_func_nb is nb.no_signal_func_nb:
if ls_mode:
signal_func_nb = nb.ls_enex_signal_func_nb
else:
signal_func_nb = nb.dir_enex_signal_func_nb
if size is None:
size = portfolio_cfg['size']
if size_type is None:
size_type = portfolio_cfg['size_type']
size_type = map_enum_fields(size_type, SizeType)
if price is None:
price = np.inf
if fees is None:
fees = portfolio_cfg['fees']
if fixed_fees is None:
fixed_fees = portfolio_cfg['fixed_fees']
if slippage is None:
slippage = portfolio_cfg['slippage']
if min_size is None:
min_size = portfolio_cfg['min_size']
if max_size is None:
max_size = portfolio_cfg['max_size']
if reject_prob is None:
reject_prob = portfolio_cfg['reject_prob']
if lock_cash is None:
lock_cash = portfolio_cfg['lock_cash']
if allow_partial is None:
allow_partial = portfolio_cfg['allow_partial']
if raise_reject is None:
raise_reject = portfolio_cfg['raise_reject']
if log is None:
log = portfolio_cfg['log']
if accumulate is None:
accumulate = portfolio_cfg['accumulate']
accumulate = map_enum_fields(accumulate, AccumulationMode, ignore_type=(int, bool))
if upon_long_conflict is None:
upon_long_conflict = portfolio_cfg['upon_long_conflict']
upon_long_conflict = map_enum_fields(upon_long_conflict, ConflictMode)
if upon_short_conflict is None:
upon_short_conflict = portfolio_cfg['upon_short_conflict']
upon_short_conflict = map_enum_fields(upon_short_conflict, ConflictMode)
if upon_dir_conflict is None:
upon_dir_conflict = portfolio_cfg['upon_dir_conflict']
upon_dir_conflict = map_enum_fields(upon_dir_conflict, DirectionConflictMode)
if upon_opposite_entry is None:
upon_opposite_entry = portfolio_cfg['upon_opposite_entry']
upon_opposite_entry = map_enum_fields(upon_opposite_entry, OppositeEntryMode)
if direction is not None and ls_mode:
warnings.warn("direction has no effect if short_entries and short_exits are set", stacklevel=2)
if direction is None:
direction = portfolio_cfg['signal_direction']
direction = map_enum_fields(direction, Direction)
if val_price is None:
val_price = portfolio_cfg['val_price']
if open is None:
open = np.nan
if high is None:
high = np.nan
if low is None:
low = np.nan
if sl_stop is None:
sl_stop = portfolio_cfg['sl_stop']
if sl_trail is None:
sl_trail = portfolio_cfg['sl_trail']
if tp_stop is None:
tp_stop = portfolio_cfg['tp_stop']
if stop_entry_price is None:
stop_entry_price = portfolio_cfg['stop_entry_price']
stop_entry_price = map_enum_fields(stop_entry_price, StopEntryPrice)
if stop_exit_price is None:
stop_exit_price = portfolio_cfg['stop_exit_price']
stop_exit_price = map_enum_fields(stop_exit_price, StopExitPrice)
if upon_stop_exit is None:
upon_stop_exit = portfolio_cfg['upon_stop_exit']
upon_stop_exit = map_enum_fields(upon_stop_exit, StopExitMode)
if upon_stop_update is None:
upon_stop_update = portfolio_cfg['upon_stop_update']
upon_stop_update = map_enum_fields(upon_stop_update, StopUpdateMode)
if use_stops is None:
use_stops = portfolio_cfg['use_stops']
if use_stops is None:
if isinstance(sl_stop, float) and \
np.isnan(sl_stop) and \
isinstance(tp_stop, float) and \
np.isnan(tp_stop) and \
adjust_sl_func_nb == nb.no_adjust_sl_func_nb and \
adjust_tp_func_nb == nb.no_adjust_tp_func_nb:
use_stops = False
else:
use_stops = True
if init_cash is None:
init_cash = portfolio_cfg['init_cash']
if isinstance(init_cash, str):
init_cash = map_enum_fields(init_cash, InitCashMode)
if isinstance(init_cash, int) and init_cash in InitCashMode:
init_cash_mode = init_cash
init_cash = np.inf
else:
init_cash_mode = None
if cash_sharing is None:
cash_sharing = portfolio_cfg['cash_sharing']
if cash_sharing and group_by is None:
group_by = True
if call_seq is None:
call_seq = portfolio_cfg['call_seq']
auto_call_seq = False
if isinstance(call_seq, str):
call_seq = map_enum_fields(call_seq, CallSeqType)
if isinstance(call_seq, int):
if call_seq == CallSeqType.Auto:
call_seq = CallSeqType.Default
auto_call_seq = True
if ffill_val_price is None:
ffill_val_price = portfolio_cfg['ffill_val_price']
if update_value is None:
update_value = portfolio_cfg['update_value']
if seed is None:
seed = portfolio_cfg['seed']
if seed is not None:
set_seed(seed)
if freq is None:
freq = portfolio_cfg['freq']
if attach_call_seq is None:
attach_call_seq = portfolio_cfg['attach_call_seq']
if broadcast_named_args is None:
broadcast_named_args = {}
if broadcast_kwargs is None:
broadcast_kwargs = {}
if template_mapping is None:
template_mapping = {}
if wrapper_kwargs is None:
wrapper_kwargs = {}
if not wrapper_kwargs.get('group_select', True) and cash_sharing:
raise ValueError("group_select cannot be disabled if cash_sharing=True")
# Prepare the simulation
broadcastable_args = dict(
size=size,
price=price,
size_type=size_type,
fees=fees,
fixed_fees=fixed_fees,
slippage=slippage,
min_size=min_size,
max_size=max_size,
reject_prob=reject_prob,
lock_cash=lock_cash,
allow_partial=allow_partial,
raise_reject=raise_reject,
log=log,
accumulate=accumulate,
upon_long_conflict=upon_long_conflict,
upon_short_conflict=upon_short_conflict,
upon_dir_conflict=upon_dir_conflict,
upon_opposite_entry=upon_opposite_entry,
val_price=val_price,
open=open,
high=high,
low=low,
close=close,
sl_stop=sl_stop,
sl_trail=sl_trail,
tp_stop=tp_stop,
stop_entry_price=stop_entry_price,
stop_exit_price=stop_exit_price,
upon_stop_exit=upon_stop_exit,
upon_stop_update=upon_stop_update
)
if not signal_func_mode:
if ls_mode:
broadcastable_args['entries'] = entries
broadcastable_args['exits'] = exits
broadcastable_args['short_entries'] = short_entries
broadcastable_args['short_exits'] = short_exits
else:
broadcastable_args['entries'] = entries
broadcastable_args['exits'] = exits
broadcastable_args['direction'] = direction
broadcastable_args = {**broadcastable_args, **broadcast_named_args}
# Only close is broadcast, others can remain unchanged thanks to flexible indexing
close_idx = list(broadcastable_args.keys()).index('close')
keep_raw = [True] * len(broadcastable_args)
keep_raw[close_idx] = False
broadcast_kwargs = merge_dicts(dict(
keep_raw=keep_raw,
require_kwargs=dict(requirements='W')
), broadcast_kwargs)
broadcasted_args = broadcast(*broadcastable_args.values(), **broadcast_kwargs)
broadcasted_args = dict(zip(broadcastable_args.keys(), broadcasted_args))
close = broadcasted_args['close']
if not checks.is_pandas(close):
close = pd.Series(close) if close.ndim == 1 else pd.DataFrame(close)
broadcasted_args['close'] = to_2d_array(close)
target_shape_2d = (close.shape[0], close.shape[1] if close.ndim > 1 else 1)
wrapper = ArrayWrapper.from_obj(close, freq=freq, group_by=group_by, **wrapper_kwargs)
cs_group_lens = wrapper.grouper.get_group_lens(group_by=None if cash_sharing else False)
init_cash = np.require(np.broadcast_to(init_cash, (len(cs_group_lens),)), dtype=np.float_)
group_lens = wrapper.grouper.get_group_lens(group_by=group_by)
if checks.is_any_array(call_seq):
call_seq = nb.require_call_seq(broadcast(call_seq, to_shape=target_shape_2d, to_pd=False))
else:
call_seq = nb.build_call_seq(target_shape_2d, group_lens, call_seq_type=call_seq)
if max_orders is None:
max_orders = target_shape_2d[0] * target_shape_2d[1]
if max_logs is None:
max_logs = target_shape_2d[0] * target_shape_2d[1]
if not np.any(log):
max_logs = 1
template_mapping = {**broadcasted_args, **dict(
target_shape=target_shape_2d,
group_lens=cs_group_lens,
init_cash=init_cash,
call_seq=call_seq,
adjust_sl_func_nb=adjust_sl_func_nb,
adjust_sl_args=adjust_sl_args,
adjust_tp_func_nb=adjust_tp_func_nb,
adjust_tp_args=adjust_tp_args,
use_stops=use_stops,
auto_call_seq=auto_call_seq,
ffill_val_price=ffill_val_price,
update_value=update_value,
max_orders=max_orders,
max_logs=max_logs,
flex_2d=close.ndim == 2,
wrapper=wrapper
), **template_mapping}
adjust_sl_args = deep_substitute(adjust_sl_args, template_mapping)
adjust_tp_args = deep_substitute(adjust_tp_args, template_mapping)
if signal_func_mode:
signal_args = deep_substitute(signal_args, template_mapping)
else:
if ls_mode:
signal_args = (
broadcasted_args['entries'],
broadcasted_args['exits'],
broadcasted_args['short_entries'],
broadcasted_args['short_exits']
)
else:
signal_args = (
broadcasted_args['entries'],
broadcasted_args['exits'],
broadcasted_args['direction']
)
checks.assert_numba_func(signal_func_nb)
checks.assert_numba_func(adjust_sl_func_nb)
checks.assert_numba_func(adjust_tp_func_nb)
# Perform the simulation
order_records, log_records = nb.simulate_from_signal_func_nb(
target_shape_2d,
cs_group_lens, # group only if cash sharing is enabled to speed up
init_cash,
call_seq,
signal_func_nb=signal_func_nb,
signal_args=signal_args,
size=broadcasted_args['size'],
price=broadcasted_args['price'],
size_type=broadcasted_args['size_type'],
fees=broadcasted_args['fees'],
fixed_fees=broadcasted_args['fixed_fees'],
slippage=broadcasted_args['slippage'],
min_size=broadcasted_args['min_size'],
max_size=broadcasted_args['max_size'],
reject_prob=broadcasted_args['reject_prob'],
lock_cash=broadcasted_args['lock_cash'],
allow_partial=broadcasted_args['allow_partial'],
raise_reject=broadcasted_args['raise_reject'],
log=broadcasted_args['log'],
accumulate=broadcasted_args['accumulate'],
upon_long_conflict=broadcasted_args['upon_long_conflict'],
upon_short_conflict=broadcasted_args['upon_short_conflict'],
upon_dir_conflict=broadcasted_args['upon_dir_conflict'],
upon_opposite_entry=broadcasted_args['upon_opposite_entry'],
val_price=broadcasted_args['val_price'],
open=broadcasted_args['open'],
high=broadcasted_args['high'],
low=broadcasted_args['low'],
close=broadcasted_args['close'],
sl_stop=broadcasted_args['sl_stop'],
sl_trail=broadcasted_args['sl_trail'],
tp_stop=broadcasted_args['tp_stop'],
stop_entry_price=broadcasted_args['stop_entry_price'],
stop_exit_price=broadcasted_args['stop_exit_price'],
upon_stop_exit=broadcasted_args['upon_stop_exit'],
upon_stop_update=broadcasted_args['upon_stop_update'],
adjust_sl_func_nb=adjust_sl_func_nb,
adjust_sl_args=adjust_sl_args,
adjust_tp_func_nb=adjust_tp_func_nb,
adjust_tp_args=adjust_tp_args,
use_stops=use_stops,
auto_call_seq=auto_call_seq,
ffill_val_price=ffill_val_price,
update_value=update_value,
max_orders=max_orders,
max_logs=max_logs,
flex_2d=close.ndim == 2
)
# Create an instance
return cls(
wrapper,
close,
order_records,
log_records,
init_cash if init_cash_mode is None else init_cash_mode,
cash_sharing,
call_seq=call_seq if attach_call_seq else None,
**kwargs
)
@classmethod
def from_holding(cls: tp.Type[PortfolioT], close: tp.ArrayLike, **kwargs) -> PortfolioT:
    """Simulate a buy-and-hold portfolio.

    Enters a long position at the very first tick and never exits.
    Based on `Portfolio.from_signals`.

    ```python-repl
    >>> close = pd.Series([1, 2, 3, 4, 5])
    >>> pf = vbt.Portfolio.from_holding(close)
    >>> pf.final_value()
    500.0
    ```"""
    # A single entry at the start and no exits is exactly "hold forever"
    hold_signals = dict(entries=True, exits=False)
    return cls.from_signals(close, **hold_signals, **kwargs)
@classmethod
def from_random_signals(cls: tp.Type[PortfolioT],
                        close: tp.ArrayLike,
                        n: tp.Optional[tp.ArrayLike] = None,
                        prob: tp.Optional[tp.ArrayLike] = None,
                        entry_prob: tp.Optional[tp.ArrayLike] = None,
                        exit_prob: tp.Optional[tp.ArrayLike] = None,
                        param_product: bool = False,
                        seed: tp.Optional[int] = None,
                        run_kwargs: tp.KwargsLike = None,
                        **kwargs) -> PortfolioT:
    """Simulate portfolio from random entry and exit signals.

    Generates signals based either on the number of signals `n` or the probability
    of encountering a signal `prob`.

    * If `n` is set, see `vectorbt.signals.generators.RANDNX`.
    * If `prob` is set, see `vectorbt.signals.generators.RPROBNX`.

    Based on `Portfolio.from_signals`.

    Args:
        close (array_like): Last asset price at each time step.
        n (array_like): Number of entry (and matching exit) signals to place per column.
        prob (array_like): Default probability used for both `entry_prob` and `exit_prob`.
        entry_prob (array_like): Probability of an entry signal at each time step.
            Defaults to `prob` if not set.
        exit_prob (array_like): Probability of an exit signal at each time step.
            Defaults to `prob` if not set.
        param_product (bool): Whether to build the Cartesian product of `entry_prob` and `exit_prob`.
        seed (int): Seed to make the generated signals deterministic.
        run_kwargs (dict): Keyword arguments passed to the signal generator's `run` method.
        **kwargs: Keyword arguments passed to `Portfolio.from_signals`.

    !!! note
        To generate random signals, the shape of `close` is used. Broadcasting with other
        arrays happens after the generation.

    ## Example

    * Test multiple combinations of random entries and exits:

    ```python-repl
    >>> close = pd.Series([1, 2, 3, 4, 5])
    >>> pf = vbt.Portfolio.from_random_signals(close, n=[2, 1, 0], seed=42)
    >>> pf.orders.count()
    randnx_n
    2    4
    1    2
    0    0
    Name: count, dtype: int64
    ```

    * Test the Cartesian product of entry and exit encounter probabilities:

    ```python-repl
    >>> pf = vbt.Portfolio.from_random_signals(
    ...     close,
    ...     entry_prob=[0, 0.5, 1],
    ...     exit_prob=[0, 0.5, 1],
    ...     param_product=True,
    ...     seed=42)
    >>> pf.orders.count()
    rprobnx_entry_prob  rprobnx_exit_prob
    0.0                 0.0                  0
                        0.5                  0
                        1.0                  0
    0.5                 0.0                  1
                        0.5                  4
                        1.0                  3
    1.0                 0.0                  1
                        0.5                  4
                        1.0                  5
    Name: count, dtype: int64
    ```
    """
    # Get defaults
    from vectorbt._settings import settings
    portfolio_cfg = settings['portfolio']

    close = to_pd_array(close)
    close_wrapper = ArrayWrapper.from_obj(close)
    # prob is a shorthand that fills whichever side was not given explicitly
    if entry_prob is None:
        entry_prob = prob
    if exit_prob is None:
        exit_prob = prob
    if seed is None:
        seed = portfolio_cfg['seed']
    if run_kwargs is None:
        run_kwargs = {}

    if n is not None and (entry_prob is not None or exit_prob is not None):
        raise ValueError("Either n or entry_prob and exit_prob should be set")
    if n is not None:
        rand = RANDNX.run(
            n=n,
            input_shape=close.shape,
            input_index=close_wrapper.index,
            input_columns=close_wrapper.columns,
            seed=seed,
            **run_kwargs
        )
        entries = rand.entries
        exits = rand.exits
    elif entry_prob is not None and exit_prob is not None:
        rprobnx = RPROBNX.run(
            entry_prob=entry_prob,
            exit_prob=exit_prob,
            param_product=param_product,
            input_shape=close.shape,
            input_index=close_wrapper.index,
            input_columns=close_wrapper.columns,
            seed=seed,
            **run_kwargs
        )
        entries = rprobnx.entries
        exits = rprobnx.exits
    elif entry_prob is not None or exit_prob is not None:
        # Exactly one probability was supplied: the generic "at least n or ..."
        # message below would be misleading here, so report the real problem
        raise ValueError("Both entry_prob and exit_prob should be set")
    else:
        raise ValueError("At least n or entry_prob and exit_prob should be set")

    return cls.from_signals(close, entries, exits, seed=seed, **kwargs)
@classmethod
def from_order_func(cls: tp.Type[PortfolioT],
close: tp.ArrayLike,
order_func_nb: tp.Union[nb.OrderFuncT, nb.FlexOrderFuncT],
*order_args,
flexible: tp.Optional[bool] = None,
init_cash: tp.Optional[tp.ArrayLike] = None,
cash_sharing: tp.Optional[bool] = None,
call_seq: tp.Optional[tp.ArrayLike] = None,
segment_mask: tp.Optional[tp.ArrayLike] = None,
call_pre_segment: tp.Optional[bool] = None,
call_post_segment: tp.Optional[bool] = None,
pre_sim_func_nb: nb.PreSimFuncT = nb.no_pre_func_nb,
pre_sim_args: tp.Args = (),
post_sim_func_nb: nb.PostSimFuncT = nb.no_post_func_nb,
post_sim_args: tp.Args = (),
pre_group_func_nb: nb.PreGroupFuncT = nb.no_pre_func_nb,
pre_group_args: tp.Args = (),
post_group_func_nb: nb.PostGroupFuncT = nb.no_post_func_nb,
post_group_args: tp.Args = (),
pre_row_func_nb: nb.PreRowFuncT = nb.no_pre_func_nb,
pre_row_args: tp.Args = (),
post_row_func_nb: nb.PostRowFuncT = nb.no_post_func_nb,
post_row_args: tp.Args = (),
pre_segment_func_nb: nb.PreSegmentFuncT = nb.no_pre_func_nb,
pre_segment_args: tp.Args = (),
post_segment_func_nb: nb.PostSegmentFuncT = nb.no_post_func_nb,
post_segment_args: tp.Args = (),
post_order_func_nb: nb.PostOrderFuncT = nb.no_post_func_nb,
post_order_args: tp.Args = (),
ffill_val_price: tp.Optional[bool] = None,
update_value: tp.Optional[bool] = None,
fill_pos_record: tp.Optional[bool] = None,
row_wise: tp.Optional[bool] = None,
use_numba: tp.Optional[bool] = None,
max_orders: tp.Optional[int] = None,
max_logs: tp.Optional[int] = None,
seed: tp.Optional[int] = None,
group_by: tp.GroupByLike = None,
broadcast_named_args: tp.KwargsLike = None,
broadcast_kwargs: tp.KwargsLike = None,
template_mapping: tp.Optional[tp.Mapping] = None,
wrapper_kwargs: tp.KwargsLike = None,
freq: tp.Optional[tp.FrequencyLike] = None,
attach_call_seq: tp.Optional[bool] = None,
**kwargs) -> PortfolioT:
"""Build portfolio from a custom order function.
!!! hint
See `vectorbt.portfolio.nb.simulate_nb` for illustrations and argument definitions.
For more details on individual simulation functions:
* not `row_wise` and not `flexible`: See `vectorbt.portfolio.nb.simulate_nb`
* not `row_wise` and `flexible`: See `vectorbt.portfolio.nb.flex_simulate_nb`
* `row_wise` and not `flexible`: See `vectorbt.portfolio.nb.simulate_row_wise_nb`
* `row_wise` and `flexible`: See `vectorbt.portfolio.nb.flex_simulate_row_wise_nb`
Args:
close (array_like): Last asset price at each time step.
Will broadcast to `target_shape`.
Used for calculating unrealized PnL and portfolio value.
order_func_nb (callable): Order generation function.
*order_args: Arguments passed to `order_func_nb`.
flexible (bool): Whether to simulate using a flexible order function.
This lifts the limit of one order per tick and symbol.
init_cash (InitCashMode, float or array_like of float): Initial capital.
See `init_cash` in `Portfolio.from_orders`.
cash_sharing (bool): Whether to share cash within the same group.
If `group_by` is None, `group_by` becomes True to form a single group with cash sharing.
call_seq (CallSeqType or array_like): Default sequence of calls per row and group.
* Use `vectorbt.portfolio.enums.CallSeqType` to select a sequence type.
* Set to array to specify custom sequence. Will not broadcast.
!!! note
CallSeqType.Auto should be implemented manually.
Use `vectorbt.portfolio.nb.sort_call_seq_nb` or `vectorbt.portfolio.nb.sort_call_seq_out_nb`
in `pre_segment_func_nb`.
segment_mask (int or array_like of bool): Mask of whether a particular segment should be executed.
Supplying an integer will activate every n-th row.
Supplying a boolean or an array of boolean will broadcast to the number of rows and groups.
Does not broadcast together with `close` and `broadcast_named_args`, only against the final shape.
call_pre_segment (bool): Whether to call `pre_segment_func_nb` regardless of `segment_mask`.
call_post_segment (bool): Whether to call `post_segment_func_nb` regardless of `segment_mask`.
pre_sim_func_nb (callable): Function called before simulation.
Defaults to `vectorbt.portfolio.nb.no_pre_func_nb`.
pre_sim_args (tuple): Packed arguments passed to `pre_sim_func_nb`.
Defaults to `()`.
post_sim_func_nb (callable): Function called after simulation.
Defaults to `vectorbt.portfolio.nb.no_post_func_nb`.
post_sim_args (tuple): Packed arguments passed to `post_sim_func_nb`.
Defaults to `()`.
pre_group_func_nb (callable): Function called before each group.
Defaults to `vectorbt.portfolio.nb.no_pre_func_nb`.
Called only if `row_wise` is False.
pre_group_args (tuple): Packed arguments passed to `pre_group_func_nb`.
Defaults to `()`.
post_group_func_nb (callable): Function called after each group.
Defaults to `vectorbt.portfolio.nb.no_post_func_nb`.
Called only if `row_wise` is False.
post_group_args (tuple): Packed arguments passed to `post_group_func_nb`.
Defaults to `()`.
pre_row_func_nb (callable): Function called before each row.
Defaults to `vectorbt.portfolio.nb.no_pre_func_nb`.
Called only if `row_wise` is True.
pre_row_args (tuple): Packed arguments passed to `pre_row_func_nb`.
Defaults to `()`.
post_row_func_nb (callable): Function called after each row.
Defaults to `vectorbt.portfolio.nb.no_post_func_nb`.
Called only if `row_wise` is True.
post_row_args (tuple): Packed arguments passed to `post_row_func_nb`.
Defaults to `()`.
pre_segment_func_nb (callable): Function called before each segment.
Defaults to `vectorbt.portfolio.nb.no_pre_func_nb`.
pre_segment_args (tuple): Packed arguments passed to `pre_segment_func_nb`.
Defaults to `()`.
post_segment_func_nb (callable): Function called after each segment.
Defaults to `vectorbt.portfolio.nb.no_post_func_nb`.
post_segment_args (tuple): Packed arguments passed to `post_segment_func_nb`.
Defaults to `()`.
post_order_func_nb (callable): Callback that is called after the order has been processed.
post_order_args (tuple): Packed arguments passed to `post_order_func_nb`.
Defaults to `()`.
ffill_val_price (bool): Whether to track valuation price only if it's known.
Otherwise, unknown `close` will lead to NaN in valuation price at the next timestamp.
update_value (bool): Whether to update group value after each filled order.
fill_pos_record (bool): Whether to fill position record.
Disable this to make simulation a bit faster for simple use cases.
row_wise (bool): Whether to iterate over rows rather than columns/groups.
use_numba (bool): Whether to run the main simulation function using Numba.
!!! note
Disabling it does not disable Numba for other functions.
If necessary, you should ensure that every other function does not use Numba as well.
You can do this by using the `py_func` attribute of that function.
Or, you could disable Numba globally by doing `os.environ['NUMBA_DISABLE_JIT'] = '1'`.
max_orders (int): Size of the order records array.
Defaults to the number of elements in the broadcasted shape.
Set to a lower number if you run out of memory.
max_logs (int): Size of the log records array.
Defaults to the number of elements in the broadcasted shape.
Set to a lower number if you run out of memory.
seed (int): See `Portfolio.from_orders`.
group_by (any): See `Portfolio.from_orders`.
broadcast_named_args (dict): See `Portfolio.from_signals`.
broadcast_kwargs (dict): See `Portfolio.from_orders`.
template_mapping (mapping): See `Portfolio.from_signals`.
wrapper_kwargs (dict): See `Portfolio.from_orders`.
freq (any): See `Portfolio.from_orders`.
attach_call_seq (bool): See `Portfolio.from_orders`.
**kwargs: Keyword arguments passed to the `__init__` method.
For defaults, see `portfolio` in `vectorbt._settings.settings`.
!!! note
All passed functions should be Numba-compiled if Numba is enabled.
Also see notes on `Portfolio.from_orders`.
!!! note
In contrast to other methods, the valuation price is previous `close` instead of the order price
since the price of an order is unknown before the call (which is more realistic by the way).
You can still override the valuation price in `pre_segment_func_nb`.
## Example
* Buy 10 units each tick using closing price:
```python-repl
>>> @njit
... def order_func_nb(c, size):
... return nb.order_nb(size=size)
>>> close = pd.Series([1, 2, 3, 4, 5])
>>> pf = vbt.Portfolio.from_order_func(close, order_func_nb, 10)
>>> pf.assets()
0 10.0
1 20.0
2 30.0
3 40.0
4 40.0
dtype: float64
>>> pf.cash()
0 90.0
1 70.0
2 40.0
3 0.0
4 0.0
dtype: float64
```
* Reverse each position by first closing it. Keep state of last position to determine
which position to open next (just as an example, there are easier ways to do this):
```python-repl
>>> @njit
... def pre_group_func_nb(c):
... last_pos_state = np.array([-1])
... return (last_pos_state,)
>>> @njit
... def order_func_nb(c, last_pos_state):
... if c.position_now != 0:
... return nb.close_position_nb()
...
... if last_pos_state[0] == 1:
... size = -np.inf # open short
... last_pos_state[0] = -1
... else:
... size = np.inf # open long
... last_pos_state[0] = 1
... return nb.order_nb(size=size)
>>> pf = vbt.Portfolio.from_order_func(
... close,
... order_func_nb,
... pre_group_func_nb=pre_group_func_nb
... )
>>> pf.assets()
0 100.000000
1 0.000000
2 -66.666667
3 0.000000
4 26.666667
dtype: float64
>>> pf.cash()
0 0.000000
1 200.000000
2 400.000000
3 133.333333
4 0.000000
dtype: float64
```
* Equal-weighted portfolio as in the example under `vectorbt.portfolio.nb.simulate_nb`:
```python-repl
>>> @njit
... def pre_group_func_nb(c):
... order_value_out = np.empty(c.group_len, dtype=np.float_)
... return (order_value_out,)
>>> @njit
... def pre_segment_func_nb(c, order_value_out, size, price, size_type, direction):
... for col in range(c.from_col, c.to_col):
... c.last_val_price[col] = nb.get_col_elem_nb(c, col, price)
... nb.sort_call_seq_nb(c, size, size_type, direction, order_value_out)
... return ()
>>> @njit
... def order_func_nb(c, size, price, size_type, direction, fees, fixed_fees, slippage):
... return nb.order_nb(
... size=nb.get_elem_nb(c, size),
... price=nb.get_elem_nb(c, price),
... size_type=nb.get_elem_nb(c, size_type),
... direction=nb.get_elem_nb(c, direction),
... fees=nb.get_elem_nb(c, fees),
... fixed_fees=nb.get_elem_nb(c, fixed_fees),
... slippage=nb.get_elem_nb(c, slippage)
... )
>>> np.random.seed(42)
>>> close = np.random.uniform(1, 10, size=(5, 3))
>>> size_template = vbt.RepEval('np.asarray(1 / group_lens[0])')
>>> pf = vbt.Portfolio.from_order_func(
... close,
... order_func_nb,
... size_template, # order_args as *args
... vbt.Rep('price'),
... vbt.Rep('size_type'),
... vbt.Rep('direction'),
... vbt.Rep('fees'),
... vbt.Rep('fixed_fees'),
... vbt.Rep('slippage'),
... segment_mask=2, # rebalance every second tick
... pre_group_func_nb=pre_group_func_nb,
... pre_segment_func_nb=pre_segment_func_nb,
... pre_segment_args=(
... size_template,
... vbt.Rep('price'),
... vbt.Rep('size_type'),
... vbt.Rep('direction')
... ),
... broadcast_named_args=dict( # broadcast against each other
... price=close,
... size_type=SizeType.TargetPercent,
... direction=Direction.LongOnly,
... fees=0.001,
... fixed_fees=1.,
... slippage=0.001
... ),
... template_mapping=dict(np=np), # required by size_template
... cash_sharing=True, group_by=True, # one group with cash sharing
... )
>>> pf.asset_value(group_by=False).vbt.plot()
```

Templates are a very powerful tool to prepare any custom arguments after they are broadcast and
before they are passed to the simulation function. In the example above, we use `broadcast_named_args`
to broadcast some arguments against each other and templates to pass those objects to callbacks.
Additionally, we used an evaluation template to compute the size based on the number of assets in each group.
You may ask: why should we bother using broadcasting and templates if we could just pass `size=1/3`?
Because of flexibility those features provide: we can now pass whatever parameter combinations we want
and it will work flawlessly. For example, to create two groups of equally-allocated positions,
we need to change only two parameters:
```python-repl
>>> close = np.random.uniform(1, 10, size=(5, 6)) # 6 columns instead of 3
>>> group_by = ['g1', 'g1', 'g1', 'g2', 'g2', 'g2'] # 2 groups instead of 1
>>> pf['g1'].asset_value(group_by=False).vbt.plot()
>>> pf['g2'].asset_value(group_by=False).vbt.plot()
```


* Combine multiple exit conditions. Exit early if the price hits some threshold before an actual exit:
```python-repl
>>> @njit
... def pre_sim_func_nb(c):
... # We need to define stop price per column once
... stop_price = np.full(c.target_shape[1], np.nan, dtype=np.float_)
... return (stop_price,)
>>> @njit
... def order_func_nb(c, stop_price, entries, exits, size):
... # Select info related to this order
... entry_now = nb.get_elem_nb(c, entries)
... exit_now = nb.get_elem_nb(c, exits)
... size_now = nb.get_elem_nb(c, size)
... price_now = nb.get_elem_nb(c, c.close)
... stop_price_now = stop_price[c.col]
...
... # Our logic
... if entry_now:
... if c.position_now == 0:
... return nb.order_nb(
... size=size_now,
... price=price_now,
... direction=Direction.LongOnly)
... elif exit_now or price_now >= stop_price_now:
... if c.position_now > 0:
... return nb.order_nb(
... size=-size_now,
... price=price_now,
... direction=Direction.LongOnly)
... return NoOrder
>>> @njit
... def post_order_func_nb(c, stop_price, stop):
... # Same broadcasting as for size
... stop_now = nb.get_elem_nb(c, stop)
...
... if c.order_result.status == OrderStatus.Filled:
... if c.order_result.side == OrderSide.Buy:
... # Position entered: Set stop condition
... stop_price[c.col] = (1 + stop_now) * c.order_result.price
... else:
... # Position exited: Remove stop condition
... stop_price[c.col] = np.nan
>>> def simulate(close, entries, exits, size, threshold):
... return vbt.Portfolio.from_order_func(
... close,
... order_func_nb,
... vbt.Rep('entries'), vbt.Rep('exits'), vbt.Rep('size'), # order_args
... pre_sim_func_nb=pre_sim_func_nb,
... post_order_func_nb=post_order_func_nb,
... post_order_args=(vbt.Rep('threshold'),),
... broadcast_named_args=dict( # broadcast against each other
... entries=entries,
... exits=exits,
... size=size,
... threshold=threshold
... )
... )
>>> close = pd.Series([10, 11, 12, 13, 14])
>>> entries = pd.Series([True, True, False, False, False])
>>> exits = pd.Series([False, False, False, True, True])
>>> simulate(close, entries, exits, np.inf, 0.1).asset_flow()
0 10.0
1 0.0
2 -10.0
3 0.0
4 0.0
dtype: float64
>>> simulate(close, entries, exits, np.inf, 0.2).asset_flow()
0 10.0
1 0.0
2 -10.0
3 0.0
4 0.0
dtype: float64
>>> simulate(close, entries, exits, np.nan).asset_flow()
0 10.0
1 0.0
2 0.0
3 -10.0
4 0.0
dtype: float64
```
The reason why stop of 10% does not result in an order at the second time step is because
it comes at the same time as entry, so it must wait until no entry is present.
This can be changed by replacing the statement "elif" with "if", which would execute
an exit regardless if an entry is present (similar to using `ConflictMode.Opposite` in
`Portfolio.from_signals`).
We can also test the parameter combinations above all at once (thanks to broadcasting):
```python-repl
>>> size = pd.DataFrame(
... [[0.1, 0.2, np.nan]],
... columns=pd.Index(['0.1', '0.2', 'nan'], name='size')
... )
>>> simulate(close, entries, exits, np.inf, size).asset_flow()
size 0.1 0.2 nan
0 10.0 10.0 10.0
1 0.0 0.0 0.0
2 -10.0 -10.0 0.0
3 0.0 0.0 -10.0
4 0.0 0.0 0.0
```
* Let's illustrate how to generate multiple orders per symbol and bar.
For each bar, buy at open and sell at close:
```python-repl
>>> @njit
... def flex_order_func_nb(c, open, size):
... if c.call_idx == 0:
... return c.from_col, nb.order_nb(size=size, price=open[c.i, c.from_col])
... if c.call_idx == 1:
... return c.from_col, nb.close_position_nb(price=c.close[c.i, c.from_col])
... return -1, NoOrder
>>> open = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
>>> close = pd.DataFrame({'a': [2, 3, 4], 'b': [3, 4, 5]})
>>> size = 1
>>> pf = vbt.Portfolio.from_order_func(
... close,
... flex_order_func_nb,
... to_2d_array(open), size,
... flexible=True, max_orders=close.shape[0] * close.shape[1] * 2)
>>> pf.orders.records_readable
Order Id Timestamp Column Size Price Fees Side
0 0 0 a 1.0 1.0 0.0 Buy
1 1 0 a 1.0 2.0 0.0 Sell
2 2 1 a 1.0 2.0 0.0 Buy
3 3 1 a 1.0 3.0 0.0 Sell
4 4 2 a 1.0 3.0 0.0 Buy
5 5 2 a 1.0 4.0 0.0 Sell
6 6 0 b 1.0 4.0 0.0 Buy
7 7 0 b 1.0 3.0 0.0 Sell
8 8 1 b 1.0 5.0 0.0 Buy
9 9 1 b 1.0 4.0 0.0 Sell
10 10 2 b 1.0 6.0 0.0 Buy
11 11 2 b 1.0 5.0 0.0 Sell
```
!!! warning
Each bar is effectively a black box - we don't know how the price moves inside.
Since trades must come in an order that replicates that of the real world, the only reliable
pieces of information are the opening and the closing price.
"""
# Get defaults
from vectorbt._settings import settings
portfolio_cfg = settings['portfolio']
close = to_pd_array(close)
if flexible is None:
flexible = portfolio_cfg['flexible']
if init_cash is None:
init_cash = portfolio_cfg['init_cash']
if isinstance(init_cash, str):
init_cash = map_enum_fields(init_cash, InitCashMode)
if isinstance(init_cash, int) and init_cash in InitCashMode:
init_cash_mode = init_cash
init_cash = np.inf
else:
init_cash_mode = None
if cash_sharing is None:
cash_sharing = portfolio_cfg['cash_sharing']
if cash_sharing and group_by is None:
group_by = True
if not flexible:
if call_seq is None:
call_seq = portfolio_cfg['call_seq']
call_seq = map_enum_fields(call_seq, CallSeqType)
if isinstance(call_seq, int):
if call_seq == CallSeqType.Auto:
raise ValueError("CallSeqType.Auto must be implemented manually. "
"Use sort_call_seq_nb in pre_segment_func_nb.")
if segment_mask is None:
segment_mask = True
if call_pre_segment is None:
call_pre_segment = portfolio_cfg['call_pre_segment']
if call_post_segment is None:
call_post_segment = portfolio_cfg['call_post_segment']
if ffill_val_price is None:
ffill_val_price = portfolio_cfg['ffill_val_price']
if update_value is None:
update_value = portfolio_cfg['update_value']
if fill_pos_record is None:
fill_pos_record = portfolio_cfg['fill_pos_record']
if row_wise is None:
row_wise = portfolio_cfg['row_wise']
if use_numba is None:
use_numba = portfolio_cfg['use_numba']
if seed is None:
seed = portfolio_cfg['seed']
if seed is not None:
set_seed(seed)
if freq is None:
freq = portfolio_cfg['freq']
if attach_call_seq is None:
attach_call_seq = portfolio_cfg['attach_call_seq']
if broadcast_named_args is None:
broadcast_named_args = {}
if broadcast_kwargs is None:
broadcast_kwargs = {}
require_kwargs = dict(require_kwargs=dict(requirements='W'))
broadcast_kwargs = merge_dicts(require_kwargs, broadcast_kwargs)
if template_mapping is None:
template_mapping = {}
if wrapper_kwargs is None:
wrapper_kwargs = {}
if not wrapper_kwargs.get('group_select', True) and cash_sharing:
raise ValueError("group_select cannot be disabled if cash_sharing=True")
# Prepare the simulation
broadcastable_args = {**dict(close=close), **broadcast_named_args}
if len(broadcastable_args) > 1:
close_idx = list(broadcastable_args.keys()).index('close')
keep_raw = [True] * len(broadcastable_args)
keep_raw[close_idx] = False
broadcast_kwargs = merge_dicts(dict(
keep_raw=keep_raw,
require_kwargs=dict(requirements='W')
), broadcast_kwargs)
broadcasted_args = broadcast(*broadcastable_args.values(), **broadcast_kwargs)
broadcasted_args = dict(zip(broadcastable_args.keys(), broadcasted_args))
close = broadcasted_args['close']
if not checks.is_pandas(close):
close = pd.Series(close) if close.ndim == 1 else pd.DataFrame(close)
else:
broadcasted_args = broadcastable_args
broadcasted_args['close'] = to_2d_array(close)
target_shape_2d = (close.shape[0], close.shape[1] if close.ndim > 1 else 1)
wrapper = ArrayWrapper.from_obj(close, freq=freq, group_by=group_by, **wrapper_kwargs)
cs_group_lens = wrapper.grouper.get_group_lens(group_by=None if cash_sharing else False)
init_cash = np.require(np.broadcast_to(init_cash, (len(cs_group_lens),)), dtype=np.float_)
group_lens = wrapper.grouper.get_group_lens(group_by=group_by)
if isinstance(segment_mask, int):
_segment_mask = np.full((target_shape_2d[0], len(group_lens)), False)
_segment_mask[0::segment_mask] = True
segment_mask = _segment_mask
else:
segment_mask = broadcast(
segment_mask,
to_shape=(target_shape_2d[0], len(group_lens)),
to_pd=False,
**require_kwargs
)
if not flexible:
if checks.is_any_array(call_seq):
call_seq = nb.require_call_seq(broadcast(call_seq, to_shape=target_shape_2d, to_pd=False))
else:
call_seq = nb.build_call_seq(target_shape_2d, group_lens, call_seq_type=call_seq)
if max_orders is None:
max_orders = target_shape_2d[0] * target_shape_2d[1]
if max_logs is None:
max_logs = target_shape_2d[0] * target_shape_2d[1]
template_mapping = {**broadcasted_args, **dict(
target_shape=target_shape_2d,
group_lens=group_lens,
init_cash=init_cash,
cash_sharing=cash_sharing,
segment_mask=segment_mask,
call_pre_segment=call_pre_segment,
call_post_segment=call_post_segment,
pre_sim_func_nb=pre_sim_func_nb,
pre_sim_args=pre_sim_args,
post_sim_func_nb=post_sim_func_nb,
post_sim_args=post_sim_args,
pre_group_func_nb=pre_group_func_nb,
pre_group_args=pre_group_args,
post_group_func_nb=post_group_func_nb,
post_group_args=post_group_args,
pre_row_func_nb=pre_row_func_nb,
pre_row_args=pre_row_args,
post_row_func_nb=post_row_func_nb,
post_row_args=post_row_args,
pre_segment_func_nb=pre_segment_func_nb,
pre_segment_args=pre_segment_args,
post_segment_func_nb=post_segment_func_nb,
post_segment_args=post_segment_args,
flex_order_func_nb=order_func_nb,
flex_order_args=order_args,
post_order_func_nb=post_order_func_nb,
post_order_args=post_order_args,
ffill_val_price=ffill_val_price,
update_value=update_value,
fill_pos_record=fill_pos_record,
max_orders=max_orders,
max_logs=max_logs,
flex_2d=close.ndim == 2,
wrapper=wrapper
), **template_mapping}
pre_sim_args = deep_substitute(pre_sim_args, template_mapping)
post_sim_args = deep_substitute(post_sim_args, template_mapping)
pre_group_args = deep_substitute(pre_group_args, template_mapping)
post_group_args = deep_substitute(post_group_args, template_mapping)
pre_row_args = deep_substitute(pre_row_args, template_mapping)
post_row_args = deep_substitute(post_row_args, template_mapping)
pre_segment_args = deep_substitute(pre_segment_args, template_mapping)
post_segment_args = deep_substitute(post_segment_args, template_mapping)
order_args = deep_substitute(order_args, template_mapping)
post_order_args = deep_substitute(post_order_args, template_mapping)
if use_numba:
checks.assert_numba_func(pre_sim_func_nb)
checks.assert_numba_func(post_sim_func_nb)
checks.assert_numba_func(pre_group_func_nb)
checks.assert_numba_func(post_group_func_nb)
checks.assert_numba_func(pre_row_func_nb)
checks.assert_numba_func(post_row_func_nb)
checks.assert_numba_func(pre_segment_func_nb)
checks.assert_numba_func(post_segment_func_nb)
checks.assert_numba_func(order_func_nb)
checks.assert_numba_func(post_order_func_nb)
# Perform the simulation
if row_wise:
if flexible:
simulate_func = nb.flex_simulate_row_wise_nb
if not use_numba and hasattr(simulate_func, 'py_func'):
simulate_func = simulate_func.py_func
order_records, log_records = simulate_func(
target_shape=target_shape_2d,
group_lens=group_lens,
init_cash=init_cash,
cash_sharing=cash_sharing,
segment_mask=segment_mask,
call_pre_segment=call_pre_segment,
call_post_segment=call_post_segment,
pre_sim_func_nb=pre_sim_func_nb,
pre_sim_args=pre_sim_args,
post_sim_func_nb=post_sim_func_nb,
post_sim_args=post_sim_args,
pre_row_func_nb=pre_row_func_nb,
pre_row_args=pre_row_args,
post_row_func_nb=post_row_func_nb,
post_row_args=post_row_args,
pre_segment_func_nb=pre_segment_func_nb,
pre_segment_args=pre_segment_args,
post_segment_func_nb=post_segment_func_nb,
post_segment_args=post_segment_args,
flex_order_func_nb=order_func_nb,
flex_order_args=order_args,
post_order_func_nb=post_order_func_nb,
post_order_args=post_order_args,
close=broadcasted_args['close'],
ffill_val_price=ffill_val_price,
update_value=update_value,
fill_pos_record=fill_pos_record,
max_orders=max_orders,
max_logs=max_logs,
flex_2d=close.ndim == 2
)
else:
simulate_func = nb.simulate_row_wise_nb
if not use_numba and hasattr(simulate_func, 'py_func'):
simulate_func = simulate_func.py_func
order_records, log_records = simulate_func(
target_shape=target_shape_2d,
group_lens=group_lens,
init_cash=init_cash,
cash_sharing=cash_sharing,
call_seq=call_seq,
segment_mask=segment_mask,
call_pre_segment=call_pre_segment,
call_post_segment=call_post_segment,
pre_sim_func_nb=pre_sim_func_nb,
pre_sim_args=pre_sim_args,
post_sim_func_nb=post_sim_func_nb,
post_sim_args=post_sim_args,
pre_row_func_nb=pre_row_func_nb,
pre_row_args=pre_row_args,
post_row_func_nb=post_row_func_nb,
post_row_args=post_row_args,
pre_segment_func_nb=pre_segment_func_nb,
pre_segment_args=pre_segment_args,
post_segment_func_nb=post_segment_func_nb,
post_segment_args=post_segment_args,
order_func_nb=order_func_nb,
order_args=order_args,
post_order_func_nb=post_order_func_nb,
post_order_args=post_order_args,
close=broadcasted_args['close'],
ffill_val_price=ffill_val_price,
update_value=update_value,
fill_pos_record=fill_pos_record,
max_orders=max_orders,
max_logs=max_logs,
flex_2d=close.ndim == 2
)
else:
if flexible:
simulate_func = nb.flex_simulate_nb
if not use_numba and hasattr(simulate_func, 'py_func'):
simulate_func = simulate_func.py_func
order_records, log_records = simulate_func(
target_shape=target_shape_2d,
group_lens=group_lens,
init_cash=init_cash,
cash_sharing=cash_sharing,
segment_mask=segment_mask,
call_pre_segment=call_pre_segment,
call_post_segment=call_post_segment,
pre_sim_func_nb=pre_sim_func_nb,
pre_sim_args=pre_sim_args,
post_sim_func_nb=post_sim_func_nb,
post_sim_args=post_sim_args,
pre_group_func_nb=pre_group_func_nb,
pre_group_args=pre_group_args,
post_group_func_nb=post_group_func_nb,
post_group_args=post_group_args,
pre_segment_func_nb=pre_segment_func_nb,
pre_segment_args=pre_segment_args,
post_segment_func_nb=post_segment_func_nb,
post_segment_args=post_segment_args,
flex_order_func_nb=order_func_nb,
flex_order_args=order_args,
post_order_func_nb=post_order_func_nb,
post_order_args=post_order_args,
close=broadcasted_args['close'],
ffill_val_price=ffill_val_price,
update_value=update_value,
fill_pos_record=fill_pos_record,
max_orders=max_orders,
max_logs=max_logs,
flex_2d=close.ndim == 2
)
else:
simulate_func = nb.simulate_nb
if not use_numba and hasattr(simulate_func, 'py_func'):
simulate_func = simulate_func.py_func
order_records, log_records = simulate_func(
target_shape=target_shape_2d,
group_lens=group_lens,
init_cash=init_cash,
cash_sharing=cash_sharing,
call_seq=call_seq,
segment_mask=segment_mask,
call_pre_segment=call_pre_segment,
call_post_segment=call_post_segment,
pre_sim_func_nb=pre_sim_func_nb,
pre_sim_args=pre_sim_args,
post_sim_func_nb=post_sim_func_nb,
post_sim_args=post_sim_args,
pre_group_func_nb=pre_group_func_nb,
pre_group_args=pre_group_args,
post_group_func_nb=post_group_func_nb,
post_group_args=post_group_args,
pre_segment_func_nb=pre_segment_func_nb,
pre_segment_args=pre_segment_args,
post_segment_func_nb=post_segment_func_nb,
post_segment_args=post_segment_args,
order_func_nb=order_func_nb,
order_args=order_args,
post_order_func_nb=post_order_func_nb,
post_order_args=post_order_args,
close=broadcasted_args['close'],
ffill_val_price=ffill_val_price,
update_value=update_value,
fill_pos_record=fill_pos_record,
max_orders=max_orders,
max_logs=max_logs,
flex_2d=close.ndim == 2
)
# Create an instance
return cls(
wrapper,
close,
order_records,
log_records,
init_cash if init_cash_mode is None else init_cash_mode,
cash_sharing,
call_seq=call_seq if not flexible and attach_call_seq else None,
**kwargs
)
# ############# Properties ############# #
@property
def wrapper(self) -> ArrayWrapper:
    """Array wrapper."""
    if not self.cash_sharing:
        return self._wrapper
    # With cash sharing, grouping may only be disabled on demand,
    # never enabled or modified globally (see `Portfolio.regroup`)
    return self._wrapper.replace(allow_enable=False, allow_modify=False)
def regroup(self: PortfolioT, group_by: tp.GroupByLike, **kwargs) -> PortfolioT:
    """Regroup this object.

    See `vectorbt.base.array_wrapper.Wrapping.regroup`.

    !!! note
        All cached objects will be lost."""
    # Shared cash couples columns of a group; changing grouping would break that coupling
    if self.cash_sharing and self.wrapper.grouper.is_grouping_modified(group_by=group_by):
        raise ValueError("Cannot modify grouping globally when cash_sharing=True")
    return Wrapping.regroup(self, group_by, **kwargs)
@property
def cash_sharing(self) -> bool:
    """Whether to share cash within the same group.

    Set at construction and read-only afterwards."""
    return self._cash_sharing
@property
def call_seq(self) -> tp.Optional[tp.SeriesFrame]:
    """Sequence of calls per row and group.

    Returns None if no call sequence was attached during simulation.

    NOTE(review): the previous version declared a `wrap_kwargs` parameter, but a
    property getter is always invoked with `self` only, so the parameter was
    unreachable dead code; it has been removed. Behavior is unchanged."""
    if self._call_seq is None:
        return None
    return self.wrapper.wrap(self._call_seq, group_by=False)
@property
def fillna_close(self) -> bool:
    """Whether to forward-backward fill NaN values in `Portfolio.close`.

    Consulted by value-based methods such as `Portfolio.asset_value`,
    `Portfolio.total_profit`, and `Portfolio.benchmark_value`."""
    return self._fillna_close
@property
def trades_type(self) -> int:
    """Default `vectorbt.portfolio.trades.Trades` to use across `Portfolio`.

    Decides which record type `Portfolio.get_trades` returns."""
    return self._trades_type
# ############# Reference price ############# #
@property
def close(self) -> tp.SeriesFrame:
    """Price per unit series.

    May contain NaN values; see `Portfolio.get_filled_close` for a gap-free version."""
    return self._close
@cached_method
def get_filled_close(self, wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Forward-backward-fill NaN values in `Portfolio.close`"""
    filled = self.close.ffill().bfill()
    wrap_kwargs = merge_dicts({}, wrap_kwargs)
    return self.wrapper.wrap(to_2d_array(filled), group_by=False, **wrap_kwargs)
# ############# Records ############# #
@property
def order_records(self) -> tp.RecordArray:
    """A structured NumPy array of order records.

    Raw records; see `Portfolio.orders` for the wrapped representation."""
    return self._order_records
@cached_property
def orders(self) -> Orders:
    """`Portfolio.get_orders` with default arguments.

    Cached; use `Portfolio.get_orders` to pass custom arguments."""
    return self.get_orders()
@cached_method
def get_orders(self, group_by: tp.GroupByLike = None, **kwargs) -> Orders:
    """Get order records.

    See `vectorbt.portfolio.orders.Orders`."""
    orders = Orders(self.wrapper, self.order_records, close=self.close, **kwargs)
    return orders.regroup(group_by)
@property
def log_records(self) -> tp.RecordArray:
    """A structured NumPy array of log records.

    Raw records; see `Portfolio.logs` for the wrapped representation."""
    return self._log_records
@cached_property
def logs(self) -> Logs:
    """`Portfolio.get_logs` with default arguments.

    Cached; use `Portfolio.get_logs` to pass custom arguments."""
    return self.get_logs()
@cached_method
def get_logs(self, group_by: tp.GroupByLike = None, **kwargs) -> Logs:
    """Get log records.

    See `vectorbt.portfolio.logs.Logs`."""
    logs = Logs(self.wrapper, self.log_records, **kwargs)
    return logs.regroup(group_by)
@cached_property
def entry_trades(self) -> EntryTrades:
    """`Portfolio.get_entry_trades` with default arguments.

    Cached; use `Portfolio.get_entry_trades` to pass custom arguments."""
    return self.get_entry_trades()
@cached_method
def get_entry_trades(self, group_by: tp.GroupByLike = None, **kwargs) -> EntryTrades:
    """Get entry trade records.

    See `vectorbt.portfolio.trades.EntryTrades`."""
    trades = EntryTrades.from_orders(self.orders, **kwargs)
    return trades.regroup(group_by)
@cached_property
def exit_trades(self) -> ExitTrades:
    """`Portfolio.get_exit_trades` with default arguments.

    Cached; use `Portfolio.get_exit_trades` to pass custom arguments."""
    return self.get_exit_trades()
@cached_method
def get_exit_trades(self, group_by: tp.GroupByLike = None, **kwargs) -> ExitTrades:
    """Get exit trade records.

    See `vectorbt.portfolio.trades.ExitTrades`."""
    trades = ExitTrades.from_orders(self.orders, **kwargs)
    return trades.regroup(group_by)
@cached_property
def trades(self) -> Trades:
    """`Portfolio.get_trades` with default arguments.

    The concrete record type depends on `Portfolio.trades_type`."""
    return self.get_trades()
@cached_property
def positions(self) -> Positions:
    """`Portfolio.get_positions` with default arguments.

    Cached; use `Portfolio.get_positions` to pass custom arguments."""
    return self.get_positions()
@cached_method
def get_positions(self, group_by: tp.GroupByLike = None, **kwargs) -> Positions:
    """Get position records.

    See `vectorbt.portfolio.trades.Positions`."""
    # Positions are aggregated from exit trades
    positions = Positions.from_trades(self.exit_trades, **kwargs)
    return positions.regroup(group_by)
@cached_method
def get_trades(self, group_by: tp.GroupByLike = None, **kwargs) -> Trades:
    """Get trade/position records depending upon `Portfolio.trades_type`."""
    # Dispatch on the configured trade type; positions are the fallback
    getters = {
        TradesType.EntryTrades: self.get_entry_trades,
        TradesType.ExitTrades: self.get_exit_trades,
    }
    getter = getters.get(self.trades_type, self.get_positions)
    return getter(group_by=group_by, **kwargs)
@cached_property
def drawdowns(self) -> Drawdowns:
    """`Portfolio.get_drawdowns` with default arguments.

    Cached; use `Portfolio.get_drawdowns` to pass custom arguments."""
    return self.get_drawdowns()
@cached_method
def get_drawdowns(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None,
                  wrapper_kwargs: tp.KwargsLike = None, **kwargs) -> Drawdowns:
    """Get drawdown records from `Portfolio.value`.

    See `vectorbt.generic.drawdowns.Drawdowns`."""
    # Drawdowns are derived from the portfolio value series
    portfolio_value = self.value(group_by=group_by, wrap_kwargs=wrap_kwargs)
    # The value series is already per group, so force group_by=None on the wrapper
    merged_wrapper_kwargs = merge_dicts(self.orders.wrapper.config, wrapper_kwargs, dict(group_by=None))
    return Drawdowns.from_ts(portfolio_value, wrapper_kwargs=merged_wrapper_kwargs, **kwargs)
# ############# Assets ############# #
@cached_method
def asset_flow(self, direction: str = 'both', wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Get asset flow series per column.

    Returns the total transacted amount of assets at each time step."""
    direction = map_enum_fields(direction, Direction)
    flow = nb.asset_flow_nb(
        self.wrapper.shape_2d,
        self.orders.values,
        self.orders.col_mapper.col_map,
        direction
    )
    wrap_kwargs = merge_dicts({}, wrap_kwargs)
    return self.wrapper.wrap(flow, group_by=False, **wrap_kwargs)
@cached_method
def assets(self, direction: str = 'both', wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Get asset series per column.

    Returns the current position at each time step."""
    direction = map_enum_fields(direction, Direction)
    # Always accumulate the full (both-direction) flow, then mask by direction
    flow = to_2d_array(self.asset_flow(direction='both'))
    position = nb.assets_nb(flow)
    if direction == Direction.LongOnly:
        position = np.where(position > 0, position, 0.)
    elif direction == Direction.ShortOnly:
        # Short positions are reported as positive amounts
        position = np.where(position < 0, -position, 0.)
    return self.wrapper.wrap(position, group_by=False, **merge_dicts({}, wrap_kwargs))
@cached_method
def position_mask(self, direction: str = 'both', group_by: tp.GroupByLike = None,
                  wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Get position mask per column/group.

    An element is True if the asset is in the market at this tick."""
    direction = map_enum_fields(direction, Direction)
    if self.wrapper.grouper.is_grouped(group_by=group_by):
        # The grouped mask is reduced from the per-column mask; the per-column
        # assets were previously computed here too but never used in this branch
        position_mask = to_2d_array(self.position_mask(direction=direction, group_by=False))
        group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
        position_mask = nb.position_mask_grouped_nb(position_mask, group_lens)
    else:
        # In the market whenever the position amount is nonzero
        assets = to_2d_array(self.assets(direction=direction))
        position_mask = assets != 0
    return self.wrapper.wrap(position_mask, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def position_coverage(self, direction: str = 'both', group_by: tp.GroupByLike = None,
                      wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Get position coverage per column/group.

    Coverage is the fraction of time steps with an open position."""
    direction = map_enum_fields(direction, Direction)
    if self.wrapper.grouper.is_grouped(group_by=group_by):
        # Grouped coverage is reduced from the per-column mask; the per-column
        # assets were previously computed here too but never used in this branch
        position_mask = to_2d_array(self.position_mask(direction=direction, group_by=False))
        group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
        position_coverage = nb.position_coverage_grouped_nb(position_mask, group_lens)
    else:
        assets = to_2d_array(self.assets(direction=direction))
        position_coverage = np.mean(assets != 0, axis=0)
    wrap_kwargs = merge_dicts(dict(name_or_index='position_coverage'), wrap_kwargs)
    return self.wrapper.wrap_reduced(position_coverage, group_by=group_by, **wrap_kwargs)
# ############# Cash ############# #
@cached_method
def cash_flow(self, group_by: tp.GroupByLike = None, free: bool = False,
              wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Get cash flow series per column/group.

    Use `free` to return the flow of the free cash, which never goes above the initial level,
    because an operation always costs money."""
    if not self.wrapper.grouper.is_grouped(group_by=group_by):
        # Per-column flow computed directly from order records
        out = nb.cash_flow_nb(
            self.wrapper.shape_2d,
            self.orders.values,
            self.orders.col_mapper.col_map,
            free
        )
    else:
        # Grouped flow is the per-column flow summed over each group
        per_col = to_2d_array(self.cash_flow(group_by=False, free=free))
        group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
        out = nb.cash_flow_grouped_nb(per_col, group_lens)
    return self.wrapper.wrap(out, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_property
def init_cash(self) -> tp.MaybeSeries:
    """`Portfolio.get_init_cash` with default arguments.

    Cached; use `Portfolio.get_init_cash` to pass custom arguments."""
    return self.get_init_cash()
@cached_method
def get_init_cash(self, group_by: tp.GroupByLike = None,
                  wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Initial amount of cash per column/group with default arguments.

    !!! note
        If the initial cash balance was found automatically and no own cash is used throughout
        the simulation (for example, when shorting), it will be set to 1 instead of 0 to enable
        smooth calculation of returns."""
    if isinstance(self._init_cash, int):
        # An int here is an InitCashMode (Auto/AutoAlign): derive the balance
        # from the deepest trough of the cumulative cash flow
        cash_flow = to_2d_array(self.cash_flow(group_by=group_by))
        cash_min = np.min(np.cumsum(cash_flow, axis=0), axis=0)
        # Columns/groups that never go negative get 1. (see note above)
        init_cash = np.where(cash_min < 0, np.abs(cash_min), 1.)
        if self._init_cash == InitCashMode.AutoAlign:
            # Align all columns/groups to the single largest balance
            init_cash = np.full(init_cash.shape, np.max(init_cash))
    else:
        # Explicit balances: distribute/aggregate them per the requested grouping
        init_cash = to_1d_array(self._init_cash)
        if self.wrapper.grouper.is_grouped(group_by=group_by):
            group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
            init_cash = nb.init_cash_grouped_nb(init_cash, group_lens, self.cash_sharing)
        else:
            group_lens = self.wrapper.grouper.get_group_lens()
            init_cash = nb.init_cash_nb(init_cash, group_lens, self.cash_sharing)
    wrap_kwargs = merge_dicts(dict(name_or_index='init_cash'), wrap_kwargs)
    return self.wrapper.wrap_reduced(init_cash, group_by=group_by, **wrap_kwargs)
@cached_method
def cash(self, group_by: tp.GroupByLike = None, in_sim_order: bool = False, free: bool = False,
         wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Get cash balance series per column/group.

    See the explanation on `in_sim_order` in `Portfolio.value`.
    For `free`, see `Portfolio.cash_flow`.

    Raises ValueError if `in_sim_order` is True while cash sharing is disabled,
    or if no call sequence was attached when one is required."""
    if in_sim_order and not self.cash_sharing:
        raise ValueError("Cash sharing must be enabled for in_sim_order=True")
    cash_flow = to_2d_array(self.cash_flow(group_by=group_by, free=free))
    if self.wrapper.grouper.is_grouped(group_by=group_by):
        # Grouped: accumulate the grouped flow on top of the grouped initial balance
        group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
        init_cash = to_1d_array(self.get_init_cash(group_by=group_by))
        cash = nb.cash_grouped_nb(
            self.wrapper.shape_2d,
            cash_flow,
            group_lens,
            init_cash
        )
    else:
        if self.wrapper.grouper.is_grouping_disabled(group_by=group_by) and in_sim_order:
            # Ungrouped view of a grouped portfolio in simulation order:
            # requires the call sequence recorded during simulation
            if self.call_seq is None:
                raise ValueError("No call sequence attached. "
                                 "Pass `attach_call_seq=True` to the class method "
                                 "(flexible simulations are not supported)")
            group_lens = self.wrapper.grouper.get_group_lens()
            init_cash = to_1d_array(self.init_cash)
            call_seq = to_2d_array(self.call_seq)
            cash = nb.cash_in_sim_order_nb(cash_flow, group_lens, init_cash, call_seq)
        else:
            # Plain per-column cash balance
            init_cash = to_1d_array(self.get_init_cash(group_by=False))
            cash = nb.cash_nb(cash_flow, init_cash)
    return self.wrapper.wrap(cash, group_by=group_by, **merge_dicts({}, wrap_kwargs))
# ############# Performance ############# #
@cached_method
def asset_value(self, direction: str = 'both', group_by: tp.GroupByLike = None,
                wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Get asset value series per column/group."""
    direction = map_enum_fields(direction, Direction)
    # Copy the close array since it gets mutated below
    if self.fillna_close:
        close = to_2d_array(self.get_filled_close()).copy()
    else:
        close = to_2d_array(self.close).copy()
    assets = to_2d_array(self.assets(direction=direction))
    # Zero out the price where there is no position, so that a NaN price
    # cannot poison the value of an empty position
    close[assets == 0] = 0.  # for price being NaN
    if self.wrapper.grouper.is_grouped(group_by=group_by):
        # Grouped value is the per-column value summed over each group
        asset_value = to_2d_array(self.asset_value(direction=direction, group_by=False))
        group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
        asset_value = nb.asset_value_grouped_nb(asset_value, group_lens)
    else:
        asset_value = nb.asset_value_nb(close, assets)
    return self.wrapper.wrap(asset_value, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def gross_exposure(self, direction: str = 'both', group_by: tp.GroupByLike = None,
                   wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Get gross exposure."""
    # Exposure relates asset value to the free cash balance
    held_value = to_2d_array(self.asset_value(group_by=group_by, direction=direction))
    free_cash = to_2d_array(self.cash(group_by=group_by, free=True))
    exposure = nb.gross_exposure_nb(held_value, free_cash)
    return self.wrapper.wrap(exposure, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def net_exposure(self, group_by: tp.GroupByLike = None,
                 wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Get net exposure."""
    # Net exposure is long exposure minus short exposure
    long_side = to_2d_array(self.gross_exposure(direction='longonly', group_by=group_by))
    short_side = to_2d_array(self.gross_exposure(direction='shortonly', group_by=group_by))
    exposure = long_side - short_side
    return self.wrapper.wrap(exposure, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def value(self, group_by: tp.GroupByLike = None, in_sim_order: bool = False,
          wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Get portfolio value series per column/group.

    By default, will generate portfolio value for each asset based on cash flows and thus
    independent from other assets, with the initial cash balance and position being that of the
    entire group. Useful for generating returns and comparing assets within the same group.

    When `group_by` is False and `in_sim_order` is True, returns value generated in
    simulation order (see [row-major order](https://en.wikipedia.org/wiki/Row-_and_column-major_order)).
    This value cannot be used for generating returns as-is. Useful to analyze how value
    evolved throughout simulation."""
    cash = to_2d_array(self.cash(group_by=group_by, in_sim_order=in_sim_order))
    asset_value = to_2d_array(self.asset_value(group_by=group_by))
    if self.wrapper.grouper.is_grouping_disabled(group_by=group_by) and in_sim_order:
        # Simulation-order value requires the call sequence recorded during simulation
        if self.call_seq is None:
            raise ValueError("No call sequence attached. "
                             "Pass `attach_call_seq=True` to the class method "
                             "(flexible simulations are not supported)")
        group_lens = self.wrapper.grouper.get_group_lens()
        call_seq = to_2d_array(self.call_seq)
        value = nb.value_in_sim_order_nb(cash, asset_value, group_lens, call_seq)
        # price of NaN is already addressed by ungrouped_value_nb
    else:
        value = nb.value_nb(cash, asset_value)
    return self.wrapper.wrap(value, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def total_profit(self, group_by: tp.GroupByLike = None,
                 wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Get total profit per column/group.

    Calculated directly from order records (fast)."""
    if not self.wrapper.grouper.is_grouped(group_by=group_by):
        # Per-column profit computed directly from order records
        if self.fillna_close:
            close = to_2d_array(self.get_filled_close())
        else:
            close = to_2d_array(self.close)
        total_profit = nb.total_profit_nb(
            self.wrapper.shape_2d,
            close,
            self.orders.values,
            self.orders.col_mapper.col_map
        )
    else:
        # Grouped profit is the per-column profit summed over each group
        per_col = to_1d_array(self.total_profit(group_by=False))
        group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
        total_profit = nb.total_profit_grouped_nb(
            per_col,
            group_lens
        )
    wrap_kwargs = merge_dicts(dict(name_or_index='total_profit'), wrap_kwargs)
    return self.wrapper.wrap_reduced(total_profit, group_by=group_by, **wrap_kwargs)
@cached_method
def final_value(self, group_by: tp.GroupByLike = None,
                wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Get final portfolio value per column/group.

    Computed as initial cash plus total profit."""
    init_cash = to_1d_array(self.get_init_cash(group_by=group_by))
    total_profit = to_1d_array(self.total_profit(group_by=group_by))
    final_value = nb.final_value_nb(total_profit, init_cash)
    wrap_kwargs = merge_dicts(dict(name_or_index='final_value'), wrap_kwargs)
    return self.wrapper.wrap_reduced(final_value, group_by=group_by, **wrap_kwargs)
@cached_method
def total_return(self, group_by: tp.GroupByLike = None,
                 wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Get total return per column/group.

    Computed from total profit relative to initial cash."""
    init_cash = to_1d_array(self.get_init_cash(group_by=group_by))
    total_profit = to_1d_array(self.total_profit(group_by=group_by))
    total_return = nb.total_return_nb(total_profit, init_cash)
    wrap_kwargs = merge_dicts(dict(name_or_index='total_return'), wrap_kwargs)
    return self.wrapper.wrap_reduced(total_return, group_by=group_by, **wrap_kwargs)
@cached_method
def returns(self, group_by: tp.GroupByLike = None, in_sim_order: bool = False,
            wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Get return series per column/group based on portfolio value.

    See the explanation on `in_sim_order` in `Portfolio.value`."""
    value = to_2d_array(self.value(group_by=group_by, in_sim_order=in_sim_order))
    if self.wrapper.grouper.is_grouping_disabled(group_by=group_by) and in_sim_order:
        # Simulation-order returns require the call sequence recorded during simulation
        if self.call_seq is None:
            raise ValueError("No call sequence attached. "
                             "Pass `attach_call_seq=True` to the class method "
                             "(flexible simulations are not supported)")
        group_lens = self.wrapper.grouper.get_group_lens()
        init_cash_grouped = to_1d_array(self.init_cash)
        call_seq = to_2d_array(self.call_seq)
        returns = nb.returns_in_sim_order_nb(value, group_lens, init_cash_grouped, call_seq)
    else:
        # Plain returns from value, using the initial cash as the first base
        init_cash = to_1d_array(self.get_init_cash(group_by=group_by))
        returns = returns_nb.returns_nb(value, init_cash)
    return self.wrapper.wrap(returns, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def asset_returns(self, group_by: tp.GroupByLike = None,
                  wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Get asset return series per column/group.

    This type of returns is based solely on cash flows and asset value rather than portfolio
    value. It ignores passive cash and thus it will return the same numbers irrespective of the amount of
    cash currently available, even `np.inf`. The scale of returns is comparable to that of going
    all in and keeping available cash at zero."""
    flows = to_2d_array(self.cash_flow(group_by=group_by))
    held_value = to_2d_array(self.asset_value(group_by=group_by))
    out = nb.asset_returns_nb(flows, held_value)
    return self.wrapper.wrap(out, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@property
def returns_acc(self) -> ReturnsAccessor:
    """`Portfolio.get_returns_acc` with default arguments.

    Use `Portfolio.get_returns_acc` to pass custom arguments."""
    return self.get_returns_acc()
@cached_method
def get_returns_acc(self,
                    group_by: tp.GroupByLike = None,
                    benchmark_rets: tp.Optional[tp.ArrayLike] = None,
                    freq: tp.Optional[tp.FrequencyLike] = None,
                    year_freq: tp.Optional[tp.FrequencyLike] = None,
                    use_asset_returns: bool = False,
                    defaults: tp.KwargsLike = None,
                    **kwargs) -> ReturnsAccessor:
    """Get returns accessor of type `vectorbt.returns.accessors.ReturnsAccessor`.

    !!! hint
        You can find most methods of this accessor as (cacheable) attributes of this portfolio."""
    # Fall back to the wrapper's frequency when none is given
    if freq is None:
        freq = self.wrapper.freq
    # Choose between asset returns and portfolio returns
    returns_getter = self.asset_returns if use_asset_returns else self.returns
    returns = returns_getter(group_by=group_by)
    if benchmark_rets is None:
        benchmark_rets = self.benchmark_returns(group_by=group_by)
    return returns.vbt.returns(
        benchmark_rets=benchmark_rets,
        freq=freq,
        year_freq=year_freq,
        defaults=defaults,
        **kwargs
    )
@cached_property
def qs(self) -> QSAdapterT:
    """`Portfolio.get_qs` with default arguments.

    Cached; use `Portfolio.get_qs` to pass custom arguments."""
    return self.get_qs()
@cached_method
def get_qs(self,
           group_by: tp.GroupByLike = None,
           benchmark_rets: tp.Optional[tp.ArrayLike] = None,
           freq: tp.Optional[tp.FrequencyLike] = None,
           year_freq: tp.Optional[tp.FrequencyLike] = None,
           use_asset_returns: bool = False,
           **kwargs) -> QSAdapterT:
    """Get quantstats adapter of type `vectorbt.returns.qs_adapter.QSAdapter`.

    `**kwargs` are passed to the adapter constructor."""
    # Imported lazily since quantstats is an optional dependency
    from vectorbt.returns.qs_adapter import QSAdapter

    acc = self.get_returns_acc(
        group_by=group_by,
        benchmark_rets=benchmark_rets,
        freq=freq,
        year_freq=year_freq,
        use_asset_returns=use_asset_returns
    )
    return QSAdapter(acc, **kwargs)
@cached_method
def benchmark_value(self, group_by: tp.GroupByLike = None,
                    wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Get market benchmark value series per column/group.

    If grouped, evenly distributes the initial cash among assets in the group.

    !!! note
        Does not take into account fees and slippage. For this, create a separate portfolio."""
    # Use the gap-free price when NaN filling is enabled
    close = to_2d_array(self.get_filled_close() if self.fillna_close else self.close)
    if not self.wrapper.grouper.is_grouped(group_by=group_by):
        init_cash = to_1d_array(self.get_init_cash(group_by=False))
        benchmark_value = nb.benchmark_value_nb(close, init_cash)
    else:
        group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
        init_cash_grouped = to_1d_array(self.get_init_cash(group_by=group_by))
        benchmark_value = nb.benchmark_value_grouped_nb(close, group_lens, init_cash_grouped)
    return self.wrapper.wrap(benchmark_value, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def benchmark_returns(self, group_by: tp.GroupByLike = None,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
"""Get return series per column/group based on benchmark value."""
benchmark_value = to_2d_array(self.benchmark_value(group_by=group_by))
init_cash = to_1d_array(self.get_init_cash(group_by=group_by))
benchmark_returns = returns_nb.returns_nb(benchmark_value, init_cash)
return self.wrapper.wrap(benchmark_returns, group_by=group_by, **merge_dicts({}, wrap_kwargs))
benchmark_rets = benchmark_returns
@cached_method
def total_benchmark_return(self, group_by: tp.GroupByLike = None,
wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
"""Get total benchmark return."""
benchmark_value = to_2d_array(self.benchmark_value(group_by=group_by))
total_benchmark_return = nb.total_benchmark_return_nb(benchmark_value)
wrap_kwargs = merge_dicts(dict(name_or_index='total_benchmark_return'), wrap_kwargs)
return self.wrapper.wrap_reduced(total_benchmark_return, group_by=group_by, **wrap_kwargs)
# ############# Resolution ############# #
@property
def self_aliases(self) -> tp.Set[str]:
"""Names to associate with this object."""
return {'self', 'portfolio', 'pf'}
def pre_resolve_attr(self, attr: str, final_kwargs: tp.KwargsLike = None) -> str:
"""Pre-process an attribute before resolution.
Uses the following keys:
* `use_asset_returns`: Whether to use `Portfolio.asset_returns` when resolving `returns` argument.
* `trades_type`: Which trade type to use when resolving `trades` argument."""
if 'use_asset_returns' in final_kwargs:
if attr == 'returns' and final_kwargs['use_asset_returns']:
attr = 'asset_returns'
if 'trades_type' in final_kwargs:
trades_type = final_kwargs['trades_type']
if isinstance(final_kwargs['trades_type'], str):
trades_type = map_enum_fields(trades_type, TradesType)
if attr == 'trades' and trades_type != self.trades_type:
if trades_type == TradesType.EntryTrades:
attr = 'entry_trades'
elif trades_type == TradesType.ExitTrades:
attr = 'exit_trades'
else:
attr = 'positions'
return attr
def post_resolve_attr(self, attr: str, out: tp.Any, final_kwargs: tp.KwargsLike = None) -> str:
"""Post-process an object after resolution.
Uses the following keys:
* `incl_open`: Whether to include open trades/positions when resolving an argument
that is an instance of `vectorbt.portfolio.trades.Trades`."""
if 'incl_open' in final_kwargs:
if isinstance(out, Trades) and not final_kwargs['incl_open']:
out = out.closed
return out
# ############# Stats ############# #
@property
def stats_defaults(self) -> tp.Kwargs:
"""Defaults for `Portfolio.stats`.
Merges `vectorbt.generic.stats_builder.StatsBuilderMixin.stats_defaults` and
`portfolio.stats` from `vectorbt._settings.settings`."""
from vectorbt._settings import settings
returns_cfg = settings['returns']
portfolio_stats_cfg = settings['portfolio']['stats']
return merge_dicts(
StatsBuilderMixin.stats_defaults.__get__(self),
dict(
settings=dict(
year_freq=returns_cfg['year_freq'],
trades_type=self.trades_type
)
),
portfolio_stats_cfg
)
_metrics: tp.ClassVar[Config] = Config(
dict(
start=dict(
title='Start',
calc_func=lambda self: self.wrapper.index[0],
agg_func=None,
tags='wrapper'
),
end=dict(
title='End',
calc_func=lambda self: self.wrapper.index[-1],
agg_func=None,
tags='wrapper'
),
period=dict(
title='Period',
calc_func=lambda self: len(self.wrapper.index),
apply_to_timedelta=True,
agg_func=None,
tags='wrapper'
),
start_value=dict(
title='Start Value',
calc_func='get_init_cash',
tags='portfolio'
),
end_value=dict(
title='End Value',
calc_func='final_value',
tags='portfolio'
),
total_return=dict(
title='Total Return [%]',
calc_func='total_return',
post_calc_func=lambda self, out, settings: out * 100,
tags='portfolio'
),
benchmark_return=dict(
title='Benchmark Return [%]',
calc_func='benchmark_rets.vbt.returns.total',
post_calc_func=lambda self, out, settings: out * 100,
tags='portfolio'
),
max_gross_exposure=dict(
title='Max Gross Exposure [%]',
calc_func='gross_exposure.vbt.max',
post_calc_func=lambda self, out, settings: out * 100,
tags='portfolio'
),
total_fees_paid=dict(
title='Total Fees Paid',
calc_func='orders.fees.sum',
tags=['portfolio', 'orders']
),
max_dd=dict(
title='Max Drawdown [%]',
calc_func='drawdowns.max_drawdown',
post_calc_func=lambda self, out, settings: -out * 100,
tags=['portfolio', 'drawdowns']
),
max_dd_duration=dict(
title='Max Drawdown Duration',
calc_func='drawdowns.max_duration',
fill_wrap_kwargs=True,
tags=['portfolio', 'drawdowns', 'duration']
),
total_trades=dict(
title='Total Trades',
calc_func='trades.count',
incl_open=True,
tags=['portfolio', 'trades']
),
total_closed_trades=dict(
title='Total Closed Trades',
calc_func='trades.closed.count',
tags=['portfolio', 'trades', 'closed']
),
total_open_trades=dict(
title='Total Open Trades',
calc_func='trades.open.count',
incl_open=True,
tags=['portfolio', 'trades', 'open']
),
open_trade_pnl=dict(
title='Open Trade PnL',
calc_func='trades.open.pnl.sum',
incl_open=True,
tags=['portfolio', 'trades', 'open']
),
win_rate=dict(
title='Win Rate [%]',
calc_func='trades.win_rate',
post_calc_func=lambda self, out, settings: out * 100,
tags=RepEval("['portfolio', 'trades', *incl_open_tags]")
),
best_trade=dict(
title='Best Trade [%]',
calc_func='trades.returns.max',
post_calc_func=lambda self, out, settings: out * 100,
tags=RepEval("['portfolio', 'trades', *incl_open_tags]")
),
worst_trade=dict(
title='Worst Trade [%]',
calc_func='trades.returns.min',
post_calc_func=lambda self, out, settings: out * 100,
tags=RepEval("['portfolio', 'trades', *incl_open_tags]")
),
avg_winning_trade=dict(
title='Avg Winning Trade [%]',
calc_func='trades.winning.returns.mean',
post_calc_func=lambda self, out, settings: out * 100,
tags=RepEval("['portfolio', 'trades', *incl_open_tags, 'winning']")
),
avg_losing_trade=dict(
title='Avg Losing Trade [%]',
calc_func='trades.losing.returns.mean',
post_calc_func=lambda self, out, settings: out * 100,
tags=RepEval("['portfolio', 'trades', *incl_open_tags, 'losing']")
),
avg_winning_trade_duration=dict(
title='Avg Winning Trade Duration',
calc_func='trades.winning.duration.mean',
apply_to_timedelta=True,
tags=RepEval("['portfolio', 'trades', *incl_open_tags, 'winning', 'duration']")
),
avg_losing_trade_duration=dict(
title='Avg Losing Trade Duration',
calc_func='trades.losing.duration.mean',
apply_to_timedelta=True,
tags=RepEval("['portfolio', 'trades', *incl_open_tags, 'losing', 'duration']")
),
profit_factor=dict(
title='Profit Factor',
calc_func='trades.profit_factor',
tags=RepEval("['portfolio', 'trades', *incl_open_tags]")
),
expectancy=dict(
title='Expectancy',
calc_func='trades.expectancy',
tags=RepEval("['portfolio', 'trades', *incl_open_tags]")
),
sharpe_ratio=dict(
title='Sharpe Ratio',
calc_func='returns_acc.sharpe_ratio',
check_has_freq=True,
check_has_year_freq=True,
tags=['portfolio', 'returns']
),
calmar_ratio=dict(
title='Calmar Ratio',
calc_func='returns_acc.calmar_ratio',
check_has_freq=True,
check_has_year_freq=True,
tags=['portfolio', 'returns']
),
omega_ratio=dict(
title='Omega Ratio',
calc_func='returns_acc.omega_ratio',
check_has_freq=True,
check_has_year_freq=True,
tags=['portfolio', 'returns']
),
sortino_ratio=dict(
title='Sortino Ratio',
calc_func='returns_acc.sortino_ratio',
check_has_freq=True,
check_has_year_freq=True,
tags=['portfolio', 'returns']
)
),
copy_kwargs=dict(copy_mode='deep')
)
    @property
    def metrics(self) -> Config:
        """Metrics supported by `Portfolio.stats`."""
        return self._metrics
def returns_stats(self,
group_by: tp.GroupByLike = None,
benchmark_rets: tp.Optional[tp.ArrayLike] = None,
freq: tp.Optional[tp.FrequencyLike] = None,
year_freq: tp.Optional[tp.FrequencyLike] = None,
use_asset_returns: bool = False,
defaults: tp.KwargsLike = None,
**kwargs) -> tp.SeriesFrame:
"""Compute various statistics on returns of this portfolio.
See `Portfolio.returns_acc` and `vectorbt.returns.accessors.ReturnsAccessor.metrics`.
`kwargs` will be passed to `vectorbt.returns.accessors.ReturnsAccessor.stats` method.
If `benchmark_rets` is not set, uses `Portfolio.benchmark_returns`."""
returns_acc = self.get_returns_acc(
group_by=group_by,
benchmark_rets=benchmark_rets,
freq=freq,
year_freq=year_freq,
use_asset_returns=use_asset_returns,
defaults=defaults
)
return getattr(returns_acc, 'stats')(**kwargs)
# ############# Plotting ############# #
def plot_orders(self, column: tp.Optional[tp.Label] = None, **kwargs) -> tp.BaseFigure: # pragma: no cover
"""Plot one column/group of orders."""
kwargs = merge_dicts(dict(close_trace_kwargs=dict(name='Close')), kwargs)
return self.orders.regroup(False).plot(column=column, **kwargs)
def plot_trades(self, column: tp.Optional[tp.Label] = None, **kwargs) -> tp.BaseFigure: # pragma: no cover
"""Plot one column/group of trades."""
kwargs = merge_dicts(dict(close_trace_kwargs=dict(name='Close')), kwargs)
return self.trades.regroup(False).plot(column=column, **kwargs)
def plot_trade_pnl(self, column: tp.Optional[tp.Label] = None, **kwargs) -> tp.BaseFigure: # pragma: no cover
"""Plot one column/group of trade PnL."""
kwargs = merge_dicts(dict(close_trace_kwargs=dict(name='Close')), kwargs)
return self.trades.regroup(False).plot_pnl(column=column, **kwargs)
def plot_positions(self, column: tp.Optional[tp.Label] = None, **kwargs) -> tp.BaseFigure: # pragma: no cover
"""Plot one column/group of positions."""
kwargs = merge_dicts(dict(close_trace_kwargs=dict(name='Close')), kwargs)
return self.positions.regroup(False).plot(column=column, **kwargs)
def plot_position_pnl(self, column: tp.Optional[tp.Label] = None, **kwargs) -> tp.BaseFigure: # pragma: no cover
"""Plot one column/group of position PnL."""
kwargs = merge_dicts(dict(close_trace_kwargs=dict(name='Close')), kwargs)
return self.positions.regroup(False).plot_pnl(column=column, **kwargs)
def plot_asset_flow(self,
column: tp.Optional[tp.Label] = None,
direction: str = 'both',
xref: str = 'x',
yref: str = 'y',
hline_shape_kwargs: tp.KwargsLike = None,
**kwargs) -> tp.BaseFigure: # pragma: no cover
"""Plot one column of asset flow.
Args:
column (str): Name of the column to plot.
direction (Direction): See `vectorbt.portfolio.enums.Direction`.
xref (str): X coordinate axis.
yref (str): Y coordinate axis.
hline_shape_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Figure.add_shape` for zeroline.
**kwargs: Keyword arguments passed to `vectorbt.generic.accessors.GenericAccessor.plot`.
"""
from vectorbt._settings import settings
plotting_cfg = settings['plotting']
kwargs = merge_dicts(dict(
trace_kwargs=dict(
line=dict(
color=plotting_cfg['color_schema']['brown']
),
name='Assets'
)
), kwargs)
asset_flow = self.asset_flow(direction=direction)
asset_flow = self.select_one_from_obj(asset_flow, self.wrapper.regroup(False), column=column)
fig = asset_flow.vbt.plot(**kwargs)
x_domain = get_domain(xref, fig)
fig.add_shape(**merge_dicts(dict(
type='line',
line=dict(
color='gray',
dash="dash",
),
xref="paper",
yref=yref,
x0=x_domain[0],
y0=0,
x1=x_domain[1],
y1=0
), hline_shape_kwargs))
return fig
def plot_cash_flow(self,
column: tp.Optional[tp.Label] = None,
group_by: tp.GroupByLike = None,
free: bool = False,
xref: str = 'x',
yref: str = 'y',
hline_shape_kwargs: tp.KwargsLike = None,
**kwargs) -> tp.BaseFigure: # pragma: no cover
"""Plot one column/group of cash flow.
Args:
column (str): Name of the column/group to plot.
group_by (any): Group or ungroup columns. See `vectorbt.base.column_grouper.ColumnGrouper`.
free (bool): Whether to plot the flow of the free cash.
xref (str): X coordinate axis.
yref (str): Y coordinate axis.
hline_shape_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Figure.add_shape` for zeroline.
**kwargs: Keyword arguments passed to `vectorbt.generic.accessors.GenericAccessor.plot`.
"""
from vectorbt._settings import settings
plotting_cfg = settings['plotting']
kwargs = merge_dicts(dict(
trace_kwargs=dict(
line=dict(
color=plotting_cfg['color_schema']['green']
),
name='Cash'
)
), kwargs)
cash_flow = self.cash_flow(group_by=group_by, free=free)
cash_flow = self.select_one_from_obj(cash_flow, self.wrapper.regroup(group_by), column=column)
fig = cash_flow.vbt.plot(**kwargs)
x_domain = get_domain(xref, fig)
fig.add_shape(**merge_dicts(dict(
type='line',
line=dict(
color='gray',
dash="dash",
),
xref="paper",
yref=yref,
x0=x_domain[0],
y0=0.,
x1=x_domain[1],
y1=0.
), hline_shape_kwargs))
return fig
def plot_assets(self,
column: tp.Optional[tp.Label] = None,
direction: str = 'both',
xref: str = 'x',
yref: str = 'y',
hline_shape_kwargs: tp.KwargsLike = None,
**kwargs) -> tp.BaseFigure: # pragma: no cover
"""Plot one column of assets.
Args:
column (str): Name of the column to plot.
direction (Direction): See `vectorbt.portfolio.enums.Direction`.
xref (str): X coordinate axis.
yref (str): Y coordinate axis.
hline_shape_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Figure.add_shape` for zeroline.
**kwargs: Keyword arguments passed to `vectorbt.generic.accessors.GenericSRAccessor.plot_against`.
"""
from vectorbt._settings import settings
plotting_cfg = settings['plotting']
kwargs = merge_dicts(dict(
trace_kwargs=dict(
line=dict(
color=plotting_cfg['color_schema']['brown']
),
name='Assets'
),
pos_trace_kwargs=dict(
fillcolor=adjust_opacity(plotting_cfg['color_schema']['brown'], 0.3)
),
neg_trace_kwargs=dict(
fillcolor=adjust_opacity(plotting_cfg['color_schema']['orange'], 0.3)
),
other_trace_kwargs='hidden'
), kwargs)
assets = self.assets(direction=direction)
assets = self.select_one_from_obj(assets, self.wrapper.regroup(False), column=column)
fig = assets.vbt.plot_against(0, **kwargs)
x_domain = get_domain(xref, fig)
fig.add_shape(**merge_dicts(dict(
type='line',
line=dict(
color='gray',
dash="dash",
),
xref="paper",
yref=yref,
x0=x_domain[0],
y0=0.,
x1=x_domain[1],
y1=0.
), hline_shape_kwargs))
return fig
def plot_cash(self,
column: tp.Optional[tp.Label] = None,
group_by: tp.GroupByLike = None,
free: bool = False,
xref: str = 'x',
yref: str = 'y',
hline_shape_kwargs: tp.KwargsLike = None,
**kwargs) -> tp.BaseFigure: # pragma: no cover
"""Plot one column/group of cash balance.
Args:
column (str): Name of the column/group to plot.
group_by (any): Group or ungroup columns. See `vectorbt.base.column_grouper.ColumnGrouper`.
free (bool): Whether to plot the flow of the free cash.
xref (str): X coordinate axis.
yref (str): Y coordinate axis.
hline_shape_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Figure.add_shape` for zeroline.
**kwargs: Keyword arguments passed to `vectorbt.generic.accessors.GenericSRAccessor.plot_against`.
"""
from vectorbt._settings import settings
plotting_cfg = settings['plotting']
kwargs = merge_dicts(dict(
trace_kwargs=dict(
line=dict(
color=plotting_cfg['color_schema']['green']
),
name='Cash'
),
pos_trace_kwargs=dict(
fillcolor=adjust_opacity(plotting_cfg['color_schema']['green'], 0.3)
),
neg_trace_kwargs=dict(
fillcolor=adjust_opacity(plotting_cfg['color_schema']['red'], 0.3)
),
other_trace_kwargs='hidden'
), kwargs)
init_cash = self.get_init_cash(group_by=group_by)
init_cash = self.select_one_from_obj(init_cash, self.wrapper.regroup(group_by), column=column)
cash = self.cash(group_by=group_by, free=free)
cash = self.select_one_from_obj(cash, self.wrapper.regroup(group_by), column=column)
fig = cash.vbt.plot_against(init_cash, **kwargs)
x_domain = get_domain(xref, fig)
fig.add_shape(**merge_dicts(dict(
type='line',
line=dict(
color='gray',
dash="dash",
),
xref="paper",
yref=yref,
x0=x_domain[0],
y0=init_cash,
x1=x_domain[1],
y1=init_cash
), hline_shape_kwargs))
return fig
def plot_asset_value(self,
column: tp.Optional[tp.Label] = None,
group_by: tp.GroupByLike = None,
direction: str = 'both',
xref: str = 'x',
yref: str = 'y',
hline_shape_kwargs: tp.KwargsLike = None,
**kwargs) -> tp.BaseFigure: # pragma: no cover
"""Plot one column/group of asset value.
Args:
column (str): Name of the column/group to plot.
group_by (any): Group or ungroup columns. See `vectorbt.base.column_grouper.ColumnGrouper`.
direction (Direction): See `vectorbt.portfolio.enums.Direction`.
xref (str): X coordinate axis.
yref (str): Y coordinate axis.
hline_shape_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Figure.add_shape` for zeroline.
**kwargs: Keyword arguments passed to `vectorbt.generic.accessors.GenericSRAccessor.plot_against`.
"""
from vectorbt._settings import settings
plotting_cfg = settings['plotting']
kwargs = merge_dicts(dict(
trace_kwargs=dict(
line=dict(
color=plotting_cfg['color_schema']['cyan']
),
name='Asset Value'
),
pos_trace_kwargs=dict(
fillcolor=adjust_opacity(plotting_cfg['color_schema']['cyan'], 0.3)
),
neg_trace_kwargs=dict(
fillcolor=adjust_opacity(plotting_cfg['color_schema']['orange'], 0.3)
),
other_trace_kwargs='hidden'
), kwargs)
asset_value = self.asset_value(direction=direction, group_by=group_by)
asset_value = self.select_one_from_obj(asset_value, self.wrapper.regroup(group_by), column=column)
fig = asset_value.vbt.plot_against(0, **kwargs)
x_domain = get_domain(xref, fig)
fig.add_shape(**merge_dicts(dict(
type='line',
line=dict(
color='gray',
dash="dash",
),
xref="paper",
yref=yref,
x0=x_domain[0],
y0=0.,
x1=x_domain[1],
y1=0.
), hline_shape_kwargs))
return fig
def plot_value(self,
column: tp.Optional[tp.Label] = None,
group_by: tp.GroupByLike = None,
xref: str = 'x',
yref: str = 'y',
hline_shape_kwargs: tp.KwargsLike = None,
**kwargs) -> tp.BaseFigure: # pragma: no cover
"""Plot one column/group of value.
Args:
column (str): Name of the column/group to plot.
group_by (any): Group or ungroup columns. See `vectorbt.base.column_grouper.ColumnGrouper`.
free (bool): Whether to plot free cash flow.
xref (str): X coordinate axis.
yref (str): Y coordinate axis.
hline_shape_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Figure.add_shape` for zeroline.
**kwargs: Keyword arguments passed to `vectorbt.generic.accessors.GenericSRAccessor.plot_against`.
"""
from vectorbt._settings import settings
plotting_cfg = settings['plotting']
kwargs = merge_dicts(dict(
trace_kwargs=dict(
line=dict(
color=plotting_cfg['color_schema']['purple']
),
name='Value'
),
other_trace_kwargs='hidden'
), kwargs)
init_cash = self.get_init_cash(group_by=group_by)
init_cash = self.select_one_from_obj(init_cash, self.wrapper.regroup(group_by), column=column)
value = self.value(group_by=group_by)
value = self.select_one_from_obj(value, self.wrapper.regroup(group_by), column=column)
fig = value.vbt.plot_against(init_cash, **kwargs)
x_domain = get_domain(xref, fig)
fig.add_shape(**merge_dicts(dict(
type='line',
line=dict(
color='gray',
dash="dash",
),
xref="paper",
yref=yref,
x0=x_domain[0],
y0=init_cash,
x1=x_domain[1],
y1=init_cash
), hline_shape_kwargs))
return fig
    def plot_cum_returns(self,
                         column: tp.Optional[tp.Label] = None,
                         group_by: tp.GroupByLike = None,
                         benchmark_rets: tp.Optional[tp.ArrayLike] = None,
                         use_asset_returns: bool = False,
                         **kwargs) -> tp.BaseFigure:  # pragma: no cover
        """Plot one column/group of cumulative returns.

        Args:
            column (str): Name of the column/group to plot.
            group_by (any): Group or ungroup columns. See `vectorbt.base.column_grouper.ColumnGrouper`.
            benchmark_rets (array_like): Benchmark returns.

                If None, will use `Portfolio.benchmark_returns`.
            use_asset_returns (bool): Whether to plot asset returns.
            **kwargs: Keyword arguments passed to `vectorbt.returns.accessors.ReturnsSRAccessor.plot_cumulative`.
        """
        from vectorbt._settings import settings
        plotting_cfg = settings['plotting']

        if benchmark_rets is None:
            benchmark_rets = self.benchmark_returns(group_by=group_by)
        else:
            # NOTE(review): `self.obj` is not defined anywhere on Portfolio in
            # this file section — this branch looks like it should broadcast
            # against a wrapper-shaped object instead; confirm before relying
            # on user-supplied `benchmark_rets` here.
            benchmark_rets = broadcast_to(benchmark_rets, self.obj)
        benchmark_rets = self.select_one_from_obj(benchmark_rets, self.wrapper.regroup(group_by), column=column)
        # Benchmark and styling defaults; user kwargs take precedence
        kwargs = merge_dicts(dict(
            benchmark_rets=benchmark_rets,
            main_kwargs=dict(
                trace_kwargs=dict(
                    line=dict(
                        color=plotting_cfg['color_schema']['purple']
                    ),
                    name='Value'
                )
            ),
            hline_shape_kwargs=dict(
                type='line',
                line=dict(
                    color='gray',
                    dash="dash",
                )
            )
        ), kwargs)
        # Choose between asset returns and portfolio returns for the main line
        if use_asset_returns:
            returns = self.asset_returns(group_by=group_by)
        else:
            returns = self.returns(group_by=group_by)
        returns = self.select_one_from_obj(returns, self.wrapper.regroup(group_by), column=column)
        return returns.vbt.returns.plot_cumulative(**kwargs)
def plot_drawdowns(self,
column: tp.Optional[tp.Label] = None,
group_by: tp.GroupByLike = None,
**kwargs) -> tp.BaseFigure: # pragma: no cover
"""Plot one column/group of drawdowns.
Args:
column (str): Name of the column/group to plot.
group_by (any): Group or ungroup columns. See `vectorbt.base.column_grouper.ColumnGrouper`.
**kwargs: Keyword arguments passed to `vectorbt.generic.drawdowns.Drawdowns.plot`.
"""
from vectorbt._settings import settings
plotting_cfg = settings['plotting']
kwargs = merge_dicts(dict(
ts_trace_kwargs=dict(
line=dict(
color=plotting_cfg['color_schema']['purple']
),
name='Value'
)
), kwargs)
return self.get_drawdowns(group_by=group_by).plot(column=column, **kwargs)
def plot_underwater(self,
column: tp.Optional[tp.Label] = None,
group_by: tp.GroupByLike = None,
xref: str = 'x',
yref: str = 'y',
hline_shape_kwargs: tp.KwargsLike = None,
**kwargs) -> tp.BaseFigure: # pragma: no cover
"""Plot one column/group of underwater.
Args:
column (str): Name of the column/group to plot.
group_by (any): Group or ungroup columns. See `vectorbt.base.column_grouper.ColumnGrouper`.
xref (str): X coordinate axis.
yref (str): Y coordinate axis.
hline_shape_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Figure.add_shape` for zeroline.
**kwargs: Keyword arguments passed to `vectorbt.generic.accessors.GenericAccessor.plot`.
"""
from vectorbt._settings import settings
plotting_cfg = settings['plotting']
kwargs = merge_dicts(dict(
trace_kwargs=dict(
line=dict(
color=plotting_cfg['color_schema']['red']
),
fillcolor=adjust_opacity(plotting_cfg['color_schema']['red'], 0.3),
fill='tozeroy',
name='Drawdown'
)
), kwargs)
drawdown = self.drawdown(group_by=group_by)
drawdown = self.select_one_from_obj(drawdown, self.wrapper.regroup(group_by), column=column)
fig = drawdown.vbt.plot(**kwargs)
x_domain = get_domain(xref, fig)
fig.add_shape(**merge_dicts(dict(
type='line',
line=dict(
color='gray',
dash="dash",
),
xref="paper",
yref=yref,
x0=x_domain[0],
y0=0,
x1=x_domain[1],
y1=0
), hline_shape_kwargs))
yaxis = 'yaxis' + yref[1:]
fig.layout[yaxis]['tickformat'] = '%'
return fig
def plot_gross_exposure(self,
column: tp.Optional[tp.Label] = None,
group_by: tp.GroupByLike = None,
direction: str = 'both',
xref: str = 'x',
yref: str = 'y',
hline_shape_kwargs: tp.KwargsLike = None,
**kwargs) -> tp.BaseFigure: # pragma: no cover
"""Plot one column/group of gross exposure.
Args:
column (str): Name of the column/group to plot.
group_by (any): Group or ungroup columns. See `vectorbt.base.column_grouper.ColumnGrouper`.
direction (Direction): See `vectorbt.portfolio.enums.Direction`.
xref (str): X coordinate axis.
yref (str): Y coordinate axis.
hline_shape_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Figure.add_shape` for zeroline.
**kwargs: Keyword arguments passed to `vectorbt.generic.accessors.GenericSRAccessor.plot_against`.
"""
from vectorbt._settings import settings
plotting_cfg = settings['plotting']
kwargs = merge_dicts(dict(
trace_kwargs=dict(
line=dict(
color=plotting_cfg['color_schema']['pink']
),
name='Exposure'
),
pos_trace_kwargs=dict(
fillcolor=adjust_opacity(plotting_cfg['color_schema']['orange'], 0.3)
),
neg_trace_kwargs=dict(
fillcolor=adjust_opacity(plotting_cfg['color_schema']['pink'], 0.3)
),
other_trace_kwargs='hidden'
), kwargs)
gross_exposure = self.gross_exposure(direction=direction, group_by=group_by)
gross_exposure = self.select_one_from_obj(gross_exposure, self.wrapper.regroup(group_by), column=column)
fig = gross_exposure.vbt.plot_against(1, **kwargs)
x_domain = get_domain(xref, fig)
fig.add_shape(**merge_dicts(dict(
type='line',
line=dict(
color='gray',
dash="dash",
),
xref="paper",
yref=yref,
x0=x_domain[0],
y0=1,
x1=x_domain[1],
y1=1
), hline_shape_kwargs))
return fig
def plot_net_exposure(self,
column: tp.Optional[tp.Label] = None,
group_by: tp.GroupByLike = None,
xref: str = 'x',
yref: str = 'y',
hline_shape_kwargs: tp.KwargsLike = None,
**kwargs) -> tp.BaseFigure: # pragma: no cover
"""Plot one column/group of net exposure.
Args:
column (str): Name of the column/group to plot.
group_by (any): Group or ungroup columns. See `vectorbt.base.column_grouper.ColumnGrouper`.
xref (str): X coordinate axis.
yref (str): Y coordinate axis.
hline_shape_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Figure.add_shape` for zeroline.
**kwargs: Keyword arguments passed to `vectorbt.generic.accessors.GenericSRAccessor.plot_against`.
"""
from vectorbt._settings import settings
plotting_cfg = settings['plotting']
kwargs = merge_dicts(dict(
trace_kwargs=dict(
line=dict(
color=plotting_cfg['color_schema']['pink']
),
name='Exposure'
),
pos_trace_kwargs=dict(
fillcolor=adjust_opacity(plotting_cfg['color_schema']['pink'], 0.3)
),
neg_trace_kwargs=dict(
fillcolor=adjust_opacity(plotting_cfg['color_schema']['orange'], 0.3)
),
other_trace_kwargs='hidden'
), kwargs)
net_exposure = self.net_exposure(group_by=group_by)
net_exposure = self.select_one_from_obj(net_exposure, self.wrapper.regroup(group_by), column=column)
fig = net_exposure.vbt.plot_against(0, **kwargs)
x_domain = get_domain(xref, fig)
fig.add_shape(**merge_dicts(dict(
type='line',
line=dict(
color='gray',
dash="dash",
),
xref="paper",
yref=yref,
x0=x_domain[0],
y0=0,
x1=x_domain[1],
y1=0
), hline_shape_kwargs))
return fig
@property
def plots_defaults(self) -> tp.Kwargs:
"""Defaults for `Portfolio.plot`.
Merges `vectorbt.generic.plots_builder.PlotsBuilderMixin.plots_defaults` and
`portfolio.plots` from `vectorbt._settings.settings`."""
from vectorbt._settings import settings
returns_cfg = settings['returns']
portfolio_plots_cfg = settings['portfolio']['plots']
return merge_dicts(
PlotsBuilderMixin.plots_defaults.__get__(self),
dict(
settings=dict(
year_freq=returns_cfg['year_freq'],
trades_type=self.trades_type
)
),
portfolio_plots_cfg
)
_subplots: tp.ClassVar[Config] = Config(
dict(
orders=dict(
title="Orders",
yaxis_kwargs=dict(title="Price"),
check_is_not_grouped=True,
plot_func='orders.plot',
tags=['portfolio', 'orders']
),
trades=dict(
title="Trades",
yaxis_kwargs=dict(title="Price"),
check_is_not_grouped=True,
plot_func='trades.plot',
tags=['portfolio', 'trades']
),
trade_pnl=dict(
title="Trade PnL",
yaxis_kwargs=dict(title="Trade PnL"),
check_is_not_grouped=True,
plot_func='trades.plot_pnl',
tags=['portfolio', 'trades']
),
asset_flow=dict(
title="Asset Flow",
yaxis_kwargs=dict(title="Asset flow"),
check_is_not_grouped=True,
plot_func='plot_asset_flow',
pass_add_trace_kwargs=True,
tags=['portfolio', 'assets']
),
cash_flow=dict(
title="Cash Flow",
yaxis_kwargs=dict(title="Cash flow"),
plot_func='plot_cash_flow',
pass_add_trace_kwargs=True,
tags=['portfolio', 'cash']
),
assets=dict(
title="Assets",
yaxis_kwargs=dict(title="Assets"),
check_is_not_grouped=True,
plot_func='plot_assets',
pass_add_trace_kwargs=True,
tags=['portfolio', 'assets']
),
cash=dict(
title="Cash",
yaxis_kwargs=dict(title="Cash"),
plot_func='plot_cash',
pass_add_trace_kwargs=True,
tags=['portfolio', 'cash']
),
asset_value=dict(
title="Asset Value",
yaxis_kwargs=dict(title="Asset value"),
plot_func='plot_asset_value',
pass_add_trace_kwargs=True,
tags=['portfolio', 'assets', 'value']
),
value=dict(
title="Value",
yaxis_kwargs=dict(title="Value"),
plot_func='plot_value',
pass_add_trace_kwargs=True,
tags=['portfolio', 'value']
),
cum_returns=dict(
title="Cumulative Returns",
yaxis_kwargs=dict(title="Cumulative returns"),
plot_func='plot_cum_returns',
pass_hline_shape_kwargs=True,
pass_add_trace_kwargs=True,
pass_xref=True,
pass_yref=True,
tags=['portfolio', 'returns']
),
drawdowns=dict(
title="Drawdowns",
yaxis_kwargs=dict(title="Value"),
plot_func='plot_drawdowns',
pass_add_trace_kwargs=True,
pass_xref=True,
pass_yref=True,
tags=['portfolio', 'value', 'drawdowns']
),
underwater=dict(
title="Underwater",
yaxis_kwargs=dict(title="Drawdown"),
plot_func='plot_underwater',
pass_add_trace_kwargs=True,
tags=['portfolio', 'value', 'drawdowns']
),
gross_exposure=dict(
title="Gross Exposure",
yaxis_kwargs=dict(title="Gross exposure"),
plot_func='plot_gross_exposure',
pass_add_trace_kwargs=True,
tags=['portfolio', 'exposure']
),
net_exposure=dict(
title="Net Exposure",
yaxis_kwargs=dict(title="Net exposure"),
plot_func='plot_net_exposure',
pass_add_trace_kwargs=True,
tags=['portfolio', 'exposure']
)
),
copy_kwargs=dict(copy_mode='deep')
)
    # `Portfolio.plot` is provided by the plots builder and driven by `_subplots`
    plot = PlotsBuilderMixin.plots

    @property
    def subplots(self) -> Config:
        """Subplots supported by `Portfolio.plot`."""
        return self._subplots
# Rebuild the auto-generated API docs so they reflect the class-level
# `_metrics`/`_subplots` configs defined above
Portfolio.override_metrics_doc(__pdoc__)
Portfolio.override_subplots_doc(__pdoc__)
__pdoc__['Portfolio.plot'] = "See `vectorbt.generic.plots_builder.PlotsBuilderMixin.plots`."
| 42.801199 | 138 | 0.579587 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.