code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
from typing import *
from pymysql import ProgrammingError
class SqlSyntaxError(ProgrammingError):
    """Base error for SQL syntax problems, carrying the offending line number."""

    def __init__(self, error_msg: str, line: int):
        message = f"Syntax Error: \n {error_msg} at line {line}"
        super().__init__(message)
class EmptySqlError(SqlSyntaxError):
    """Raised when the supplied SQL string is empty or contains no statements."""

    def __init__(self):
        # Line 1 is reported because there is no content to point at.
        super().__init__("No SQL detected", 1)
class SqlIsTooShortError(SqlSyntaxError):
    """Raised when the SQL has fewer lines than the expected minimum of three."""

    def __init__(self):
        # Message typo fixed: "to short" -> "too short".
        super().__init__("SQL seems too short, at least 3 lines?", 1)
class MissingCreateTableSqlError(SqlSyntaxError):
    """Raised when the SQL does not contain a 'CREATE TABLE' statement."""

    def __init__(self):
        super().__init__("Missing 'CREATE TABLE'", 1)
class MissingIfNoExistsSqlError(SqlSyntaxError):
    """Raised when a CREATE TABLE statement lacks the IF NOT EXISTS clause.

    CreateSqlFormatter catches this error and inserts the clause itself,
    so user code rarely sees it.
    """

    def __init__(self):
        # Message fixed to name the actual SQL clause ("IF NOT EXISTS",
        # not "If No Exists") that the formatter inserts.
        super().__init__("Missing 'IF NOT EXISTS'", 1)
class StartBodyParenthesesSqlError(SqlSyntaxError):
    """Raised when the opening '(' of the table body is not at the end of line 1."""

    def __init__(self, line_detail):
        # line_detail: the text of line 1, shown so the user can append '('.
        super().__init__(f"Body parentheses '(' should put in end of line 1, "
                         f"\nplease adjust to '{line_detail}(' ", 1)
class EndBodyParenthesesSqlError(SqlSyntaxError):
    """Raised when the closing ')' of the table body is not at the front of its line."""

    def __init__(self, line_detail, line):
        # line_detail: the text of the offending line, shown with the suggested fix.
        super().__init__(
            f"Body parentheses ')' should put in front of end line,"
            f"\nplease adjust to '({line_detail}'", line)
class ColumnInvalidSqlError(SqlSyntaxError):
    """Raised for a body line that is neither a column nor a key definition."""

    def __init__(self, column_detail, line):
        # column_detail: the rejected line; valid lines start with a backtick
        # (column) or with PRIMARY KEY / UNIQUE KEY (index).
        super().__init__(f"Wrong Column: {column_detail} \n"
                         f"Note: Column should starts with '`' "
                         f"or 'PRIMARY KEY' or 'UNIQUE KEY'", line)
class ClassSqlCommentError(ProgrammingError):
    """Raised when the SQL embedded in a class's comment cannot be processed."""

    def __init__(self, cls: type, exp: Exception):
        # Annotation corrected: ClassVar declares class attributes; a class
        # object itself is annotated as ``type``.
        # Fully qualified class name for an unambiguous error message.
        class_name = f"{cls.__module__}.{cls.__name__}"
        super().__init__(f"Class sql comment error {class_name} | {exp}")
class ClassSqlExecuteError(ProgrammingError):
    """Raised when executing the SQL attached to a class fails."""

    def __init__(self, cls: type, exp: Exception):
        # Annotation corrected: ClassVar declares class attributes; a class
        # object itself is annotated as ``type``.
        class_name = f"{cls.__module__}.{cls.__name__}"
        super().__init__(f"Class sql execute error, "
                         f"class_name: {class_name} | {exp}")
from typing import *
from rich_torndb.utils.sql_invalid_errors import MissingIfNoExistsSqlError
from rich_torndb.utils.sql_valid_checker import SqlValidChecker, CreateSqlValidChecker
class SqlFormatter(object):
    """Normalizes a raw SQL string (whitespace cleanup) and validates it."""

    def __init__(self, sql):
        # Treat None as an empty SQL string so string operations are safe.
        self.sql = sql if sql is not None else ""
        self.formatted = False

    def _ensure_formatted(self):
        """Run format() on first use; later calls are no-ops."""
        if not self.formatted:
            self.format()

    def remove_extra_space(self):
        """Collapse runs of spaces to single spaces and strip every line.

        Returns:
            str: the cleaned SQL (also stored back on ``self.sql``).
        """
        # Bug fix: the loop previously replaced a single space with a single
        # space, which never terminates; collapse *double* spaces instead.
        while "  " in self.sql:
            self.sql = self.sql.replace("  ", " ")
        lines = [line.strip() for line in self.sql.split("\n")]
        self.sql = "\n".join(lines).strip()
        return self.sql

    def __check_sql_valid(self):
        # Validation is case-insensitive: check an upper-cased copy so the
        # original casing of self.sql is preserved.
        temp_sql = self.sql.upper()
        SqlValidChecker(temp_sql).check()

    def format(self):
        """Clean up whitespace, validate the SQL, and mark it formatted."""
        self.remove_extra_space()
        self.__check_sql_valid()
        self.formatted = True
        return self.sql
class CreateSqlFormatter(SqlFormatter):
    """Formatter for CREATE TABLE statements.

    Besides the base cleanup it guarantees an IF NOT EXISTS clause and can
    derive ALTER TABLE statements for each column and index of the table.
    """

    ADD_COLUMN_TEMPLATE = "ALTER TABLE `{table_name}` ADD COLUMN {column_sql}"
    ADD_INDEX_TEMPLATE = "ALTER TABLE `{table_name}` ADD {index_sql}"

    def __check_sql_valid(self):
        temp_sql = self.sql.upper()
        try:
            CreateSqlValidChecker(temp_sql).check()
        except MissingIfNoExistsSqlError:
            # Insert the missing clause right after "CREATE TABLE" and
            # re-validate the repaired statement.
            c_idx = temp_sql.index("CREATE TABLE")
            c_idx2 = c_idx + len("CREATE TABLE")
            c_idx3 = c_idx2 + 1
            self.sql = f"{self.sql[:c_idx2]} IF NOT EXISTS {self.sql[c_idx3:]}"
            self.__check_sql_valid()

    def get_table_name(self):
        """Return the backtick-quoted table name from the first line."""
        self._ensure_formatted()
        line = self.sql.split("\n")[0]
        return line[line.index("`") + 1: line.rindex("`")]

    def format(self):
        """Run the base cleanup/validation, then the CREATE-specific check."""
        super(CreateSqlFormatter, self).format()
        # The base class ran its own (name-mangled) check; this runs the
        # CREATE-specific check which can repair a missing IF NOT EXISTS.
        self.__check_sql_valid()
        self.formatted = True
        return self.sql

    def get_add_columns_sql_dict(self) -> Dict[str, str]:
        """Map each column name to an ALTER TABLE ... ADD COLUMN statement."""
        self._ensure_formatted()
        sql_lines = self.sql.split("\n")
        # Body lines sit between the CREATE line and the closing ')'; column
        # definitions are the ones starting with a backtick.
        columns_sql = [sql for sql in sql_lines[1:-1] if sql.startswith("`")]
        columns_sql_dict = dict()
        table_name = self.get_table_name()
        for column_sql in columns_sql:
            if column_sql.endswith(","):
                column_sql = column_sql[0:-1]
            column = column_sql.split(" ")[0].replace("`", "")
            columns_sql_dict[column] = self.ADD_COLUMN_TEMPLATE.format(
                table_name=table_name, column_sql=column_sql)
        return columns_sql_dict

    def get_add_index_sql_list(self) -> List[str]:
        """List ALTER TABLE ... ADD statements for every key/index line."""
        self._ensure_formatted()
        sql_lines = self.sql.split("\n")
        columns_sql = sql_lines[2:-1]
        indexes_sql = [sql for sql in columns_sql
                       if sql.upper().startswith(
                           ("UNIQUE KEY", "KEY", "PRIMARY KEY"))]
        add_indexes_sql = []
        table_name = self.get_table_name()
        for index_sql in indexes_sql:
            if index_sql.endswith(","):
                index_sql = index_sql[0:-1]
            add_indexes_sql.append(self.ADD_INDEX_TEMPLATE.format(
                table_name=table_name, index_sql=index_sql))
        return add_indexes_sql
from __future__ import annotations
from typing import Any, Callable, Dict, List, Optional, Type, Union, Sequence, Tuple
import click
from click.core import Context, Parameter
from typer.core import TyperCommand, TyperGroup
from .formatting import RichHelpFormatter
def _rich_typer_format_banner(
self: click.core.Command,
ctx: Context,
formatter: RichHelpFormatter
) -> None:
if self.banner:
formatter.write_banner(self.banner, self.banner_justify)
def _rich_typer_format_options(
self: click.core.Command,
ctx: Context,
formatter: RichHelpFormatter
) -> None:
args = []
opts = []
for param in self.get_params(ctx):
rv = param.get_help_record(ctx)
if rv is not None:
if param.param_type_name == "argument":
args.append(rv)
elif param.param_type_name == "option":
opts.append(rv)
if args:
with formatter.section("Arguments") as table:
formatter.add_params(args, table)
if opts:
with formatter.section("Options") as table:
formatter.add_params(opts, table)
class RichContext(click.core.Context):
    """Click context whose help output goes through RichHelpFormatter."""

    # click instantiates this class when it needs a help formatter.
    formatter_class: Type["RichHelpFormatter"] = RichHelpFormatter
class RichCommand(TyperCommand):
    """Typer command that renders its help screen with rich formatting.

    Adds rich-only options on top of TyperCommand: an optional banner,
    a custom usage string, and a colour blend for the epilog.
    """

    # Help rendering goes through RichContext -> RichHelpFormatter.
    context_class: Type["Context"] = RichContext

    def __init__(
        self,
        name: Optional[str],
        context_settings: Optional[Dict[str, Any]] = None,
        callback: Optional[Callable[..., Any]] = None,
        params: Optional[List["Parameter"]] = None,
        help: Optional[str] = None,
        epilog: Optional[str] = None,
        epilog_blend: Optional[Tuple[Tuple[int, int, int],
                                     Tuple[int, int, int]]] = None,
        short_help: Optional[str] = None,
        banner: Optional[str] = None,
        banner_justify: Optional[str] = 'default',
        usage: Optional[str] = None,
        options_metavar: Optional[str] = "[OPTIONS]",
        add_help_option: bool = True,
        no_args_is_help: bool = False,
        hidden: bool = False,
        deprecated: bool = False,
    ) -> None:
        # Rich-only attributes are stored locally; everything else is
        # forwarded unchanged to TyperCommand.
        self.banner = banner
        self.banner_justify = banner_justify
        self.epilog_blend = epilog_blend
        self.usage = usage
        super().__init__(
            name=name,
            context_settings=context_settings,
            callback=callback,
            params=params,
            help=help,
            epilog=epilog,
            short_help=short_help,
            options_metavar=options_metavar,
            add_help_option=add_help_option,
            no_args_is_help=no_args_is_help,
            hidden=hidden,
            deprecated=deprecated
        )

    def format_help(self, ctx: "Context", formatter: RichHelpFormatter) -> None:
        """Render all help sections in display order."""
        self.format_banner(ctx, formatter)
        self.format_usage(ctx, formatter)
        self.format_help_text(ctx, formatter)
        self.format_options(ctx, formatter)
        self.format_epilog(ctx, formatter)

    def format_banner(self, ctx: "Context", formatter: RichHelpFormatter) -> None:
        _rich_typer_format_banner(self, ctx=ctx, formatter=formatter)

    def format_usage(self, ctx: "Context", formatter: RichHelpFormatter) -> None:
        # A custom usage string replaces click's generated usage entirely.
        if self.usage:
            formatter.write(self.usage)
            formatter.write("\n")
        else:
            super().format_usage(ctx, formatter)

    def format_options(self, ctx: "Context", formatter: RichHelpFormatter) -> None:
        _rich_typer_format_options(self, ctx=ctx, formatter=formatter)

    def format_epilog(self, ctx: "Context", formatter: RichHelpFormatter) -> None:
        if self.epilog:
            formatter.write_epilog(self.epilog, self.epilog_blend)
class RichGroup(TyperGroup):
    """Typer group that renders banner, usage, options, subcommands and
    epilog with rich formatting."""

    # Help rendering goes through RichContext -> RichHelpFormatter.
    context_class: Type["Context"] = RichContext

    def __init__(
        self,
        name: Optional[str] = None,
        commands: Optional[Union[Dict[str, RichCommand],
                                 Sequence[RichCommand]]] = None,
        **attrs: Any,
    ) -> None:
        # Rich-only options are popped so the base class never sees them.
        self.banner = attrs.pop("banner", None)
        self.banner_justify = attrs.pop("banner_justify", "default")
        self.epilog_blend = attrs.pop("epilog_blend", None)
        self.usage = attrs.pop("usage", None)
        super().__init__(name=name, commands=commands, **attrs)

    def format_help(self, ctx: "Context", formatter: RichHelpFormatter) -> None:
        """Render all help sections in display order."""
        self.format_banner(ctx, formatter)
        self.format_usage(ctx, formatter)
        self.format_help_text(ctx, formatter)
        self.format_options(ctx, formatter)
        self.format_epilog(ctx, formatter)

    def format_banner(self, ctx: "Context", formatter: RichHelpFormatter) -> None:
        _rich_typer_format_banner(self, ctx, formatter)

    def format_usage(self, ctx: "Context", formatter: RichHelpFormatter) -> None:
        # A custom usage string replaces click's generated usage entirely.
        if self.usage:
            formatter.write(self.usage)
            formatter.write("\n")
        else:
            super().format_usage(ctx, formatter)

    def format_commands(self, ctx: Context, formatter: RichHelpFormatter) -> None:
        """Extra format methods for multi methods that adds all the commands
        after the options.
        """
        commands = []
        for subcommand in self.list_commands(ctx):
            cmd = self.get_command(ctx, subcommand)
            # What is this, the tool lied about a command. Ignore it
            if cmd is None or cmd.hidden:
                continue
            commands.append((subcommand, cmd))

        if not commands:
            return
        # allow for 3 times the default spacing
        limit = formatter.width - 6 - max(len(name) for name, _ in commands)
        # Each command contributes exactly one row, so no emptiness
        # re-check is needed (the original had a redundant `if rows:`).
        rows = [(name, cmd.get_short_help_str(limit)) for name, cmd in commands]
        with formatter.section("Commands") as table:
            formatter.add_params(rows, table)

    def format_options(self, ctx: "Context", formatter: RichHelpFormatter) -> None:
        _rich_typer_format_options(self, ctx=ctx, formatter=formatter)
        self.format_commands(ctx, formatter)

    def format_epilog(self, ctx: "Context", formatter: RichHelpFormatter) -> None:
        if self.epilog:
            formatter.write_epilog(self.epilog, self.epilog_blend)
from __future__ import annotations
import re
from contextlib import contextmanager
from typing import Iterator, List, Optional, Tuple, Dict
from click import HelpFormatter as ClickHelpFormatter
from rich.console import Console, JustifyMethod
from rich.highlighter import RegexHighlighter
from rich.panel import Panel
from rich.table import Table
from rich.text import Text
from rich.theme import Theme
from .utils import blend_text
class RichHelpFormatter(ClickHelpFormatter):
    """Click help formatter that renders help output through rich.

    Adds regex highlighters for option/argument names and trailing
    requirement hints, and prints sections as rich Panels containing Tables.
    """

    def __init__(
        self,
        indent_increment: int = 2,
        width: Optional[int] = None,
        max_width: Optional[int] = None,
    ) -> None:
        super().__init__(indent_increment=indent_increment, width=width, max_width=max_width)
        self.highlighters = self.init_highlighters()
        self.console = self.init_console()

    def init_highlighters(self) -> Dict[str, RegexHighlighter]:
        """Build the regex highlighters used for option and help text."""

        class OptionHighlighter(RegexHighlighter):
            # Short switches (-x), long options (--name METAVAR), and bare
            # argument/command names.
            highlights = [
                r"(?P<switch>^\-\w$)",
                r"(?P<option>^\-\-[\w\-]+)(?P<metavar>\s.*$)?",
                r"(?P<args_and_cmds>^[\w]+$)",
            ]

        class HelpHighlighter(RegexHighlighter):
            highlights = [
                # Match the last parenthesised group on the line, where the
                # opening parenthesis is preceded by a space.
                r"(?P<help_require>(\()(?!.*\2)(?<=\s\2)(.+)\)$)",
            ]

        return_highlighters = {
            "opt": OptionHighlighter(), "help": HelpHighlighter()}
        return return_highlighters

    def init_console(self) -> Console:
        """Create the rich Console with the styles used by the highlighters."""
        console = Console(
            theme=Theme(
                {
                    "option": "bold cyan",
                    "switch": "bold green",
                    "metavar": "bold yellow",
                    "help_require": "dim",
                    "args_and_cmds": "yellow"
                }
            ),
            # highlighter=self.highlighter,
        )
        return console

    @contextmanager
    def section(self, name: str) -> Iterator[Table]:
        """Yield a Table; on exit it is rendered inside a titled Panel.

        Annotation corrected from Iterator[None]: the context manager
        yields the table for callers to fill in.
        """
        options_table = Table(highlight=True, box=None, show_header=False)
        yield options_table
        self.write(Panel(
            options_table, border_style="dim", title=name, title_align="left"
        ))

    def add_params(self, params: List[Tuple[str, str]], table: Table) -> None:
        """Add (name, help) rows to a section table with highlighting."""
        for name, help in params:
            # "-s, --long" records are split into the short and long forms.
            arg_list = name.split(',')
            if len(arg_list) == 2:
                opt1 = self.highlighters['opt'](arg_list[0])
                opt2 = self.highlighters['opt'](arg_list[1].strip())
            else:
                opt1 = Text("")
                opt2 = self.highlighters['opt'](arg_list[0])

            help = self.escape_text(help)
            help = Text.from_markup(help, emoji=False)
            table.add_row(opt1, opt2, self.highlighters['help'](help))

    def escape_text(self, text: str) -> str:
        """Convert a trailing [hint] into (hint) so rich markup ignores it."""
        match = re.search(r"(\[)(?!.*\1)(?<=\s\1)(.+)\]$",
                          text)  # match the last square-bracket group preceded by a space
        if match:
            text = text.replace("[%s]" % match.group(2),
                                "(%s)" % match.group(2))
        return text

    def write_usage(
        self, prog: str, args: str = "", prefix: Optional[str] = None
    ) -> None:
        """Write the usage line as a two-column table (prog, args)."""
        # ! if this gets too long it causes a line wrap
        if not prefix:
            prefix = "[dim]Usage: [/]"
        usage_prefix = f"{prefix}{prog}"
        table = Table(highlight=True, box=None, show_header=False)
        table.add_row(usage_prefix, f"[yellow]{args}[/]")
        self.write(table)
        self.write("\n")

    def write_banner(self, banner: str, justify: Optional[JustifyMethod] = 'default') -> None:
        """Write a markup banner with the given justification."""
        self.write(Text.from_markup(banner, emoji=False), justify)

    def write_epilog(
        self, epilog: str,
        blend: Optional[Tuple[Tuple[int, int, int],
                              Tuple[int, int, int]]] = None
    ) -> None:
        """Write the epilog right-aligned, colour-blended unless blend is False."""
        if blend is False:
            self.write(epilog, "right")
            return
        if blend is None:
            # Default blue-to-magenta gradient.
            blend = ((32, 32, 255), (255, 32, 255))
        self.write(blend_text(epilog, blend), "right")

    def write(
        self, string: str | Text | Panel,
        justify: Optional[JustifyMethod] = None
    ) -> None:
        """Print a renderable through the rich console; bare "\\n" prints a blank line."""
        if string == "\n":
            self.console.print()
        else:
            self.console.print(string, justify=justify)
from rich.progress import Progress, ProgressColumn, BarColumn, Text
def ema(x, mu=None, alpha=0.3):
    """
    Exponential moving average: smoothing to give progressively lower
    weights to older values.

    Parameters
    ----------
    x : float
        New value to include in EMA.
    mu : float, optional
        Previous EMA value; when None, x is returned unchanged.
    alpha : float, optional
        Smoothing factor in range [0, 1], [default: 0.3].
        Increase to give more weight to recent values.
        Ranges from 0 (yields mu) to 1 (yields x).
    """
    if mu is None:
        return x
    return alpha * x + (1 - alpha) * mu
class SmartTimeRemainingColumn(ProgressColumn):
    """Progress column rendering EMA-smoothed elapsed/remaining time."""

    # Refresh at most twice per second to avoid flicker.
    max_refresh = 0.5
    avg_elapsed_seconds = 0
    avg_remaining_seconds = 0

    def __init__(self, *args, **kwargs):
        # Bug fix: the previous get()-then-unconditional-del raised KeyError
        # whenever "smoothing" was not supplied; pop() with a default both
        # reads and removes it safely.
        self.smoothing = kwargs.pop("smoothing", 0.3)
        super().__init__(*args, **kwargs)

    def format_seconds(self, seconds):
        """Format a duration as HHh:MMm (>= 1 hour) or MMm:SSs."""
        hours, remainder = divmod(seconds, 3600)
        minutes, seconds = divmod(remainder, 60)
        if hours:
            return f"{hours:>02.0f}h:{minutes:>02.0f}m"
        return f"{minutes:>02.0f}m:{seconds:>02.0f}s"

    def render(self, task):
        """Render "elapsed<remaining", EMA-smoothed; placeholders while unknown."""
        remaining = task.time_remaining
        if remaining is None:
            return Text("--m:--s<--m:--s", style="progress.remaining")
        self.avg_elapsed_seconds = ema(task.elapsed, self.avg_elapsed_seconds, self.smoothing)
        self.avg_remaining_seconds = ema(remaining, self.avg_remaining_seconds, self.smoothing)
        elapsed_delta = self.format_seconds(self.avg_elapsed_seconds)
        remaining_delta = self.format_seconds(self.avg_remaining_seconds)
        return Text(f"{elapsed_delta}<{remaining_delta}", style="progress.remaining")
class ItemsPerSecondColumn(ProgressColumn):
    """Progress column rendering the EMA-smoothed processing rate."""

    # Refresh at most twice per second to avoid flicker.
    max_refresh = 0.5
    avg_speed = 0

    def __init__(self, *args, **kwargs):
        # pop() consumes each option and supplies its default in one step
        # (the original get()-then-pop() did the lookup twice).
        self.smoothing = kwargs.pop("smoothing", 0.3)
        self.unit = kwargs.pop("unit", "items")
        super().__init__(*args, **kwargs)

    def render(self, task):
        """Render the smoothed speed, or a zero rate while speed is unknown."""
        speed = task.speed
        if speed is None:
            return Text(f"0{self.unit}/s", style="progress.remaining")
        self.avg_speed = ema(speed, self.avg_speed, self.smoothing)
        return Text(f"{self.avg_speed:>3.2f}{self.unit}/s", style="progress.remaining")
class GenericProgress(Progress):
    """Progress bar preconfigured with percentage, counts, a smoothed ETA
    column and an items-per-second column."""

    def __init__(self, *args, **kwargs):
        # pop() consumes each rich_utils-specific option with its default
        # (the original get()-then-pop() pattern did each lookup twice).
        self.smoothing = kwargs.pop("smoothing", 0.3)
        self.unit = kwargs.pop("unit", "items")
        self.bar_width = kwargs.pop("bar_width", 56)
        if not args:
            # No explicit columns given: build the default layout.
            args = ("[progress.description]{task.description}", BarColumn(bar_width=self.bar_width),
                    "[progress.percentage]{task.percentage:>3.0f}%",
                    "[progress.percentage]({task.completed}/{task.total})",
                    SmartTimeRemainingColumn(smoothing=self.smoothing),
                    ItemsPerSecondColumn(smoothing=self.smoothing, unit=self.unit))
        super().__init__(*args, **kwargs)
from typing import List, Optional
from rich.segment import Segment
from rich.style import Style
def render_bar(
    height: int = 25,
    size: float = 100,
    window_size: float = 25,
    position: float = 0,
    bar_style: Optional[Style] = None,
    back_style: Optional[Style] = None,
    ascii_only: bool = False,
    vertical: bool = True,
) -> List[Segment]:
    """Render a scrollbar thumb over a track as a list of rich Segments.

    Maps a window of ``window_size`` units starting at ``position`` out of
    ``size`` total units onto ``height`` character cells.  Half-block
    glyphs at the thumb's ends provide sub-cell precision.
    """
    # Glyph selection: a full-cell character plus half-cell start/end caps.
    if vertical:
        if ascii_only:
            solid = "|"
            half_start = "|"
            half_end = "|"
        else:
            solid = "┃"
            half_start = "╻"
            half_end = "╹"
    else:
        if ascii_only:
            solid = "-"
            half_start = "-"
            half_end = "-"
        else:
            solid = "━"
            half_start = "╺"
            half_end = "╸"

    _bar_style = bar_style or Style.parse("bright_magenta")
    _back_style = back_style or Style.parse("#555555")

    _Segment = Segment
    start_bar_segment = _Segment(half_start, _bar_style)
    end_bar_segment = _Segment(half_end, _bar_style)
    bar_segment = _Segment(solid, _bar_style)

    # NOTE(review): both track caps use half_end; half_start looks intended
    # for start_back_segment — confirm against the desired visual.
    start_back_segment = _Segment(half_end, _back_style)
    end_back_segment = _Segment(half_end, _back_style)
    back_segment = _Segment(solid, _back_style)

    # Start from an all-track bar, then overlay the thumb cells.
    segments = [back_segment] * height
    step_size = size / height

    start = position / step_size
    end = (position + window_size) / step_size

    start_index = int(start)
    end_index = int(end)
    bar_height = (end_index - start_index) + 1
    # NOTE(review): this slice assignment inserts bar_height elements into a
    # span of end_index - start_index cells, so the result can grow to
    # height + 1 entries (e.g. when the window spans the full size) —
    # confirm whether callers rely on len(result) == height.
    segments[start_index:end_index] = [bar_segment] * bar_height

    # Swap in half-block caps based on the fractional part of the thumb's
    # start and end positions.
    sub_position = start % 1.0
    if sub_position >= 0.5:
        segments[start_index] = start_bar_segment
    elif start_index:
        segments[start_index - 1] = end_back_segment

    sub_position = end % 1.0
    if sub_position < 0.5:
        segments[end_index] = end_bar_segment
    elif end_index + 1 < len(segments):
        segments[end_index + 1] = start_back_segment

    return segments
if __name__ == "__main__":
    # Ad-hoc demo: render a horizontal scrollbar to the terminal.
    from rich.console import Console
    from rich.segment import Segments
    console = Console()
    bar = render_bar(height=20, position=10, vertical=False, ascii_only=False)
    console.print(Segments(bar, new_lines=False))
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """

    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        self.mean = 1.0 * sum(self.data) / len(self.data)
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction: divide by n - 1 for a sample.
        n = len(self.data) - 1 if sample else len(self.data)
        mean = self.calculate_mean()
        self.stdev = math.sqrt(sum((d - mean) ** 2 for d in self.data) / n)
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces=50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)

        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        x = []
        y = []
        # calculate the x values to visualize
        for i in range(n_spaces):
            tmp = min_range + interval * i
            x.append(tmp)
            y.append(self.pdf(tmp))

        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)

        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')

        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # Bug fix: label the second subplot (the original re-labelled axes[0]).
        axes[1].set_ylabel('Density')
        plt.show()

        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # Variances add for independent Gaussians.
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
import matplotlib as mpl
import palettable
def figsze(hscale,
           vscale=0.618034,
           fig_width_pt=504.0):
    """Compute a [width, height] figure size in inches.

    Get fig_width_pt by inserting \\the\\textwidth into the LaTeX document.
    hscale is the fraction of the text width you want; vscale is the
    fraction of the resulting width used as the height (defaults to the
    golden ratio).
    """
    pt_to_inch = 1.0 / 72.27  # LaTeX points per inch
    width = fig_width_pt * pt_to_inch * hscale
    return [width, width * vscale]
# Matplotlib rcParams preset: LaTeX text rendering via pgf/pdflatex with
# publication-style axes, ticks and fonts.
pgf_with_latex = {  # setup matplotlib to use latex for output
    "axes.linewidth": 1.5,  # width of box, 2 is too wide, 1 is too narrow
    "pgf.texsystem": "pdflatex",  # change this if using xetex or lautex
    "text.usetex": True,  # use LaTeX to write all text
    "font.family": "serif",
    "font.serif": [],  # blank entries should cause plots to inherit fonts from the document
    "font.sans-serif": [],
    "font.monospace": [],
    "axes.labelsize": 16,  # LaTeX default is 10pt font, font size of axis text label
    "axes.labelpad": 6,  # Distance between label and axis
    "axes.formatter.limits": [-99, 99],  # use sci notation if log10 of axis range is smaller than first or larger than second.
    # GTR: Actually *don't* -- should change the axis label instead. E.g., "Flux Density (10^-17 ergs/s/cm^2)"
    # This is a hack b/c there doesn't seem to be an rcParams version of
    # axes.ticklabel_format(style='plain')
    # "axes.formatter.style":"plain", # Turn off multiplicative offsets (sci notation) to the axes [GTR: Doesn't work]
    "axes.formatter.useoffset": False,  # Turn off additive offsets to the axes
    "font.size": 16,
    "legend.fontsize": 12,  # Make the legend/label fonts a little smaller
    "xtick.labelsize": 16,  # Font size of numbers
    "ytick.labelsize": 16,
    "xtick.direction": "in",
    "ytick.direction": "in",
    "xtick.minor.visible": True,
    "ytick.minor.visible": True,
    'xtick.major.width': 1,
    'xtick.minor.width': 1,
    'ytick.major.width': 1,
    'ytick.minor.width': 1,
    'xtick.major.size': 10,  # size of tickmarks in points
    'xtick.minor.size': 5,
    'ytick.major.size': 10,
    'ytick.minor.size': 5,
    'xtick.major.pad': 8,  # distance between box and numbers
    'ytick.major.pad': 8,
    # NOTE(review): comment says 0.9 textwidth but the call uses hscale=1 —
    # confirm which is intended.
    "figure.figsize": figsze(1, 1),  # default fig size of 0.9 textwidth
    "pgf.preamble": [
        r"\usepackage[utf8x]{inputenc}",  # use utf8 fonts because your computer can handle it
        r"\usepackage[T1]{fontenc}",  # plots will be generated using this preamble
    ]
}
# Apply the preset globally on import.
mpl.rcParams.update(pgf_with_latex)

# Colour palettes from palettable (qualitative Dark2, sequential BuPu / YlOrBr).
csdark = palettable.colorbrewer.qualitative.Dark2_3.mpl_colors
cspurple = palettable.colorbrewer.sequential.BuPu_4.mpl_colors
csorange = palettable.colorbrewer.sequential.YlOrBr_5.mpl_colors
# richcontext.scholapi
[Rich Context](https://coleridgeinitiative.org/richcontext)
API integrations for federating discovery services and metadata
exchange across multiple scholarly infrastructure providers.
Development of the Rich Context knowledge graph uses this library to:
* identify dataset links to research publications
* locate open access publications
* reconcile journal references
* reconcile author profiles
* reconcile keyword taxonomy
This library has been guided by collaborative work on community
building and metadata exchange to improve Scholarly Infrastructure,
held at the *2019 Rich Context Workshop*.
## Installation
Prerequisites:
- [Python 3.x](https://www.python.org/downloads/)
- [Beautiful Soup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/)
- [Biopython.Entrez](https://biopython.org/)
- [Crossref Commons](https://gitlab.com/crossref/crossref_commons_py)
- [Dimensions CLI](https://github.com/digital-science/dimcli)
- [Requests](https://2.python-requests.org/en/master/)
- [Requests-Cache](https://github.com/reclosedev/requests-cache)
- [Selenium](https://github.com/SeleniumHQ/selenium/)
- [xmltodict](https://github.com/martinblech/xmltodict)
To install from [PyPi](https://pypi.python.org/pypi/richcontext.scholapi):
```
pip install richcontext.scholapi
```
If you install directly from this Git repo, be sure to install the
dependencies as well:
```
pip install -r requirements.txt
```
Then copy the configuration file template `rc_template.cfg` to `rc.cfg`
and populate it with your credentials.
NB: be careful not to commit the `rc.cfg` file in Git since by
definition it will contain sensitive data, e.g., your passwords.
Parameters used in the configuration file include:
| parameter | value |
| --- | --- |
| `chrome_exe_path` | path/to/chrome.exe |
| `core_apikey` | CORE API key |
| `dimensions_password` | Dimensions API password |
| `elsevier_api_key` | Elsevier API key |
| `email` | personal email address |
| `orcid_secret` | ORCID API key |
| `repec_token` | RePEc API token |
Download the [`ChromeDriver`](https://chromedriver.chromium.org/downloads)
webdriver for the `Chrome` brower to enable use of Selenium.
This will be run in a "headless" mode.
For a good (though slightly dated) tutorial for installing and testing
Selenium on Ubuntu Linux, see:
<https://christopher.su/2015/selenium-chromedriver-ubuntu/>
## Usage
```
from richcontext import scholapi as rc_scholapi
# initialize the federated API access
schol = rc_scholapi.ScholInfraAPI(config_file="rc.cfg", logger=None)
source = schol.openaire
# search parameters for example publications
title = "Deal or no deal? The prevalence and nutritional quality of price promotions among U.S. food and beverage purchases."
# run it...
if source.has_credentials():
response = source.title_search(title)
# report results
if response.message:
# error case
print(response.message)
else:
print(response.meta)
source.report_perf(response.timing)
```
## Testing
First, be sure that you're testing the source and not from an
installed library.
Then run unit tests on the APIs for which you have credentials and
generate a coverage report:
```
coverage run -m unittest discover
```
Then create GitHub issues among the submodules for any failed tests.
Also, you can generate a coverage report and upload that via:
```
coverage report
bash <(curl -s https://codecov.io/bash) -t @.cc_token
```
Test coverage reports can be viewed at
<https://codecov.io/gh/Coleridge-Initiative/RCApi>
## API Integrations
APIs used to retrieve metadata:
* *PubMed family*
+ [PubMed](https://www.ncbi.nlm.nih.gov/books/NBK25501/)
+ [EuropePMC](https://europepmc.org/RestfulWebService)
* *Scholix family*
+ [OpenAIRE](https://develop.openaire.eu/)
+ [Crossref](https://www.crossref.org/services/metadata-delivery/)
+ [DataCite](https://support.datacite.org/docs/api)
* *OA family*
+ [Unpaywall](https://unpaywall.org/products/api)
+ [dissemin](https://dissemin.readthedocs.io/en/latest/api.html)
+ [Semantic Scholar](http://api.semanticscholar.org/)
* *Misc.*
+ [RePEc](https://ideas.repec.org/api.html)
+ [NSF-PAR](https://par.nsf.gov/search/)
+ [ORCID](https://www.orcid.org/)
+ [CORE](https://core.ac.uk/services/api/)
+ [Dimensions](https://docs.dimensions.ai/dsl/api.html)
+ [SSRN](https://www.ssrn.com/)
See the coding examples in the `test.py` unit test for usage patterns
per supported API.
## Troubleshooting
* `ChromeDriver`
If you encounter an exception about the `ChromeDriver` version, for
example:
```
selenium.common.exceptions.SessionNotCreatedException: Message: session not created:
This version of ChromeDriver only supports Chrome version 78
```
Then check your instance of the *Chrome* browser to find its release
number, then go to <https://chromedriver.chromium.org/downloads> to
download the corresponding required version of `ChromeDriver`.
## Literature
For more background about *open access publications* see:
> Piwowar H, Priem J, Larivière V, Alperin JP, Matthias L, Norlander B, Farley A, West J, Haustein S. 2017.
The State of OA: A large-scale analysis of the prevalence and impact of Open Access articles
*PeerJ Preprints* 5:e3119v1
<https://doi.org/10.7287/peerj.preprints.3119v1>
## Contributions
If you'd like to contribute, please see our listings of
[*good first issues*](https://github.com/Coleridge-Initiative/RCApi/labels/good%20first%20issue).
For info about joining the AI team working on Rich Context, see
<https://github.com/Coleridge-Initiative/RCGraph/blob/master/SKILLS.md>
## Kudos
Contributors:
[@ceteri](https://github.com/ceteri),
[@IanMulvany](https://github.com/IanMulvany),
[@srand525](https://github.com/srand525),
[@ernestogimeno](https://github.com/ernestogimeno),
[@lobodemonte](https://github.com/lobodemonte),
plus many thanks for the inspiring *2019 Rich Context Workshop* notes by
[@metasj](https://github.com/metasj),
and guidance from
[@claytonrsh](https://github.com/claytonrsh),
[@Juliaingridlane](https://github.com/Juliaingridlane).
| /richcontext-scholapi-1.2.0.tar.gz/richcontext-scholapi-1.2.0/README.md | 0.606032 | 0.876264 | README.md | pypi |
from .client import RichDaddyClient
import typing
from loguru import logger
class RichDaddyMethods:
    """Synchronous wrappers for the RichDaddy game API methods."""

    def __init__(self, client: RichDaddyClient):
        """
        Create an instance.

        :param RichDaddyClient client: client object used to issue requests
        """
        self.client = client

    def usersGet(
            self,
            userIds: typing.List[str] = None,
            userVkIds: typing.List[int] = None,
            fieldsType: typing.Optional[str] = 'mainPublicFields',
            **kwargs: typing.Any) -> dict:
        """
        Get information about specific users.

        :param userIds list(str): Required if userVkIds is not given. In-game user identifiers.
        :param userVkIds list(int): Required if userIds is not given. VK user identifiers.
        :param fieldsType str: Optional. One of:
            - mainPublicFields — default; the users' main fields (first name, last name, avatar, ...)
            - publicFields — all user fields (click speed, business capital, ...)
        :return dict:
        """
        # NOTE(review): the checks below only log an error and still issue
        # the request — confirm whether they should abort instead.
        if userIds is None:
            if not list(userVkIds):
                logger.error('"userVkIds" parameter must contain: List')
            return self.client.request("users/get/", {
                'userVkIds': userVkIds,
                'fieldsType': fieldsType
            })
        elif userVkIds is None:
            if not list(userIds):
                logger.error('"userIds" parameter must contain: List')
            return self.client.request("users/get/", {
                'userIds': userIds,
                'fieldsType': fieldsType
            })
        else:
            if userIds != list(userIds) or userVkIds != list(userVkIds):
                logger.error('This parameter must contain: List')
            return self.client.request("users/get/", {
                'userIds': userIds,
                'userVkIds': userVkIds,
                'fieldsType': fieldsType
            })

    def transfersCreate(
            self,
            id: str,
            direction: str = 'Users',
            amount: int = 1000,
            dialog: bool = False,
            title: typing.Optional[str] = None,
            label: typing.Optional[str] = None,
            **kwargs: typing.Any) -> dict:
        """
        Make a transfer from your service to a user.

        :param id str: in-game user identifier
        :param direction str: Direction. Must always be "Users".
        :param amount int: transfer amount; minimum 1000
        :param dialog bool: Optional. Whether the user gets an in-game dialog
            about the new transfer.
        :param title str: dialog title, 5 to 35 characters
        :param label str: dialog description, 5 to 200 characters
        :return dict:
        """
        if dialog == True:
            return self.client.request("transfers/create/", {
                'accessToken': self.client.accessToken(),
                'to': [{'id': id,
                        'direction': direction,
                        'amount': amount
                        }],
                'dialog': {
                    'title': title,
                    'label': label
                }
            })
        else:
            return self.client.request("transfers/create/", {
                'accessToken': self.client.accessToken(),
                'to': [{'id': id,
                        'direction': direction,
                        'amount': amount
                        }]
            })

    def transfersGet(
            self,
            skip: typing.Optional[int] = 0,
            limit: typing.Optional[int] = 100,
            **kwargs: typing.Any) -> dict:
        """
        Get blocks of transfers; each block contains up to 100 transfers.

        :param skip int: Optional. Offset into the transfers.
        :param limit int: Optional. Block size limit, at most 100.
        :return dict:
        """
        return self.client.request("transfers/get/", {
            'accessToken': self.client.accessToken(),
            'skip': skip,
            'limit': limit
        })
from .client import AsyncRichDaddyClient
import asyncio
import typing
from loguru import logger
class ARichDaddyMethods:
def __init__(self, client: AsyncRichDaddyClient):
"""
Создание экземпляра
:param AsyncRichDaddyClient client: объект клиента AsyncRichDaddyClient
"""
self.client = client
async def usersGet(
self,
userIds: typing.List[str] = None,
userVkIds: typing.List[int] = None,
fieldsType: typing.Optional[str] = 'mainPublicFields',
**kwargs: typing.Any) -> dict:
"""
Получить информацию об определенных пользователях
:param userIds list(str): Обязательно если не указан userVkIds. Игровые идентификаторы пользователей.
:param userVkIds list(int): Обязательно если не указан userIds. Идентификаторы пользователей ВК.
:param fieldsType str: Необязательно. Может содержать:
- mainPublicFields — По умолчанию. Основные поля пользователей (имя, фамилия, аватарка…)
- publicFields — Все поля пользователей (скорость клика, капитал бизнесов…)
:return dict:
"""
if userIds is None:
if not list(userVkIds):
logger.error('"userVkIds" parameter must contain: List')
return await self.client.request("users/get/", {
'userVkIds': userVkIds,
'fieldsType': fieldsType
})
elif userVkIds is None:
if not list(userIds):
logger.error('"userIds" parameter must contain: List')
return await self.client.request("users/get/", {
'userIds': userIds,
'fieldsType': fieldsType
})
else:
if userIds != list(userIds) or userVkIds != list(userVkIds):
logger.error('This parameter must contain: List')
return await self.client.request("users/get/", {
'userIds': userIds,
'userVkIds': userVkIds,
'fieldsType': fieldsType
})
async def transfersCreate(
self,
id : str,
direction: str = 'Users',
amount: int = 1000,
dialog: bool = False,
title: typing.Optional[str] = None,
label: typing.Optional[str] = None,
**kwargs: typing.Any) -> dict:
"""
Сделать перевод от вашего сервиса к пользователю
:param id str: Идентификатор пользователя в игре
:param direction str: Направление. Всегда должен содержать «Users».
:param amount int: Сумма перевода. Минимум 1000
:param dialog bool: Необязателен. Объект игрового диалога у пользователя о новом переводе.
:param title str: Заголовок. От 5 до 35 символов
:param label str: Описание. От 5 до 200 символов
:return dict:
"""
if dialog == True:
return await self.client.request("transfers/create/", {
'accessToken': self.client.accessToken(),
'to': [{'id': id,
'direction': direction,
'amount' : amount
}],
'dialog': {
'title': title,
'label': label
}
})
else:
return await self.client.request("transfers/create/", {
'accessToken': self.client.accessToken(),
'to': [{'id': id,
'direction': direction,
'amount': amount
}]
})
async def transfersGet(
self,
skip: typing.Optional[int] = 0,
limit: typing.Optional[int] = 100,
**kwargs: typing.Any) -> dict:
"""
Получить блоки переводов, каждый блок содержит до 100 переводов
:param skip int: Необязательно. Смещение переводов.
:param limit int: Необязательно. Лимит блока переводов, максимум 100.
:return dict:
"""
return await self.client.request("transfers/get/", {
'accessToken': self.client.accessToken(),
'skip': skip,
'limit': limit
}) | /richdaddy_sdk-1.0.0-py3-none-any.whl/richdaddy_sdk/aiorichdaddy/methods.py | 0.52683 | 0.22251 | methods.py | pypi |
import hashlib
import hmac
import json
import logging
from typing import Dict
import requests
from celery import shared_task
from django.conf import settings
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from common.djangoapps.student.models import CourseEnrollment
from xmodule.modulestore.django import modulestore
log = logging.getLogger(__name__)
@shared_task
def sync_course_run_information_to_richie(*args, **kwargs) -> Dict[str, bool]:
"""
Synchronize an OpenEdX course run, identified by its course key, to all Richie instances.
Raises:
ValueError: when course if not found
Returns:
dict: where the key is the richie url and the value is a boolean if the synchronization
was ok.
"""
log.debug("Entering richie update course on publish")
course_id = kwargs["course_id"]
course_key = CourseKey.from_string(course_id)
course = modulestore().get_course(course_key)
if not course:
raise ValueError(
"No course found with the course_id '{}'".format(course_id))
org = course_key.org
edxapp_domain = configuration_helpers.get_value_for_org(
org, "LMS_BASE", settings.LMS_BASE
)
course_start = course.start and course.start.isoformat()
course_end = course.end and course.end.isoformat()
enrollment_start = course.enrollment_start and course.enrollment_start.isoformat()
enrollment_end = course.enrollment_end and course.enrollment_end.isoformat()
# Enrollment start date should fallback to course start date, by default Open edX uses the
# course start date for the enrollment start date when the enrollment start date isn't defined.
enrollment_start = enrollment_start or course_start
data = {
"resource_link": "https://{:s}/courses/{!s}/info".format(
edxapp_domain, course_key
),
"start": course_start,
"end": course_end,
"enrollment_start": enrollment_start,
"enrollment_end": enrollment_end,
"languages": [course.language or settings.LANGUAGE_CODE],
"enrollment_count": CourseEnrollment.objects.filter(
course_id=course_id
).count(),
"catalog_visibility": course.catalog_visibility,
}
hooks = configuration_helpers.get_value_for_org(
org,
"RICHIE_OPENEDX_SYNC_COURSE_HOOKS",
getattr(settings, "RICHIE_OPENEDX_SYNC_COURSE_HOOKS", []),
)
if not hooks:
msg = (
"No richie course hook found for organization '{}'. Please configure the "
"'RICHIE_OPENEDX_SYNC_COURSE_HOOKS' setting or as site configuration"
).format(org)
log.info(msg)
return {}
log_requests = configuration_helpers.get_value_for_org(
org,
"RICHIE_OPENEDX_SYNC_LOG_REQUESTS",
getattr(settings, "RICHIE_OPENEDX_SYNC_LOG_REQUESTS", False),
)
result = {}
for hook in hooks:
signature = hmac.new(
hook["secret"].encode("utf-8"),
msg=json.dumps(data).encode("utf-8"),
digestmod=hashlib.sha256,
).hexdigest()
richie_url = hook.get("url")
timeout = int(hook.get("timeout", 5))
try:
response = requests.post(
richie_url,
json=data,
headers={
"Authorization": "SIG-HMAC-SHA256 {:s}".format(signature)},
timeout=timeout,
)
response.raise_for_status()
result[richie_url] = True
if log_requests:
status_code = response.status_code
msg = "Synchronized the course {} to richie site {} it returned the HTTP status code {}".format(
course_key, richie_url, status_code
)
log.info(msg)
log.info(response.content)
except requests.exceptions.HTTPError as e:
status_code = response.status_code
msg = "Error synchronizing course {} to richie site {} it returned the HTTP status code {}".format(
course_key, richie_url, status_code
)
log.error(e, exc_info=True)
log.error(msg)
log.error(response.content)
result[richie_url] = False
except requests.exceptions.RequestException as e:
msg = "Error synchronizing course {} to richie site {}".format(
course_key, richie_url
)
log.error(e, exc_info=True)
log.error(msg)
result[richie_url] = False
return result | /richie_openedx_sync-1.2.0-py3-none-any.whl/richie_openedx_sync/tasks.py | 0.600188 | 0.265214 | tasks.py | pypi |
import logging
from typing import Dict
from django.core.management.base import BaseCommand
from six import text_type
from xmodule.modulestore.django import modulestore
from richie_openedx_sync.tasks import sync_course_run_information_to_richie
from opaque_keys.edx.keys import CourseKey
log = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Command that synchronizes the Open edX courses to the Richie marketing site
"""
help = (
"Synchronize courses to the Richie marketing site, by default all courses "
"or a specific course"
)
def add_arguments(self, parser):
parser.add_argument(
"--course_id",
type=str,
default=None,
help="Course id to synchronize, otherwise all courses would be sync",
)
def handle(self, *args, **kwargs):
"""
Synchronize courses to the Richie marketing site, print to console its sync progress.
"""
course_id = kwargs["course_id"]
if not course_id:
module_store = modulestore()
courses = module_store.get_courses()
course_ids = [x.id for x in courses]
else:
course_key = CourseKey.from_string(course_id)
course = modulestore().get_course(course_key)
courses = [course]
course_ids = [course_id]
course_ids_count = len(course_ids)
total_sync_ok_count = 0
total_sync_not_ok_count = 0
for course_id in course_ids:
log.info("-" * 80)
log.info("Synchronizing to Richie course id = {0}".format(course_id))
sync_result = sync_course_run_information_to_richie(
course_id=str(course_id)
)
ok_count = len(list(filter(lambda e: e[1], sync_result.items())))
not_ok_count = len(list(filter(lambda e: not e[1], sync_result.items())))
if ok_count > 0:
log.info(" ok count: {0}".format(ok_count))
if not_ok_count > 0:
log.info(" not ok count: {0}".format(not_ok_count))
richie_failed_backends = str(
list(
map(
lambda e: e[0],
(filter(lambda e: not e[1], sync_result.items())),
)
)
)
log.info(" failed backends: {0}".format(richie_failed_backends))
total_sync_ok_count += ok_count
total_sync_not_ok_count += not_ok_count
log.info("=" * 80)
log.info("Synchronization summary")
print("Total number of courses synchronized: {0}".format(course_ids_count))
log.info("Total number of synchronizations ok: {0}".format(total_sync_ok_count))
log.info(
"Total number of synchronizations not ok (error): {0}".format(total_sync_not_ok_count)
) | /richie_openedx_sync-1.2.0-py3-none-any.whl/richie_openedx_sync/management/commands/sync_courses_to_richie.py | 0.555797 | 0.173919 | sync_courses_to_richie.py | pypi |
import requests
import json
import logging
from richkit.retrieve.x509 import X509
from datetime import datetime
logger = logging.getLogger(__name__)
class DomainCertificates:
"""
This class provides the functions to get certificates of a given domain.
The website used to get them is crt.sh
"""
# Website used to retrieve the certificates belonging a domain
crtSH_url = "https://crt.sh/{}"
def __init__(self, domain):
"""
Get the certificate features from the given domain
:param domain: domain to analyze
"""
self.domain = domain
self.certificates = self.get_certificates(self.domain)
self.certificates_features = None
def get_certificates(self, domain):
"""
Make a request and get the response content of the given domain
:param domain: the choosen domain
"""
try:
r = requests.get(self.crtSH_url.format("?q=" + domain + "&output=json"))
if r.status_code != 200:
raise Exception("Server not available")
content = r.content.decode('utf-8')
if len(r.text) == 2: # It's 2 when the domain is not found
raise Exception("Domain not found")
return json.loads(content)
except Exception as e:
logger.error('Error while retrieving certificates: %s', e)
return None
def get_all(self):
"""
Get the list of certificates for the given domain and the certificate features for each of them
"""
certs_features = []
for cert in self.certificates:
# filter out all the rows containing @ because they are email
# example: https://crt.sh/?id=34083306
cf = X509(cert.get('id'))
not_before = cert.get('not_before')
not_after = cert.get('not_after')
not_before_obj = datetime.strptime(not_before, "%Y-%m-%dT%H:%M:%S")
not_after_obj = datetime.strptime(not_after, "%Y-%m-%dT%H:%M:%S")
validity = (not_after_obj.date() - not_before_obj.date()).days
features = dict({
'ID': cert.get('id'),
'Issuer': cert.get('issuer_name'),
'Algorithm': cf.algorithm,
'ValidationL': cf.policy_list,
'NotBefore': not_before,
'NotAfter': not_after,
'Validity': validity, # days
'SANFeatures': cf.certificates_features
})
certs_features.append(features)
self.certificates_features = certs_features
return certs_features
def get_certificates_list(self):
"""
Get the list of certificates for the given domain
"""
certs_features = []
for cert in self.certificates:
# filter out all the rows containing @ because they are email
# example: https://crt.sh/?id=34083306
not_before = cert.get('not_before')
not_after = cert.get('not_after')
not_before_obj = datetime.strptime(not_before, "%Y-%m-%dT%H:%M:%S")
not_after_obj = datetime.strptime(not_after, "%Y-%m-%dT%H:%M:%S")
validity = (not_after_obj.date() - not_before_obj.date()).days
features = dict({
'ID': cert.get('id'),
'Issuer': cert.get('issuer_name'),
'NotBefore': not_before,
'NotAfter': not_after,
'Validity': validity, # days
})
certs_features.append(features)
self.certificates_features = certs_features
return certs_features | /retrieve/cert_sh.py | 0.494141 | 0.201617 | cert_sh.py | pypi |
from richkit.analyse import tld, sld, sl_label, depth, length
import statistics
import requests
import logging
import time
logger = logging.getLogger(__name__)
class X509:
"""
This class provides functions to extract certificate features from crt.sh
The only needed parameter is the crt.sh ID of the certificate, it's possible to
get it just making a request on crt.sh by listing all the certificates for a specific domain
"""
# Website used to retrieve the certificates belonging a domain
crtSH_url = "https://crt.sh/{}"
def __init__(self, cert_id):
"""
Get the Subject Alternative Name features from the given certificate
:param cert_id: unique ID given by crt.sh per certificate
"""
self.cert_id = cert_id
self.algorithm = None
self.policy_list = None
self.certificates_features = None
self.get_certificate_features()
def get_certificate_info(self, cert_id):
"""
Make a request and get the response content of the given ID
:param cert_id: crt.sh ID of the certificate
:return: response as text or None in case an Exception raised
"""
try:
r = requests.get(self.crtSH_url.format("?id=" + cert_id))
if "<BR><BR>Certificate not found </BODY>" in r.text:
raise Exception("Certificate not found")
if "<BR><BR>Invalid value:" in r.text:
raise Exception("Certificate not found")
return r.text
except Exception as e:
raise e
def get_certificate_features(self):
"""
Parse the response content to get the certificate features
"""
text = None
for _ in range(5):
if text is not None:
break
try:
text = self.get_certificate_info(str(self.cert_id))
text_list = text.split('<BR>')
except:
time.sleep(10)
sans = SANList() # Used to store the SANs
policy_list = [] # Used to store the policies in order to get the Validation Level
algo_index = ' Signature Algorithm:'
san_index = \
' DNS:'
san_index_email = \
' email:'
policy_index = \
' ' \
' Policy: '
for row in text_list:
# Get Signature Algorithm
if algo_index in row:
self.algorithm = row[len(algo_index) + 6:]
# Get SANs
if san_index in row:
sans.append(row[len(san_index):])
if san_index_email in row:
sans.append(row[len(san_index_email):])
if policy_index in row:
policy_list.append(row[len(policy_index):])
# Calculating the LCS
apex = [sld(san) for san in sans.get_sans()]
lcs_num = get_lcs_apex(apex)
self.policy_list = policy_list
self.certificates_features = dict({
'san_list': sans.get_sans(),
'DomainCount': len(sans.get_sans()),
'UniqueApexCount': unique_apex(sans.get_sans()),
'UniqueSLDCount': unique_sld(sans.get_sans()),
'ShortestSAN': sans.min(),
'LongestSAN': sans.max(),
'SANsMean': sans.mean(),
'MinSubLabels': sans.min_labels(),
'MaxSubLabels': sans.max_labels(),
'MeanSubLabels': sans.mean_labels(),
'UniqueTLDsCount': unique_tld(sans.get_sans()),
'UniqueTLDsDomainCount': sans.uniqueTLDsDomainCount(),
'ApexLCS': None, # Don't need to implement
'LenApexLCS': lcs_num,
'LenApexLCSNorm': sans.lenApexLCSNorm(lcs_num),
})
def unique_apex(sans):
"""
Number of unique apex/root domains covered by the certificate
:param sans: List of Subject Alternative Name
"""
apex = [sld(san) for san in sans]
return len(set(apex))
def unique_tld(sans):
"""
Number of unique TLDs covered by the certificate
:param sans: List of Subject Alternative Name
"""
get_tlds = [tld(san) for san in sans]
return len(set(get_tlds))
def unique_sld(sans):
"""
Number of unique effective 2-level label domains covered by the certificate
:param sans: List of Subject Alternative Name
"""
get_sld = [sl_label(san) for san in sans]
return len(set(get_sld))
def get_lcs_apex(apex):
"""
The longest common substring of an array
:param apex: apex array
:return: The longest common substring
"""
lcs_num = 0
for i in apex:
current_sans_list = apex[:]
current_sans_list.remove(i)
for j in current_sans_list:
current_lcs = lcs(i, j)
if current_lcs > lcs_num:
lcs_num = current_lcs
return lcs_num
def lcs(x, y):
"""
The longest common substring (LCS)
:param x: First string
:param y: Second string
:return LCS
"""
m = len(x)
n = len(y)
h = [[None] * (n + 1) for i in range(m + 1)]
for i in range(m + 1):
for j in range(n + 1):
if i == 0 or j == 0:
h[i][j] = 0
elif x[i - 1] == y[j - 1]:
h[i][j] = h[i - 1][j - 1] + 1
else:
h[i][j] = max(h[i - 1][j], h[i][j - 1])
return h[m][n]
class SANList:
"""
This class provides tje functions to extract features from the SAN list
"""
def __init__(self):
self.sans = []
def append(self, san):
self.sans.append(san)
def get_sans(self):
return self.sans
def min(self):
if not self.sans:
return 0
return int(min([length(row) for row in self.sans]))
def max(self):
if not self.sans:
return 0
return int(max([length(row) for row in self.sans]))
def mean(self):
if not self.sans:
return 0
return statistics.mean([len(row) for row in self.sans])
def min_labels(self):
if not self.sans:
return 0
return min([int(depth(row)) - 2 for row in self.sans])
def max_labels(self):
if not self.sans:
return 0
return max([int(depth(row)) - 2 for row in self.sans])
def mean_labels(self):
if not self.sans:
return 0
return statistics.mean([int(depth(row)) for row in self.sans])
def uniqueTLDsDomainCount(self):
if not self.sans:
return 0
return unique_tld(self.sans) / len(self.sans)
def lenApexLCSNorm(self, lcs):
if not self.sans:
return 0
return lcs / len(self.sans) | /retrieve/x509.py | 0.61682 | 0.365372 | x509.py | pypi |
import logging
import sys
import types
from datetime import datetime
from logging import LogRecord
from types import TracebackType
from typing import Any, Callable, Dict, Iterable, List, Optional, Type, Union
from loguru import logger
from loguru._logger import Core
from rich.console import Console, ConsoleRenderable
from rich.logging import RichHandler
from rich.text import Text
from rich.theme import Theme
for lv in Core().levels.values():
logging.addLevelName(lv.no, lv.name)
class LoguruHandler(logging.Handler):
def emit(self, record: logging.LogRecord) -> None:
try:
level = logger.level(record.levelname).name
except ValueError:
level = str(record.levelno)
frame, depth = logging.currentframe(), 2
while frame and frame.f_code.co_filename == logging.__file__:
frame = frame.f_back
depth += 1
logger.opt(depth=depth, exception=record.exc_info).log(
level,
record.getMessage(),
)
def highlight(style: str) -> Dict[str, Callable[[Text], Text]]:
"""Add `style` to RichHandler's log text.
Example:
```py
logger.warning("Sth is happening!", **highlight("red bold"))
```
"""
def highlighter(text: Text) -> Text:
return Text(text.plain, style=style)
return {"highlighter": highlighter}
class LoguruRichHandler(RichHandler):
"""
Interpolate RichHandler in a better way
Example:
```py
logger.warning("Sth is happening!", style="red bold")
logger.warning("Sth is happening!", **highlight("red bold"))
logger.warning("Sth is happening!", alt="[red bold]Sth is happening![/red bold]")
logger.warning("Sth is happening!", text=Text.from_markup("[red bold]Sth is happening![/red bold]"))
```
"""
def render_message(self, record: LogRecord, message: str) -> "ConsoleRenderable":
# alternative time log
time_format = None if self.formatter is None else self.formatter.datefmt
time_format = time_format or self._log_render.time_format
log_time = datetime.fromtimestamp(record.created)
if callable(time_format):
log_time_display = time_format(log_time)
else:
log_time_display = Text(log_time.strftime(time_format))
if not (log_time_display == self._log_render._last_time and self._log_render.omit_repeated_times):
self.console.print(log_time_display, style="log.time")
self._log_render._last_time = log_time_display
# add extra attrs to record
extra: dict = getattr(record, "extra", {})
if "rich" in extra:
return extra["rich"]
if "style" in extra:
record.__dict__.update(highlight(extra["style"]))
elif "highlighter" in extra:
setattr(record, "highlighter", extra["highlighter"])
if "alt" in extra:
message = extra["alt"]
setattr(record, "markup", True)
if "markup" in extra:
setattr(record, "markup", extra["markup"])
if "text" in extra:
setattr(record, "highlighter", lambda _: extra["text"])
return super().render_message(record, message)
ExceptionHook = Callable[[Type[BaseException], BaseException, Optional[TracebackType]], Any]
def _loguru_exc_hook(typ: Type[BaseException], val: BaseException, tb: Optional[TracebackType]):
logger.opt(exception=(typ, val, tb)).error("Exception:")
def install(
rich_console: Optional[Console] = None,
exc_hook: Optional[ExceptionHook] = _loguru_exc_hook,
rich_traceback: bool = True,
tb_ctx_lines: int = 3,
tb_theme: Optional[str] = None,
tb_suppress: Iterable[Union[str, types.ModuleType]] = (),
time_format: Union[str, Callable[[datetime], Text]] = "[%x %X]",
keywords: Optional[List[str]] = None,
level: Union[int, str] = 20,
) -> None:
"""Install Rich logging and Loguru exception hook"""
logging.basicConfig(handlers=[LoguruHandler()], level=0)
logger.configure(
handlers=[
{
"sink": LoguruRichHandler(
console=rich_console
or Console(
theme=Theme(
{
"logging.level.success": "green",
"logging.level.trace": "bright_black",
}
)
),
rich_tracebacks=rich_traceback,
tracebacks_show_locals=True,
tracebacks_suppress=tb_suppress,
tracebacks_extra_lines=tb_ctx_lines,
tracebacks_theme=tb_theme,
show_time=False,
log_time_format=time_format,
keywords=keywords,
),
"format": (lambda _: "{message}") if rich_traceback else "{message}",
"level": level,
}
]
)
if exc_hook is not None:
sys.excepthook = exc_hook | /richuru-0.1.1-py3-none-any.whl/richuru.py | 0.592431 | 0.422743 | richuru.py | pypi |
from collections.abc import Mapping
from rick_db import Repository
from rick_db.sql import Select, Literal
class DbGrid:
SEARCH_NONE = 0
SEARCH_ANY = 1
SEARCH_START = 2
SEARCH_END = 3
search_map = {SEARCH_START: "{}%", SEARCH_ANY: "%{}%", SEARCH_END: "%{}"}
def __init__(
self,
repo: Repository,
search_fields: list = None,
search_type: int = None,
case_sensitive=False,
):
"""
Constructor
:param repo: Repository to use
:param search_fields: list of valid field names that can be searched
:param search_type: one of SEARCH_NONE, SEARCH_ANY, SEARCH_START, SEARCH_END
:param case_sensitive: if True, search will use LIKE instead of ILIKE
"""
if not search_fields:
search_fields = []
if search_type is None:
search_type = self.SEARCH_ANY
else:
if search_type != self.SEARCH_NONE:
if search_type not in self.search_map.keys():
raise ValueError("search type '%s' is not supported" % search_type)
self._repo = repo
record = repo._record()
self._fields = record.dbfields()
self._field_pk = getattr(record, "_pk", None)
for field in search_fields:
if field not in self._fields:
raise ValueError(
"search field '%s' does not exist in the Record" % field
)
self._search_type = search_type
self._search_fields = search_fields
self._case_sensitive = case_sensitive is True
self._ilike = repo.dialect().ilike
def default_query(self) -> Select:
"""
Build a default query. Can be overridden on descendants
:return: Select() for "select * from table"
"""
return self._repo.select()
def default_sort(self) -> dict:
"""
Build default sort dictionary. Can be overridden on descendants
The default sort order is primary_key DESC
:return: dict
"""
if self._field_pk:
return {self._field_pk: "ASC"}
return {}
def _assemble(
self,
qry: Select = None,
search_text: str = None,
match_fields: dict = None,
sort_fields: dict = None,
search_fields: list = None,
) -> Select:
"""
Assembles a query for a run() operation
search_text is applied by default to all fields present in search_fields and are OR'ed; this list can be reduced
by specifying search_fields
match_fields contains fieldnames and values to be matched for equality; they are AND'ed;
:param qry: optional Select query
:param search_text: optional search string
:param match_fields: optional field filter
:param sort_fields: optional sort fields in the format {field_name: order}
:param search_fields: optional search fields
:return: Select
"""
if not qry:
qry = self.default_query()
if not sort_fields:
sort_fields = self.default_sort()
if match_fields:
qry.where_and()
for field, value in match_fields.items():
if field not in self._fields:
raise ValueError(
"field '%s' used in match_field does not exist on Record"
% field
)
qry.where(field, "=", value)
qry.where_end()
if search_text:
qry.where_and()
if self._search_type == self.SEARCH_NONE:
raise RuntimeError("search is not allowed")
if len(self._search_fields) == 0:
raise RuntimeError("no available fields are mapped as searchable")
if search_fields:
# ensure search_fields only has allowed field names
tmp = []
for field in search_fields:
if field in self._search_fields:
tmp.append(field)
search_fields = tmp
if not search_fields:
# either search_fields was not passed as parameter, or had invalid fields
search_fields = self._search_fields
mask = self.search_map[self._search_type].format(str(search_text))
operand = "LIKE"
psql_mode = False
if not self._case_sensitive:
if self._ilike:
operand = "ILIKE"
psql_mode = True
else:
psql_mode = True
if psql_mode:
for field in search_fields:
qry.orwhere(field, operand, mask.format(str(search_text)))
else:
mask = mask.upper()
dialect = self._repo.dialect()
for field in search_fields:
# note: assuming non-pg operation does NOT support schemas or is referencing ambiguous fields
field = dialect.field(field)
qry.orwhere(Literal("UPPER({})".format(field)), operand, mask)
qry.where_end()
if sort_fields:
if not isinstance(sort_fields, Mapping):
raise ValueError("'sort_fields' parameter must be a dict")
for field, order in sort_fields.items():
if field in self._fields:
qry.order(field, order.upper())
else:
raise ValueError(
"field '%s' used for sorting does not exist on Record" % field
)
return qry
def run(
self,
qry: Select = None,
search_text: str = None,
match_fields: dict = None,
limit: int = None,
offset: int = None,
sort_fields: dict = None,
search_fields: list = None,
) -> tuple:
"""
Executes a query and returns the total row count matching the query, as well as the records within the specified
range.
If no query specified, a default query is built; If no sort dict specified, the default sort is used;
If limit is omitted, all results are returned
search_text is applied to all fields present in search_fields and are OR'ed;
match_fields contains fieldnames and values to be matched for equality; they are AND'ed;
:param qry: optional Select query
:param search_text: optional search string
:param match_fields: optional field filter
:param limit: optional limit
:param offset: optional offset (ignored if no limit)
:param sort_fields: optional sort fields in the format {field_name: order}
:param search_fields: optional search fields
:return: tuple(total_row_count, filtered_rows)
"""
# assemble query
qry = self._assemble(
qry=qry,
search_text=search_text,
match_fields=match_fields,
sort_fields=sort_fields,
search_fields=search_fields,
)
# execute query
return self._repo.list(qry, limit=limit, offset=offset) | /rick-db-1.2.0.tar.gz/rick-db-1.2.0/rick_db/dbgrid.py | 0.794066 | 0.199269 | dbgrid.py | pypi |
from functools import lru_cache
import inspect
# Names of the class attributes that fieldmapper() installs on decorated
# Record classes; centralised here so the rest of the module can reference
# them via object.__getattribute__/__setattr__ without typos.
ATTR_RECORD_MAGIC = "__PatchedRecordClass__"  # marker flag: class was patched
ATTR_FIELDS = "_fieldmap"  # dict: attribute name -> db column name
ATTR_TABLE = "_tablename"  # str: database table name
ATTR_SCHEMA = "_schema"  # str|None: optional database schema
ATTR_PRIMARY_KEY = "_pk"  # str|None: db column name of the primary key
ATTR_ROW = "_row"  # dict: current row data, keyed by db column name
class RecordError(Exception):
    """Raised on Record misuse, e.g. loading an unknown attribute or
    requesting the primary key of a Record that defines none."""
    pass
class Record:
    """Interface-style base declaring the Record contract.

    The method bodies are stubs; concrete behavior is provided by
    BaseRecord, whose methods fieldmapper() grafts onto decorated classes.
    """

    def load(self, **kwargs):
        """Load record data from keyword arguments (field=value)."""
        pass

    def fromrecord(self, record):
        """Load record data from a db result row (dict-like)."""
        pass

    def has_pk(self):
        """Return True if a primary key is defined for this Record."""
        pass

    def pk(self):
        """Return the current primary key value."""
        pass

    def asdict(self):
        """Return the record as {attribute_name: value}."""
        pass

    def asrecord(self):
        """Return the record as {db_column_name: value}."""
        pass

    def fields(self):
        """Return the list of attribute names that currently have values."""
        pass

    def items(self):
        """Return (attribute_name, value) pairs for set fields."""
        pass

    def values(self):
        """Return the list of values for set fields."""
        pass
class BaseRecord(Record):
    """Concrete Record implementation.

    fieldmapper() copies these methods onto decorated classes; the class
    attributes below are defaults and are replaced by the decorator.
    Attribute access is intercepted: reading/writing an attribute listed
    in _fieldmap transparently reads/writes the underlying _row dict.
    """

    # attribute name -> db column name (replaced by fieldmapper)
    _fieldmap = {}
    # database table name (replaced by fieldmapper)
    _tablename = None
    # optional database schema name (replaced by fieldmapper)
    _schema = None
    # db column name of the primary key, if any (replaced by fieldmapper)
    _pk = None

    def __init__(self, **kwargs):
        """Optionally initialize field values from keyword arguments.

        :param kwargs: field_name=value pairs; names must exist in the fieldmap
        """
        self._row = {}  # _row must be instance-scoped, shadowing the class attr
        if len(kwargs) > 0:
            self.load(**kwargs)

    def load(self, **kwargs):
        """
        Load record data from named parameters
        Parameter names are field names, ex:
            user.load(name="john connor", age=11)
        :param kwargs: field_name=value pairs
        :return: self
        :raises RecordError: if a name is not a mapped field
        """
        # object.__getattribute__ bypasses this class's own attribute
        # interception (see __getattribute__ below)
        fm = object.__getattribute__(self, ATTR_FIELDS)
        row = object.__getattribute__(self, ATTR_ROW)
        for field, value in kwargs.items():
            if field in fm:
                # store under the db column name, not the attribute name
                row[fm[field]] = value
            else:
                raise RecordError(f"unknown attribute {field}")
        return self

    def fromrecord(self, record: dict):
        """
        Loads a record from a db result record
        Field names are not checked. This allows for fast loading of data for read operations
        record parameter is not copied, but instead referenced; ensure there are no operations that may alter record
        structure outside the scope of the Record class
        :param record: dict-like row keyed by db column names
        :return: Record
        """
        object.__setattr__(self, ATTR_ROW, record)
        return self

    def has_pk(self) -> bool:
        """
        Returns True if a primary key is defined
        :return: bool
        """
        return self._pk is not None

    def dbfields(self) -> list:
        """
        Return the db field (column) name list from the fieldmap
        :return: list
        """
        return list(self._fieldmap.values())

    def pk(self):
        """
        Return the current pk value, if defined
        If no pk is defined, RecordError is raised
        If defined but value is not set, raises AttributeError
        :return: primary key value
        """
        pk = self._pk
        if pk is None:
            raise RecordError("primary key is not defined")
        row = self._row
        if pk in row.keys():
            return row[pk]
        raise AttributeError("primary key value is not set")

    def asdict(self) -> dict:
        """Return the record as {attribute_name: value} for set fields."""
        result = {}
        data = self._row
        for key, dbfield in self._fieldmap.items():
            if dbfield in data.keys():
                result[key] = data[dbfield]
        return result

    def asrecord(self) -> dict:
        """Return a copy of the row as {db_column_name: value}, restricted
        to mapped columns."""
        dbfieldnames = object.__getattribute__(self, ATTR_FIELDS).values()
        data = object.__getattribute__(self, ATTR_ROW).copy()
        # remove entries that may exist in _row but do not map to allowed fields
        for key in list(data.keys()):
            if key not in dbfieldnames:
                data.pop(key)
        return data

    def fields(self) -> list:
        """Return the attribute names that currently have values set."""
        result = []
        data = self._row
        for key, dbfield in self._fieldmap.items():
            if dbfield in data.keys():
                result.append(key)
        return result

    def items(self):
        """Return (attribute_name, value) pairs for set fields."""
        return self.asdict().items()

    def values(self) -> list:
        """Return the values of the set fields, in fieldmap order."""
        result = []
        data = self._row
        for key, dbfield in self._fieldmap.items():
            if dbfield in data.keys():
                result.append(data[dbfield])
        return result

    def __getattribute__(self, attr):
        # Intercept reads of mapped fields: fetch the value from _row
        # (keyed by db column name); unset mapped fields read as None.
        # Everything else falls through to normal attribute lookup.
        fieldmap = object.__getattribute__(self, ATTR_FIELDS)
        if attr in fieldmap:
            field = fieldmap[attr]
            data = object.__getattribute__(self, ATTR_ROW)
            if field in data.keys():
                return data[field]
            return None
        attribute = object.__getattribute__(self, attr)
        return attribute

    def __setattr__(self, key, value):
        # Intercept writes to mapped fields: store in _row under the db
        # column name; everything else goes into the instance __dict__.
        fm = object.__getattribute__(self, ATTR_FIELDS)
        if key in fm:
            data = object.__getattribute__(self, ATTR_ROW)
            if type(data) is not dict:
                # unwrap dict from dict-like record objects, such as psycopg2 results
                # this is only necessary when setting values, as the original object is often read-only
                data = dict(data)
                object.__setattr__(self, ATTR_ROW, data)
            field = fm[key]
            data[field] = value
        else:
            self.__dict__[key] = value
@lru_cache(maxsize=None)
def _base_record_method_map() -> dict:
methods = {}
for item in dir(BaseRecord):
attr = getattr(BaseRecord, item)
if inspect.isfunction(attr):
methods[item] = attr
return methods
def fieldmapper(cls=None, pk=None, tablename=None, schema=None, clsonly=False):
def wrap(cls):
fieldmap = {}
if clsonly:
# build fieldmap for current class attributes only
fieldmap = dict(
(field, value)
for field, value in cls.__dict__.items()
if field[0] != "_" and not callable(value)
)
else:
# build fieldmap for all available class attributes
for item in dir(cls):
if item[0] != "_":
value = getattr(cls, item)
if not callable(value):
fieldmap[item] = value
# replace class variables
setattr(cls, ATTR_RECORD_MAGIC, True)
setattr(cls, ATTR_FIELDS, fieldmap)
setattr(cls, ATTR_TABLE, tablename)
setattr(cls, ATTR_SCHEMA, schema)
setattr(cls, ATTR_PRIMARY_KEY, pk)
# row attr is set, but will be shadowed by internal attribute on __init__
setattr(cls, ATTR_ROW, {})
# patch methods
for name, method in _base_record_method_map().items():
setattr(cls, name, method)
return cls
if cls is None:
return wrap
return wrap(cls)
def patch_record(obj=None, tablename=None, pk=None, schema=None):
"""
Set/update internal attributes for Record obj
:param obj: Record object to update
:param tablename: table name
:param pk: primary key name
:param schema: schema name
:return:
"""
setattr(obj, ATTR_TABLE, tablename)
setattr(obj, ATTR_PRIMARY_KEY, pk)
setattr(obj, ATTR_SCHEMA, schema) | /rick-db-1.2.0.tar.gz/rick-db-1.2.0/rick_db/mapper.py | 0.64791 | 0.214218 | mapper.py | pypi |
from pathlib import Path
from typing import List
from rick_db.cli.command import BaseCommand
from rick_db.util import MigrationManager
from rick_db.util.metadata import FieldRecord
class Command(BaseCommand):
command = "dto"
description = (
"generate a python data transfer object (DTO) for a given database object"
)
def help(self):
self._tty.title(self.description)
self._tty.title(
"Usage: {name} [database] dto <[schema.]table_name> <output_file.py>".format(
name=self._name
)
)
def run(self, mgr: MigrationManager, args: list, command_list: dict):
if len(args) < 1:
self._tty.error("Error : Missing table name")
return False
if len(args) < 2:
self._tty.error("Error : Missing output file")
return False
view = False
table_name = args[0].split(".", 1)
schema = None
output_file = Path(args[1])
if output_file.exists():
self._tty.error("Error : Output file already exists")
return False
if len(table_name) > 1:
schema = table_name[0]
table_name = table_name[1]
else:
table_name = table_name.pop(0)
meta = mgr.get_meta()
if not meta.table_exists(table_name, schema):
view = True
if not meta.view_exists(table_name, schema):
self._tty.error(
"Error : Database object '{}' not found".format(args[0])
)
return False
if view:
fields = meta.view_fields(table_name, schema)
else:
fields = meta.table_fields(table_name, schema)
try:
contents = self._code_gen(table_name, schema, fields)
with open(output_file, "w", encoding="utf-8") as f:
f.write(contents)
self._tty.title("DAO written to file {name}".format(name=output_file))
return True
except Exception as e:
self._tty.error("Error : " + str(e))
return False
def _code_gen(self, table_name: str, schema: str, fields: List[FieldRecord]) -> str:
"""
Build Python class definition
:param table_name: table name
:param schema: optional schema name
:param fields: field list
:return: string
"""
result = []
pk = None
has_id = False
for f in fields:
if f.field == "id":
has_id = True
if f.primary:
pk = f.field
result.append("from rick_db import fieldmapper")
result.append("")
result.append("")
# build fieldmapper decorator fields
name = table_name.title().replace("_", "")
line = ["tablename='{name}'".format(name=table_name)]
if schema is not None:
line.append("schema='{schema}'".format(schema=schema))
if pk is not None:
line.append("pk='{pk}'".format(pk=pk))
result.append("@fieldmapper({fields})".format(fields=", ".join(line)))
result.append("class {name}:".format(name=name))
for f in fields:
attr_name = f.field
if f.primary:
if not has_id:
attr_name = "id"
result.append(
" {attr} = '{field}'".format(attr=attr_name, field=f.field)
)
result.append("")
return "\n".join(result) | /rick-db-1.2.0.tar.gz/rick-db-1.2.0/rick_db/cli/commands/dto.py | 0.638835 | 0.207114 | dto.py | pypi |
from rick_db.mapper import ATTR_TABLE, ATTR_SCHEMA
from rick_db.sql import (
SqlStatement,
SqlDialect,
SqlError,
Sql,
Literal,
DefaultSqlDialect,
Select,
)
class Delete(SqlStatement):
def __init__(self, dialect: SqlDialect = None):
"""
DELETE constructor
:param dialect: optional SQL dialect
"""
self._table = None
self._schema = None
self._clauses = []
self._values = []
if dialect is None:
dialect = DefaultSqlDialect()
self._dialect = dialect
def from_(self, table, schema=None):
"""
DELETE FROM table name and schema
:param table: string or record object
:param schema: optional string
:return: self
Possible values for table:
'table' -> string with table name
<object_or_class> -> record class or object
"""
if isinstance(table, str):
pass
elif isinstance(table, object):
schema = getattr(table, ATTR_SCHEMA, schema)
table = getattr(table, ATTR_TABLE, None)
if table is None:
raise SqlError("from_(): invalid type for table name")
else:
raise SqlError("from_(): invalid type for table name")
if schema is not None and type(schema) is not str:
raise SqlError(
"from_(): Invalid type for schema name: %s" % str(type(schema))
)
self._table = table
self._schema = schema
return self
def where(self, field, operator=None, value=None):
"""
WHERE clause
Multiple calls concat with AND
:param field: expression
:param operator: clause operator
:param value: optional value
:return: self
"""
return self._where(field, operator, value)
def orwhere(self, field, operator=None, value=None):
"""
WHERE clause
Multiple calls concat with OR
:param field: expression
:param operator: clause operator
:param value: optional value
:return: self
"""
return self._where(field, operator, value, is_and=False)
def _where(self, field, operator=None, value=None, is_and=True):
"""
Internal where handler
:param field: expression
:param operator: clause operator
:param value: optional value
:param is_and: True to interleave with AND, False to OR
:return: self
"""
concat = Sql.SQL_AND
if is_and is False:
concat = Sql.SQL_OR
if isinstance(field, str):
field = self._dialect.field(field)
elif isinstance(field, Literal):
field = str(field)
else:
raise SqlError("_where(): invalid field name type")
if value is None:
if operator is None:
expression = "{fld}".format(fld=field)
else:
expression = "{fld} {op}".format(fld=field, op=operator)
self._clauses.append([expression, concat])
else:
# sanity check, as we actually may have value list if subquery is in use
if isinstance(value, (list, tuple, dict)):
raise SqlError("_where(): invalid value type: %s" % str(type(value)))
if operator is None:
expression = "{fld} {ph}".format(
fld=field, ph=self._dialect.placeholder
)
else:
if isinstance(value, Select):
sql, value = value.assemble()
expression = "{fld} {op} ({query})".format(
fld=field, op=operator, query=sql
)
else:
expression = "{fld} {op} {ph}".format(
fld=field, op=operator, ph=self._dialect.placeholder
)
self._clauses.append([expression, concat])
if isinstance(value, list):
self._values.extend(value)
else:
self._values.append(value)
return self
def assemble(self):
"""
Generate SQL
:return: tuple(str, list)
"""
parts = [Sql.SQL_DELETE, self._dialect.table(self._table, None, self._schema)]
if len(self._clauses) > 0:
c = 0
parts.append(Sql.SQL_WHERE)
for clause in self._clauses:
expr, concat = clause
if c > 0:
parts.append(concat)
parts.append(expr)
c += 1
return " ".join(parts), self._values | /rick-db-1.2.0.tar.gz/rick-db-1.2.0/rick_db/sql/delete.py | 0.627723 | 0.28429 | delete.py | pypi |
from typing import Union
from rick_db.sql import (
SqlDialect,
DefaultSqlDialect,
SqlStatement,
Sql,
Literal,
)
class With(SqlStatement):
def __init__(self, dialect: SqlDialect = None):
"""
WITH constructor
"""
self._clauses = [] # each clause is (name, with_query, [columns], materialized)
self._query = None
self._recursive = False
if dialect is None:
dialect = DefaultSqlDialect()
self._dialect = dialect
def recursive(self, status=True):
"""
Enables or disables RECURSIVE
:param status:
:return:
"""
self._recursive = status
return self
def clause(
self,
name: str,
with_query: Union[SqlStatement, Literal],
columns: list = None,
materialized: bool = True,
):
"""
Adds a WITH <clause>(columns) AS <with_query>
:param name:
:param with_query:
:param columns:
:param materialized:
:return:
"""
if columns is None:
columns = []
self._clauses.append((name, with_query, columns, materialized))
return self
def query(self, query: SqlStatement):
"""
CTE query
:param query:
:return:
"""
self._query = query
return self
def assemble(self) -> tuple:
if not isinstance(self._query, (SqlStatement, Literal)):
raise RuntimeError("assemble(): missing CTE query")
if len(self._clauses) == 0:
raise RuntimeError("assemble(): missing CTE clauses")
parts = [Sql.SQL_WITH]
values = []
with_clauses = []
if self._recursive:
parts.append(Sql.SQL_RECURSIVE)
for clause in self._clauses:
name, qry, cols, materialized = clause
chunks = []
# optional expression columns
if len(cols) > 0:
fields = []
for field in cols:
fields.append(self._dialect.field(field))
chunks.append(self._dialect.table(name) + "(" + ",".join(fields) + ")")
else:
chunks.append(self._dialect.table(name))
chunks.append(Sql.SQL_AS)
if not materialized:
chunks.append(Sql.SQL_NOT_MATERIALIZED)
if isinstance(qry, SqlStatement):
qry_sql, qry_values = qry.assemble()
values.extend(qry_values)
else:
# Literal
qry_sql = str(qry)
stmt = "{} ({})".format(" ".join(chunks), qry_sql)
with_clauses.append(stmt)
parts.append(",".join(with_clauses))
if isinstance(self._query, SqlStatement):
qry_sql, qry_values = self._query.assemble()
values.extend(qry_values)
parts.append(qry_sql)
else:
# Literal
parts.append(str(self._query))
return " ".join(parts).strip(), values | /rick-db-1.2.0.tar.gz/rick-db-1.2.0/rick_db/sql/sql_with.py | 0.828592 | 0.301619 | sql_with.py | pypi |
from dataclasses import dataclass
from datetime import datetime
from typing import Optional, List
from rick_db import Repository, fieldmapper
from rick_db.conn import Connection
from rick_db.util import Metadata
MIGRATION_TABLE = "_migration"
@fieldmapper(tablename=MIGRATION_TABLE, pk="id_migration")
class MigrationRecord:
id = "id_migration"
name = "name"
applied = "applied"
@dataclass
class MigrationResult:
success: bool
error: str
class Migration:
def run(self, conn) -> bool:
"""
Base class for code-based migrations
:param conn:
:return: bool
"""
pass
class MigrationManager:
def __init__(self, db: Connection):
self._db = db
self._meta = db.metadata() # type: Metadata
self._repo = None
def get_meta(self) -> Metadata:
"""
Retrieve metadata manager
:return:
"""
return self._meta
def has_manager(self) -> bool:
"""
Returns true if migration manager is installed
:return:
"""
return self._meta.table_exists(MIGRATION_TABLE)
def install_manager(self) -> MigrationResult:
"""
Installs the migration manager in the current db
:return:
"""
if self._meta.table_exists(MIGRATION_TABLE):
return MigrationResult(
success=False,
error="migration table '{}' already exists".format(MIGRATION_TABLE),
)
try:
with self._db.cursor() as c:
c.exec(self._migration_table_sql(MIGRATION_TABLE))
return MigrationResult(success=True, error="")
except Exception as e:
return MigrationResult(success=False, error=str(e))
def fetch_by_name(self, name: str) -> Optional[MigrationRecord]:
"""
Search a migration by name
:param name: name to search
:return: MigrationRecord or None
"""
result = self.get_repository().fetch_by_field(MigrationRecord.name, name)
if len(result) > 0:
return result.pop(0)
return None
def list(self) -> List[MigrationRecord]:
"""
Retrieve all registered migrations
:return:
"""
qry = self.get_repository().select().order(MigrationRecord.applied)
return self.get_repository().fetch(qry)
def flatten(self, record: MigrationRecord) -> MigrationResult:
"""
Remove all records from the migration table, and replace them with a new record
:param record: new migration record
:return:
"""
try:
self.get_repository().delete_where([(MigrationRecord.id, ">", 0)])
record.applied = datetime.now().isoformat()
self.get_repository().insert(record)
return MigrationResult(success=True, error="")
except Exception as e:
return MigrationResult(success=False, error=str(e))
def register(self, migration: MigrationRecord) -> MigrationResult:
"""
Registers a migration
This method can be used to provide code-only migration mechanisms
:param migration:
:return:
"""
if len(migration.name) == 0:
return MigrationResult(success=False, error="empty migration data")
try:
migration.applied = datetime.now().isoformat()
self.get_repository().insert(migration)
return MigrationResult(success=True, error="")
except Exception as e:
return MigrationResult(success=False, error=str(e))
def execute(self, migration: MigrationRecord, content: str) -> MigrationResult:
"""
Execute a migration and register it
:param migration:
:param content:
:return:
"""
if len(migration.name) == 0 or len(content) == 0:
return MigrationResult(success=False, error="empty migration data")
if self.fetch_by_name(migration.name):
return MigrationResult(success=False, error="migration already executed")
try:
# execute migration
self._exec(content)
# update record
return self.register(migration)
except Exception as e:
return MigrationResult(success=False, error=str(e))
def get_repository(self) -> Repository:
if self._repo is None:
self._repo = Repository(self._db, MigrationRecord)
return self._repo
def _migration_table_sql(self, table_name: str) -> str:
raise NotImplementedError("abstract method")
def _exec(self, content):
"""
Execute migration using a cursor
:param content: string
:return: none
"""
with self._db.cursor() as c:
c.exec(content) | /rick-db-1.2.0.tar.gz/rick-db-1.2.0/rick_db/util/migrations.py | 0.862178 | 0.220846 | migrations.py | pypi |
from typing import Optional
from typing import List
from rick_db import fieldmapper
from rick_db.conn import Connection
@fieldmapper
class FieldRecord:
field = "field"
type = "type"
primary = "primary"
@fieldmapper()
class UserRecord:
name = "name"
superuser = "superuser"
createdb = "createdb"
class Metadata:
def __init__(self, db: Connection):
self._db = db
def tables(self, schema=None) -> List:
"""
List all available tables on the indicated schema. If no schema is specified, assume public schema
:param schema: optional schema name
:return: list of tablenames
"""
raise NotImplementedError("abstract method")
def views(self, schema=None) -> List:
"""
List all available views on the indicated schema. If no schema is specified, assume public schema
:param schema: optional schema name
:return: list of tablenames
"""
raise NotImplementedError("abstract method")
def schemas(self) -> List:
"""
List all available schemas
:return: list of schema names
"""
raise NotImplementedError("abstract method")
def databases(self) -> List:
"""
List all available databases
:return: list of database names
"""
raise NotImplementedError("abstract method")
def table_indexes(self, table_name: str, schema=None) -> List[FieldRecord]:
"""
List all indexes on a given table
:param table_name:
:param schema:
:return:
"""
raise NotImplementedError("abstract method")
def table_pk(self, table_name: str, schema=None) -> Optional[FieldRecord]:
"""
Get primary key from table
:param table_name:
:param schema:
:return:
"""
raise NotImplementedError("abstract method")
def table_fields(self, table_name: str, schema=None) -> List[FieldRecord]:
"""
Get fields of table
:param table_name:
:param schema:
:return:
"""
raise NotImplementedError("abstract method")
def view_fields(self, view_name: str, schema=None) -> List[FieldRecord]:
"""
Get fields of view
:param view_name:
:param schema:
:return:
"""
raise NotImplementedError("abstract method")
def users(self) -> List[UserRecord]:
"""
List all available users
:return:
"""
raise NotImplementedError("abstract method")
def user_groups(self, user_name: str) -> List[str]:
"""
List all groups associated with a given user
:param user_name: username to check
:return: list of group names
"""
raise NotImplementedError("abstract method")
def table_exists(self, table_name: str, schema=None) -> bool:
"""
Check if a given table exists
:param table_name: table name
:param schema: optional schema
:return:
"""
raise NotImplementedError("abstract method")
def view_exists(self, view_name: str, schema=None) -> bool:
"""
Check if a given view exists
:param view_name: table name
:param schema: optional schema
:return:
"""
raise NotImplementedError("abstract method")
def create_database(self, database_name: str, **kwargs):
"""
Create a database
:param database_name: database name
:param kwargs: optional parameters
:return:
"""
raise NotImplementedError("abstract method")
def database_exists(self, database_name: str) -> bool:
"""
Checks if a given database exists
:param database_name: database name
:return: bool
"""
raise NotImplementedError("abstract method")
def drop_database(self, database_name: str):
"""
Removes a database
:param database_name: database name
:return:
"""
raise NotImplementedError("abstract method")
def create_schema(self, schema: str, **kwargs):
"""
Create a new schema in the current database
:param schema:
:return:
"""
raise NotImplementedError("abstract method")
def schema_exists(self, schema: str) -> bool:
"""
Check if a given schema exists on the current database
:param schema:
:return: bool
"""
raise NotImplementedError("abstract method")
def drop_schema(self, schema: str, cascade: bool = False):
"""
Removes a schema
:param schema:
:param cascade:
:return:
"""
raise NotImplementedError("abstract method")
def kill_clients(self, database_name: str):
"""
Kills all active connections to the database
:param database_name:
:return:
"""
raise NotImplementedError("abstract method")
def drop_table(self, table_name: str, cascade: bool = False, schema: str = None):
"""
Removes a table
:param table_name:
:param cascade:
:param schema:
:return:
"""
dialect = self._db.dialect()
sql = "DROP TABLE IF EXISTS {name}".format(
name=dialect.table(table_name, schema=schema)
)
if cascade:
sql = sql + " CASCADE"
with self._db.cursor() as c:
c.exec(sql)
def drop_view(self, view_name: str, cascade: bool = False, schema: str = None):
"""
Removes a view
:param view_name:
:param cascade:
:param schema:
:return:
"""
dialect = self._db.dialect()
sql = "DROP VIEW IF EXISTS {name}".format(
name=dialect.table(view_name, schema=schema)
)
if cascade:
sql = sql + " CASCADE"
with self._db.cursor() as c:
c.exec(sql) | /rick-db-1.2.0.tar.gz/rick-db-1.2.0/rick_db/util/metadata.py | 0.922089 | 0.309989 | metadata.py | pypi |
from typing import Optional
from typing import List
from rick_db.sql import Sqlite3SqlDialect, Select
from rick_db.util import Metadata
from rick_db.util.metadata import FieldRecord, UserRecord
class Sqlite3Metadata(Metadata):
def tables(self, schema=None) -> List:
"""
List all available tables on the indicated schema. If no schema is specified, assume public schema
:param schema: optional schema name
:return: list of tablenames
"""
qry = (
Select(Sqlite3SqlDialect())
.from_("sqlite_master")
.where("type", "=", "table")
)
result = []
with self._db.cursor() as c:
for r in c.fetchall(*qry.assemble()):
if not r["name"].startswith("sqlite_"):
result.append(r["name"])
return result
def views(self, schema=None) -> List:
"""
List all available views on the indicated schema. If no schema is specified, assume public schema
:param schema: optional schema name
:return: list of tablenames
"""
qry = (
Select(Sqlite3SqlDialect())
.from_("sqlite_master")
.where("type", "=", "view")
)
result = []
with self._db.cursor() as c:
for r in c.fetchall(*qry.assemble()):
if not r["name"].startswith("sqlite_"):
result.append(r["name"])
return result
def schemas(self) -> List:
"""
List all available schemas
:return: list of schema names
"""
return []
def databases(self) -> List:
"""
List all available databases
:return: list of database names
"""
return []
def table_indexes(self, table_name: str, schema=None) -> List[FieldRecord]:
"""
List all indexes on a given table
:param table_name:
:param schema:
:return:
"""
sql = """
SELECT
ii.name as field,
ti.type as 'type',
pk as 'primary'
FROM sqlite_master AS m,
pragma_index_list(m.name) AS il,
pragma_index_info(il.name) AS ii,
pragma_table_info(m.name) AS ti
WHERE
m.type = 'table'
and m.tbl_name = ?
and ti.name= ii.name
GROUP BY
ii.name,
il.seq
ORDER BY 1,2;
"""
with self._db.cursor() as c:
result = c.fetchall(sql, (table_name,), cls=FieldRecord)
for r in result:
r.primary = r.primary == 1
return result
def table_pk(self, table_name: str, schema=None) -> Optional[FieldRecord]:
"""
Get primary key from table
:param table_name:
:param schema:
:return:
"""
sql = "select name as field, type, pk as 'primary' from pragma_table_info(?) where pk=1;"
with self._db.cursor() as c:
return c.fetchone(sql, (table_name,), cls=FieldRecord)
def table_fields(self, table_name: str, schema=None) -> List[FieldRecord]:
"""
Return list of fields for table
:param table_name:
:param schema:
:return:
"""
sql = "select name as field, type, pk as 'primary' from pragma_table_info(?);"
with self._db.cursor() as c:
result = c.fetchall(sql, (table_name,), cls=FieldRecord)
for r in result:
r.primary = r.primary == 1
return result
def view_fields(self, view_name: str, schema=None) -> List[FieldRecord]:
"""
Return list of fields for view
:param view_name:
:param schema:
:return:
"""
sql = "select name as field, type from pragma_table_info(?);"
with self._db.cursor() as c:
result = c.fetchall(sql, (view_name,), cls=FieldRecord)
for r in result:
r.primary = r.primary == 1
return result
def users(self) -> List[UserRecord]:
"""
List all available users
:return:
"""
return []
def user_groups(self, user_name: str) -> List[str]:
"""
List all groups associated with a given user
:param user_name: user name to check
:return: list of group names
"""
return []
def table_exists(self, table_name: str, schema=None) -> bool:
"""
Check if a given table exists
:param table_name: table name
:param schema: optional schema
:return:
"""
qry = (
Select(Sqlite3SqlDialect())
.from_("sqlite_master", ["name"])
.where("name", "=", table_name)
.where("type", "=", "table")
)
with self._db.cursor() as c:
return len(c.fetchall(*qry.assemble())) > 0
def view_exists(self, view_name: str, schema=None) -> bool:
"""
Check if a given view exists
:param view_name: table name
:param schema: optional schema
:return:
"""
qry = (
Select(Sqlite3SqlDialect())
.from_("sqlite_master", ["name"])
.where("name", "=", view_name)
.where("type", "=", "view")
)
with self._db.cursor() as c:
return len(c.fetchall(*qry.assemble())) > 0
def create_database(self, database_name: str, **kwargs):
"""
Create a database
:param database_name: database name
:param kwargs: optional parameters
:return:
"""
raise NotImplementedError("SqlLite3: feature not supported")
def database_exists(self, database_name: str) -> bool:
"""
Checks if a given database exists
:param database_name: database name
:return: bool
"""
raise NotImplementedError("SqlLite3: feature not supported")
def drop_database(self, database_name: str):
"""
Removes a database
:param database_name: database name
:return:
"""
raise NotImplementedError("SqlLite3: feature not supported")
def create_schema(self, schema: str, **kwargs):
"""
Create a new schema
:param schema:
:return:
"""
raise NotImplementedError("SqlLite3: feature not supported")
def schema_exists(self, schema: str) -> bool:
"""
Check if a given schema exists on the current database
:param schema:
:return: bool
"""
raise NotImplementedError("SqlLite3: feature not supported")
def drop_schema(self, schema: str, cascade: bool = False):
"""
Removes a schema
:param schema:
:param cascade:
:return:
"""
raise NotImplementedError("SqlLite3: feature not supported")
def kill_clients(self, database_name: str):
"""
Kills all active connections to the database
:param database_name:
:return:
"""
raise NotImplementedError("SqlLite3: feature not supported") | /rick-db-1.2.0.tar.gz/rick-db-1.2.0/rick_db/util/sqlite/metadata.py | 0.90562 | 0.345464 | metadata.py | pypi |
from typing import Optional, List
from rick_db.conn import Connection
from rick_db.util import Metadata
from rick_db.sql import Select, Literal
from rick_db.util.metadata import FieldRecord, UserRecord
from .pginfo import PgInfo
class PgMetadata(Metadata):
SCHEMA_DEFAULT = "public"
def __init__(self, db: Connection):
super().__init__(db)
self.pginfo = PgInfo(db)
def tables(self, schema=None) -> List[str]:
"""
List all available tables on the indicated schema. If no schema is specified, assume public schema
:param schema: optional schema name
:return: list of tablenames
"""
if schema is None:
schema = self.SCHEMA_DEFAULT
result = [t.name for t in self.pginfo.list_database_tables(schema)]
return result
def views(self, schema=None) -> List[str]:
"""
List all available views on the indicated schema. If no schema is specified, assume public schema
:param schema: optional schema name
:return: list of tablenames
"""
if schema is None:
schema = self.SCHEMA_DEFAULT
result = [t.name for t in self.pginfo.list_database_views(schema)]
return result
def schemas(self) -> List[str]:
"""
List all available schemas
:return: list of schema names
"""
result = [t.name for t in self.pginfo.list_database_schemas()]
return result
def databases(self) -> List[str]:
"""
List all available databases
:return: list of database names
"""
result = [t.name for t in self.pginfo.list_server_databases()]
return result
def table_indexes(self, table_name: str, schema=None) -> List[FieldRecord]:
"""
List all indexes on a given table
:param table_name:
:param schema:
:return:
"""
return self.pginfo.list_table_indexes(table_name, schema)
def table_pk(self, table_name: str, schema=None) -> Optional[FieldRecord]:
"""
Get primary key from table
:param table_name:
:param schema:
:return:
"""
pk = self.pginfo.list_table_pk(table_name, schema)
if pk is None:
return None
return FieldRecord(field=pk.column, primary=True)
def table_fields(self, table_name: str, schema=None) -> List[FieldRecord]:
"""
Get fields of table
:param table_name:
:param schema:
:return:
"""
if schema is None:
schema = self.SCHEMA_DEFAULT
columns = {
"column_name": "field",
"data_type": "type",
Literal("false"): "primary",
}
qry = (
Select(self._db.dialect())
.from_("columns", columns, schema="information_schema")
.where("table_schema", "=", schema)
.where("table_name", "=", table_name)
.order("ordinal_position")
)
idx = self.table_pk(table_name, schema)
with self._db.cursor() as c:
fields = c.fetchall(
*qry.assemble(), cls=FieldRecord
) # type:list[FieldRecord]
if idx is not None:
for f in fields:
f.primary = f.field == idx.field
return fields
def view_fields(self, view_name: str, schema=None) -> List[FieldRecord]:
"""
Get fields of view
:param view_name:
:param schema:
:return:
"""
# table_fields() implementation actually doesn't distinguish between table and view
return self.table_fields(view_name, schema)
def users(self) -> List[UserRecord]:
"""
List all available users
:return:
"""
fields = {"usename": "name", "usesuper": "superuser", "usecreatedb": "createdb"}
with self._db.cursor() as c:
return c.fetchall(
*Select(self._db.dialect())
.from_("pg_user", fields, "pg_catalog")
.assemble(),
UserRecord
)
def user_groups(self, user_name: str) -> List[str]:
"""
List all groups associated with a given user
:param user_name: username to check
:return: list of group names
"""
qry = (
Select(self._db.dialect())
.from_("pg_user", {"rolname": "name"})
.join("pg_auth_members", "member", "pg_user", "usesysid")
.join("pg_roles", "oid", "pg_auth_members", "roleid")
.where("usename", "=", user_name)
)
result = []
with self._db.cursor() as c:
for r in c.fetchall(*qry.assemble()):
result.append(r["name"])
return result
def table_exists(self, table_name: str, schema=None) -> bool:
"""
Check if a given table exists
:param table_name: table name
:param schema: optional schema
:return:
"""
if schema is None:
schema = self.SCHEMA_DEFAULT
qry = (
Select(self._db.dialect())
.from_("pg_tables", ["tablename"])
.where("schemaname", "=", schema)
.where("tablename", "=", table_name)
)
with self._db.cursor() as c:
return len(c.fetchall(*qry.assemble())) > 0
def view_exists(self, view_name: str, schema=None) -> bool:
"""
Check if a given view exists
:param view_name: table name
:param schema: optional schema
:return:
"""
if schema is None:
schema = self.SCHEMA_DEFAULT
qry = (
Select(self._db.dialect())
.from_("pg_views", ["viewname"])
.where("schemaname", "=", schema)
.where("viewname", "=", view_name)
)
with self._db.cursor() as c:
return len(c.fetchall(*qry.assemble())) > 0
def create_database(self, database_name: str, **kwargs):
"""
Create a database
:param database_name: database name
:param kwargs: optional parameters
:return:
"""
dialect = self._db.dialect()
args = []
for k, v in kwargs.items():
args = "=".join([k.upper(), dialect.database(v)])
args = " ".join(args)
backend = self._db.backend()
backend.set_isolation_level(0) # ISOLATION_LEVEL_AUTOCOMMIT
sql = "CREATE DATABASE {db} {args}".format(
db=dialect.database(database_name), args=args
)
with self._db.cursor() as c:
c.exec(sql)
backend.set_isolation_level(self._db.isolation_level)
def database_exists(self, database_name: str) -> bool:
"""
Checks if a given database exists
:param database_name: database name
:return: bool
"""
return database_name in self.databases()
def drop_database(self, database_name: str):
"""
Removes a database
:param database_name: database name
:return:
"""
self.kill_clients(database_name)
dialect = self._db.dialect()
backend = self._db.backend()
backend.set_isolation_level(0) # ISOLATION_LEVEL_AUTOCOMMIT
with self._db.cursor() as c:
c.exec(
"DROP DATABASE IF EXISTS {db}".format(
db=dialect.database(database_name)
)
)
backend.set_isolation_level(self._db.isolation_level)
def create_schema(self, schema: str, **kwargs):
"""
Create a new schema
:param schema:
:return:
"""
dialect = self._db.dialect()
authorization = (
kwargs["authorization"] if "authorization" in kwargs.keys() else None
)
sql = "CREATE SCHEMA IF NOT EXISTS {schema}".format(
schema=dialect.database(schema)
)
if authorization:
sql = sql + " AUTHORIZATION {role}".format(
role=dialect.database(authorization)
)
with self._db.cursor() as c:
c.exec(sql)
def schema_exists(self, schema: str) -> bool:
"""
Check if a given schema exists on the current database
:param schema:
:return: bool
"""
return schema in self.schemas()
def drop_schema(self, schema: str, cascade: bool = False):
"""
Removes a schema
:param schema:
:param cascade:
:return:
"""
dialect = self._db.dialect()
sql = "DROP SCHEMA IF EXISTS {schema}".format(schema=dialect.database(schema))
if cascade:
sql = sql + " CASCADE"
with self._db.cursor() as c:
c.exec(sql)
def kill_clients(self, database_name: str):
"""
Kills all active connections to the database
:param database_name:
:return:
"""
with self._db.cursor() as c:
sql = """
SELECT pg_terminate_backend(pg_stat_activity.pid)
FROM pg_stat_activity
WHERE pg_stat_activity.datname = %s
AND pid <> pg_backend_pid();
"""
c.exec(sql, [database_name]) | /rick-db-1.2.0.tar.gz/rick-db-1.2.0/rick_db/util/pg/metadata.py | 0.896825 | 0.258703 | metadata.py | pypi |
from typing import List, Optional
from rick_db.conn import Connection
from rick_db.sql import Select, PgSqlDialect, Literal
from rick_db.util.metadata import FieldRecord
from rick_db.util.pg.records import (
DatabaseRecord,
RoleRecord,
TableSpaceRecord,
SettingRecord,
NamespaceRecord,
TableRecord,
ColumnRecord,
ConstraintRecord,
KeyColumnUsageRecord,
UserRecord,
GroupRecord,
ForeignKeyRecord,
IdentityRecord,
)
class PgInfo:
SCHEMA_DEFAULT = "public"
# table types
TYPE_BASE = "BASE TABLE"
TYPE_VIEW = "VIEW"
TYPE_FOREIGN = "FOREIGN TABLE"
TYPE_LOCAL = "LOCAL TEMPORARY"
def __init__(self, db: Connection):
self.db = db
self.dialect = PgSqlDialect()
def get_server_version(self) -> str:
"""
Get server version string
:return: str
"""
with self.db.cursor() as c:
result = c.exec(" SELECT version()")
return result.pop()[0]
def list_server_databases(self) -> List[DatabaseRecord]:
"""
List existing databases, ordered by name
:return: List[DatabaseRecord]
"""
sql, values = (
Select(self.dialect)
.from_(
{DatabaseRecord: "dr"},
cols=[
"*",
{Literal("pg_encoding_to_char(encoding)"): DatabaseRecord.encoding},
],
)
.order(DatabaseRecord.name)
.assemble()
)
with self.db.cursor() as c:
return c.exec(sql, values, cls=DatabaseRecord)
def list_server_roles(self) -> List[RoleRecord]:
"""
List existing roles, ordered by name
:return:
"""
sql, values = (
Select(self.dialect).from_(RoleRecord).order(RoleRecord.name).assemble()
)
with self.db.cursor() as c:
return c.exec(sql, values, cls=RoleRecord)
def list_server_users(self) -> List[UserRecord]:
"""
List existing users, ordered by name
:return:
"""
sql, values = (
Select(self.dialect).from_(UserRecord).order(UserRecord.name).assemble()
)
with self.db.cursor() as c:
return c.exec(sql, values, cls=UserRecord)
def list_server_groups(self) -> List[GroupRecord]:
"""
List existing groups, ordered by name
:return:
"""
sql, values = (
Select(self.dialect).from_(GroupRecord).order(GroupRecord.name).assemble()
)
with self.db.cursor() as c:
return c.exec(sql, values, cls=GroupRecord)
def list_user_groups(self, user_name: str) -> List[GroupRecord]:
"""
List all groups associated with a username
:param user_name: username to check
:return: list of group names
"""
sql = """
SELECT * FROM pg_group WHERE pg_group.grosysid IN(
SELECT pg_roles.oid FROM pg_user
JOIN pg_auth_members ON (pg_user.usesysid=pg_auth_members.member)
JOIN pg_roles ON (pg_roles.oid=pg_auth_members.roleid)
WHERE pg_user.usename = %s);
"""
with self.db.cursor() as c:
return c.exec(sql, [user_name], cls=GroupRecord)
def list_server_tablespaces(self) -> List[TableSpaceRecord]:
"""
List existing tablespaces, ordered by name
:return: List[TableSpaceRecord]
"""
sql, values = (
Select(self.dialect)
.from_(TableSpaceRecord)
.order(TableSpaceRecord.name)
.assemble()
)
with self.db.cursor() as c:
return c.exec(sql, values, cls=TableSpaceRecord)
def list_server_settings(self) -> List[SettingRecord]:
"""
List existing server settings and current values
:return: List[SettingRecord]
"""
sql, values = (
Select(self.dialect)
.from_(SettingRecord)
.order(SettingRecord.name)
.assemble()
)
with self.db.cursor() as c:
return c.exec(sql, values, cls=SettingRecord)
def list_database_namespaces(self) -> List[NamespaceRecord]:
"""
List available namespaces on current database
:return: List[TableRecord]
"""
sql, values = (
Select(self.dialect)
.from_(NamespaceRecord)
.order(NamespaceRecord.name)
.assemble()
)
with self.db.cursor() as c:
return c.exec(sql, values, cls=NamespaceRecord)
def list_database_schemas(self) -> List[NamespaceRecord]:
"""
List available namespaces on current database
:return: List[TableRecord]
"""
sql, values = (
Select(self.dialect)
.from_(NamespaceRecord)
.order(NamespaceRecord.name)
.assemble()
)
with self.db.cursor() as c:
return c.exec(sql, values, cls=NamespaceRecord)
def list_database_tables_type(
self, table_type: str, schema: str = None
) -> List[TableRecord]:
"""
List tables by type for the specified schema
:param table_type: table type to filter
:param schema: optional schema, 'public' if omitted
:return: List[TableRecord]
"""
if not schema:
schema = self.SCHEMA_DEFAULT
sql, values = (
Select(self.dialect)
.from_(TableRecord)
.where(TableRecord.schema, "=", schema)
.where(TableRecord.table_type, "=", table_type)
.order(TableRecord.name)
.assemble()
)
with self.db.cursor() as c:
return c.exec(sql, values, cls=TableRecord)
def list_database_views(self, schema: str = None) -> List[TableRecord]:
"""
List all views for the specified schema
:param schema: optional schema, 'public' if omitted
:return: List[TableRecord]
"""
return self.list_database_tables_type(self.TYPE_VIEW, schema)
def list_database_tables(self, schema: str = None) -> List[TableRecord]:
"""
List all base tables for the specified schema
:param schema: optional schema, 'public' if omitted
:return: List[TableRecord]
"""
return self.list_database_tables_type(self.TYPE_BASE, schema)
def list_database_temporary_tables(self, schema: str = None) -> List[TableRecord]:
"""
List all temporary tables for the specified schema
:param schema: optional schema, 'public' if omitted
:return: List[TableRecord]
"""
return self.list_database_tables_type(self.TYPE_LOCAL, schema)
def list_database_foreign_tables(self, schema: str = None) -> List[TableRecord]:
"""
List all foreign tables for the specified schema
:param schema: optional schema, 'public' if omitted
:return: List[TableRecord]
"""
return self.list_database_tables_type(self.TYPE_FOREIGN, schema)
def list_table_columns(
self, table_name: str, schema: str = None
) -> List[ColumnRecord]:
"""
List all table columns, sorted by numerical order
:param table_name:
:param schema:
:return: List[ColumnRecord]
"""
if not schema:
schema = self.SCHEMA_DEFAULT
sql, values = (
Select(self.dialect)
.from_(ColumnRecord)
.where(ColumnRecord.schema, "=", schema)
.where(ColumnRecord.table_name, "=", table_name)
.order(ColumnRecord.position)
.assemble()
)
with self.db.cursor() as c:
return c.exec(sql, values, cls=ColumnRecord)
def list_table_pk(
self, table_name: str, schema: str = None
) -> Optional[ConstraintRecord]:
"""
List primary key of table
:param table_name:
:param schema:
:return: ConstraintRecord
"""
if not schema:
schema = self.SCHEMA_DEFAULT
sql, values = (
Select(self.dialect)
.from_({ConstraintRecord: "cr"})
.join(
{KeyColumnUsageRecord: "kc"},
KeyColumnUsageRecord.name,
{ConstraintRecord: "cr"},
ConstraintRecord.const_name,
"=",
cols=[KeyColumnUsageRecord.column],
)
.where({"cr": ConstraintRecord.schema}, "=", schema)
.where({"cr": ConstraintRecord.table_name}, "=", table_name)
.where({"cr": ConstraintRecord.constraint_type}, "=", "PRIMARY KEY")
.assemble()
)
with self.db.cursor() as c:
result = c.exec(sql, values, cls=ConstraintRecord)
if len(result) > 0:
return result.pop()
return None
def list_table_indexes(self, table_name: str, schema=None) -> List[FieldRecord]:
"""
List all indexes on a given table
:param table_name:
:param schema:
:return:
"""
if schema is None:
schema = self.SCHEMA_DEFAULT
sql = """
SELECT
pg_attribute.attname AS field,
format_type(pg_attribute.atttypid, pg_attribute.atttypmod) AS type,
indisprimary AS primary
FROM pg_index, pg_class, pg_attribute, pg_namespace
WHERE
pg_class.relname = %s AND
indrelid = pg_class.oid AND
nspname = %s AND
pg_class.relnamespace = pg_namespace.oid AND
pg_attribute.attrelid = pg_class.oid AND
pg_attribute.attnum = any(pg_index.indkey)
"""
params = (table_name, schema)
with self.db.cursor() as c:
return c.fetchall(sql, params, cls=FieldRecord)
def list_table_foreign_keys(
self, table_name, schema: str = None
) -> List[ForeignKeyRecord]:
"""
List foreign keys for a given table
Query from bob217 on https://stackoverflow.com/questions/1152260/how-to-list-table-foreign-keys
:param table_name:
:param schema:
:return:
"""
sql = """
SELECT sh.nspname AS table_schema,
tbl.relname AS table_name,
col.attname AS column_name,
referenced_sh.nspname AS foreign_table_schema,
referenced_tbl.relname AS foreign_table_name,
referenced_field.attname AS foreign_column_name
FROM pg_constraint c
INNER JOIN pg_namespace AS sh ON sh.oid = c.connamespace
INNER JOIN (SELECT oid, unnest(conkey) as conkey FROM pg_constraint) con ON c.oid = con.oid
INNER JOIN pg_class tbl ON tbl.oid = c.conrelid
INNER JOIN pg_attribute col ON (col.attrelid = tbl.oid AND col.attnum = con.conkey)
INNER JOIN pg_class referenced_tbl ON c.confrelid = referenced_tbl.oid
INNER JOIN pg_namespace AS referenced_sh ON referenced_sh.oid = referenced_tbl.relnamespace
INNER JOIN (SELECT oid, unnest(confkey) as confkey FROM pg_constraint) conf ON c.oid = conf.oid
INNER JOIN pg_attribute referenced_field ON
(referenced_field.attrelid = c.confrelid AND referenced_field.attnum = conf.confkey)
WHERE c.contype = 'f' AND sh.nspname = %s AND tbl.relname = %s
"""
if not schema:
schema = self.SCHEMA_DEFAULT
with self.db.cursor() as c:
return c.fetchall(sql, [schema, table_name], cls=ForeignKeyRecord)
def table_exists(
self, table_name: str, table_type: str = None, schema: str = None
) -> bool:
"""
Returns true if the specified table exists
:param table_name: table name to find
:param table_type: optional table type, BASE TABLE if omitted
:param schema: optional schema, 'public' if omitted
:return: bool
"""
if not table_type:
table_type = self.TYPE_BASE
if not schema:
schema = self.SCHEMA_DEFAULT
sql, values = (
Select(self.dialect)
.from_(TableRecord)
.where(TableRecord.schema, "=", schema)
.where(TableRecord.table_type, "=", table_type)
.where(TableRecord.name, "=", table_name)
.assemble()
)
with self.db.cursor() as c:
return len(c.exec(sql, values)) > 0
def list_identity_columns(self, table, schema: str = None) -> List[IdentityRecord]:
"""
List IDENTITY columns (if any)
:param table: table name
:param schema: optional schema name
:return: List[IdentityRecord]
"""
if not schema:
schema = self.SCHEMA_DEFAULT
sql, values = (
Select(self.db.dialect())
.from_(
IdentityRecord,
cols=[
IdentityRecord.column,
IdentityRecord.identity,
IdentityRecord.generated,
],
)
.join({"pg_class": "c"}, "oid", IdentityRecord, "attrelid")
.join({"pg_namespace": "n"}, "oid", "c", "relnamespace")
.where("attnum", ">", 0)
.where({"c": "relname"}, "=", table)
.where({"n": "nspname"}, "=", schema)
.where_and()
.where(IdentityRecord.identity, "!=", "")
.orwhere(IdentityRecord.generated, "!=", "")
.where_end()
.assemble()
)
with self.db.cursor() as c:
return c.exec(sql, values, cls=IdentityRecord) | /rick-db-1.2.0.tar.gz/rick-db-1.2.0/rick_db/util/pg/pginfo.py | 0.885117 | 0.32178 | pginfo.py | pypi |
import os
import shutil
from datetime import datetime, timedelta
from io import BytesIO, StringIO
from pathlib import PurePath, Path
from typing import Union, Any, List
import magic
from rick_vfs.utils import dict_extract
from rick_vfs.vfs import VfsObjectInfo, VfsVolume, VfsContainer, VfsError
class LocalObjectInfo(VfsObjectInfo):
def __init__(self, volume: PurePath, name: Path, src: os.stat_result):
"""
Assemble object stat() information
:param volume: the volume path
:param name: absolute path for the object, including the volume
:param src: os.stat() info
"""
self.src = src
self.object_name = name.name
self.volume = str(volume)
self.path = name
if name.is_file():
self.content_type = magic.from_file(name, mime=True)
self.size = src.st_size
self.atime = datetime.fromtimestamp(src.st_atime)
self.mtime = datetime.fromtimestamp(src.st_mtime)
self.owner_id = src.st_uid
self.permissions = src.st_mode
self.is_latest = True
self.is_dir = name.is_dir()
self.is_file = name.is_file()
class LocalVolume(VfsVolume):
"""
LocalVolume allows the usage of a specific local folder path as virtual volume for VFS operations
"""
def __init__(self, root_path: Union[str, Path], auto_create=True):
"""
Initialize a local volume
:param root_path: local root folder path for the volume
:param auto_create: if True, the root folder path will be created if it doesn't exist
"""
if isinstance(root_path, str):
root_path = Path(root_path)
if not isinstance(root_path, Path):
raise VfsError("Invalid root path type")
self.root = root_path.resolve()
if auto_create:
if not self.exists():
self.create()
def root_path(self) -> str:
"""
Retrieve the root folder path as string
:return: root folder path
"""
return str(self.root)
def exists(self) -> bool:
"""
Check if volume root folder exists and is a valid directory
:return: True if root folder exists, false otherwise
"""
return self.root.exists() and self.root.is_dir()
def create(self, **kwargs):
"""
Create volume root folder path if it doesn't exist
:param kwargs:
:return:
"""
if not self.root.exists():
try:
os.makedirs(self.root)
except OSError as e:
raise VfsError(e)
else:
if not self.root.is_dir():
raise ValueError("Invalid root path '{}': not a directory".format(self.root))
def remove(self, **kwargs):
"""
Removes the local volume
The volume folder must be empty
:param kwargs:
:return:
"""
if self.exists():
try:
self.root.rmdir()
except OSError as e:
raise VfsError(e)
def purge(self, **kwargs):
"""
Removes the whole local volume directory tree
This operation is not reversible, use with care - all data will be lost!
:param kwargs:
:return:
"""
if self.exists():
try:
shutil.rmtree(self.root)
except OSError as e:
raise VfsError(e)
def resolve_path(self, local_path: Union[str, Path]) -> Path:
"""
Make path absolute and return the full volume path for the specified local path
:param local_path: relative path inside the volume
:return: Path object
"""
return self.root / Path(os.path.relpath(os.path.normpath(os.path.join("/", local_path)), "/"))
class LocalVfs(VfsContainer):
def __init__(self, volume: LocalVolume):
self.volume = volume
self.root = volume.root
if not volume.exists():
raise VfsError("Base path not found; volume not initialized?")
def stat(self, object_name, **kwargs) -> Union[VfsObjectInfo, None]:
"""
Get file or directory information
:param object_name: object to get information
:param kwargs:
:return: VfsObjectInfo object with information, or None if object doesn't exist
"""
path = self.volume.resolve_path(object_name)
if not path.exists():
return None
try:
return LocalObjectInfo(self.root, path, os.stat(path))
except OSError as e:
raise VfsError(e)
def mkdir(self, directory_name, **kwargs) -> Any:
"""
Creates a path or directory
If directory_name is a non-existing path, it will be built
:param directory_name: full path to create inside the volume
:param kwargs:
:return:
"""
path = self.volume.resolve_path(directory_name)
try:
os.makedirs(path)
except OSError as e:
raise VfsError(e)
def rmdir(self, directory_name, **kwargs) -> Any:
"""
Removes an empty directory
:param directory_name: full path of directory to remove
:param kwargs:
:return:
"""
path = self.volume.resolve_path(directory_name)
if path == self.root:
raise VfsError("rmdir(): cannot remove root directory")
try:
path.rmdir()
except OSError as e:
raise VfsError(e)
def rmfile(self, file_name, **kwargs) -> Any:
"""
Removes a file
:param file_name: full path to file to be removed
:param kwargs:
:return:
"""
path = self.volume.resolve_path(file_name)
if not path.is_file():
raise VfsError("rmfile(): cannot remove '{}'; not a file".format(file_name))
try:
path.unlink()
except OSError as e:
raise VfsError(e)
def exists(self, file_name, **kwargs) -> bool:
"""
Check if a given path or file exists
:param file_name: full path to verify
:param kwargs:
:return: True if exists, false otherwise
"""
path = self.volume.resolve_path(file_name)
return path.exists()
def chmod(self, path, mask, **kwargs):
"""
Posix chmod of files or folders
:param path: full path for item to change permissions
:param mask: permission mask
:param kwargs:
:return:
"""
path = self.volume.resolve_path(path)
if path == self.root:
raise VfsError("chmod(): cannot change root folder permissions")
try:
os.chmod(path, mask)
except OSError as e:
raise VfsError(e)
def chown(self, path, owner_id, group_id, **kwargs):
"""
Posix chown of files or folders
:param path: full path for item to change owner
:param owner_id: owner id
:param group_id: group id
:param kwargs:
:return:
"""
path = self.volume.resolve_path(path)
if path == self.root:
raise VfsError("chown(): cannot change root folder owner")
try:
os.chown(path, owner_id, group_id)
except OSError as e:
raise VfsError(e)
def get_local_file(self, file_name, **kwargs) -> Path:
"""
Gets a full path for a locally accessible file
:param file_name:
:param kwargs:
:return:
"""
path = self.volume.resolve_path(file_name)
if not path.is_file():
raise VfsError("get_local_file(): file '{}' does not exist or is not a file".format(file_name))
return path
def open_file(self, file_name, **kwargs) -> Any:
"""
Opens a locally-accessible file
file-related kwargs:
:param mode: file access mode (default 'rb')
:param encoding: file encoding (default None)
:paramm newline: end-of-line marker (default None)
Example::
try:
with vfs.open_file('my-file') as f:
f.read()
except:
raise
:param file_name: full path from the object to retrieve
:param kwargs: optional arguments
:return: Path() object to the local file
"""
path = self.volume.resolve_path(file_name)
if not path.is_file():
raise VfsError("open_file(): file '{}' does not exist or is not a file".format(file_name))
mode = dict_extract(kwargs, 'mode', 'rb')
encoding = dict_extract(kwargs, 'encoding', None)
newline = dict_extract(kwargs, 'newline', None)
for key in ['mode', 'encoding', 'newline']:
if key in kwargs.keys():
del kwargs[key]
try:
# open file and return fd
return open(str(path), mode=mode, encoding=encoding, newline=newline)
except FileNotFoundError as e:
raise VfsError(e)
except BaseException as e:
raise VfsError(e)
def read_file(self, file_name, offset=0, length=0, **kwargs) -> BytesIO:
"""
Reads a binary file to a memory buffer
:param file_name: full file path to read
:param offset: optional start offset
:param length: optional length
:param kwargs: optional parameters
:return: BytesIO buffer
"""
path = self.volume.resolve_path(file_name)
if not path.is_file():
raise VfsError("read_file(): file '{}' does not exist or is not a file".format(file_name))
if length == 0:
length = -1
try:
with open(path, 'rb') as f:
f.seek(offset)
result = BytesIO(f.read(length))
result.seek(0)
return result
except FileNotFoundError as e:
raise VfsError(e)
except OSError as e:
raise VfsError(e)
def read_file_text(self, file_name, offset=0, length=0, **kwargs) -> StringIO:
"""
Reads a text file to a memory buffer
:param file_name: full file path to read
:param offset: optional start offset
:param length: optional length
:param kwargs: optional parameters
:return: BytesIO buffer
"""
path = self.volume.resolve_path(file_name)
if not path.is_file():
raise VfsError("read_file_text(): file '{}' does not exist or is not a file".format(file_name))
if length == 0:
length = -1
try:
with open(path, 'rb') as f:
f.seek(offset)
result = StringIO(str(f.read(length), 'utf-8'))
result.seek(0)
return result
except FileNotFoundError as e:
raise VfsError(e)
except OSError as e:
raise VfsError(e)
def url_file_get(self, file_name, expires=timedelta(hours=1), **kwargs) -> str:
raise VfsError("url_file_get(): Unsupported operation on Local Volumes")
def url_file_put(self, file_name, expires=timedelta(hours=1), **kwargs) -> str:
raise VfsError("url_file_put(): Unsupported operation on Local Volumes")
def write_file(self, buffer: BytesIO, file_name, **kwargs) -> Any:
"""
Writes a binary buffer to a file
If the file exists, it is rewritten
:param buffer: buffer to write
:param file_name: full path for destination file
:param kwargs: optional parameters
:return: None
"""
path = self.volume.resolve_path(file_name)
try:
with open(path, 'wb') as f:
buffer.seek(0)
f.write(buffer.read())
except FileNotFoundError as e:
raise VfsError(e)
except OSError as e:
raise VfsError(e)
def add_file(self, local_file, file_name, **kwargs) -> Any:
"""
Adds a local file to the volume
:param local_file: full path of source file
:param file_name: full path for destination file
:param kwargs: optional parameters
:return: None
"""
local_file = Path(local_file)
path = self.volume.resolve_path(file_name)
if not local_file.exists() or not local_file.is_file():
raise VfsError("add_file(): invalid or non-existing local file")
try:
shutil.copyfile(local_file, path, follow_symlinks=True)
except OSError as e:
raise VfsError(e)
except shutil.SameFileError as e:
raise VfsError(e)
def ls(self, path=Path('/'), **kwargs) -> List[LocalObjectInfo]:
"""
List items on a given path
Note: this is an intensive operation, because it fetches multiple details on the items
Make sure you don't use this on huge folders
:param path: path to scan
:param kwargs:
:return: List[LocalObjectInfo]
"""
result = []
path = self.volume.resolve_path(path)
try:
for item in os.listdir(path):
fpath = path / Path(item)
result.append(LocalObjectInfo(self.root, fpath, os.stat(fpath)))
return result
except OSError as e:
raise VfsError(e) | /rick_vfs-1.0.0.tar.gz/rick_vfs-1.0.0/rick_vfs/local/local.py | 0.74055 | 0.228974 | local.py | pypi |
from typing import Dict
from urllib.parse import quote_plus
import aiohttp
from jwcrypto import jwk
import python_jwt as jwt
from jwcrypto.jws import InvalidJWSObject, InvalidJWSSignature
ALLOWED_FLOWS = ["otp", "magic"]
class AuthClientError(Exception):
pass
class AppNotFound(AuthClientError):
pass
class ServerError(AuthClientError):
pass
class ValidationError(AuthClientError):
pass
class AuthenticationFailure(AuthClientError):
pass
class InvalidAuthFlow(AuthClientError):
pass
def _check_response(response):
if 200 <= response.status < 300:
return
if response.status == 404:
raise AppNotFound
if response.status == 500:
raise ServerError
if response.status == 422:
raise ValidationError
if response.status == 401 or response.status == 403:
raise AuthenticationFailure
class AuthClient:
def __init__(self, host: str, app_id: str):
"""Create the auth client objects
:param host: hostname of the auth server without 'https://'
:param app_id: The unique ID of the app to authenticate against.
"""
self.host = host
if "http" not in host:
self.host = "https://" + self.host
self.app_id = app_id
self._public_key = None
async def authenticate(self, email, flow="otp") -> str:
"""Initialize authentication flow for user email
:param email: user's email address
:param flow: which authentication flow to use. Defaults to otp
:return: Message from server
:raises AppNotFound: if the app id is invalid.
:raises ServerError: if something goes wrong on the server.
:raises ValidationError: if something is wrong with the request.
"""
if flow not in ALLOWED_FLOWS:
raise InvalidAuthFlow
return await _perform_post(
f"{self.host}/{flow}/request/{self.app_id}", {"email": email}
)
async def submit_code(self, email: str, code) -> Dict[str, str]:
"""Submit an authentication code and get a token back
:param email: User's email address.
:param code: Submitted one time password code.
:returns: Dict containing id_token and refresh_token or None if refresh
is not enabled
:raises AppNotFound: if the app id is invalid (cannot be found).
:raises ServerError: if something goes wrong on the server.
:raises ValidationError: if something is wrong with the request data.
:raises AuthenticationFailure: if the email code combination doesn't authenticate.
"""
data = await _perform_post(
f"{self.host}/otp/confirm/{self.app_id}", {"email": email, "code": code}
)
try:
return {
"id_token": data["idToken"],
"refresh_token": data.get("refreshToken"),
}
except (KeyError, TypeError):
raise AuthClientError("idToken was not in response")
async def verify_token_remote(self, id_token: str) -> Dict[str, dict]:
"""Request the server to verify an idToken for you.
:param id_token: JWT idToken from client
:returns: Dict of headers and claims from the verified JWT
:raises ValidationError: If the request was invalid in some way
:raises AuthenticationFailure: If the token could not be verified
:raises AppNotFound: Not found from server, the app does not exist.
:raises ServerError: The server experienced an error.
"""
data = await _perform_post(
f"{self.host}/token/verify/{self.app_id}", {"idToken": id_token}
)
if not data or "headers" not in data or "claims" not in data:
raise AuthenticationFailure("Data missing from response")
return data
async def refresh(self, refresh_token: str) -> str:
"""Request a new ID Token using a refresh token.
:param refresh_token: Refresh token from a client.
:returns: New ID Token
:raises ValidationError: If the request was invalid in some way
:raises AuthenticationFailure: If the token could not be verified
:raises AppNotFound: Not found from server, the app does not exist.
:raises ServerError: The server experienced an error.
"""
if not refresh_token:
raise ValueError("Refresh Token is Required")
data = await _perform_post(
f"{self.host}/token/refresh/{self.app_id}", {"refreshToken": refresh_token}
)
try:
return data["idToken"]
except (TypeError, KeyError):
raise AuthenticationFailure("ID token not in response")
async def app_info(self) -> dict:
"""Get full info about this app
:returns: dict of info about the app
:raises AppNotFound: Not found from server, the app does not exist.
:raises ServerError: The server experienced an error.
"""
async with aiohttp.ClientSession() as session:
async with session.get(f"{self.host}/app/{self.app_id}") as response:
_check_response(response)
return await response.json()
async def verify(self, id_token: str) -> Dict[str, dict]:
"""Request the server to verify an idToken for you.
:param id_token: JWT idToken from client
:returns: Dict of headers and claims from the verified JWT
:raises ValidationError: If the request was invalid in some way
:raises AuthenticationFailure: If the token could not be verified
:raises AppNotFound: Not found from server, the app does not exist.
:raises ServerError: The server experienced an error.
"""
if not id_token:
raise ValueError("ID Token is required")
if not self._public_key:
self._public_key = await self._get_public_key()
try:
headers, claims = jwt.verify_jwt(
id_token, self._public_key, allowed_algs=["ES256"]
)
except jwt._JWTError as e:
raise AuthenticationFailure(str(e))
except (
UnicodeDecodeError,
InvalidJWSObject,
InvalidJWSSignature,
ValueError,
):
raise AuthenticationFailure
return {"headers": headers, "claims": claims}
async def _get_public_key(self) -> jwk.JWK:
async with aiohttp.ClientSession() as session:
async with session.get(
f"{self.host}/app/public_key/{self.app_id}"
) as response:
_check_response(response)
data = await response.json()
return jwk.JWK(**data)
async def delete_refresh_token(self, id_token: str, refresh_token: str):
"""
Delete a refresh token (logout)
:param id_token: ID token of the user
:param refresh_token: the token to delete
:return: None
:raises ValidationError: If the request was invalid in some way
:raises AuthenticationFailure: If the token could not be verified
:raises AppNotFound: Not found from server, the app does not exist.
:raises ServerError: The server experienced an error.
"""
async with aiohttp.ClientSession() as session:
async with session.delete(
f"{self.host}/token/refresh/{self.app_id}/{quote_plus(refresh_token)}",
headers={"Authorization": f"Bearer {id_token}"},
) as response:
_check_response(response)
async def delete_all_refresh_tokens(self, id_token: str):
"""
Delete all a user's refresh tokens (logout everywhere)
:param id_token: User's ID token
:return: None
:raises ValidationError: If the request was invalid in some way
:raises AuthenticationFailure: If the token could not be verified
:raises AppNotFound: Not found from server, the app does not exist.
:raises ServerError: The server experienced an error.
"""
async with aiohttp.ClientSession() as session:
async with session.delete(
f"{self.host}/token/refresh/{self.app_id}",
headers={"Authorization": f"Bearer {id_token}"},
) as response:
_check_response(response)
async def _perform_post(url: str, body: dict):
async with aiohttp.ClientSession() as session:
async with session.post(url, json=body) as response:
_check_response(response)
return await response.json() | /ricks-auth-service-client-0.1.2.tar.gz/ricks-auth-service-client-0.1.2/ricks_auth_service_client.py | 0.83346 | 0.209915 | ricks_auth_service_client.py | pypi |
# rico: rich content to HTML as easy as Doc(df, plot)
**rico** is a Python package for creating HTML documents from rich content: dataframes, plots, images, markdown etc. It provides a high-level, easy-to-use API with reasonable defaults, as well as low-level access for better control.
[](https://github.com/e10v/rico/actions/workflows/ci.yml)
[](https://codecov.io/gh/e10v/rico)
[](https://github.com/e10v/rico/blob/main/LICENSE)
[](https://pypi.org/project/rico/)
[](https://pypi.org/project/rico/)
[](https://pypi.org/project/rico/)
## Installation
Install the core functionality:
```bash
pip install rico
```
The core functionality has no dependencies other than the Python standard library. Optional additional dependencies are required to support the following content types:
* Plots. Altair, Matplotlib Pyplot and Seaborn are currently supported.
* Markdown.
Install one or several extras to use plots or Markdown in HTML documents.
[Altair](https://altair-viz.github.io/):
```bash
pip install rico[altair]
```
[Markdown](https://python-markdown.github.io/):
```bash
pip install rico[markdown]
```
[Matplotlib Pyplot](https://matplotlib.org/):
```bash
pip install rico[pyplot]
```
[Seaborn](https://seaborn.pydata.org/):
```bash
pip install rico[seaborn]
```
Install the `rico[seaborn]` extra only if you use the [seaborn.objects](https://seaborn.pydata.org/tutorial/objects_interface.html) interface. Otherwise, install `rico[pyplot]`, since the older plotting functions return Matplotlib Pyplot Axes objects.
All extras:
```bash
pip install rico[complete]
```
## User guide
### Basic usage
**rico** provides both declarative and imperative style interfaces.
Declarative style:
```python
import pandas as pd
import rico
df = pd.DataFrame({
"a": list("CCCDDDEEE"),
"b": [2, 7, 4, 1, 2, 6, 8, 4, 7],
})
plot = df.plot.scatter(x="a", y="b")
doc = rico.Doc("Hello world!", df, plot, title="My doc")
```
Imperative style:
```python
doc = rico.Doc(title="My doc")
doc.append("Hello world!", df, plot)
```
Also imperative style:
```python
doc = rico.Doc(title="My doc")
doc.append("Hello world!")
doc.append(df)
doc.append(plot)
```
Mix-and-match:
```python
doc = rico.Doc("Hello world!", df, title="My doc")
doc.append(plot)
```
### Serialization
Serialize the document to HTML using `str(doc)`:
```python
with open("doc.html", "w") as f:
f.write(str(doc))
```
Implicit serialization:
```python
with open("doc.html", "w") as f:
print(doc, file=f)
```
Internally, `str(doc)` calls `doc.serialize()` with default parameter values. Call `doc.serialize()` to indent the HTML element tree visually:
```python
with open("doc.html", "w") as f:
f.write(doc.serialize(indent=True))
```
Set custom whitespace for indentation using the `space` parameter:
```python
with open("doc.html", "w") as f:
f.write(doc.serialize(indent=True, space=" "))
```
Remove unnecessary whitespace between tags by setting `strip` to `True`:
```python
with open("doc.html", "w") as f:
f.write(doc.serialize(strip=True))
```
Control the default behavior of `str(doc)` and `doc.serialize()` using the global options `indent_html`, `indent_space`, and `strip_html`:
```python
with open("doc.html", "w") as f, rico.config_context(indent_html=True):
f.write(str(doc))
```
The default option values are:
* `indent_html = False`,
* `indent_space = " "`,
* `strip_html = False`.
### Rich content types
**rico** automatically recognizes the following content types:
* `rico` content classes (subclasses of `rico.ContentBase`).
* Plots (Altair, Matplotlib Pyplot, Seaborn).
* Dataframes and other types with `_repr_html_` method.
* Text.
Use specific classes for plots and texts to change the default behavior:
```python
doc = rico.Doc(
rico.Text("Hello world!", mono=True), # The default value is False.
df,
rico.Plot(plot, format="png"), # The default value is "svg".
title="My doc",
)
```
Or:
```python
doc = rico.Doc(title="My doc")
doc.append_text("Hello world!", mono=True)
doc.append(df)
doc.append_plot(plot, format="png")
```
Some options can be set in the global configuration:
```python
with rico.config_context(text_mono=True, image_format="png"):
doc = rico.Doc("Hello world!", df, plot, title="My doc")
```
Use specific classes and methods for other content types:
* Images: `Image` or `Doc.append_image`.
* Code: `Code` or `Doc.append_code`.
* Markdown: `Markdown` or `Doc.append_markdown`.
* HTML tag: `Tag` or `Doc.append_tag`.
* Raw HTML: `HTML` or `Doc.append_html`.
Example:
```python
doc = rico.Doc(
rico.Markdown("## Dataframe"),
df,
rico.Tag("h2", "Plot"), # An alternative way to add a header.
plot,
rico.HTML("<h2>Code</h2>"), # Another way to add a header.
rico.Code("print('Hello world!')"),
title="My doc",
)
```
Or:
```python
doc = rico.Doc(title="My doc")
doc.append_markdown("## Dataframe")
doc.append(df)
doc.append_tag("h2", "Plot")
doc.append(plot)
doc.append_html("<h2>Code</h2>")
doc.append_code("print('Hello world!')")
```
Check the docstrings for additional parameters.
Serialize content to HTML using `str()` or `object.serialize()`:
```python
obj = rico.Tag("p", "Hello world!")
print(obj)
# <div><p>Hello world!</p></div>
print(obj.serialize(indent=True, space=" "))
# <div>
# <p>Hello world!</p>
# </div>
```
### Bootstrap, HTML classes and document layout
By default, [Bootstrap](https://getbootstrap.com/) styles are included in the document. Change the default behavior using the `bootstrap` parameter:
```python
doc = rico.Doc("Hello world!", bootstrap="full")
```
The possible values are:
* `"css"` (default) -- include only CSS.
* `"full"` -- include both the CSS and JS.
* `"none"` -- don't include Bootstrap*.
*Keep in mind that **rico** relies on Bootstrap classes and styles. For example:
* The `mono` and `wrap` parameters of the `Text` class use Bootstrap's `font-monospace` and text-wrapping utility classes, respectively.
* **rico**'s dataframe style definition uses Bootstrap variables.
Each content element is wrapped in a `<div>` container. Specify the element's container class using the `class_` parameter:
```python
print(rico.Tag("p", "Hello world!", class_="col"))
# <div class="col"><p>Hello world!</p></div>
```
All elements' containers in the document are also wrapped in a `<div>` container. Specify the document's container class using the `class_` parameter:
```python
doc = rico.Doc("Hello world!", class_="container-fluid")
```
Define the document layout using Bootstrap and `Div` class:
```python
doc = rico.Doc(rico.Div(
rico.Obj(df, class_="col"),
rico.Obj(plot, class_="col"),
class_="row row-cols-auto",
))
```
The code above creates a document with two columns, one with a dataframe and another with a plot. `Obj` is a magic class that automatically determines the content type in the same way that `Doc` and `Doc.append` do.
Another example:
```python
doc = rico.Doc(
rico.Tag("h1", "My doc"),
rico.Tag("h2", "Description"),
"This is an example of custom document layout using Bootstrap classes.",
rico.Tag("h2", "Data"),
rico.Div(
rico.Obj("Dataframe", df, class_="col"),
rico.Obj("Plot", plot, class_="col"),
class_="row row-cols-auto",
),
title="My doc",
)
```
Or:
```python
doc = rico.Doc(title="My doc")
doc.append_tag("h1", "My doc")
doc.append_tag("h2", "Description")
doc.append("This is an example of custom document layout using Bootstrap classes.")
doc.append_tag("h2", "Data")
div = rico.Div(class_="row row-cols-auto")
doc.append(div)
div.append("Dataframe", df, class_="col")
div.append("Plot", plot, class_="col")
```
Keep in mind that `obj.append(x, y)` works differently than
```python
obj.append(x)
obj.append(y)
```
The first one wraps both elements in a single `<div>` container. The second one creates a separate `<div>` container for each element.
`Obj(x, y, class_="z")` wraps both `x` and `y` elements in a single `<div>` container with `class` attribute set to `"z"`.
More on Bootstrap layout and grid system:
* [Breakpoints](https://getbootstrap.com/docs/5.3/layout/breakpoints/)
* [Containers](https://getbootstrap.com/docs/5.3/layout/containers/)
* [Grid system](https://getbootstrap.com/docs/5.3/layout/grid/)
* [Columns](https://getbootstrap.com/docs/5.3/layout/columns/)
### Styles and scripts
By default, **rico** includes the following styles in the document:
* Bootstrap CSS. Change the default behavior using the `bootstrap` parameter of the `Doc` class.
* Dataframe style. Change it by setting the `dataframe_style` global option.
Exclude dataframe style from the document by setting `dataframe_style` to `""`:
```python
with rico.config_context(dataframe_style=""):
doc = rico.Doc(df)
```
Include custom styles and scripts using the `Style` and `Script` classes:
```python
css = "https://cdn.jsdelivr.net/npm/bootstrap-icons@1/font/bootstrap-icons.css"
js = "https://cdn.jsdelivr.net/npm/jquery@3.7.0/dist/jquery.min.js"
doc = rico.Doc(
"Hello world",
extra_styles=(
rico.Style("p {color: red;}"),
rico.Style(src=css),
),
extra_scripts=(
rico.Script("alert('Hello World!');"),
rico.Script(src=js),
),
)
```
By default, external styles and scripts are included as file links. This means that these files must be available when someone opens the document. Include the contents of these files in the document using the `inline` parameter:
```python
doc = rico.Doc(
"Hello world",
extra_styles=(
rico.Style("p {color: red;}"),
rico.Style(src=css, inline=True),
),
extra_scripts=(
rico.Script("alert('Hello World!');"),
rico.Script(src=js, inline=True),
),
)
```
In the example above, the Bootstrap styles are still included as a link. Use the global options `inline_styles` and `inline_scripts` to include the contents of the style and script files in the document:
```python
with rico.config_context(inline_styles=True, inline_scripts=True):
doc = rico.Doc(
"Hello world",
extra_styles=(
rico.Style("p {color: red;}"),
rico.Style(src=css),
),
extra_scripts=(
rico.Script("alert('Hello World!');"),
rico.Script(src=js),
),
)
```
### Global configuration
Use global configuration to:
* Get or set default parameter values.
* Get or set document properties.
The following global options define the default parameter values:
| Global option | Parameter | Classes, methods, functions |
|:-----------------|:----------|:----------------------------------|
| `indent_html` | `indent` | `obj.serialize`, `serialize_html` |
| `indent_space` | `space` | `obj.serialize`, `serialize_html` |
| `strip_html` | `strip` | `obj.serialize`, `serialize_html` |
| `text_mono` | `mono` | `Text`, `obj.append_text` |
| `text_wrap` | `wrap` | `Text`, `obj.append_text` |
| `image_format` | `format` | `Plot`, `obj.append_plot` |
| `inline_styles` | `inline` | `Style` |
| `inline_scripts` | `inline` | `Script` |
The following global options define document properties:
* `meta_charset` defines a document [charset](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/meta#charset) metadata.
* `meta_viewport` defines a document [viewport](https://developer.mozilla.org/en-US/docs/Web/HTML/Viewport_meta_tag) metadata.
* `bootstrap_css` defines a link to the Bootstrap CSS file.
* `bootstrap_js` defines a link to the Bootstrap JS file.
* `dataframe_style` defines a dataframe style.
Get a dictionary with global options using `get_config` without parameters:
```python
global_config = rico.get_config()
print(global_config["indent_html"])
# False
```
Get a global option value using `get_config` with the option name as a parameter:
```python
print(rico.get_config("indent_html"))
# False
```
Set a global option value using `set_config`:
```python
rico.set_config(indent_html=True)
print(rico.get_config("indent_html"))
# True
rico.set_config(indent_html=False)
```
Set a global option value within a context using `config_context`:
```python
with rico.config_context(indent_html=True):
print(rico.get_config("indent_html"))
# True
print(rico.get_config("indent_html"))
# False
```
### Low-level control
Internally, **rico** uses the standard [xml.etree.ElementTree](https://docs.python.org/3/library/xml.etree.elementtree.html) module:
* Every content object (`Tag`, `Text`, `Div` etc.) has a `container` attribute of type `xml.etree.ElementTree.Element`. The value is a `<div>` container element.
* `Doc` objects have additional attributes `html`, `head`, and `body` of type `xml.etree.ElementTree.Element`. They represent the `<html>`, `<head>`, and `<body>` elements, respectively.
Access these attributes and use `xml.etree.ElementTree` API to gain low-level control over the document and its elements.
## Use case and alternatives
Use **rico** if you want to create an HTML document from objects created in a Python script.
With **rico** you can avoid:
* Writing data to intermediate files or a database from a script.
* Loading data into a Jupyter notebook.
* Using [nbconvert](https://nbconvert.readthedocs.io/) or similar tools.
Alternatives:
* Use [Jupyter Notebook](https://jupyter.org/) for interactive computing.
* Use [nbconvert](https://nbconvert.readthedocs.io/) or [papermill](https://papermill.readthedocs.io/) if you're processing data and creating objects for a document in a Jupyter notebook.
* Use [Quarto](https://quarto.org/) if you prefer R Markdown style notebooks and a variety of output formats.
* Use [xml.etree.ElementTree](https://docs.python.org/3/library/xml.etree.elementtree.html), [lxml](https://lxml.de/), [Yattag](https://www.yattag.org/), or [Airium](https://gitlab.com/kamichal/airium) if you need low-level control.
More on the topic:
* [Pass pandas dataframe to notebook via nbconvert](https://github.com/jupyter/nbconvert/issues/1070).
* [Could Papermill pass an in-memory dataframe to a notebook?](https://github.com/nteract/papermill/issues/406)
* "I Don’t Like Notebooks": [video](https://www.youtube.com/watch?v=7jiPeIFXb6U), [slides](https://docs.google.com/presentation/d/1n2RlMdmv1p25Xy5thJUhkKGvjtV-dkAIsUXP-AL4ffI/edit#slide=id.g362da58057_0_1).
* [The First Notebook War](https://yihui.org/en/2018/09/notebook-war/).
## Roadmap
* Create docs with [MkDocs](https://www.mkdocs.org/) and [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/).
* Support math with [KaTeX](https://katex.org/).
* Save Altair Charts in [Vega-Lite](https://vega.github.io/vega-lite/) format.
* Save SVG images in XML format.
* Support diagrams with [Mermaid.js](https://mermaid.js.org/).
* Support other plot types: [Plotly](https://plotly.com/python/), [Bokeh](https://bokeh.org/).
| /rico-0.3.0.tar.gz/rico-0.3.0/README.md | 0.41253 | 0.936749 | README.md | pypi |
class User(object):
    """A scanner/address-book user entry for a Ricoh device.

    Wraps the identity fields pulled from LDAP/Active Directory and renders
    itself as the dictionary of columns expected by the Ricoh address-book
    CSV import (column order given by ``USER_CSV_FIELDS``).
    """

    def __init__(self, first_name, last_name, email, scans_path):
        self.first_name = first_name
        self.last_name = last_name
        # Normalize a missing/falsy e-mail to "" so downstream formatting
        # never has to special-case None.
        if not email:
            email = ""
        self.email = email
        self.scans_path = scans_path

    @classmethod
    def from_ldap_dict(cls, d, scans_path):
        """Build a User from a plain LDAP attribute dict
        (keys: givenName, sn, mail)."""
        return cls(first_name=d["givenName"], last_name=d["sn"], email=d["mail"], scans_path=scans_path)

    @classmethod
    def from_ldap_adobject(cls, ad_object, scans_path):
        """Build a User from an object exposing AD attributes
        (givenName, sn, mail)."""
        return cls(first_name=ad_object.givenName, last_name=ad_object.sn, email=ad_object.mail, scans_path=scans_path)

    @property
    def name(self):
        """Display name: first name plus last-name initial; just the first
        name when the last name is empty or None."""
        try:
            return "%s %s" % (self.first_name, self.last_name[0])
        except (IndexError, TypeError):
            # IndexError: empty last name; TypeError: last name is None.
            return self.first_name

    @property
    def title_1(self):
        """Alphabetical index tab (1-10) from the first letter of the first
        name; empty string when the first name is empty or unusable."""
        try:
            letter = self.first_name[0].upper()
        except (IndexError, TypeError, AttributeError):
            # Narrowed from a bare except: empty string (IndexError),
            # None (TypeError), or a non-string element (AttributeError).
            return ""
        # Bucket boundaries match the device's address-book tab layout.
        for bound, tab in (("C", 1), ("E", 2), ("G", 3), ("I", 4), ("L", 5),
                           ("O", 6), ("R", 7), ("U", 8), ("X", 9)):
            if letter < bound:
                return tab
        return 10

    @property
    def folder_path(self):
        """UNC/Windows-style scan folder for this user: <scans_path>\\<first_name>."""
        return r"%s\%s" % (self.scans_path, self.first_name)

    @property
    def as_dict(self):
        """Render this user as the Ricoh CSV column dict.

        Every value is wrapped in brackets as the import format requires;
        "Set Email/Fax Settings" is enabled only when an e-mail is present.
        """
        return {
            "Index in ACLs and Groups": "[]",
            "Name": "[%s]" % self.name,
            "Set General Settings": "[1]",
            "Set Registration No.": "[0]",
            "Registration No.": "[]",
            "Entry Type": "[U]",
            "Phonetic Name": "[]",
            "Display Name": "[%s]" % self.name,
            "Display Priority": "[5]",
            "Set Title Settings": "[1]",
            "Title 1": "[%s]" % self.title_1,
            "Title 2": "[]",
            "Title 3": "[]",
            "Title Freq.": "[1]",
            "Set User Code Settings": "[0]",
            "User Code": "[]",
            "Set Auth. Info Settings": "[1]",
            "Device Login User Name": "[]",
            "Device Login Password": "[]",
            "Device Login Password Encoding": "[omitted]",
            "SMTP Authentication": "[0]",
            "SMTP Authentication Login User Name": "[]",
            "SMTP Authentication Login Password": "[]",
            "SMTP Authentication Password Encoding": "[omitted]",
            "Folder Authentication": "[0]",
            "Folder Authentication Login User Name": "[]",
            "Folder Authentication Login Password": "[]",
            "Folder Authentication Password Encoding": "[omitted]",
            "LDAP Authentication": "[0]",
            "LDAP Authentication Login User Name": "[]",
            "LDAP Authentication Login Password": "[]",
            "LDAP Authentication Password Encoding": "[omitted]",
            "Set Access Control Settings": "[0]",
            "Can Use B/W Copy": "[0]",
            "Can Use Single Color Copy": "[0]",
            "Can Use Two Color Copy": "[0]",
            "Can Use Full Color Copy": "[0]",
            "Can Use Auto Color Copy": "[0]",
            "Can Use B/W Print": "[0]",
            "Can Use Color Print": "[0]",
            "Can Use Scanner": "[]",
            "Can Use Fax": "[]",
            "Can Use Document Server": "[]",
            "Maximum of Print Usage Limit": "[-1]",
            "Set Email/Fax Settings": "[%s]" % (1 if self.email else 0),
            "Fax Destination": "[]",
            "Fax Line Type": "[g3]",
            "International Fax Transmission Mode": "[]",
            "E-mail Address": "[%s]" % self.email,
            "Ifax Address": "[]",
            "Ifax Enable": "[0]",
            "Direct SMTP": "[]",
            "Ifax Direct SMTP": "[]",
            "Fax Header": "[1]",
            "Label Insertion 1st Line (Selection)": "[]",
            "Label Insertion 2nd Line (String)": "[]",
            "Label Insertion 3rd Line (Standard Message)": "[0]",
            "Set Folder Settings": "[1]",
            "Folder Protocol": "[0]",
            "Folder Port No.": "[21]",
            "Folder Server Name": "[]",
            "Folder Path": "[%s]" % self.folder_path,
            "Folder Japanese Character Encoding": "[us-ascii]",
            "Set Protection Settings": "[1]",
            "Is Setting Destination Protection": "[1]",
            "Is Protecting Destination Folder": "[0]",
            "Is Setting Sender Protection": "[0]",
            "Is Protecting Sender": "[0]",
            "Sender Protection Password": "[]",
            "Sender Protection Password Encoding": "[omitted]",
            "Access Privilege to User": "[]",
            "Access Privilege to Protected File": "[]",
            "Set Group List Settings": "[1]",
            "Groups": "[]",
            "Set Counter Reset Settings": "[0]",
            "Enable Plot Counter Reset": "[]",
            "Enable Fax Counter Reset": "[]",
            "Enable Scanner Counter Reset": "[]",
            "Enable User Volume Counter Reset": "[]"
        }

    def __lt__(self, other):
        # Sort users alphabetically by first name.
        return self.first_name < other.first_name
# Column order for the Ricoh address-book CSV export. The entries must match
# the keys produced by ``User.as_dict`` exactly — presumably this tuple is
# used as the CSV header/fieldnames by the export code (TODO confirm against
# the caller).
USER_CSV_FIELDS = (
    "Index in ACLs and Groups",
    "Name",
    "Set General Settings",
    "Set Registration No.",
    "Registration No.",
    "Entry Type",
    "Phonetic Name",
    "Display Name",
    "Display Priority",
    "Set Title Settings",
    "Title 1",
    "Title 2",
    "Title 3",
    "Title Freq.",
    "Set User Code Settings",
    "User Code",
    "Set Auth. Info Settings",
    "Device Login User Name",
    "Device Login Password",
    "Device Login Password Encoding",
    "SMTP Authentication",
    "SMTP Authentication Login User Name",
    "SMTP Authentication Login Password",
    "SMTP Authentication Password Encoding",
    "Folder Authentication",
    "Folder Authentication Login User Name",
    "Folder Authentication Login Password",
    "Folder Authentication Password Encoding",
    "LDAP Authentication",
    "LDAP Authentication Login User Name",
    "LDAP Authentication Login Password",
    "LDAP Authentication Password Encoding",
    "Set Access Control Settings",
    "Can Use B/W Copy",
    "Can Use Single Color Copy",
    "Can Use Two Color Copy",
    "Can Use Full Color Copy",
    "Can Use Auto Color Copy",
    "Can Use B/W Print",
    "Can Use Color Print",
    "Can Use Scanner",
    "Can Use Fax",
    "Can Use Document Server",
    "Maximum of Print Usage Limit",
    "Set Email/Fax Settings",
    "Fax Destination",
    "Fax Line Type",
    "International Fax Transmission Mode",
    "E-mail Address",
    "Ifax Address",
    "Ifax Enable",
    "Direct SMTP",
    "Ifax Direct SMTP",
    "Fax Header",
    "Label Insertion 1st Line (Selection)",
    "Label Insertion 2nd Line (String)",
    "Label Insertion 3rd Line (Standard Message)",
    "Set Folder Settings",
    "Folder Protocol",
    "Folder Port No.",
    "Folder Server Name",
    "Folder Path",
    "Folder Japanese Character Encoding",
    "Set Protection Settings",
    "Is Setting Destination Protection",
    "Is Protecting Destination Folder",
    "Is Setting Sender Protection",
    "Is Protecting Sender",
    "Sender Protection Password",
    "Sender Protection Password Encoding",
    "Access Privilege to User",
    "Access Privilege to Protected File",
    "Set Group List Settings",
    "Groups",
    "Set Counter Reset Settings",
    "Enable Plot Counter Reset",
    "Enable Fax Counter Reset",
    "Enable Scanner Counter Reset",
    "Enable User Volume Counter Reset"
)
import os
import argparse
import sys
import logging
try:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
except ImportError:
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
# Root logging configuration: timestamped records to stdout; the level can be
# overridden through the LOGLEVEL environment variable (default INFO).
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)
logger = logging.getLogger(__name__)
def freeze_model(model_folder,
                 output,
                 output_node_names="o_energy,o_forces"):
    """Freeze the latest TensorFlow checkpoint into a single .pb graph file.

    Restores the newest checkpoint found in ``model_folder``, converts every
    variable that the requested output nodes depend on into constants, and
    writes the frozen GraphDef next to the checkpoint files.

    Args:
        model_folder: Directory containing the TF checkpoint files.
        output: File name for the frozen graph (written inside the
            checkpoint directory).
        output_node_names: Comma-separated graph output node names to keep;
            everything not needed to compute them is dropped.
    """
    # Locate the newest checkpoint recorded in the folder.
    checkpoint = tf.train.get_checkpoint_state(model_folder)
    input_checkpoint = checkpoint.model_checkpoint_path
    # Write the frozen graph next to the checkpoint files (os.path instead
    # of hand-rolled '/'-string surgery).
    output_graph = os.path.join(os.path.dirname(input_checkpoint), output)
    # clear_devices lets TensorFlow choose device placement when the frozen
    # graph is later loaded on a different machine.
    saver = tf.train.import_meta_graph(
        input_checkpoint + '.meta', clear_devices=True)
    # Protobuf definition of the (still variable-bearing) graph.
    graph = tf.get_default_graph()
    input_graph_def = graph.as_graph_def()
    with tf.Session() as sess:
        # Load the trained weights into the session.
        saver.restore(sess, input_checkpoint)
        # Replace variables with constants, keeping only the subgraph the
        # requested output nodes depend on.
        output_graph_def = graph_util.convert_variables_to_constants(
            sess,
            input_graph_def,
            output_node_names.split(",")
        )
        # Serialize and dump the frozen graph to disk.
        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(output_graph_def.SerializeToString())
        # Lazy %-style args: the message is only formatted if DEBUG is on.
        logger.debug("%d ops in the final graph.", len(output_graph_def.node))
if __name__ == '__main__':
    # Script entry point: freeze the checkpoint in the given folder.
    frozen_nodes_default = "o_energy,o_forces"
    cli = argparse.ArgumentParser()
    cli.add_argument("-d", "--folder", type=str, default=".",
                     help="path to checkpoint folder")
    cli.add_argument("-o", "--output", type=str, default="frozen_model.pb",
                     help="name of graph, will output to the checkpoint folder")
    cli.add_argument("-n", "--nodes", type=str, default=frozen_nodes_default,
                     help="the frozen nodes, defaults is " + frozen_nodes_default)
    parsed = cli.parse_args()
    freeze_model(parsed.folder, parsed.output, parsed.nodes)
from rid.task.task import Task
from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Union, Sequence
import numpy as np
from rid.constants import (
gmx_conf_name,
lmp_conf_name,
gmx_top_name,
lmp_input_name,
gmx_mdp_name,
plumed_input_name,
)
from rid.utils import read_txt
from rid.common.mol import get_distance_from_atomid
from rid.common.gromacs import make_md_mdp_string
from rid.common.plumed import make_deepfe_plumed, make_restraint_plumed, make_constraint_plumed, get_cv_name,make_distance_list_from_file
class TaskBuilder(ABC):
    """Abstract interface for builders that assemble a simulation ``Task``
    (a bundle of input files, e.g. sampler configs plus a PLUMED input)."""
    @abstractmethod
    def build(self):
        """Assemble and return the populated ``Task`` object."""
        pass
class EnhcMDTaskBuilder(TaskBuilder):
    """Builder for an enhanced-sampling (exploration) MD task.

    Produces the sampler input files (GROMACS conf/top/mdp or a LAMMPS
    configuration) together with a DeepFE PLUMED input that biases the
    selected collective variables using the supplied neural-network models.

    Args:
        conf: Path to the initial configuration file.
        topology: Path to the topology file (used by the GROMACS sampler).
        exploration_config: Sampler settings; must contain "output_freq".
        cv_file: Optional PLUMED CV definition file(s).
        selected_resid: Residue ids selecting torsion CVs.
        selected_atomid: Atom-id pairs selecting distance CVs.
        sampler_type: "gmx" or "lmp".
        trust_lvl_1: Lower trust level of the DeepFE bias
            (must be < ``trust_lvl_2``).
        trust_lvl_2: Upper trust level of the DeepFE bias.
        model_list: Frozen model graph files; defaults to ["graph.pb"].
        plumed_output: Name of the PLUMED output file.
        cv_mode: CV type, e.g. "torsion".
        wall_list: Optional PLUMED wall definitions.
        iteration: Optional iteration tag forwarded to the PLUMED input.
    """
    def __init__(
        self,
        conf: str,
        topology: Optional[str],
        exploration_config: Dict,
        cv_file: Optional[List[str]] = None,
        selected_resid: Optional[List[int]] = None,
        selected_atomid: Optional[List[int]] = None,
        sampler_type: str = "gmx",
        trust_lvl_1: float = 1.0,
        trust_lvl_2: float = 2.0,
        model_list: Optional[List[str]] = None,
        plumed_output: str = "plm.out",
        cv_mode: str = "torsion",
        wall_list: Optional[List[str]] = None,
        iteration: Optional[str] = None
    ):
        super().__init__()
        self.conf = conf
        self.topology = topology
        self.exploration_config = exploration_config
        self.stride = self.exploration_config["output_freq"]
        self.cv_file = cv_file
        self.selected_resid = selected_resid
        self.selected_atomid = selected_atomid
        self.sampler_type = sampler_type
        self.trust_lvl_1 = trust_lvl_1
        self.trust_lvl_2 = trust_lvl_2
        # NOTE(review): assert is stripped under `python -O`; raising
        # ValueError would survive optimized runs.
        assert self.trust_lvl_1 < self.trust_lvl_2
        # Avoid a shared mutable default argument: build the default list
        # per instance rather than once at function definition time.
        if model_list is None:
            model_list = ["graph.pb"]
        self.model_list = model_list
        self.plumed_output = plumed_output
        self.cv_mode = cv_mode
        self.wall_list = wall_list
        self.iteration = iteration
        self.task = Task()
        # CV names are resolved eagerly so get_cv_dim() works before build().
        self.cv_names = get_cv_name(
            conf=self.conf, cv_file=self.cv_file,
            selected_resid=self.selected_resid,
            selected_atomid=self.selected_atomid,
            stride=self.stride,
            mode=self.cv_mode
        )

    def build(self) -> Task:
        """Assemble sampler and PLUMED files into the Task and return it."""
        task_dict = {}
        if self.sampler_type == "gmx":
            task_dict.update(self.build_gmx())
        elif self.sampler_type == "lmp":
            task_dict.update(self.build_lmp())
        task_dict.update(self.build_plumed())
        for fname, fconts in task_dict.items():
            self.task.add_file(fname, fconts)
        self.task.add_property({"num_models": len(self.model_list)})
        return self.task

    def get_task(self):
        """Return the (possibly not yet built) Task object."""
        return self.task

    def build_gmx(self):
        """File dict with the GROMACS conf/topology/mdp inputs."""
        return build_gmx_dict(self.conf, self.topology, self.exploration_config)

    def build_lmp(self):
        """File dict with the LAMMPS configuration input."""
        return build_lmp_dict(self.conf)

    def build_plumed(self):
        """File dict with the DeepFE-biased PLUMED input."""
        return build_plumed_dict(
            conf=self.conf, cv_file=self.cv_file, selected_resid=self.selected_resid,
            selected_atomid=self.selected_atomid,
            trust_lvl_1=self.trust_lvl_1, trust_lvl_2=self.trust_lvl_2,
            model_list=self.model_list, stride=self.stride, output=self.plumed_output,
            mode=self.cv_mode, wall_list=self.wall_list, iteration=self.iteration
        )

    def get_cv_dim(self):
        """Number of collective variables resolved for this task."""
        return len(self.cv_names)
class RestrainedMDTaskBuilder(TaskBuilder):
    """Builder for a restrained-MD (labeling) task: sampler input files plus
    a PLUMED input that restrains the selected CVs with harmonic springs
    (strength ``kappa``, centers ``at``)."""

    def __init__(
        self,
        conf: str,
        topology: Optional[str],
        label_config: Dict,
        cv_file: Optional[List[str]] = None,
        selected_resid: Optional[List[int]] = None,
        selected_atomid: Optional[List[int]] = None,
        sampler_type: str = "gmx",
        kappa: Union[int, float, List[Union[int, float]]] = 0.5,
        at: Union[int, float, List[Union[int, float]]] = 1.0,
        plumed_output: str = "plm.out",
        cv_mode: str = "torsion"
    ):
        super().__init__()
        # Plain parameter capture; file generation happens in build().
        self.conf = conf
        self.topology = topology
        self.label_config = label_config
        self.stride = label_config["output_freq"]
        self.cv_file = cv_file
        self.selected_resid = selected_resid
        self.selected_atomid = selected_atomid
        self.sampler_type = sampler_type
        self.kappa = kappa
        self.at = at
        self.plumed_output = plumed_output
        self.cv_mode = cv_mode
        self.task = Task()

    def build(self) -> Task:
        """Collect sampler and PLUMED files into the Task and return it."""
        if self.sampler_type == "gmx":
            files = self.build_gmx()
        elif self.sampler_type == "lmp":
            files = self.build_lmp()
        else:
            files = {}
        files.update(self.build_plumed())
        for fname, fconts in files.items():
            self.task.add_file(fname, fconts)
        return self.task

    def get_task(self):
        """Return the underlying Task object."""
        return self.task

    def build_gmx(self):
        """GROMACS conf/topology/mdp file dict."""
        return build_gmx_dict(self.conf, self.topology, self.label_config)

    def build_lmp(self):
        """LAMMPS configuration file dict."""
        return build_lmp_dict(self.conf)

    def build_plumed(self):
        """Restraint PLUMED input file dict."""
        return build_plumed_restraint_dict(
            conf=self.conf,
            cv_file=self.cv_file,
            selected_resid=self.selected_resid,
            selected_atomid=self.selected_atomid,
            kappa=self.kappa,
            at=self.at,
            stride=self.stride,
            output=self.plumed_output,
            mode=self.cv_mode,
        )
class ConstrainedMDTaskBuilder(TaskBuilder):
    """Builder for a constrained-MD (labeling) task: sampler input files with
    distance constraints injected into the topology, plus a PLUMED input
    that monitors the constrained distances."""

    def __init__(
        self,
        conf: str,
        topology: Optional[str],
        label_config: Dict,
        cv_file: Optional[List[str]] = None,
        selected_atomid: Optional[List[int]] = None,
        sampler_type: str = "gmx",
        plumed_output: str = "plm.out",
        cv_mode: str = "distance"
    ):
        super().__init__()
        # Parameter capture only; file generation is deferred to build().
        self.conf = conf
        self.topology = topology
        self.label_config = label_config
        self.stride = label_config["output_freq"]
        self.cv_file = cv_file
        self.selected_atomid = selected_atomid
        self.sampler_type = sampler_type
        self.plumed_output = plumed_output
        self.cv_mode = cv_mode
        self.task = Task()

    def build(self) -> Task:
        """Collect sampler and PLUMED files into the Task and return it."""
        if self.sampler_type == "gmx":
            files = self.build_gmx()
        elif self.sampler_type == "lmp":
            files = self.build_lmp()
        else:
            files = {}
        files.update(self.build_plumed())
        for fname, fconts in files.items():
            self.task.add_file(fname, fconts)
        return self.task

    def get_task(self):
        """Return the underlying Task object."""
        return self.task

    def build_gmx(self):
        """GROMACS files with constraints written into the topology."""
        return build_gmx_constraint_dict(self.conf, self.topology, self.label_config, self.selected_atomid)

    def build_lmp(self):
        """LAMMPS configuration file dict."""
        return build_lmp_dict(self.conf)

    def build_plumed(self):
        """Constraint-monitoring PLUMED input file dict."""
        return build_plumed_constraint_dict(
            conf=self.conf,
            cv_file=self.cv_file,
            selected_atomid=self.selected_atomid,
            stride=self.stride,
            output=self.plumed_output,
            mode=self.cv_mode,
        )
def build_gmx_dict(
    conf: str,
    topology: str,
    gmx_config: Dict
):
    """Return the GROMACS input files as a dict mapping filename to a
    (contents, file-mode) tuple: configuration, topology, and the .mdp
    rendered from ``gmx_config``."""
    return {
        gmx_conf_name: (read_txt(conf), "w"),
        gmx_top_name: (read_txt(topology), "w"),
        gmx_mdp_name: (make_md_mdp_string(gmx_config), "w"),
    }
def build_gmx_constraint_dict(
        conf: str,
        topology: str,
        gmx_config: Dict,
        selected_atomid: Optional[List[int]] = None
    ):
    """Return GROMACS input files with distance constraints injected.

    The topology is copied line by line; after every line containing the
    word "constraints", one funct-2 constraint entry is inserted per
    selected atom pair, with the target distance measured from ``conf``.

    Note: despite the annotation, ``selected_atomid`` holds atom-id *pairs*
    (each entry is indexable at [0] and [1]) — TODO confirm with callers.
    """
    gmx_task_files = {}
    gmx_task_files[gmx_conf_name] = (read_txt(conf), "w")
    # Reference distances for each selected pair, keyed "atom1 atom2".
    cv_info = get_distance_from_atomid(conf, selected_atomid)
    # Accumulate lines and join once instead of quadratic `+=` string
    # concatenation over the whole topology.
    lines = []
    with open(topology, "r") as f:
        for line in f:
            lines.append(line)
            if "constraints" in line:
                print("constrained md operating normally!\n")
                lines.append("; atom1 atom2 funct dis\n")
                for pair in selected_atomid:
                    atom_i, atom_j = pair[0], pair[1]
                    dist = cv_info["%s %s" % (atom_i, atom_j)]
                    lines.append("%s %s 2 %s\n" % (atom_i, atom_j, dist))
    gmx_task_files[gmx_top_name] = ("".join(lines), "w")
    mdp_string = make_md_mdp_string(gmx_config)
    gmx_task_files[gmx_mdp_name] = (mdp_string, "w")
    return gmx_task_files
def build_lmp_dict(
    conf: str
):
    """Return the LAMMPS input files as a dict mapping filename to a
    (contents, file-mode) tuple; only the configuration file is needed."""
    return {lmp_conf_name: (read_txt(conf), "w")}
def build_plumed_dict(
    conf: Optional[str] = None,
    cv_file: Optional[str] = None,
    selected_resid: Optional[List[int]] = None,
    selected_atomid: Optional[List[int]] = None,
    trust_lvl_1: float = 1.0,
    trust_lvl_2: float = 2.0,
    model_list: Optional[List[str]] = None,
    stride: int = 100,
    output: str = "plm.out",
    mode: str = "torsion",
    wall_list: Optional[List[str]] = None,
    iteration: Optional[str] = None
):
    """Return the DeepFE-biased PLUMED input as a dict mapping filename to a
    (contents, file-mode) tuple. All arguments are forwarded to
    ``make_deepfe_plumed``."""
    # Avoid a shared mutable default argument for model_list.
    if model_list is None:
        model_list = ["graph.pb"]
    plm_content = make_deepfe_plumed(
        conf=conf, cv_file=cv_file, selected_resid=selected_resid,
        selected_atomid=selected_atomid,
        trust_lvl_1=trust_lvl_1, trust_lvl_2=trust_lvl_2,
        model_list=model_list, stride=stride,
        output=output, mode=mode, wall_list=wall_list, iteration=iteration
    )
    return {plumed_input_name: (plm_content, "w")}
def build_plumed_restraint_dict(
    conf: Optional[str] = None,
    cv_file: Optional[str] = None,
    selected_resid: Optional[List[int]] = None,
    selected_atomid: Optional[List[int]] = None,
    kappa: Union[int, float, Sequence, np.ndarray] = 0.5,
    at: Union[int, float, Sequence, np.ndarray] = 1.0,
    stride: int = 100,
    output: str = "plm.out",
    mode: str = "torsion"
):
    """Return the restraint PLUMED input as a dict mapping filename to a
    (contents, file-mode) tuple.

    When atom-id pairs are given, the restraint centers ``at`` are replaced
    by the pair distances measured from ``conf``.
    """
    if selected_atomid is not None:
        # cv_info maps "atom1 atom2" -> distance measured in `conf`.
        cv_info = get_distance_from_atomid(conf, selected_atomid)
        at = [cv_info["%s %s" % (pair[0], pair[1])] for pair in selected_atomid]
    plm_content = make_restraint_plumed(
        conf=conf, cv_file=cv_file, selected_resid=selected_resid,
        selected_atomid=selected_atomid,
        kappa=kappa, at=at, stride=stride,
        output=output, mode=mode
    )
    return {plumed_input_name: (plm_content, "w")}
def build_plumed_constraint_dict(
    conf: Optional[str] = None,
    cv_file: Optional[str] = None,
    selected_atomid: Optional[List[int]] = None,
    stride: int = 100,
    output: str = "plm.out",
    mode: str = "distance"
):
    """Return the constraint-monitoring PLUMED input as a dict mapping
    filename to a (contents, file-mode) tuple. Arguments are forwarded to
    ``make_constraint_plumed``."""
    content = make_constraint_plumed(
        conf=conf, cv_file=cv_file, selected_atomid=selected_atomid,
        stride=stride, output=output, mode=mode
    )
    return {plumed_input_name: (content, "w")}
import os, sys
from typing import Union, List, Optional
import logging
import numpy as np
import sklearn.cluster as skcluster
from matplotlib import pyplot as plt
from rid.constants import cluster_fig
from pathlib import Path
# Root logging configuration: timestamped records to stdout; the level can be
# overridden through the LOGLEVEL environment variable (default INFO).
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)
logger = logging.getLogger(__name__)
class Cluster:
    """Agglomerative clustering of collective-variable (CV) samples.

    Frames are clustered by a weighted, periodicity-aware distance in CV
    space; the distance threshold can be tuned until the cluster count
    falls inside a requested window.

    Args:
        cvs: CV samples, shape (n_frames, n_cv).
        threshold: Initial distance threshold for agglomerative clustering.
        task_name: Task directory name used when writing cluster output.
        angular_mask: 1 marks periodic (angular) CVs; defaults to all zeros.
        weights: Per-CV weights for the distance metric; defaults to ones.
        max_search_step: Max attempts when tuning the threshold.
        max_selection: Max number of frames handed to the selector.
    """
    def __init__(
            self,
            cvs: Union[np.ndarray, List],
            threshold: float,
            task_name: str,
            angular_mask: Optional[Union[np.ndarray, List]] = None,
            weights: Optional[Union[np.ndarray, List]] = None,
            max_search_step: int = 500,
            max_selection: int = 1000
        ):
        # Number of CV components (columns). The previous code used
        # `cvs[1]` here, which is the *second frame*, not the dimension.
        n_cv = np.shape(cvs)[1]
        if angular_mask is None:
            angular_mask = np.zeros(shape=(n_cv,))
        if weights is None:
            weights = np.ones(shape=(n_cv,))
        self.angular_mask = angular_mask
        self.weights = weights
        self.task_name = task_name
        self.max_search_step = max_search_step
        self.threshold = threshold
        self.cvs = cvs
        # Multiplicative factors applied while searching for a threshold
        # that yields a cluster count inside the requested window.
        self.enlarge_coeff = 1.05
        self.reduce_coeff = 0.95
        self.cls_sel = None
        self.max_selection = max_selection

    def make_threshold(self, numb_cluster_lower, numb_cluster_upper):
        """Tune ``self.threshold`` until the cluster count lies within
        [numb_cluster_lower, numb_cluster_upper], giving up after
        ``max_search_step`` attempts. Returns the final threshold."""
        current_iter = 0
        logger.info(f"set numb_cluster_upper to {numb_cluster_upper}")
        logger.info(f"set numb_cluster_lower to {numb_cluster_lower}")
        # The second half of this message used to be a dangling (non-f)
        # string literal on its own line — a no-op; joined into the message.
        assert numb_cluster_lower < numb_cluster_upper, (
            f"expect numb_cluster_upper > numb_cluster_lower, "
            f"got {numb_cluster_upper} < {numb_cluster_lower}"
        )
        while current_iter < self.max_search_step:
            logger.info(f"making threshold attempt {current_iter}")
            cls_sel = sel_from_cluster(
                self.cvs, self.threshold, Path(self.task_name), angular_mask=self.angular_mask,
                weights=self.weights, max_selection=self.max_selection)
            test_numb_cluster = len(set(cls_sel))
            if test_numb_cluster < numb_cluster_lower:
                # Too few clusters: a smaller distance threshold splits more.
                self.threshold = self.threshold * self.reduce_coeff
            elif test_numb_cluster > numb_cluster_upper:
                self.threshold = self.threshold * self.enlarge_coeff
            else:
                break
            logger.info(f"set threshold to {self.threshold}, get {test_numb_cluster} clusters.")
            current_iter += 1
        # Cache the last selection so get_cluster_selection() can reuse it.
        self.cls_sel = cls_sel
        return self.threshold

    def get_cluster_selection(self):
        """Return the cluster-based frame selection, computing it on first
        use and caching the result."""
        if self.cls_sel is None:
            self.cls_sel = sel_from_cluster(
                self.cvs, self.threshold, Path(self.task_name), angular_mask=self.angular_mask,
                weights=self.weights, max_selection=self.max_selection)
        return self.cls_sel
def cv_dist(cv1, cv2, angular_mask, weights):
    """Weighted Euclidean distance between two CV vectors.

    Components flagged by ``angular_mask == 1`` are treated as periodic:
    their differences are wrapped once by +/- 2*pi before the norm.
    """
    delta = cv1 - cv2
    periodic = np.array(angular_mask) == 1
    wrapped = delta[periodic]
    wrapped = np.where(wrapped < -np.pi, wrapped + 2 * np.pi, wrapped)
    wrapped = np.where(wrapped > np.pi, wrapped - 2 * np.pi, wrapped)
    delta[periodic] = wrapped
    return np.linalg.norm(delta * weights)
def mk_dist(cv, angular_mask, weights):
    """Symmetric pairwise CV-distance matrix over all frames in ``cv``.

    Only the upper triangle is computed; the lower triangle is mirrored.
    """
    n_frames = cv.shape[0]
    dmat = np.zeros([n_frames, n_frames])
    for row in range(n_frames):
        for col in range(row + 1, n_frames):
            d = cv_dist(cv[row], cv[col], angular_mask, weights)
            dmat[row][col] = d
            dmat[col][row] = d
    return dmat
def mk_cluster(dist, distance_threshold):
    """Label frames via average-linkage agglomerative clustering.

    ``dist`` is a precomputed pairwise distance matrix; the number of
    clusters is determined by ``distance_threshold`` rather than fixed.
    """
    logger.info("clustering ...")
    # NOTE(review): 'affinity' was renamed to 'metric' in scikit-learn >= 1.2;
    # confirm the pinned scikit-learn version supports this keyword.
    model = skcluster.AgglomerativeClustering(
        n_clusters=None,
        linkage='average',
        affinity='precomputed',
        distance_threshold=distance_threshold,
    )
    model.fit(dist)
    return model.labels_
def chooseClusterCenter(dist: np.ndarray, conf_ids: list):
    """Pick the medoid of a cluster.

    Returns, as a one-element list, the member of ``conf_ids`` with the
    smallest root-sum-square distance to all cluster members. Ties keep the
    earliest id, matching the original loop's strict-less-than update.

    Parameters
    ----------
    dist : np.ndarray
        Full pairwise distance matrix over all frames.
    conf_ids : list
        Frame indices belonging to one cluster.
    """
    def _rss(frame_id):
        # Root-sum-square of distances from frame_id to every cluster member.
        row = dist[frame_id]
        return np.sqrt(sum(row[member] * row[member] for member in conf_ids))

    # min() keeps the first minimum, preserving the original tie-breaking.
    return [min(conf_ids, key=_rss)]
def sel_from_cluster(cvs, threshold, task_path, angular_mask=None, weights=None, max_selection=1000):
    """Cluster CV frames and return one representative frame index per cluster.

    Clusters are ordered largest-first, so truncation at ``max_selection``
    keeps the most populated ones. A scatter plot of cluster labels along
    the trajectory is saved to ``task_path / cluster_fig`` as a side effect.

    Returns
    -------
    np.ndarray
        Integer frame indices of the selected cluster centers (or ``cvs``
        itself when there are fewer than two frames).
    """
    # Degenerate input: nothing to cluster.
    if len(cvs) <= 1:
        return cvs
    weights = np.array(weights)
    dist = mk_dist(cvs, angular_mask, weights)
    labels = mk_cluster(dist, threshold)
    # plot clustering distributions
    xlist = [i for i in range(len(labels))]
    plt.figure(figsize=(10, 8), dpi=100)
    plt.xlabel("trajectory frames")
    plt.ylabel("cluster index")
    plt.title("cluster distributions along trajectories")
    plt.scatter(xlist, labels, s = 5)
    plt.savefig(task_path.joinpath(cluster_fig))
    # make cluster map: cluster label -> list of member frame indices
    _cls_map = []
    for _ in range(len(set(labels))):
        _cls_map.append([])
    for idx, label in enumerate(labels):
        _cls_map[label].append(idx)
    cls_map = []
    for clust in _cls_map:
        cls_map.append((clust, len(clust)))
    # Sort clusters by population, largest first.
    cls_map = sorted(cls_map, key=lambda x: x[1], reverse=True)
    # randomly select from clusters
    cls_sel = []
    np.random.seed(seed=None)
    for cluster, _ in cls_map:
        # _ret = np.random.choice(cluster, 1)
        # Deterministic medoid choice replaced the random pick above.
        _ret = chooseClusterCenter(dist, cluster)
        cls_sel.append(_ret[0])
    if len(cls_sel) > max_selection:
        cls_sel = cls_sel[:max_selection]
        logger.info("selection number is beyond max selection, adjust to the max number.")
    return np.array(cls_sel, dtype=int) | /rid_kit-0.6.2-py3-none-any.whl/rid/select/cluster.py | 0.6705 | 0.352648 | cluster.py | pypi |
from typing import List, Optional, Dict
from pathlib import Path
import numpy as np
from dflow.python import (
OP,
OPIO,
OPIOSign,
Artifact,
Parameter,
BigParameter
)
from rid.utils import save_txt, set_directory
from rid.constants import sel_gro_name, sel_lmp_name, cv_init_label, model_devi_name, model_devi_precision, sel_ndx_name
from rid.select.conf_select import select_from_devi
from rid.common.mol import slice_xtc
from rid.common.mol_dpdata import slice_dump
from rid.select.model_devi import make_std
import json
class RunSelect(OP):
    """
    `RunSelect` calculates model deviations for each chosen representative cluster frame from `PrepSelect` and selects
    the ones with high uncertainty from them.
    As RiD-kit is based on `Gromacs`, please provide trajectories in `.xtc` format (single-point precision) and NN models in
    `.pb` format.
    Warning: We highly recommend use `slice_mode = "gmx"` due to the inconsistent format convention of `mdtraj` that may lead to topology
    mismatch of next label steps. If you use `mdtraj` mode, please make sure the name conventions of molecules in Gromacs topology
    files satisfy PDB standards. This could happen in some old Gromacs versions.
    """
    @classmethod
    def get_input_sign(cls):
        return OPIOSign(
            {
                "task_name": str,
                "cluster_selection_index": Artifact(Path),
                "cluster_selection_data": Artifact(Path),
                "models": Artifact(List[Path], optional=True),
                "trust_lvl_1": float,
                "trust_lvl_2": float,
                "xtc_traj": Artifact(Path),
                "topology": Artifact(Path),
                "label_config": BigParameter(Dict),
                "type_map": Parameter(Optional[List], default=[]),
                "dt": Parameter(Optional[float], default=None),
                "output_freq": Parameter(Optional[float], default=None),
                "slice_mode": Parameter(str, default="gmx")
            }
        )
    @classmethod
    def get_output_sign(cls):
        return OPIOSign(
            {
                "selected_confs": Artifact(List[Path], archive = None),
                "selected_cv_init": Artifact(List[Path], archive = None),
                "model_devi": Artifact(Path, optional=True, archive = None),
                "selected_indices": Artifact(Path, archive = None),
                "selected_conf_tags": Artifact(Path, archive= None)
            }
        )
    @OP.exec_sign_check
    def execute(
        self,
        op_in: OPIO,
    ) -> OPIO:
        r"""Execute the OP.
        Parameters
        ----------
        op_in : dict
            Input dict with components:
            - `task_name`: str,
            - `cluster_selection_index`: Artifact(Path),
            - `cluster_selection_data`: Artifact(Path),
            - `models`: Artifact(List[Path], optional=True),
            - `trust_lvl_1`: float,
            - `trust_lvl_2`: float,
            - `xtc_traj`: Artifact(Path),
            - `topology`: Artifact(Path),
            - `dt`: Parameter(Optional[float], default=None),
            - `slice_mode`: Parameter(str, default=`gmx`)
        Returns
        -------
            Output dict with components:
            - `selected_confs`: (`Artifact(Path)`) Selected conformation files by model deviations from representative frames of clusters.
            - `selected_cv_init`: (`Artifact(Path)`) Collective variables of selected conformations (`selected_confs`).
            - `model_devi`: (`Artifact(Path)`) Model deviation values of selected conformation files (`selected_confs`) .
            - `selected_indices`: (`Artifact(Path)`) Indices of selected conformation files (`selected_confs`) in trajectories.
        """
        # Frame indices and CV values of the cluster representatives chosen by PrepSelect.
        cls_sel_idx = np.load(op_in["cluster_selection_index"])
        cls_sel_data = np.load(op_in["cluster_selection_data"])
        task_path = Path(op_in["task_name"])
        task_path.mkdir(exist_ok=True, parents=True)
        # The task name encodes the walker index as a plain integer string.
        walker_idx = int(op_in["task_name"])
        with set_directory(task_path):
            if op_in["models"] is None:
                # No trained models yet (e.g. first iteration): keep every
                # representative and record an empty deviation file.
                save_txt("cls_"+model_devi_name, [], fmt=model_devi_precision)
                _selected_idx = np.array([ii for ii in range(len(cls_sel_idx))], dtype=int)
            else:
                # Rank representatives by model deviation and keep those
                # above the trust level (uncertain configurations).
                stds = make_std(cls_sel_data, models=op_in["models"])
                save_txt("cls_"+model_devi_name, stds, fmt=model_devi_precision)
                _selected_idx = select_from_devi(stds, op_in["trust_lvl_1"])
            # Map local selection back to trajectory frame indices.
            sel_idx = cls_sel_idx[_selected_idx]
            np.save(sel_ndx_name, sel_idx)
            sel_data = cls_sel_data[_selected_idx]
            if op_in["slice_mode"] == "gmx":
                assert op_in["dt"] is not None, "Please provide time step to slice trajectory."
                for ii, sel in enumerate(sel_idx):
                    # gmx-style slicing addresses frames by simulation time:
                    # frame index * dt * output_freq.
                    time = sel * op_in["dt"] * op_in["output_freq"]
                    slice_xtc(xtc=op_in["xtc_traj"], top=op_in["topology"],
                            walker_idx=walker_idx,selected_idx=time, output=sel_gro_name.format(walker=walker_idx,idx=sel), style="gmx")
            elif op_in["slice_mode"] == "mdtraj":
                slice_xtc(xtc=op_in["xtc_traj"], top=op_in["topology"],
                        walker_idx = walker_idx, selected_idx=sel_idx, output=sel_gro_name, style="mdtraj")
            elif op_in["slice_mode"] == "dpdata":
                # dpdata slicing produces gro or lmp files depending on the
                # labeling backend.
                if op_in["label_config"]["type"] == "gmx":
                    slice_dump(dump=op_in["xtc_traj"],walker_idx = walker_idx,selected_idx=sel_idx, output=sel_gro_name, type_map = op_in["type_map"], style="dpdata")
                elif op_in["label_config"]["type"] == "lmp":
                    slice_dump(dump=op_in["xtc_traj"],walker_idx = walker_idx,selected_idx=sel_idx, output=sel_lmp_name, type_map = op_in["type_map"],style="dpdata")
                else:
                    raise ValueError("Invalid labeling type, only support gmx and lmp")
            else:
                raise RuntimeError("Unknown Style for Slicing Trajectory.")
            conf_list = []
            cv_init_list = []
            # Map each written conformation file to a "<task_name>_<frame>" tag.
            conf_tags = {}
            for ii, sel in enumerate(sel_idx):
                if op_in["slice_mode"] == "dpdata":
                    if op_in["label_config"]["type"] == "gmx":
                        conf_list.append(task_path.joinpath(sel_gro_name.format(walker=walker_idx,idx=sel)))
                        conf_tags[sel_gro_name.format(walker = walker_idx,idx=sel)] = f"{op_in['task_name']}_{sel}"
                    elif op_in["label_config"]["type"] == "lmp":
                        conf_list.append(task_path.joinpath(sel_lmp_name.format(walker=walker_idx,idx=sel)))
                        conf_tags[sel_lmp_name.format(walker = walker_idx,idx=sel)] = f"{op_in['task_name']}_{sel}"
                elif op_in["slice_mode"] == "gmx" or op_in["slice_mode"] == "mdtraj" :
                    conf_list.append(task_path.joinpath(sel_gro_name.format(walker=walker_idx,idx=sel)))
                    conf_tags[sel_gro_name.format(walker = walker_idx,idx=sel)] = f"{op_in['task_name']}_{sel}"
                # Save the CV values of each selected frame alongside it.
                save_txt(cv_init_label.format(walker=walker_idx,idx=sel), sel_data[ii])
                cv_init_list.append(task_path.joinpath(cv_init_label.format(walker=walker_idx,idx=sel)))
            with open("conf.json", "w") as f:
                json.dump(conf_tags,f)
        op_out = OPIO(
            {
                "selected_confs": conf_list,
                "selected_cv_init": cv_init_list,
                "model_devi": task_path.joinpath("cls_"+model_devi_name),
                "selected_indices": task_path.joinpath(sel_ndx_name),
                "selected_conf_tags": task_path.joinpath("conf.json")
            }
        )
        return op_out | /rid_kit-0.6.2-py3-none-any.whl/rid/op/run_select.py | 0.816004 | 0.327883 | run_select.py | pypi |
from logging import raiseExceptions
from dflow.python import (
OP,
OPIO,
OPIOSign,
Artifact,
BigParameter
)
from typing import List, Dict, Union
from pathlib import Path
from rid.constants import (
plumed_output_name
)
from rid.task.builder import EnhcMDTaskBuilder
class PrepExplore(OP):
    r"""
    Prepare files for exploration tasks.
    Currently, RiD is based on Gromacs/Lammps with PLUMED2 plugin. Provide .gro files and .top files if running Gromacs and .lmp files if running Lammps.
    Exploration step would run biased MD sampling with neural network models or
    brute force MD sampling without neural network model provided.
    With models provided, the bias forces will be the average value of outputs of these models and tuned by a switching function.
    .. math::
        F(r) = -\nabla_{r_i} U(r) + \sigma( \me ( s( r))) \nabla_{r_i} A(r)
    where :math:`F(r)` is forces exrted on atoms, :math:`U(r)` is potential energy and :math:`A(r)` is free energy
    represented by neural networks.
    .. math::
        \sigma(\epsilon)=
            \begin{cases}
                1, & \epsilon<\epsilon_0 \\
                \frac{1}{2}+\frac{1}{2}\cos{(\pi \frac{\epsilon-\epsilon_0}{\epsilon_1-\epsilon_0})}, & \epsilon_0 <\epsilon < \epsilon_1 \\
                0, &\epsilon > \epsilon_1
            \end{cases}
    where :math:`\sigma(\epsilon)` is the switching function with parameters trust level (`trust_lvl_1` and `trust_lvl_2`).
    """
    @classmethod
    def get_input_sign(cls):
        return OPIOSign(
            {
                "models": Artifact(List[Path], optional=True),
                "topology": Artifact(Path, optional=True),
                "conf": Artifact(Path),
                "cv_file": Artifact(List[Path], optional=True),
                "trust_lvl_1": float,
                "trust_lvl_2": float,
                "exploration_config": BigParameter(Dict),
                "cv_config": BigParameter(Dict),
                "task_name": str,
                "block_tag": str
            }
        )
    @classmethod
    def get_output_sign(cls):
        return OPIOSign(
            {
                "task_path": Artifact(Path, archive = None),
                "cv_dim": int
            }
        )
    @OP.exec_sign_check
    def execute(
        self,
        op_in: OPIO,
    ) -> OPIO:
        r"""Execute the OP.
        Parameters
        ----------
        op_in : dict
            Input dict with components:

            - `models`: (`Artifact(List[Path])`) Optional. Neural network model files (`.pb`) used to bias the simulation.
                Run brute force MD simulations if not provided.
            - `trust_lvl_1`: (`float`) Trust level 1.
            - `trust_lvl_2`: (`float`) Trust level 2.
            - `topology`: (`Artifact(Path)`) Topology files (.top) for Gromacs simulations.
            - `conf`: (`Artifact(Path)`) Conformation files (.gro, .lmp) for Gromacs/Lammps simulations.
            - `exploration_config`: (`Dict`) Configuration in `Dict` format for Gromacs/Lammps run.
            - `cv_config`: (`Dict`) Configuration for CV creation.
            - `task_name`: (`str`) Task name used to make sub-dir for tasks.

        Returns
        -------
            Output dict with components:

            - `task_path`: (`Artifact(Path)`) A directory containing files for RiD exploration.
            - `cv_dim`: (`int`) CV dimensions.
        """
        # The CV definition source depends on the CV mode: residue ids for
        # torsions, atom ids for distances, or user PLUMED files for custom.
        cv_file = []
        selected_resid = None
        selected_atomid = None
        if op_in["cv_config"]["mode"] == "torsion":
            selected_resid = op_in["cv_config"]["selected_resid"]
        elif op_in["cv_config"]["mode"] == "distance":
            selected_atomid = op_in["cv_config"]["selected_atomid"]
        elif op_in["cv_config"]["mode"] == "custom":
            cv_file = op_in["cv_file"]
        # An empty model list makes the builder produce a brute-force MD task.
        if op_in["models"] is None:
            models = []
        else:
            models = [str(model.name) for model in op_in["models"]]
        wall_list = None
        if "iterative_walls" in op_in["cv_config"]:
            wall_list = op_in["cv_config"]["iterative_walls"]
        # block_tag is assumed to look like "<name>-<iteration>[-...]";
        # the second dash-separated field is the iteration number.
        iteration = int(op_in["block_tag"].split("-")[1])
        gmx_task_builder = EnhcMDTaskBuilder(
            conf = op_in["conf"],
            topology = op_in["topology"],
            exploration_config = op_in["exploration_config"],
            cv_file=cv_file,
            selected_resid = selected_resid,
            selected_atomid = selected_atomid,
            sampler_type = op_in["exploration_config"]["type"],
            trust_lvl_1 = op_in["trust_lvl_1"],
            trust_lvl_2 = op_in["trust_lvl_2"],
            model_list = models,
            plumed_output = plumed_output_name,
            cv_mode = op_in["cv_config"]["mode"],
            wall_list = wall_list,
            iteration = iteration
        )
        cv_dim = gmx_task_builder.get_cv_dim()
        task_path = Path(op_in["task_name"])
        task_path.mkdir(exist_ok=True, parents=True)
        gmx_task = gmx_task_builder.build()
        # Each task file maps name -> (content, open mode); write them all.
        for fname, fconts in gmx_task.files.items():
            with open(task_path.joinpath(fname), fconts[1]) as ff:
                ff.write(fconts[0])
        op_out = OPIO(
            {
                "task_path": task_path,
                "cv_dim": int(cv_dim)
            }
        )
        return op_out | /rid_kit-0.6.2-py3-none-any.whl/rid/op/prep_exploration.py | 0.909584 | 0.583856 | prep_exploration.py | pypi |
from typing import List, Optional, Dict
from pathlib import Path
import numpy as np
from dflow.python import (
OP,
OPIO,
OPIOSign,
Artifact,
Parameter,
BigParameter
)
from rid.utils import save_txt, set_directory
from rid.mcmc.walker import Walker, my_hist1d, my_hist2d, my_hist1d_path, my_hist2d_path
from rid.select.model_devi import test_ef
from rid.common.tensorflow.graph import load_graph
try:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
except ImportError:
import tensorflow as tf
from rid.constants import mcmc_1cv_name, mcmc_1cv_dir_name, mcmc_2cv_name, kb, f_cvt, kcal2kj
import os
class MCMCRun(OP):
    """
    `MCMC_Run` performs Markov chain Monte Carlo sampling to reduce the dimension of the free energy surface
    (represented by a neural network) onto 1D and 2D projections.
    """
    @classmethod
    def get_input_sign(cls):
        return OPIOSign(
            {
                "task_names": str,
                "mcmc_config": BigParameter(Dict),
                "models": Artifact(Path, archive = None)
            }
        )
    @classmethod
    def get_output_sign(cls):
        return OPIOSign(
            {
                "mcmc_1cv": Artifact(Path, archive = None),
                "mcmc_2cv": Artifact(Path, archive = None)
            }
        )
    @OP.exec_sign_check
    def execute(
        self,
        op_in: OPIO,
    ) -> OPIO:
        r"""Execute the OP.
        Parameters
        ----------
        op_in : dict
            Input dict with components:

            - `task_name`: str,
            - `models`: Artifact(List[Path], optional=True),

        Returns
        -------
            Output dict with components:

            - `fes_fig`: (`Artifact(Path)`)
        """
        graph = load_graph(op_in["models"])
        mcmc_config = op_in["mcmc_config"]
        temperature = mcmc_config["temperature"]
        # fd: CV dimension; ns: number of MC sweeps; nw: number of walkers.
        fd = mcmc_config["cv_dimension"]
        ns = mcmc_config["numb_steps"]
        nw = mcmc_config["numb_walkers"]
        # thermal energy k_B * T in eV
        kbT = kb * temperature
        beta = 1.0 / kbT
        if "cv_upper_bound" in mcmc_config:
            cv_upper = mcmc_config["cv_upper_bound"]
        else:
            cv_upper = 2*np.pi
        if "cv_lower_bound" in mcmc_config:
            cv_lower = mcmc_config["cv_lower_bound"]
        else:
            cv_lower = 0
        # proj_mode selects projection onto raw CVs ("cv") or path CVs ("path").
        proj_info = mcmc_config["proj_info"]
        proj_mode = proj_info["proj_mode"]
        proj_cv_index = proj_info["proj_cv_index"]
        if "path_list" in proj_info:
            path_list = np.array(proj_info["path_list"])
            path_lm = proj_info["path_lm"]
        cv_type = mcmc_config["cv_type"]
        bins = mcmc_config["bins"]
        # Histogram grids: dihedrals live on [0, 2*pi]; distances use a
        # hard-coded [0, 10] range (units per the CV definition — confirm).
        if cv_type == "dih":
            xx = np.linspace(0,2* np.pi, bins)
            yy = np.linspace(0,2* np.pi, bins)
            pp_hist = np.zeros((fd, len(xx)))
            pp_hist2d = np.zeros((1, len(xx), len(yy)))
            delta = 2.0 * np.pi / (bins-1)
        elif cv_type == "dis":
            xx = np.linspace(0,10, bins)
            yy = np.linspace(0,10, bins)
            if proj_mode == "cv":
                pp_hist = np.zeros((fd, len(xx)))
            elif proj_mode == "path":
                # Path projection always yields exactly two path CVs (s, z).
                pp_hist = np.zeros((2, len(xx)))
            pp_hist2d = np.zeros((1, len(xx), len(yy)))
            delta = 10.0 / (bins-1)
        else:
            raise ValueError("Undefined cv type, only support 'dih' and 'dis' type")
        task_path = Path(op_in["task_names"])
        task_path.mkdir(exist_ok=True, parents=True)
        mcmc_2cv_path = None
        with set_directory(task_path):
            with tf.Session(graph = graph) as sess:
                walker = Walker(fd, nw, sess, cv_type, cv_lower=cv_lower, cv_upper=cv_upper)
                # Warm-up sweeps: results are discarded before accumulation.
                for _ in range(10000):
                    pp, ee, ff = walker.sample(test_ef)
                for ii in range(ns+1):
                    pp, ee, ff = walker.sample(test_ef)
                    if proj_mode == "cv":
                        # project on 1D CV
                        pp_hist_new = my_hist1d(pp, xx, delta, fd)
                        # Running average of the histogram over sweeps.
                        pp_hist = (pp_hist * ii + pp_hist_new) / (ii+1)
                        if not os.path.exists(mcmc_1cv_dir_name):
                            os.makedirs(mcmc_1cv_dir_name)
                        # Dump a free-energy snapshot every ns/5 sweeps.
                        if np.mod(ii,int(ns/5)) == 0:
                            zz = -np.log(pp_hist+1e-7)/beta
                            # convert ev to kcal/mol
                            zz *= f_cvt/kcal2kj
                            zz = zz - np.min(zz)
                            for jj in range(fd):
                                fp = open(mcmc_1cv_dir_name+"/"+mcmc_1cv_name.format(tag=jj), "a")
                                for temp in zz[jj]:
                                    fp.write(str(temp)+' ')
                                fp.write('\n')
                                fp.close()
                        # project on 2D CV
                        assert len(proj_cv_index) == 2
                        cv1 = proj_cv_index[0]
                        cv2 = proj_cv_index[1]
                        ##certain 2d
                        pp_hist_new2d = my_hist2d(pp, xx, yy, delta, cv1, cv2)
                        pp_hist2d = (pp_hist2d * ii + pp_hist_new2d) / (ii+1)
                        if ii == ns:
                            # Final sweep: convert the 2D histogram to a
                            # free-energy surface and write it out.
                            zz2d = np.transpose(-np.log(pp_hist2d+1e-10), (0,2,1))/beta
                            # convert ev to kcal/mol
                            zz2d *= f_cvt/kcal2kj
                            zz2d = zz2d - np.min(zz2d)
                            np.savetxt(mcmc_2cv_name,zz2d[0])
                    elif proj_mode == "path":
                        # project on 1D CV
                        pp_hist_new = my_hist1d_path(pp, xx, delta, path_lm, path_list, proj_cv_index)
                        pp_hist = (pp_hist * ii + pp_hist_new) / (ii+1)
                        if not os.path.exists(mcmc_1cv_dir_name):
                            os.makedirs(mcmc_1cv_dir_name)
                        if np.mod(ii,int(ns/5)) == 0:
                            zz = -np.log(pp_hist+1e-7)/beta
                            # convert ev to kcal/mol
                            zz *= f_cvt/kcal2kj
                            zz = zz - np.min(zz)
                            # iterate over 2 path CV
                            for jj in range(2):
                                fp = open(mcmc_1cv_dir_name+"/"+mcmc_1cv_name.format(tag=jj), "a")
                                for temp in zz[jj]:
                                    fp.write(str(temp)+' ')
                                fp.write('\n')
                                fp.close()
                        # project on 2D CV
                        pp_hist_new2d_path = my_hist2d_path(pp, xx, yy, delta, path_lm, path_list, proj_cv_index)
                        pp_hist2d = (pp_hist2d * ii + pp_hist_new2d_path) / (ii+1)
                        if ii == ns:
                            zz2d = np.transpose(-np.log(pp_hist2d+1e-10), (0,2,1))/beta
                            # convert ev to kcal/mol
                            zz2d *= f_cvt/kcal2kj
                            zz2d = zz2d - np.min(zz2d)
                            np.savetxt(mcmc_2cv_name,zz2d[0])
        # The 2D output only exists when the run reached its final sweep.
        if os.path.exists(task_path.joinpath(mcmc_2cv_name)):
            mcmc_2cv_path = task_path.joinpath(mcmc_2cv_name)
        op_out = OPIO(
            {
                "mcmc_1cv": task_path.joinpath(mcmc_1cv_dir_name),
                "mcmc_2cv": mcmc_2cv_path
            }
        )
        return op_out | /rid_kit-0.6.2-py3-none-any.whl/rid/op/mcmc_run.py | 0.80905 | 0.195172 | mcmc_run.py | pypi |
import os, sys, shutil, logging
from typing import List, Dict
from pathlib import Path
from copy import deepcopy
from dflow.python import (
OP,
OPIO,
OPIOSign,
Artifact,
BigParameter
)
from rid.utils import load_json
from rid.constants import model_tag_fmt, init_conf_gmx_name, init_conf_lmp_name,init_input_name, walker_tag_fmt
# Configure root logging once at import time; the LOGLEVEL environment
# variable overrides the default INFO level.
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)
logger = logging.getLogger(__name__)
def prep_confs(confs, numb_walkers, sampler_type):
    """Materialize exactly ``numb_walkers`` initial conformation files.

    Copies the user-provided conformations into the current directory using
    the sampler's naming convention, cycling through them when fewer confs
    than walkers are given, and using only the first ``numb_walkers`` when
    more are given.

    Parameters
    ----------
    confs : sequence of path-like
        User-provided conformation files.
    numb_walkers : int
        Number of parallel walkers (output file count).
    sampler_type : str
        Either ``"gmx"`` or ``"lmp"``; selects the output name format.

    Returns
    -------
    List[Path]
        Paths (relative, in CWD) of the copied conformation files.

    Raises
    ------
    ValueError
        For an unknown ``sampler_type``.
    """
    if sampler_type == "gmx":
        name_fmt = init_conf_gmx_name
    elif sampler_type == "lmp":
        name_fmt = init_conf_lmp_name
    else:
        raise ValueError("Invalid sampler type, only support gmx and lmp")
    numb_confs = len(confs)
    if numb_confs < numb_walkers:
        logger.info("Number of confs is smaller than number of walkers. Copy replicas up to number of walkers.")
    elif numb_confs > numb_walkers:
        logger.info("Number of confs is greater than number of walkers. Only use the first `numb_walkers` confs.")
    conf_list = []
    for idx in range(numb_walkers):
        # idx % numb_confs cycles through the provided confs when there are
        # fewer confs than walkers; it is the identity otherwise.
        target = name_fmt.format(idx=idx)
        shutil.copyfile(confs[idx % numb_confs], target)
        conf_list.append(Path(target))
    return conf_list
class PrepRiD(OP):
    """Pre-processing of RiD.
    1. Parse RiD configuration JSON file, get default value if parameters are not provided.
    2. Rearrange conformation files.
    3. Make task names and formats.
    """
    @classmethod
    def get_input_sign(cls):
        return OPIOSign(
            {
                "confs": Artifact(List[Path]),
                "rid_config": Artifact(Path)
            }
        )
    @classmethod
    def get_output_sign(cls):
        return OPIOSign(
            {
                "numb_iters": int,
                "numb_walkers": int,
                "numb_models": int,
                "confs": Artifact(List[Path],archive = None),
                "walker_tags": List,
                "model_tags": List,
                "exploration_config": BigParameter(Dict),
                "cv_config": BigParameter(Dict),
                "trust_lvl_1": List[float],
                "trust_lvl_2": List[float],
                "cluster_threshold": List[float],
                "angular_mask": List,
                "weights": List,
                "numb_cluster_upper": int,
                "numb_cluster_lower": int,
                "max_selection": int,
                "numb_cluster_threshold": int,
                "std_threshold": float,
                "dt": float,
                "output_freq": float,
                "slice_mode": str,
                "type_map": List,
                "label_config": BigParameter(Dict),
                "train_config": BigParameter(Dict)
            }
        )
    @OP.exec_sign_check
    def execute(
        self,
        op_in: OPIO,
    ) -> OPIO:
        r"""Execute the OP.
        Parameters
        ----------
        op_in : dict
            Input dict with components:

            - `confs`: (`Artifact(Path)`) User-provided initial conformation files (.gro) for reinforced dynamics.
            - `rid_config`: (`Artifact(Path)`) Configuration file (.json) of RiD.
                Parameters in this file will be parsed.

        Returns
        -------
            Output dict with components:

            - `numb_iters`: (`int`) Max number of iterations for RiD.
            - `numb_walkers`: (`int`) Number of parallel walkers for exploration.
            - `numb_models`: (`int`) Number of neural network models of RiD.
            - `confs`: (`Artifact(List[Path])`) Rearranged initial conformation files (.gro) for reinforced dynamics.
            - `walker_tags`: (`List`) Tag format for parallel walkers.
            - `model_tags`: (`List`) Tag format for neural network models.
            - `exploration_config`: (`Dict`) Configuration of simulations in exploration steps.
            - `cv_config`: (`Dict`) Configuration to create CV in PLUMED2 style.
            - `trust_lvl_1`: (`List[float]`) Trust level 1, or e0.
            - `trust_lvl_2`: (`List[float]`) Trust level 2, or e1.
            - `cluster_threshold`: (`List[float]`) Initial guess of cluster threshold.
            - `angular_mask`: (`List`) Angular mask for periodic collective variables.
                1 represents periodic, 0 represents non-periodic.
            - `weights`: (`List`) Weights for clustering collective variables. see details in cluster algorithms.
            - `numb_cluster_upper`: (`int`) Upper limit of cluster number to make cluster threshold.
            - `numb_cluster_lower`: (`int`) Lower limit of cluster number to make cluster threshold.
            - `max_selection`: (`int`) Max selection number of clusters in Selection steps for each parallel walker.
            - `numb_cluster_threshold`: (`int`) Used to adjust trust level. When cluster number is greater than this threshold,
                trust levels will be increased adaptively.
            - `dt`: (`float`) Time interval of exploration MD simulations. Gromacs `trjconv` commands will need this parameter
                to slice trajectories by `-dump` tag, see `selection` steps for detail.
            - `slice_mode`: (`str`) Mode to slice trajectories. Either `gmx` or `mdtraj`.
            - `label_config`: (`Dict`) Configuration of simulations in labeling steps.
            - `kappas`: (`List`) Force constants of harmonic restraints to perform restrained MD simulations.
            - `train_config`: (`Dict`) Configuration to train neural networks, including training strategy and network structures.
        """
        # Deep-copy so popping keys below never mutates the cached JSON dict.
        jdata = deepcopy(load_json(op_in["rid_config"]))
        numb_walkers = jdata.pop("numb_walkers")
        train_config = jdata.pop("Train")
        numb_models = train_config.pop("numb_models")
        numb_iters = jdata.pop("numb_iters")
        exploration_config = jdata.pop("ExploreMDConfig")
        sampler_type = exploration_config["type"]
        # Replicate/trim the user confs to exactly numb_walkers files.
        conf_list = prep_confs(op_in["confs"], numb_walkers, sampler_type)
        walker_tags = []
        model_tags = []
        for idx in range(numb_walkers):
            walker_tags.append(walker_tag_fmt.format(idx=idx))
        for idx in range(numb_models):
            model_tags.append(model_tag_fmt.format(idx=idx))
        dt = exploration_config["dt"]
        output_freq = exploration_config["output_freq"]
        cv_config = jdata.pop("CV")
        angular_mask = cv_config["angular_mask"]
        weights = cv_config["weights"]
        selection_config = jdata.pop("SelectorConfig")
        label_config = jdata.pop("LabelMDConfig")
        # Per-walker lists: every walker starts from the same scalar values.
        trust_lvl_1 = jdata.pop("trust_lvl_1")
        trust_lvl_2 = jdata.pop("trust_lvl_2")
        trust_lvl_1_list = [trust_lvl_1 for _ in range(numb_walkers)]
        trust_lvl_2_list = [trust_lvl_2 for _ in range(numb_walkers)]
        cluster_threshold = selection_config.pop("cluster_threshold")
        cluster_threshold_list = [cluster_threshold for _ in range(numb_walkers)]
        std_threshold = label_config["std_threshold"]
        if "type_map" in selection_config:
            type_map = selection_config["type_map"]
        else:
            type_map = []
        op_out = OPIO(
            {
                "numb_iters": numb_iters,
                "numb_walkers": numb_walkers,
                "numb_models": numb_models,
                "confs": conf_list,
                "walker_tags": walker_tags,
                "model_tags": model_tags,
                "exploration_config": exploration_config,
                "cv_config": cv_config,
                "trust_lvl_1": trust_lvl_1_list,
                "trust_lvl_2": trust_lvl_2_list,
                "cluster_threshold": cluster_threshold_list,
                "angular_mask": angular_mask,
                "weights": weights,
                "numb_cluster_upper": selection_config.pop("numb_cluster_upper"),
                "numb_cluster_lower": selection_config.pop("numb_cluster_lower"),
                "max_selection": selection_config.pop("max_selection"),
                "numb_cluster_threshold": selection_config.pop("numb_cluster_threshold"),
                "std_threshold": std_threshold,
                "dt": dt,
                "output_freq": output_freq,
                "slice_mode": selection_config.pop("slice_mode"),
                "type_map": type_map,
                "label_config": label_config,
                "train_config": train_config
            }
        )
        return op_out | /rid_kit-0.6.2-py3-none-any.whl/rid/op/prep_rid.py | 0.620622 | 0.205795 | prep_rid.py | pypi |
from dflow.python import (
OP,
OPIO,
OPIOSign,
Artifact
)
import os
from typing import List
from pathlib import Path
from rid.constants import (
data_new,
data_raw
)
from rid.utils import load_txt
import numpy as np
class CollectData(OP):
    r"""Gather CV/mean-force records of different simulations into a single
    ``data_new`` .npy file.

    Each provided file holds one record; all records must share the same
    length. Falsy (missing) artifact entries are skipped.
    """
    @classmethod
    def get_input_sign(cls):
        return OPIOSign(
            {
                "cv_forces": Artifact(List[Path])
            }
        )

    @classmethod
    def get_output_sign(cls):
        return OPIOSign(
            {
                "data_new": Artifact(Path),
            }
        )

    @OP.exec_sign_check
    def execute(
        self,
        op_in: OPIO,
    ) -> OPIO:
        r"""Load every non-empty cv_forces file, stack them row-wise and save
        the result (an empty array when nothing was gathered) to `data_new`.
        """
        # Load once per file; skipping falsy entries also avoids the original
        # NameError when the list was non-empty but held no real paths.
        records = [np.ravel(load_txt(p)) for p in op_in["cv_forces"] if p]
        if records:
            # One row per record; np.concatenate+reshape instead of the
            # original repeated np.append (which reallocated per file).
            data = np.reshape(np.concatenate(records), (len(records), -1))
        else:
            data = np.array([])
        np.save(data_new, data)
        op_out = OPIO(
            {
                "data_new": Path(data_new)
            }
        )
        return op_out
class MergeData(OP):
    r"""Merge old data and newly generated data.
    If old data does not exist, it will return the new data.
    If the new data is empty, it will return the old data.
    """
    @classmethod
    def get_input_sign(cls):
        return OPIOSign(
            {
                "data_old": Artifact(Path, optional=True),
                "data_new": Artifact(Path),
            }
        )

    @classmethod
    def get_output_sign(cls):
        return OPIOSign(
            {
                "data_raw": Artifact(Path),
            }
        )

    @OP.exec_sign_check
    def execute(
        self,
        op_in: OPIO,
    ) -> OPIO:
        # First iteration: no accumulated data yet, pass the new data through.
        if op_in["data_old"] is None:
            return OPIO({"data_raw": op_in["data_new"]})
        # Zero-byte file: nothing new to merge.
        if os.stat(op_in["data_new"]).st_size == 0:
            return OPIO({"data_raw": op_in["data_old"]})
        _data_new = np.load(op_in["data_new"])
        # CollectData saves an empty array when no forces were gathered.
        if len(_data_new) == 0:
            return OPIO({"data_raw": op_in["data_old"]})
        _data_old = np.load(op_in["data_old"])
        # Append the new rows below the accumulated rows.
        data = np.concatenate((_data_old, _data_new), axis=0)
        np.save(data_raw, data)
        op_out = OPIO(
            {
                "data_raw": Path(data_raw)
            }
        )
        return op_out | /rid_kit-0.6.2-py3-none-any.whl/rid/op/prep_data.py | 0.664867 | 0.306514 | prep_data.py | pypi |
import numpy as np
from typing import List, Dict
from pathlib import Path
from dflow.python import (
OP,
OPIO,
OPIOSign,
Artifact,
Parameter,
BigParameter
)
from rid.constants import tf_model_name, train_fig, train_log
from rid.nn.train_net import train
from rid.nn.freeze import freeze_model
from matplotlib import pyplot as plt
from rid.utils import set_directory
class TrainModel(OP):
    """`TrainModel` trains a set of neural network models (set by `numb_model` in `train_config`).
    RiD-kit is powered by TensorFlow framework. The output model files are frozen in `.pb` formats by `rid.nn.freeze`.
    """
    @classmethod
    def get_input_sign(cls):
        return OPIOSign(
            {
                "model_tag": str,
                "angular_mask": List,
                "data": Artifact(Path),
                "train_config": BigParameter(Dict)
            }
        )
    @classmethod
    def get_output_sign(cls):
        return OPIOSign(
            {
                "model": Artifact(Path),
                "train_log": Artifact(Path),
                "train_fig": Artifact(Path)
            }
        )
    @OP.exec_sign_check
    def execute(
        self,
        op_in: OPIO,
    ) -> OPIO:
        r"""Execute the OP.
        Parameters
        ----------
        op_in : dict
            Input dict with components:

            - `model_tag`: (`str`) Tags for neural network model files. In formats of `model_{model_tag}.pb`.
            - `angular_mask`: (`List`) Angular mask for periodic collective variables. 1 represents periodic, 0 represents non-periodic.
            - `data`: (`Artifact(Path)`) Data files for training. Prepared by `rid.op.prep_data`.
                `data` has the shape of `[number_conf, 2 * dimension_cv]` and contains the CV values and corresponding mean forces.
            - `train_config`: (`Dict`) Configuration to train neural networks, including training strategy and network structures.

        Returns
        -------
            Output dict with components:

            - `model`: (`Artifact(Path)`) Neural network models in `.pb` formats.
        """
        data_shape = np.load(op_in["data"]).shape
        # Each row holds CV values followed by mean forces, so the CV
        # dimension is half the column count.
        cv_dim = int(data_shape[1] // 2)
        train_config = op_in["train_config"]
        task_path = Path(op_in["model_tag"])
        task_path.mkdir(exist_ok=True, parents=True)
        train_log_name = train_log.format(tag=op_in["model_tag"])
        with set_directory(task_path):
            train(
                cv_dim=cv_dim,
                neurons=train_config["neurons"],
                angular_mask=op_in["angular_mask"],
                numb_threads=train_config.get("numb_threads", 8),
                resnet=train_config["resnet"],
                use_mix=train_config["use_mix"],
                restart=train_config.get("restart", False),
                batch_size=train_config["batch_size"],
                epoches=train_config["epoches"],
                lr=train_config["init_lr"],
                decay_steps=train_config["decay_steps"],
                decay_rate=train_config["decay_rate"],
                drop_out_rate=train_config["drop_out_rate"],
                data_path=str(op_in["data"]),
                log_name = train_log_name
            )
            out_put_name = tf_model_name.format(tag=op_in["model_tag"])
            train_fig_name = train_fig.format(tag=op_in["model_tag"])
            # plot loglog loss png
            loss_list = []
            epoch_list = []
            # NOTE(review): this parsing relies on the exact token layout of
            # the rid.nn training log lines — fragile if the log format changes.
            with open(train_log_name, "r") as f:
                while True:
                    line = f.readline()
                    if "running time" in line:
                        break
                    if "rid.nn.model" in line:
                        data = line.split(" ")
                        # The first logged epoch line has two extra tokens,
                        # hence the shifted field indices below.
                        if len(loss_list) == 0:
                            epoch_list.append(int(data[10][:-1]))
                            loss_list.append(float(data[14][:-1]))
                        else:
                            epoch_list.append(int(data[8][:-1]))
                            loss_list.append(float(data[12][:-1]))
            plt.figure(figsize=(10, 8), dpi=100)
            plt.loglog(epoch_list,loss_list)
            plt.xlabel("log of training epoches")
            plt.ylabel("log of relative error")
            plt.title("loglog fig of training")
            plt.savefig(train_fig_name)
            # Freeze the trained checkpoint into a single .pb graph file.
            freeze_model(
                model_folder=".",
                output=out_put_name
            )
        op_out = OPIO(
            {
                "model": task_path.joinpath(out_put_name),
                "train_log": task_path.joinpath(train_log_name),
                "train_fig": task_path.joinpath(train_fig_name)
            }
        )
        return op_out | /rid_kit-0.6.2-py3-none-any.whl/rid/op/run_train.py | 0.863636 | 0.435902 | run_train.py | pypi |
from dflow.python import (
OP,
OPIO,
OPIOSign
)
class AdjustTrustLevel(OP):
    r"""AdjustTrustLevel OP adjusts trust levels according to the number of clusters (`numb_cluster`) and
    the cluster number threshold (`numb_cluster_threshold`). If numb_cluster < numb_cluster_threshold,
    the trust levels will be increased by `adjust_amplifier`; otherwise they are reset to their initial values.
    Trust levels won't increase infinitely. When the current trust_lvl_1 > `max_level_multiple` * init_trust_lvl_1,
    where init_trust_lvl_1 is the initial trust level, both trust levels are reset to their initial values.
    """

    @classmethod
    def get_input_sign(cls):
        return OPIOSign(
            {
                "trust_lvl_1": float,
                "trust_lvl_2": float,
                "init_trust_lvl_1": float,
                "init_trust_lvl_2": float,
                "numb_cluster": int,
                "numb_cluster_threshold": int,
                "adjust_amplifier": float,
                "max_level_multiple": float
            }
        )

    @classmethod
    def get_output_sign(cls):
        return OPIOSign(
            {
                "adjust_trust_lvl_1": float,
                "adjust_trust_lvl_2": float
            }
        )

    @OP.exec_sign_check
    def execute(
        self,
        op_in: OPIO,
    ) -> OPIO:
        r"""Execute the OP.

        Parameters
        ----------
        op_in : dict
            Input dict with components:

            - `trust_lvl_1`: (`float`) Trust level 1 in the current iteration, or e0.
            - `trust_lvl_2`: (`float`) Trust level 2 in the current iteration, or e1.
            - `init_trust_lvl_1`: (`float`) Initial value of trust level 1.
            - `init_trust_lvl_2`: (`float`) Initial value of trust level 2.
            - `numb_cluster`: (`int`) Number of clusters got from cluster op.
            - `numb_cluster_threshold`: (`int`) Threshold for cluster number to adjust trust level adaptively.
                if `numb_cluster` < `numb_cluster_threshold`, `trust_lvl` will be increased.
            - `adjust_amplifier`: (`float`) Increasing multiple for trust level.
            - `max_level_multiple`: (`float`) The max multiple that trust level can be increased.

        Returns
        -------
            Output dict with components:

            - `adjust_trust_lvl_1`: (`float`) Adjusted Trust level 1 for next iteration.
            - `adjust_trust_lvl_2`: (`float`) Adjusted Trust level 2 for next iteration.
        """
        # Few clusters means exploration is not finding new states: widen the
        # trust region; otherwise fall back to the initial levels.
        if op_in["numb_cluster"] < op_in["numb_cluster_threshold"]:
            adjust_trust_lvl_1 = op_in["trust_lvl_1"] * op_in["adjust_amplifier"]
            adjust_trust_lvl_2 = op_in["trust_lvl_2"] * op_in["adjust_amplifier"]
        else:
            adjust_trust_lvl_1 = op_in["init_trust_lvl_1"]
            adjust_trust_lvl_2 = op_in["init_trust_lvl_2"]
        # Cap unbounded growth: reset once the amplified level exceeds the
        # allowed multiple of its initial value.
        if adjust_trust_lvl_1 > op_in["init_trust_lvl_1"] * op_in["max_level_multiple"]:
            adjust_trust_lvl_1 = op_in["init_trust_lvl_1"]
            adjust_trust_lvl_2 = op_in["init_trust_lvl_2"]
        op_out = OPIO({
            "adjust_trust_lvl_1": adjust_trust_lvl_1,
            "adjust_trust_lvl_2": adjust_trust_lvl_2
        })
        return op_out
from typing import List, Optional, Dict
from pathlib import Path
import numpy as np
from dflow.python import (
OP,
OPIO,
OPIOSign,
Artifact,
Parameter,
BigParameter
)
from rid.utils import save_txt, set_directory
from rid.constants import mcmc_1cv_dir_name, mcmc_1cv_name, mcmc_2cv_name,mcmc_1cv_fig, mcmc_2cv_fig, mcmc_2cv_fig_separate
from matplotlib import pyplot as plt
import os
class MCMCPlot(OP):
    """
    `MCMC_Plot` plot the reduced free energy surface produced by MCMC run.

    1D and 2D projections of the PMF are averaged over all provided MCMC runs
    and written as figures into the output directory ``mcmc_fig``.
    """

    @classmethod
    def get_input_sign(cls):
        return OPIOSign(
            {
                "mcmc_1cv": Artifact(List[Path]),
                "mcmc_2cv": Artifact(List[Path]),
                "plm_out": Artifact(List[Path], optional=True),
                "mcmc_config": BigParameter(Dict)
            }
        )

    @classmethod
    def get_output_sign(cls):
        return OPIOSign(
            {
                "mcmc_fig": Artifact(Path, archive = None)
            }
        )

    @OP.exec_sign_check
    def execute(
        self,
        op_in: OPIO,
    ) -> OPIO:
        r"""Execute the OP.

        Parameters
        ----------
        op_in : dict
            Input dict with components:

            - `mcmc_1cv`: (`Artifact(List[Path])`) Directories holding 1D PMF projections, one per MCMC run.
            - `mcmc_2cv`: (`Artifact(List[Path])`) Files holding 2D PMF grids, one per MCMC run.
            - `plm_out`: (`Artifact(List[Path])`, optional) PLUMED CV outputs scattered on the 2D maps.
            - `mcmc_config`: (`Dict`) Configuration with `cv_type`, `bins`, `cv_dimension` and `proj_info`.

        Returns
        -------
            Output dict with components:

            - `mcmc_fig`: (`Artifact(Path)`) Directory containing all generated figures.
        """
        mcmc_config = op_in["mcmc_config"]
        cv_type = mcmc_config["cv_type"]
        bins = mcmc_config["bins"]
        cv_dim = mcmc_config["cv_dimension"]
        proj_info = mcmc_config["proj_info"]
        proj_mode = proj_info["proj_mode"]
        # "cv" projects on every CV dimension; "path" projects on the two
        # path-collective variables.
        if proj_mode == "cv":
            proj_1d_num = cv_dim
        elif proj_mode == "path":
            proj_1d_num = 2
        else:
            raise ValueError("Invalid cv type")
        task_path = Path("mcmc_fig")
        task_path.mkdir(exist_ok=True, parents=True)
        with set_directory(task_path):
            # ---- collect 1D PMF curves from every MCMC run ----
            fourdata_1cv = []
            for dir in op_in["mcmc_1cv"]:
                all_1cv = []
                for cv_index in range(proj_1d_num):
                    file = dir/mcmc_1cv_name.format(tag = cv_index)
                    # pmf has shape (proj_iterations, bins); shift so min is zero
                    pmf = np.loadtxt(file)
                    pmf = pmf - np.min(pmf)
                    allda=[]
                    proj_iterations = pmf.shape[0]
                    if cv_type == "dih":
                        xedges = np.linspace(0, 2*np.pi, bins)
                        for i in range(bins):
                            temp = []
                            # this is to change to dihedral dimension to (-pi,pi) which corresponds to the gmx output
                            if xedges[i]>=np.pi:
                                temp.append(xedges[i] - np.pi*2)
                            else:
                                temp.append(xedges[i] + np.pi*2/(bins-1))
                            # wrap the last bin back onto the first (periodic CV)
                            if i == (bins - 1):
                                temp.append(pmf[:,0])
                            else:
                                temp.append(pmf[:,i])
                            temp = np.array(temp)
                            allda.append(temp)
                    elif cv_type == "dis":
                        # distance CVs use a fixed (0, 10) range — TODO confirm units (nm?)
                        xedges = np.linspace(0, 10, bins)
                        for i in range(bins):
                            temp = []
                            temp.append(xedges[i])
                            temp.append(pmf[:,i])
                            temp = np.array(temp)
                            allda.append(temp)
                    # sort rows by CV value so the plotted curve is monotonic in x
                    newarray=np.array(allda)
                    idex=np.argsort(newarray[:,0])
                    sorted_data = newarray[idex,:]
                    print("sorted data shape",sorted_data.shape)
                    sorted_data = np.stack(sorted_data[:,1])
                    all_1cv.append(sorted_data)
                all_1cv = np.array(all_1cv)
                print("all_1cv shape", all_1cv.shape)
                fourdata_1cv.append(all_1cv)
            # average the PMF curves over the independent MCMC runs
            avedata_1cv = np.mean(np.array(fourdata_1cv),axis=0)
            print("avedata shape", avedata_1cv.shape)
            # make 1cv plot
            # NOTE(review): proj_iterations below comes from the last loaded
            # pmf — assumes mcmc_1cv is non-empty and all pmf files agree.
            if not os.path.exists(mcmc_1cv_dir_name):
                os.makedirs(mcmc_1cv_dir_name)
            for cv_index in range(proj_1d_num):
                if proj_mode == "cv":
                    if cv_type == "dih":
                        xedges = np.linspace(-np.pi, np.pi, bins)
                    elif cv_type == "dis":
                        xedges = np.linspace(0, 10, bins)
                    plt.figure(figsize=(8, 6))
                    for proj_iter in range(proj_iterations):
                        plt.plot(xedges,avedata_1cv[cv_index][:,proj_iter], label = proj_iter)
                    plt.xlabel(r'cv')
                    plt.ylabel(r'free energy (kcal/mol)')
                    plt.legend()
                    plt.savefig(mcmc_1cv_dir_name+"/"+mcmc_1cv_fig.format(tag = cv_index),dpi=600,bbox_inches='tight')
                elif proj_mode == "path":
                    # path CVs use fixed ranges: s in (0, 10), z in (-10, 0)
                    path_edges = [np.linspace(0, 10, bins),np.linspace(-10, 0, bins)]
                    plt.figure(figsize=(8, 6))
                    for proj_iter in range(proj_iterations):
                        plt.plot(path_edges[cv_index],avedata_1cv[cv_index][:,proj_iter], label = proj_iter)
                    plt.xlabel(r'cv')
                    plt.ylabel(r'free energy (kcal/mol)')
                    plt.legend()
                    plt.savefig(mcmc_1cv_dir_name+"/"+mcmc_1cv_fig.format(tag = cv_index),dpi=600,bbox_inches='tight')
            # ---- collect 2D PMF grids from every MCMC run ----
            fourdata_2cv = []
            for file in op_in["mcmc_2cv"]:
                # pmf has shape (bins, bins); shift so min is zero
                pmf = np.loadtxt(file)
                pmf = pmf - np.min(pmf)
                allda=[]
                if cv_type == "dih":
                    xedges = np.linspace(0, 2*np.pi, bins)
                    yedges = np.linspace(0, 2*np.pi, bins)
                    for i in range(bins):
                        for j in range(bins):
                            temp = []
                            # this is to change to dihedral dimension to (-pi,pi) which corresponds to the gmx output
                            if xedges[i]>=np.pi:
                                temp.append(xedges[i] - np.pi*2)
                            else:
                                temp.append(xedges[i] + np.pi*2/(bins-1))
                            if yedges[j]>=np.pi:
                                temp.append(yedges[j] - np.pi*2)
                            else:
                                temp.append(yedges[j] + np.pi*2/(bins-1))
                            # wrap the last row/column back onto the first (periodic CVs)
                            if j == (bins -1):
                                temp.append(pmf[i][0])
                            elif i == (bins - 1):
                                temp.append(pmf[0][j])
                            else:
                                temp.append(pmf[i][j])
                            temp = np.array(temp)
                            allda.append(temp)
                elif cv_type == "dis":
                    xedges = np.linspace(0, 10, bins)
                    yedges = np.linspace(0, 10, bins)
                    for i in range(bins):
                        for j in range(bins):
                            temp = []
                            temp.append(xedges[i])
                            temp.append(yedges[j])
                            temp.append(pmf[i][j])
                            temp = np.array(temp)
                            allda.append(temp)
                # lexicographic sort by (x, y) so the flat grid is in row-major order
                newarray=np.array(allda)
                idex=np.lexsort([newarray[:,1], newarray[:,0]])
                sorted_data = newarray[idex,:]
                fourdata_2cv.append(sorted_data[:,2])
            # average the 2D grids over the independent MCMC runs
            avedata_2cv = np.mean(np.array(fourdata_2cv),axis=0)
            # make 2cv plot
            if cv_type == "dih":
                xedges = np.linspace(-np.pi, np.pi, bins)
                yedges = np.linspace(-np.pi, np.pi, bins)
            elif cv_type == "dis":
                if proj_mode == "cv":
                    xedges = np.linspace(0, 10, bins)
                    yedges = np.linspace(0, 10, bins)
                elif proj_mode == "path":
                    xedges = np.linspace(0, 10, bins)
                    yedges = np.linspace(-10, 0, bins)
            # averaged 2D map with optional scatter of the sampled CV trajectory
            plt.figure(figsize=(8, 6))
            cmap = plt.cm.get_cmap("jet_r")
            # Define percentiles for the levels
            upper_perc = np.percentile(np.unique(fourdata_2cv[0]), 99)
            CS = plt.contourf(xedges,yedges,avedata_2cv.reshape(bins,bins),levels = np.linspace(0,upper_perc,101),cmap=cmap,extend="max")
            if op_in["plm_out"] is not None:
                for cv_output in op_in["plm_out"]:
                    cv_point = np.loadtxt(cv_output)
                    assert cv_point.shape[1] == 2
                    # color the trajectory points by their step index
                    point_numbers = list(range(len(cv_point[1:,0])))
                    P1 = plt.scatter(cv_point[1:,0], cv_point[1:,1], s = 2, marker = 'o', c = point_numbers)
                    P_init = plt.scatter(cv_point[0,0], cv_point[0,1], s = 20, marker = 'x', c = "k")
            cbar = plt.colorbar(CS)
            cbar.ax.tick_params(labelsize=8)
            cbar.ax.set_title('kcal/mol',fontsize=8)
            if op_in["plm_out"] is not None:
                plt.legend([P_init],["init"])
                cbar2 = plt.colorbar(P1)
                cbar2.ax.tick_params(labelsize=4)
                cbar2.ax.set_title('steps',fontsize=6)
            plt.xlabel(r'CV index 1')
            plt.ylabel(r'CV index 2')
            plt.savefig(mcmc_2cv_fig,dpi=600,bbox_inches='tight')
            # one extra figure per individual (non-averaged) MCMC run
            for iii in range(len(fourdata_2cv)):
                fig = plt.figure(figsize=(8, 6))
                cmap = plt.cm.get_cmap("jet_r")
                # Define percentiles for the levels
                upper_perc = np.percentile(np.unique(fourdata_2cv[iii]), 99)
                CS = plt.contourf(xedges,yedges,fourdata_2cv[iii].reshape(bins,bins),levels = np.linspace(0,upper_perc,101),cmap=cmap,extend="max")
                if op_in["plm_out"] is not None:
                    for cv_output in op_in["plm_out"]:
                        cv_point = np.loadtxt(cv_output)
                        assert cv_point.shape[1] == 2
                        point_numbers = list(range(len(cv_point[1:,0])))
                        P1 = plt.scatter(cv_point[1:,0], cv_point[1:,1], s = 2, marker = 'o', c = point_numbers)
                        P_init = plt.scatter(cv_point[0,0], cv_point[0,1], s = 20, marker = 'x', c = "k")
                cbar = plt.colorbar(CS)
                cbar.ax.tick_params(labelsize=8)
                cbar.ax.set_title('kcal/mol',fontsize=8)
                if op_in["plm_out"] is not None:
                    plt.legend([P_init],["init"])
                    cbar2 = plt.colorbar(P1)
                    cbar2.ax.tick_params(labelsize=4)
                    cbar2.ax.set_title('steps',fontsize=6)
                plt.xlabel(r'CV index 1')
                plt.ylabel(r'CV index 2')
                plt.savefig(mcmc_2cv_fig_separate.format(tag=iii),dpi=600,bbox_inches='tight')
        op_out = OPIO(
            {
                "mcmc_fig": task_path
            }
        )
        return op_out
from dflow.python import (
OP,
OPIO,
OPIOSign,
Artifact,
Parameter,
BigParameter
)
import json
from typing import List, Dict
from pathlib import Path
from rid.constants import (
plumed_output_name
)
from rid.task.builder import RestrainedMDTaskBuilder, ConstrainedMDTaskBuilder
from rid.utils import load_txt
class CheckLabelInputs(OP):
    r"""Check Inputs of Label Steps.

    If input `confs` is empty or None, `if_continue` will be 0 (false),
    and the following ops of Label steps won't be executed. Otherwise the
    walker tags stored in the `conf_tags` JSON files are matched to the
    selected conformations by file name.
    """

    @classmethod
    def get_input_sign(cls):
        return OPIOSign(
            {
                "confs": Artifact(List[Path], optional=True),
                "conf_tags": Artifact(List[Path], optional=True)
            }
        )

    @classmethod
    def get_output_sign(cls):
        return OPIOSign(
            {
                "if_continue": int,
                "conf_tags": BigParameter(List)
            }
        )

    @OP.exec_sign_check
    def execute(
        self,
        op_in: OPIO,
    ) -> OPIO:
        r"""Execute the OP.

        Parameters
        ----------
        op_in : dict
            Input dict with components:

            - `confs`: (`Artifact(List[Path])`) Conformations selected from trajectories of exploration steps.
            - `conf_tags`: (`Artifact(List[Path])`) JSON files mapping conformation file names to their tags.

        Returns
        -------
            Output dict with components:

            - `if_continue`: (`int`) Whether to execute following ops of Label steps (1 to continue, 0 to skip).
            - `conf_tags`: (`List`) Tags of the selected conformations, in the same order as `confs`.
        """
        if op_in["confs"] is None:
            # Nothing was selected: signal downstream steps to skip labeling.
            if_continue = 0
            conf_tags = []
        else:
            if_continue = 1
            # Merge all tag dictionaries (conformation file name -> tag).
            tags = {}
            for tag in op_in["conf_tags"]:
                if isinstance(tag, Path):
                    with open(tag, "r") as f:
                        tags.update(json.load(f))
                else:
                    raise RuntimeError("Unknown error: `conf_tags` entries must be Path objects.")
            # Look up each selected conformation's tag by its file name.
            conf_tags = [str(tags[conf.name]) for conf in op_in["confs"]]

        op_out = OPIO(
            {
                "if_continue": if_continue,
                "conf_tags": conf_tags
            }
        )
        return op_out
class PrepLabel(OP):
    r"""Prepare files for Label steps.

    Labels of RiD are mean forces, which are calculated by restrained MD algorithm.
    Restrained MD simulations are performed by Gromacs/Lammps with PLUMED2 plugin, so input files are in Gromacs/Lammps format.
    """

    @classmethod
    def get_input_sign(cls):
        return OPIOSign(
            {
                "topology": Artifact(Path, optional=True),
                "conf": Artifact(Path),
                "cv_file": Artifact(List[Path], optional=True),
                "label_config": BigParameter(Dict),
                "cv_config": BigParameter(Dict),
                "task_name": BigParameter(str),
                "at": Artifact(Path, optional=True)
            }
        )

    @classmethod
    def get_output_sign(cls):
        return OPIOSign(
            {
                "task_path": Artifact(Path, archive = None),
            }
        )

    @OP.exec_sign_check
    def execute(
        self,
        op_in: OPIO,
    ) -> OPIO:
        r"""Execute the OP.

        Parameters
        ----------
        op_in : dict
            Input dict with components:

            - `topology`: (`Artifact(Path)`) Topology files (.top) for Restrained MD simulations.
            - `conf`: (`Artifact(Path)`) Conformation files (.gro, .lmp) for Restrained MD simulations.
            - `cv_file`: (`Artifact(List[Path])`) Custom CV definition files, used when `cv_config["mode"]` is "custom".
            - `label_config`: (`Dict`) Configuration in `Dict` format for Gromacs/Lammps run.
            - `cv_config`: (`Dict`) Configuration for CV creation.
            - `at`: (`Artifact(Path)`) Files containing initial CV values, or CV centers.
            - `task_name`: (`str`) Task name used to make sub-dir for tasks.

        Returns
        -------
            Output dict with components:

            - `task_path`: (`Artifact(Path)`) A directory containing files for Restrained MD.
        """
        # Resolve which selection (residues or atoms) defines the CVs.
        cv_file = []
        selected_resid = None
        selected_atomid = None
        if op_in["cv_config"]["mode"] == "torsion":
            selected_resid = op_in["cv_config"]["selected_resid"]
        elif op_in["cv_config"]["mode"] == "distance":
            selected_atomid = op_in["cv_config"]["selected_atomid"]
        elif op_in["cv_config"]["mode"] == "custom":
            if "selected_resid" in op_in["cv_config"]:
                selected_resid = op_in["cv_config"]["selected_resid"]
            elif "selected_atomid" in op_in["cv_config"]:
                selected_atomid = op_in["cv_config"]["selected_atomid"]
            # Custom mode additionally carries user-provided CV definition files.
            cv_file = op_in["cv_file"]

        # Build the MD task for the chosen labeling method.
        if op_in["label_config"]["method"] == "restrained":
            # Restrained MD needs the CV centers; default to 0.0 when absent.
            at = 0.0
            if op_in["at"] is not None:
                at = load_txt(op_in["at"])
            gmx_task_builder = RestrainedMDTaskBuilder(
                conf = op_in["conf"],
                topology = op_in["topology"],
                label_config = op_in["label_config"],
                cv_file = cv_file,
                selected_resid = selected_resid,
                selected_atomid = selected_atomid,
                sampler_type = op_in["label_config"]["type"],
                kappa = op_in["label_config"]["kappas"],
                at = at,
                plumed_output = plumed_output_name,
                cv_mode = op_in["cv_config"]["mode"]
            )
        elif op_in["label_config"]["method"] == "constrained":
            gmx_task_builder = ConstrainedMDTaskBuilder(
                conf = op_in["conf"],
                topology = op_in["topology"],
                label_config = op_in["label_config"],
                cv_file = cv_file,
                selected_atomid = selected_atomid,
                sampler_type = op_in["label_config"]["type"],
                plumed_output = plumed_output_name,
                cv_mode = op_in["cv_config"]["mode"]
            )
        gmx_task = gmx_task_builder.build()
        task_path = Path(op_in["task_name"])
        task_path.mkdir(exist_ok=True, parents=True)
        # gmx_task.files maps file name -> (contents, open mode).
        for fname, fconts in gmx_task.files.items():
            with open(task_path.joinpath(fname), fconts[1]) as ff:
                ff.write(fconts[0])
        op_out = OPIO(
            {
                "task_path": task_path
            }
        )
        return op_out
from dflow.python import (
OP,
OPIO,
OPIOSign,
Artifact,
Parameter
)
from typing import List, Optional, Union
from pathlib import Path
from rid.select.cluster import Cluster
from rid.utils import save_txt, set_directory
from rid.constants import (
cluster_selection_data_name,
cluster_selection_index_name,
cluster_fig
)
import numpy as np
class PrepSelect(OP):
    """PrepSelect OP clusters the CV outputs of each parallel walker from the
    exploration steps and prepares one representative frame per cluster for the
    following selection steps.

    Clustering uses the agglomerative algorithm from Scikit-Learn over a
    pre-computed distance matrix (Euclidean distance in CV space). For every
    cluster a single member frame is picked at random as its representative.
    Periodic collective variables are flagged through `angular_mask` so their
    periodic boundary conditions are respected when distances are evaluated.

    In the first RiD iteration, `if_make_threshold` lets this OP tune the
    cluster threshold automatically from its initial guess so that the cluster
    count of each walker falls into [`numb_cluster_lower`, `numb_cluster_upper`].
    """

    @classmethod
    def get_input_sign(cls):
        return OPIOSign(
            {
                "task_name": str,
                "plm_out": Artifact(Path),
                "cluster_threshold": float,
                "angular_mask": Optional[Union[np.ndarray, List]],
                "weights": Optional[Union[np.ndarray, List]],
                "numb_cluster_upper": Parameter(Optional[float], default=None),
                "numb_cluster_lower": Parameter(Optional[float], default=None),
                "max_selection": int,
                "if_make_threshold": Parameter(bool, default=False)
            }
        )

    @classmethod
    def get_output_sign(cls):
        return OPIOSign(
            {
                "numb_cluster": int,
                "cluster_threshold": float,
                "cluster_fig": Artifact(Path, archive = None),
                "cluster_selection_index": Artifact(Path, archive = None),
                "cluster_selection_data": Artifact(Path, archive = None)
            }
        )

    @OP.exec_sign_check
    def execute(
        self,
        op_in: OPIO,
    ) -> OPIO:
        r"""Execute the OP.

        Parameters
        ----------
        op_in : dict
            Input dict with components:

            - `task_name`: (`str`) Task name, used to make a sub-directory for the task.
            - `plm_out`: (`Artifact(Path)`) CV output (`plumed.out` by default) from the exploration steps.
            - `cluster_threshold`: (`float`) Threshold for the agglomerative clustering algorithm.
            - `angular_mask`: (`array_like`) Periodicity mask per CV; 1 means periodic, 0 means non-periodic.
            - `weights`: (`array_like`) Per-CV weights used when clustering.
            - `numb_cluster_upper`: (`Optional[float]`) Upper limit of cluster number when tuning the threshold.
            - `numb_cluster_lower`: (`Optional[float]`) Lower limit of cluster number when tuning the threshold.
            - `max_selection`: (`int`) Max number of clusters selected per walker.
            - `if_make_threshold`: (`bool`) Whether to tune the threshold to fit the cluster-number interval.
                Usually `True` in the 1st iteration and `False` afterwards.

        Returns
        -------
            Output dict with components:

            - `numb_cluster`: (`int`) Number of clusters.
            - `cluster_threshold`: (`float`) (Possibly tuned) cluster threshold.
            - `cluster_selection_index`: (`Artifact(Path)`) Trajectory-frame indices of the cluster representatives.
            - `cluster_selection_data`: (`Artifact(Path)`) CV values of the cluster representatives.
        """
        out_dir = Path(op_in["task_name"])
        out_dir.mkdir(exist_ok=True, parents=True)

        # Column 0 of the PLUMED output is the time index and column 1 is the
        # biased potential; only the CV columns are clustered.
        cv_data = np.loadtxt(op_in["plm_out"])[:, 2:]
        clusterer = Cluster(
            cv_data,
            op_in["cluster_threshold"],
            op_in["task_name"],
            angular_mask=op_in["angular_mask"],
            weights=op_in["weights"],
            max_selection=op_in["max_selection"],
        )

        if op_in["if_make_threshold"]:
            lower = op_in["numb_cluster_lower"]
            upper = op_in["numb_cluster_upper"]
            assert (lower is not None) and (upper is not None), \
                "Please provide a number interval to make cluster thresholds."
            threshold = clusterer.make_threshold(lower, upper)
        else:
            threshold = op_in["cluster_threshold"]

        sel_idx = clusterer.get_cluster_selection()
        with set_directory(out_dir):
            np.save(cluster_selection_index_name, sel_idx)
            np.save(cluster_selection_data_name, cv_data[sel_idx])

        return OPIO({
            "cluster_threshold": threshold,
            "numb_cluster": len(sel_idx),
            "cluster_fig": out_dir.joinpath(cluster_fig),
            "cluster_selection_index": out_dir.joinpath(cluster_selection_index_name),
            "cluster_selection_data": out_dir.joinpath(cluster_selection_data_name)
        })
from typing import Dict, List
from pathlib import Path
from dflow.python import (
OP,
OPIO,
OPIOSign,
Artifact,
Parameter,
BigParameter
)
import numpy as np
from rid.utils import set_directory
from matplotlib import pyplot as plt
from rid.constants import mf_std_fig
import os
class LabelStats(OP):
    r"""LabelStats OP calculates the mean-force std of all labeling steps and
    removes the samples whose std exceeds `std_threshold` in any CV dimension.
    """

    @classmethod
    def get_input_sign(cls):
        return OPIOSign(
            {
                "cv_forces": Artifact(List[Path]),
                "mf_info": Artifact(List[Path]),
                "std_threshold": float
            }
        )

    @classmethod
    def get_output_sign(cls):
        return OPIOSign(
            {
                "mf_std_fig": Artifact(Path, archive=None),
                "cv_forces": Artifact(List[Path], archive=None)
            }
        )

    @OP.exec_sign_check
    def execute(
        self,
        op_in: OPIO,
    ) -> OPIO:
        r"""Execute the OP.

        Parameters
        ----------
        op_in : dict
            Input dict with components:

            - "cv_forces": (`Artifact(List[Path])`) CV/mean-force files, one per labeled conformation.
            - "mf_info": (`Artifact(List[Path])`) Log files containing the "mean force std" line per sample.
            - "std_threshold": (`float`) Samples whose std exceeds this in any CV dimension are dropped.

        Returns
        -------
            Output dict with components:

            - "mf_std_fig": (`Artifact(Path)`) Histogram of mean-force std per CV dimension.
            - "cv_forces": (`Artifact(List[Path])`) cv_forces with the high-std samples removed.
        """
        assert len(op_in["mf_info"]) == len(op_in["cv_forces"])
        cv_forces_list = np.array([_ for _ in op_in["cv_forces"] if _ is not None])
        # A cv_force file stores CV values followed by forces, so the CV
        # dimension is half of its length.
        cv_force = np.loadtxt(cv_forces_list[0])
        cv_dim = int(cv_force.shape[0] // 2)

        # Extract the std values from each mf_info file. They sit on the same
        # line as the "mean force std" header, starting at token index 7.
        mf_all_std_list = []
        for mf_info in op_in["mf_info"]:
            if mf_info and os.path.exists(mf_info):
                with open(mf_info) as f:
                    # Iterate line-by-line instead of `while f.readline()`:
                    # the original loop never terminated when the header was
                    # missing (readline() returns "" forever at EOF).
                    for line in f:
                        if line.strip().split(" ")[:3] == ['mean', 'force', 'std']:
                            mf_std_line = line.strip().split(" ")[7:]
                            mf_all_std_list.append([float(i) for i in mf_std_line])
                            break
        mf_all_std_list = np.array(mf_all_std_list)

        # Indices of samples whose std exceeds the threshold in any dimension.
        higher_index_list = [
            i for i, row in enumerate(mf_all_std_list)
            if any(num > op_in["std_threshold"] for num in row)
        ]
        print("higher index list", list(cv_forces_list[higher_index_list]))
        mf_all_std_list_modified = np.delete(mf_all_std_list, higher_index_list, axis=0)
        cv_forces_list_modified = np.delete(cv_forces_list, higher_index_list, axis=0)
        assert len(mf_all_std_list_modified) == len(cv_forces_list_modified)

        task_path = Path("label_std")
        task_path.mkdir(exist_ok=True, parents=True)
        with set_directory(task_path):
            # Histogram of the surviving samples' std, one series per CV dim.
            plt.figure(figsize=(8,6))
            for cv_index in range(cv_dim):
                plt.hist(mf_all_std_list_modified[:,cv_index], 100, label = "cv%s"%(cv_index+1))
            plt.legend()
            plt.xlabel("mean force std")
            plt.ylabel("frequency")
            plt.savefig(mf_std_fig)

        op_out = OPIO({
            "mf_std_fig": task_path.joinpath(mf_std_fig),
            "cv_forces": list(cv_forces_list_modified)
        })
        return op_out
from typing import Dict
from copy import deepcopy
from dflow import (
InputParameter,
Inputs,
InputArtifact,
Outputs,
OutputArtifact,
Step,
Steps
)
from dflow.python import(
PythonOPTemplate,
OP
)
from rid.utils import init_executor
class DataGenerator(Steps):
    r""" Data generator SuperOP.
    This SuperOP combines CollectData OP and MergeData OP to process data for training.
    """
    def __init__(
        self,
        name: str,
        collect_op: OP,
        merge_op: OP,
        run_config: Dict,
        upload_python_package = None,
        retry_times = None
    ):
        # Declared interface of the SuperOP; exposed through the properties below.
        self._input_parameters = {
            "block_tag" : InputParameter(type=str, value="")
        }
        self._input_artifacts = {
            "cv_forces": InputArtifact(),
            "data_old": InputArtifact(optional=True)
        }
        self._output_parameters = {}
        self._output_artifacts = {
            "data": OutputArtifact()
        }

        super().__init__(
            name=name,
            inputs=Inputs(
                parameters=self._input_parameters,
                artifacts=self._input_artifacts
            ),
            outputs=Outputs(
                parameters=self._output_parameters,
                artifacts=self._output_artifacts
            ),
        )

        # _gen_data mutates this Steps object in place and returns it; the
        # rebinding of ``self`` is therefore effectively a no-op.
        self = _gen_data(
            self,
            collect_op,
            merge_op,
            run_config = run_config,
            upload_python_package = upload_python_package,
            retry_times = retry_times
        )

    @property
    def input_parameters(self):
        return self._input_parameters

    @property
    def input_artifacts(self):
        return self._input_artifacts

    @property
    def output_parameters(self):
        return self._output_parameters

    @property
    def output_artifacts(self):
        return self._output_artifacts

    @property
    def keys(self):
        # NOTE(review): ``self._keys`` is never assigned in this class, so
        # accessing ``keys`` raises AttributeError — confirm whether a caller
        # or dflow is expected to set it.
        return self._keys
def _gen_data(
    data_steps,
    collect_op: OP,
    merge_op: OP,
    run_config: Dict,
    upload_python_package: str = None,
    retry_times: int = None,
):
    """Populate ``data_steps`` with the collect-data and merge-data steps.

    Mutates the given Steps object in place and returns it. ``run_config``
    is deep-copied; its ``template_config`` and ``executor`` entries are
    consumed here and the remainder is forwarded to each ``Step``.
    """
    run_config = deepcopy(run_config)
    template_cfg = run_config.pop('template_config')
    executor = init_executor(run_config.pop('executor'))
    block_tag = data_steps.inputs.parameters["block_tag"]

    # Step 1: gather the labeled CV/force files into a new data set.
    collect_step = Step(
        'collect-data',
        template=PythonOPTemplate(
            collect_op,
            python_packages=upload_python_package,
            retry_on_transient_error=retry_times,
            **template_cfg,
        ),
        parameters={},
        artifacts={
            "cv_forces": data_steps.inputs.artifacts['cv_forces'],
        },
        key='{}-collect-data'.format(block_tag),
        executor=executor,
        **run_config,
    )
    data_steps.add(collect_step)

    # Step 2: merge the new data with data from previous iterations.
    merge_step = Step(
        'merge-data',
        template=PythonOPTemplate(
            merge_op,
            python_packages=upload_python_package,
            retry_on_transient_error=retry_times,
            **template_cfg,
        ),
        parameters={},
        artifacts={
            "data_old": data_steps.inputs.artifacts["data_old"],
            "data_new": collect_step.outputs.artifacts["data_new"],
        },
        key='{}-merge-data'.format(block_tag),
        executor=executor,
        **run_config,
    )
    data_steps.add(merge_step)

    # Expose the merged data set as the SuperOP's output.
    data_steps.outputs.artifacts["data"]._from = merge_step.outputs.artifacts["data_raw"]
    return data_steps
from typing import Dict, List, Optional, Union
from copy import deepcopy
import numpy as np
from dflow import (
InputParameter,
OutputParameter,
Inputs,
InputArtifact,
Outputs,
OutputArtifact,
Step,
Steps,
argo_range,
argo_len,
)
from dflow.python import(
PythonOPTemplate,
OP,
Slices
)
from rid.utils import init_executor
class Selector(Steps):
    r""" Selector SuperOP.
    This SuperOP combines PrepSelect OP and RunSelect OP.
    """
    def __init__(
        self,
        name: str,
        prep_op: OP,
        run_op: OP,
        prep_config: Dict,
        run_config: Dict,
        upload_python_package = None,
        retry_times = None
    ):
        # Declared interface of the SuperOP; exposed through the properties below.
        self._input_parameters = {
            "label_config": InputParameter(type=Dict),
            "trust_lvl_1" : InputParameter(type=List[float], value=2.0),
            "trust_lvl_2": InputParameter(type=List[float], value=3.0),
            "cluster_threshold": InputParameter(type=List[float], value=1.0),
            "angular_mask": InputParameter(type=Optional[Union[np.ndarray, List]]),
            "weights": InputParameter(type=Optional[Union[np.ndarray, List]]),
            "numb_cluster_upper": InputParameter(type=Optional[float], value=None),
            "numb_cluster_lower": InputParameter(type=Optional[float], value=None),
            "max_selection": InputParameter(type=int),
            "dt": InputParameter(type=float, value=0.02),
            "output_freq": InputParameter(type=float, value=2500),
            "slice_mode": InputParameter(type=str, value="gmx"),
            "type_map": InputParameter(type=List, value=[]),
            "if_make_threshold": InputParameter(type=bool, value=False),
            "task_names" : InputParameter(type=List[str]),
            "block_tag" : InputParameter(type=str, value="")
        }
        self._input_artifacts = {
            "models" : InputArtifact(optional=True),
            "plm_out": InputArtifact(),
            "xtc_traj": InputArtifact(),
            "topology": InputArtifact()
        }
        self._output_parameters = {
            "cluster_threshold": OutputParameter(type=List[int]),
            "numb_cluster": OutputParameter(type=List[int])
        }
        self._output_artifacts = {
            "cluster_selection_index": OutputArtifact(),
            "selected_confs": OutputArtifact(),
            "selected_cv_init": OutputArtifact(),
            "model_devi": OutputArtifact(),
            "selected_indices": OutputArtifact(),
            "selected_conf_tags": OutputArtifact()
        }

        super().__init__(
            name=name,
            inputs=Inputs(
                parameters=self._input_parameters,
                artifacts=self._input_artifacts
            ),
            outputs=Outputs(
                parameters=self._output_parameters,
                artifacts=self._output_artifacts
            ),
        )

        # dflow step keys, prefixed with the block tag of this iteration.
        step_keys = {
            "prep_select": "{}-prep-select".format(self.inputs.parameters["block_tag"]),
            "run_select": "{}-run-select".format(self.inputs.parameters["block_tag"]),
            "post_select": "{}-post-select".format(self.inputs.parameters["block_tag"])
        }

        # _select mutates this Steps object in place and returns it; the
        # rebinding of ``self`` is therefore effectively a no-op.
        self = _select(
            self,
            step_keys,
            prep_op,
            run_op,
            prep_config = prep_config,
            run_config = run_config,
            upload_python_package = upload_python_package,
            retry_times = retry_times
        )

    @property
    def input_parameters(self):
        return self._input_parameters

    @property
    def input_artifacts(self):
        return self._input_artifacts

    @property
    def output_parameters(self):
        return self._output_parameters

    @property
    def output_artifacts(self):
        return self._output_artifacts

    @property
    def keys(self):
        # NOTE(review): ``self._keys`` is never assigned in this class, so
        # accessing ``keys`` raises AttributeError — confirm whether a caller
        # or dflow is expected to set it.
        return self._keys
def _select(
select_steps,
step_keys,
prep_select_op : OP,
run_select_op : OP,
prep_config : Dict,
run_config : Dict,
upload_python_package : str = None,
retry_times: int = None
):
prep_config = deepcopy(prep_config)
run_config = deepcopy(run_config)
prep_template_config = prep_config.pop('template_config')
run_template_config = run_config.pop('template_config')
prep_executor = init_executor(prep_config.pop('executor'))
run_executor = init_executor(run_config.pop('executor'))
prep_merge = False
if prep_executor is not None:
prep_merge = prep_executor.merge_sliced_step
if prep_merge:
prep_select = Step(
'prep-select',
template=PythonOPTemplate(
prep_select_op,
python_packages = upload_python_package,
retry_on_transient_error = retry_times,
slices=Slices(
"{{item}}",
input_parameter=["cluster_threshold", "task_name"],
input_artifact=["plm_out"],
output_artifact=["cluster_fig","cluster_selection_index", "cluster_selection_data"],
output_parameter=["cluster_threshold", "numb_cluster"]
),
**prep_template_config,
),
parameters={
"cluster_threshold": select_steps.inputs.parameters['cluster_threshold'],
"angular_mask": select_steps.inputs.parameters['angular_mask'],
"weights": select_steps.inputs.parameters['weights'],
"numb_cluster_upper": select_steps.inputs.parameters['numb_cluster_upper'],
"numb_cluster_lower": select_steps.inputs.parameters['numb_cluster_lower'],
"max_selection": select_steps.inputs.parameters['max_selection'],
"if_make_threshold": select_steps.inputs.parameters['if_make_threshold'],
"task_name": select_steps.inputs.parameters['task_names']
},
artifacts={
"plm_out": select_steps.inputs.artifacts['plm_out']
},
key = step_keys["prep_select"]+"-{{item}}",
executor = prep_executor,
with_param=argo_range(argo_len(select_steps.inputs.parameters['task_names'])),
**prep_config
)
else:
prep_select = Step(
'prep-select',
template=PythonOPTemplate(
prep_select_op,
python_packages = upload_python_package,
retry_on_transient_error = retry_times,
slices=Slices(sub_path = True,
input_parameter=["cluster_threshold", "task_name"],
input_artifact=["plm_out"],
output_artifact=["cluster_fig","cluster_selection_index", "cluster_selection_data"],
output_parameter=["cluster_threshold", "numb_cluster"]
),
**prep_template_config,
),
parameters={
"cluster_threshold": select_steps.inputs.parameters['cluster_threshold'],
"angular_mask": select_steps.inputs.parameters['angular_mask'],
"weights": select_steps.inputs.parameters['weights'],
"numb_cluster_upper": select_steps.inputs.parameters['numb_cluster_upper'],
"numb_cluster_lower": select_steps.inputs.parameters['numb_cluster_lower'],
"max_selection": select_steps.inputs.parameters['max_selection'],
"if_make_threshold": select_steps.inputs.parameters['if_make_threshold'],
"task_name": select_steps.inputs.parameters['task_names']
},
artifacts={
"plm_out": select_steps.inputs.artifacts['plm_out']
},
key = step_keys["prep_select"]+"-{{item.order}}",
executor = prep_executor,
**prep_config,
)
select_steps.add(prep_select)
run_merge = False
if run_executor is not None:
run_merge = run_executor.merge_sliced_step
if run_merge:
run_select = Step(
'run-select',
template=PythonOPTemplate(
run_select_op,
python_packages = upload_python_package,
retry_on_transient_error = retry_times,
slices=Slices(
"int({{item}})",
input_parameter=["task_name", "trust_lvl_1", "trust_lvl_2"],
input_artifact=["cluster_selection_index", "cluster_selection_data", "xtc_traj", "topology"],
output_artifact=["selected_confs", "selected_cv_init", "model_devi", "selected_indices","selected_conf_tags"]
),
**run_template_config,
),
parameters={
"label_config": select_steps.inputs.parameters["label_config"],
"trust_lvl_1": select_steps.inputs.parameters["trust_lvl_1"],
"trust_lvl_2": select_steps.inputs.parameters["trust_lvl_2"],
"dt": select_steps.inputs.parameters["dt"],
"output_freq": select_steps.inputs.parameters["output_freq"],
"slice_mode": select_steps.inputs.parameters["slice_mode"],
"type_map": select_steps.inputs.parameters["type_map"],
"task_name": select_steps.inputs.parameters['task_names']
},
artifacts={
"cluster_selection_index": prep_select.outputs.artifacts["cluster_selection_index"],
"cluster_selection_data": prep_select.outputs.artifacts["cluster_selection_data"],
"models": select_steps.inputs.artifacts["models"],
"xtc_traj": select_steps.inputs.artifacts["xtc_traj"],
"topology": select_steps.inputs.artifacts["topology"]
},
key = step_keys["run_select"]+"-{{item}}",
executor = run_executor,
with_param=argo_range(argo_len(select_steps.inputs.parameters["task_names"])),
**run_config
)
else:
run_select = Step(
'run-select',
template=PythonOPTemplate(
run_select_op,
python_packages = upload_python_package,
retry_on_transient_error = retry_times,
slices=Slices(sub_path = True,
input_parameter=["task_name", "trust_lvl_1", "trust_lvl_2"],
input_artifact=["cluster_selection_index", "cluster_selection_data", "xtc_traj", "topology"],
output_artifact=["selected_confs", "selected_cv_init", "model_devi", "selected_indices","selected_conf_tags"]
),
**run_template_config,
),
parameters={
"label_config": select_steps.inputs.parameters["label_config"],
"trust_lvl_1": select_steps.inputs.parameters["trust_lvl_1"],
"trust_lvl_2": select_steps.inputs.parameters["trust_lvl_2"],
"dt": select_steps.inputs.parameters["dt"],
"output_freq": select_steps.inputs.parameters["output_freq"],
"slice_mode": select_steps.inputs.parameters["slice_mode"],
"type_map": select_steps.inputs.parameters["type_map"],
"task_name": select_steps.inputs.parameters['task_names']
},
artifacts={
"cluster_selection_index": prep_select.outputs.artifacts["cluster_selection_index"],
"cluster_selection_data": prep_select.outputs.artifacts["cluster_selection_data"],
"models": select_steps.inputs.artifacts["models"],
"xtc_traj": select_steps.inputs.artifacts["xtc_traj"],
"topology": select_steps.inputs.artifacts["topology"]
},
key = step_keys["run_select"]+"-{{item.order}}",
executor = run_executor,
**run_config,
)
select_steps.add(run_select)
select_steps.outputs.parameters["cluster_threshold"].value_from_parameter = prep_select.outputs.parameters["cluster_threshold"]
select_steps.outputs.parameters["numb_cluster"].value_from_parameter = prep_select.outputs.parameters["numb_cluster"]
select_steps.outputs.artifacts["selected_conf_tags"]._from = run_select.outputs.artifacts["selected_conf_tags"]
select_steps.outputs.artifacts["cluster_selection_index"]._from = prep_select.outputs.artifacts["cluster_selection_index"]
select_steps.outputs.artifacts["selected_confs"]._from = run_select.outputs.artifacts["selected_confs"]
select_steps.outputs.artifacts["selected_cv_init"]._from = run_select.outputs.artifacts["selected_cv_init"]
select_steps.outputs.artifacts["model_devi"]._from = run_select.outputs.artifacts["model_devi"]
select_steps.outputs.artifacts["selected_indices"]._from = run_select.outputs.artifacts["selected_indices"]
    return select_steps
from typing import Dict, List
from copy import deepcopy
from dflow import (
InputParameter,
Inputs,
InputArtifact,
Outputs,
OutputArtifact,
Step,
Steps,
argo_range,
argo_len,
)
from dflow.python import(
PythonOPTemplate,
OP,
Slices,
)
from rid.utils import init_executor
class Label(Steps):
    r"""Label SuperOP.

    This SuperOP combines the CheckLabelInputs OP, PrepLabel OP, RunLabel OP
    and a label-statistics OP into a single dflow ``Steps`` template.  Given
    the conformations selected during exploration, it prepares restrained-MD
    tasks, runs them, and collects the resulting CV forces.
    """
    def __init__(
        self,
        name: str,
        check_input_op: OP,
        prep_op: OP,
        run_op: OP,
        stats_op: OP,
        prep_config: Dict,
        run_config: Dict,
        upload_python_package = None,
        retry_times = None
    ):
        # Workflow-level input parameters; entries with ``value=...`` are
        # defaults that the caller may override.
        self._input_parameters = {
            "label_config": InputParameter(type=Dict),
            "cv_config": InputParameter(type=Dict),
            "tail": InputParameter(type=float, value=0.9),
            "std_threshold": InputParameter(type=float, value=5.0),
            "block_tag" : InputParameter(type=str, value="")
        }
        # Most artifacts are optional — presumably different MD engines /
        # CV definitions need different subsets of them (TODO confirm).
        self._input_artifacts = {
            "topology" : InputArtifact(optional=True),
            "models" : InputArtifact(optional=True),
            "forcefield" : InputArtifact(optional=True),
            "inputfile": InputArtifact(optional=True),
            "confs": InputArtifact(),
            "at": InputArtifact(optional=True),
            "index_file": InputArtifact(optional=True),
            "dp_files": InputArtifact(optional=True),
            "cv_file": InputArtifact(optional=True),
            "conf_tags": InputArtifact(optional=True)
        }
        # No output parameters; results are exposed only as artifacts.
        self._output_parameters = {
        }
        self._output_artifacts = {
            "md_log": OutputArtifact(),
            "cv_forces": OutputArtifact()
        }
        super().__init__(
            name=name,
            inputs=Inputs(
                parameters=self._input_parameters,
                artifacts=self._input_artifacts
            ),
            outputs=Outputs(
                parameters=self._output_parameters,
                artifacts=self._output_artifacts
            ),
        )
        # Argo step keys, prefixed with the block tag so steps of different
        # iterations can be distinguished (and reused on workflow restart).
        step_keys = {
            "check_label_inputs": "{}-check-label-inputs".format(self.inputs.parameters["block_tag"]),
            "prep_label": "{}-prep-label".format(self.inputs.parameters["block_tag"]),
            "run_label": "{}-run-label".format(self.inputs.parameters["block_tag"]),
            "label_stats": "{}-label-stats".format(self.inputs.parameters["block_tag"])
        }
        # _label mutates this Steps object in place and returns it, so the
        # rebinding of ``self`` here is effectively a no-op.
        self = _label(
            self,
            step_keys,
            check_input_op,
            prep_op,
            run_op,
            stats_op,
            prep_config = prep_config,
            run_config = run_config,
            upload_python_package = upload_python_package,
            retry_times = retry_times
        )

    @property
    def input_parameters(self):
        return self._input_parameters

    @property
    def input_artifacts(self):
        return self._input_artifacts

    @property
    def output_parameters(self):
        return self._output_parameters

    @property
    def output_artifacts(self):
        return self._output_artifacts

    @property
    def keys(self):
        # NOTE(review): ``self._keys`` is never assigned in this class, so
        # accessing ``keys`` raises AttributeError — confirm whether it was
        # meant to return the ``step_keys`` dict built in __init__.
        return self._keys
def _label(
    label_steps,
    step_keys,
    check_label_input_op : OP,
    prep_label_op : OP,
    run_label_op : OP,
    label_stats_op: OP,
    prep_config : Dict,
    run_config : Dict,
    upload_python_package : str = None,
    retry_times: int = None
):
    """Populate ``label_steps`` with the chained label sub-steps.

    Pipeline: check-label-inputs -> prep-label (sliced per conformation)
    -> run-label (sliced per conformation) -> label-outputs-stats.
    Returns the same (mutated) ``label_steps`` object.
    """
    # Work on copies so popping the template/executor entries below does
    # not mutate the caller's config dicts.
    prep_config = deepcopy(prep_config)
    run_config = deepcopy(run_config)
    prep_template_config = prep_config.pop('template_config')
    run_template_config = run_config.pop('template_config')
    prep_executor = init_executor(prep_config.pop('executor'))
    run_executor = init_executor(run_config.pop('executor'))
    # Step 1: validate the selected conformations; its outputs provide the
    # per-conformation tags and the ``if_continue`` gate used below.
    check_label_inputs = Step(
        'check-label-inputs',
        template=PythonOPTemplate(
            check_label_input_op,
            python_packages = upload_python_package,
            retry_on_transient_error = retry_times,
            **prep_template_config,
        ),
        parameters={},
        artifacts={
            "conf_tags": label_steps.inputs.artifacts['conf_tags'],
            "confs": label_steps.inputs.artifacts['confs'],
        },
        key = step_keys['check_label_inputs'],
        executor = prep_executor,
        **prep_config,
    )
    label_steps.add(check_label_inputs)
    # Step 2: prepare one restrained-MD task directory per conformation.
    # When the executor merges sliced steps, all slices run inside one job;
    # otherwise slices are batched with group_size/pool_size.
    prep_merge = False
    if prep_executor is not None:
        prep_merge = prep_executor.merge_sliced_step
    if prep_merge:
        prep_label = Step(
            'prep-label',
            template=PythonOPTemplate(
                prep_label_op,
                python_packages = upload_python_package,
                retry_on_transient_error = retry_times,
                slices=Slices("{{item}}",
                              input_parameter=["task_name"],
                              input_artifact=["conf", "at"],
                              output_artifact=["task_path"]),
                **prep_template_config,
            ),
            parameters={
                "label_config": label_steps.inputs.parameters['label_config'],
                "cv_config": label_steps.inputs.parameters['cv_config'],
                "task_name": check_label_inputs.outputs.parameters['conf_tags']
            },
            artifacts={
                "topology": label_steps.inputs.artifacts['topology'],
                "conf": label_steps.inputs.artifacts['confs'],
                "at": label_steps.inputs.artifacts['at'],
                "cv_file": label_steps.inputs.artifacts['cv_file']
            },
            key = step_keys['prep_label']+"-{{item}}",
            executor = prep_executor,
            with_param=argo_range(argo_len(check_label_inputs.outputs.parameters['conf_tags'])),
            # Skip the whole sliced step when the input check decided there
            # is nothing to label.
            when = "%s > 0" % (check_label_inputs.outputs.parameters["if_continue"]),
            **prep_config
        )
    else:
        prep_label = Step(
            'prep-label',
            template=PythonOPTemplate(
                prep_label_op,
                python_packages = upload_python_package,
                retry_on_transient_error = retry_times,
                # Batch the slices: up to 10 per pod, run sequentially
                # (pool_size=1) within each pod.
                slices=Slices("{{item}}",
                              group_size=10,
                              pool_size=1,
                              input_parameter=["task_name"],
                              input_artifact=["conf", "at"],
                              output_artifact=["task_path"]),
                **prep_template_config,
            ),
            parameters={
                "label_config": label_steps.inputs.parameters['label_config'],
                "cv_config": label_steps.inputs.parameters['cv_config'],
                "task_name": check_label_inputs.outputs.parameters['conf_tags']
            },
            artifacts={
                "topology": label_steps.inputs.artifacts['topology'],
                "conf": label_steps.inputs.artifacts['confs'],
                "at": label_steps.inputs.artifacts['at'],
                "cv_file": label_steps.inputs.artifacts['cv_file']
            },
            key = step_keys['prep_label']+"-{{item}}",
            executor = prep_executor,
            with_param=argo_range(argo_len(check_label_inputs.outputs.parameters['conf_tags'])),
            when = "%s > 0" % (check_label_inputs.outputs.parameters["if_continue"]),
            **prep_config
        )
    label_steps.add(prep_label)
    # Step 3: run the restrained MD and extract mean forces.  Up to 25% of
    # the slices may fail (continue_on_success_ratio=0.75) without failing
    # the whole step.
    run_merge = False
    if run_executor is not None:
        run_merge = run_executor.merge_sliced_step
    if run_merge:
        run_label = Step(
            'run-label',
            template=PythonOPTemplate(
                run_label_op,
                python_packages = upload_python_package,
                retry_on_transient_error = retry_times,
                slices=Slices("{{item}}",
                              input_parameter=["task_name"],
                              input_artifact=["task_path","at"],
                              output_artifact=["plm_out","cv_forces","mf_info","mf_fig","md_log","trajectory"]),
                **run_template_config,
            ),
            parameters={
                "label_config": label_steps.inputs.parameters["label_config"],
                "cv_config": label_steps.inputs.parameters['cv_config'],
                "task_name": check_label_inputs.outputs.parameters['conf_tags'],
                "tail": label_steps.inputs.parameters['tail']
            },
            artifacts={
                "forcefield": label_steps.inputs.artifacts['forcefield'],
                "task_path": prep_label.outputs.artifacts["task_path"],
                "index_file": label_steps.inputs.artifacts['index_file'],
                "dp_files": label_steps.inputs.artifacts['dp_files'],
                "cv_file": label_steps.inputs.artifacts['cv_file'],
                "inputfile": label_steps.inputs.artifacts['inputfile'],
                "at": label_steps.inputs.artifacts['at']
            },
            key = step_keys['run_label']+"-{{item}}",
            executor = run_executor,
            with_param=argo_range(argo_len(check_label_inputs.outputs.parameters['conf_tags'])),
            continue_on_success_ratio = 0.75,
            **run_config
        )
    else:
        run_label = Step(
            'run-label',
            template=PythonOPTemplate(
                run_label_op,
                python_packages = upload_python_package,
                retry_on_transient_error = retry_times,
                # Same batching scheme as the non-merged prep-label step.
                slices=Slices("{{item}}",
                              group_size=10,
                              pool_size=1,
                              input_parameter=["task_name"],
                              input_artifact=["task_path","at"],
                              output_artifact=["plm_out","cv_forces","mf_info","mf_fig","md_log", "trajectory"]),
                **run_template_config,
            ),
            parameters={
                "label_config": label_steps.inputs.parameters["label_config"],
                "cv_config": label_steps.inputs.parameters['cv_config'],
                "task_name": check_label_inputs.outputs.parameters['conf_tags'],
                "tail": label_steps.inputs.parameters['tail']
            },
            artifacts={
                "forcefield": label_steps.inputs.artifacts['forcefield'],
                "task_path": prep_label.outputs.artifacts["task_path"],
                "index_file": label_steps.inputs.artifacts['index_file'],
                "dp_files": label_steps.inputs.artifacts['dp_files'],
                "cv_file": label_steps.inputs.artifacts['cv_file'],
                "inputfile": label_steps.inputs.artifacts['inputfile'],
                "at": label_steps.inputs.artifacts['at']
            },
            key = step_keys['run_label']+"-{{item}}",
            executor = run_executor,
            with_param=argo_range(argo_len(check_label_inputs.outputs.parameters['conf_tags'])),
            continue_on_success_ratio = 0.75,
            **run_config
        )
    label_steps.add(run_label)
    # Step 4: aggregate the per-conformation cv_forces / mean-force info,
    # filtering by ``std_threshold``.
    label_outputs_stats = Step(
        'label-outputs-stats',
        template=PythonOPTemplate(
            label_stats_op,
            python_packages = upload_python_package,
            retry_on_transient_error = retry_times,
            **prep_template_config,
        ),
        parameters={
            "std_threshold": label_steps.inputs.parameters["std_threshold"]
        },
        artifacts={
            "cv_forces": run_label.outputs.artifacts["cv_forces"],
            "mf_info": run_label.outputs.artifacts["mf_info"]
        },
        key = step_keys["label_stats"],
        executor = prep_executor,
        **prep_config,
    )
    label_steps.add(label_outputs_stats)
    # Re-export the results as SuperOP-level outputs.
    label_steps.outputs.artifacts["cv_forces"]._from = label_outputs_stats.outputs.artifacts["cv_forces"]
    label_steps.outputs.artifacts["md_log"]._from = run_label.outputs.artifacts["md_log"]
    return label_steps
from typing import Dict, List, Optional, Union
from copy import deepcopy
import numpy as np
from dflow import (
InputParameter,
OutputParameter,
Inputs,
InputArtifact,
Outputs,
OutputArtifact,
Workflow,
Step,
Steps,
upload_artifact,
argo_range,
argo_len,
argo_sequence,
)
from dflow.python import(
PythonOPTemplate,
OP,
OPIO,
OPIOSign,
Artifact,
Slices,
)
from rid.utils import init_executor
class InitBlock(Steps):
    r"""Initial Block SuperOP.

    This SuperOP is the first iteration of the rid-kit cycle.
    """
    def __init__(
        self,
        name: str,
        exploration_op: OP,
        select_op: OP,
        label_op: OP,
        data_op: OP,
        train_op: OP,
        model_devi_op: OP,
        train_config: Dict,
        model_devi_config: Dict,
        upload_python_package = None,
        retry_times = None
    ):
        # Workflow-level input parameters; entries with ``value=...`` are
        # defaults that the caller may override.
        self._input_parameters = {
            "block_tag" : InputParameter(type=str, value=""),
            "walker_tags": InputParameter(type=List),
            "model_tags": InputParameter(type=List),
            "trust_lvl_1" : InputParameter(type=List[float], value=2.0),
            "trust_lvl_2": InputParameter(type=List[float], value=3.0),
            "exploration_config" : InputParameter(type=Dict),
            "cv_config" : InputParameter(type=Dict),
            "cluster_threshold": InputParameter(type=float, value=1.0),
            "angular_mask": InputParameter(type=Optional[Union[np.ndarray, List]]),
            "weights": InputParameter(type=Optional[Union[np.ndarray, List]]),
            "numb_cluster_upper": InputParameter(type=float),
            "numb_cluster_lower": InputParameter(type=float),
            "max_selection": InputParameter(type=int),
            "std_threshold": InputParameter(type=float, value=5.0),
            "dt": InputParameter(type=float, value=0.02),
            "output_freq": InputParameter(type=float, value=2500),
            "slice_mode": InputParameter(type=str, value="gmx"),
            "label_config": InputParameter(type=Dict),
            "type_map": InputParameter(type=List, value = []),
            "tail": InputParameter(type=float, value=0.9),
            "train_config": InputParameter(type=Dict)
        }
        # Most artifacts are optional — presumably different MD engines /
        # CV definitions need different subsets of them (TODO confirm).
        self._input_artifacts = {
            "models" : InputArtifact(optional=True),
            "forcefield" : InputArtifact(optional=True),
            "topology" : InputArtifact(optional=True),
            "inputfile": InputArtifact(optional=True),
            "confs" : InputArtifact(),
            "index_file": InputArtifact(optional=True),
            "data_old": InputArtifact(optional=True),
            "dp_files": InputArtifact(optional=True),
            "cv_file": InputArtifact(optional=True)
        }
        # The cluster threshold determined in this first block is exported
        # so that later iterations can reuse it.
        self._output_parameters = {
            "cluster_threshold": OutputParameter(type=int)
        }
        self._output_artifacts = {
            "exploration_md_log": OutputArtifact(),
            "exploration_trajectory": OutputArtifact(),
            "selection_index": OutputArtifact(),
            "models": OutputArtifact(),
            "data": OutputArtifact(),
            "conf_outs": OutputArtifact()
        }
        super().__init__(
            name=name,
            inputs=Inputs(
                parameters=self._input_parameters,
                artifacts=self._input_artifacts
            ),
            outputs=Outputs(
                parameters=self._output_parameters,
                artifacts=self._output_artifacts
            ),
        )
        # _first_run_block mutates this Steps object in place and returns
        # it, so the rebinding of ``self`` here is effectively a no-op.
        self = _first_run_block(
            self,
            exploration_op,
            select_op,
            label_op,
            data_op,
            train_op,
            model_devi_op,
            train_config,
            model_devi_config,
            upload_python_package = upload_python_package,
            retry_times = retry_times
        )

    @property
    def input_parameters(self):
        return self._input_parameters

    @property
    def input_artifacts(self):
        return self._input_artifacts

    @property
    def output_parameters(self):
        return self._output_parameters

    @property
    def output_artifacts(self):
        return self._output_artifacts

    @property
    def keys(self):
        # NOTE(review): ``self._keys`` is never assigned in this class, so
        # accessing ``keys`` raises AttributeError — confirm intent.
        return self._keys
def _first_run_block(
    block_steps,
    exploration_op: OP,
    select_op: OP,
    label_op: OP,
    data_op: OP,
    train_op: OP,
    model_devi_op: OP,
    train_config : Dict,
    model_devi_config: Dict,
    upload_python_package : Optional[str] = None,
    retry_times: Optional[int] = None
):
    """Wire the steps of the first rid-kit iteration into ``block_steps``.

    Pipeline: Exploration -> Selection -> Label -> GenData -> train
    (sliced over model tags) -> ModelDeviation (sliced over walker tags).
    Returns the same (mutated) ``block_steps`` object.
    """
    # Exploration MD for every walker.
    exploration = Step(
        "Exploration",
        template=exploration_op,
        parameters={
            "trust_lvl_1" : block_steps.inputs.parameters['trust_lvl_1'],
            "trust_lvl_2": block_steps.inputs.parameters['trust_lvl_2'],
            "exploration_config" : block_steps.inputs.parameters['exploration_config'],
            "cv_config" : block_steps.inputs.parameters['cv_config'],
            "task_names" : block_steps.inputs.parameters['walker_tags'],
            "block_tag" : block_steps.inputs.parameters['block_tag']
        },
        artifacts={
            "models" : block_steps.inputs.artifacts['models'],
            "forcefield" : block_steps.inputs.artifacts['forcefield'],
            "topology" : block_steps.inputs.artifacts['topology'],
            "inputfile": block_steps.inputs.artifacts['inputfile'],
            "confs" : block_steps.inputs.artifacts['confs'],
            "index_file": block_steps.inputs.artifacts['index_file'],
            "dp_files": block_steps.inputs.artifacts['dp_files'],
            "cv_file": block_steps.inputs.artifacts['cv_file']
        },
        key = '{}-exploration'.format(block_steps.inputs.parameters['block_tag'])
    )
    block_steps.add(exploration)
    # Cluster the explored trajectory and pick representative frames.
    # ``if_make_threshold`` is True only in this first block: the cluster
    # threshold determined here is exported for later iterations.
    selection = Step(
        "Selection",
        template=select_op,
        parameters={
            "label_config": block_steps.inputs.parameters["label_config"],
            "trust_lvl_1" : block_steps.inputs.parameters["trust_lvl_1"],
            "trust_lvl_2": block_steps.inputs.parameters["trust_lvl_2"],
            "cluster_threshold": block_steps.inputs.parameters["cluster_threshold"],
            "angular_mask": block_steps.inputs.parameters["angular_mask"],
            "weights": block_steps.inputs.parameters["weights"],
            "numb_cluster_upper": block_steps.inputs.parameters["numb_cluster_upper"],
            "numb_cluster_lower": block_steps.inputs.parameters["numb_cluster_lower"],
            "max_selection": block_steps.inputs.parameters["max_selection"],
            "dt": block_steps.inputs.parameters["dt"],
            "output_freq": block_steps.inputs.parameters["output_freq"],
            "slice_mode": block_steps.inputs.parameters["slice_mode"],
            "type_map": block_steps.inputs.parameters["type_map"],
            "if_make_threshold": True,
            "task_names" : block_steps.inputs.parameters['walker_tags'],
            "block_tag" : block_steps.inputs.parameters['block_tag'],
        },
        artifacts={
            "models" : block_steps.inputs.artifacts["models"],
            "plm_out": exploration.outputs.artifacts["plm_out"],
            "xtc_traj": exploration.outputs.artifacts["trajectory"],
            # The input conformations double as topology for slicing the
            # exploration trajectories.
            "topology": block_steps.inputs.artifacts["confs"]
        },
        key = '{}-selection'.format(block_steps.inputs.parameters['block_tag']),
    )
    block_steps.add(selection)
    # Restrained MD on the selected conformations to obtain labels.
    label = Step(
        "Label",
        template=label_op,
        parameters={
            "label_config": block_steps.inputs.parameters['label_config'],
            "cv_config": block_steps.inputs.parameters['cv_config'],
            "tail": block_steps.inputs.parameters['tail'],
            "block_tag" : block_steps.inputs.parameters['block_tag'],
            "std_threshold": block_steps.inputs.parameters["std_threshold"]
        },
        artifacts={
            "topology": block_steps.inputs.artifacts["topology"],
            "forcefield" : block_steps.inputs.artifacts['forcefield'],
            "confs": selection.outputs.artifacts["selected_confs"],
            "at": selection.outputs.artifacts["selected_cv_init"],
            "index_file": block_steps.inputs.artifacts['index_file'],
            "inputfile": block_steps.inputs.artifacts['inputfile'],
            "dp_files": block_steps.inputs.artifacts['dp_files'],
            "cv_file": block_steps.inputs.artifacts['cv_file'],
            "conf_tags" : selection.outputs.artifacts['selected_conf_tags']
        },
        key = '{}-label'.format(block_steps.inputs.parameters['block_tag'])
    )
    block_steps.add(label)
    # Merge the fresh labels with data from previous runs (data_old).
    gen_data = Step(
        'GenData',
        template=data_op,
        parameters={"block_tag" : block_steps.inputs.parameters['block_tag']},
        artifacts={
            "cv_forces": label.outputs.artifacts["cv_forces"],
            "data_old": block_steps.inputs.artifacts['data_old']
        },
        key = '{}-gen-data'.format(block_steps.inputs.parameters['block_tag']),
    )
    block_steps.add(gen_data)
    # Train one model per model tag (sliced over ``model_tags``).
    train_config = deepcopy(train_config)
    train_template_config = train_config.pop('template_config')
    train_executor = init_executor(train_config.pop('executor'))
    train = Step(
        "train",
        template=PythonOPTemplate(
            train_op,
            python_packages = upload_python_package,
            retry_on_transient_error = retry_times,
            slices=Slices("{{item}}",
                          input_parameter=["model_tag"],
                          output_artifact=["model","train_fig"]),
            **train_template_config,
        ),
        parameters={
            "model_tag": block_steps.inputs.parameters["model_tags"],
            "angular_mask": block_steps.inputs.parameters["angular_mask"],
            "train_config": block_steps.inputs.parameters["train_config"],
        },
        artifacts={
            "data": gen_data.outputs.artifacts["data"],
        },
        executor = train_executor,
        with_param=argo_range(argo_len(block_steps.inputs.parameters["model_tags"])),
        key = "{}-train".format(block_steps.inputs.parameters["block_tag"])+"-{{item}}",
        **train_config,
    )
    block_steps.add(train)
    # Model deviation of the fresh model ensemble on each walker's output
    # (sliced over ``walker_tags``).
    model_devi_config = deepcopy(model_devi_config)
    model_devi_template_config = model_devi_config.pop('template_config')
    model_devi_executor = init_executor(model_devi_config.pop('executor'))
    deviation = Step(
        "ModelDeviation",
        template=PythonOPTemplate(
            model_devi_op,
            python_packages = upload_python_package,
            retry_on_transient_error = retry_times,
            slices=Slices("{{item}}",
                          input_parameter=["trust_lvl_1","task_name"],
                          input_artifact=["plm_out","selected_indices"],
                          output_artifact=["model_devi","model_devi_fig"]),
            **model_devi_template_config,
        ),
        parameters={
            "trust_lvl_1": block_steps.inputs.parameters["trust_lvl_1"],
            "block_tag" : block_steps.inputs.parameters['block_tag'],
            "task_name": block_steps.inputs.parameters['walker_tags'],
        },
        artifacts={
            "models" : train.outputs.artifacts["model"],
            "plm_out": exploration.outputs.artifacts["plm_out"],
            "selected_indices": selection.outputs.artifacts["selected_indices"]
        },
        executor = model_devi_executor,
        with_param=argo_range(argo_len(block_steps.inputs.parameters["walker_tags"])),
        key = '{}-model-devi'.format(block_steps.inputs.parameters['block_tag'])+"-{{item}}",
        **model_devi_config
    )
    block_steps.add(deviation)
    # Re-export the per-step results as SuperOP-level outputs.
    block_steps.outputs.artifacts["models"]._from = train.outputs.artifacts["model"]
    block_steps.outputs.artifacts["data"]._from = gen_data.outputs.artifacts["data"]
    block_steps.outputs.artifacts["conf_outs"]._from = exploration.outputs.artifacts["conf_outs"]
    block_steps.outputs.artifacts["exploration_trajectory"]._from = exploration.outputs.artifacts["trajectory"]
    block_steps.outputs.artifacts["exploration_md_log"]._from = exploration.outputs.artifacts["md_log"]
    block_steps.outputs.artifacts["selection_index"]._from = selection.outputs.artifacts["selected_indices"]
    block_steps.outputs.parameters["cluster_threshold"].value_from_parameter = selection.outputs.parameters["cluster_threshold"]
    return block_steps
class IterBlock(Steps):
    r"""Iterative Block SuperOP.

    This SuperOP is the iterations after the initial iteration of the
    rid-kit cycle.  Compared with :class:`InitBlock` it additionally
    adjusts the trust levels based on the number of clusters found.
    """
    def __init__(
        self,
        name: str,
        exploration_op: OP,
        select_op: OP,
        label_op: OP,
        data_op: OP,
        adjust_lvl_op: OP,
        train_op: OP,
        model_devi_op: OP,
        adjust_lvl_config: Dict,
        train_config: Dict,
        model_devi_config: Dict,
        upload_python_package = None,
        retry_times = None
    ):
        # Workflow-level input parameters; entries with ``value=...`` are
        # defaults that the caller may override.
        self._input_parameters = {
            "block_tag" : InputParameter(type=str, value=""),
            "walker_tags": InputParameter(type=List),
            "model_tags": InputParameter(type=List),
            "trust_lvl_1" : InputParameter(type=List[float]),
            "trust_lvl_2": InputParameter(type=List[float]),
            "init_trust_lvl_1" : InputParameter(type=List[float]),
            "init_trust_lvl_2": InputParameter(type=List[float]),
            "exploration_config" : InputParameter(type=Dict),
            "cv_config" : InputParameter(type=Dict),
            "cluster_threshold": InputParameter(type=float, value=1.0),
            "angular_mask": InputParameter(type=Optional[Union[np.ndarray, List]]),
            "weights": InputParameter(type=Optional[Union[np.ndarray, List]]),
            "max_selection": InputParameter(type=int),
            "numb_cluster_threshold": InputParameter(type=float, value=30),
            "std_threshold": InputParameter(type=float, value=5.0),
            "dt": InputParameter(type=float, value=0.02),
            "output_freq": InputParameter(type=float, value=2500),
            "slice_mode": InputParameter(type=str, value="gmx"),
            "label_config": InputParameter(type=Dict),
            "tail": InputParameter(type=float, value=0.9),
            "train_config": InputParameter(type=Dict),
            "type_map": InputParameter(type=List, value=[]),
            "adjust_amplifier": InputParameter(type=float, value=1.5),
            "max_level_multiple": InputParameter(type=float, value=8.0),
        }
        # Most artifacts are optional — presumably different MD engines /
        # CV definitions need different subsets of them (TODO confirm).
        # Unlike InitBlock, ``data_old`` is required here.
        self._input_artifacts = {
            "models" : InputArtifact(optional=True),
            "forcefield" : InputArtifact(optional=True),
            "topology" : InputArtifact(optional=True),
            "inputfile": InputArtifact(optional=True),
            "confs" : InputArtifact(),
            "data_old": InputArtifact(),
            "index_file": InputArtifact(optional=True),
            "dp_files": InputArtifact(optional=True),
            "cv_file": InputArtifact(optional=True)
        }
        # The adjusted trust levels are exported so the next iteration can
        # start from them.
        self._output_parameters = {
            "cluster_threshold": OutputParameter(type=int),
            "adjust_trust_lvl_1": OutputParameter(type=int),
            "adjust_trust_lvl_2": OutputParameter(type=int),
        }
        self._output_artifacts = {
            "exploration_md_log": OutputArtifact(),
            "exploration_trajectory": OutputArtifact(),
            "selection_index": OutputArtifact(),
            "models": OutputArtifact(),
            "data": OutputArtifact(),
            "conf_outs": OutputArtifact()
        }
        super().__init__(
            name=name,
            inputs=Inputs(
                parameters=self._input_parameters,
                artifacts=self._input_artifacts
            ),
            outputs=Outputs(
                parameters=self._output_parameters,
                artifacts=self._output_artifacts
            ),
        )
        # _iter_block mutates this Steps object in place and returns it,
        # so the rebinding of ``self`` here is effectively a no-op.
        self = _iter_block(
            self,
            exploration_op,
            select_op,
            label_op,
            data_op,
            adjust_lvl_op,
            train_op,
            model_devi_op,
            adjust_lvl_config,
            train_config,
            model_devi_config,
            upload_python_package = upload_python_package,
            retry_times = retry_times
        )

    @property
    def input_parameters(self):
        return self._input_parameters

    @property
    def input_artifacts(self):
        return self._input_artifacts

    @property
    def output_parameters(self):
        return self._output_parameters

    @property
    def output_artifacts(self):
        return self._output_artifacts

    @property
    def keys(self):
        # NOTE(review): ``self._keys`` is never assigned in this class, so
        # accessing ``keys`` raises AttributeError — confirm intent.
        return self._keys
def _iter_block(
    block_steps,
    exploration_op: OP,
    select_op: OP,
    label_op: OP,
    data_op: OP,
    adjust_lvl_op: OP,
    train_op: OP,
    model_devi_op: OP,
    adjust_lvl_config : Dict,
    train_config : Dict,
    model_devi_config: Dict,
    upload_python_package : Optional[str] = None,
    retry_times: Optional[int] = None
):
    """Wire the steps of a non-initial rid-kit iteration into ``block_steps``.

    Pipeline: Exploration -> Selection -> adjust-level (sliced per walker)
    -> Label -> GenData -> train (sliced over model tags) -> ModelDeviation
    (sliced over walker tags).  Returns the same (mutated) ``block_steps``.
    """
    # Exploration MD for every walker, biased by the current models.
    exploration = Step(
        "Exploration",
        template=exploration_op,
        parameters={
            "trust_lvl_1" : block_steps.inputs.parameters['trust_lvl_1'],
            "trust_lvl_2": block_steps.inputs.parameters['trust_lvl_2'],
            "exploration_config" : block_steps.inputs.parameters['exploration_config'],
            "cv_config" : block_steps.inputs.parameters['cv_config'],
            "task_names" : block_steps.inputs.parameters['walker_tags'],
            "block_tag" : block_steps.inputs.parameters['block_tag'],
        },
        artifacts={
            "models" : block_steps.inputs.artifacts['models'],
            "forcefield" : block_steps.inputs.artifacts['forcefield'],
            "topology" : block_steps.inputs.artifacts['topology'],
            "inputfile": block_steps.inputs.artifacts['inputfile'],
            "confs" : block_steps.inputs.artifacts['confs'],
            "index_file": block_steps.inputs.artifacts['index_file'],
            "dp_files": block_steps.inputs.artifacts['dp_files'],
            "cv_file": block_steps.inputs.artifacts['cv_file']
        },
        key = '{}-exploration'.format(block_steps.inputs.parameters['block_tag'])
    )
    block_steps.add(exploration)
    # Cluster and select frames.  Note: the selection step uses the
    # *initial* trust levels (not the per-iteration adjusted ones), and
    # ``if_make_threshold`` is False — the threshold from the first block
    # is reused via the ``cluster_threshold`` input.
    selection = Step(
        "Selection",
        template=select_op,
        parameters={
            "label_config": block_steps.inputs.parameters["label_config"],
            "trust_lvl_1" : block_steps.inputs.parameters["init_trust_lvl_1"],
            "trust_lvl_2": block_steps.inputs.parameters["init_trust_lvl_2"],
            "cluster_threshold": block_steps.inputs.parameters["cluster_threshold"],
            "angular_mask": block_steps.inputs.parameters["angular_mask"],
            "weights": block_steps.inputs.parameters["weights"],
            "max_selection": block_steps.inputs.parameters["max_selection"],
            "dt": block_steps.inputs.parameters["dt"],
            "output_freq": block_steps.inputs.parameters["output_freq"],
            "slice_mode": block_steps.inputs.parameters["slice_mode"],
            "type_map": block_steps.inputs.parameters["type_map"],
            "if_make_threshold": False,
            "task_names" : block_steps.inputs.parameters['walker_tags'],
            "block_tag" : block_steps.inputs.parameters['block_tag'],
        },
        artifacts={
            "models" : block_steps.inputs.artifacts["models"],
            "plm_out": exploration.outputs.artifacts["plm_out"],
            "xtc_traj": exploration.outputs.artifacts["trajectory"],
            # The input conformations double as topology for slicing the
            # exploration trajectories.
            "topology": block_steps.inputs.artifacts["confs"]
        },
        key = '{}-selection'.format(block_steps.inputs.parameters['block_tag']),
    )
    block_steps.add(selection)
    # Adjust the per-walker trust levels from the number of clusters found
    # (sliced per walker over ``trust_lvl_1``).
    adjust_lvl_config = deepcopy(adjust_lvl_config)
    adjust_lvl_template_config = adjust_lvl_config.pop('template_config')
    adjust_lvl_executor = init_executor(adjust_lvl_config.pop('executor'))
    adjust_lvl = Step(
        "adjust-level",
        template=PythonOPTemplate(
            adjust_lvl_op,
            python_packages = upload_python_package,
            retry_on_transient_error = retry_times,
            slices=Slices("{{item}}",
                          input_parameter=["trust_lvl_1", "trust_lvl_2", "numb_cluster", "init_trust_lvl_1", "init_trust_lvl_2"],
                          output_parameter=["adjust_trust_lvl_1", "adjust_trust_lvl_2"]
                          ),
            **adjust_lvl_template_config,
        ),
        parameters={
            "trust_lvl_1": block_steps.inputs.parameters["trust_lvl_1"],
            "trust_lvl_2": block_steps.inputs.parameters["trust_lvl_2"],
            "init_trust_lvl_1": block_steps.inputs.parameters["init_trust_lvl_1"],
            "init_trust_lvl_2": block_steps.inputs.parameters["init_trust_lvl_2"],
            "numb_cluster": selection.outputs.parameters["numb_cluster"],
            "numb_cluster_threshold": block_steps.inputs.parameters["numb_cluster_threshold"],
            "adjust_amplifier": block_steps.inputs.parameters["adjust_amplifier"],
            "max_level_multiple": block_steps.inputs.parameters["max_level_multiple"]
        },
        artifacts={},
        with_param=argo_range(argo_len(block_steps.inputs.parameters["trust_lvl_1"])),
        executor = adjust_lvl_executor,
        key = '{}-adjust-level'.format(block_steps.inputs.parameters['block_tag'])+"-{{item}}",
        **adjust_lvl_config,
    )
    block_steps.add(adjust_lvl)
    # Restrained MD on the selected conformations to obtain labels.
    label = Step(
        "Label",
        template=label_op,
        parameters={
            "label_config": block_steps.inputs.parameters['label_config'],
            "cv_config": block_steps.inputs.parameters['cv_config'],
            "tail": block_steps.inputs.parameters['tail'],
            "block_tag" : block_steps.inputs.parameters['block_tag'],
            "std_threshold": block_steps.inputs.parameters["std_threshold"]
        },
        artifacts={
            "topology": block_steps.inputs.artifacts["topology"],
            "forcefield" : block_steps.inputs.artifacts['forcefield'],
            "confs": selection.outputs.artifacts["selected_confs"],
            "inputfile": block_steps.inputs.artifacts['inputfile'],
            "at": selection.outputs.artifacts["selected_cv_init"],
            "index_file": block_steps.inputs.artifacts['index_file'],
            "dp_files": block_steps.inputs.artifacts['dp_files'],
            "cv_file": block_steps.inputs.artifacts['cv_file'],
            "conf_tags" : selection.outputs.artifacts['selected_conf_tags']
        },
        key = '{}-label'.format(block_steps.inputs.parameters['block_tag'])
    )
    block_steps.add(label)
    # Merge the fresh labels with the accumulated data from previous
    # iterations (data_old is required for iter blocks).
    gen_data = Step(
        'GenData',
        template=data_op,
        parameters={"block_tag" : block_steps.inputs.parameters['block_tag']},
        artifacts={
            "cv_forces": label.outputs.artifacts["cv_forces"],
            "data_old": block_steps.inputs.artifacts['data_old']
        },
        key = '{}-gen-data'.format(block_steps.inputs.parameters['block_tag']),
    )
    block_steps.add(gen_data)
    # Train one model per model tag (sliced over ``model_tags``).
    train_config = deepcopy(train_config)
    train_template_config = train_config.pop('template_config')
    train_executor = init_executor(train_config.pop('executor'))
    train = Step(
        "train",
        template=PythonOPTemplate(
            train_op,
            python_packages = upload_python_package,
            retry_on_transient_error = retry_times,
            slices=Slices("{{item}}",
                          input_parameter=["model_tag"],
                          output_artifact=["model","train_fig"]),
            **train_template_config,
        ),
        parameters={
            "model_tag": block_steps.inputs.parameters["model_tags"],
            "angular_mask": block_steps.inputs.parameters["angular_mask"],
            "train_config": block_steps.inputs.parameters["train_config"],
        },
        artifacts={
            "data": gen_data.outputs.artifacts["data"],
        },
        executor = train_executor,
        with_param=argo_range(argo_len(block_steps.inputs.parameters["model_tags"])),
        key = "{}-train".format(block_steps.inputs.parameters["block_tag"])+"-{{item}}",
        **train_config,
    )
    block_steps.add(train)
    # Model deviation of the fresh model ensemble on each walker's output
    # (sliced over ``walker_tags``).
    model_devi_config = deepcopy(model_devi_config)
    model_devi_template_config = model_devi_config.pop('template_config')
    model_devi_executor = init_executor(model_devi_config.pop('executor'))
    deviation = Step(
        "ModelDeviation",
        template=PythonOPTemplate(
            model_devi_op,
            python_packages = upload_python_package,
            retry_on_transient_error = retry_times,
            slices=Slices("{{item}}",
                          input_parameter=["trust_lvl_1","task_name"],
                          input_artifact=["plm_out","selected_indices"],
                          output_artifact=["model_devi","model_devi_fig"]),
            **model_devi_template_config,
        ),
        parameters={
            "trust_lvl_1": block_steps.inputs.parameters["trust_lvl_1"],
            "block_tag" : block_steps.inputs.parameters['block_tag'],
            "task_name": block_steps.inputs.parameters['walker_tags'],
        },
        artifacts={
            "models" : train.outputs.artifacts["model"],
            "plm_out": exploration.outputs.artifacts["plm_out"],
            "selected_indices": selection.outputs.artifacts["selected_indices"]
        },
        executor = model_devi_executor,
        with_param=argo_range(argo_len(block_steps.inputs.parameters["walker_tags"])),
        key = '{}-model-devi'.format(block_steps.inputs.parameters['block_tag'])+"-{{item}}",
        **model_devi_config
    )
    block_steps.add(deviation)
    # Re-export the per-step results as SuperOP-level outputs, including
    # the adjusted trust levels for the next iteration.
    block_steps.outputs.artifacts["models"]._from = train.outputs.artifacts["model"]
    block_steps.outputs.artifacts["data"]._from = gen_data.outputs.artifacts["data"]
    block_steps.outputs.artifacts["conf_outs"]._from = exploration.outputs.artifacts["conf_outs"]
    block_steps.outputs.artifacts["exploration_trajectory"]._from = exploration.outputs.artifacts["trajectory"]
    block_steps.outputs.artifacts["exploration_md_log"]._from = exploration.outputs.artifacts["md_log"]
    block_steps.outputs.artifacts["selection_index"]._from = selection.outputs.artifacts["selected_indices"]
    block_steps.outputs.parameters["cluster_threshold"].value_from_parameter = selection.outputs.parameters["cluster_threshold"]
    block_steps.outputs.parameters["adjust_trust_lvl_1"].value_from_parameter = adjust_lvl.outputs.parameters["adjust_trust_lvl_1"]
    block_steps.outputs.parameters["adjust_trust_lvl_2"].value_from_parameter = adjust_lvl.outputs.parameters["adjust_trust_lvl_2"]
    return block_steps
from typing import Dict, List
from copy import deepcopy
from dflow import (
InputParameter,
OutputParameter,
Inputs,
InputArtifact,
Outputs,
OutputArtifact,
Step,
Steps,
argo_range,
argo_len,
)
from dflow.python import(
PythonOPTemplate,
OP,
Slices
)
from rid.utils import init_executor
class Exploration(Steps):
    r"""Exploration SuperOP.

    This SuperOP combines PrepExplore OP and RunExplore OP: the prep OP
    builds one exploration task per walker, and the run OP executes the
    corresponding biased MD simulation.  The actual step wiring is
    delegated to the module-level helper ``_exploration``.
    """
    def __init__(
            self,
            name: str,
            prep_op: OP,
            run_op: OP,
            prep_config: Dict,
            run_config: Dict,
            upload_python_package = None,
            retry_times = None
        ):
        # Per-walker parameters (trust levels, task names) plus the shared
        # exploration/CV configuration dictionaries.
        self._input_parameters = {
            "trust_lvl_1" : InputParameter(type=List[float], value=2.0),
            "trust_lvl_2": InputParameter(type=List[float], value=3.0),
            "exploration_config" : InputParameter(type=Dict),
            "cv_config" : InputParameter(type=Dict),
            "task_names" : InputParameter(type=List[str]),
            "block_tag" : InputParameter(type=str, value="")
        }
        # Most artifacts are optional because different MD back-ends need
        # different subsets of these inputs (e.g. gromacs vs. dp files).
        self._input_artifacts = {
            "models" : InputArtifact(optional=True),
            "forcefield": InputArtifact(optional=True),
            "topology" : InputArtifact(optional=True),
            "inputfile": InputArtifact(optional=True),
            "confs" : InputArtifact(),
            "index_file": InputArtifact(optional=True),
            "dp_files": InputArtifact(optional=True),
            "cv_file": InputArtifact(optional=True)
        }
        self._output_parameters = {
            "cv_dim": OutputParameter(type=List[int])
        }
        self._output_artifacts = {
            "plm_out": OutputArtifact(),
            "md_log": OutputArtifact(),
            "trajectory": OutputArtifact(),
            "conf_outs": OutputArtifact()
        }
        super().__init__(
            name=name,
            inputs=Inputs(
                parameters=self._input_parameters,
                artifacts=self._input_artifacts
            ),
            outputs=Outputs(
                parameters=self._output_parameters,
                artifacts=self._output_artifacts
            ),
        )
        # Step keys are prefixed with the block tag so that every iteration
        # of the outer RiD loop gets unique, resubmittable step keys.
        step_keys = {
            "prep_exploration": "{}-prep-exploration".format(self.inputs.parameters["block_tag"]),
            "run_exploration": "{}-run-exploration".format(self.inputs.parameters["block_tag"]),
        }
        # _exploration adds the prep/run steps to ``self`` in place and
        # returns it, so this re-assignment is effectively a no-op.
        self = _exploration(
            self,
            step_keys,
            prep_op,
            run_op,
            prep_config = prep_config,
            run_config = run_config,
            upload_python_package = upload_python_package,
            retry_times = retry_times
        )

    @property
    def input_parameters(self):
        # Read-only view of the declared input parameters.
        return self._input_parameters

    @property
    def input_artifacts(self):
        return self._input_artifacts

    @property
    def output_parameters(self):
        return self._output_parameters

    @property
    def output_artifacts(self):
        return self._output_artifacts

    @property
    def keys(self):
        # NOTE(review): ``_keys`` is never assigned anywhere in this class,
        # so accessing ``keys`` raises AttributeError -- confirm intent.
        return self._keys
def _exploration(
        exploration_steps,
        step_keys,
        prep_exploration_op : OP,
        run_exploration_op : OP,
        prep_config : Dict,
        run_config : Dict,
        upload_python_package : str = None,
        retry_times: int = None
    ):
    """Wire the prep/run exploration steps into ``exploration_steps``.

    Both steps are sliced over walkers.  Two slicing modes are supported:
    when the executor requests merged sliced steps, slices are driven by an
    explicit ``with_param`` index (``{{item}}``); otherwise dflow sub-path
    slicing (``{{item.order}}``) is used.  Returns ``exploration_steps``
    with the steps and the output wiring added.
    """
    # Copy the configs before popping keys so the callers' dicts stay intact.
    prep_config = deepcopy(prep_config)
    run_config = deepcopy(run_config)
    prep_template_config = prep_config.pop('template_config')
    run_template_config = run_config.pop('template_config')
    prep_executor = init_executor(prep_config.pop('executor'))
    run_executor = init_executor(prep_config.pop('executor')) if False else init_executor(run_config.pop('executor'))
    # Whether the executor merges all slices of the prep step into one job.
    prep_merge = False
    if prep_executor is not None:
        prep_merge = prep_executor.merge_sliced_step
    if prep_merge:
        # Merged sliced step: explicit index slicing driven by with_param.
        prep_exploration = Step(
            'prep-exploration',
            template=PythonOPTemplate(
                prep_exploration_op,
                python_packages = upload_python_package,
                retry_on_transient_error = retry_times,
                slices=Slices("{{item}}",
                    input_parameter=["task_name", "trust_lvl_1", "trust_lvl_2"],
                    input_artifact=["conf"],
                    output_artifact=["task_path"],
                    output_parameter=["cv_dim"]
                ),
                **prep_template_config,
            ),
            parameters={
                "trust_lvl_1" : exploration_steps.inputs.parameters['trust_lvl_1'],
                "trust_lvl_2": exploration_steps.inputs.parameters['trust_lvl_2'],
                "exploration_config" : exploration_steps.inputs.parameters['exploration_config'],
                "cv_config" : exploration_steps.inputs.parameters['cv_config'],
                "task_name": exploration_steps.inputs.parameters['task_names'],
                "block_tag": exploration_steps.inputs.parameters["block_tag"]
            },
            artifacts={
                "models" : exploration_steps.inputs.artifacts['models'],
                "topology" : exploration_steps.inputs.artifacts['topology'],
                "conf" : exploration_steps.inputs.artifacts['confs'],
                "cv_file": exploration_steps.inputs.artifacts['cv_file']
            },
            key = step_keys["prep_exploration"]+"-{{item}}",
            with_param=argo_range(argo_len(exploration_steps.inputs.parameters['task_names'])),
            executor = prep_executor,
            **prep_config
        )
    else:
        # Sub-path slicing: each slice is keyed by its position in the input.
        prep_exploration = Step(
            'prep-exploration',
            template=PythonOPTemplate(
                prep_exploration_op,
                python_packages = upload_python_package,
                retry_on_transient_error = retry_times,
                slices=Slices(sub_path = True,
                    input_parameter=["task_name", "trust_lvl_1", "trust_lvl_2"],
                    input_artifact=["conf"],
                    output_artifact=["task_path"],
                    output_parameter=["cv_dim"]
                ),
                **prep_template_config,
            ),
            parameters={
                "trust_lvl_1" : exploration_steps.inputs.parameters['trust_lvl_1'],
                "trust_lvl_2": exploration_steps.inputs.parameters['trust_lvl_2'],
                "exploration_config" : exploration_steps.inputs.parameters['exploration_config'],
                "cv_config" : exploration_steps.inputs.parameters['cv_config'],
                "task_name": exploration_steps.inputs.parameters['task_names'],
                "block_tag": exploration_steps.inputs.parameters["block_tag"]
            },
            artifacts={
                "models" : exploration_steps.inputs.artifacts['models'],
                "topology" : exploration_steps.inputs.artifacts['topology'],
                "conf" : exploration_steps.inputs.artifacts['confs'],
                "cv_file": exploration_steps.inputs.artifacts['cv_file']
            },
            key = step_keys["prep_exploration"]+"-{{item.order}}",
            executor = prep_executor,
            **prep_config
        )
    exploration_steps.add(prep_exploration)
    run_merge = False
    if run_executor is not None:
        run_merge = run_executor.merge_sliced_step
    if run_merge:
        run_exploration = Step(
            'run-exploration',
            template=PythonOPTemplate(
                run_exploration_op,
                python_packages = upload_python_package,
                retry_on_transient_error = retry_times,
                slices=Slices("{{item}}",
                    input_artifact=["task_path"],
                    output_artifact=["plm_out", "bias_fig","model_devi_fig", "dp_model_devi_fig", "dp_model_devi", "dp_selected_indices","dp_selected_confs","projected_fig","trajectory", "md_log", "conf_out"]
                ),
                **run_template_config,
            ),
            parameters={
                "exploration_config" : exploration_steps.inputs.parameters["exploration_config"]
            },
            artifacts={
                "task_path" : prep_exploration.outputs.artifacts["task_path"],
                "forcefield": exploration_steps.inputs.artifacts['forcefield'],
                "models" : exploration_steps.inputs.artifacts['models'],
                "index_file": exploration_steps.inputs.artifacts['index_file'],
                "dp_files": exploration_steps.inputs.artifacts['dp_files'],
                "cv_file": exploration_steps.inputs.artifacts['cv_file'],
                "inputfile": exploration_steps.inputs.artifacts['inputfile']
            },
            key = step_keys["run_exploration"]+"-{{item}}",
            executor = run_executor,
            with_param=argo_range(argo_len(exploration_steps.inputs.parameters['task_names'])),
            **run_config
        )
    else:
        run_exploration = Step(
            'run-exploration',
            template=PythonOPTemplate(
                run_exploration_op,
                python_packages = upload_python_package,
                retry_on_transient_error = retry_times,
                slices=Slices(sub_path = True,
                    input_artifact=["task_path"],
                    output_artifact=["plm_out", "bias_fig","model_devi_fig","dp_model_devi_fig", "dp_model_devi", "dp_selected_indices","dp_selected_confs","projected_fig","trajectory", "md_log", "conf_out"]
                ),
                **run_template_config,
            ),
            parameters={
                "exploration_config" : exploration_steps.inputs.parameters["exploration_config"]
            },
            artifacts={
                "task_path" : prep_exploration.outputs.artifacts["task_path"],
                "forcefield": exploration_steps.inputs.artifacts['forcefield'],
                "models" : exploration_steps.inputs.artifacts['models'],
                "index_file": exploration_steps.inputs.artifacts['index_file'],
                "dp_files": exploration_steps.inputs.artifacts['dp_files'],
                "cv_file": exploration_steps.inputs.artifacts['cv_file'],
                "inputfile": exploration_steps.inputs.artifacts['inputfile']
            },
            key = step_keys["run_exploration"]+"-{{item.order}}",
            executor = run_executor,
            **run_config,
        )
    exploration_steps.add(run_exploration)
    # Expose the sub-steps' outputs as outputs of the SuperOP template.
    exploration_steps.outputs.parameters["cv_dim"].value_from_parameter = prep_exploration.outputs.parameters["cv_dim"]
    exploration_steps.outputs.artifacts["plm_out"]._from = run_exploration.outputs.artifacts["plm_out"]
    exploration_steps.outputs.artifacts["md_log"]._from = run_exploration.outputs.artifacts["md_log"]
    exploration_steps.outputs.artifacts["trajectory"]._from = run_exploration.outputs.artifacts["trajectory"]
    exploration_steps.outputs.artifacts["conf_outs"]._from = run_exploration.outputs.artifacts["conf_out"]
    return exploration_steps
from typing import Dict, List
from copy import deepcopy
from dflow import (
InputParameter,
OutputParameter,
Inputs,
InputArtifact,
Outputs,
OutputArtifact,
Step,
Steps,
argo_range,
argo_len,
)
from dflow.python import(
PythonOPTemplate,
OP,
Slices
)
from rid.utils import init_executor
class MCMC(Steps):
    r"""MCMC SuperOP.

    This SuperOP combines MCMC_Run OP and MCMC_Plot OP: the run OP samples
    the trained free-energy models per task, and the plot OP aggregates the
    results into a figure.  The actual step wiring is delegated to the
    module-level helper ``_mcmc``.
    """
    def __init__(
            self,
            name: str,
            mcmc_run_op: OP,
            mcmc_plot_op: OP,
            run_config: Dict,
            plot_config: Dict,
            upload_python_package = None,
            retry_times = None
        ):
        self._input_parameters = {
            "mcmc_config" : InputParameter(type=Dict),
            "task_names" : InputParameter(type=List[str]),
            "block_tag" : InputParameter(type=str, value="")
        }
        self._input_artifacts = {
            "models" : InputArtifact(),
            "plm_out": InputArtifact()
        }
        self._output_parameters = {}
        self._output_artifacts = {
            "mcmc_fig": OutputArtifact()
        }
        super().__init__(
            name=name,
            inputs=Inputs(
                parameters=self._input_parameters,
                artifacts=self._input_artifacts
            ),
            outputs=Outputs(
                parameters=self._output_parameters,
                artifacts=self._output_artifacts
            ),
        )
        # Step keys are prefixed with the block tag for unique, reusable keys.
        step_keys = {
            "mcmc_run": "{}-mcmc-run".format(self.inputs.parameters["block_tag"]),
            "mcmc_plot": "{}-mcmc-plot".format(self.inputs.parameters["block_tag"]),
        }
        # _mcmc adds the run/plot steps to ``self`` in place and returns it,
        # so this re-assignment is effectively a no-op.
        self = _mcmc(
            self,
            step_keys,
            mcmc_run_op,
            mcmc_plot_op,
            run_config = run_config,
            plot_config = plot_config,
            upload_python_package = upload_python_package,
            retry_times = retry_times
        )

    @property
    def input_parameters(self):
        # Read-only view of the declared input parameters.
        return self._input_parameters

    @property
    def input_artifacts(self):
        return self._input_artifacts

    @property
    def output_parameters(self):
        return self._output_parameters

    @property
    def output_artifacts(self):
        return self._output_artifacts

    @property
    def keys(self):
        # NOTE(review): ``_keys`` is never assigned anywhere in this class,
        # so accessing ``keys`` raises AttributeError -- confirm intent.
        return self._keys
def _mcmc(
        mcmc_steps,
        step_keys,
        mcmc_run_op : OP,
        mcmc_plot_op : OP,
        run_config : Dict,
        plot_config : Dict,
        upload_python_package : str = None,
        retry_times: int = None
    ):
    """Wire the MCMC run/plot steps into ``mcmc_steps``.

    ``mcmc_run_op`` is sliced over tasks (via ``with_param`` when the
    executor merges sliced steps, via sub-path slicing otherwise);
    ``mcmc_plot_op`` then aggregates the per-task samples into one figure.
    Returns ``mcmc_steps`` with the steps and output wiring added.
    """
    # Copy the configs before popping keys so the callers' dicts stay intact.
    run_config = deepcopy(run_config)
    plot_config = deepcopy(plot_config)
    run_template_config = run_config.pop('template_config')
    plot_template_config = plot_config.pop('template_config')
    run_executor = init_executor(run_config.pop('executor'))
    plot_executor = init_executor(plot_config.pop('executor'))
    # Whether the executor merges all slices of the run step into one job.
    run_merge = False
    if run_executor is not None:
        run_merge = run_executor.merge_sliced_step
    if run_merge:
        # Merged sliced step: explicit index slicing driven by with_param.
        mcmc_run = Step(
            'mcmc-run',
            template=PythonOPTemplate(
                mcmc_run_op,
                python_packages = upload_python_package,
                retry_on_transient_error = retry_times,
                slices=Slices("{{item}}",
                    input_parameter=["task_names"],
                    input_artifact=["models"],
                    output_artifact=["mcmc_1cv", "mcmc_2cv"]
                ),
                **run_template_config,
            ),
            parameters={
                "mcmc_config" : mcmc_steps.inputs.parameters['mcmc_config'],
                "task_names": mcmc_steps.inputs.parameters['task_names']
            },
            artifacts={
                "models" : mcmc_steps.inputs.artifacts['models']
            },
            key = step_keys["mcmc_run"]+"-{{item}}",
            with_param=argo_range(argo_len(mcmc_steps.inputs.parameters['task_names'])),
            executor = run_executor,
            **run_config
        )
    else:
        # Sub-path slicing: each slice is keyed by its position in the input.
        mcmc_run = Step(
            'mcmc-run',
            template=PythonOPTemplate(
                mcmc_run_op,
                python_packages = upload_python_package,
                retry_on_transient_error = retry_times,
                slices=Slices(sub_path = True,
                    input_parameter=["task_names"],
                    input_artifact=["models"],
                    output_artifact=["mcmc_1cv", "mcmc_2cv"]
                ),
                **run_template_config,
            ),
            parameters={
                "mcmc_config" : mcmc_steps.inputs.parameters['mcmc_config'],
                "task_names": mcmc_steps.inputs.parameters['task_names']
            },
            artifacts={
                "models" : mcmc_steps.inputs.artifacts['models']
            },
            key = step_keys["mcmc_run"]+"-{{item.order}}",
            executor = run_executor,
            **run_config
        )
    mcmc_steps.add(mcmc_run)
    mcmc_plot = Step(
        'mcmc-plot',
        template=PythonOPTemplate(
            mcmc_plot_op,
            python_packages = upload_python_package,
            retry_on_transient_error = retry_times,
            **plot_template_config,
        ),
        parameters={
            "mcmc_config" : mcmc_steps.inputs.parameters["mcmc_config"]
        },
        artifacts={
            "mcmc_1cv": mcmc_run.outputs.artifacts['mcmc_1cv'],
            "mcmc_2cv": mcmc_run.outputs.artifacts['mcmc_2cv'],
            "plm_out": mcmc_steps.inputs.artifacts['plm_out']
        },
        key = step_keys["mcmc_plot"],
        executor = plot_executor,
        # BUGFIX: this previously spread ``**run_config`` (copy-paste from
        # the run step); the plot step must use the leftover kwargs of its
        # own ``plot_config``.
        **plot_config
    )
    mcmc_steps.add(mcmc_plot)
    # Expose the plot step's figure as the SuperOP output.
    mcmc_steps.outputs.artifacts["mcmc_fig"]._from = mcmc_plot.outputs.artifacts["mcmc_fig"]
    return mcmc_steps
from dflow import (
InputParameter,
Inputs,
InputArtifact,
Outputs,
OutputArtifact,
Step,
Steps,
if_expression,
)
from dflow.python import(
PythonOPTemplate,
OP
)
from typing import List, Optional, Dict, Union
import numpy as np
from rid.utils import init_executor
from rid.op.prep_rid import PrepRiD
from rid.op.recorder import Recorder
from copy import deepcopy
class ReinforcedDynamicsLoop(Steps):
    """Recursive loop template for Reinforced Dynamics.

    Each invocation runs one RiD block (exploration, selection, labeling,
    training) and then conditionally re-submits itself until the iteration
    counter reaches ``numb_iters``.  The actual step wiring is delegated to
    the module-level helper ``_loop``.
    """
    def __init__(
            self,
            name : str,
            block_op : Steps,
            step_config : dict,
            upload_python_package : str = None,
        ):
        # Loop control (numb_iters/last_iteration/block_tag), per-walker
        # trust levels, and the configuration dictionaries threaded through
        # every block iteration.
        self._input_parameters={
            "numb_iters" : InputParameter(type=int),
            "last_iteration" : InputParameter(type=int),
            "block_tag" : InputParameter(type=str, value=""),
            "walker_tags": InputParameter(type=List),
            "model_tags": InputParameter(type=List),
            "trust_lvl_1" : InputParameter(type=List[float]),
            "trust_lvl_2": InputParameter(type=List[float]),
            "init_trust_lvl_1" : InputParameter(type=List[float]),
            "init_trust_lvl_2": InputParameter(type=List[float]),
            "exploration_config" : InputParameter(type=Dict),
            "cv_config" : InputParameter(type=Dict),
            "cluster_threshold": InputParameter(type=float, value=1.0),
            "angular_mask": InputParameter(type=Optional[Union[np.ndarray, List]]),
            "weights": InputParameter(type=Optional[Union[np.ndarray, List]]),
            "max_selection": InputParameter(type=int),
            "numb_cluster_threshold": InputParameter(type=float, value=30),
            "std_threshold": InputParameter(type=float, value=5.0),
            "dt": InputParameter(type=float, value=0.02),
            "output_freq": InputParameter(type=float, value=2500),
            "slice_mode": InputParameter(type=str, value="gmx"),
            "label_config": InputParameter(type=Dict),
            "tail": InputParameter(type=float, value=0.9),
            "train_config": InputParameter(type=Dict),
            "type_map": InputParameter(type=List, value = []),
            "adjust_amplifier": InputParameter(type=float, value=1.5),
            "max_level_multiple": InputParameter(type=float, value=8.0),
        }
        # Optional artifacts accommodate the different MD back-ends.
        self._input_artifacts={
            "models" : InputArtifact(optional=True),
            "forcefield" : InputArtifact(optional=True),
            "topology" : InputArtifact(optional=True),
            "inputfile": InputArtifact(optional=True),
            "confs" : InputArtifact(),
            "data_old": InputArtifact(),
            "index_file": InputArtifact(optional=True),
            "dp_files": InputArtifact(optional=True),
            "cv_file": InputArtifact(optional=True)
        }
        self._output_parameters={
        }
        self._output_artifacts={
            "exploration_trajectory": OutputArtifact(),
            "models": OutputArtifact(),
            "data": OutputArtifact(),
            "conf_outs": OutputArtifact()
        }
        super().__init__(
            name = name,
            inputs = Inputs(
                parameters=self._input_parameters,
                artifacts=self._input_artifacts,
            ),
            outputs=Outputs(
                parameters=self._output_parameters,
                artifacts=self._output_artifacts,
            ),
        )
        # Step keys are prefixed with the current block tag so each loop
        # iteration gets distinct, resubmittable keys.
        _step_keys = ['block', 'recorder']
        step_keys = {}
        for ii in _step_keys:
            step_keys[ii] = '-'.join(["%s"%self.inputs.parameters["block_tag"], ii])
        # _loop adds the recorder/block/next steps to ``self`` in place and
        # returns it, so this re-assignment is effectively a no-op.
        self = _loop(
            self,
            step_keys,
            name,
            block_op,
            step_config = step_config,
            upload_python_package = upload_python_package,
        )

    @property
    def input_parameters(self):
        # Read-only view of the declared input parameters.
        return self._input_parameters

    @property
    def input_artifacts(self):
        return self._input_artifacts

    @property
    def output_parameters(self):
        return self._output_parameters

    @property
    def output_artifacts(self):
        return self._output_artifacts

    @property
    def keys(self):
        # NOTE(review): ``_keys`` is never assigned anywhere in this class,
        # so accessing ``keys`` raises AttributeError -- confirm intent.
        return self._keys
def _loop (
        steps,
        step_keys,
        name : str,
        block_op : OP,
        step_config : dict,
        upload_python_package : str = None,
    ):
    """Wire one loop iteration (recorder -> block -> recursive next) into
    ``steps``.

    The recorder step maps ``last_iteration`` to the current ``block_tag``
    and the ``next_iteration`` counter; the block step runs one full RiD
    iteration; the "next" step recursively re-submits ``steps`` itself
    while ``next_iteration < numb_iters``.  Outputs are routed with
    ``if_expression`` so the final iteration's artifacts propagate out of
    the recursion.  Returns ``steps``.
    """
    # Copy the config before popping keys so the caller's dict stays intact.
    step_config = deepcopy(step_config)
    step_template_config = step_config.pop('template_config')
    step_executor = init_executor(step_config.pop('executor'))
    # Bookkeeping step: derives next_iteration and block_tag from the
    # previous iteration number.
    recorder_step = Step(
        name = name + '-recorder',
        template=PythonOPTemplate(
            Recorder,
            python_packages = upload_python_package,
            **step_template_config,
        ),
        parameters={
            "iteration": steps.inputs.parameters['last_iteration'],
        },
        artifacts={},
        key = step_keys['recorder'],
        executor = step_executor,
        **step_config,
    )
    steps.add(recorder_step)
    # One complete RiD block: exploration, selection, labeling, training.
    block_step = Step(
        name = name + '-block',
        template = block_op,
        parameters={
            "block_tag": recorder_step.outputs.parameters["block_tag"],
            "walker_tags": steps.inputs.parameters["walker_tags"],
            "model_tags": steps.inputs.parameters["model_tags"],
            "exploration_config": steps.inputs.parameters["exploration_config"],
            "cv_config": steps.inputs.parameters["cv_config"],
            "trust_lvl_1" : steps.inputs.parameters["trust_lvl_1"],
            "trust_lvl_2": steps.inputs.parameters["trust_lvl_2"],
            "init_trust_lvl_1": steps.inputs.parameters["init_trust_lvl_1"],
            "init_trust_lvl_2": steps.inputs.parameters["init_trust_lvl_2"],
            "cluster_threshold": steps.inputs.parameters["cluster_threshold"],
            "angular_mask": steps.inputs.parameters["angular_mask"],
            "weights": steps.inputs.parameters["weights"],
            "max_selection": steps.inputs.parameters["max_selection"],
            "numb_cluster_threshold": steps.inputs.parameters["numb_cluster_threshold"],
            "std_threshold": steps.inputs.parameters["std_threshold"],
            "dt": steps.inputs.parameters["dt"],
            "output_freq": steps.inputs.parameters["output_freq"],
            "slice_mode": steps.inputs.parameters["slice_mode"],
            "type_map": steps.inputs.parameters["type_map"],
            "label_config": steps.inputs.parameters["label_config"],
            "train_config": steps.inputs.parameters["train_config"]
        },
        artifacts={
            "models": steps.inputs.artifacts["models"],
            "forcefield" : steps.inputs.artifacts['forcefield'],
            "topology": steps.inputs.artifacts["topology"],
            "inputfile": steps.inputs.artifacts["inputfile"],
            "confs": steps.inputs.artifacts["confs"],
            "data_old": steps.inputs.artifacts["data_old"],
            "index_file": steps.inputs.artifacts["index_file"],
            "dp_files": steps.inputs.artifacts["dp_files"],
            "cv_file": steps.inputs.artifacts["cv_file"]
        },
        key = step_keys['block'],
    )
    steps.add(block_step)
    # Recursive self-submission: only runs while the iteration counter is
    # below numb_iters.  Trust levels, confs, and training data are carried
    # over from the block step just executed; the rest is passed through.
    next_step = Step(
        name = name+'-next',
        template = steps,
        parameters={
            "numb_iters": steps.inputs.parameters["numb_iters"],
            "last_iteration": recorder_step.outputs.parameters["next_iteration"],
            "block_tag": recorder_step.outputs.parameters["block_tag"],
            "walker_tags": steps.inputs.parameters["walker_tags"],
            "model_tags": steps.inputs.parameters["model_tags"],
            "exploration_config": steps.inputs.parameters["exploration_config"],
            "cv_config": steps.inputs.parameters["cv_config"],
            "trust_lvl_1": block_step.outputs.parameters["adjust_trust_lvl_1"],
            "trust_lvl_2": block_step.outputs.parameters["adjust_trust_lvl_2"],
            "init_trust_lvl_1": steps.inputs.parameters["init_trust_lvl_1"],
            "init_trust_lvl_2": steps.inputs.parameters["init_trust_lvl_2"],
            "cluster_threshold": block_step.outputs.parameters["cluster_threshold"],
            "angular_mask": steps.inputs.parameters["angular_mask"],
            "weights": steps.inputs.parameters["weights"],
            "max_selection": steps.inputs.parameters["max_selection"],
            "numb_cluster_threshold": steps.inputs.parameters["numb_cluster_threshold"],
            "std_threshold": steps.inputs.parameters["std_threshold"],
            "dt": steps.inputs.parameters["dt"],
            "output_freq": steps.inputs.parameters["output_freq"],
            "slice_mode": steps.inputs.parameters["slice_mode"],
            "type_map": steps.inputs.parameters["type_map"],
            "label_config": steps.inputs.parameters["label_config"],
            "train_config": steps.inputs.parameters["train_config"]
        },
        artifacts={
            "models": block_step.outputs.artifacts["models"],
            "forcefield" : steps.inputs.artifacts['forcefield'],
            "topology": steps.inputs.artifacts["topology"],
            "inputfile": steps.inputs.artifacts["inputfile"],
            "confs": block_step.outputs.artifacts["conf_outs"],
            "data_old": block_step.outputs.artifacts["data"],
            "index_file": steps.inputs.artifacts["index_file"],
            "dp_files": steps.inputs.artifacts["dp_files"],
            "cv_file": steps.inputs.artifacts["cv_file"]
        },
        when = "%s < %s" % (recorder_step.outputs.parameters['next_iteration'], steps.inputs.parameters["numb_iters"]),
    )
    steps.add(next_step)
    # Route outputs out of the recursion: on the final iteration take the
    # block step's artifacts, otherwise forward the recursive step's.
    steps.outputs.artifacts['exploration_trajectory'].from_expression = \
        if_expression(
            _if = (recorder_step.outputs.parameters['next_iteration'] >= steps.inputs.parameters["numb_iters"]),
            _then = block_step.outputs.artifacts['exploration_trajectory'],
            _else = next_step.outputs.artifacts['exploration_trajectory'],
        )
    steps.outputs.artifacts['conf_outs'].from_expression = \
        if_expression(
            _if = (recorder_step.outputs.parameters['next_iteration'] >= steps.inputs.parameters["numb_iters"]),
            _then = block_step.outputs.artifacts['conf_outs'],
            _else = next_step.outputs.artifacts['conf_outs'],
        )
    steps.outputs.artifacts['models'].from_expression = \
        if_expression(
            _if = (recorder_step.outputs.parameters['next_iteration'] >= steps.inputs.parameters["numb_iters"]),
            _then = block_step.outputs.artifacts['models'],
            _else = next_step.outputs.artifacts['models'],
        )
    steps.outputs.artifacts['data'].from_expression = \
        if_expression(
            _if = (recorder_step.outputs.parameters['next_iteration'] >= steps.inputs.parameters["numb_iters"]),
            _then = block_step.outputs.artifacts['data'],
            _else = next_step.outputs.artifacts['data'],
        )
    return steps
class ReinforcedDynamics(Steps):
    """Top-level Reinforced Dynamics workflow template.

    Prepares the RiD run from the user configuration, executes the first
    (init) block, then hands control to a ``ReinforcedDynamicsLoop`` that
    iterates until convergence of the iteration counter.  The actual step
    wiring is delegated to the module-level helper ``_rid``.
    """
    def __init__(
            self,
            name : str,
            init_block_op : Steps,
            block_op : Steps,
            step_config : dict,
            upload_python_package : str = None,
        ):
        # All run settings come in through artifacts (rid_config etc.);
        # there are no workflow-level input parameters.
        self._input_parameters={}
        self._input_artifacts={
            "models": InputArtifact(optional=True),
            "forcefield": InputArtifact(optional=True),
            "topology": InputArtifact(optional=True),
            "inputfile": InputArtifact(optional=True),
            "confs": InputArtifact(),
            "rid_config": InputArtifact(),
            "index_file": InputArtifact(optional=True),
            "data_file": InputArtifact(optional=True),
            "dp_files": InputArtifact(optional=True),
            "cv_file": InputArtifact(optional=True)
        }
        self._output_parameters={
        }
        self._output_artifacts={
            "exploration_trajectory": OutputArtifact(),
            "models": OutputArtifact(),
            "data": OutputArtifact(),
            "conf_outs": OutputArtifact()
        }
        super().__init__(
            name = name,
            inputs = Inputs(
                parameters=self._input_parameters,
                artifacts=self._input_artifacts,
            ),
            outputs=Outputs(
                parameters=self._output_parameters,
                artifacts=self._output_artifacts,
            ),
        )
        _init_keys = ['recorder', 'block']
        step_keys = {}
        for ii in _init_keys:
            step_keys[ii] = '--'.join(['init', ii])
        # _rid adds the prepare/recorder/init-block/loop steps to ``self``
        # in place and returns it, so this re-assignment is a no-op.
        self = _rid(
            self,
            step_keys,
            name,
            init_block_op,
            block_op,
            step_config = step_config,
            upload_python_package = upload_python_package,
        )

    @property
    def input_parameters(self):
        # Read-only view of the declared input parameters.
        return self._input_parameters

    @property
    def input_artifacts(self):
        return self._input_artifacts

    @property
    def output_parameters(self):
        return self._output_parameters

    @property
    def output_artifacts(self):
        return self._output_artifacts

    @property
    def init_keys(self):
        # NOTE(review): ``_init_keys`` is a local in __init__, never stored
        # on the instance, so this property raises AttributeError -- confirm.
        return self._init_keys

    @property
    def loop_keys(self):
        # NOTE(review): neither ``loop_key`` nor ``loop`` is ever defined on
        # this class, so this property raises AttributeError -- confirm.
        return [self.loop_key] + self.loop.keys
def _rid(
        steps,
        step_keys,
        name,
        init_block_op,
        block_op,
        step_config : dict,
        upload_python_package : Optional[str] = None
    ):
    """Wire the full RiD workflow (prepare -> recorder -> init block ->
    loop) into ``steps``.

    The prepare step parses the user configuration; the recorder step
    produces the initial block tag; the init block runs the first RiD
    iteration; the loop step recursively runs the remaining iterations.
    Outputs are routed with ``if_expression`` depending on whether the
    loop ran at all.  Returns ``steps``.

    NOTE(review): the ``step_keys`` parameter is currently unused -- keys
    are hard-coded below ('prepare-rid', 'init-recorder', ...); confirm
    whether it should be threaded through.
    """
    # Copy the config before popping keys so the caller's dict stays intact.
    _step_config = deepcopy(step_config)
    step_template_config = _step_config.pop('template_config')
    step_executor = init_executor(_step_config.pop('executor'))
    # Parse the user configuration into the parameters consumed by every
    # downstream step.
    prep_rid = Step(
        name = 'prepare-rid',
        template=PythonOPTemplate(
            PrepRiD,
            python_packages = upload_python_package,
            **step_template_config,
        ),
        parameters={},
        artifacts={
            "confs": steps.inputs.artifacts['confs'],
            "rid_config" : steps.inputs.artifacts['rid_config'],
        },
        key = 'prepare-rid',
        executor = step_executor,
        **_step_config,
    )
    steps.add(prep_rid)
    # Bookkeeping step for the first iteration; ``iteration`` is None,
    # presumably signalling "start from iteration 0" -- see Recorder OP.
    recorder_step = Step(
        name = name + '-recorder',
        template=PythonOPTemplate(
            Recorder,
            python_packages = upload_python_package,
            **step_template_config,
        ),
        parameters={
            "iteration": None,
        },
        artifacts={},
        key = "init-recorder",
        executor = step_executor,
        **_step_config,
    )
    steps.add(recorder_step)
    # First RiD block, driven entirely by the prepared configuration.
    init_block = Step(
        name = name + '-block',
        template = init_block_op,
        parameters={
            "block_tag": recorder_step.outputs.parameters["block_tag"],
            "walker_tags": prep_rid.outputs.parameters["walker_tags"],
            "model_tags": prep_rid.outputs.parameters["model_tags"],
            "exploration_config": prep_rid.outputs.parameters["exploration_config"],
            "cv_config": prep_rid.outputs.parameters["cv_config"],
            "trust_lvl_1" : prep_rid.outputs.parameters["trust_lvl_1"],
            "trust_lvl_2": prep_rid.outputs.parameters["trust_lvl_2"],
            "numb_cluster_upper": prep_rid.outputs.parameters['numb_cluster_upper'],
            "numb_cluster_lower": prep_rid.outputs.parameters['numb_cluster_lower'],
            "cluster_threshold": prep_rid.outputs.parameters["cluster_threshold"],
            "angular_mask": prep_rid.outputs.parameters["angular_mask"],
            "weights": prep_rid.outputs.parameters["weights"],
            "max_selection": prep_rid.outputs.parameters["max_selection"],
            "std_threshold": prep_rid.outputs.parameters["std_threshold"],
            "dt": prep_rid.outputs.parameters["dt"],
            "output_freq": prep_rid.outputs.parameters["output_freq"],
            "slice_mode": prep_rid.outputs.parameters["slice_mode"],
            "type_map": prep_rid.outputs.parameters["type_map"],
            "label_config": prep_rid.outputs.parameters["label_config"],
            "train_config": prep_rid.outputs.parameters["train_config"]
        },
        artifacts={
            "models": steps.inputs.artifacts["models"],
            "forcefield" : steps.inputs.artifacts['forcefield'],
            "topology": steps.inputs.artifacts["topology"],
            "inputfile": steps.inputs.artifacts["inputfile"],
            "confs": prep_rid.outputs.artifacts["confs"],
            "index_file": steps.inputs.artifacts["index_file"],
            "data_old": steps.inputs.artifacts["data_file"],
            "dp_files": steps.inputs.artifacts["dp_files"],
            "cv_file": steps.inputs.artifacts["cv_file"]
        },
        key = "%s-init-block"%recorder_step.outputs.parameters["block_tag"]
    )
    steps.add(init_block)
    # Remaining iterations: the recursive loop template.  Trust levels
    # restart from the prepared values; confs and data come from the init
    # block.  The loop is skipped entirely when numb_iters is already met.
    loop_step = Step(
        name = name + '-loop',
        template = ReinforcedDynamicsLoop(
            name = "RiD-Loop",
            block_op = block_op,
            step_config = step_config,
            upload_python_package = upload_python_package
        ),
        parameters={
            "numb_iters": prep_rid.outputs.parameters["numb_iters"],
            "last_iteration": recorder_step.outputs.parameters["next_iteration"],
            "block_tag": recorder_step.outputs.parameters["block_tag"],
            "walker_tags": prep_rid.outputs.parameters["walker_tags"],
            "model_tags": prep_rid.outputs.parameters["model_tags"],
            "exploration_config": prep_rid.outputs.parameters["exploration_config"],
            "cv_config": prep_rid.outputs.parameters["cv_config"],
            "trust_lvl_1" : prep_rid.outputs.parameters["trust_lvl_1"],
            "trust_lvl_2": prep_rid.outputs.parameters["trust_lvl_2"],
            "init_trust_lvl_1": prep_rid.outputs.parameters["trust_lvl_1"],
            "init_trust_lvl_2": prep_rid.outputs.parameters["trust_lvl_2"],
            "cluster_threshold": init_block.outputs.parameters["cluster_threshold"],
            "angular_mask": prep_rid.outputs.parameters["angular_mask"],
            "weights": prep_rid.outputs.parameters["weights"],
            "max_selection": prep_rid.outputs.parameters["max_selection"],
            "numb_cluster_threshold": prep_rid.outputs.parameters["numb_cluster_threshold"],
            "std_threshold": prep_rid.outputs.parameters["std_threshold"],
            "dt": prep_rid.outputs.parameters["dt"],
            "output_freq": prep_rid.outputs.parameters["output_freq"],
            "slice_mode": prep_rid.outputs.parameters["slice_mode"],
            "type_map": prep_rid.outputs.parameters["type_map"],
            "label_config": prep_rid.outputs.parameters["label_config"],
            "train_config": prep_rid.outputs.parameters["train_config"]
        },
        artifacts={
            "models": init_block.outputs.artifacts["models"],
            "forcefield" : steps.inputs.artifacts['forcefield'],
            "topology": steps.inputs.artifacts["topology"],
            "inputfile": steps.inputs.artifacts["inputfile"],
            "confs": init_block.outputs.artifacts["conf_outs"],
            "data_old": init_block.outputs.artifacts["data"],
            "index_file": steps.inputs.artifacts["index_file"],
            "dp_files": steps.inputs.artifacts["dp_files"],
            "cv_file": steps.inputs.artifacts["cv_file"]
        },
        when = "%s < %s" % (recorder_step.outputs.parameters['next_iteration'], prep_rid.outputs.parameters["numb_iters"]),
        key = "rid-loop",
    )
    steps.add(loop_step)
    # Route the workflow outputs: if the loop never ran take the init
    # block's artifacts, otherwise the loop's final artifacts.
    steps.outputs.artifacts['exploration_trajectory'].from_expression = \
        if_expression(
            _if = (recorder_step.outputs.parameters['next_iteration'] >= prep_rid.outputs.parameters["numb_iters"]),
            _then = init_block.outputs.artifacts['exploration_trajectory'],
            _else = loop_step.outputs.artifacts['exploration_trajectory'],
        )
    steps.outputs.artifacts['conf_outs'].from_expression = \
        if_expression(
            _if = (recorder_step.outputs.parameters['next_iteration'] >= prep_rid.outputs.parameters["numb_iters"]),
            _then = init_block.outputs.artifacts['conf_outs'],
            _else = loop_step.outputs.artifacts['conf_outs'],
        )
    steps.outputs.artifacts['models'].from_expression = \
        if_expression(
            _if = (recorder_step.outputs.parameters['next_iteration'] >= prep_rid.outputs.parameters["numb_iters"]),
            _then = init_block.outputs.artifacts['models'],
            _else = loop_step.outputs.artifacts['models'],
        )
    steps.outputs.artifacts['data'].from_expression = \
        if_expression(
            _if = (recorder_step.outputs.parameters['next_iteration'] >= prep_rid.outputs.parameters["numb_iters"]),
            _then = init_block.outputs.artifacts['data'],
            _else = loop_step.outputs.artifacts['data'],
        )
    return steps
from dflow.python import (
OP,
OPIO,
OPIOSign,
Artifact,
Parameter
)
from typing import List, Optional, Union, Dict
from pathlib import Path
import numpy as np
from rid.constants import (
cv_force_out
)
from rid.common.gromacs.trjconv import generate_coords, generate_forces
from rid.utils import load_txt, set_directory
from rid.constants import gmx_coord_name, gmx_force_name,f_cvt,kb,mf_fig
from matplotlib import pyplot as plt
import os
from parmed import gromacs
import parmed as pmd
def plot_mf_average_all(mf_average, task_path, dt):
    """Plot the running average of the mean force for every CV component.

    Parameters
    ----------
    mf_average : np.ndarray
        2-D array of shape (n_windows, n_cv); row ``i`` is the running
        average of the mean force computed up to window ``i``.
    task_path : Path
        Directory in which the figure file (``mf_fig``) is written.
    dt : float
        Time between consecutive rows of ``mf_average`` (ps), used to turn
        the row index into simulation time on the x axis.
    """
    fig = plt.figure(figsize=(10, 8), dpi=100)
    xlist = [i*dt for i in range(len(mf_average))]
    for index in range(mf_average.shape[1]):
        plt.scatter(xlist, mf_average[:,index],label="d%s"%(index+1))
    plt.title("running average of mean force")
    plt.xlabel("constrained simulation time (ps)")
    plt.ylabel("mean force (KJ/(mol*nm))")
    plt.legend()
    plt.savefig(task_path.joinpath(mf_fig))
    # Close the figure explicitly: pyplot keeps every figure alive until
    # closed, so repeated calls (one per labeling task) would leak memory.
    plt.close(fig)
def phase_factor(r_cv, cv_init, selected_atomid_simple, mass_list):
    """Return det(C M^-1 C^T) — the phase-space (Fixman-type) factor for the
    distance CVs defined by ``selected_atomid_simple``.

    ``r_cv`` is the flattened coordinate vector of the selected atoms
    (three entries per atom), ``cv_init`` holds the reference distance
    values, and ``mass_list`` gives one mass entry per coordinate.
    """
    n_cv = len(cv_init)
    # Gradient of each distance CV with respect to the flattened coordinates.
    grad = np.zeros(shape=(n_cv, len(r_cv)))
    for k in range(n_cv):
        ia, ib = selected_atomid_simple[k][0], selected_atomid_simple[k][1]
        for ax in range(3):
            comp = (r_cv[ia*3 + ax] - r_cv[ib*3 + ax]) / cv_init[k]
            grad[k][ia*3 + ax] = comp
            grad[k][ib*3 + ax] = -comp
    # Mass-weighted Gram matrix of the CV gradients.
    inv_mass = np.linalg.inv(np.diag(mass_list))
    gram = np.zeros(shape=(n_cv, n_cv))
    for row in range(n_cv):
        for col in range(n_cv):
            gram[row, col] = np.dot(np.dot(grad[row], inv_mass), grad[col])
    return np.linalg.det(gram)
def pseudo_inv(r_cv, cv_init, selected_atomid_simple):
    """Moore-Penrose pseudo-inverse of the Jacobian of the distance CVs.

    Builds the (3*n_atoms, n_cv) Jacobian of the distances listed in
    ``selected_atomid_simple`` (normalized by the reference values in
    ``cv_init``) at the flattened coordinates ``r_cv``, then inverts it
    via SVD: pinv(A) = V S^-1 U^T.
    """
    jac = np.zeros((len(r_cv), len(cv_init)))
    for k in range(len(cv_init)):
        ia, ib = selected_atomid_simple[k][0], selected_atomid_simple[k][1]
        for ax in range(3):
            comp = (r_cv[ia*3 + ax] - r_cv[ib*3 + ax]) / cv_init[k]
            jac[ia*3 + ax, k] = comp
            jac[ib*3 + ax, k] = -comp
    u, s, vh = np.linalg.svd(jac, full_matrices=False)
    return np.matmul(np.matmul(vh.T, np.linalg.inv(np.diag(s))), u.T)
# Point ParmEd at the GROMACS topology directory: probe the install
# locations used by the rid-kit images, first match wins.
for _gmx_topdir in ("/gromacs/share/gromacs/top", "/opt/conda/share/gromacs/top"):
    if os.path.exists(_gmx_topdir):
        gromacs.GROMACS_TOPDIR = _gmx_topdir
        break
def CalcMF(
    conf: str,
    task_name: str,
    plm_out: str,
    cv_config: dict,
    label_config: dict,
    tail: float = 0.9,
    topology: Optional[str] = None,
    frame_coords: Optional[str] = None,
    frame_forces: Optional[str] = None,
    at: Optional[str] = None
):
    """
    Calculate mean force with the results of restrained or constrained MD.

    CalcMF will handle periodic CVs by `angular_mask`.
    To get the mean value of CVs near equilibrium state under restrained or
    constrained MD, only part of outputs of CV values (the last `tail` part,
    for example, the last 90% CV values) are used.

    Parameters
    ----------
    conf : str
        Conformation file name (not read directly inside this function).
    task_name : str
        Name of the task directory where all outputs are written.
    plm_out : str
        PLUMED output file; column 0 is the time index, remaining columns
        are CV values per frame.
    cv_config : dict
        CV settings; uses ``angular_mask`` (restrained branch) and
        ``selected_atomid``/``units`` (constrained branch).
    label_config : dict
        Labeling settings; uses ``method``, ``kappas``, ``dt``,
        ``output_freq`` and, in the constrained branch, ``temperature``.
    tail : float
        Fraction of trailing frames averaged for the final estimate.
    topology : Optional[str]
        Topology file; needed by the constrained branch for atom masses.
    frame_coords, frame_forces : Optional[str]
        Per-frame coordinate/force files (constrained branch only).
    at : Optional[str]
        Unused here; kept for interface compatibility.

    Returns
    -------
    dict
        ``{"cv_forces": <path to cv_force_out>, "mf_info": <path or None>}``.

    NOTE(review): if ``label_config["method"]`` is neither "restrained" nor
    "constrained", ``task_path`` is never assigned and the final lookup
    raises NameError — confirm upstream validation guarantees the value.
    """
    if label_config["method"] == "restrained":
        # Restrained estimator: mean force = kappa * (<cv> - center).
        data = load_txt(plm_out)
        data = data[:, 1:]  # remove the first column (time index).
        centers = data[0,:]
        nframes = data.shape[0]
        # Boolean mask selecting the periodic (angular) CV components.
        angular_boolean = (np.array(cv_config["angular_mask"], dtype=int) == 1)
        init_angle = data[0][angular_boolean]
        # Unwrap periodic CVs so every frame lies within pi of frame 0.
        for ii in range(1, nframes):
            current_angle = data[ii][angular_boolean]
            angular_diff = current_angle - init_angle
            current_angle[angular_diff < -np.pi] += 2 * np.pi
            current_angle[angular_diff >= np.pi] -= 2 * np.pi
            data[ii][angular_boolean] = current_angle
        # Final estimate: average over the trailing `tail` fraction.
        start_f = int(nframes * (1-tail))
        avgins = np.average(data[start_f:, :], axis=0)
        # Running-average series of the mean force (for plotting/statistics).
        mf_avg_list = []
        for simu_frames in range(int(nframes*0.1), nframes,1):
            # NOTE(review): uses a hard-coded 0.9 tail here rather than the
            # `tail` argument — confirm this is intended.
            start_f = int(simu_frames*(1-0.9))
            avgins_ = np.average(data[start_f:simu_frames, :], axis=0)
            diff_ = avgins_ - centers
            # Wrap the periodic components of the deviation into (-pi, pi].
            angular_diff_ = diff_[angular_boolean]
            angular_diff_[angular_diff_ < -np.pi] += 2 * np.pi
            angular_diff_[angular_diff_ > np.pi] -= 2 * np.pi
            diff_[angular_boolean] = angular_diff_
            ff_ = np.multiply(label_config["kappas"], diff_)
            mf_avg_list.append(ff_)
        mf_avg_list = np.array(mf_avg_list)
        mid_f = int(nframes * 0.5)
        # NOTE(review): mf_avg_list has only ~0.9*nframes rows but is indexed
        # with mid_f computed from nframes, so the std covers roughly the
        # last 4/9 of the running averages — confirm the offset is intended.
        mf_std = np.std(mf_avg_list[mid_f:,:], axis=0)
        # Final mean-force vector from the tail-averaged CV deviation.
        diff = avgins - centers
        angular_diff = diff[angular_boolean]
        angular_diff[angular_diff < -np.pi] += 2 * np.pi
        angular_diff[angular_diff > np.pi] -= 2 * np.pi
        diff[angular_boolean] = angular_diff
        ff = np.multiply(label_config["kappas"], diff)
        task_path = Path(task_name)
        task_path.mkdir(exist_ok=True, parents=True)
        # Persist [centers | forces] as a single row for downstream training.
        cv_forces = np.concatenate((centers, ff))
        np.savetxt(task_path.joinpath(cv_force_out), np.reshape(cv_forces, [1, -1]), fmt='%.10f')
        plot_mf_average_all(mf_avg_list, task_path, dt = label_config["dt"]*label_config["output_freq"])
        # Human-readable summary of centers, forces and their std.
        with open(task_path.joinpath("mf_info.out"),"w") as f:
            f.write("cv list value    ")
            for cv_ in centers:
                f.write("%.4f "%cv_)
            f.write("\n")
            f.write("mean force value ")
            for mf_ in ff:
                f.write("%.4f "%mf_)
            f.write("\n")
            f.write("mean force std   ")
            for mf_std_ in mf_std:
                f.write("%.4f "%mf_std_)
    elif label_config["method"] == "constrained":
        # Constrained estimator: project atomic forces onto the CV space via
        # the Jacobian pseudo-inverse, with a phase-space (Fixman) correction.
        data = load_txt(plm_out)
        data = data[:, 1:]  # remove the first column (time index).
        centers = data[0,:]
        # Kb to KJ/mol
        KB = kb*f_cvt
        T = float(label_config["temperature"])
        coords = np.loadtxt(frame_coords,comments=["#","@"])
        if "units" in cv_config:
            length_units = cv_config["units"]
        else:
            length_units = None
        if length_units == None or length_units == "nm":
            # coords units nm
            coords = coords[:,1:]
            forces = np.loadtxt(frame_forces,comments=["#","@"])
            # forces units to KJ/(mol*nm)
            forces = forces[:,1:]
        elif length_units == "A" or length_units == "Angstrom":
            # CVs are presumably defined in Angstrom while the engine outputs
            # nm, so coordinates are scaled to A and forces to KJ/(mol*A)
            # — TODO confirm against the CV definition.
            coords = coords[:,1:]*10
            forces = np.loadtxt(frame_forces,comments=["#","@"])
            forces = forces[:,1:]/10
        else:
            raise ValueError("Valid length units must be nm or A")
        mflist = []
        mflist_phase = []
        phase_list = []
        mf_average_phase = []
        # Map the 1-based global atom ids of each CV pair to compact 0-based
        # indices into the selected-atoms coordinate vector.
        selected_atomid = cv_config["selected_atomid"]
        selected_atoms = list(set([item for sublist in selected_atomid for item in sublist]))
        selected_atomid_simple = []
        for atom_pairs in selected_atomid:
            selected_atomid_simple.append([selected_atoms.index(i) for i in atom_pairs])
        # calculate mass matrix of the system
        system = pmd.load_file(os.path.abspath(topology))
        mass_list = [system.atoms[i].mass for i in range(len(system.atoms))]
        # One mass entry per coordinate (x, y, z) of each selected atom.
        mass_list_simple = []
        for atom_id in selected_atoms:
            atom_id -= 1
            mass_list_simple.append(mass_list[atom_id])
            mass_list_simple.append(mass_list[atom_id])
            mass_list_simple.append(mass_list[atom_id])
        # calculate mean force via singular value decomposition(SVD)
        eps = 0.0001  # finite-difference step for the Jacobian derivative
        for index in range(np.shape(coords)[0]):
            coordlist = coords[index]
            forcelist = forces[index]
            # Gather coordinates/forces of the selected atoms only
            # (atom ids are 1-based in the config).
            r_cv = []
            f_cv = []
            for atom_id in selected_atoms:
                atom_id -= 1
                r_cv.append(coordlist[atom_id*3])
                r_cv.append(coordlist[atom_id*3+1])
                r_cv.append(coordlist[atom_id*3+2])
                f_cv.append(forcelist[atom_id*3])
                f_cv.append(forcelist[atom_id*3+1])
                f_cv.append(forcelist[atom_id*3+2])
            B = pseudo_inv(r_cv, centers, selected_atomid_simple)
            # print(B.shape)
            # Central finite differences of the pseudo-inverse w.r.t. each
            # coordinate, needed for the kB*T divergence correction.
            # NOTE: this inner loop reuses (shadows) the outer frame loop's
            # `index`; harmless because the outer `for` draws from its own
            # iterator, but worth renaming on the next touch.
            dBdx = []
            for index in range(len(r_cv)):
                r1 = r_cv.copy()
                r1[index] += eps
                B1 = pseudo_inv(r1,centers,selected_atomid_simple)
                r2 = r_cv.copy()
                r2[index] -= eps
                B2 = pseudo_inv(r2, centers,selected_atomid_simple)
                dBdx.append((B1-B2)/(2*eps))
            dBdx = np.array(dBdx)
            phase = phase_factor(r_cv, centers, selected_atomid_simple, mass_list_simple)
            # Projected atomic force plus the kB*T trace (divergence) term.
            mf = np.matmul(B,f_cv) + KB*T*np.trace(dBdx, axis1=0, axis2=2)
            phase_list.append(1/(phase**0.5))
            mflist.append(mf)
            mflist_phase.append(mf/(phase**0.5))
        mflist = np.array(mflist)
        mflist_phase = np.array(mflist_phase)
        nframes = np.shape(coords)[0]
        start_f = int(nframes * (1-tail))
        mid_f = int(nframes * 0.5)
        # Phase-weighted average over the trailing frames; dividing by the
        # average weight gives the properly normalized mean force.
        mf_avg_without_norm = np.average(mflist_phase[start_f:, :], axis=0)
        phase_avg = np.average(phase_list[start_f:])
        mf_avg_without_phase = np.average(mflist[start_f:, :], axis=0)
        avg_force = mf_avg_without_norm/phase_avg
        # Running averages (phase-weighted) for plotting/statistics; note the
        # hard-coded 0.9 tail, mirroring the restrained branch.
        for simu_frames in range(int(nframes*0.1), nframes,1):
            start_f = int(simu_frames*(1-0.9))
            mf_average_phase.append(np.average(mflist_phase[start_f:simu_frames, :], axis=0)/np.average(phase_list[start_f:simu_frames]))
        # calculate mean force statistics
        mf_average_phase = np.array(mf_average_phase)
        mf_difference = avg_force - mf_avg_without_phase
        # NOTE(review): same mid_f-vs-list-length offset as the restrained
        # branch (mf_average_phase holds ~0.9*nframes rows).
        mf_std = np.std(mf_average_phase[mid_f:,:], axis=0)
        task_path = Path(task_name)
        task_path.mkdir(exist_ok=True, parents=True)
        # Persist [centers | forces] as a single row for downstream training.
        cv_forces = np.concatenate((centers, avg_force))
        np.savetxt(task_path.joinpath(cv_force_out), np.reshape(cv_forces, [1, -1]), fmt='%.10f')
        plot_mf_average_all(mf_average_phase, task_path, dt = label_config["dt"]*label_config["output_freq"])
        # Human-readable summary: centers, forces, effect of the phase
        # correction, and the std of the running averages.
        with open(task_path.joinpath("mf_info.out"),"w") as f:
            f.write("cv list value    ")
            for cv_ in centers:
                f.write("%.4f "%cv_)
            f.write("\n")
            f.write("mean force value ")
            for mf_ in avg_force:
                f.write("%.4f "%mf_)
            f.write("\n")
            f.write("diff with phase  ")
            for mf_diff_ in mf_difference:
                f.write("%.4f "%mf_diff_)
            f.write("\n")
            f.write("mean force std   ")
            for mf_std_ in mf_std:
                f.write("%.4f "%mf_std_)
    # Return the output paths; mf_info is None if no summary was written.
    mf_info = None
    if os.path.exists(task_path.joinpath("mf_info.out")):
        mf_info = task_path.joinpath("mf_info.out")
    op_out = {
        "cv_forces": task_path.joinpath(cv_force_out),
        "mf_info": mf_info
    }
    return op_out
from distutils.command.config import dump_file
import json
from pathlib import Path
from typing import List, Union, Optional
from rid.utils import load_json
from copy import deepcopy
import os
from dflow import (
Workflow,
Step,
upload_artifact
)
from dflow.python import upload_packages
from rid import SRC_ROOT
upload_packages.append(SRC_ROOT)
from rid.utils import normalize_resources
from rid.superop.label import Label
from rid.op.prep_label import PrepLabel, CheckLabelInputs
from rid.op.run_label import RunLabel
from rid.constants import label_task_pattern
def relabel_rid(
    workflow_id: str,
    confs: Union[str, List[str]],
    topology: Optional[str],
    rid_config: str,
    machine_config: str,
    workflow_id_defined: Optional[str] = None,
    models: Optional[Union[str, List[str]]] = None,
    forcefield: Optional[str] = None,
    index_file: Optional[str] = None,
    dp_files: Optional[List[str]] = None,
    otherfiles: Optional[List[str]] = None
):
    """Resubmit the labeling stage, reusing succeeded pods of an old workflow.

    Parameters
    ----------
    workflow_id : str
        Id of the previous workflow whose succeeded pods are reused.
    confs : str or list of str
        Conformation file(s) to label.
    topology : str, optional
        Topology file for the MD engine.
    rid_config : str
        Path to the RiD JSON configuration.
    machine_config : str
        Path to the machine/resources JSON configuration.
    workflow_id_defined : str, optional
        Explicit id to assign to the new workflow.
    models : list of str, optional
        Model files; split into free-energy and DP models by basename.
    forcefield, index_file : str, optional
        Optional force-field and GROMACS index files.
    dp_files : list of str, optional
        Extra DeepMD files. Default changed from the mutable ``[]`` to
        ``None`` (normalized below); behavior is unchanged.
    otherfiles : list of str, optional
        Extra files matched against input/CV file names from the config.
    """
    if dp_files is None:
        dp_files = []
    with open(machine_config, "r") as mcg:
        machine_config_dict = json.load(mcg)
    resources = machine_config_dict["resources"]
    tasks = machine_config_dict["tasks"]
    normalized_resources = {}
    for resource_type in resources.keys():
        normalized_resources[resource_type] = normalize_resources(resources[resource_type])

    # retry_times=None: reused (succeeded) steps must not be retried.
    # NOTE(review): label.py uses retry_times=1 for a fresh run — confirm
    # this difference is intentional.
    label_op = Label(
        "label",
        CheckLabelInputs,
        PrepLabel,
        RunLabel,
        prep_config = normalized_resources[tasks["prep_label_config"]],
        run_config = normalized_resources[tasks["run_label_config"]],
        retry_times=None)

    # Normalize `confs` to a list: the conf_tags loop below indexes it
    # element-wise, and iterating a bare str would enumerate characters.
    if isinstance(confs, str):
        confs_artifact = upload_artifact(Path(confs), archive=None)
        confs = [confs]
    elif isinstance(confs, List):
        confs_artifact = upload_artifact([Path(p) for p in confs], archive=None)
    else:
        raise RuntimeError("Invalid type of `confs`.")

    if index_file is None:
        index_file_artifact = None
    else:
        index_file_artifact = upload_artifact(Path(index_file), archive=None)

    jdata = deepcopy(load_json(rid_config))
    cv_config = jdata["CV"]
    label_config = jdata["LabelMDConfig"]

    # File names declared in the RiD config; provided files are bucketed by
    # matching basenames against these declarations.
    inputfiles = []
    if "inputfile" in jdata["ExploreMDConfig"]:
        inputfiles.append(jdata["ExploreMDConfig"]["inputfile"])
    if "inputfile" in jdata["LabelMDConfig"]:
        inputfiles.append(jdata["LabelMDConfig"]["inputfile"])
    fe_models = []
    assert isinstance(jdata["init_models"], list), "model input should be list."
    for model in jdata["init_models"]:
        fe_models.append(model)
    cvfiles = []
    if "cv_file" in jdata["CV"]:
        assert isinstance(jdata["CV"]["cv_file"], list), "CV file input should be list."
        for file in jdata["CV"]["cv_file"]:
            cvfiles.append(file)
    dp_models = []
    if "dp_model" in jdata["ExploreMDConfig"]:
        assert isinstance(jdata["ExploreMDConfig"]["dp_model"], list), "model input should be list."
        for model in jdata["ExploreMDConfig"]["dp_model"]:
            dp_models.append(model)

    # Sort the supplied files into buckets.
    inputfile_list = []
    cvfile_list = []
    model_list = []
    dpfile_list = []
    if otherfiles is not None:
        for file in otherfiles:
            if os.path.basename(file) in inputfiles:
                inputfile_list.append(file)
            elif os.path.basename(file) in cvfiles:
                cvfile_list.append(file)
    for dp_file in dp_files:
        dpfile_list.append(dp_file)
    if models is not None:
        for model in models:
            if os.path.basename(model) in fe_models:
                model_list.append(model)
            elif os.path.basename(model) in dp_models:
                dpfile_list.append(model)

    if len(inputfile_list) == 0:
        inputfile_artifact = None
    else:
        inputfile_artifact = upload_artifact([Path(p) for p in inputfile_list], archive=None)
    if len(model_list) == 0:
        models_artifact = None
    else:
        models_artifact = upload_artifact([Path(p) for p in model_list], archive=None)
    if len(cvfile_list) == 0:
        cv_file_artifact = None
    else:
        cv_file_artifact = upload_artifact([Path(p) for p in cvfile_list], archive=None)
    # `dpfile_list` is always a list here, so upload whenever non-empty.
    # (The old `elif isinstance(dp_files, List)` raised spuriously when DP
    # models were collected from `models` while `dp_files` was None.)
    if len(dpfile_list) == 0:
        dp_files_artifact = None
    else:
        dp_files_artifact = upload_artifact([Path(p) for p in dpfile_list], archive=None)

    if forcefield is None:
        forcefield_artifact = None
    else:
        forcefield_artifact = upload_artifact(Path(forcefield), archive=None)
    if topology is None:
        top_artifact = None
    else:
        top_artifact = upload_artifact(Path(topology), archive=None)

    # One tag per conformation: file name -> task name.
    conf_tags = []
    for index in range(len(confs)):
        conf_tags.append({Path(confs[index]).name: label_task_pattern.format(index)})

    rid_steps = Step("rid-label",
        label_op,
        artifacts={
            "topology": top_artifact,
            "models": models_artifact,
            "forcefield": forcefield_artifact,
            "inputfile": inputfile_artifact,
            "confs": confs_artifact,
            "at": None,
            "index_file": index_file_artifact,
            "dp_files": dp_files_artifact,
            "cv_file": cv_file_artifact
        },
        parameters={
            "label_config": label_config,
            "cv_config": cv_config,
            "conf_tags": conf_tags,
            "block_tag": "000"
        },
    )

    # Collect every succeeded pod of the previous workflow for reuse.
    old_workflow = Workflow(id=workflow_id)
    all_steps = old_workflow.query_step()
    succeeded_steps = []
    for step in all_steps:
        if step["type"] == "Pod":
            if step["phase"] == "Succeeded":
                succeeded_steps.append(step)
    wf = Workflow("rid-labeling", pod_gc_strategy="OnPodSuccess", parallelism=50, id=workflow_id_defined)
    wf.add(rid_steps)
    wf.submit(reuse_step=succeeded_steps)
from distutils.command.config import dump_file
import json
from pathlib import Path
from typing import List, Union, Optional
from rid.utils import load_json
from copy import deepcopy
import os
from dflow import (
Workflow,
Step,
upload_artifact
)
from dflow.python import(
PythonOPTemplate,
upload_packages,
OP
)
from rid import SRC_ROOT
upload_packages.append(SRC_ROOT)
from rid.utils import normalize_resources,init_executor
from rid.op.prep_rid import PrepRiD
from rid.superop.exploration import Exploration
from rid.op.prep_exploration import PrepExplore
from rid.op.run_exploration import RunExplore
def explore_rid(
    confs: Union[str, List[str]],
    topology: Optional[str],
    rid_config: str,
    machine_config: str,
    workflow_id_defined: Optional[str] = None,
    models: Optional[Union[str, List[str]]] = None,
    forcefield: Optional[str] = None,
    index_file: Optional[str] = None,
    dp_files: Optional[List[str]] = None,
    otherfiles: Optional[List[str]] = None
):
    """Submit a standalone exploration workflow (prepare-rid + one explore block).

    Parameters
    ----------
    confs : str or list of str
        Initial conformation file(s) for the exploration walkers.
    topology : str, optional
        Topology file for the MD engine.
    rid_config : str
        Path to the RiD JSON configuration.
    machine_config : str
        Path to the machine/resources JSON configuration.
    workflow_id_defined : str, optional
        Explicit id to assign to the workflow.
    models : list of str, optional
        Model files; split into free-energy and DP models by basename.
    forcefield, index_file : str, optional
        Optional force-field and GROMACS index files.
    dp_files : list of str, optional
        Extra DeepMD files. Default changed from the mutable ``[]`` to
        ``None`` (normalized below); behavior is unchanged.
    otherfiles : list of str, optional
        Extra files matched against input/CV file names from the config.
    """
    if dp_files is None:
        dp_files = []
    with open(machine_config, "r") as mcg:
        machine_config_dict = json.load(mcg)
    resources = machine_config_dict["resources"]
    tasks = machine_config_dict["tasks"]
    normalized_resources = {}
    for resource_type in resources.keys():
        normalized_resources[resource_type] = normalize_resources(resources[resource_type])

    if isinstance(confs, str):
        confs_artifact = upload_artifact(Path(confs), archive=None)
    elif isinstance(confs, List):
        confs_artifact = upload_artifact([Path(p) for p in confs], archive=None)
    else:
        raise RuntimeError("Invalid type of `confs`.")

    if index_file is None:
        index_file_artifact = None
    else:
        index_file_artifact = upload_artifact(Path(index_file), archive=None)

    jdata = deepcopy(load_json(rid_config))
    cv_config = jdata["CV"]
    explore_config = jdata["ExploreMDConfig"]

    # File names declared in the RiD config; provided files are bucketed by
    # matching basenames against these declarations.
    inputfiles = []
    if "inputfile" in explore_config:
        inputfiles.append(explore_config["inputfile"])
    fe_models = []
    assert isinstance(jdata["init_models"], list), "model input should be list."
    for model in jdata["init_models"]:
        fe_models.append(model)
    cvfiles = []
    if "cv_file" in jdata["CV"]:
        assert isinstance(jdata["CV"]["cv_file"], list), "CV file input should be list."
        for file in jdata["CV"]["cv_file"]:
            cvfiles.append(file)
    dp_models = []
    if "dp_model" in explore_config:
        assert isinstance(explore_config["dp_model"], list), "model input should be list."
        for model in explore_config["dp_model"]:
            dp_models.append(model)

    # Sort the supplied files into buckets.
    inputfile_list = []
    cvfile_list = []
    model_list = []
    dpfile_list = []
    if otherfiles is not None:
        for file in otherfiles:
            if os.path.basename(file) in inputfiles:
                inputfile_list.append(file)
            elif os.path.basename(file) in cvfiles:
                cvfile_list.append(file)
    for dp_file in dp_files:
        dpfile_list.append(dp_file)
    if models is not None:
        for model in models:
            if os.path.basename(model) in fe_models:
                model_list.append(model)
            elif os.path.basename(model) in dp_models:
                dpfile_list.append(model)

    if len(inputfile_list) == 0:
        inputfile_artifact = None
    else:
        inputfile_artifact = upload_artifact([Path(p) for p in inputfile_list], archive=None)
    if len(model_list) == 0:
        models_artifact = None
    else:
        models_artifact = upload_artifact([Path(p) for p in model_list], archive=None)
    if len(cvfile_list) == 0:
        cv_file_artifact = None
    else:
        cv_file_artifact = upload_artifact([Path(p) for p in cvfile_list], archive=None)
    # `dpfile_list` is always a list here, so upload whenever non-empty.
    # (The old `elif isinstance(dp_files, List)` raised spuriously when DP
    # models were collected from `models` while `dp_files` was None.)
    if len(dpfile_list) == 0:
        dp_files_artifact = None
    else:
        dp_files_artifact = upload_artifact([Path(p) for p in dpfile_list], archive=None)

    if forcefield is None:
        forcefield_artifact = None
    else:
        forcefield_artifact = upload_artifact(Path(forcefield), archive=None)
    if topology is None:
        top_artifact = None
    else:
        top_artifact = upload_artifact(Path(topology), archive=None)
    rid_config = upload_artifact(Path(rid_config), archive=None)

    # Resolve executor/template settings for the preparation step.
    _step_config = normalized_resources[tasks["workflow_steps_config"]]
    _step_config = deepcopy(_step_config)
    step_template_config = _step_config.pop('template_config')
    step_executor = init_executor(_step_config.pop('executor'))

    prep_rid = Step(
        name='prepare-rid',
        template=PythonOPTemplate(
            PrepRiD,
            python_packages=None,
            **step_template_config,
        ),
        parameters={},
        artifacts={
            "confs": confs_artifact,
            "rid_config": rid_config
        },
        key='prepare-rid',
        executor=step_executor,
        **_step_config,
    )

    explore_op = Exploration(
        "explore",
        PrepExplore,
        RunExplore,
        prep_config = normalized_resources[tasks["prep_exploration_config"]],
        run_config = normalized_resources[tasks["run_exploration_config"]],
        retry_times=1)

    exploration_steps = Step("rid-explore",
        explore_op,
        artifacts={
            "models": models_artifact,
            "forcefield": forcefield_artifact,
            "topology": top_artifact,
            "inputfile": inputfile_artifact,
            "confs": prep_rid.outputs.artifacts["confs"],
            "index_file": index_file_artifact,
            "dp_files": dp_files_artifact,
            "cv_file": cv_file_artifact
        },
        parameters={
            "trust_lvl_1": prep_rid.outputs.parameters["trust_lvl_1"],
            "trust_lvl_2": prep_rid.outputs.parameters["trust_lvl_2"],
            "exploration_config": explore_config,
            "cv_config": cv_config,
            "task_names": prep_rid.outputs.parameters["walker_tags"],
            "block_tag": "iter-001"
        },
    )
    wf = Workflow("rid-exploration", pod_gc_strategy="OnPodSuccess", parallelism=50, id=workflow_id_defined)
    wf.add(prep_rid)
    wf.add(exploration_steps)
    wf.submit()
from distutils.command.config import dump_file
import json
from pathlib import Path
from typing import List, Union, Optional
from rid.utils import load_json
from copy import deepcopy
import os
from dflow import (
Workflow,
Step,
upload_artifact
)
from dflow.python import upload_packages
from rid import SRC_ROOT
upload_packages.append(SRC_ROOT)
from rid.utils import normalize_resources
from rid.superop.label import Label
from rid.op.prep_label import PrepLabel, CheckLabelInputs
from rid.op.run_label import RunLabel
from rid.constants import label_task_pattern
def label_rid(
    confs: Union[str, List[str]],
    topology: Optional[str],
    rid_config: str,
    machine_config: str,
    workflow_id_defined: Optional[str] = None,
    models: Optional[Union[str, List[str]]] = None,
    forcefield: Optional[str] = None,
    index_file: Optional[str] = None,
    dp_files: Optional[List[str]] = None,
    otherfiles: Optional[List[str]] = None
):
    """Submit a standalone labeling workflow for the given conformations.

    Parameters
    ----------
    confs : str or list of str
        Conformation file(s) to label.
    topology : str, optional
        Topology file for the MD engine.
    rid_config : str
        Path to the RiD JSON configuration.
    machine_config : str
        Path to the machine/resources JSON configuration.
    workflow_id_defined : str, optional
        Explicit id to assign to the workflow.
    models : list of str, optional
        Model files; split into free-energy and DP models by basename.
    forcefield, index_file : str, optional
        Optional force-field and GROMACS index files.
    dp_files : list of str, optional
        Extra DeepMD files. Default changed from the mutable ``[]`` to
        ``None`` (normalized below); behavior is unchanged.
    otherfiles : list of str, optional
        Extra files matched against input/CV file names from the config.
    """
    if dp_files is None:
        dp_files = []
    with open(machine_config, "r") as mcg:
        machine_config_dict = json.load(mcg)
    resources = machine_config_dict["resources"]
    tasks = machine_config_dict["tasks"]
    normalized_resources = {}
    for resource_type in resources.keys():
        normalized_resources[resource_type] = normalize_resources(resources[resource_type])

    label_op = Label(
        "label",
        CheckLabelInputs,
        PrepLabel,
        RunLabel,
        prep_config = normalized_resources[tasks["prep_label_config"]],
        run_config = normalized_resources[tasks["run_label_config"]],
        retry_times=1)

    # Normalize `confs` to a list: the conf_tags loop below indexes it
    # element-wise, and iterating a bare str would enumerate characters.
    if isinstance(confs, str):
        confs_artifact = upload_artifact(Path(confs), archive=None)
        confs = [confs]
    elif isinstance(confs, List):
        confs_artifact = upload_artifact([Path(p) for p in confs], archive=None)
    else:
        raise RuntimeError("Invalid type of `confs`.")

    if index_file is None:
        index_file_artifact = None
    else:
        index_file_artifact = upload_artifact(Path(index_file), archive=None)

    jdata = deepcopy(load_json(rid_config))
    cv_config = jdata["CV"]
    label_config = jdata["LabelMDConfig"]

    # File names declared in the RiD config; provided files are bucketed by
    # matching basenames against these declarations.
    inputfiles = []
    if "inputfile" in jdata["ExploreMDConfig"]:
        inputfiles.append(jdata["ExploreMDConfig"]["inputfile"])
    if "inputfile" in jdata["LabelMDConfig"]:
        inputfiles.append(jdata["LabelMDConfig"]["inputfile"])
    fe_models = []
    assert isinstance(jdata["init_models"], list), "model input should be list."
    for model in jdata["init_models"]:
        fe_models.append(model)
    cvfiles = []
    if "cv_file" in jdata["CV"]:
        assert isinstance(jdata["CV"]["cv_file"], list), "CV file input should be list."
        for file in jdata["CV"]["cv_file"]:
            cvfiles.append(file)
    dp_models = []
    if "dp_model" in jdata["ExploreMDConfig"]:
        assert isinstance(jdata["ExploreMDConfig"]["dp_model"], list), "model input should be list."
        for model in jdata["ExploreMDConfig"]["dp_model"]:
            dp_models.append(model)

    # Sort the supplied files into buckets.
    inputfile_list = []
    cvfile_list = []
    model_list = []
    dpfile_list = []
    if otherfiles is not None:
        for file in otherfiles:
            if os.path.basename(file) in inputfiles:
                inputfile_list.append(file)
            elif os.path.basename(file) in cvfiles:
                cvfile_list.append(file)
    for dp_file in dp_files:
        dpfile_list.append(dp_file)
    if models is not None:
        for model in models:
            if os.path.basename(model) in fe_models:
                model_list.append(model)
            elif os.path.basename(model) in dp_models:
                dpfile_list.append(model)

    if len(inputfile_list) == 0:
        inputfile_artifact = None
    else:
        inputfile_artifact = upload_artifact([Path(p) for p in inputfile_list], archive=None)
    if len(model_list) == 0:
        models_artifact = None
    else:
        models_artifact = upload_artifact([Path(p) for p in model_list], archive=None)
    if len(cvfile_list) == 0:
        cv_file_artifact = None
    else:
        cv_file_artifact = upload_artifact([Path(p) for p in cvfile_list], archive=None)
    # `dpfile_list` is always a list here, so upload whenever non-empty.
    # (The old `elif isinstance(dp_files, List)` raised spuriously when DP
    # models were collected from `models` while `dp_files` was None.)
    if len(dpfile_list) == 0:
        dp_files_artifact = None
    else:
        dp_files_artifact = upload_artifact([Path(p) for p in dpfile_list], archive=None)

    if forcefield is None:
        forcefield_artifact = None
    else:
        forcefield_artifact = upload_artifact(Path(forcefield), archive=None)
    if topology is None:
        top_artifact = None
    else:
        top_artifact = upload_artifact(Path(topology), archive=None)

    # One tag per conformation: file name -> task name.
    conf_tags = []
    for index in range(len(confs)):
        conf_tags.append({Path(confs[index]).name: label_task_pattern.format(index)})

    rid_steps = Step("rid-label",
        label_op,
        artifacts={
            "topology": top_artifact,
            "models": models_artifact,
            "forcefield": forcefield_artifact,
            "inputfile": inputfile_artifact,
            "confs": confs_artifact,
            "at": None,
            "index_file": index_file_artifact,
            "dp_files": dp_files_artifact,
            "cv_file": cv_file_artifact
        },
        parameters={
            "label_config": label_config,
            "cv_config": cv_config,
            "conf_tags": conf_tags,
            "block_tag": "000"
        },
    )
    wf = Workflow("rid-labeling", pod_gc_strategy="OnPodSuccess", parallelism=50, id=workflow_id_defined)
    wf.add(rid_steps)
    wf.submit()
from distutils.command.config import dump_file
import json
from pathlib import Path
from typing import List, Union, Optional
from rid.utils import load_json
import os
from dflow import (
Workflow,
Step,
upload_artifact
)
from dflow.python import upload_packages
from rid import SRC_ROOT
upload_packages.append(SRC_ROOT)
from rid.utils import normalize_resources
from rid.superop.exploration import Exploration
from rid.op.prep_exploration import PrepExplore
from rid.op.run_exploration import RunExplore
from rid.superop.label import Label
from rid.op.prep_label import PrepLabel, CheckLabelInputs
from rid.op.run_label import RunLabel
from rid.op.label_stats import LabelStats
from rid.superop.selector import Selector
from rid.op.prep_select import PrepSelect
from rid.op.run_select import RunSelect
from rid.superop.data import DataGenerator
from rid.op.prep_data import CollectData, MergeData
from rid.superop.blocks import IterBlock, InitBlock
from rid.op.run_train import TrainModel
from rid.op.run_model_devi import RunModelDevi
from rid.op.adjust_trust_level import AdjustTrustLevel
from rid.flow.loop import ReinforcedDynamics
def prep_rid_op(
    prep_exploration_config,
    run_exploration_config,
    prep_label_config,
    run_label_config,
    prep_select_config,
    run_select_config,
    prep_data_config,
    run_train_config,
    model_devi_config,
    workflow_steps_config,
    retry_times
):
    """Assemble the full RiD super-operator from its sub-operators.

    Wires exploration, labeling, selection and data-generation OPs into an
    init block plus an iterated block, and wraps them in the
    ``ReinforcedDynamics`` loop. Each ``*_config`` argument is a normalized
    resource/executor description for the corresponding task; ``retry_times``
    is forwarded to every sub-operator.

    Returns
    -------
    ReinforcedDynamics
        The composed super-OP, ready to be used as a dflow Step template.
    """
    exploration_op = Exploration(
        "exploration",
        PrepExplore,
        RunExplore,
        prep_exploration_config,
        run_exploration_config,
        retry_times=retry_times)
    # NOTE(review): Label here receives LabelStats as an extra positional
    # argument, while label.py constructs Label without it — confirm the
    # Label superop signature accepts both forms.
    label_op = Label(
        "label",
        CheckLabelInputs,
        PrepLabel,
        RunLabel,
        LabelStats,
        prep_label_config,
        run_label_config,
        retry_times=retry_times)
    select_op = Selector(
        "select",
        PrepSelect,
        RunSelect,
        prep_select_config,
        run_select_config,
        retry_times=retry_times)
    data_op = DataGenerator(
        "gen-data",
        CollectData,
        MergeData,
        prep_data_config,
        retry_times=retry_times)
    # First iteration: no trust-level adjustment yet.
    init_block_op = InitBlock(
        "init-block",
        exploration_op,
        select_op,
        label_op,
        data_op,
        TrainModel,
        RunModelDevi,
        run_train_config,
        model_devi_config,
        retry_times=retry_times
    )
    # Subsequent iterations additionally adjust the trust levels.
    block_op = IterBlock(
        "rid-block",
        exploration_op,
        select_op,
        label_op,
        data_op,
        AdjustTrustLevel,
        TrainModel,
        RunModelDevi,
        workflow_steps_config,
        run_train_config,
        model_devi_config,
        retry_times=retry_times)
    rid_op = ReinforcedDynamics(
        "reinforced-dynamics",
        init_block_op,
        block_op,
        workflow_steps_config
    )
    return rid_op
def submit_rid(
    confs: Union[str, List[str]],
    topology: Optional[str],
    rid_config: str,
    machine_config: str,
    workflow_id_defined: Optional[str] = None,
    models: Optional[Union[str, List[str]]] = None,
    forcefield: Optional[str] = None,
    index_file: Optional[str] = None,
    data_file: Optional[str] = None,
    dp_files: Optional[List[str]] = None,
    otherfiles: Optional[List[str]] = None
):
    """Submit the complete reinforced-dynamics workflow.

    Parameters
    ----------
    confs : str or list of str
        Initial conformation file(s).
    topology : str, optional
        Topology file for the MD engine.
    rid_config : str
        Path to the RiD JSON configuration.
    machine_config : str
        Path to the machine/resources JSON configuration.
    workflow_id_defined : str, optional
        Explicit id to assign to the workflow.
    models : list of str, optional
        Model files; split into free-energy and DP models by basename.
    forcefield, index_file : str, optional
        Optional force-field and GROMACS index files.
    data_file : str, optional
        Pre-existing training data file.
    dp_files : list of str, optional
        Extra DeepMD files. Default changed from the mutable ``[]`` to
        ``None`` (normalized below); behavior is unchanged.
    otherfiles : list of str, optional
        Extra files matched against input/CV file names from the config.
    """
    if dp_files is None:
        dp_files = []
    with open(machine_config, "r") as mcg:
        machine_config_dict = json.load(mcg)
    resources = machine_config_dict["resources"]
    tasks = machine_config_dict["tasks"]
    normalized_resources = {}
    for resource_type in resources.keys():
        normalized_resources[resource_type] = normalize_resources(resources[resource_type])

    rid_op = prep_rid_op(
        prep_exploration_config = normalized_resources[tasks["prep_exploration_config"]],
        run_exploration_config = normalized_resources[tasks["run_exploration_config"]],
        prep_label_config = normalized_resources[tasks["prep_label_config"]],
        run_label_config = normalized_resources[tasks["run_label_config"]],
        prep_select_config = normalized_resources[tasks["prep_select_config"]],
        run_select_config = normalized_resources[tasks["run_select_config"]],
        prep_data_config = normalized_resources[tasks["prep_data_config"]],
        run_train_config = normalized_resources[tasks["run_train_config"]],
        model_devi_config = normalized_resources[tasks["model_devi_config"]],
        workflow_steps_config = normalized_resources[tasks["workflow_steps_config"]],
        retry_times=1
    )

    if isinstance(confs, str):
        confs_artifact = upload_artifact(Path(confs), archive=None)
    elif isinstance(confs, List):
        confs_artifact = upload_artifact([Path(p) for p in confs], archive=None)
    else:
        raise RuntimeError("Invalid type of `confs`.")

    if index_file is None:
        index_file_artifact = None
    else:
        index_file_artifact = upload_artifact(Path(index_file), archive=None)

    jdata = load_json(rid_config)

    # File names declared in the RiD config; provided files are bucketed by
    # matching basenames against these declarations.
    inputfiles = []
    if "inputfile" in jdata["ExploreMDConfig"]:
        inputfiles.append(jdata["ExploreMDConfig"]["inputfile"])
    if "inputfile" in jdata["LabelMDConfig"]:
        inputfiles.append(jdata["LabelMDConfig"]["inputfile"])
    fe_models = []
    assert isinstance(jdata["init_models"], list), "model input should be list."
    for model in jdata["init_models"]:
        fe_models.append(model)
    cvfiles = []
    if "cv_file" in jdata["CV"]:
        assert isinstance(jdata["CV"]["cv_file"], list), "CV file input should be list."
        for file in jdata["CV"]["cv_file"]:
            cvfiles.append(file)
    dp_models = []
    if "dp_model" in jdata["ExploreMDConfig"]:
        assert isinstance(jdata["ExploreMDConfig"]["dp_model"], list), "model input should be list."
        for model in jdata["ExploreMDConfig"]["dp_model"]:
            dp_models.append(model)

    # Sort the supplied files into buckets.
    inputfile_list = []
    cvfile_list = []
    model_list = []
    dpfile_list = []
    if otherfiles is not None:
        for file in otherfiles:
            if os.path.basename(file) in inputfiles:
                inputfile_list.append(file)
            elif os.path.basename(file) in cvfiles:
                cvfile_list.append(file)
    for dp_file in dp_files:
        dpfile_list.append(dp_file)
    if models is not None:
        for model in models:
            if os.path.basename(model) in fe_models:
                model_list.append(model)
            elif os.path.basename(model) in dp_models:
                dpfile_list.append(model)

    if len(inputfile_list) == 0:
        inputfile_artifact = None
    else:
        inputfile_artifact = upload_artifact([Path(p) for p in inputfile_list], archive=None)
    if len(model_list) == 0:
        models_artifact = None
    else:
        models_artifact = upload_artifact([Path(p) for p in model_list], archive=None)
    if len(cvfile_list) == 0:
        cv_file_artifact = None
    else:
        cv_file_artifact = upload_artifact([Path(p) for p in cvfile_list], archive=None)
    # `dpfile_list` is always a list here, so upload whenever non-empty.
    # (The old `elif isinstance(dp_files, List)` raised spuriously when DP
    # models were collected from `models` while `dp_files` was None.)
    if len(dpfile_list) == 0:
        dp_files_artifact = None
    else:
        dp_files_artifact = upload_artifact([Path(p) for p in dpfile_list], archive=None)

    if forcefield is None:
        forcefield_artifact = None
    else:
        forcefield_artifact = upload_artifact(Path(forcefield), archive=None)
    if topology is None:
        top_artifact = None
    else:
        top_artifact = upload_artifact(Path(topology), archive=None)
    if data_file is None:
        data_artifact = None
    else:
        data_artifact = upload_artifact(Path(data_file), archive=None)
    rid_config = upload_artifact(Path(rid_config), archive=None)

    rid_steps = Step("rid-procedure",
        rid_op,
        artifacts={
            "topology": top_artifact,
            "confs": confs_artifact,
            "rid_config": rid_config,
            "models": models_artifact,
            "forcefield": forcefield_artifact,
            "index_file": index_file_artifact,
            "inputfile": inputfile_artifact,
            "data_file": data_artifact,
            "dp_files": dp_files_artifact,
            "cv_file": cv_file_artifact
        },
        parameters={}
    )
    wf = Workflow("reinforced-dynamics", pod_gc_strategy="OnPodSuccess", parallelism=50, id=workflow_id_defined)
    wf.add(rid_steps)
    wf.submit()
import json
from pathlib import Path
from typing import List, Union, Optional
from rid.utils import load_json
from copy import deepcopy
import os
from dflow import (
Workflow,
Step,
upload_artifact
)
from dflow.python import upload_packages
from rid import SRC_ROOT
upload_packages.append(SRC_ROOT)
from rid.utils import normalize_resources
from rid.superop.mcmc import MCMC
from rid.op.mcmc_run import MCMCRun
from rid.op.mcmc_plot import MCMCPlot
def redim_rid(
        rid_config: str,
        machine_config: str,
        models: Optional[Union[str, List[str]]] = None,
        plm_out: Optional[Union[str, List[str]]] = None,
        workflow_id_defined: Optional[str] = None
    ):
    """Submit a standalone MCMC dimension-reduction workflow.

    Parameters
    ----------
    rid_config : path to the rid JSON config; must contain "MCMC_Config"
        and an "init_models" list of model file names.
    machine_config : path to the machine JSON config with "resources"
        and "tasks" sections.
    models : model file path(s); only those whose basename appears in
        the config's "init_models" are uploaded.
    plm_out : plumed output file(s) consumed by the MCMC run; may be None.
    workflow_id_defined : optional explicit workflow id.
    """
    with open(machine_config, "r") as mcg:
        machine_config_dict = json.load(mcg)
    resources = machine_config_dict["resources"]
    tasks = machine_config_dict["tasks"]
    normalized_resources = {}
    for resource_type in resources.keys():
        normalized_resources[resource_type] = normalize_resources(resources[resource_type])

    mcmc_op = MCMC(
        "mcmc",
        MCMCRun,
        MCMCPlot,
        run_config = normalized_resources[tasks["mcmc_run_config"]],
        plot_config = normalized_resources[tasks["mcmc_plot_config"]],
        retry_times=1)

    jdata = deepcopy(load_json(rid_config))
    mcmc_config = jdata["MCMC_Config"]
    fe_models = []
    assert isinstance(jdata["init_models"], list), "model input should be list."
    for model in jdata["init_models"]:
        fe_models.append(model)

    # keep only the provided models that are declared in the config
    model_list = []
    if models is not None:
        for model in models:
            if os.path.basename(model) in fe_models:
                model_list.append(model)

    if len(model_list) == 0:
        models_artifact = None
    else:
        models_artifact = upload_artifact([Path(p) for p in model_list], archive=None)

    plm_artifact = None
    # bug fix: `plm_out` defaults to None; the old `len(plm_out)` raised
    # TypeError when it was omitted.  Truthiness covers None, "" and [].
    if plm_out:
        if isinstance(plm_out, str):
            plm_artifact = upload_artifact(Path(plm_out), archive=None)
        elif isinstance(plm_out, list):
            plm_artifact = upload_artifact([Path(p) for p in plm_out], archive=None)

    # one zero-padded task name per selected model
    task_names = ["%03d" % index for index in range(len(model_list))]

    rid_steps = Step("rid-mcmc",
            mcmc_op,
            artifacts={
                "models": models_artifact,
                "plm_out": plm_artifact
            },
            parameters={
                "mcmc_config": mcmc_config,
                "task_names": task_names,
                "block_tag" : "000"
            },
        )
    wf = Workflow("rid-mcmc", pod_gc_strategy="OnPodSuccess", parallelism=10, id = workflow_id_defined)
    wf.add(rid_steps)
    wf.submit()
import json
from pathlib import Path
from typing import List, Union, Optional
from rid.utils import load_json
from copy import deepcopy
import os
from dflow import (
Workflow,
Step,
upload_artifact
)
from dflow.python import upload_packages
from rid import SRC_ROOT
upload_packages.append(SRC_ROOT)
from rid.utils import normalize_resources
from rid.superop.mcmc import MCMC
from rid.op.mcmc_run import MCMCRun
from rid.op.mcmc_plot import MCMCPlot
def reredim_rid(
        workflow_id: str,
        rid_config: str,
        machine_config: str,
        models: Optional[Union[str, List[str]]] = None,
        plm_out: Optional[Union[str, List[str]]] = None,
        workflow_id_defined: Optional[str] = None,
        pod: Optional[str] = None
    ):
    """Resubmit an MCMC dimension-reduction workflow, reusing succeeded pods.

    Parameters
    ----------
    workflow_id : id of the previous workflow whose succeeded steps are reused.
    rid_config : path to the rid JSON config ("MCMC_Config", "init_models").
    machine_config : path to the machine JSON config.
    models : model file path(s); filtered against the config's "init_models".
    plm_out : plumed output file(s); may be None.
    workflow_id_defined : optional explicit id for the new workflow.
    pod : optional pod key suffix; steps from that pod onwards are re-run.
    """
    with open(machine_config, "r") as mcg:
        machine_config_dict = json.load(mcg)
    resources = machine_config_dict["resources"]
    tasks = machine_config_dict["tasks"]
    normalized_resources = {}
    for resource_type in resources.keys():
        normalized_resources[resource_type] = normalize_resources(resources[resource_type])

    mcmc_op = MCMC(
        "mcmc",
        MCMCRun,
        MCMCPlot,
        run_config = normalized_resources[tasks["mcmc_run_config"]],
        plot_config = normalized_resources[tasks["mcmc_plot_config"]],
        retry_times=None)

    jdata = deepcopy(load_json(rid_config))
    mcmc_config = jdata["MCMC_Config"]
    fe_models = []
    assert isinstance(jdata["init_models"], list), "model input should be list."
    for model in jdata["init_models"]:
        fe_models.append(model)

    # keep only the provided models that are declared in the config
    model_list = []
    if models is not None:
        for model in models:
            if os.path.basename(model) in fe_models:
                model_list.append(model)

    if len(model_list) == 0:
        models_artifact = None
    else:
        models_artifact = upload_artifact([Path(p) for p in model_list], archive=None)

    plm_artifact = None
    # bug fix: `plm_out` defaults to None; the old `len(plm_out)` raised
    # TypeError when it was omitted.  Truthiness covers None, "" and [].
    if plm_out:
        if isinstance(plm_out, str):
            plm_artifact = upload_artifact(Path(plm_out), archive=None)
        elif isinstance(plm_out, list):
            plm_artifact = upload_artifact([Path(p) for p in plm_out], archive=None)

    task_names = ["%03d" % index for index in range(len(model_list))]

    rid_steps = Step("rid-mcmc",
            mcmc_op,
            artifacts={
                "models": models_artifact,
                "plm_out": plm_artifact
            },
            parameters={
                "mcmc_config": mcmc_config,
                "task_names": task_names,
                "block_tag" : "000"
            },
        )

    # Collect steps of the old workflow that may be reused.  Once
    # `restart_flag` drops to 0 (the requested pod was reached, or a pod
    # failed), subsequent pod steps are excluded so they run again.
    old_workflow = Workflow(id=workflow_id)
    all_steps = old_workflow.query_step()
    succeeded_steps = []
    restart_flag = 1
    for step in all_steps:
        if step["type"] == "Pod":
            pod_key = step["key"]
            if pod_key is not None:
                pod_key_list = pod_key.split("-")
                pod_step_1 = "-".join(pod_key_list[1:-1])
                pod_step_2 = "-".join(pod_key_list[1:])
                if pod is not None:
                    # re-run everything from the user-specified pod onwards
                    if pod_step_1 == pod or pod_step_2 == pod:
                        restart_flag = 0
                else:
                    # re-run from the first non-succeeded pod onwards
                    if step["phase"] != "Succeeded":
                        restart_flag = 0
                    else:
                        restart_flag = 1
        if restart_flag == 1:
            succeeded_steps.append(step)

    wf = Workflow("rid-mcmc-continue", pod_gc_strategy="OnPodSuccess", parallelism=10, id = workflow_id_defined)
    wf.add(rid_steps)
    wf.submit(reuse_step=succeeded_steps)
import os
import sys
from typing import List, Dict, Sequence, Union
import logging
import mdtraj as md
from mdtraj.geometry.dihedral import _atom_sequence, PHI_ATOMS, PSI_ATOMS
import numpy as np
from rid.constants import sel_gro_name_gmx, sel_gro_name
from rid.common.gromacs.trjconv import slice_trjconv
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
def _zip_dict(resi_indices, atom_indices):
    """Map each residue index to the atom-index row at the same position.

    `atom_indices` rows are numpy arrays; they are converted to plain lists.
    """
    return {
        resi: atom_indices[pos].tolist()
        for pos, resi in enumerate(resi_indices)
    }
def distance(r1, r2):
    """Return the Euclidean distance between two 3D points.

    Only the first three components of each input are used.
    """
    return np.sqrt(sum((r1[k] - r2[k]) ** 2 for k in range(3)))
def get_dihedral_info(file_path: str):
    """Return phi/psi dihedral atom quadruplets for every protein residue.

    The returned dict maps 1-based residue index -> {"phi": [a1..a4],
    "psi": [a1..a4]} with 1-based atom ids (plumed convention); an angle key
    is present only when mdtraj found that dihedral for the residue.
    """
    traj = md.load(file_path)
    top = traj.topology
    psi_found_indices, psi_atom_indices = _atom_sequence(top, PSI_ATOMS)
    # shift mdtraj's 0-based residue/atom indices to 1-based numbering
    psi_info = _zip_dict(psi_found_indices + 1, psi_atom_indices + 1)
    phi_found_indices, phi_atom_indices = _atom_sequence(top, PHI_ATOMS)
    phi_info = _zip_dict(phi_found_indices + 1, phi_atom_indices + 1)
    dihedral_angle = {}
    for residue in top.residues:
        if residue.is_protein:
            dihedral_angle[residue.index+1] = {}
            if residue.index in phi_found_indices:
                dihedral_angle[residue.index+1]["phi"] = phi_info[residue.index+1]
            if residue.index in psi_found_indices:
                dihedral_angle[residue.index+1]["psi"] = psi_info[residue.index+1]
    return dihedral_angle
def get_dihedral_from_resid(file_path: str, selected_resid: List[int]) -> Dict:
    """Return phi/psi dihedral atom quadruplets for selected residues.

    `selected_resid` uses 1-based residue numbering; an empty selection
    yields an empty dict.  Non-protein residues are skipped silently.
    Atom ids in the result are 1-based (plumed convention).
    """
    if len(selected_resid) == 0:
        return {}
    traj = md.load(file_path)
    top = traj.topology
    psi_found_indices, psi_atom_indices = _atom_sequence(top, PSI_ATOMS)
    # shift mdtraj's 0-based residue/atom indices to 1-based numbering
    psi_info = _zip_dict(psi_found_indices + 1, psi_atom_indices + 1)
    phi_found_indices, phi_atom_indices = _atom_sequence(top, PHI_ATOMS)
    phi_info = _zip_dict(phi_found_indices + 1, phi_atom_indices + 1)
    selected_dihedral_angle = {}
    residue_list = list(top.residues)
    for sid in selected_resid:
        # sid is 1-based, the list is 0-based
        residue = residue_list[sid-1]
        if residue.is_protein:
            selected_dihedral_angle[residue.index+1] = {}
            if residue.index in phi_found_indices:
                selected_dihedral_angle[residue.index+1]["phi"] = phi_info[residue.index+1]
            if residue.index in psi_found_indices:
                selected_dihedral_angle[residue.index+1]["psi"] = psi_info[residue.index+1]
    num_cv = len(selected_dihedral_angle.keys())
    logger.info(f"{num_cv} CVs have been created.")
    return selected_dihedral_angle
def get_distance_from_atomid(file_path: str, selected_atomid: List[List[int]]) -> Dict:
    """Measure distances between selected atom pairs of a structure file.

    `selected_atomid` is a list of 1-based [i, j] atom-id pairs (the previous
    `List[int]` annotation was misleading).  The returned dict maps the
    "i j" string to the measured distance of the first frame.
    """
    if len(selected_atomid) == 0:
        return {}
    top = md.load(file_path)  # NOTE(review): despite the name this is a Trajectory
    selected_distance = {}
    for sid in selected_atomid:
        assert len(sid) == 2, "No valid distance list created."
        # convert to 0-based atom ids for mdtraj; take frame 0, pair 0
        d_cv = md.compute_distances(top,atom_pairs=np.array([sid[0]-1,sid[1]-1]).reshape(-1,2),periodic=True)[0][0]
        selected_distance["%s %s"%(sid[0],sid[1])] = d_cv
    num_cv = len(selected_distance.keys())
    logger.info(f"{num_cv} CVs have been created.")
    return selected_distance
def slice_xtc_mdtraj(
        xtc: str,
        top: str,
        walker_idx: int,
        selected_idx: Sequence,
        output_format: str
):
    """Extract selected frames from an xtc trajectory into .gro files.

    `output_format` is a format string with `walker` and `idx` placeholders.
    """
    logger.info("slicing trajectories ...")
    trajectory = md.load_xtc(xtc, top=top)
    for frame_id in selected_idx:
        destination = output_format.format(walker=walker_idx, idx=frame_id)
        trajectory[frame_id].save_gro(destination)
def slice_xtc(
        xtc: str,
        top: str,
        walker_idx: int,
        selected_idx,
        output: str,
        style: str = "gmx"
):
    """Dispatch trajectory slicing to gmx trjconv ("gmx") or mdtraj ("mdtraj")."""
    if style == "gmx":
        slice_trjconv(
            xtc=xtc,
            top=top,
            selected_time=selected_idx,
            output=output
        )
        return
    if style == "mdtraj":
        slice_xtc_mdtraj(
            xtc=xtc,
            top=top,
            walker_idx=walker_idx,
            selected_idx=selected_idx,
            output_format=output
        )
        return
    raise RuntimeError("Unknown Style for Slicing Trajectory.")
import json
import os
import sys
import logging
import numpy as np
from typing import List, Union, Tuple, Dict, Optional, Sequence
from rid.utils import list_to_string
from rid.common.mol import get_dihedral_from_resid, get_distance_from_atomid
from rid.common.plumed.plumed_constant import (
dihedral_name,
distance_name,
dihedral_def_from_atoms,
distance_def_from_atoms,
deepfe_def,
print_def,
restraint_def,
upper_def,
lower_def
)
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
angle_id = {
"phi": 0,
"psi": 1
}
def make_restraint(
    name: str,
    arg: str,
    kappa: Union[str, int, float],
    at: Union[str, int, float]
):
    """Render a single plumed RESTRAINT line from the module template."""
    return restraint_def.format(name=name, arg=arg, kappa=kappa, at=at)
def make_restraint_list(
    cv_list: List[str],
    kappa: List[Union[int, float, str]],
    at: List[Union[int, float, str]]
) -> Tuple[List, List]:
    """Build one RESTRAINT definition per CV.

    Returns (restraint_lines, restraint_names); each name is "res-" plus
    the CV name.  `kappa` and `at` must match `cv_list` in length.
    (A leftover debug `print(at)` was removed.)
    """
    res_names = []
    res_list = []
    assert len(cv_list) == len(kappa), "Make sure `kappa` and `cv_names` have the same length."
    assert len(cv_list) == len(at), "Make sure `at` and `cv_names` have the same length."
    for idx, cv_print in enumerate(cv_list):
        res_name = "res-" + cv_print
        res_names.append(res_name)
        res_list.append(make_restraint(res_name, cv_print, kappa[idx], at[idx]))
    return res_list, res_names
def make_deepfe_bias(
    cv_list: List[str],
    trust_lvl_1: float = 1.0,
    trust_lvl_2: float = 2.0,
    model_list: List[str] = ["graph.pb"]
) -> str:
    """Render the DEEPFE bias line; an empty string when no models are given."""
    if not model_list:
        return ""
    return deepfe_def.format(
        trust_lvl_1=trust_lvl_1,
        trust_lvl_2=trust_lvl_2,
        model=list_to_string(model_list, ","),
        arg=list_to_string(cv_list, ","),
    )
def make_print_bias(
    name_list,
    stride,
    file_name,
    model_list
) -> str:
    """Render the PRINT line for a (possibly) biased run.

    When models are present, "dpfe.bias" is prepended to the printed columns;
    otherwise the first CV name is duplicated so the column count is the same.
    Bug fix: work on a copy so the caller's `name_list` is no longer mutated.
    """
    columns = list(name_list)
    if len(model_list) != 0:
        columns.insert(0, "dpfe.bias")
    else:
        columns.insert(0, columns[0])
    return print_def.format(
        stride=stride,
        arg=list_to_string(columns, ","),
        file=file_name
    )
def make_print(
    name_list,
    stride,
    file_name
) -> str:
    """Render a plumed PRINT line for the given CV names."""
    joined_args = list_to_string(name_list, ",")
    return print_def.format(stride=stride, arg=joined_args, file=file_name)
def make_wholemolecules(atom_index):
    """Render a WHOLEMOLECULES line with ENTITY0 set to the given atom indices."""
    entity = list_to_string(atom_index, ",")
    return "WHOLEMOLECULES ENTITY0={}\n".format(entity)
def user_plumed_def(cv_file, pstride, pfile):
    """Load a user-supplied plumed file and rewrite its PRINT directive.

    Everything up to (excluding) the first uncommented PRINT line is kept as
    the CV definition text; the PRINT line itself is rewritten with the given
    stride and output file.  Returns (cv_text, cv_names, print_line).
    Raises RuntimeError when the file has no definitions or no PRINT line.
    (A leftover debug `print("cv_file name", cv_file)` was removed.)
    """
    logger.info("Custom CVs are created from plumed files.")
    ret = ""
    cv_names = []
    print_content = None
    with open(cv_file, 'r') as fp:
        for line in fp.readlines():
            if ("PRINT" in line) and ("#" not in line):
                print_content = line + "\n"
                # assumes the line looks like "PRINT STRIDE=... ARG=a,b FILE=..."
                # with ARG as the third token — TODO confirm for all user files
                cv_names = line.split()[2].split("=")[1].split(",")
                break
            ret += line
    if ret == "" or cv_names == []:
        raise RuntimeError("Invalid customed plumed files.")
    if print_content is not None:
        assert len(print_content.split(",")) == len(cv_names), "There are {} CVs defined in the plumed file, while {} CVs are printed.".format(len(cv_names), len(print_content.split(",")) )
        print_content_list = print_content.split()
        # last token is FILE=..., second token is STRIDE=... by convention
        print_content_list[-1] = "FILE={}".format(pfile)
        print_content_list[1] = "STRIDE={}".format(str(pstride))
        print_content = " ".join(print_content_list)
    return ret, cv_names, print_content
def make_torsion(
    name: str,
    atom_list: List[Union[int, str]]
) -> str:
    """Render a TORSION CV definition from its four atom ids."""
    assert len(atom_list) == 4, f"Make sure dihedral angle defined by 4 atoms, not {len(atom_list)}."
    a1, a2, a3, a4 = atom_list
    return dihedral_def_from_atoms.format(name=name, a1=a1, a2=a2, a3=a3, a4=a4)
def make_distance(
    name: str,
    atom_list: List[Union[int, str]]
) -> str:
    """Render a DISTANCE CV definition from its two atom ids."""
    assert len(atom_list) == 2, f"Make sure distance defined by 2 atoms, not {len(atom_list)}."
    a1, a2 = atom_list
    return distance_def_from_atoms.format(name=name, a1=a1, a2=a2)
def make_torsion_name(resid: int, angid: int):
    """Build the canonical torsion CV name for a residue/angle-id pair."""
    return dihedral_name.format(resid=resid, angid=angid)
def make_distance_name(atomids: list):
    """Build the canonical distance CV name from a pair of atom ids."""
    first, second = int(atomids[0]), int(atomids[1])
    return distance_name.format(atomid1=first, atomid2=second)
def make_torsion_list(
    dihedral_info: Dict,
) -> Tuple[List, List]:
    """Render TORSION definitions for every residue/angle in `dihedral_info`.

    `dihedral_info` maps residue id -> {"phi"/"psi": [4 atom ids]}.
    Returns (definitions, names) in matching order.
    """
    definitions = []
    names = []
    for resid, angles in dihedral_info.items():
        for ang, atoms in angles.items():
            cv_name = make_torsion_name(resid=resid, angid=angle_id[ang])
            names.append(cv_name)
            definitions.append(make_torsion(name=cv_name, atom_list=list(atoms)))
    return definitions, names
def make_distance_list(
    distance_info: Dict,
) -> Tuple[List, List]:
    """Render DISTANCE definitions for every atom pair in `distance_info`.

    Keys are "id1 id2" strings; the dict values (measured distances) are not
    needed to build the plumed definition and are ignored here (the unused
    local `dis` was removed).  Returns (definitions, names) in matching order.
    """
    distance_list = []
    distance_name_list = []
    for atomids in distance_info.keys():
        atom_list = atomids.split(" ")
        cv_name = make_distance_name(atomids=atom_list)
        distance_name_list.append(cv_name)
        distance_list.append(make_distance(
            name=cv_name,
            atom_list=atom_list)
        )
    return distance_list, distance_name_list
def make_torsion_list_from_file(
    file_path: str,
    selected_resid: List[int]
) -> Tuple[List, List]:
    """Create torsion CV definitions for selected residues of a structure file."""
    dihedral_info = get_dihedral_from_resid(file_path, selected_resid)
    logger.info("Create CVs (torsion) from selected residue ids.")
    assert len(dihedral_info.keys()) > 0, "No valid CVs created."
    return make_torsion_list(dihedral_info)
def make_distance_list_from_file(
    file_path: str,
    selected_atomid: List[int]
) -> Tuple[List, List]:
    """Create distance CV definitions for selected atom pairs of a structure file."""
    distance_info = get_distance_from_atomid(file_path, selected_atomid)
    logger.info("Create CVs (distance) from selected atom ids.")
    assert len(distance_info.keys()) > 0, "No valid CVs created."
    return make_distance_list(distance_info)
def make_wall_list(
    cv_name_list,
    wall_list,
    iteration
):
    """Render UPPER_WALLS/LOWER_WALLS lines, ramping the wall position.

    Each wall spec is (type, start, end, iterations, kappa): the restraint
    center moves linearly from `start` to `end` over `iterations` iterations
    and wraps around afterwards.  Entries with type "NONE" are skipped.
    Bug fix: an unrecognized wall type previously crashed with
    UnboundLocalError on `line`; it now raises an explicit RuntimeError.
    """
    ret = ""
    for index in range(len(wall_list)):
        wall_type = wall_list[index][0].upper()
        if wall_type == "NONE":
            continue
        start = float(wall_list[index][1])
        end = float(wall_list[index][2])
        iterations = float(wall_list[index][3])
        kappa = float(wall_list[index][4])
        # assumes iterations > 1; a single-iteration ramp would divide
        # by zero below — TODO confirm upstream validation
        iteration_index = (iteration-1) % (iterations)
        at = start + (end - start)/(iterations-1)*(iteration_index)
        if wall_type == "UPPER":
            line = upper_def.format(arg = cv_name_list[index], at=at,kappa=kappa,name="upper%s"%index)
        elif wall_type == "LOWER":
            line = lower_def.format(arg = cv_name_list[index], at=at,kappa=kappa,name="lower%s"%index)
        else:
            raise RuntimeError("Unknown wall type: %s" % wall_list[index][0])
        ret += line+"\n"
    return ret
def make_restraint_plumed(
    conf: Optional[str] = None,
    cv_file: Optional[List[str]] = None,
    selected_resid: Optional[List[int]] = None,
    selected_atomid: Optional[List[int]] = None,
    kappa: Union[int, float, Sequence, np.ndarray] = 0.5,
    at: Union[int, float, Sequence, np.ndarray] = 1.0,
    stride: int = 100,
    output: str = "plm.out",
    mode: str = "torsion"
):
    """Compose a full plumed input for a restrained run.

    The input consists of CV definitions (from `conf` + selections, or from
    user files in "custom" mode), one RESTRAINT per CV (scalar `kappa`/`at`
    are broadcast to all CVs) and a PRINT directive writing to `output`.
    """
    content_list = []
    if mode == "torsion":
        cv_content_list, cv_name_list = \
            make_torsion_list_from_file(conf, selected_resid)
        content_list += cv_content_list
    elif mode == "distance":
        cv_content_list, cv_name_list = \
            make_distance_list_from_file(conf, selected_atomid)
        content_list += cv_content_list
    elif mode == "custom":
        # NOTE(review): with several non-pdb files, the definitions of all
        # files accumulate but cv_name_list keeps only the last file's
        # names — confirm that multiple custom files are intended.
        for cv_file_ in cv_file:
            if not os.path.basename(cv_file_).endswith("pdb"):
                ret, cv_name_list, _ = user_plumed_def(cv_file_, stride, output)
                content_list.append(ret)
    else:
        raise RuntimeError("Unknown mode for making plumed files.")
    # broadcast scalar kappa/at to every CV
    if isinstance(kappa, int) or isinstance(kappa, float):
        kappa = [kappa for _ in range(len(cv_name_list))]
    if isinstance(at, int) or isinstance(at, float):
        at = [at for _ in range(len(cv_name_list))]
    res_list, _ = make_restraint_list(
        cv_name_list, kappa, at
    )
    content_list += res_list
    content_list.append(make_print(cv_name_list, stride, output))
    return list_to_string(content_list, split_sign="\n")
def make_constraint_plumed(
    conf: Optional[str] = None,
    cv_file: Optional[List[str]] = None,
    selected_atomid: Optional[List[int]] = None,
    stride: int = 100,
    output: str = "plm.out",
    mode: str = "distance"
):
    """Compose a plumed input that only defines and prints CVs (no bias).

    CVs come either from atom-pair distances of `conf` ("distance" mode) or
    from user plumed files ("custom" mode).
    """
    content_list = []
    if mode == "distance":
        cv_content_list, cv_name_list = \
            make_distance_list_from_file(conf, selected_atomid)
        content_list += cv_content_list
    elif mode == "custom":
        # NOTE(review): cv_name_list keeps only the last non-pdb file's
        # names — confirm behavior for multiple custom files.
        for cv_file_ in cv_file:
            if not os.path.basename(cv_file_).endswith("pdb"):
                ret, cv_name_list, _ = user_plumed_def(cv_file_, stride, output)
                content_list.append(ret)
    else:
        raise RuntimeError("Unknown mode for making plumed files.")
    content_list.append(make_print(cv_name_list, stride, output))
    return list_to_string(content_list, split_sign="\n")
def make_deepfe_plumed(
    conf: Optional[str] = None,
    cv_file: Optional[List[str]] = None,
    selected_resid: Optional[List[int]] = None,
    selected_atomid: Optional[List[int]] = None,
    trust_lvl_1: float = 1.0,
    trust_lvl_2: float = 2.0,
    model_list: List[str] = ["graph.pb"],
    stride: int = 100,
    output: str = "plm.out",
    mode: str = "torsion",
    wall_list: Optional[List[str]] = None,
    iteration: Optional[str] = None
):
    """Compose a plumed input for a DEEPFE-biased run.

    Builds CV definitions (torsion/distance/custom), optional wall
    restraints ramped by `iteration`, the DEEPFE bias over all CVs and
    models, and a PRINT line that includes the bias column when models
    are present.
    """
    content_list = []
    if mode == "torsion":
        cv_content_list, cv_name_list = \
            make_torsion_list_from_file(conf, selected_resid)
        content_list += cv_content_list
    elif mode == "distance":
        cv_content_list, cv_name_list = \
            make_distance_list_from_file(conf, selected_atomid)
        content_list += cv_content_list
    elif mode == "custom":
        # NOTE(review): cv_name_list keeps only the last non-pdb file's
        # names — confirm behavior for multiple custom files.
        for cv_file_ in cv_file:
            if not os.path.basename(cv_file_).endswith("pdb"):
                ret, cv_name_list, _ = user_plumed_def(cv_file_, stride, output)
                content_list.append(ret)
    else:
        raise RuntimeError("Unknown mode for making plumed files.")
    if wall_list is not None:
        ret = make_wall_list(cv_name_list, wall_list, iteration)
        content_list.append(ret)
    deepfe_string = make_deepfe_bias(cv_name_list, trust_lvl_1, trust_lvl_2, model_list)
    content_list.append(deepfe_string)
    content_list.append(make_print_bias(cv_name_list, stride, output, model_list))
    return list_to_string(content_list, split_sign="\n")
def get_cv_name(
    conf: Optional[str] = None,
    cv_file: Optional[List[str]] = None,
    selected_resid: Optional[List[int]] = None,
    selected_atomid: Optional[List[int]] = None,
    stride: int = 100,
    mode: str = "torsion"
):
    """Return only the CV names that the corresponding make_*_plumed call
    would define, without writing any plumed content."""
    if mode == "torsion":
        _, cv_name_list = \
            make_torsion_list_from_file(conf, selected_resid)
    elif mode == "distance":
        _, cv_name_list = make_distance_list_from_file(conf, selected_atomid)
    elif mode == "custom":
        # NOTE(review): only the last non-pdb file's names survive —
        # mirrors the make_*_plumed functions; confirm intent.
        for cv_file_ in cv_file:
            if not os.path.basename(cv_file_).endswith("pdb"):
                _, cv_name_list, _ = user_plumed_def(cv_file_, stride, "test.out")
    else:
        raise RuntimeError("Unknown mode for making plumed files.")
    return cv_name_list
from turtle import update
from rid.common.gromacs.gmx_constant import mdp_parameters
from typing import Optional, Dict, List, Union
def make_mdp_from_json(
        task: str,
        inputs: Optional[Dict] = None
    ) -> Dict:
    """Render the .mdp file content (a string) for a gromacs task.

    `inputs` overrides keys of the task's default parameter template;
    lines are emitted sorted alphabetically.
    Bug fix: the template is copied before applying overrides — the old
    code aliased `mdp_parameters[task]` and `update()` mutated the shared
    module-level defaults, leaking overrides into every later call.
    NOTE(review): the `-> Dict` annotation is historical; the function
    returns the rendered string.
    """
    assert task in mdp_parameters.keys()
    # shallow copy is enough: template values are scalar mdp settings
    mdp_json = dict(mdp_parameters[task])
    if inputs is not None:
        mdp_json.update(inputs)

    def make_mdp_line(key, value):
        return "{} \t= {}".format(key, value)

    content_list = [make_mdp_line(key, value) for key, value in mdp_json.items()]
    content_list.sort()
    mdp_content = "\n".join(content_list)
    return mdp_content
def modify_output(
        freq: Union[str, int],
        output_mode: str = "both"
    ) -> Dict:
    """Map an output mode to the gromacs output-frequency mdp settings.

    Modes: "both" writes trr + xtc + energies, "single" xtc + energies only,
    "double" trr + energies only, "none" disables all output.
    Raises RuntimeError for an unknown mode.
    """
    if output_mode == "both":
        trr_freq, xtc_freq, energy_freq = freq, freq, freq
    elif output_mode == "single":
        trr_freq, xtc_freq, energy_freq = 0, freq, freq
    elif output_mode == "double":
        trr_freq, xtc_freq, energy_freq = freq, 0, freq
    elif output_mode == "none":
        trr_freq, xtc_freq, energy_freq = 0, 0, 0
    else:
        raise RuntimeError("Unknown output mode. Please specify one from 'single', 'double' or 'both'.")
    return {
        "nstxout": trr_freq,
        "nstvout": trr_freq,
        "nstfout": trr_freq,
        "nstenergy": energy_freq,
        "nstxtcout": xtc_freq,
    }
def modify_define(
        define: Union[str, List]
    ) -> Dict:
    """Normalize the mdp `define` field; a list of flags is joined with spaces.

    Bug fix: the original compared `type(define) == List` against the
    `typing.List` alias, which is never true, so list inputs were passed
    through unjoined.  Use isinstance against the builtin `list` instead.
    """
    if isinstance(define, list):
        define_string = " ".join(define)
    else:
        define_string = define
    return {
        "define": define_string
    }
def make_md_mdp_string(
    gmx_config
):
    """Build the md-task mdp content from a run-configuration dict.

    Workflow-only keys (method, kappas, dp_model, ...) are filtered out;
    all remaining keys are forwarded as mdp overrides together with the
    output-frequency settings derived from output_freq/output_mode.
    """
    non_mdp_keys = {
        "method", "temperature", "nt", "kappas", "ntmpi",
        "max_warning", "output_freq", "output_mode", "type", "dp_model",
    }
    update_dict = {
        key: value for key, value in gmx_config.items()
        if key not in non_mdp_keys
    }
    update_dict.update(
        modify_output(freq=gmx_config["output_freq"], output_mode=gmx_config["output_mode"])
    )
    return make_mdp_from_json(task="md", inputs=update_dict)
def make_md_mdp_from_config(
    md_parameters_dict
):
    """Validate a raw md parameter dict and render it as mdp content."""
    check_basic_argument(md_parameters_dict)
    return make_mdp_from_json(task="md", inputs=md_parameters_dict)
def check_basic_argument(md_parameters_dict: Dict):
    """Assert that the minimal md settings ("nsteps", "output_freq") are present."""
    for required_key in ("nsteps", "output_freq"):
        assert required_key in md_parameters_dict.keys()
from ride import getLogger
from ride.core import AttributeDict, Configs, RideClassificationDataset
from ride.utils.env import DATASETS_PATH
logger = getLogger(__name__)
try:
import pl_bolts # noqa: F401
import torchvision # noqa: F401
except ImportError:
logger.error(
"To run the `mnist_dataset.py` example, first install its dependencies: "
"`pip install pytorch-lightning-bolts torchvision`"
)
class MnistDataset(RideClassificationDataset):
    """
    Example Mnist Dataset

    Wraps the pl_bolts MNIST datamodule as a Ride classification dataset,
    exposing train/val/test dataloaders, class list and input/output shapes.
    Modified from https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pl_examples/basic_examples/mnist_datamodule.py
    """

    @staticmethod
    def configs():
        # Collect configs inherited from the Ride dataset hierarchy and
        # add the MNIST-specific options.
        c = Configs.collect(MnistDataset)
        c.add(
            name="val_split",
            type=int,
            default=5000,
            strategy="constant",
            description="Number samples from train dataset used for validation split.",
        )
        c.add(
            name="normalize",
            type=int,
            default=1,
            choices=[0, 1],
            strategy="constant",
            description="Whether to normalize dataset.",
        )
        return c

    def __init__(self, hparams: AttributeDict):
        # NOTE(review): the `hparams` argument is not used directly below;
        # `self.hparams` is presumably populated by the Ride base class
        # before/around this call — confirm against RideClassificationDataset.
        self.datamodule = pl_bolts.datamodules.MNISTDataModule(
            data_dir=DATASETS_PATH,
            val_split=self.hparams.val_split,
            num_workers=self.hparams.num_workers,
            normalize=self.hparams.normalize,
            batch_size=self.hparams.batch_size,
            seed=42,
            shuffle=True,
            pin_memory=self.hparams.num_workers > 1,
            drop_last=False,
        )
        self.datamodule.prepare_data()
        self.datamodule.setup()
        # NOTE(review): these assignments replace the datamodule's
        # dataloader *methods* with materialized DataLoader objects,
        # which appears to be what the surrounding framework expects —
        # confirm before refactoring.
        self.datamodule.train_dataloader = self.datamodule.train_dataloader()
        self.datamodule.val_dataloader = self.datamodule.val_dataloader()
        self.datamodule.test_dataloader = self.datamodule.test_dataloader()
        # 10 digit classes; input_shape comes from the datamodule dims
        self.output_shape = 10
        self.classes = list(range(10))
        self.input_shape = self.datamodule.dims
I inserted the function called from the main:
-) displayContours
-)save_to_disk
Here I collected the following file:
-) Normal.java
-) Threshold.java
-) Offset.java
"""
from math import exp,sin,cos
from ridge_detection.basicGeometry import Region,Chord
from numpy import amax,amin,asarray
import logging
from os import path
from copy import deepcopy
from PIL import Image, ImageDraw
from mrcfile import open as mrcfile_open, new as mrcfile_new
RED_PIXEL_LINE = (255, 0, 0)
GREEN_PIXEL_CONTOUR = (0, 255, 0)
SIZE_RAY_JUNCTION =1
def displayContours(params,result,resultJunction):
    """Draw detected lines (red), width contours (green) and junctions (blue)
    onto the input image.

    :param params: config object (input path, estimate_width,
        show_junction_points, preview accessors)
    :param result: list of detected line objects with col/row/angle and
        width_l/width_r arrays
    :param resultJunction: list of junction points with x/y attributes
    :return: (fully annotated image, copy with only the red lines drawn)
    """
    # the input may be an MRC file or a plain image file
    try:
        img=Image.fromarray(mrcfile_open(params.config_path_to_file).data).convert('RGB')
    except ValueError:
        img=Image.open(params.config_path_to_file).convert('RGB')
    pixelMap2 = img.load()
    sizePixelMap2=img.size

    """ plot the lines"""
    if isinstance(result, list) is True:
        for line in result:
            for i,j in zip(line.col,line.row):
                pixelMap2[int(i), int(j)] = RED_PIXEL_LINE
    # snapshot taken before contours/junctions are added
    img_only_lines = deepcopy(img)

    """ plot the contours"""
    if isinstance(result, list) is True:
        for cont in result:
            last_w_l ,last_w_r,px_r,px_l,py_r,py_l = 0,0,0,0,0,0
            for j in range(cont.num):
                px = cont.col[j]
                py = cont.row[j]
                # unit normal of the line at this point
                nx = sin(cont.angle[j])
                ny = cos(cont.angle[j])
                if params.get_estimate_width():
                    # offset the point by the estimated width on each side
                    px_r = px + cont.width_r[j] * nx
                    py_r = py + cont.width_r[j] * ny
                    px_l = px - cont.width_l[j] * nx
                    py_l = py - cont.width_l[j] * ny
                    # draw only when this and the previous width are valid
                    # and the shifted pixel lies inside the image bounds
                    if last_w_r > 0 and cont.width_r[j] > 0 and sizePixelMap2[0]>int(px_r)+1 and sizePixelMap2[1]>int(py_r)+1:
                        pixelMap2[int(px_r)+1, int(py_r)+1] = GREEN_PIXEL_CONTOUR
                    if last_w_l > 0 and cont.width_l[j] > 0 and sizePixelMap2[0]>int(px_l)+1 and sizePixelMap2[1]>int(py_l)+1:
                        pixelMap2[int(px_l) + 1, int(py_l) + 1] = GREEN_PIXEL_CONTOUR
                    last_w_r = cont.width_r[j]
                    last_w_l = cont.width_l[j]

    """ draw a circle (with ray SIZE_RAY_JUNCTION) centered in each junctions"""
    if params.get_show_junction_points() is True and isinstance(resultJunction, list) is True:
        for junction in resultJunction:
            draw = ImageDraw.Draw(img)
            draw.ellipse((int(junction.x) - SIZE_RAY_JUNCTION, int(junction.y) - SIZE_RAY_JUNCTION, int(junction.x) + SIZE_RAY_JUNCTION, int(junction.y) + SIZE_RAY_JUNCTION), fill = 'blue')
    if params.get_preview() is True:
        img.show()
    return img,img_only_lines
def save_to_disk(img,img_only_lines,folder_save_out):
    """Save the annotated image and the lines-only image as PNG and MRC.

    Writes output.png / output_only_lines.png, then re-reads each PNG as
    greyscale to build output.mrc / output_only_lines.mrc.
    Cleanup fix: the explicit `mrc.close()` calls inside the `with` blocks
    were redundant (the context manager already closes) and were removed.
    """
    outJpg=path.join(folder_save_out,"output.png")
    outJpg_onlyLines = path.join(folder_save_out, "output_only_lines.png")
    outmrc=path.join(folder_save_out,"output.mrc")
    outmrc_onlyLines = path.join(folder_save_out, "output_only_lines.mrc")

    img.save(outJpg)
    logging.info(" desired output image saved in '"+str(outJpg)+"'")
    img_only_lines.save(outJpg_onlyLines)
    logging.info(" only lines output image saved in '"+str(outJpg_onlyLines)+"'")

    with mrcfile_new(outmrc,overwrite=True) as mrc:
        mrc.set_data(asarray(Image.open(outJpg).convert("L")))
    logging.info(" desired output MRC saved in '" + str(outmrc) + "'")

    with mrcfile_new(outmrc_onlyLines,overwrite=True) as mrc:
        mrc.set_data(asarray( Image.open(outJpg_onlyLines).convert("L")))
    logging.info(" only lines output MRC saved in '" + str(outmrc_onlyLines) + "'")
INTEGER_8BIT_MAX = 255
INTEGER_8BIT_MIN = 0
DEBUGGING = False
""" OFFSET.JAVA"""
class Offset:
    """A 2D offset (x, y), defaulting to the origin.

    Port of Offset.java; a `__repr__` was added for debuggability
    (backward-compatible addition).
    """

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

    def __repr__(self):
        return f"Offset(x={self.x}, y={self.y})"
""" NORMAL.JAVA"""
SQRTPI = 1.772453850905516027
UPPERLIMIT = 20.0
P10 = 242.66795523053175
P11 = 21.979261618294152
P12 = 6.9963834886191355
P13 = -.035609843701815385
Q10 = 215.05887586986120
Q11 = 91.164905404514901
Q12 = 15.082797630407787
Q13 = 1.0
P20 = 300.4592610201616005
P21 = 451.9189537118729422
P22 = 339.3208167343436870
P23 = 152.9892850469404039
P24 = 43.16222722205673530
P25 = 7.211758250883093659
P26 = .5641955174789739711
P27 = -.0000001368648573827167067
Q20 = 300.4592609569832933
Q21 = 790.9509253278980272
Q22 = 931.3540948506096211
Q23 = 638.9802644656311665
Q24 = 277.5854447439876434
Q25 = 77.00015293522947295
Q26 = 12.78272731962942351
Q27 = 1.0
P30 = -.00299610707703542174
P31 = -.0494730910623250734
P32 = -.226956593539686930
P33 = -.278661308609647788
P34 = -.0223192459734184686
Q30 = .0106209230528467918
Q31 = .191308926107829841
Q32 = 1.05167510706793207
Q33 = 1.98733201817135256
Q34 = 1.0
SQRT2 = 1.41421356237309504880
def normalizeImg(img,new_min=INTEGER_8BIT_MIN,new_max=INTEGER_8BIT_MAX,return_Aslist=True):
    """
    Linearly rescale `img` into [new_min, new_max] (8-bit range by default).

    :param img: input array
    :param new_max: upper bound of the target range
    :param new_min: lower bound of the target range
    :param return_Aslist: when True, return a flat python list instead of an array
    :return: rescaled array, or a flat list of its values
    """
    old_min = amin(img)
    ratio = (img - old_min) / (amax(img) - old_min)
    rescaled = (new_max - new_min) * ratio + new_min
    if return_Aslist is False:
        return rescaled
    return rescaled.flatten().tolist()
def getNormal(x):
    """
    calculates the normal of x

    Cumulative distribution function of the standard normal, evaluated via
    rational (P/Q) approximations of erf/erfc over three ranges of
    y = |x| / sqrt(2) (port of Normal.java).
    :param x: point at which to evaluate the CDF
    :return: value in [0, 1]
    """
    # outside +/- UPPERLIMIT the CDF saturates to 0 or 1
    if x < -UPPERLIMIT:
        return 0.0
    if x > UPPERLIMIT:
        return 1.0

    y = x / SQRT2
    # sn remembers the sign of x; the approximation works on |y|
    sn = 1
    if y < 0:
        y = -y
        sn = -1

    if y < 0.46875:
        # small |y|: rational approximation of erf(y)
        R1 = P10 + P11 * y*y + P12 * y*y*y*y + P13 * pow(y,6)
        R2 = Q10 + Q11 * y*y + Q12 * y*y*y*y + Q13 * pow(y,6)
        erf = y * R1 / R2
        return 0.5 + 0.5 * erf if sn ==1 else 0.5 - 0.5 * erf
    elif y < 4.0:
        # mid range: rational approximation of erfc(y) scaled by exp(-y^2)
        R1 = P20 + P21 * y + P22 * y*y + P23 * y*y*y + P24 * y*y*y*y + P25 * pow(y,5) + P26 * pow(y,6) + P27 * pow(y,7)
        R2 = Q20 + Q21 * y + Q22 * y*y + Q23 * y*y*y + Q24 * y*y*y*y + Q25 * pow(y,5) + Q26 * pow(y,6) + Q27 * pow(y,7)
        erfc = exp(-(y*y)) * R1 / R2
        return 1.0 - 0.5 * erfc if sn == 1 else 0.5 * erfc
    else:
        # large |y|: asymptotic erfc expansion
        R1 = P30 + P31 * y*y*y*y + P32 * pow(y,8) + P33 * pow(y,12) + P34 * pow(y,16)
        R2 = Q30 + Q31 * y*y*y*y + Q32 * pow(y,8) + Q33 * pow(y,12) + Q34 * pow(y,16)
        erfc = (exp(-(y*y)) / y) * (1.0 / SQRTPI + R1 / (R2 * y*y))
        return 1.0 - 0.5 * erfc if sn==1 else 0.5 * erfc
""" THRESHOLD JAVA"""
def threshold(image, minimum, width, height, out_region):
    """Collect pixels with grey value >= `minimum` into `out_region` as
    row-wise chords (maximal runs of consecutive above-threshold columns).

    :param image: flat row-major list of grey values, length width*height
    :param minimum: inclusive grey-value threshold
    :param width: image width in pixels
    :param height: image height in pixels
    :param out_region: Region instance that receives the chords (mutated)
    """
    if not isinstance(out_region, Region):
        print("ERROR: The 'out_region' param has to be an instance of the class 'Region'. (helper.py->threshold)")
        logging.error(" The 'out_region' param has to be an instance of the class 'Region'. (helper.py->threshold)")
        exit(-1)

    inside = False  # currently inside an above-threshold run
    num = 0         # number of completed chords
    rl =list()
    for r in list(range(0, height)):
        for c in list(range(0, width)):
            l = r * width + c # it is lincoord(row, col,width) of linesUtil.py
            grey = image[l]
            if grey>=minimum:
                if inside is False:
                    # run starts: open a new chord at this column
                    inside = True
                    rl.append(Chord(row=r,column_start= c))
            elif inside is True:
                # run ended: close the chord at the previous column
                inside = False
                rl[num].ce = c - 1
                num+=1 #todo: e' uguale a (*) --> quindi refactor possibile
        if inside is True:
            # row ended while inside a run: close the chord at the last column
            inside = False
            rl[num].ce=width-1
            num+=1 #todo: e' uguale a (*) --> quindi refactor possibile
    out_region.add_chords(rl[:num])
    out_region.num =num
from tkinter import Tk, messagebox,Button, Label,scrolledtext,INSERT
from os import path, listdir
class gen_id(object):
    """
    Iterator-like counter for the current grid row.

    `next()` advances and returns the new row, `prev()` steps back
    (never below zero) and `current()` reads without advancing.
    """

    def __init__(self):
        self.row = 0

    def __next__(self):
        return self.next()

    def next(self):
        self.row += 1
        return self.row

    def prev(self):
        self.row = max(self.row - 1, 0)
        return self.row

    def current(self):
        return self.row
# Callbacks bound to the buttons of the "Parameters information" window.
# Each click_info_* handler opens a messagebox describing one parameter
# of the ridge-detection tool; the texts are shown to the user verbatim.
def click_exit():
    exit()

# message box info for the params
def click_info_setfile():
    messagebox.showinfo("Set file","Select the image to analyze")

def click_info_general():
    messagebox.showinfo("General","They are used to estimate the mandatory parameters.\n\nAt version 1.3.0 they are still not implemented")

def click_info_line_width():
    messagebox.showinfo("Line Width","The line diameter in pixels.\nIt estimates the mandatory parameter 'Sigma'")

def click_info_high_contrast():
    messagebox.showinfo("High Contrast","Highest grayscale value of the line.\nIt estimates the mandatory parameter 'Upper threshold'")

def click_info_low_contrast():
    messagebox.showinfo("Low Contrast","Lowest grayscale value of the line.\nIt estimates the mandatory parameter 'Lower threshold'")

def click_info_sigma():
    messagebox.showinfo("Sigma","Determines the sigma for the derivatives. It depends on the line width")

def click_info_threshold_lw():
    messagebox.showinfo("Lower_Threshold","Line points with a response smaller as this threshold are rejected")

def click_info_threshold_up():
    messagebox.showinfo("Upper_Threshold","Line points with a response larger as this threshold are accepted")

def click_info_darkline():
    messagebox.showinfo("DarkLine","Determines whether dark or bright lines are extracted by the ridge detection tool")

def click_info_overlap():
    messagebox.showinfo("Overlap resolution"," You can select a method to attempt automatic overlap resolution. The accuracy of this method will depend on the structure of your data")

def click_info_correct_position():
    messagebox.showinfo("Correct position","Correct the line position if it has different contrast on each side of it")

def click_info_estimate_width():
    messagebox.showinfo("Estimate width","If this option is selected the width of the line is estimated")

def click_info_do_extend_line():
    messagebox.showinfo("Do ExtendLine","Try to extend the lines at their end_points to find additional junctions")

def click_info_show_junctions():
    messagebox.showinfo("Show junction point","Show the junction points in the output image")

def click_info_show_ids():
    messagebox.showinfo("Show IDs","The ID of each line will be shown.\n\nAt version 1.3.0 they are still not implemented")

def click_info_display_results():
    messagebox.showinfo("Display results","All contours and junctions are filled into a results table\n\nAt version 1.3.0 they are still not implemented")

def click_info_preview():
    messagebox.showinfo("Preview","Show the results before saving on disk")

def click_info_make_binary():
    messagebox.showinfo("Make binary","Binarize the output.\n\nAt version 1.3.0 they are still not implemented")
def show_info():
    """
    It creates the info window: each button opens a message box describing
    the corresponding parameter of the ridge detection tool.
    :return:
    """
    # independent counters used to place the widgets on the tkinter grid
    id_row = gen_id()
    id_col_detection = gen_id()
    id_col_filtering = gen_id()
    id_col_general = gen_id()
    window_info = Tk()
    window_info.title("Parameters information")
    window_info.geometry('1300x250')
    # image selection row
    lbl = Label(window_info, text="Selection path image:")
    lbl.grid(column=0, row=id_row.current())
    btn = Button(window_info, text="Set file", command=click_info_setfile)
    btn.grid(column=1, row=id_row.current())
    # --- mandatory parameters section ---
    lbl = Label(window_info)
    lbl.grid(column=id_col_detection.current(), row=id_row.next())
    lbl = Label(window_info, text="Mandatory Parameters:")
    lbl.grid(column=id_col_detection.current(), row=id_row.next())
    btn = Button(window_info, text="Sigma", command=click_info_sigma)
    btn.grid(column=id_col_detection.current(), row=id_row.next())
    btn = Button(window_info, text="Lower_Threshold", command=click_info_threshold_lw)
    btn.grid(column=id_col_detection.next(), row=id_row.current())
    btn = Button(window_info, text="Upper_Threshold", command=click_info_threshold_up)
    btn.grid(column=id_col_detection.next(), row=id_row.current())
    btn = Button(window_info, text="DarkLine", command=click_info_darkline)
    btn.grid(column=id_col_detection.next(), row=id_row.current())
    btn = Button(window_info, text="Overlap", command=click_info_overlap)
    btn.grid(column=id_col_detection.next(), row=id_row.current())
    # --- optional parameters section ---
    lbl = Label(window_info)
    lbl.grid(column=id_col_general.current(), row=id_row.next())
    lbl = Label(window_info, text="Optional Parameters")
    lbl.grid(column=id_col_general.current(), row=id_row.next())
    lbl = Label(window_info, text="( not available at the current version):")
    lbl.grid(column=id_col_general.next(), row=id_row.current())
    btn = Button(window_info, text="General", command=click_info_general)
    btn.grid(column=id_col_general.prev(), row=id_row.next())
    btn = Button(window_info, text="Line width", command=click_info_line_width)
    btn.grid(column=id_col_general.next(), row=id_row.current())
    btn = Button(window_info, text="High contrast", command=click_info_high_contrast)
    btn.grid(column=id_col_general.next(), row=id_row.current())
    btn = Button(window_info, text="Low contrast", command=click_info_low_contrast)
    btn.grid(column=id_col_general.next(), row=id_row.current())
    # --- further parameters section ---
    lbl = Label(window_info)
    lbl.grid(column=id_col_filtering.current(), row=id_row.next())
    lbl = Label(window_info, text="Further Parameters:")
    lbl.grid(column=id_col_filtering.current(), row=id_row.next())
    btn = Button(window_info, text="Correct position", command=click_info_correct_position)
    btn.grid(column=id_col_filtering.current(), row=id_row.next())
    btn = Button(window_info, text="Estimate width", command=click_info_estimate_width)
    btn.grid(column=id_col_filtering.next(), row=id_row.current())
    btn = Button(window_info, text="Do ExtendLine", command=click_info_do_extend_line)
    btn.grid(column=id_col_filtering.next(), row=id_row.current())
    btn = Button(window_info, text="Show junction point", command=click_info_show_junctions)
    btn.grid(column=id_col_filtering.next(), row=id_row.current())
    btn = Button(window_info, text="Show IDs", command=click_info_show_ids)
    btn.grid(column=id_col_filtering.next(), row=id_row.current())
    btn = Button(window_info, text="Display results", command=click_info_display_results)
    btn.grid(column=id_col_filtering.next(), row=id_row.current())
    btn = Button(window_info, text="Preview", command=click_info_preview)
    btn.grid(column=id_col_filtering.next(), row=id_row.current())
    btn = Button(window_info, text="Make Binary", command=click_info_make_binary)
    btn.grid(column=id_col_filtering.next(), row=id_row.current())
    # removed a stray packaging artifact that had been appended after this call
    window_info.mainloop()
Here I collected the following files:
-) Line.java --> Lines.java is treated as a python list of this object directly in the code that use it i.e. lineDetector obj
-) Chord.java
-) Region.java
-) Junction.java --> Junctions.java is treated as a python list of this object directly in the code that use it i.e. lineDetector obj
"""
from math import sqrt
import logging
"""
global variable to manage the counter of the lines. It used the synchronized method.
I hope this workaround will work otherwise --> http://theorangeduck.com/page/synchronized-python
"""
# Module-wide sequence used by Line.assignID() to hand out unique line ids.
id_counter = 0


def get_global_counter():
    """Return the current value of the global line-id counter."""
    # reading a module global needs no ``global`` statement
    return id_counter


def reset_counter():
    """Reset the global line-id counter back to zero."""
    global id_counter
    id_counter = 0
def getIndexByID(lines, id):
    """
    Since I do not have the object "lines" but a list of Line objects, this
    function is faking the 'getIndexByID' function of the Lines obj in Lines.java.

    :param lines: list of Line obj
    :param id: id of the object Line to search
    :return: the index in the list 'lines' of the searched obj, or -1 if absent
    """
    # the parameter is called ``id`` for parity with the Java API, even
    # though it shadows the builtin of the same name
    for index, line in enumerate(lines):
        if line.getID() == id:
            return index
    return -1
class Line:
    """
    This class holds one extracted line. The field num contains the number of points in the line. The coordinates of the line points are given in the arrays row and col.
    The array angle contains the direction of the normal to each line point, as measured from the row-axis. Some people like to call the col-axis the x-axis and the row-axis the y-axis, and measure the angle from
    the x-axis. To convert the angle into this convention, subtract PI/2 from the angle and normalize it to be in the interval [0,2*PI). The array response contains the response of the operator,
    i.e., the second directional derivative in the direction of angle, at each line point. The arrays width_l and width_r contain the width information for each line point if the
    algorithm was requested to extract it; otherwise they are NULL. If the line position and width correction was applied the contents of width_l and width_r will be identical.
    The arrays asymmetry and contrast contain the true asymmetry and contrast of each line point if the algorithm was instructed to apply the width and position correction. Otherwise, they are set to NULL. If
    the asymmetry, i.e., the weaker gradient, is on the right side of the line, the asymmetry is set to a positive value, while if it is on the left side it is set to a negative value.
    """

    # NOTE: the following are class-level defaults kept for parity with the
    # original port; __init__ always shadows them with instance attributes.
    # number of points
    num = 0
    # row coordinates of the line points
    row = list()
    # column coordinates of the line points
    col = list()
    # angle of normal (measured from the row axis)
    angle = list()
    # response of line point (second derivative)
    response = list()
    # width to the left of the line
    width_l = list()
    # width to the right of the line
    width_r = list()
    # asymmetry of the line point
    asymmetry = list()
    # intensity of the line point
    intensity = list()
    # contour class: an int taken from linesUtil.COUNTOUR_DICT (e.g.
    # cont_no_junc, cont_start_junc, cont_end_junc, cont_both_junc, cont_closed);
    # it was an enum in the original Java code
    cont_class = None
    # unique line id, assigned from the module-global counter in assignID()
    _id = None
    # the frame
    frame = 0
    # Since in Java there is a class Lines that is basically a list of lines
    # plus a frame variable, the frame of the containing list is tracked here.
    father_frame = None

    def __init__(self, num=None, row = None, col = None, angle = None, response = None, asymmetry = None, intensity = None, width_l = None, width_r = None, cont_class=None):
        self.assignID()
        self.num = 0 if num is None else num
        self.row = row
        self.col = col
        self.angle = angle
        self.response = response
        self.asymmetry = asymmetry
        self.intensity = intensity
        self.width_l = width_l
        self.width_r = width_r
        self.cont_class = cont_class

    def getStartOrdEndPosition(self, x, y):
        """
        :param x: column coordinate of the query point (compared against ``col``)
        :param y: row coordinate of the query point (compared against ``row``)
        :return: 0 if (x, y) is closer to the first line point, otherwise the
            index of the last point (num - 1)
        """
        distStart = sqrt(pow(self.col[0] - x, 2) + pow(self.row[0] - y, 2))
        distEnd = sqrt(pow(self.col[(self.num - 1)] - x, 2) + pow(self.row[(self.num - 1)] - y, 2))
        return 0 if distStart < distEnd else self.num - 1

    def estimateLength(self):
        """
        :return: the estimated length of the line, i.e. the sum of the
            Euclidean distances between consecutive line points
        """
        length = 0
        # start from 1: the previous port started at 0, which added a bogus
        # segment between the first point and the *last* point (index -1)
        # and therefore over-estimated the length of every open line
        for i in range(1, self.num):
            length += sqrt(pow(self.col[i] - self.col[i - 1], 2) + pow(self.row[i] - self.row[i - 1], 2))
        return length

    # http://theorangeduck.com/page/synchronized-python -- the counter is a
    # plain module global, so this is NOT thread safe
    def assignID(self):
        global id_counter
        self._id = id_counter
        id_counter += 1

    def getXCoordinates(self):
        return self.col

    def getYCoordinates(self):
        return self.row

    def getID(self):
        return self._id

    def __str__(self):
        return "id: "+str(self._id)+"\tcont_class: "+str(self.cont_class)+"\tnum: "+str(self.num)+"\tlen_elements: "+str(len(self.angle))
class Chord:
    """One horizontal run of region pixels: row ``r``, columns ``cb``..``ce``."""

    def __init__(self, row=0, column_start=0, column_end=0):
        self.r = row            # row coordinate of the chord
        self.ce = column_end    # column coordinate of the end of the chord
        self.cb = column_start  # column coordinate of the start of the chord

    def __str__(self):
        """Human readable dump of the chord coordinates."""
        parts = [
            "List of input params:",
            "\trow coordinate of the chord = " + str(self.r),
            "\tcolumn coordinate of the start of the chord = " + str(self.cb),
            "\tcolumn coordinate of the end of the chord = " + str(self.ce),
        ]
        return "\n".join(parts) + "\n"
class Region:
    """A pixel region stored as a list of horizontal chords."""

    def __init__(self):
        # number of chords; kept for parity with the Java code (callers such
        # as the thresholding routine maintain it)
        self.num = 0
        # backing list of Chord objects (same workaround used in
        # LineDetector.py for junctions and lines)
        self._rl = list()

    def add_chord(self, chord):
        """Append a single Chord to the region; abort the program on a wrong type."""
        if not isinstance(chord, Chord):
            print("ERROR: You have to append to the 'rl' list a 'chord' class variable. (basicGeometry.py->Region.add_chord)")
            logging.error(" You have to append to the 'rl' list a 'chord' class variable. (basicGeometry.py->Region.add_chord)")
            exit(-1)
        self._rl.append(chord)

    def add_chords(self, list_chords):
        """
        Insert a bunch of chords into the _rl variable.
        :param list_chords: list of chords, or a single chord
        """
        items = list_chords if isinstance(list_chords, list) else [list_chords]
        for item in items:
            self.add_chord(item)

    def get_all_rl(self):
        """Return the full list of chords."""
        return self._rl

    def get_line(self, index=-1):
        """
        Return the chord at position ``index`` (default: the last inserted
        one), or None when the list is empty or the index is out of range.
        :param index: index in the chord list; -1 means the last inserted chord
        :return: the specified chord, or None
        """
        if self._rl and len(self._rl) > index:
            return self._rl[index]
        return None
class Junction:
    """
    Storage of the junction variables. In the original Java code this is a
    subclass of the "Comparable" interface of the Java standard library
    (which just provides the 'compareTo' function).

    Since in Java there is a class Junctions that is basically a list of
    junctions plus a frame variable, the frame of the containing list is
    tracked in ``father_frame``.
    """

    def __init__(self, cont1=None, cont2=None, pos=None, x=None, y=None, lineCont1=None, lineCont2=None, isNonTerminal=False, father_frame=None):
        self.cont1 = cont1  # Index of line that is already processed
        self.cont2 = cont2  # Index of line that runs into cont1
        self.pos = pos      # Index of the junction point in cont1
        self.x = x          # row coordinate of the junction point
        self.y = y          # column coordinate of the junction point
        self.isNonTerminal = isNonTerminal
        self.father_frame = father_frame
        # check the all-None case first so the isinstance test is only
        # evaluated when actual objects were passed in (same accept/reject
        # behavior as before, just short-circuited)
        if (lineCont1 is None and lineCont2 is None) or (isinstance(lineCont1, Line) and isinstance(lineCont2, Line)):
            self.lineCont1 = lineCont1  # line that is already processed
            self.lineCont2 = lineCont2
        else:
            print("ERROR: The 'lineCont1' and 'lineCont2' input value have to be an instance of the class 'Line' or a 'NoneType'. (basicGeometry.py->Junction.__init__)")
            logging.error(" The 'lineCont1' and 'lineCont2' input value have to be an instance of the class 'Line' or a 'NoneType'. (basicGeometry.py->Junction.__init__)")
            exit(-1)

    def __str__(self):
        """print the value used in the script the variables"""
        output = "List of input params:\n"
        output += "\tcont1: Index of line that is already processed = " + str(self.cont1) + "\n"
        output += "\tlineCont1: Line that is already processed = " + self.lineCont1.__str__() + "\n"
        output += "\tpos: Index of the junction point in cont1 = " + str(self.pos) + "\n"
        output += "\tx: row coordinate of the junction point = " + str(self.x) + "\n"
        output += "\ty: column coordinate of the junction point = " + str(self.y) + "\n"
        output += "\tcont2: Index of line that runs into cont1 = " + str(self.cont2) + "\n"
        output += "\tlineCont1: Line that runs into idCont1 = " + self.lineCont2.__str__() + "\n"
        # bug fix: this line used to print self.cont2 instead of the flag
        output += "\tisNonTerminal = " + str(self.isNonTerminal) + "\n"
        return output
import logging
from json import load as json_load
from json import JSONDecodeError
from os import path
from math import sqrt,pi,exp
from ridge_detection.linesUtil import MODE_LIGHT,MODE_DARK
def load_json(config_path):
    """
    Validate that ``config_path`` points to an existing, well formed json
    file and return its parsed content; the program is aborted otherwise.

    :param config_path: path to the json config file
    :return: loaded json file
    """
    if not path.isfile(config_path):
        missing_msg = "ERROR: file '" + config_path + "' not found!"
        print(missing_msg)
        logging.error(missing_msg)
        exit(-1)
    try:
        with open(config_path) as json_file:
            return json_load(json_file)
    except JSONDecodeError:
        # same user-facing message as before, emitted on malformed json
        print("\nYour configuration file seems to be corruped. Please check if it is valid.")
        logging.error("Your configuration file seems to be corruped. Please check if it is valid.")
        exit(-1)
def error_optional_parameter_missing(par, mess):
    """
    Abort the program when an optional parameter needed to estimate one of
    the mandatory parameters (e.g. sigma) was not provided.

    :param par: parameter value (None means the parameter is missing)
    :param mess: error message to log before aborting
    :return: None when the parameter is present
    """
    if par is not None:
        return
    logging.error(mess)
    exit()
class Params:
    """
    This class replaces the original class "Options.java": it contains the
    parameters read (or estimated) from the json config file.

    The lists of accepted parameters are the following:
        mandatory_params = ["Sigma", "Lower_Threshold", "Upper_Threshold", "Darkline", "Overlap_resolution", "Minimum_Line_Length", "Maximum_Line_Length"]
        optional_params = ["Line_width", "High_contrast", "Low_contrast"]
        further_bool_options = ["Correct_position", "Estimate_width", "Show_junction_points", "Show_IDs", "Display_results", "Add_to_Manager", "Preview"]

    The "Sigma", "Lower_Threshold" and "Upper_Threshold" mandatory parameters
    can be estimated from the optional parameters; for more info see:
    https://imagej.net/Ridge_Detection#Mandatory_Parameters
    """

    # optional parameters (used to estimate the mandatory ones)
    _line_width = None
    _high_contrast = None
    _low_contrast = None
    # further (boolean) options with their defaults
    _correct_position = False
    _estimate_width = False
    _show_junction_points = False
    _show_IDs = False
    _display_results = True
    _preview = False
    _doExtendLine = True
    _saveOnFile= True
    # path of the image to analyze, taken from the config file
    config_path_to_file = None
    # parsed json configuration (dict)
    _config = None
    # accepted values for the corresponding mandatory parameters
    _accepted_Overlap_resolution =["none","slope"]
    _accepted_Darkline = ["light", "dark"]
    # mandatory parameters that can never be estimated and must always be present
    _foundamental_params = ["Minimum_Line_Length", "Maximum_Line_Length", "Darkline", "Overlap_resolution"]

    def __init__(self, cfg):
        """
        :param cfg: either an already-parsed configuration dict or the path
            to a json config file
        """
        if isinstance(cfg, dict):
            self._config = cfg
            self.config_path_to_file = cfg["path_to_file"]
        else:
            self._config = load_json(cfg)
            self.config_path_to_file = self._config["path_to_file"]
        self._isValid()
        self._set_params()

    def _isValid(self):
        """
        Given the loaded json config file, check that all the needed params
        are available; abort otherwise. This check exists because some
        mandatory params can be estimated from the optional parameters.
        :return: None
        """
        if "mandatory_parameters" not in self._config.keys():
            print("\nError: params 'mandatory_parameters' not found in the config file!")
            logging.error(" params 'mandatory_parameters' not found in the config file!")
            exit(-1)
        mandatory_k = self._config["mandatory_parameters"].keys()
        for foundamental_value in self._foundamental_params:
            if foundamental_value not in mandatory_k:
                print("\nError: param 'mandatory_parameters."+foundamental_value+"' not found in the config file!")
                logging.error(" param 'mandatory_parameters." + foundamental_value + "' not found in the config file!")
                exit(-1)
        if self._config["mandatory_parameters"]["Overlap_resolution"].lower() not in self._accepted_Overlap_resolution:
            print("\nError: param 'mandatory_parameters.Overlap_resolution' not valid. Has to be one of "+str(self._accepted_Overlap_resolution))
            logging.error(" param 'mandatory_parameters.Overlap_resolution' not valid. Has to be one of "+str(self._accepted_Overlap_resolution))
            exit(-1)
        if self._config["mandatory_parameters"]["Darkline"].lower() not in self._accepted_Darkline:
            print("\nError: param 'mandatory_parameters.Darkiline' not valid. Has to be one of " + str(self._accepted_Darkline))
            logging.error(" param 'mandatory_parameters.Darkiline' not valid. Has to be one of " + str(self._accepted_Darkline))
            exit(-1)
        # without optional parameters, Sigma and both thresholds cannot be
        # estimated, so they must all be present in the mandatory section
        if "optional_parameters" not in self._config.keys() and ("Sigma" not in mandatory_k or "Lower_Threshold" not in mandatory_k or "Upper_Threshold" not in mandatory_k ):
            print("\nERROR: optional parameters are used to estimate the [Sigma, Lower_Threshold,Upper_Threshold] mandatory's parameter when they miss")
            logging.error("optional parameters are used to estimate the [Sigma, Lower_Threshold,Upper_Threshold] mandatory's parameter when they miss")
            exit(-1)

    def _get_mandatory_parameters(self):
        # shallow copy of the mandatory-parameters section
        return dict(self._config["mandatory_parameters"])

    def _get_optional_parameters(self):
        # shallow copy of the optional-parameters section, or None when absent
        return dict(self._config["optional_parameters"]) if "optional_parameters" in self._config.keys() else None

    def _get_further_options(self):
        # shallow copy of the further-options section, or None when absent
        return dict(self._config["further_options"]) if "further_options" in self._config.keys() else None

    def __str__(self):
        """print the value used in the script the variables """
        output = "List of input params:\n"
        output += "\tLine_width = " + str(self._line_width) + "\n"
        output += "\tHigh_contrast = " + str(self._high_contrast) + "\n"
        output += "\tLow_contrast = " + str(self._low_contrast) + "\n"
        output += "\tSigma = " + str(self._sigma) + "\n"
        output += "\tLower_Threshold = " + str(self._lower_Threshold) + "\n"
        output += "\tUpper_Threshold = " + str(self._upper_Threshold) + "\n"
        output += "\tMaximum_Line_Length = " + str(self._maximum_Line_Length) + "\n"
        output += "\tMinimum_Line_Length = " + str(self._minimum_Line_Length) + "\n"
        output += "\tDarkline = " + str(self._darkline) + "\n"
        output += "\tOverlap_resolution = " + str(self._overlap_resolution) + "\n"
        output += "\tCorrect_position = " + str(self._correct_position) + "\n"
        output += "\tEstimate_width = " + str(self._estimate_width) + "\n"
        output += "\tShow_junction_points = " + str(self._show_junction_points) + "\n"
        output += "\tShow_IDs = " + str(self._show_IDs) + "\n"
        output += "\tDisplay_results = " + str(self._display_results) + "\n"
        output += "\tPreview = " + str(self._preview) + "\n"
        output += "\tdoExtendLine = " + str(self._doExtendLine) + "\n"
        output += "\tSaveOnfile = " + str(self._saveOnFile) + "\n"
        return output

    def _set_params(self):
        """Fill the instance attributes from the parsed config, estimating
        Sigma and the two thresholds from the optional parameters when they
        are missing (see imagej.net/Ridge_Detection#Mandatory_Parameters)."""
        m = self._get_mandatory_parameters()
        f = self._get_further_options()
        o = self._get_optional_parameters()
        if isinstance(o,dict):
            if "Line_width" in o:
                self._line_width = o["Line_width"]
            if "High_contrast" in o:
                self._high_contrast = o["High_contrast"]
            if "Low_contrast" in o :
                self._low_contrast = o["Low_contrast"]
        if "Sigma" in m:
            self._sigma = m["Sigma"]
        else:
            error_optional_parameter_missing(self.get_line_width(),"When you do not insert 'Sigma' value you have to insert 'Line width' to estimate it")
            # sigma estimated from the line diameter
            self._sigma = 0.5 + (self._line_width/(2*sqrt(3)))
        # common denominator of the two threshold-estimation formulas
        val = pow(self._sigma,3)*sqrt(2*pi)
        if "Lower_Threshold" in m:
            self._lower_Threshold = m["Lower_Threshold"]
        else:
            error_optional_parameter_missing(self.get_line_width(),"When you do not insert 'Lower_Threshold' value you have to insert 'Line width' to estimate it")
            error_optional_parameter_missing(self.get_low_contrast(),"When you do not insert 'Lower_Threshold' value you have to insert 'Low contrast' to estimate it")
            ew = 0.17 * exp(- (pow(self._line_width / 2, 2) / pow(2 * self._sigma, 2)))
            self._lower_Threshold = ew*( (2 * self._low_contrast*(self._line_width / 2)) /val)
        if "Upper_Threshold" in m:
            self._upper_Threshold = m["Upper_Threshold"]
        else:
            error_optional_parameter_missing(self.get_line_width(),"When you do not insert 'Upper_Threshold' value you have to insert 'Line width' to estimate it")
            error_optional_parameter_missing(self.get_high_contrast(),"When you do not insert 'Upper_Threshold' value you have to insert 'High contrast' to estimate it")
            ew = 0.17 * exp(- (pow(self._line_width / 2, 2) / pow(2 * self._sigma, 2)))
            self._upper_Threshold = ew * ((2 * self._high_contrast * (self._line_width / 2)) / val)
        self._maximum_Line_Length = m["Maximum_Line_Length"]
        self._minimum_Line_Length = m["Minimum_Line_Length"]
        self._darkline = MODE_DARK if m["Darkline"].lower()=="dark" else MODE_LIGHT
        self._overlap_resolution = m["Overlap_resolution"].lower()
        if isinstance(f, dict):
            if "Correct_position" in f:
                self._correct_position = f["Correct_position"]
            if "Estimate_width" in f:
                self._estimate_width = f["Estimate_width"]
            if "Show_junction_points" in f:
                self._show_junction_points = f["Show_junction_points"]
            if "Show_IDs" in f:
                self._show_IDs = f["Show_IDs"]
            if "Display_results" in f:
                self._display_results = f["Display_results"]
            if "Preview" in f:
                self._preview = f["Preview"]
            if "save_on_disk" in f:
                self._saveOnFile = f["save_on_disk"]
            if "doExtendLine" in f:
                self._doExtendLine = f["doExtendLine"]

    def get_line_width(self):
        return self._line_width

    def get_high_contrast(self):
        return self._high_contrast

    def get_low_contrast(self):
        return self._low_contrast

    def get_correct_position(self):
        return self._correct_position

    def get_estimate_width(self):
        return self._estimate_width

    def get_show_junction_points(self):
        return self._show_junction_points

    def get_show_IDs(self):
        return self._show_IDs

    def get_display_results(self):
        return self._display_results

    def get_preview(self):
        return self._preview

    def get_saveOnFile(self):
        return self._saveOnFile

    def get_doExtendLine(self):
        return self._doExtendLine

    def get_sigma(self):
        return self._sigma

    def get_lower_Threshold(self):
        return self._lower_Threshold

    def get_upper_Threshold(self):
        return self._upper_Threshold

    def get_maximum_Line_Length(self):
        return self._maximum_Line_Length

    def get_minimum_Line_Length(self):
        return self._minimum_Line_Length

    def get_darkline(self):
        return self._darkline

    def get_overlap_resolution(self):
        # removed a stray packaging artifact that had been appended here
        return self._overlap_resolution
from ridge_detection.helper import Offset,threshold
from math import pi,floor,atan2,sqrt, ceil, cos,sin
from ridge_detection import linesUtil
from ridge_detection.basicGeometry import Line, Junction, Region
from operator import attrgetter
import logging
from ridge_detection.helper import DEBUGGING
# Maximum angle difference (pi/6, i.e. 30 degrees) tolerated when linking
# line points into contours.
MAX_ANGLE_DIFFERENCE = pi / 6.0
# Port of Java's Double.MAX_VALUE, kept so the translated algorithm can use
# the same sentinel value as the original code.
DOUBLE_MAX_VALUE_JAVA= 1.7976931348623157E308
"""
This table contains the three appropriate neighbor pixels that the linking algorithm must examine.
It is indexed by the octant the current line angle lies in, e.g., 0 if the angle in degrees lies within [-22.5,22.5].
"""
# dirtab[octant] -> three neighbor offset pairs to examine (presumably
# [row, col] order, matching lincoord(row, col, width) usage — confirm
# against compute_contours)
dirtab= [ [ [ 1, 0 ], [ 1, -1 ], [ 1, 1 ] ], [ [ 1, 1 ], [ 1, 0 ], [ 0, 1 ] ],
		[ [ 0, 1 ], [ 1, 1 ], [ -1, 1 ] ], [ [ -1, 1 ], [ 0, 1 ], [ -1, 0 ] ], [ [ -1, 0 ], [ -1, 1 ], [ -1, -1 ] ],
		[ [ -1, -1 ], [ -1, 0 ], [ 0, -1 ] ], [ [ 0, -1 ], [ -1, -1 ], [ 1, -1 ] ],
		[ [ 1, -1 ], [ 0, -1 ], [ 1, 0 ] ] ]
""" This table contains the two neighbor pixels that the linking algorithm should examine and mark as processed in case there are double responses """
# cleartab[octant] -> two neighbor offset pairs to mark as processed
cleartab = [ [ [ 0, 1 ], [ 0, -1 ] ], [ [ -1, 1 ], [ 1, -1 ] ],
		[ [ -1, 0 ], [ 1, 0 ] ], [ [ -1, -1 ], [ 1, 1 ] ], [ [ 0, -1 ], [ 0, 1 ] ], [ [ 1, -1 ], [ -1, 1 ] ],
		[ [ 1, 0 ], [ -1, 0 ] ], [ [ 1, 1 ], [ -1, -1 ] ] ]
class Crossref:
    """
    Helper record for the line-linking step (port of Crossref.java). It
    stores the image coordinates (x, y) of a point that could start a new
    line together with its filter response ``value``; ``done`` is flipped
    once the point has been processed. An array of these records is
    accumulated and sorted by decreasing value so the linking algorithm can
    quickly pick the next starting point.
    """

    def __init__(self, x=None, y=None, value=None, done=None):
        self.x = x
        self.y = y
        self.value = value
        self.done = done

    def compareTo(self, crossref0):
        """Java-style three-way comparison on ``value`` (descending order)."""
        if not isinstance(crossref0, Crossref):
            print("ERROR: The input of 'Crossref.compareTo' has to be an instance of the class 'Crossref'. (link.py->detect_lines)")
            logging.error(" The input of 'Crossref.compareTo' has to be an instance of the class 'Crossref'.(link.py->detect_lines)")
            exit(-1)
        if self.value > crossref0.value:
            return -1
        if self.value < crossref0.value:
            return 1
        return 0

    def __str__(self):
        return "x: {}\ty: {}\tvalue: {}\tdone: {}".format(self.x, self.y, self.value, self.done)
def interpolate_response( resp, x, y, px, py, width, height):
    """
    Compute the response of the operator with sub-pixel accuracy by using the
    facet model to interpolate the pixel accurate responses.

    A second-order 2D polynomial (facet model) is fitted to the 3x3
    neighborhood of responses around pixel (x, y) and evaluated at the
    sub-pixel position (px, py).

    :param resp: flat array of filter responses, indexed via linesUtil.lincoord
    :param x: row of the pixel whose neighborhood is interpolated
    :param y: column of the pixel whose neighborhood is interpolated
    :param px: sub-pixel row position at which to evaluate the fit
    :param py: sub-pixel column position at which to evaluate the fit
    :param width: image width
    :param height: image height
    :return: interpolated response at (px, py)
    """
    # sample the 3x3 neighborhood i1..i9 (row-major order); br/bc handle the
    # out-of-range row/column indices at the image border — presumably by
    # clamping or mirroring, confirm against linesUtil
    i1 = resp[linesUtil.lincoord(linesUtil.br(x - 1, height), linesUtil.bc(y - 1, width), width)]
    i2 = resp[linesUtil.lincoord(linesUtil.br(x - 1, height), y, width)]
    i3 = resp[linesUtil.lincoord(linesUtil.br(x - 1, height), linesUtil.bc(y + 1, width), width)]
    i4 = resp[linesUtil.lincoord(x, linesUtil.bc(y - 1, width), width)]
    i5 = resp[linesUtil.lincoord(x, y, width)]
    i6 = resp[linesUtil.lincoord(x, linesUtil.bc(y + 1, width), width)]
    i7 = resp[linesUtil.lincoord(linesUtil.br(x + 1, height), linesUtil.bc(y - 1, width), width)]
    i8 = resp[linesUtil.lincoord(linesUtil.br(x + 1, height), y, width)]
    i9 = resp[linesUtil.lincoord(linesUtil.br(x + 1, height), linesUtil.bc(y + 1, width), width)]
    # row sums (t1..t3) and column sums (t4..t6) of the neighborhood
    t1 = i1 + i2 + i3
    t2 = i4 + i5 + i6
    t3 = i7 + i8 + i9
    t4 = i1 + i4 + i7
    t5 = i2 + i5 + i8
    t6 = i3 + i6 + i9
    # least-squares facet-model coefficients of the polynomial
    # d + r*dr + c*dc + r^2*drr + r*c*drc + c^2*dcc
    d = (-i1 + 2 * i2 - i3 + 2 * i4 + 5 * i5 + 2 * i6 - i7 + 2 * i8 - i9) / 9
    dr = (t3 - t1) / 6
    dc = (t6 - t4) / 6
    drr = (t1 - 2 * t2 + t3) / 6
    dcc = (t4 - 2 * t5 + t6) / 6
    drc = (i1 - i3 - i7 + i9) / 4
    # offset of the sub-pixel position from the pixel center
    xx = px - x
    yy = py - y
    # evaluate the fitted polynomial at the offset
    return d + xx * dr + yy * dc + xx * xx * drr + xx * yy * drc + yy * yy * dcc
def closest_point(lx, ly, dx, dy, px, py):
    """
    Project the point (px, py) onto the line (lx, ly) + t * (dx, dy).

    :param lx: x coordinate of a point on the line
    :param ly: y coordinate of a point on the line
    :param dx: x component of the line direction
    :param dy: y component of the line direction
    :param px: x coordinate of the query point
    :param py: y coordinate of the query point
    :return: tuple (cx, cy, t) with the closest point on the line and the
        line parameter t; for a degenerate direction (dx = dy = 0) the
        anchor point itself is returned with t = 0
    """
    rel_x = px - lx
    rel_y = py - ly
    denom = dx * dx + dy * dy
    # orthogonal-projection coefficient; guard the zero-length direction
    t = (rel_x * dx + rel_y * dy) / denom if denom != 0 else 0
    return lx + t * dx, ly + t * dy, t
def interpolate_gradient(gradx, grady, px, py, width):
    """
    Bilinearly interpolate the gradient images ``gradx``/``grady`` (flat
    arrays of width ``width``, indexed via linesUtil.lincoord with px as the
    row coordinate) at the sub-pixel point (px, py).

    :return: tuple (gx, gy) with the interpolated gradient components
    """
    base_row = floor(px)
    base_col = floor(py)
    # fractional offsets inside the pixel cell
    frac_row = px % 1.0
    frac_col = py % 1.0

    def _sample(r, c):
        # fetch both gradient components at integer pixel (r, c)
        pos = linesUtil.lincoord(r, c, width)
        return gradx[pos], grady[pos]

    gx1, gy1 = _sample(base_row, base_col)
    gx2, gy2 = _sample(base_row + 1, base_col)
    gx3, gy3 = _sample(base_row, base_col + 1)
    gx4, gy4 = _sample(base_row + 1, base_col + 1)

    gx = (1 - frac_col) * ((1 - frac_row) * gx1 + frac_row * gx2) + frac_col * ((1 - frac_row) * gx3 + frac_row * gx4)
    gy = (1 - frac_col) * ((1 - frac_row) * gy1 + frac_row * gy2) + frac_col * ((1 - frac_row) * gy3 + frac_row * gy4)
    return gx, gy
def compute_contours(ismax, eigval, normx, normy, posx, posy, gradx, grady, contours, sigma, extend_lines,mode, width, height,junctions):
    """
    This function links the line points into lines. The input to this function are the response of the filter, i.e., the second directional derivative along
    (nx[l],ny[l]), contained in eigval[l], and the sub-pixel position of each line point, contained in (px[l],py[l]). The parameters low and high are the
    hysteresis thresholds for the linking, while width and height are the dimensions of the five float-images. The linked lines are returned in result, and the number of lines detected is returned in num_result
    :param ismax: per-pixel flag image (2/1 where a line point passed the high/low threshold, 0 elsewhere)
    :param eigval: filter response (second directional derivative) per pixel
    :param normx: row component of the line normal per pixel
    :param normy: column component of the line normal per pixel
    :param posx: sub-pixel row position of the line point per pixel
    :param posy: sub-pixel column position of the line point per pixel
    :param gradx: row component of the gradient (used only when extend_lines is True)
    :param grady: column component of the gradient (used only when extend_lines is True)
    :param contours: output list; detected Line objects are appended to it
    #:param num_result: was passed by reference in the Java code and has therefore been removed here
    :param sigma: Gaussian smoothing parameter; determines the line-extension length (2.5*sigma)
    :param extend_lines: if True, try to extend line ends to find additional junctions
    :param mode: linesUtil.MODE_LIGHT or linesUtil.MODE_DARK
    :param width: image width
    :param height: image height
    :param junctions: output list; detected Junction objects are appended to it
    :return: the number of detected contours (num_cont)
    """
    """
    stuff to know.
    Since it allocated the memory before inserting a new junc,line etc he had some variables like num_pnt to know how many elements eh had in the list in a given moment.
    We do that in real time without allocating extra memory hence we do not need this extra variable but we used len(obj) e.g. num_pnt==len(row)
    """
    from ridge_detection.width import bresenham
    i,k,j,m=0,0,0,0
    num_junc,num_cont = 0, 0
    end_angle, end_resp=0,0
    cont = []
    junc = []
    """The image indx is an index into the table of all pixels that possibly could be starting points for new lines. It is used to quickly determine the next starting point of a line"""
    indx = [0 for ii in range(width * height)]
    """ The image label contains information on the pixels that have been processed by the linking algorithm"""
    label = [0 for ii in range(width * height)]
    """ Select all pixels that can be starting points for lines """
    seg = Region()
    threshold(ismax, 2, width, height, seg)
    """ Count the number of possible starting points """
    area = 0
    for i in range(seg.num):
        area += seg.get_line(i).ce - seg.get_line(i).cb + 1
    """ Create the index of possible starting points """
    cross=list()
    for i in range(seg.num):
        rl= seg.get_line(i)
        for y in list(range(rl.cb,rl.ce+1)):
            pos = rl.r * width + y  # it is lincoord(row, col,width) of linesUtil.py
            cross.append(Crossref(x=rl.r, y=y, value=eigval[pos], done = False))
    """
    https://stackoverflow.com/questions/4010322/sort-a-list-of-class-instances-python. I use this python sort to simulate the "Crossref.java -> compareTo function used in java.util.Arrays.sort(cross)
    """
    # strongest responses first: linking starts at the most salient line points
    cross.sort(key=attrgetter('value'), reverse=True)
    if DEBUGGING is True:
        logging.debug("stampa valori dei crossRef:")
        for index,crossref in enumerate(cross):
            logging.debug(str(index)+"\t"+str(crossref))
    for i in range(area):
        indx[linesUtil.lincoord(cross[i].x, cross[i].y, width)] = i + 1
    """ Link lines points"""
    # indx_max: index of the next candidate in the sorted 'cross' list that has not been used yet
    indx_max,nextalpha,diff,diff2 = 0,0,0,0
    x,y = 0,0
    while True:
        row = list()
        col = list()
        angle = list()
        resp = list()
        """ Contour class unknown at this point; therefore assume both ends free """
        cls = linesUtil.COUNTOUR_DICT["cont_no_junc"]
        while indx_max < area and cross[indx_max].done:
            indx_max+=1
        """ Stop if no feasible starting point exists. """
        if indx_max == area:
            break
        maxx = cross[indx_max].x
        maxy = cross[indx_max].y
        if cross[indx_max].value == 0.0:
            break
        """ Add starting point to the line """
        pos = maxx * width + maxy  # it is lincoord(row, col,width) of linesUtil.py
        label[pos] = num_cont + 1
        if not indx[pos] == 0:
            cross[(indx[pos] - 1)].done = True
        row.append(posx[pos])
        col.append( posy[pos])
        """Select line direction"""
        nx = -normy[pos]
        ny = normx[pos]
        alpha = atan2(ny, nx)
        if alpha < 0.0:
            alpha += 2.0 * pi
        if alpha >= pi:
            alpha -= pi
        octant = int (floor(4.0 / pi * alpha + 0.5)) % 4
        """
        Select normal to the line. The normal points to the right of the line as the line is traversed from 0 to num-1. Since the points are sorted in reverse
        order before the second iteration, the first beta actually has to point to the left of the line!
        """
        beta = alpha + pi / 2.0
        if beta >= 2.0 * pi:
            beta -= 2.0 * pi
        angle.append( beta)
        resp.append(interpolate_response(eigval, maxx, maxy, posx[pos], posy[pos], width, height))
        """ Mark double responses as processed """
        for ii in [0,1]:
            nextx = maxx + cleartab[octant][ii][0]
            nexty = maxy + cleartab[octant][ii][1]
            if nextx < 0 or nextx >= height or nexty < 0 or nexty >= width:
                continue
            nextpos = linesUtil.lincoord(nextx, nexty, width)
            if ismax[nextpos] > 0:
                nx = -normy[nextpos]
                ny = normx[nextpos]
                nextalpha = atan2(ny, nx)
                if nextalpha < 0.0:
                    nextalpha += 2.0 * pi
                if nextalpha >= pi:
                    nextalpha -= pi
                diff = abs(alpha - nextalpha)
                if diff >= pi / 2.0:
                    diff = pi - diff
                if diff < MAX_ANGLE_DIFFERENCE:
                    label[nextpos] = num_cont + 1
                    if not indx[nextpos] == 0:
                        cross[(indx[nextpos] - 1)].done = True
        # it == 1: trace along the initial line direction; it == 2: trace the opposite way
        for it in [1,2]:
            if it ==1:
                """ Search along the initial line direction in the first iteration """
                x = maxx
                y = maxy
                pos = linesUtil.lincoord(x, y, width)
                nx = -normy[pos]
                ny = normx[pos]
                alpha = atan2(ny,nx)
                if alpha < 0.0:
                    alpha += 2.0 * pi
                if alpha >= pi:
                    alpha -= pi
                last_octant = int(floor(4.0 / pi * alpha + 0.5)) % 4
                last_beta = alpha + pi/ 2.0
                if last_beta >= 2.0 * pi:
                    last_beta -= 2.0 * pi
            else:
                """ Search in the opposite direction in the second iteration """
                x = maxx
                y = maxy
                pos = linesUtil.lincoord(x, y, width)
                nx = -normy[pos]
                ny = normx[pos]
                alpha = atan2(ny, nx)
                if alpha < 0.0:
                    alpha += 2.0 * pi
                if alpha >= pi:
                    alpha -= pi
                last_octant = int(floor(4.0 / pi * alpha + 0.5)) % 4 + 4
                last_beta = alpha + pi / 2.0
                if last_beta >= 2.0 * pi:
                    last_beta -= 2.0 * pi
            if it==2:
                """ Sort the points found in the first iteration in reverse """
                row.reverse()
                col.reverse()
                angle.reverse()
                resp.reverse()
            """ Now start adding appropriate neighbors to the line """
            while True:
                """ Orient line direction w.r.t. the last line direction. """
                pos = x * width + y  # it is lincoord(row, col,width) of linesUtil.py
                nx = -normy[pos]
                ny = normx[pos]
                px = posx[pos]
                py = posy[pos]
                """ Orient line direction w.r.t. the last line direction """
                alpha = atan2(ny,nx)
                if alpha < 0.0:
                    alpha += 2.0 * pi
                if alpha >= pi:
                    alpha -= pi
                octant = int(floor(4.0 / pi * alpha + 0.5)) % 4
                if octant ==0:
                    if 3<= last_octant <= 5 :
                        octant = 4
                if octant ==1:
                    if 4<= last_octant <= 6 :
                        octant = 5
                if octant ==2:
                    # NOTE(review): the lower bound 4 here breaks the pattern of the neighboring
                    # cases (0 -> 3..5, 1 -> 4..6); verify against the Java original (expected 5..7)
                    if 4<= last_octant <= 7 :
                        octant = 6
                if octant ==3:
                    if last_octant ==0 or last_octant>=6:
                        octant = 7
                last_octant = octant
                """ Determine appropriate neighbor """
                nextismax = False
                nexti = 1
                mindiff = DOUBLE_MAX_VALUE_JAVA
                for i in [0,1,2]:
                    nextx = x + dirtab[octant][i][0]
                    nexty = y + dirtab[octant][i][1]
                    if nextx < 0 or nextx >= height or nexty < 0 or nexty >= width:
                        continue
                    nextpos = nextx * width + nexty  # it is lincoord(row, col,width) of linesUtil.py
                    if ismax[nextpos] == 0:
                        continue
                    dx = posx[nextpos] - px
                    dy = posy[nextpos] - py
                    dist = sqrt(dx * dx + dy * dy)
                    nx = -normy[nextpos]
                    ny = normx[nextpos]
                    nextalpha = atan2(ny,nx)
                    if nextalpha < 0.0:
                        nextalpha += 2.0 * pi
                    if nextalpha >= pi:
                        nextalpha -= pi
                    diff = abs(alpha - nextalpha)
                    if diff >= pi / 2.0:
                        diff = pi - diff
                    diff += dist
                    if diff < mindiff :
                        mindiff = diff
                        nexti = i
                    if not ismax[nextpos] == 0:
                        nextismax = True
                """ Mark double responses as processed """
                for i in [0,1]:
                    nextx = x + cleartab[octant][i][0]
                    nexty = y + cleartab[octant][i][1]
                    if nextx < 0 or nextx >= height or nexty < 0 or nexty >= width:
                        continue
                    nextpos = nextx * width + nexty  # it is lincoord(row, col,width) of linesUtil.py
                    if ismax[nextpos] > 0:
                        nextalpha = atan2(normx[nextpos], -normy[nextpos])
                        if nextalpha < 0.0:
                            nextalpha += 2.0 * pi
                        if nextalpha >= pi:
                            nextalpha -= pi
                        diff = abs(alpha - nextalpha)
                        if diff >= pi / 2.0:
                            diff = pi - diff
                        if diff < MAX_ANGLE_DIFFERENCE:
                            label[nextpos] = num_cont + 1
                            if indx[nextpos] != 0:
                                cross[(indx[nextpos] - 1)].done = True
                """ Have we found the end of the line? """
                if not nextismax:
                    break
                """ If not, add the neighbor to the line """
                x += dirtab[octant][nexti][0]
                y += dirtab[octant][nexti][1]
                """
                29/08/2019
                https://github.com/thorstenwagner/ij-ridgedetection/issues/37
                There is a known bug in the java implementation. when it allocates more memory at the 'resp' array ln613-617 it set to 0 all the old value of the vector.
                It will be fixed BUT now I'm just translating the code and I want to reproduce the same results ... I introduce the bug in the code
                """
                pos = x * width + y  # it is lincoord(row, col,width) of linesUtil.py
                row.append( posx[pos])
                col.append( posy[pos])
                """ * Orient normal to the line direction w.r.t. the last normal."""
                nx = normx[pos]
                ny = normy[pos]
                beta = atan2(ny,nx)
                if beta < 0.0:
                    beta += 2.0 * pi
                if beta >= pi:
                    beta -= pi
                diff1 = abs(beta - last_beta)
                if diff1 >= pi:
                    diff1 = 2.0 * pi - diff1
                diff2 =abs(beta+pi-last_beta)
                if diff2>=pi:
                    diff2 = 2*pi-diff2
                if diff1 < diff2:
                    angle.append( beta)
                    last_beta = beta
                else:
                    angle.append( beta + pi)
                    last_beta = beta + pi
                resp.append(interpolate_response(eigval, x, y, posx[pos], posy[pos], width, height))
                """ If the appropriate neighbor is already processed a junction point is found """
                if label[pos] > 0:
                    """ Look for the junction point in the other line. """
                    k = label[pos] - 1
                    if k == num_cont:
                        """ Line intersects itself """
                        for j in range(len(row)):
                            if row[j]==posx[pos] and col[j]==posy[pos]:
                                if j==0:
                                    """contour is closed"""
                                    cls = linesUtil.COUNTOUR_DICT["cont_closed"]
                                    row.reverse()
                                    col.reverse()
                                    angle.reverse()
                                    resp.reverse()
                                    it =2
                                else:
                                    if it==2:
                                        """ Determine contour class """
                                        cls = linesUtil.COUNTOUR_DICT["cont_both_junc"] if cls == linesUtil.COUNTOUR_DICT["cont_start_junc"] else linesUtil.COUNTOUR_DICT["cont_end_junc"]
                                        """ Index j is the correct index """
                                        junc.append( Junction(cont1=num_cont, cont2=num_cont, pos=j, x =posx[pos], y=posy[pos]) )
                                    else:
                                        """ Determine contour class """
                                        cls = linesUtil.COUNTOUR_DICT["cont_start_junc"]
                                        """
                                        Index len(row)-1-j is the correct index since the line is going to be sorted in reverse
                                        """
                                        junc.append( Junction(cont1=num_cont, cont2=num_cont, pos= len(row) - 1 - j, x =posx[pos], y=posy[pos]) )
                                    num_junc += 1
                                break
                        """ Mark this case as being processed for the algorithm below """
                        j= -1
                    else:
                        for j in range(cont[k].num):
                            if len(cont)>k and len(cont[k].row )>0 and len(cont[k].col )>0 and cont[k].row[j] == posx[pos] and cont[k].col[j] == posy[pos]:
                                break
                        """ If no point can be found on the other line a double response must have occured. In this case, find the nearest point on the other line and add it to the current line"""
                        # NOTE(review): after a Python for-loop j ends at cont[k].num - 1, so this branch
                        # is unreachable (in the Java original the loop counter could reach num) — verify
                        if j==cont[k].num:
                            mindist=DOUBLE_MAX_VALUE_JAVA
                            j= -1
                            for l in range(cont[k].num):
                                dx = posx[pos] - cont[k].row[l]
                                dy = posy[pos] - cont[k].col[l]
                                dist = sqrt(dx * dx + dy * dy)
                                if dist<mindist:
                                    mindist=dist
                                    j=l
                        """ Add the point with index j to the current line """
                        """
                        29/08/2019
                        https://github.com/thorstenwagner/ij-ridgedetection/issues/37
                        There is a known bug in the java implementation. when it allocates more memory at the 'resp' array ln768-772 it set to 0 all the old value of the vector.
                        It will be fixed BUT now I'm just translating the code and I want to reproduce the same results ... I introduce the bug in the code
                        """
                        row.append( cont[k].row[j])
                        col.append(cont[k].col[j])
                        beta = cont[k].angle[j]
                        if beta >= pi:
                            beta -= pi
                        diff1 = abs(beta - last_beta)
                        if diff1 >= pi:
                            diff1 = 2.0 * pi - diff1
                        diff2 = abs(beta + pi - last_beta)
                        if diff2 >= pi:
                            diff2 = 2.0 * pi - diff2
                        if diff1 < diff2:
                            angle.append( beta)
                        else:
                            angle.append(beta + pi)
                        resp.append( cont[k].response[j])
                        """ Add the junction point only if it is not one of the other line's endpoints """
                        if 0<j<cont[k].num:
                            """ Determine contour class """
                            if it ==1:
                                cls = linesUtil.COUNTOUR_DICT["cont_start_junc"]
                            elif cls == linesUtil.COUNTOUR_DICT["cont_start_junc"]:
                                cls= linesUtil.COUNTOUR_DICT["cont_both_junc"]
                            else:
                                cls = linesUtil.COUNTOUR_DICT["cont_end_junc"]
                            """ Add the new junction """
                            junc.append(Junction(cont1=k, cont2=num_cont, pos=j, x=row[-1], y=col[-1]))
                            num_junc += 1
                    break
                label[pos]=num_cont+1
                if not indx[pos]==0:
                    cross[(indx[pos] - 1)].done = True
        if len(row)>1:
            """ Only add lines with at least two points """
            cont.append(Line(num=len(row), row = row, col = col, angle = angle, response = resp, cont_class=cls))
            num_cont +=1
        else:
            """ Delete the point from the label image; we can use maxx and maxy as the coordinates in the label image in this case """
            for i in [-1,0,1]:
                for j in [-1, 0, 1]:
                    pos = linesUtil.lincoord(linesUtil.br(maxx + i, height), linesUtil.bc(maxy + j, width), width)
                    if label[pos] == num_cont+1:
                        label[pos]=0
    """ Now try to extend the lines at their ends to find additional junctions """
    if extend_lines is True:
        """ Sign by which the gradient has to be multiplied below """
        s =1 if mode == linesUtil.MODE_LIGHT else -1
        length = 2.5*sigma
        max_line = int (ceil(length * 3))
        line = [Offset() for ii in range(max_line)]
        exty = [0 for ii in range(max_line)]
        extx = [0 for ii in range(max_line)]
        for i,tmp_cont in enumerate(cont):
            num_pnt = tmp_cont.num
            if tmp_cont.num == 1:
                continue
            if tmp_cont.cont_class == linesUtil.COUNTOUR_DICT["cont_closed"]:
                continue
            trow = tmp_cont.row
            tcol = tmp_cont.col
            tangle = tmp_cont.angle
            tresp = tmp_cont.response
            """ Check both ends of the line (it==-1: start, it==1: end) """
            for it in [-1, 1]:
                """ Determine the direction of the search line. This is done by using the normal to the line (angle). Since this normal may point to the left of the line (see
                below) we have to check for this case by comparing the normal to the direction of the line at its respective end point """
                if it == -1:
                    """ start point of the line"""
                    if tmp_cont.cont_class == linesUtil.COUNTOUR_DICT["cont_start_junc"] or tmp_cont.cont_class == linesUtil.COUNTOUR_DICT["cont_both_junc"] :
                        continue
                    dx = trow[1] - trow[0]
                    dy = tcol[1] - tcol[0]
                    alpha = tangle[0]
                    nx = cos(alpha)
                    ny = sin(alpha)
                    if nx * dy - ny * dx < 0:
                        """ Turn the normal by +90 degrees """
                        mx = -ny
                        my = nx
                    else:
                        """ Turn the normal by -90 degrees """
                        mx = ny
                        my = -nx
                    px = trow[0]
                    py = tcol[0]
                    response = tresp[0]
                else:
                    """ end point of the line"""
                    if tmp_cont.cont_class == linesUtil.COUNTOUR_DICT["cont_end_junc"] or tmp_cont.cont_class == linesUtil.COUNTOUR_DICT["cont_both_junc"] :
                        continue
                    dx = trow[(num_pnt - 1)] - trow[(num_pnt - 2)]
                    dy = tcol[(num_pnt - 1)] - tcol[(num_pnt - 2)]
                    alpha = tangle[(num_pnt - 1)]
                    nx = cos(alpha)
                    ny = sin(alpha)
                    if nx * dy - ny * dx < 0:
                        """Turn the normal by -90 degrees"""
                        mx = ny
                        my = -nx
                    else:
                        """Turn the normal by +90 degrees"""
                        mx = -ny
                        my = nx
                    px = trow[(num_pnt - 1)]
                    py = tcol[(num_pnt - 1)]
                    response = tresp[(num_pnt - 1)]
                """ Determine the current pixel and calculate the pixels on the search line """
                x = int (floor(px + 0.5))
                y = int (floor(py + 0.5))
                dx = px - x
                dy = py - y
                num_line = bresenham(mx, my, dx, dy, length, line)
                """ Now determine whether we can go only uphill (bright lines) or downhill (dark lines) until we hit another line"""
                num_add = 0
                add_ext = False
                for k in range(num_line):
                    nextx = x + line[k].x
                    nexty = y + line[k].y
                    nextpx, nextpy, t = closest_point(px, py, mx, my, nextx, nexty)
                    """ Ignore points before or less than half a pixel away from the true end point of the line """
                    if t<=0.5:
                        continue
                    """ Stop if the gradient can't be interpolated any more or if the next point lies outside the image """
                    if nextpx < 0 or nextpy < 0 or nextpx >= height - 1 or nextpy >= width - 1 or nextx < 0 or nexty < 0 or nextx >= height or nexty >= width:
                        break
                    gx,gy=interpolate_gradient(gradx, grady, nextpx, nextpy, width)
                    """ Stop if we can't go uphill anymore. This is determined by the dot product of the line direction and the gradient. If it is smaller than 0 we go downhill (reverse for dark lines) """
                    nextpos = nextx * width + nexty  # it is lincoord(row, col,width) of linesUtil.py
                    if s * (mx * gx + my * gy) < 0 and label[nextpos] == 0:
                        break
                    """ Have we hit another line? """
                    if label[nextpos] > 0:
                        m = label[nextpos] - 1
                        """ Search for the junction point on the other line """
                        mindist = DOUBLE_MAX_VALUE_JAVA
                        j= -1
                        for l in range(cont[m].num):
                            dx = nextpx - cont[m].row[l]
                            dy = nextpy - cont[m].col[l]
                            dist = sqrt(dx * dx + dy * dy)
                            if dist<mindist:
                                mindist = dist
                                j = l
                        """ This should not happen... But better safe than sorry... """
                        if mindist>3:
                            break
                        extx[num_add] = cont[m].row[j]
                        exty[num_add] = cont[m].col[j]
                        end_resp = cont[m].response[j]
                        end_angle = cont[m].angle[j]
                        beta = end_angle
                        if beta >= pi:
                            beta -= pi
                        diff1 = abs(beta - alpha)
                        if diff1 >= pi:
                            diff1 = 2.0 * pi - diff1
                        diff2 = abs(beta + pi - alpha)
                        if diff2 >= pi:
                            diff2 = 2.0 * pi - diff2
                        end_angle = beta if diff1 < diff2 else beta + pi
                        num_add += 1
                        add_ext = True
                        break
                    else:
                        extx[num_add] = nextpx
                        exty[num_add] = nextpy
                        num_add+=1
                if add_ext is True:
                    """ Make room for the new points """
                    num_pnt+=num_add
                    if it == -1:
                        """ Move points on the line up num_add places"""
                        trow = [0 for i in range(num_add)] + trow
                        tcol = [0 for i in range(num_add)] + tcol
                        tangle = [0 for i in range(num_add)] + tangle
                        tresp = [0 for i in range(num_add)] + tresp
                        """ Insert points at the beginning of the line. """
                        for index in range(num_add):
                            trow[index] = extx[(num_add - 1 - index)]
                            tcol[index] = exty[(num_add - 1 - index)]
                            tangle[index] = alpha
                            tresp[index] = response
                        tangle[0] = end_angle
                        tresp[0] =end_resp
                        """ Adapt indices of the previously found junctions """
                        for k in range(len(junc)):
                            if junc[k].cont1 == i:
                                junc[k].pos += num_add
                    else:
                        """ Insert points at the end of the line """
                        for k in range(num_add):
                            trow.append( extx[k])
                            tcol.append(exty[k])
                            tangle.append(alpha)
                            tresp.append(response)
                        tangle[(num_pnt - 1)] = end_angle
                        tresp[(num_pnt - 1)] = end_resp
                    tmp_cont.row = trow
                    tmp_cont.col = tcol
                    tmp_cont.angle = tangle
                    tmp_cont.response = tresp
                    tmp_cont.num = num_pnt
                    """ Add the junction point only if it is not one of the other line's endpoints """
                    if 0< j < cont[m].num - 1:
                        if it == -1:
                            if tmp_cont.cont_class == linesUtil.COUNTOUR_DICT["cont_end_junc"]:
                                tmp_cont.cont_class= linesUtil.COUNTOUR_DICT["cont_both_junc"]
                            else:
                                tmp_cont.cont_class= linesUtil.COUNTOUR_DICT["cont_start_junc"]
                        else:
                            if tmp_cont.cont_class == linesUtil.COUNTOUR_DICT["cont_start_junc"]:
                                tmp_cont.cont_class= linesUtil.COUNTOUR_DICT["cont_both_junc"]
                            else:
                                tmp_cont.cont_class= linesUtil.COUNTOUR_DICT["cont_end_junc"]
                        index_trow= 0
                        index_tcol =0
                        if it != -1:
                            index_trow = num_pnt - 1
                            index_tcol = num_pnt - 1
                        junc.append(Junction(cont1=m, cont2=i, pos=j, x=trow[index_trow], y=tcol[index_tcol]))
                        num_junc+=1
            """ overwrite the che cont of tmp_cont"""
            cont[i] = tmp_cont
    """
    Done with linking. Now split the lines at the junction points
    NB:
    I modified a lot of logic.
    """
    junc.sort(key=attrgetter('pos'))
    # NOTE(review): the step 'k if k>0 else 1' reuses the leftover value of k from the
    # linking phase above — looks fragile; verify against the Java original
    for i in range(0,num_junc,k if k>0 else 1):
        j =junc[i].cont1
        tmp_cont = cont[ j ]
        num_pnt = tmp_cont.num
        """ Count how often line j needs to be split """
        counter=0
        for index in range(num_junc):
            if i + index < num_junc and junc[(i + index)].cont1 == j :
                counter+=1
        if counter ==1 and len(tmp_cont.row)>num_pnt-1 and len(tmp_cont.col)>num_pnt-1 and tmp_cont.row[0] == tmp_cont.row[(num_pnt - 1)] and tmp_cont.col[0] == tmp_cont.col[(num_pnt - 1)]:
            """ If only one junction point is found and the line is closed it only needs to be rearranged cyclically, but not split """
            begin = junc[i].pos
            trow = tmp_cont.row
            tcol = tmp_cont.col
            tangle = tmp_cont.angle
            tresp = tmp_cont.response
            tmp_cont.row = [0 for ii in range(num_pnt)]
            tmp_cont.col = [0 for ii in range(num_pnt)]
            tmp_cont.angle = [0 for ii in range(num_pnt)]
            tmp_cont.response = [0 for ii in range(num_pnt)]
            for l in range(num_pnt):
                pos = begin+l
                """ Skip starting point so that it is not added twice """
                if pos >= num_pnt:
                    pos = begin + l - num_pnt + 1
                tmp_cont.row[l] = trow[pos]
                tmp_cont.col[l] = tcol[pos]
                tmp_cont.angle[l] = tangle[pos]
                tmp_cont.response[l] = tresp[pos]
            """ Modify contour class """
            tmp_cont.cont_class= linesUtil.COUNTOUR_DICT["cont_both_junc"]
        else:
            """ Otherwise the line has to be split """
            for l in range(counter):
                """
                I CHANGED MASSIVELY THE CODE. Because that the cont_class could be different, compared to the java code.
                See https://github.com/thorstenwagner/ij-ridgedetection/issues/39
                In the Java code we have at least 100 value in the 'junc' variable, that are init with default values.
                These values are "really" filled starting from the last position, hence in some case could be that
                in this assignment https://github.com/thorstenwagner/ij-ridgedetection/blob/master/src/main/java/de/biomedical_imaging/ij/steger/Link.java#L1202 it assignes the default values instead of a real value
                This situation could lead to unexpected behavior.
                """
                begin = 0 if l == 0 else junc[(i + l - 1)].pos
                end = 0
                # NOTE(review): l ranges over range(counter), so l == counter never holds
                # and the two branches guarded by it are dead (kept from the Java port)
                if l == counter:
                    end = tmp_cont.num - 1
                elif l != counter and i+l>0:  # workaround. The idea is to avoid to consider the same starting junction
                    end = junc[(i + l)].pos
                if end==begin and counter>1:
                    """ Do not add one point segments """
                    continue
                """ select contour class"""
                cls = linesUtil.COUNTOUR_DICT["cont_both_junc"]
                if l == 0:
                    if tmp_cont.cont_class == linesUtil.COUNTOUR_DICT["cont_start_junc"] or tmp_cont.cont_class == linesUtil.COUNTOUR_DICT["cont_both_junc"]:
                        cls= linesUtil.COUNTOUR_DICT["cont_both_junc"]
                    else:
                        cls= linesUtil.COUNTOUR_DICT["cont_end_junc"]
                elif l==counter:
                    if tmp_cont.cont_class == linesUtil.COUNTOUR_DICT["cont_end_junc"] or tmp_cont.cont_class == linesUtil.COUNTOUR_DICT["cont_both_junc"]:
                        cls= linesUtil.COUNTOUR_DICT["cont_both_junc"]
                    else:
                        cls= linesUtil.COUNTOUR_DICT["cont_start_junc"]
                """
                Becuase a weird implementation in the java code I cannot figure out always a valid value for 'end'. it leads to duplicate values in the cont.
                It happens in the java code too (but over differents values.
                To avoid it I implemented this check
                """
                has_to_be_append = True
                for c in cont:
                    if tmp_cont.row ==c.row:
                        has_to_be_append =False
                        tmp_cont.cont_class=cls
                        break
                if has_to_be_append is True:
                    cont.append(Line( num=len(tmp_cont.row), row = tmp_cont.row, col = tmp_cont.col, angle = tmp_cont.angle, response = tmp_cont.response, cont_class=cls))
                    num_cont+=1
                #cont[j] = cont[--num_cont]; i d not need it becuase it sorted a 100elemetns vector where all are defualt value and just the last 2 not. Here I append the new element, hence they are already in the correct position
    """ Finally, check whether all angles point to the right of the line """
    for i in range(num_cont):
        tmp_cont = cont[i]
        num_pnt = tmp_cont.num
        if num_pnt > 1:
            trow = tmp_cont.row
            tcol = tmp_cont.col
            tangle = tmp_cont.angle
            """ One point of the contour is enough to determine the orientation """
            k = int((num_pnt - 1) / 2)
            if len(tangle)>k and len(trow)>k+1 and len(tcol)>k+1:
                """ The next few lines are ok because lines have at least two points """
                dx = trow[(k + 1)] - trow[k]
                dy = tcol[(k + 1)] - tcol[k]
                nx = cos(tangle[k])
                ny = sin(tangle[k])
                """ If the angles point to the left of the line they have to be adapted. The orientation is determined by looking at the z-component of the cross-product of (dx,dy,0) and (nx,ny,0) """
                if nx * dy - ny * dx < 0:
                    for j in range(num_pnt):
                        tangle[j] += pi
                        if tangle[j] >= 2*pi:
                            tangle[j] -= 2*pi
    for c in cont:
        if c is not None and c.cont_class is not None:
            c.frame=contours[0].father_frame if len(contours)>0 else None
            contours.append(c)
    for jun in junc:
        if jun is not None and not(jun.cont1==0 and jun.cont2==0):
            junctions.append(jun)
    return num_cont
from math import ceil
""" list of constant present in LineUtils.java"""
DERIV_R = 1 # Derivative in row direction
DERIV_C = 2 # Derivative in column direction
DERIV_RR = 3 # Second derivative in row direction
DERIV_RC = 4 # Second derivative in row and column direction
DERIV_CC = 5 # Second derivative in column direction
MODE_LIGHT = 1 # Extract bright lines
MODE_DARK = 2 # Extract dark lines
INITIAL_SIZE = 100
REALLOC_FACTOR = 2
MAX_SIZE_MASK_0 = 3.09023230616781 # Size for Gaussian mask
MAX_SIZE_MASK_1 = 3.46087178201605 # Size for 1st derivative mask
MAX_SIZE_MASK_2 = 3.82922419517181 # Size for 2nd derivative mask
ERR_SOR = "Sigma out of range:"
COUNTOUR_DICT= {
'cont_no_junc': 0, # The cont no junc.
'cont_start_junc': 1, # The cont start junc ... no end point is a junction
'cont_end_junc': 2, # The cont end junc ... only the start point of the line is a junction
'cont_both_junc': 3, # The cont both junc ... both end points of the line are junctions
'cont_closed': 4 # the contour is closed
}
""" functions declared in LineUtils.java"""
def mask_size(maximum, sigma):
    """
    Compute the radius (largest index) of a convolution mask.

    :param maximum: maximum mask-size factor (one of the MAX_SIZE_MASK_* constants)
    :param sigma: Gaussian smoothing parameter
    :return: the mask radius as an integer, ceil(maximum * sigma)
    """
    product = maximum * sigma
    return int(ceil(product))
def lincoord(row, col,width):
    """
    Map (row, col) coordinates of a row-major image onto the index of its
    flattened one-dimensional representation.

    :param row: row coordinate
    :param col: column coordinate
    :param width: image width (number of columns)
    :return: the flat index row * width + col
    """
    flat_index = row * width + col
    return flat_index
def br(row,height):
    """Mirror a row coordinate at the image borders.

    In-range rows are returned unchanged; rows below 0 or at/above *height*
    are reflected back into [0, height).
    """
    if 0 <= row < height:
        return row
    if row < 0:
        return -row
    # row >= height: reflect around the last row (height - 1)
    return 2 * (height - 1) - row
def bc(col, width):
    """Mirror a column coordinate at the image borders.

    In-range columns are returned unchanged; columns below 0 or at/above
    *width* are reflected back into [0, width).
    """
    if 0 <= col < width:
        return col
    if col < 0:
        return -col
    # col >= width: reflect around the last column (width - 1)
    return 2 * (width - 1) - col
from math import exp
from ridge_detection.helper import getNormal
from ridge_detection import linesUtil
import numpy as np
""" 1/sqrt(2*PI) """
SQRT_2_PI_INV = 0.398942280401432677939946059935
def phi0(x,sigma):
    """
    Integral of the Gaussian function (normal CDF evaluated at x / sigma).

    :param x: evaluation point
    :param sigma: standard deviation of the Gaussian
    :return: the cumulative value getNormal(x / sigma)
    """
    scaled = x / sigma
    return getNormal(scaled)
def phi1(x,sigma):
    """
    The Gaussian density with standard deviation sigma, evaluated at x.

    :param x: evaluation point
    :param sigma: standard deviation of the Gaussian
    :return: the density value
    """
    u = x / sigma
    return SQRT_2_PI_INV / sigma * exp(-0.5 * u * u)
def phi2(x,sigma):
    """
    First derivative of the Gaussian density with standard deviation sigma.

    :param x: evaluation point
    :param sigma: standard deviation of the Gaussian
    :return: the derivative value
    """
    u = x / sigma
    sigma_cubed = pow(sigma, 3.0)
    return -x * SQRT_2_PI_INV / sigma_cubed * exp(-0.5 * u * u)
"""
Functions to compute the one-dimensional convolution masks of the 0th, 1st, and 2nd derivative of the Gaussian kernel for a certain smoothing level given
by sigma. The mask is allocated by the function and given as the return value. The caller must ensure that this memory is freed. The output is
intended to be used as an array with range [-num:num]. Therefore, the caller should add num to the return value. Examples for the calling sequence can be
found in convolve_gauss. Examples for the usage of the masks are given in convolve_rows_gauss and convolve_cols_gauss.
"""
def _compute_gauss_mask_0(sigma):
    """
    Build the 1-D Gaussian smoothing mask for the given sigma.

    :param sigma: Gaussian smoothing parameter
    :return: (n, mask) where n is the mask radius and mask has length 2*n + 1
    """
    n = linesUtil.mask_size(linesUtil.MAX_SIZE_MASK_0, sigma)
    mask = np.zeros(2 * n + 1)
    # interior entries: integrate the Gaussian over each unit pixel interval
    for i in range(1 - n, n):
        mask[n + i] = phi0(-i + 0.5, sigma) - phi0(-i - 0.5, sigma)
    # endpoint entries absorb the remaining tails of the distribution
    mask[0] = 1.0 - phi0(n - 0.5, sigma)
    mask[2 * n] = phi0(-n + 0.5, sigma)
    return n, mask
def _compute_gauss_mask_1(sigma):
    """
    Build the 1-D mask of the first derivative of the Gaussian for the given sigma.

    :param sigma: Gaussian smoothing parameter
    :return: (n, mask) where n is the mask radius and mask has length 2*n + 1
    """
    n = linesUtil.mask_size(linesUtil.MAX_SIZE_MASK_1, sigma)
    mask = np.zeros(2 * n + 1)
    # interior entries: integrate the Gaussian density over each unit pixel interval
    for i in range(1 - n, n):
        mask[n + i] = phi1(-i + 0.5, sigma) - phi1(-i - 0.5, sigma)
    # endpoint entries pick up the tail contributions
    mask[0] = -phi1(n - 0.5, sigma)
    mask[2 * n] = phi1(-n + 0.5, sigma)
    return n, mask
def _compute_gauss_mask_2(sigma):
    """
    Build the 1-D mask of the second derivative of the Gaussian for the given sigma.

    :param sigma: Gaussian smoothing parameter
    :return: (n, mask) where n is the mask radius and mask has length 2*n + 1
    """
    n = linesUtil.mask_size(linesUtil.MAX_SIZE_MASK_2, sigma)
    mask = np.zeros(2 * n + 1)
    # interior entries: integrate the first derivative over each unit pixel interval
    for i in range(1 - n, n):
        mask[n + i] = phi2(-i + 0.5, sigma) - phi2(-i - 0.5, sigma)
    # endpoint entries pick up the tail contributions
    mask[0] = - phi2(n - 0.5, sigma)
    mask[2 * n] = phi2(-n + 0.5, sigma)
    return n, mask
"""
Convolve an image with the derivatives of a Gaussian smoothing kernel. Since all of the masks are separable, this is done in two steps in the function
convolve_gauss. Firstly, the rows of the image are convolved by an appropriate one-dimensional mask in convolve_rows_gauss, yielding an
intermediate float-image h. Then the columns of this image are convolved by another appropriate mask in convolve_cols_gauss to yield the final result k.
At the border of the image the gray values are mirrored.
"""
def convolve_rows_gauss(image, mask, n, h, width, height) :
    """
    Convolve the rows of a flattened (row-major) image with a 1-D mask.

    "Row" convolution here means each output pixel combines its vertical
    neighbors (same column, rows r-n .. r+n); border rows are handled by
    mirroring via linesUtil.br.
    :param image: input image as a flat sequence of width*height values
    :param mask: 1-D convolution mask of length 2*n + 1
    :param n: mask radius
    :param h: output buffer (flat, width*height); written in place
    :param width: image width
    :param height: image height
    :return: None (the result is stored into h)
    """
    """ inner region"""
    r_values = np.arange(n, height - n)
    # l enumerates r * width + c for every inner row r and every column c;
    # the stride-width slice picks the vertical neighbors of pixel l
    for l in np.repeat(r_values, width) * width + np.tile(np.arange(width), len(r_values)):
        h[l] = np.sum(np.multiply(image[l-n*width:l+(n+1)*width:width],mask[0:2*n+1]))
    """" Border regions """
    for r in list(range(0, n)):
        for c in list(range(0, width)):
            tot = 0.0
            l = r * width + c  # it is lincoord(row, col,width) of linesUtil.py
            for j in list(range(-n, n + 1)):
                tot += (image[linesUtil.lincoord(linesUtil.br(r + j, height), c, width)]) * mask[j + n]
            h[l] = tot
    for r in list(range(height- n, height)):
        for c in list(range(0, width)):
            tot = 0.0
            l = r * width + c
            for j in list(range(-n, n + 1)):
                tot += (image[linesUtil.lincoord(linesUtil.br(r + j, height), c, width)]) * mask[j + n]
            h[l] = tot
def convolve_cols_gauss(h, mask,n, k, width, height):
    """
    Convolve the columns of a flattened (row-major) image with a 1-D mask.

    "Column" convolution here means each output pixel combines its horizontal
    neighbors (same row, columns c-n .. c+n); border columns are handled by
    mirroring via linesUtil.bc.
    :param h: input image (flat, width*height), typically a convolve_rows_gauss result
    :param mask: 1-D convolution mask of length 2*n + 1
    :param n: mask radius
    :param k: output buffer (flat, width*height); written in place
    :param width: image width
    :param height: image height
    :return: None (the result is stored into k)
    """
    """ inner region """
    # Bug fix: the previous index construction enumerated c * height + r, which is
    # not a valid row-major index (r * width + c). For square images it left the
    # interior pixels of the first and last n rows untouched (stuck at 0), and for
    # non-square images it addressed the wrong pixels entirely. The enumeration
    # below yields r * width + c for every row r and every inner column c, so the
    # contiguous slice h[l-n:l+n+1] stays inside row r.
    c_values = np.arange(n, width - n)
    h = np.asarray(h)
    for l in np.repeat(np.arange(height), len(c_values)) * width + np.tile(c_values, height):
        k[l] = np.sum(np.multiply(h[l - n:l + n + 1], mask[0:2 * n + 1]))
    """ Border regions """
    for r in list(range(0, height)):
        for c in list(range(0, n)):
            tot = 0.0
            l = r * width + c
            for j in list(range(-n, n + 1)):
                tot += h[linesUtil.lincoord(r, linesUtil.bc(c + j, width), width)] * mask[j + n]
            k[l] = tot
    for r in list(range(0, height)):
        for c in list(range(width - n, width)):
            tot = 0.0
            l = r * width + c
            for j in list(range(-n, n + 1)):
                tot += h[linesUtil.lincoord(r, linesUtil.bc(c + j, width), width)] * mask[j + n]
            k[l] = tot
def convolve_gauss( image, width, height, sigma):
    """
    Convolve *image* (flat, row-major, width x height) separably with the
    Gaussian smoothing kernel and its first and second derivatives.

    :param image: input image as a flat sequence of width*height values
    :param width: image width
    :param height: image height
    :param sigma: Gaussian smoothing parameter
    :return: a list of five flat images, ordered as
             [DERIV_R, DERIV_C, DERIV_RR, DERIV_RC, DERIV_CC]
    """
    size = width * height
    # Separable 1-D masks: smoothing (0), first (1) and second (2) derivative.
    n0, mask0 = _compute_gauss_mask_0(sigma)
    n1, mask1 = _compute_gauss_mask_1(sigma)
    n2, mask2 = _compute_gauss_mask_2(sigma)
    # Row-wise convolutions; each is reused by several column passes below.
    conv_r0 = [0 for _ in range(size)]
    conv_r1 = [0 for _ in range(size)]
    conv_r2 = [0 for _ in range(size)]
    convolve_rows_gauss(image, mask0, n0, conv_r0, width, height)
    convolve_rows_gauss(image, mask1, n1, conv_r1, width, height)
    convolve_rows_gauss(image, mask2, n2, conv_r2, width, height)
    derivatives = [[0 for _ in range(size)] for _ in range(5)]
    # Column passes: (row result, column mask, mask radius, output slot).
    passes = (
        (conv_r1, mask0, n0, 0),  # linesUtil.DERIV_R
        (conv_r0, mask1, n1, 1),  # linesUtil.DERIV_C
        (conv_r2, mask0, n0, 2),  # linesUtil.DERIV_RR
        (conv_r1, mask1, n1, 3),  # linesUtil.DERIV_RC
        (conv_r0, mask2, n2, 4),  # linesUtil.DERIV_CC
    )
    for row_result, col_mask, radius, slot in passes:
        convolve_cols_gauss(row_result, col_mask, radius, derivatives[slot], width, height)
    return derivatives
import logging
from math import sqrt
from ridge_detection.linesUtil import MODE_LIGHT
from ridge_detection.basicGeometry import Line, Junction
from ridge_detection.convol import convolve_gauss
from ridge_detection.link import compute_contours
from ridge_detection.helper import DEBUGGING
"""
The pixel boundaries need to be enlarged slightly since in practice it frequently happens for neighboring pixels a and b that pixel a says a maximum
lies within pixel b and vice versa. This presents no problem since linking algoritm will take care of this.
"""
PIXEL_BOUNDARY = 0.6
def solve_linear(a, b):
    """
    Solve the linear equation a*x + b = 0.

    :param a: coefficient of x
    :param b: constant term
    :return: (x, 1) for the unique solution, or (None, 0) when a == 0
    """
    return (None, 0) if a == 0 else (-b / a, 1)
def compute_eigenvals( dfdrr, dfdrc, dfdcc, eigval, eigvec):
    """
    Compute eigenvalues and eigenvectors of the 2x2 symmetric Hessian
    [[dfdrr, dfdrc], [dfdrc, dfdcc]] by a single Jacobi rotation, and store
    them sorted by descending absolute value (ties: the negative one first).

    :param dfdrr: second derivative in row direction
    :param dfdrc: mixed second derivative
    :param dfdcc: second derivative in column direction
    :param eigval: output list of length 2; written in place
    :param eigvec: output 2x2 nested list; eigvec[i] is the eigenvector of eigval[i]
    :return: None (results are stored into eigval and eigvec)
    """
    c, s = 1.0, 0.0
    e1, e2 = dfdrr, dfdcc
    if dfdrc != 0.0:
        # Jacobi rotation angle that annihilates the off-diagonal entry
        theta = 0.5 * (dfdcc - dfdrr) / dfdrc
        t = 1.0 / (abs(theta) + sqrt(theta * theta + 1.0))
        if theta < 0.0:
            t = -t
        c = 1.0 / sqrt(t * t + 1.0)
        s = t * c
        e1 = dfdrr - t * dfdrc
        e2 = dfdcc + t * dfdrc
    n1, n2 = c, -s
    # Order: larger absolute eigenvalue first; on a tie, the smaller (more
    # negative) eigenvalue takes the first slot.
    a1, a2 = abs(e1), abs(e2)
    e1_first = a1 > a2 or (a1 == a2 and e1 < e2)
    if e1_first:
        eigval[0], eigval[1] = e1, e2
        eigvec[0][0], eigvec[0][1] = n1, n2
        eigvec[1][0], eigvec[1][1] = -n2, n1
    else:
        eigval[0], eigval[1] = e2, e1
        eigvec[0][0], eigvec[0][1] = -n2, n1
        eigvec[1][0], eigvec[1][1] = n1, n2
def compute_line_points(ku, ismax, ev, nx, ny, px, py, width, height, low, high, mode):
"""
For each point in the image determine whether there is a local maximum of the second directional derivative in the direction (nx[l],ny[l]) within the
pixels's boundaries. If so, set ismax[l] to 2 if the eigenvalue ev[l] is larger than high, to 1 if ev[l] is larger than low, and to 0 otherwise.
Furthermore, put the sub-pixel position of the maximum into (px[l],py[l]). The parameter mode determines whether maxima (dark lines points) or minima
(bright line points) should be selected. The partial derivatives of the image are input as ku[].
:param ismax: variable byte
:param ev:
:param nx:
:param ny:
:param px:
:param py:
:param width:
:param height:
:param low:
:param high:
:param mode:
:return:
"""
k = [0, 0, 0, 0, 0]
eigval = [0, 0]
eigvec = [[0,0], [0,0]]
for r in list(range(0,height)):
for c in list(range(0, width)):
l= r * width + c # it is lincoord(row, col,width) of linesUtil.py
k[0] = ku[0][l]
k[1] = ku[1][l]
k[2] = ku[2][l]
k[3] = ku[3][l]
k[4] = ku[4][l]
compute_eigenvals(k[2], k[3], k[4], eigval, eigvec)
val = -eigval[0] if mode == MODE_LIGHT else eigval[0]
if val>0.0:
ev[l] = float(val)
n1 = eigvec[0][0]
n2 = eigvec[0][1]
a = k[2] * n1 * n1 + 2.0 * k[3] * n1 * n2 + k[4] * n2 * n2
b = k[0] * n1 + k[1] * n2
t,num= solve_linear(a, b)
if num != 0:
p1 = t * n1
p2 = t * n2
if abs(p1) <= PIXEL_BOUNDARY and abs(p2) <= PIXEL_BOUNDARY:
if val >= low:
ismax[l] = 2 if val >= high else 1
nx[l] = float(n1)
ny[l] = float (n2)
px[l] = float (r + p1)
py[l] = float (c + p2)
def detect_lines(image, width, height, contours, sigma, low, high, mode, compute_width, correct_pos, extend_lines, junctions):
"""
:param image:
:param width:
:param height:
:param contours:
:param sigma:
:param low:
:param high:
:param mode:
:param compute_width:
:param correct_pos:
:param extend_lines:
:param junctions:
:return: since 'num_result' was by reference it returns it --> used is num_result=detect_lines(....,num_result,...)
"""
from ridge_detection.width import compute_line_width
if not isinstance(contours,list) or (len(contours) > 0 and not isinstance(contours[0],Line)):
print("ERROR : The 'contours' input value has to be a list of instances of the class 'Line'. (position.py->detect_lines)")
logging.error(" The 'contours' input value has to be a list of instances of the class 'Line'. (position.py->detect_lines)")
exit(-1)
if not isinstance(junctions, list) or (len(junctions) > 0 and not isinstance(junctions[0], Junction)):
print("ERROR: The 'junctions' input value has to be a list of instances of the class 'Junction'. (position.py->detect_lines)")
logging.error(" The 'junctions' input value has to be a list of instances of the class 'Junction'. (position.py->detect_lines)")
exit(-1)
k=convolve_gauss(image,width,height,sigma)
ismax = [0 for i in range(width * height)] # it was a byte[]
ev = [0 for i in range(width * height)]
n1 = [0 for i in range(width * height)]
n2 = [0 for i in range(width * height)]
p1 = [0 for i in range(width * height)]
p2 = [0 for i in range(width * height)]
compute_line_points(k, ismax, ev, n1, n2, p1, p2, width, height, low, high, mode)
num_result = compute_contours(ismax, ev, n1, n2, p1, p2, k[0], k[1], contours, sigma, extend_lines, mode, width, height, junctions)
if DEBUGGING is True:
logging.debug ("output contours")
for c in contours:
logging.debug (str(c))
if compute_width is True:
compute_line_width(k[0], k[1], width, height, sigma, mode, correct_pos, contours)
return num_result | /ridge_detection-3.0.0-py3-none-any.whl/ridge_detection/position.py | 0.71423 | 0.716476 | position.py | pypi |
import logging
from ridge_detection.helper import Offset
from numpy import sign
from math import ceil,sqrt,sin,cos,floor
from ridge_detection.basicGeometry import Line
from ridge_detection.linesUtil import COUNTOUR_DICT,MODE_LIGHT,lincoord,br,bc
from ridge_detection.correct import Correction,Correct,get_ctableh4CorrectObj
from ridge_detection.convol import phi2
from ridge_detection.position import solve_linear, compute_eigenvals
"""
This constant is introduced because for very narrow lines the facet model width detection scheme sometimes extracts the line width too narrow. Since
the correction function has a very steep slope in that area, this will lead to lines of almost zero width, especially since the bilinear interpolation in
correct.c will tend to overcorrect. Therefore it is wise to make the extracted line width slightly larger before correction.
"""
LINE_WIDTH_COMPENSATION = 1.05
""" Minimum line width allowed (used for outlier check in fix_locations()) """
MIN_LINE_WIDTH = 0.1
""" Maximum contrast allowed (used for outlier check in fix_locations()) """
MAX_CONTRAST = 275.0
def bresenham(nx, ny, px, py, length, line ):
"""
Modified Bresenham algorithm. It returns in line all pixels that are intersected by a half line less than length away from the point (px,py) aint
the direction (nx,ny). The point (px,py) must lie within the pixel of the origin, i.e., fabs(px) <= 0.5 and fabs(py) <= 0.5.
:param nx:
:param ny:
:param px:
:param py:
:param length:
:param line: It has a list of Offset obj
:return: the number of the points founded (NB: in the java code was the fake reference param "num_points"
"""
if isinstance(line,list) and len(line)> 0:
for l in line:
if not isinstance(l,Offset):
print("ERROR: The input 'line' has to be a not empty list of instances of the class 'Offset'. (width.py->bresenham)")
logging.error("ERROR: The input 'line' has to be a not empty list of instances of the class 'Offset'. (width.py->bresenham)")
exit(-1)
else:
print("ERROR: The input 'line' has to be a not empty list of instances of the class 'Offset'. (width.py->bresenham)")
logging.error("ERROR: The input 'line' has to be a not empty list of instances of the class 'Offset'. (width.py->bresenham)")
exit(-1)
x = 0
y = 0
dx = abs(nx)
dy = abs(ny)
s1 = int(sign(nx))
s2 = int(sign(ny))
px *= s1
py *= s2
xchg = 0
if dy > dx:
t = dx
dx = dy
dy = t
t = px
px = py
py = t
xchg = 1
maxit = int (ceil(length * dx))
e = (0.5 - px) * dy / dx - (0.5 - py)
n = 0
for i in list(range(maxit+1)):
line[n].x = x
line[n].y = y
n +=1
while e >= -1e-8:
if xchg != 0:
x += s1
else:
y += s2
e-=1
if e > -1:
line[n].x = x
line[n].y = y
n+=1
if xchg != 0:
y += s2
else:
x += s1
e += dy / dx
return n
def fill_gaps(master=[], slave1=[], slave2=[], cont=None):
"""
Fill gaps in the arrays master, slave1, and slave2, i.e., points where
master=0, by interpolation (interior points) or extrapolation (end points). The array master will usually be the width of the line, while slave1 and
slave2 will be values that depend on master[i] being 0, e.g., the gradient at each line point. The arrays slave1 and slave2 can be NULL.
:param master:
:param slave1:
:param slave2:
:param cont:
:return:
"""
if not isinstance(master,list) or not isinstance(slave1,list) or not isinstance(slave2,list):
print ( "ERROR: The input 'master', 'slave1', and 'slave2' have to be vector. (width.py->fill_gaps)")
logging.error ( "The input 'master', 'slave1', and 'slave2' have to be vector. (width.py->fill_gaps)")
exit(-1)
if not isinstance(cont, Line):
print("ERROR: The 'cont' param has to be an instance of the class 'Line'. (width.py->fill_gaps)")
logging.error("ERROR: The 'cont' param has to be an instance of the class 'Line'. (width.py->fill_gaps)")
exit(-1)
num_points = cont.num
j=None # I do not like this but I cannot change it and be sure that the behaviour will be the same
for i in list(range(num_points)):
if master[i] == 0:
for j in list(range(i+1,num_points)):
if master[j] > 0:
break
if j is None:
break
m_s = 0
m_e = 0
s1_s = 0
s1_e = 0
s2_s = 0
s2_e = 0
if i>0 and j<num_points-1:
s = i
e = j - 1
m_s = master[(s - 1)]
m_e = master[(e + 1)]
if len(slave1) >0:
s1_s = slave1[(s - 1)]
s1_e = slave1[(e + 1)]
if len(slave2) > 0:
s2_s = slave2[(s - 1)]
s2_e = slave2[(e + 1)]
elif i>0:
s = i
e = num_points - 2
m_s = master[(s - 1)]
m_e = master[(s - 1)]
master[(e + 1)] = m_e
if len(slave1) > 0:
s1_s = slave1[(s - 1)]
s1_e = slave1[(s - 1)]
slave1[(e + 1)] = s1_e
if len(slave2) > 0:
s2_s = slave2[(s - 1)]
s2_e = slave2[(s - 1)]
slave2[(e + 1)] = s2_e
elif j<num_points-1:
s = 1
e = j - 1
m_s = master[(e + 1)]
m_e = master[(e + 1)]
master[(s - 1)] = m_s
if len(slave1) > 0:
s1_s = slave1[(e + 1)]
s1_e = slave1[(e + 1)]
slave1[(s - 1)] = s1_s
if len(slave2) > 0:
s2_s = slave2[(e + 1)]
s2_e = slave2[(e + 1)]
slave2[(s - 1)] = s2_s
else:
s = 1
e = num_points - 2
m_s = master[(s - 1)]
m_e = master[(e + 1)]
if len(slave1) > 0:
s1_s = slave1[(s - 1)]
s1_e = slave1[(e + 1)]
if len(slave2) > 0:
s2_s = slave2[(s - 1)]
s2_e = slave2[(e + 1)]
arc_len = 0
for k in list(range(s,e+2)):
d_r = cont.row[k] - cont.row[(k - 1)]
d_c = cont.col[k] - cont.col[(k - 1)]
arc_len += sqrt(d_r * d_r + d_c * d_c)
l=0
for k in list(range(s, e + 1)):
d_r = cont.row[k] - cont.row[(k - 1)]
d_c = cont.col[k] - cont.col[(k - 1)]
l += sqrt(d_r * d_r + d_c * d_c)
master[k] = (arc_len - l) / arc_len * m_s + l/ arc_len * m_e
if len(slave1) > 0:
slave1[k] = (arc_len - l) / arc_len * s1_s + l / arc_len * s1_e
if len(slave2) > 0:
slave2[k] = (arc_len - l) / arc_len * s2_s + l / arc_len * s2_e
i=j #todo: verificalo, in python nn ha senso ... cosa voleva fare? ha valore in java? Se si cambia primo loop in un while
def fix_locations( width_l, width_r, grad_l, grad_r, pos_x, pos_y, correction, contr, asymm, sigma, mode, correct_pos, cont) :
"""
Correct the extracted line positions and widths. The algorithm first closes gaps in the extracted data width_l, width_r, grad_l, and grad_r to provide
meaningful input over the whole line. Then the correction is calculated. After this, gaps that have been introduced by the width correction are again
closed. Finally, the position correction is applied if correct_pos is set. The results are returned in width_l, width_r, and cont
:param width_l:
:param width_r:
:param grad_l:
:param grad_r:
:param pos_x:
:param pos_y:
:param correction:
:param contr:
:param asymm:
:param sigma:
:param mode:
:param correct_pos:
:param cont:
:return:
"""
if not isinstance(cont, Line):
print("ERROR: The 'cont' param has to be an instance of the class 'Line'.")
logging.error("ERROR: The 'cont' param has to be an instance of the class 'Line'.")
exit(-1)
w_est,r_est, w_est, r_est, w_real, h_real, corr, w_strong, w_weak, weak_is_r = 0,0,0,0,0,0,0,0,0,0
fill_gaps(width_l, grad_l, slave2=[], cont=cont)
fill_gaps(width_r, grad_r, slave2=[], cont=cont)
num_points = cont.num
""" Calculate true line width, asymmetry, and position correction """
if correct_pos is True:
""" Do not correct the position of a junction point if its width is found by interpolation, i.e., if the position could be corrected differently for each junction point, thereby destroying the junction."""
correct_start = ((cont.cont_class == COUNTOUR_DICT["cont_no_junc"] or cont.cont_class == COUNTOUR_DICT["cont_end_junc"] or cont.cont_class == COUNTOUR_DICT["cont_closed"])
and (width_r[0] > 0 and width_l[0] > 0))
correct_end = ((cont.cont_class == COUNTOUR_DICT["cont_no_junc"] or cont.cont_class ==
COUNTOUR_DICT["cont_start_junc"] or cont.cont_class == COUNTOUR_DICT["cont_closed"])
and (width_r[(num_points - 1)] > 0 and width_l[(num_points - 1)] > 0))
""" Calculate the true width and assymetry, and its corresponding correction for each line point"""
precalculated_ctable4Correct_obj=get_ctableh4CorrectObj()
for i in list(range(num_points)):
if width_r[i] > 0 and width_l[i] > 0:
w_est = (width_r[i] + width_l[i]) * LINE_WIDTH_COMPENSATION
if grad_r[i] <= grad_l[i]:
r_est = grad_r[i] / grad_l[i]
weak_is_r = True
else:
r_est = grad_l[i] / grad_r[i]
weak_is_r = False
correctObj =Correct(precalculated_ctable4Correct_obj)
correctionObj = Correction(w_est, r_est, w_real, h_real, corr, w_strong, w_weak)
correctObj.line_corrections(sigma, correctionObj)
w_real=correctionObj.w/LINE_WIDTH_COMPENSATION
corr = correctionObj.correction / LINE_WIDTH_COMPENSATION
width_r[i] = w_real
width_l[i] = w_real
if weak_is_r is True:
asymm[i] = correctionObj.h
correction[i] = -corr
else:
asymm[i] = -correctionObj.h
correction[i] = corr
fill_gaps(width_l, correction, asymm, cont)
for i in list(range(0,num_points)):
width_r[i] = width_l[i]
""" Adapt the correction for junction points if necessary """
if not correct_start:
correction[0] = 0
if not correct_end:
correction[(num_points - 1)] = 0
for i in list(range(0, num_points)):
px = pos_x[i]
py = pos_y[i]
nx = cos(cont.angle[i])
ny = sin(cont.angle[i])
px = px + correction[i] * nx
py = py + correction[i] * ny
pos_x[i] = px
pos_y[i] = py
""" Update the position of a line and add the extracted width """
cont.width_l = [float for i in range(num_points)]
cont.width_r = [float for i in range(num_points)]
for i in list(range(0, num_points)):
cont.width_l[i] = width_l[i]
cont.width_r[i] = width_r[i]
cont.row[i] = pos_x[i]
cont.col[i] = pos_y[i]
""" Now calculate the true contrast """
if correct_pos is True:
cont.asymmetry = [float for i in range(num_points)]
cont.intensity = [float for i in range(num_points)]
for i in list(range(0, num_points)):
response = cont.response[i]
asymmetry = abs(asymm[i])
correct = abs(correction[i])
width = cont.width_l[i]
contrast = 0 if width<MIN_LINE_WIDTH else (response / abs(phi2(correct + width, sigma) + (asymmetry - 1) * phi2(correct - width, sigma)))
if contrast>MAX_CONTRAST:
contrast = 0
contr[i] = contrast
fill_gaps(contr, slave1=[], slave2=[], cont=cont)
for i in list(range(0, num_points)):
cont.asymmetry[i] = asymm[i]
cont.intensity[i] = contr[i] if mode == MODE_LIGHT else -contr[i]
def compute_line_width(dx, dy, width, height, sigma, mode, correct_pos, contours):
"""
Extract the line width by using a facet model line detector on an image of the absolute value of the gradient
:param dx:
:param dy:
:param width:
:param height:
:param sigma:
:param mode:
:param correct_pos:
:param contours:
:return:
"""
if (not isinstance(contours, list) or len(contours) == 0 ) or not isinstance(contours[0], Line):
print("ERROR: The 'contours' input value has to be a list of instances of the class 'Line'. (width.py->compute_line_width)")
logging.error("ERROR: The 'contours' input value has to be a list of instances of the class 'Line'. (width.py->compute_line_width)")
exit(-1)
eigvec = [[0,0], [0,0]]
eigval = [0, 0]
max_num_points =max([contour.num for contour in contours])
width_l = [0 for i in range(max_num_points)]
width_r = [0 for i in range(max_num_points)]
grad_l = [0 for i in range(max_num_points)]
grad_r = [0 for i in range(max_num_points)]
pos_x = [0 for i in range(max_num_points)]
pos_y = [0 for i in range(max_num_points)]
grad = [0 for i in range(width * height)]
length = 2.5 * sigma
line = [Offset() for i in range( int (ceil(length * 3)) )]
""" Compute the gradient image """
for r in list(range(0, height)):
for c in list(range(0, width)):
l=r * width + c # it is lincoord(row, col,width) of linesUtil.py
grad[l] = sqrt(dx[l] * dx[l] + dy[l] * dy[l])
for cont in contours:
num_points = cont.num
for j in list(range(0, num_points)):
px = cont.row[j]
py = cont.col[j]
pos_x[j] = px
pos_y[j] = py
r = int(floor(px + 0.5))
c = int(floor(py + 0.5))
nx = cos(cont.angle[j])
ny = sin(cont.angle[j])
""" Compute the search line """
num_line =bresenham(nx, ny, 0.0, 0.0, length, line)
width_r[j] = width_l[j] = 0
""" Look on both sides of the line """
for d_r in [-1,1]:
for k in list(range(0, num_line)):
x = br(r + d_r * line[k].x, height)
y = bc(c + d_r * line[k].y, width)
i1 = grad[lincoord(br(x - 1, height), bc(y - 1, width), width)]
i2 = grad[lincoord(br(x - 1, height), y, width)]
i3 = grad[lincoord(br(x - 1, height), bc(y + 1, width), width)]
i4 = grad[lincoord(x, bc(y - 1, width), width)]
i5 = grad[r * width + c] ## it is lincoord(row, col,width) of linesUtil.py
i6 = grad[lincoord(x, bc(y + 1, width), width)]
i7 = grad[lincoord(br(x + 1, height), bc(y - 1, width), width)]
i8 = grad[lincoord(br(x + 1, height), y, width)]
i9 = grad[lincoord(br(x + 1, height), bc(y + 1, width), width)]
t1 = i1 + i2 + i3
t2 = i4 + i5 + i6
t3 = i7 + i8 + i9
t4 = i1 + i4 + i7
t5 = i2 + i5 + i8
t6 = i3 + i6 + i9
dr = (t3 - t1) / 6
dc = (t6 - t4) / 6
drr = (t1 - 2 * t2 + t3) / 6
dcc = (t4 - 2 * t5 + t6) / 6
drc = (i1 - i3 - i7 + i9) / 4
compute_eigenvals(2 * drr, drc, 2 * dcc, eigval, eigvec)
val = -eigval[0]
if val > 0.0:
n1 = eigvec[0][0]
n2 = eigvec[0][1]
a = 2.0 * (drr * n1 * n1 + drc * n1 * n2 + dcc * n2 * n2)
b = dr * n1 + dc * n2
t,num= solve_linear(a, b)
if num != 0:
p1 = t * n1
p2 = t * n2
if abs(p1) <= 0.5 and abs(p2) <= 0.5:
""" Project the maximum point position perpendicularly onto the search line """
b = nx * (px - (r + d_r * line[k].x + p1)) + ny * (py - (c + d_r * line[k].y + p2))
t,num = solve_linear(1, b)
d = (-i1 + 2 * i2 - i3 + 2 * i4 + 5 * i5 + 2 * i6 - i7 + 2 * i8 - i9) / 9
if d_r ==1 :
grad_r[j] = d + p1 * dr + p2 * dc + p1 * p1 * drr + p1 * p2 * drc + p2 * p2 * dcc
width_r[j] = abs(t)
else:
grad_l[j] = d + p1 * dr + p2 * dc + p1 * p1 * drr + p1 * p2 * drc + p2 * p2 * dcc
width_l[j] = abs(t)
break
correct = [0 for i in range(max_num_points)]
contrast = [0 for i in range(max_num_points)]
asymm = [0 for i in range(max_num_points)]
fix_locations(width_l, width_r, grad_l, grad_r, pos_x, pos_y, correct, contrast, asymm, sigma, mode, correct_pos, cont) | /ridge_detection-3.0.0-py3-none-any.whl/ridge_detection/width.py | 0.526099 | 0.433382 | width.py | pypi |
from urllib.request import urlopen
from tempfile import NamedTemporaryFile
from matplotlib.collections import LineCollection
import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
import numpy as np
from skimage.filters import rank
from skimage.morphology import square
from skimage.util import img_as_ubyte
import srtm
class FontManager:
"""Utility to load fun fonts from https://fonts.google.com/ for matplotlib.
Find a nice font at https://fonts.google.com/, and then get its corresponding URL
from https://github.com/google/fonts/
Use like:
fm = FontManager()
fig, ax = plt.subplots()
ax.text("Good content.", fontproperties=fm.prop, size=60)
"""
def __init__(
self,
github_url="https://github.com/google/fonts/blob/main/ofl/cinzel/static/Cinzel-Regular.ttf?raw=true", # pylint: disable=line-too-long
):
"""
Lazily download a font.
Parameters
----------
github_url : str
Can really be any .ttf file, but probably looks like
"https://github.com/google/fonts/blob/main/ofl/cinzel/static/Cinzel-Regular.ttf?raw=true"
"""
self.github_url = github_url
self._prop = None
@property
def prop(self):
"""Get matplotlib.font_manager.FontProperties object that sets the custom font."""
if self._prop is None:
with NamedTemporaryFile(delete=False, suffix=".ttf") as temp_file:
temp_file.write(urlopen(self.github_url).read())
self._prop = fm.FontProperties(fname=temp_file.name)
return self._prop
class RidgeMap:
"""Main class for interacting with art.
Keeps state around so no servers are hit too often.
"""
def __init__(self, bbox=(-71.928864, 43.758201, -70.957947, 44.465151), font=None):
"""Initialize RidgeMap.
Parameters
----------
bbox : list-like of length 4
In the form (long, lat, long, lat), describing the
(bottom_left, top_right) corners of a box.
http://bboxfinder.com is a useful way to find these tuples.
font : matplotlib.font_manager.FontProperties
Optional, a custom font to use. Defaults to Cinzel Regular.
"""
self.bbox = bbox
self._srtm_data = srtm.get_data()
if font is None:
font = FontManager().prop
self.font = font
@property
def lats(self):
"""Left and right latitude of bounding box."""
return (self.bbox[1], self.bbox[3])
@property
def longs(self):
"""Bottom and top longitude of bounding box."""
return (self.bbox[0], self.bbox[2])
def get_elevation_data(self, num_lines=80, elevation_pts=300, viewpoint="south"):
"""Fetch elevation data and return a numpy array.
Parameters
----------
num_lines : int
Number of horizontal lines to draw
elevation_pts : int
Number of points on each line to request. There's some limit to
this that srtm enforces, but feel free to go for it!
viewpoint : str in ["south", "west", "north", "east"] (default "south")
The compass direction from which the map will be visualised.
Returns
-------
np.ndarray
"""
if viewpoint in ["east", "west"]:
num_lines, elevation_pts = elevation_pts, num_lines
values = self._srtm_data.get_image(
(elevation_pts, num_lines), self.lats, self.longs, 5280, mode="array"
)
switch = {"south": 0, "west": 3, "north": 2, "east": 1}
rotations = switch[viewpoint]
values = np.rot90(m=values, k=rotations)
return values
def preprocess(
self, *, values=None, water_ntile=10, lake_flatness=3, vertical_ratio=40
):
"""Get map data ready for plotting.
You can do this yourself, and pass an array directly to plot_map. This
gathers all nan values, the lowest `water_ntile` percentile of elevations,
and anything that is flat enough, and sets the values to `nan`, so no line
is drawn. It also exaggerates the vertical scale, which can be nice for flat
or mountainy areas.
Parameters
----------
values : np.ndarray
An array to process, or fetch the elevation data lazily here.
water_ntile : float in [0, 100]
Percentile below which to delete data. Useful for coasts or rivers.
Set to 0 to not delete any data.
lake_flatness : int
How much the elevation can change within 3 squares to delete data.
Higher values delete more data. Useful for rivers, lakes, oceans.
vertical_ratio : float > 0
How much to exaggerate hills. Kind of arbitrary. 40 is reasonable,
but try bigger and smaller values!
Returns
-------
np.ndarray
Processed data.
"""
if values is None:
values = self.get_elevation_data()
nan_vals = np.isnan(values)
values[nan_vals] = np.nanmin(values)
values = (values - np.min(values)) / (np.max(values) - np.min(values))
is_water = values < np.percentile(values, water_ntile)
is_lake = rank.gradient(img_as_ubyte(values), square(3)) < lake_flatness
values[nan_vals] = np.nan
values[np.logical_or(is_water, is_lake)] = np.nan
values = vertical_ratio * values[-1::-1] # switch north and south
return values
# pylint: disable=too-many-arguments,too-many-locals
def plot_map(
self,
values=None,
label="The White\nMountains",
label_x=0.62,
label_y=0.15,
label_verticalalignment="bottom",
label_size=60,
line_color="black",
kind="gradient",
linewidth=2,
background_color=(0.9255, 0.9098, 0.9255),
size_scale=20,
ax=None,
):
"""Plot the map.
Lots of nobs, and they're all useful to sometimes turn.
Parameters
----------
values : np.ndarray
Array of elevations to plot. Defaults to the elevations at the provided
bounding box.
label : string
Label to place on the map. Use an empty string for no label.
label_x : float in [0, 1]
Where to position the label horizontally
label_y : float in [0, 1]
Where to position the label vertically
label_verticalalignment: "top" or "bottom"
Whether the label_x and label_y refer to the top or bottom left corner
of the label text box
label_size : int
fontsize of the label
line_color : string or callable
colors for the map. A callable will be fed the scaled index in [0, 1]
kind : {"gradient" | "elevation"}
If you provide a colormap to `line_color`, "gradient" colors by the line index, and
"elevation" colors by the actual elevation along the line.
linewidth : float
Width of each line in the map
background_color : color
For the background of the map and figure
scale_size : float
If you are printing this, make this number bigger.
ax : matplotlib Axes
You can pass your own axes!
Returns
-------
matplotlib.Axes
"""
if kind not in {"gradient", "elevation"}:
raise TypeError("Argument `kind` must be one of 'gradient' or 'elevation'")
if values is None:
values = self.preprocess()
if ax is None:
ratio = (self.lats[1] - self.lats[0]) / (self.longs[1] - self.longs[0])
_, ax = plt.subplots(figsize=(size_scale, size_scale * ratio))
x = np.arange(values.shape[1])
norm = plt.Normalize(np.nanmin(values), np.nanmax(values))
for idx, row in enumerate(values):
y_base = -6 * idx * np.ones_like(row)
y = row + y_base
if callable(line_color) and kind == "elevation":
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lines = LineCollection(
segments, cmap=line_color, zorder=idx + 1, norm=norm
)
lines.set_array(row)
lines.set_linewidth(linewidth)
ax.add_collection(lines)
else:
if callable(line_color) and kind == "gradient":
color = line_color(idx / values.shape[0])
else:
color = line_color
ax.plot(x, y, "-", color=color, zorder=idx, lw=linewidth)
ax.fill_between(x, y_base, y, color=background_color, zorder=idx)
ax.text(
label_x,
label_y,
label,
transform=ax.transAxes,
fontproperties=self.font,
size=label_size,
verticalalignment=label_verticalalignment,
bbox=dict(facecolor=background_color, alpha=1, linewidth=0),
zorder=len(values) + 10,
)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
for spine in ax.spines.values():
spine.set_visible(False)
ax.set_facecolor(background_color)
return ax | /ridge_map-0.0.4-py3-none-any.whl/ridge_map/ridge_map.py | 0.884027 | 0.462473 | ridge_map.py | pypi |
from __future__ import annotations
from typing import Optional
import matplotlib.axes
import numpy as np
from more_itertools import first
from scipy.stats import gaussian_kde
from ridgeplot.stats import scaling
class RidgePlotError(Exception):
pass
def ridgeplot(
ax: matplotlib.axes,
data: dict[str, list[float]],
xlim: Optional[tuple[float, float]] = None,
fill_colors: Optional[list[str]] = None,
line_colors: Optional[list[str]] = None,
label_size: float = 10.0,
fill_alpha: float = 0.5,
) -> None:
"""
plotting a ridgeplot
Example:
```
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> data = {}
>>> for i in range(10):
>>> data['data_{}'.format(i)] = np.random.randn(100) * (i+1)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ridgeplot(ax, data, xlim=(-20,20))
```
Args:
ax: a matplotlib ax object for writing the plot
data: a dictionary of data, key is the label of the group, values are the data values in the group
xlim: x-limits for the plot (xmin, xmax)
fill_colors: colors for the fill under the distribution, must be same length as input data (default: all steelblue)
line_colors: colors for the line drawing the distribution, must be same length as input data (default: all white)
label_size: label size of the name of each distribution
fill_alpha: alpha value for the fill under the distribution (default: 0.5)
Returns:
NoneType
"""
# assigning colors if not given
if fill_colors is None:
fill_colors = len(data) * ["steelblue"]
if line_colors is None:
line_colors = len(data) * ["white"]
# assigning xlims if not given
if xlim is not None:
xmin, xmax = xlim
else:
xmin = min(first(data.values()))
xmax = max(first(data.values()))
# data validation
if len(fill_colors) != len(data):
raise RidgePlotError("fill_colors must be same length as data")
if len(line_colors) != len(data):
raise RidgePlotError("line_colors must be same length as data")
xlines = []
for sample_number, (data_key, data_values) in enumerate(data.items()):
data_values_array = np.array(data_values, dtype="float")
xs = np.arange(xmin, xmax * 1.1, 0.01) # xaxis is 10% wider than data max
kde = gaussian_kde(data_values_array)
baseline = -sample_number * 0.7
ys = scaling(kde.pdf(xs)) + baseline
ax.plot(xs, ys, color=line_colors[sample_number], lw=2)
ax.fill_between(x=xs, y1=ys, y2=baseline, color=fill_colors[sample_number], alpha=fill_alpha)
xlines.append(baseline)
ax.text(xmin, baseline, data_key, ha="right", va="bottom", fontsize=label_size)
# ax.hlines(xlines, xmin=xmin, xmax=xmax * 1.1, color="black", lw=1)
ax.legend(loc="center").set_visible(False)
ax.get_yaxis().set_visible(False)
for side in ["left", "right", "top"]:
ax.spines[side].set_visible(False)
ax.set_xlim(xmin, xmax) | /ridgeplot_py-0.2.1rc26-py3-none-any.whl/ridgeplot/ridge_plot.py | 0.948941 | 0.826467 | ridge_plot.py | pypi |
from collections import OrderedDict
from typing import Any, Dict, List
import matplotlib.axes
import matplotlib.patches as mpatches
from matplotlib import legend
# Named categorical color palettes: palette name -> list of hex color strings.
# Consumers look values up by key (e.g. ColorPalette["okabeito"]) and typically
# feed the list to ColorEncoder below; palettes may differ in length.
ColorPalette: Dict[str, List[str]] = dict(
    # 1. maximum
    # modified from:
    # https://sashat.me/2017/01/11/list-of-20-simple-distinct-colors/
    maximum=[
        "#f58231",
        "#e6194b",
        "#3cb44b",
        "#ffe119",
        "#4363d8",
        "#911eb4",
        "#03A8FB",
        "#F8BF6C",
        "#CAF5CB",
        "#fabebe",
        "#008080",
        "#e6beff",
        "#9a6324",
        "#fffac8",
        "#800000",
        "#aaffc3",
        "#808000",
        "#ffd8b1",
        "#000075",
        "#808080",
        "#ffffff",
        "#000000",
    ],
    # 2. simpsons
    # A palette from ggsci R package
    # https://github.com/road2stat/ggsci/blob/master/data-raw/data-generator.R
    simpsons=[
        "#FED439",
        "#709AE1",
        "#8A9197",
        "#D2AF81",
        "#FD7446",
        "#D5E4A2",
        "#197EC0",
        "#F05C3B",
        "#46732E",
        "#71D0F5",
        "#370335",
        "#075149",
        "#C80813",
        "#91331F",
        "#1A9993",
        "#FD8CC1",
    ],
    # 3. okabeito:
    # Color palette proposed by Okabe and Ito (colorblind-friendly)
    # copy from colorboindr R package
    # https://github.com/clauswilke/colorblindr/blob/master/R/palettes.R
    okabeito=[
        "#E69F00",
        "#56B4E9",
        "#009E73",
        "#F0E442",
        "#0072B2",
        "#D55E00",
        "#CC79A7",
        "#999999",
    ],
    # 4. invitae:
    # https://www.buyayo.com/invitae
    invitae=[
        "#A3CF71",
        "#66BF7E",
        "#0AACA0",
        "#0888B2",
        "#373737",
        "#EFEDEA",
        "#686b69",
        "#417d55",
    ],
)
def ordered_set(xs: List[str]) -> List[str]:
    """
    Deduplicate a list while preserving the order of first appearance.

    Python's built-in ``set`` is unordered
    (https://stackoverflow.com/questions/9792664), so a plain ``set(xs)`` would
    lose the input ordering. Dicts preserve insertion order (Python 3.7+), so
    ``dict.fromkeys`` keeps each value's first occurrence in place — and does it
    in O(n), unlike the O(n^2) ``sorted(set(xs), key=xs.index)`` idiom, where
    every ``list.index`` call is a linear scan.

    Args:
        xs: list of input values, possibly containing duplicates

    Returns:
        a list of unique input values in the order of how they arranged in the
        input list
    """
    return list(dict.fromkeys(xs))
def check_color_vector_size(categorical_vector: List[str], color_vector: List[str]) -> List[str]:
    """
    Validate that there are enough colors for every distinct category.

    Args:
        categorical_vector: labels of the samples; duplicates are allowed
        color_vector: candidate colors; deliberately not checked for duplication

    Returns:
        the unique categories, in order of first appearance in the input

    Raises:
        ValueError: when there are more distinct categories than colors
    """
    distinct = ordered_set(categorical_vector)
    n_colors, n_categories = len(color_vector), len(distinct)
    if n_categories > n_colors:
        raise ValueError(f"Not enough colors!! {n_colors} colors for {n_categories} categories")
    return distinct
class ColorEncoder:
    """
    Color-encode a categorical vector.

    Each distinct category (in order of first appearance) is assigned one
    color from the palette passed to ``fit()``.

    Example:
    ```
    >>> categorical_vector = ['group a','group b','group c','group a']
    >>> colors = ColorPalette["okabeito"]
    >>> ce = ColorEncoder()
    >>> ce.fit(categorical_vector, colors)
    >>> ce.encoder
    OrderedDict([('group a', '#E69F00'),
                ('group b', '#56B4E9'),
                ('group c', '#009E73')])
    >>> ce.transform(["group b", "group c", "group a"])
    ['#56B4E9', '#009E73', '#E69F00']
    ```
    or:
    ```
    >>> ce = ColorEncoder()
    >>> ce.fit_transform(categorical_vector, colors)
    ['#E69F00', '#56B4E9', '#009E73', '#E69F00']
    ```
    access color encoder:
    ```
    >>> ce.encoder
    OrderedDict([('group a', '#E69F00'),
                ('group b', '#56B4E9'),
                ('group c', '#009E73')])
    ```
    """
    def __init__(self) -> None:
        # Distinct categories seen by fit(), in order of first appearance.
        self.distinct_categories: List[str] = []
        # category -> color mapping; empty until fit() is called.
        self.encoder: OrderedDict[str, str] = OrderedDict()
    def fit(self, categories: List[str], colors: List[str] = ColorPalette["invitae"]) -> None:
        """
        Map colors to the unique categories in the input list.

        This fills the ``encoder`` dictionary (category -> color).
        Example:
        ```
        >>> categorical_vector = ['group a','group b','group c','group a']
        >>> colors = ColorPalette["okabeito"]
        >>> ce = ColorEncoder()
        >>> ce.fit(categorical_vector, colors)
        ```
        Args:
            categories: list of input values (i.e. labels of the samples), can be duplicated
            colors: list of colors, intentionally not checked for duplication
        Returns:
            NoneType
        """
        # check_color_vector_size raises ValueError if there are more
        # distinct categories than available colors.
        self.distinct_categories = check_color_vector_size(categories, colors)
        # zip stops at the shorter sequence, so any surplus colors are ignored.
        self.encoder = OrderedDict(zip(self.distinct_categories, colors))
    def transform(self, categories: List[str]) -> List[str]:
        """
        Map a color to each category in the input list.

        Example:
        ```
        >>> categorical_vector = ['group a','group b','group c','group a']
        >>> colors = ColorPalette["okabeito"]
        >>> ce = ColorEncoder()
        >>> ce.fit(categorical_vector, colors)
        >>> new_categorical_vector = ["group b", "group c"]
        >>> ce.transform(new_categorical_vector)
        ['#56B4E9', '#009E73']
        ```
        Args:
            categories: list of input values (i.e. labels of the samples), can be duplicated
        Returns:
            list of colors for the input list according to the fitted color encoder
        Raises:
            ValueError: if ``fit()`` has not been called yet, or if the input
                contains categories that were not seen during ``fit()``
        """
        if not self.encoder:
            raise ValueError("Call ColorEncoder.fit() first!!")
        # Categories not seen during fit() have no color assigned to them.
        union_set = set(self.distinct_categories).union(set(categories))
        if len(union_set) != len(self.distinct_categories):
            unseen = union_set - set(self.distinct_categories)
            unseen_str = ", ".join(sorted(list(unseen)))
            raise ValueError(f"Input [categories] contain unseen data!!: {unseen_str}")
        return [self.encoder[category] for category in categories]
    def fit_transform(self, categories: List[str], colors: List[str] = ColorPalette["invitae"]) -> List[str]:
        """
        First map the colors to the categories, and then return the corresponding color for each category in the input list.

        Example:
        ```
        >>> categorical_vector = ["group1", "group2", "group1"]
        >>> colors = ["salmon","gold"]
        >>> ce = ColorEncoder()
        >>> ce.fit_transform(categorical_vector, colors)
        ['salmon', 'gold', 'salmon']
        ```
        Args:
            categories: list of input values (i.e. labels of the samples), can be duplicated
            colors: list of colors to be assigned to the categories
        Returns:
            list of colors corresponding to the input
        """
        self.fit(categories, colors=colors)
        return self.transform(categories)
    def show_legend(self, ax: matplotlib.axes.Axes, sort: bool = False, **kwargs: Any) -> legend.Legend:
        """
        Add a matplotlib legend describing the color encoder to a matplotlib ax object.

        Args:
            ax: matplotlib ax object
            sort: sort the legend entries alphabetically by category
            **kwargs: keyword arguments passed through to matplotlib.axes.Axes.legend
        Returns:
            the matplotlib legend object
        """
        if sort:
            # Note: this re-orders the stored encoder in place.
            self.encoder = OrderedDict(sorted(self.encoder.items(), key=lambda item: item[0]))
        pat = [mpatches.Patch(color=col, label=lab) for lab, col in self.encoder.items()]
        lgd = ax.legend(handles=pat, **kwargs)
        return lgd
# Release notes
This document outlines the list of changes to ridgeplot between each release. For full details, see
the [commit logs](https://github.com/tpvasconcelos/ridgeplot/commits/).
Unreleased changes
------------------
- ...
---
0.1.23
------
- Fix the references to the interactive Plotly IFrames
([#129](https://github.com/tpvasconcelos/ridgeplot/pull/129))
---
0.1.22
------
### Deprecations
- The `colormode='index'` value has been deprecated in favor of `colormode='row-index'`, which
provides the same functionality but is more explicit and allows to distinguish between the
`'row-index'` and `'trace-index'` modes.
([#114](https://github.com/tpvasconcelos/ridgeplot/pull/114))
- The `show_annotations` argument has been deprecated in favor of `show_yticklabels`.
([#114](https://github.com/tpvasconcelos/ridgeplot/pull/114))
- The `get_all_colorscale_names()` function has been deprecated in favor of
`list_all_colorscale_names()`.
([#114](https://github.com/tpvasconcelos/ridgeplot/pull/114))
### Features
- Add functionality to allow plotting of multiple traces per row.
([#114](https://github.com/tpvasconcelos/ridgeplot/pull/114))
- Add `ridgeplot.datasets.load_lincoln_weather()` helper function to load the "Lincoln Weather"
toy dataset. ([#114](https://github.com/tpvasconcelos/ridgeplot/pull/114))
- Add more versions of the _probly_ dataset (`"wadefagen"` and `"illinois"`).
([#114](https://github.com/tpvasconcelos/ridgeplot/pull/114))
- Add support for Python 3.11.
### Documentation
- Major update to the documentation, including more examples, interactive plots, script to
generate the HTML and WebP images from the example scripts, improved API reference, and more.
([#114](https://github.com/tpvasconcelos/ridgeplot/pull/114))
### Internal
- Remove `mdformat` from the automated CI checks. It can still be triggered manually.
([#114](https://github.com/tpvasconcelos/ridgeplot/pull/114))
- Improved type annotations and type checking.
([#114](https://github.com/tpvasconcelos/ridgeplot/pull/114))
---
0.1.21
------
### Features
- Add `ridgeplot.datasets.load_probly()` helper function to load the `probly` toy dataset. The
`probly.csv` file is now included in the package under `ridgeplot/datasets/data/`.
([#80](https://github.com/tpvasconcelos/ridgeplot/pull/80))
### Documentation
- Change to numpydoc style docstrings.
([#81](https://github.com/tpvasconcelos/ridgeplot/pull/81))
- Add a robots.txt to the docs site.
([#81](https://github.com/tpvasconcelos/ridgeplot/pull/81))
- Auto-generate a site map for the docs site using `sphinx_sitemap`.
([#81](https://github.com/tpvasconcelos/ridgeplot/pull/81))
- Change the sphinx theme to `furo`.
([#81](https://github.com/tpvasconcelos/ridgeplot/pull/81))
- Improve the internal documentation and add some of these internals to the API reference.
([#81](https://github.com/tpvasconcelos/ridgeplot/pull/81))
### Internal
- Fixed and improved some type annotations, including the introduction of `ridgeplot._types`
module for type aliases such as `Numeric` and `NestedNumericSequence`.
([#80](https://github.com/tpvasconcelos/ridgeplot/pull/80))
- Add the `blacken-docs` pre-commit hook and add the `pep8-naming`, `flake8-pytest-style`,
  `flake8-simplify`, `flake8-implicit-str-concat`, `flake8-bugbear`,
  `flake8-rst-docstrings`, etc... plugins to the `flake8` pre-commit hook.
([#81](https://github.com/tpvasconcelos/ridgeplot/pull/81))
- Cleanup and improve some type annotations.
([#81](https://github.com/tpvasconcelos/ridgeplot/pull/81))
- Update deprecated `set-output` commands (GitHub Actions)
([#87](https://github.com/tpvasconcelos/ridgeplot/pull/87))
---
0.1.17
------
- Automate the release process. See .github/workflows/release.yaml, which issues a new GitHub
release whenever a new git tag is pushed to the main branch by extracting the release notes from
the changelog.
- Fix automated release process to PyPI.
([#27](https://github.com/tpvasconcelos/ridgeplot/pull/27))
---
0.1.16
------
- Upgrade project structure, improve testing and CI checks, and start basic Sphinx docs.
([#21](https://github.com/tpvasconcelos/ridgeplot/pull/21))
- Implement `LazyMapping` helper to allow `ridgeplot._colors.PLOTLY_COLORSCALES` to lazy-load from
`colors.json` ([#20](https://github.com/tpvasconcelos/ridgeplot/pull/20))
---
0.1.14
------
- Remove `named_colorscales` from public API
([#18](https://github.com/tpvasconcelos/ridgeplot/pull/18))
---
0.1.13
------
- Add tests for example scripts ([#14](https://github.com/tpvasconcelos/ridgeplot/pull/14))
---
0.1.12
------
### Internal
- Update and standardise CI steps ([#6](https://github.com/tpvasconcelos/ridgeplot/pull/6))
### Documentation
- Publish official contribution guidelines (`CONTRIBUTING.md`)
([#8](https://github.com/tpvasconcelos/ridgeplot/pull/8))
- Publish an official Code of Conduct (`CODE_OF_CONDUCT.md`)
([#7](https://github.com/tpvasconcelos/ridgeplot/pull/7))
- Publish an official release/change log (`CHANGES.md`)
([#6](https://github.com/tpvasconcelos/ridgeplot/pull/6))
---
0.1.11
------
- `colors.json` was missing from the final distributions
([#2](https://github.com/tpvasconcelos/ridgeplot/pull/2))
---
0.1.0
------
- 🚀 Initial release!
| /ridgeplot-0.1.23.tar.gz/ridgeplot-0.1.23/CHANGES.md | 0.877922 | 0.704605 | CHANGES.md | pypi |
<p align="center">
<img src="docs/_static/img/logo-wide.png" alt="ridgeplot - beautiful ridgeline plots in Python">
</p>
<h1 id="ridgeplot" align="center">
ridgeplot: beautiful ridgeline plots in Python
</h1>
<p align="center">
<!-- TODO: https://bestpractices.coreinfrastructure.org/en -->
<!-- TODO: https://www.gitpod.io/docs/getting-started -->
<a href="https://pypi.org/project/ridgeplot/"><img src="https://img.shields.io/pypi/v/ridgeplot" alt="PyPI - Latest Release"></a>
<a href="https://github.com/tpvasconcelos/ridgeplot/"><img src="https://img.shields.io/pypi/pyversions/ridgeplot" alt="PyPI - Python Versions"></a>
<a href="https://pypi.org/project/ridgeplot/"><img src="https://img.shields.io/pypi/dm/ridgeplot" alt="PyPI - Downloads"></a>
<a href="https://pypi.org/project/ridgeplot/"><img src="https://img.shields.io/pypi/status/ridgeplot.svg" alt="PyPI - Package Status"></a>
<a href="https://github.com/tpvasconcelos/ridgeplot/blob/main/LICENSE"><img src="https://img.shields.io/pypi/l/ridgeplot" alt="PyPI - License"></a>
<br>
<a href="https://github.com/tpvasconcelos/ridgeplot/actions/workflows/ci.yaml/"><img src="https://github.com/tpvasconcelos/ridgeplot/actions/workflows/ci.yaml/badge.svg" alt="GitHub CI"></a>
<a href="https://ridgeplot.readthedocs.io/en/latest/"><img src="https://readthedocs.org/projects/ridgeplot/badge/?version=latest&style=flat" alt="Docs"></a>
<a href="https://codecov.io/gh/tpvasconcelos/ridgeplot"><img src="https://codecov.io/gh/tpvasconcelos/ridgeplot/branch/main/graph/badge.svg" alt="codecov"></a>
<a href="https://www.codefactor.io/repository/github/tpvasconcelos/ridgeplot"><img src="https://www.codefactor.io/repository/github/tpvasconcelos/ridgeplot/badge" alt="CodeFactor"></a>
<a href="https://www.codacy.com/gh/tpvasconcelos/ridgeplot/dashboard?utm_source=github.com&utm_medium=referral&utm_content=tpvasconcelos/ridgeplot&utm_campaign=Badge_Grade"><img src="https://app.codacy.com/project/badge/Grade/e21652ac49874b6f94ed3c9b7ac77021" alt="Codacy code quality"/></a>
</p>
______________________________________________________________________
`ridgeplot` is a Python package that provides a simple interface for plotting beautiful and interactive [ridgeline plots](https://www.data-to-viz.com/graph/ridgeline.html) within the extensive [Plotly](https://plotly.com/python/) ecosystem.
## Installation
`ridgeplot` can be installed and updated from [PyPi](https://pypi.org/project/ridgeplot/) using [pip](https://pip.pypa.io/en/stable/quickstart/):
```shell
pip install -U ridgeplot
```
For more information, see the [installation guide](https://ridgeplot.readthedocs.io/en/stable/getting_started/installation.html).
## Getting started
Take a look at the [getting started guide](https://ridgeplot.readthedocs.io/en/stable/getting_started/getting_started.html), which provides a quick introduction to the `ridgeplot` library.
The full official documentation can be found at: https://ridgeplot.readthedocs.io/en/stable/
### Basic example
This basic example gets you started with a simple call to the `ridgeplot()` function.
```python
import numpy as np
from ridgeplot import ridgeplot
my_samples = [np.random.normal(n / 1.2, size=600) for n in range(9, 0, -1)]
fig = ridgeplot(samples=my_samples)
fig.update_layout(height=500, width=800)
fig.show()
```

### Flexible configuration
In this example, we will be replicating the first ridgeline plot example in this [_from Data to Viz_ post](https://www.data-to-viz.com/graph/ridgeline.html), which uses the _"Perception of Probability Words"_ dataset.
```python
import numpy as np
from ridgeplot import ridgeplot
from ridgeplot.datasets import load_probly
# Load the probly dataset
df = load_probly()
# Let's grab the subset of columns used in the example
column_names = [
"Almost Certainly",
"Very Good Chance",
"We Believe",
"Likely",
"About Even",
"Little Chance",
"Chances Are Slight",
"Almost No Chance",
]
df = df[column_names]
# Not only does 'ridgeplot(...)' come configured with sensible defaults
# but is also fully configurable to your own style and preference!
fig = ridgeplot(
samples=df.values.T,
bandwidth=4,
kde_points=np.linspace(-12.5, 112.5, 500),
colorscale="viridis",
colormode="row-index",
coloralpha=0.65,
labels=column_names,
linewidth=2,
spacing=5 / 9,
)
# And you can still update and extend the final
# Plotly Figure using standard Plotly methods
fig.update_layout(
height=760,
width=900,
font_size=16,
plot_bgcolor="white",
xaxis_tickvals=[-12.5, 0, 12.5, 25, 37.5, 50, 62.5, 75, 87.5, 100, 112.5],
xaxis_ticktext=["", "0", "", "25", "", "50", "", "75", "", "100", ""],
xaxis_gridcolor="rgba(0, 0, 0, 0.1)",
yaxis_gridcolor="rgba(0, 0, 0, 0.1)",
yaxis_title="Assigned Probability (%)",
showlegend=False,
)
# Show us the work!
fig.show()
```

### More examples
For more examples, take a look at the [getting started guide](https://ridgeplot.readthedocs.io/en/stable/getting_started/getting_started.html). For instance, this example demonstrates how you can also draw [multiple traces](https://ridgeplot.readthedocs.io/en/stable/getting_started/getting_started.html#more-traces) per row in your ridgeline plot:
```python
import numpy as np
from ridgeplot import ridgeplot
from ridgeplot.datasets import load_lincoln_weather
# Load test data
df = load_lincoln_weather()
# Transform the data into a 3D (ragged) array format of
# daily min and max temperature samples per month
months = df.index.month_name().unique()
samples = [
[
df[df.index.month_name() == month]["Min Temperature [F]"],
df[df.index.month_name() == month]["Max Temperature [F]"],
]
for month in months
]
# And finish by styling it up to your liking!
fig = ridgeplot(
samples=samples,
labels=months,
coloralpha=0.98,
bandwidth=4,
kde_points=np.linspace(-25, 110, 400),
spacing=0.33,
linewidth=2,
)
fig.update_layout(
title="Minimum and maximum daily temperatures in Lincoln, NE (2016)",
height=650,
width=950,
font_size=14,
plot_bgcolor="rgb(245, 245, 245)",
xaxis_gridcolor="white",
yaxis_gridcolor="white",
xaxis_gridwidth=2,
yaxis_title="Month",
xaxis_title="Temperature [F]",
showlegend=False,
)
fig.show()
```

| /ridgeplot-0.1.23.tar.gz/ridgeplot-0.1.23/README.md | 0.873188 | 0.956104 | README.md | pypi |
# Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
[tomasvasconcelos1@gmail.com](mailto:tomasvasconcelos1@gmail.com).
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available
at [https://www.contributor-covenant.org/translations][translations].
[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations
| /ridgeplot-0.1.23.tar.gz/ridgeplot-0.1.23/CODE_OF_CONDUCT.md | 0.599368 | 0.68087 | CODE_OF_CONDUCT.md | pypi |
# Contributing
Thank you for your interest in contributing to ridgeplot! 🚀
The contribution process for ridgeplot should start with
[filing a GitHub issue](https://github.com/tpvasconcelos/ridgeplot/issues/new/choose). We define
three main categories of issues, and each category has its own GitHub issue template
- ⭐ Feature requests
- 🐛 Bug reports
- 📚 Documentation fixes
After the implementation strategy has been agreed on by a ridgeplot contributor, the next step is to
introduce your changes as a pull request (see [](#pull-request-workflow)) against the ridgeplot
repository. Once your pull request is merged, your changes will be automatically included in the
next ridgeplot release. Every change should be listed in the ridgeplot
[](../reference/changelog.md).
The following is a set of (slightly opinionated) rules and general guidelines for contributing to
ridgeplot. Emphasis on **guidelines**, not _rules_. Use your best judgment, and feel free to propose
changes to this document in a pull request.
(Development-environment)=
## Development environment
Here are some guidelines for setting up your development environment. Most of the steps have been
abstracted away using the [make](<https://en.wikipedia.org/wiki/Make_(software)>) build automation
tool. Feel free to peak inside {{ repo_file('Makefile') }} at any time to see exactly what is being
run, and in which order.
First, you will need to
[clone](https://docs.github.com/en/github/getting-started-with-github/fork-a-repo#step-2-create-a-local-clone-of-your-fork)
this repository. For this, make sure you have a [GitHub account](https://github.com/join), fork
ridgeplot to your GitHub account by clicking the
[Fork](https://github.com/tpvasconcelos/ridgeplot/fork) button, and clone the main repository
locally (e.g. using SSH)
```shell
git clone git@github.com:tpvasconcelos/ridgeplot.git
cd ridgeplot
```
You will also need to add your fork as a remote to push your work to. Replace `{username}` with your
GitHub username.
```shell
git remote add fork git@github.com:{username}/ridgeplot.git
```
The following command will 1) create a new virtual environment (under `.venv`), 2) install ridgeplot
in [editable mode](https://pip.pypa.io/en/stable/cli/pip_install/#install-editable) (along with all
it's dependencies), and 3) set up and install all [pre-commit hooks](https://pre-commit.com/). Make
sure you always work within this virtual environment (i.e., `$ source .venv/bin/activate`). On top
of this, you should also set up your IDE to always point to this python interpreter. In PyCharm,
open `Preferences -> Project: ridgeplot -> Project Interpreter` and point the python interpreter to
`.venv/bin/python`.
```shell
make init
```
The default and **recommended** base python is `python3.7`. You can change this by exporting the
`BASE_PYTHON` environment variable. For instance, if you are having issues installing scientific
packages on macOS for python 3.7, you can try python 3.8 instead:
```shell
BASE_PYTHON=python3.8 make init
```
If you need to use jupyter-lab, you can install all extra requirements, as well as set up the
environment and jupyter kernel with
```shell
make init-jupyter
```
## Pull Request Workflow
1. Always confirm that you have properly configured your Git username and email.
```shell
git config --global user.name 'Your name'
git config --global user.email 'Your email address'
```
2. Each release series has its own branch (i.e. `MAJOR.MINOR.x`). If submitting a documentation or
bug fix contribution, branch off of the latest release series branch.
```shell
git fetch origin
git checkout -b <YOUR-BRANCH-NAME> origin/x.x.x
```
Otherwise, if submitting a new feature or API change, branch off of the `main` branch
```shell
git fetch origin
git checkout -b <YOUR-BRANCH-NAME> origin/main
```
3. Apply and commit your changes.
4. Include tests that cover any code changes you make, and make sure the test fails without your
patch.
5. Add an entry to CHANGES.md summarising the changes in this pull request. The entry should follow
the same style and format as other entries, i.e.
> `- Your summary here. (#XXX)`
where `#XXX` should link to the relevant pull request. If you think that the changes in this pull
request do not warrant a changelog entry, please state it in your pull request's description. In
such cases, a maintainer should add a `skip news` label to make CI pass.
6. Make sure all integration approval steps are passing locally (i.e., `tox`).
7. Push your changes to your fork
```shell
git push --set-upstream fork <YOUR-BRANCH-NAME>
```
8. [Create a pull request](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request)
. Remember to update the pull request's description with relevant notes on the changes
implemented, and to
[link to relevant issues](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue)
(e.g., `fixes #XXX` or `closes #XXX`).
9. Wait for all remote CI checks to pass and for a ridgeplot contributor to approve your pull
request.
## Continuous Integration
From GitHub's
[Continuous Integration and Continuous Delivery (CI/CD) Fundamentals](https://resources.github.com/ci-cd/):
> _Continuous Integration (CI) automatically builds, tests, and **integrates** code changes within a
> shared repository._
The first step to Continuous Integration (CI) is having a version control system (VCS) in place.
Luckily, you don't have to worry about that! As you have already noticed, we use
[Git](https://git-scm.com/) and host on [GitHub](https://github.com/tpvasconcelos/ridgeplot).
On top of this, we also run a series of integration approval steps that allow us to ship code
changes faster and more reliably. In order to achieve this, we run automated tests and coverage
reports, as well as syntax (and type) checkers, code style formatters, and dependency vulnerability
scans.
### Running it locally
Our tool of choice to configure and reliably run all integration approval steps is
[Tox](https://github.com/tox-dev/tox), which allows us to run each step in reproducible isolated
virtual environments. To trigger all checks in parallel, simply run
```shell
./bin/tox --parallel auto -m static tests
```
It's that simple 🙌 !! Note only that this will take a while the first time you run the command,
since it will have to create all the required virtual environments (along with their dependencies)
for each CI step.
The configuration for Tox can be found in {{ repo_file('tox.ini') }}.
#### Tests and coverage reports
We use [pytest](https://github.com/pytest-dev/pytest) as our testing framework, and
[pytest-cov](https://pytest-cov.readthedocs.io/en/latest/) to track and measure code coverage. You
can find all configuration details in {{ repo_file('tox.ini') }}. To trigger all tests, simply run
```shell
./bin/tox --parallel auto -m tests
```
If you need more control over which tests are running, or which flags are being passed to pytest, you can also invoke `pytest` directly which will run on your current virtual environment. Configuration details can be found in {{ repo_file('tox.ini') }}.
#### Linting
This project uses [pre-commit hooks](https://pre-commit.com/) to check and automatically fix any
formatting rules. These checks are triggered before creating any git commit. To manually trigger all
linting steps (i.e., all pre-commit hooks), run
```shell
pre-commit run --all-files
```
For more information on which hooks will run, have a look inside the {{
repo_file('.pre-commit-config.yaml') }} configuration file. If you want to manually trigger
individual hooks, you can invoke the `pre-commit` script directly. If you need even more control over
the tools used you could also invoke them directly (e.g., `isort .`). Remember however that this is
**not** the recommended approach.
### GitHub Actions
We use [GitHub Actions](https://github.com/features/actions) to automatically run all integration
approval steps defined with Tox on every push or pull request event. These checks run on all major
operating systems and all supported Python versions. Finally, the generated coverage reports are
uploaded to [Codecov](https://about.codecov.io/) and [Codacy](https://www.codacy.com/). Check {{
repo_file('.github/workflows/ci.yaml') }} for more details.
### Tools and software
Here is a quick overview of all CI tools and software in use, some of which have already been
discussed in the sections above.
| Tool | Category | config files | Details |
| -------------------------------------------------------------------------- | --------------- | -------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| [Tox](https://github.com/tox-dev/tox) | 🔧 Orchestration | {{ repo_file('tox.ini') }} | We use Tox to reliably run all integration approval steps in reproducible isolated virtual environments. |
| [GitHub Actions](https://github.com/features/actions) | 🔧 Orchestration | {{ repo_file('.github/workflows/ci.yaml') }} | Workflow automation for GitHub. We use it to automatically run all integration approval steps defined with Tox on every push or pull request event. |
| [Git](https://git-scm.com/) | 🕰 VCS | {{ repo_file('.gitignore') }} | Projects version control system software of choice. |
| [pytest](https://github.com/pytest-dev/pytest) | 🧪 Testing | {{ repo_file('tox.ini') }} | Testing framework for python code. |
| [pytest-cov](https://pytest-cov.readthedocs.io/en/latest/) | 📊 Coverage | {{ repo_file('tox.ini') }} | Coverage plugin for pytest. |
| [Codecov](https://about.codecov.io/) and [Codacy](https://www.codacy.com/) | 📊 Coverage | | Two great services for tracking, monitoring, and alerting on code coverage and code quality. |
| [pre-commit hooks](https://pre-commit.com/) | 💅 Linting | {{ repo_file('.pre-commit-config.yaml') }} | Used to to automatically check and fix any formatting rules on every commit. |
| [mypy](https://github.com/python/mypy) | 💅 Linting | {{ repo_file('mypy.ini') }} | A static type checker for Python. We use quite a strict configuration here, which can be tricky at times. Feel free to ask for help from the community by commenting on your issue or pull request. |
| [black](https://github.com/psf/black) | 💅 Linting | {{ repo_file('pyproject.toml') }} | "The uncompromising Python code formatter". We use `black` to automatically format Python code in a deterministic manner. We use a maximum line length of 100 characters. |
| [flake8](https://github.com/pycqa/flake8) | 💅 Linting | {{ repo_file('setup.cfg') }} | Used to check the style and quality of python code. |
| [isort](https://github.com/pycqa/isort) | 💅 Linting | {{ repo_file('setup.cfg') }} | Used to sort python imports. |
| [EditorConfig](https://editorconfig.org/) | 💅 Linting | {{ repo_file('.editorconfig') }} | This repository uses the `.editorconfig` standard configuration file, which aims to ensure consistent style across multiple programming environments. |
## Project structure
### Community health files
GitHub's community health files allow repository maintainers to set contributing guidelines to help
collaborators make meaningful, useful contributions to a project. Read more on this official
[reference](https://docs.github.com/en/communities/setting-up-your-project-for-healthy-contributions)
.
- {{ repo_file('CODE_OF_CONDUCT.md') }} - A CODE_OF_CONDUCT file defines standards for how to engage
in a community. For more information, see
"[Adding a code of conduct to your project.](https://docs.github.com/en/communities/setting-up-your-project-for-healthy-contributions/adding-a-code-of-conduct-to-your-project)"
- {{ repo_file('CONTRIBUTING.md') }} - A CONTRIBUTING file communicates how people should contribute
to your project. For more information, see
"[Setting guidelines for repository contributors.](https://docs.github.com/en/articles/setting-guidelines-for-repository-contributors)"
### Configuration files
For more context on some of the tools referenced below, refer to the sections on
[Continuous Integration](#continuous-integration).
- {{ repo_file('.github/workflows/ci.yaml') }} - Workflow definition for our CI GitHub Actions
pipeline.
- {{ repo_file('.pre-commit-config.yaml') }} - List of pre-commit hooks.
- {{ repo_file('.editorconfig') }} - [EditorConfig](https://editorconfig.org/) standard
configuration file.
- {{ repo_file('mypy.ini') }} - Configuration for the `mypy` static type checker.
- {{ repo_file('pyproject.toml') }} -
- [build system](https://setuptools.readthedocs.io/en/latest/build_meta.html) requirements (probably
won't need to touch these!) and [black](https://github.com/psf/black) configurations.
- {{ repo_file('setup.cfg') }} - Here, we specify the package metadata, requirements, as well as
configuration details for [flake8](https://github.com/pycqa/flake8) and
[isort](https://github.com/pycqa/isort).
- {{ repo_file('tox.ini') }} - Configuration for [tox](https://github.com/tox-dev/tox),
[pytest](https://github.com/pytest-dev/pytest), and
[coverage](https://coverage.readthedocs.io/en/latest/index.html).
## Release process
You need push access to the project's repository to make releases. The following release steps are
here for reference only.
1. Review the `## Unreleased changes` section in CHANGES.md by checking for consistency in format
and, if necessary, refactoring related entries into relevant subsections (e.g. _Features_ ,
_Docs_, _Bugfixes_, _Security_, etc). Take a look at previous release notes for guidance and try
to keep it consistent.
2. Submit a pull request with these changes only and use the
`"Cleanup release notes for X.X.X release"` template for the pull request title. ridgeplot uses
the [SemVer](https://semver.org/) (`MAJOR.MINOR.PATCH`) versioning standard. You can determine
the latest release version by running `git describe --tags --abbrev=0` on the `main` branch.
Based on this, you can determine the next release version by incrementing the MAJOR, MINOR, or
PATCH. More on this on the next section. For now, just make sure you merge this pull request into
the `main` branch before continuing.
3. Use the [bumpversion](https://github.com/peritus/bumpversion) utility to bump the current
version. This utility will automatically bump the current version, and issue a relevant commit
and git tag. E.g.,
```shell
# Bump MAJOR version (e.g., 0.4.2 -> 1.0.0)
bumpversion major
# Bump MINOR version (e.g., 0.4.2 -> 0.5.0)
bumpversion minor
# Bump PATCH version (e.g., 0.4.2 -> 0.4.3)
bumpversion patch
```
You can always perform a dry-run to see what will happen under the hood.
```shell
bumpversion --dry-run --verbose [--allow-dirty] [major,minor,patch]
```
4. Push your changes along with all tag references:
```shell
git push && git push --tags
```
5. At this point a couple of GitHub Actions workflows will be triggered:
1. `.github/workflows/ci.yaml`: Runs all CI checks with Tox against the new changes pushed to
`main`.
2. `.github/workflows/release.yaml`: Issues a new GitHub release triggered by the new git tag
pushed in the previous step.
3. `.github/workflows/publish-pypi.yaml`: Builds, packages, and uploads the source and wheel
package to PyPI (and test PyPI). This is triggered by the new GitHub release created in the
previous step.
6. **Trust but verify!**
1. Verify that all three workflows passed successfully:
<https://github.com/tpvasconcelos/ridgeplot/actions>
2. Verify that the new git tag is present in the remote repository:
<https://github.com/tpvasconcelos/ridgeplot/tags>
3. Verify that the new release is present in the remote repository and that the release notes
were correctly parsed: <https://github.com/tpvasconcelos/ridgeplot/releases>
4. Verify that the new package is available in PyPI: <https://pypi.org/project/ridgeplot/>
5. Verify that the docs were updated and published to
<https://ridgeplot.readthedocs.io/en/stable/>
## Code of Conduct
Please remember to read and follow our
[Code of Conduct](https://github.com/tpvasconcelos/ridgeplot/blob/main/CODE_OF_CONDUCT.md). 🤝
| /ridgeplot-0.1.23.tar.gz/ridgeplot-0.1.23/docs/development/contributing.md | 0.770335 | 0.855248 | contributing.md | pypi |
# Getting started
This page provides a quick introduction to the `ridgeplot` library, showcasing some of its features and providing a few practical examples. All examples use the {py:func}`ridgeplot.ridgeplot()` function, which is the main entry point to the library. For more information on the available options, take a look at the [reference page](../api/public/ridgeplot.ridgeplot.rst).
## Basic example
This basic example shows how you can quickly get started with a simple call to the {py:func}`~ridgeplot.ridgeplot()` function.
```python
import numpy as np
from ridgeplot import ridgeplot
my_samples = [np.random.normal(n / 1.2, size=600) for n in range(9, 0, -1)]
fig = ridgeplot(samples=my_samples)
fig.update_layout(height=500, width=800)
fig.show()
```
::::{tab-set}
:::{tab-item} Interactive output
```{raw} html
<iframe src="../_static/charts/basic.html" height="400" width="100%" style="border:none;overflow:hidden;"></iframe>
```
:::
:::{tab-item} Static image output

:::
::::
## Flexible configuration
In this example, we will try to replicate the first ridgeline plot in this [_from Data to Viz_ post](https://www.data-to-viz.com/graph/ridgeline.html). The example in the post was created using the _"Perception of Probability Words"_ dataset (see {py:func}`~ridgeplot.datasets.load_probly()`) and the popular [ggridges](https://wilkelab.org/ggridges/) R package. In the end, we will see how the `ridgeplot` Python library can be used to create a (nearly) identical plot, thanks to its extensive configuration options.
```python
import numpy as np
from ridgeplot import ridgeplot
from ridgeplot.datasets import load_probly
# Load the probly dataset
df = load_probly()
# Let's grab the subset of columns used in the example
column_names = [
"Almost Certainly",
"Very Good Chance",
"We Believe",
"Likely",
"About Even",
"Little Chance",
"Chances Are Slight",
"Almost No Chance",
]
df = df[column_names]
# Not only does 'ridgeplot(...)' come configured with sensible defaults
# but is also fully configurable to your own style and preference!
fig = ridgeplot(
samples=df.values.T,
bandwidth=4,
kde_points=np.linspace(-12.5, 112.5, 500),
colorscale="viridis",
colormode="row-index",
coloralpha=0.65,
labels=column_names,
linewidth=2,
spacing=5 / 9,
)
# And you can still update and extend the final
# Plotly Figure using standard Plotly methods
fig.update_layout(
height=760,
width=900,
font_size=16,
plot_bgcolor="white",
xaxis_tickvals=[-12.5, 0, 12.5, 25, 37.5, 50, 62.5, 75, 87.5, 100, 112.5],
xaxis_ticktext=["", "0", "", "25", "", "50", "", "75", "", "100", ""],
xaxis_gridcolor="rgba(0, 0, 0, 0.1)",
yaxis_gridcolor="rgba(0, 0, 0, 0.1)",
yaxis_title="Assigned Probability (%)",
showlegend=False,
)
# Show us the work!
fig.show()
```
::::{tab-set}
:::{tab-item} Interactive output
```{raw} html
<iframe src="../_static/charts/probly.html" height="550" width="100%" style="border:none;overflow:hidden;"></iframe>
```
:::
:::{tab-item} Static image output
The resulting ridgeline plot generated by the code above:

:::
:::{tab-item} Target/reference image
The target reference from the [_from Data to Viz_ post](https://www.data-to-viz.com/graph/ridgeline.html):

:::
::::
## More traces
In this example, we will dive a bit deeper into the {py:paramref}`~ridgeplot.ridgeplot.samples` parameter and see how it can be used to plot multiple traces per row in a ridgeline plot.
### Final result
For the ones in a hurry, we are including the entire final code-block and resulting plot already in this section. It is here also to serve as a reference for the rest of the section and to demonstrate what the goal of this example is. That said, throughout the rest of this section, we will dive a bit deeper into the {py:paramref}`~ridgeplot.ridgeplot.samples` parameter and understand how flexible it is.
::::{tab-set}
:::{tab-item} Interactive output
```{raw} html
<iframe src="../_static/charts/lincoln_weather.html" height="650" width="100%" style="border:none;overflow:hidden;"></iframe>
```
:::
:::{tab-item} Static image output

:::
:::{tab-item} Code
```python
import numpy as np
from ridgeplot import ridgeplot
from ridgeplot.datasets import load_lincoln_weather
# Load test data
df = load_lincoln_weather()
# Transform the data into a 3D (ragged) array format of
# daily min and max temperature samples per month
months = df.index.month_name().unique()
samples = [
[
df[df.index.month_name() == month]["Min Temperature [F]"],
df[df.index.month_name() == month]["Max Temperature [F]"],
]
for month in months
]
# And finish by styling it up to your liking!
fig = ridgeplot(
samples=samples,
labels=months,
coloralpha=0.98,
bandwidth=4,
kde_points=np.linspace(-25, 110, 400),
spacing=0.33,
linewidth=2,
)
fig.update_layout(
title="Minimum and maximum daily temperatures in Lincoln, NE (2016)",
height=650,
width=950,
font_size=14,
plot_bgcolor="rgb(245, 245, 245)",
xaxis_gridcolor="white",
yaxis_gridcolor="white",
xaxis_gridwidth=2,
yaxis_title="Month",
xaxis_title="Temperature [F]",
showlegend=False,
)
fig.show()
```
:::
::::
### Step-by-step
Let's start by loading the _"Lincoln Weather"_ test dataset (see {py:func}`~ridgeplot.datasets.load_lincoln_weather()`).
```{doctest}
>>> from ridgeplot.datasets import load_lincoln_weather
>>> df = load_lincoln_weather()
>>> df[["Min Temperature [F]", "Max Temperature [F]"]].head()
Min Temperature [F] Max Temperature [F]
CST
2016-01-01 11 37
2016-01-02 5 41
2016-01-03 8 37
2016-01-04 4 30
2016-01-05 19 38
```
The goal will be to plot the KDEs for the minimum and maximum daily temperatures for each month of 2016 (i.e. the year covered by the dataset).
```{doctest}
>>> months = df.index.month_name().unique()
>>> months.to_list()
['January', 'February', 'March', 'April', 'May', 'June', 'July',
'August', 'September', 'October', 'November', 'December']
```
The {paramref}`~ridgeplot.ridgeplot.samples` argument in the {py:func}`~ridgeplot.ridgeplot()` function expects a 3D array of shape {math}`(R, T_r, S_t)`, where {math}`R` is the number of rows, {math}`T_r` is the number of traces per row, and {math}`S_t` is the number of samples per trace, with:
```{list-table}
:header-rows: 1
:align: left
:widths: 4 8
* - Dimension values
- Description
* - {math}`R=12`
- One row per month.
* - {math}`T_r=2` (for all rows {math}`r \in R`)
- Two traces per row (one for the minimum temperatures and one for the maximum temperatures).
* - {math}`S_t \in \{29, 30, 31\}`
  - One sample per day of the month, where different months have different numbers of days.
```
We can create this array using a simple list comprehension, where each element of the list is a list of two arrays, one for the minimum temperatures and one for the maximum temperatures samples, for each month:
```python
samples = [
[
df[df.index.month_name() == month]["Min Temperature [F]"],
df[df.index.month_name() == month]["Max Temperature [F]"],
]
for month in months
]
```
:::{note}
For other use cases (like in the two previous examples), you could use a numpy ndarray to represent the samples. However, since different months have different numbers of days, we need to use a data container that can hold arrays of different lengths along the same dimension. Irregular arrays like this one are called [ragged arrays](https://en.wikipedia.org/wiki/Jagged_array). There are many different ways you can represent irregular arrays in Python. In this specific example, we used a list of lists of pandas Series. However, `ridgeplot` is designed to handle any object that implements the {py:class}`~typing.Collection`\[{py:class}`~typing.Collection`\[{py:class}`~typing.Collection`\[{py:data}`~ridgeplot._types.Numeric`\]]] protocol (i.e. any numeric 3D ragged array).
:::
Finally, we can pass the `samples` list to the {py:func}`~ridgeplot.ridgeplot()` function and specify any other arguments we want to customize the plot, like adjusting the KDE's bandwidth, the vertical spacing between rows, etc.
```python
fig = ridgeplot(
samples=samples,
labels=months,
coloralpha=0.98,
bandwidth=4,
kde_points=np.linspace(-25, 110, 400),
spacing=0.33,
linewidth=2,
)
fig.update_layout(
title="Minimum and maximum daily temperatures in Lincoln, NE (2016)",
height=650,
width=950,
font_size=14,
plot_bgcolor="rgb(245, 245, 245)",
xaxis_gridcolor="white",
yaxis_gridcolor="white",
xaxis_gridwidth=2,
yaxis_title="Month",
xaxis_title="Temperature [F]",
showlegend=False,
)
fig.show()
```
::::{tab-set}
:::{tab-item} Interactive output
```{raw} html
<iframe src="../_static/charts/lincoln_weather.html" height="650" width="100%" style="border:none;overflow:hidden;"></iframe>
```
:::
:::{tab-item} Static image output

:::
::::
| /ridgeplot-0.1.23.tar.gz/ridgeplot-0.1.23/docs/getting_started/getting_started.md | 0.910343 | 0.969928 | getting_started.md | pypi |
from __future__ import annotations
import platform
import subprocess
import sys
from dataclasses import dataclass, field
from importlib.abc import Loader
from importlib.machinery import ModuleSpec
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path
from types import ModuleType
from typing import Any, List, Tuple, Union, cast
def import_pyscript_as_module(path: Union[str, Path]) -> ModuleType:
    """Import a Python script as a module.

    The module is registered in ``sys.modules`` under the file's stem
    (e.g. ``"script"`` for ``path/to/script.py``), replacing any existing
    entry with the same name.

    Parameters
    ----------
    path
        The path to the Python file to import as a module.

    Returns
    -------
    ModuleType
        The imported module.

    Raises
    ------
    ImportError
        If an import spec could not be created for ``path``.

    .. note::
        This was mostly taken from the Python docs:
        https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
    """
    module_path = Path(path).resolve()
    # Use the file stem as the module name. The previous implementation split
    # on "." and would truncate dotted file names ("a.b.py" -> "a").
    module_name = module_path.stem
    spec = spec_from_file_location(name=module_name, location=str(module_path))
    if spec is None or spec.loader is None:
        # Fail loudly instead of raising an obscure AttributeError later.
        raise ImportError(f"Could not create an import spec for {module_path}")
    module = module_from_spec(spec)
    sys.modules[module_name] = module
    spec.loader.exec_module(module)
    return module
def import_attrs_from_pyscript(path: Union[str, Path], *attributes: str) -> Tuple[Any, ...]:
    r"""Import named attributes from a Python script.

    Parameters
    ----------
    path
        The path to the Python file to import.
    \*attributes
        The attribute names to pull out of the imported module.

    Returns
    -------
    Tuple[Any, ...]
        The requested attributes, in the order they were asked for.

    Examples
    --------
    Instead of

    >>> from path.to.script import Foo, bar

    you can do:

    >>> Foo, bar = import_attrs_from_pyscript("path/to/script.py", "Foo", "bar")
    """
    module = import_pyscript_as_module(path)
    return tuple(getattr(module, name) for name in attributes)
def get_py_version() -> str:
    """Return the Python version environment marker, e.g. ``"311"`` for 3.11."""
    major, minor = platform.python_version_tuple()[:2]
    return major + minor
def get_sys_platform() -> str:
    """Return the ``sys.platform`` environment marker (e.g. ``"linux"``)."""
    return sys.platform
@dataclass
class ToxEnvMarkers:
    """Environment markers used in tox.ini to pick the locked requirements file.

    Take a look at the tox.ini file to see how these are used.
    """

    PY_PYTHON_VERSION: str = field(default_factory=get_py_version)
    PY_SYS_PLATFORM: str = field(default_factory=get_sys_platform)

    def set_env(self) -> None:
        """Export the markers as environment variables.

        Values already present in the environment are left untouched.
        """
        import os

        for name, value in (
            ("PY_PYTHON_VERSION", self.PY_PYTHON_VERSION),
            ("PY_SYS_PLATFORM", self.PY_SYS_PLATFORM),
        ):
            if name in os.environ:
                print(
                    f"Not setting {name} to {value!r}, as it is already set to {os.environ[name]!r}"
                )
                continue
            print(f"Setting {name} to {value!r}")
            os.environ[name] = value
def set_tox_env_markers() -> None:
    """Derive the current interpreter's tox markers and export them."""
    markers = ToxEnvMarkers()
    markers.set_env()
# ANSI escape sequences used to colorize failure messages on terminals.
ANSI_RED = "\033[31m"
ANSI_RESET = "\033[0m"
def _fail(exit_code: int, print_err_message: bool = True) -> None:
if print_err_message:
msg = f"{' '.join(sys.argv)!r} failed with exit code {exit_code}"
print(f"{ANSI_RED}{msg}{ANSI_RESET}", file=sys.stderr)
sys.exit(exit_code)
def run_subprocess(popen_args: List[str], print_err_message: bool = True) -> None:
    """Run *popen_args* as a subprocess; on failure, exit the whole process
    (via :func:`_fail`) with the child's return code."""
    command = " ".join(popen_args)
    print(f"Running: {command}")
    try:
        subprocess.run(popen_args, check=True)
    except subprocess.CalledProcessError as exc:
        _fail(exc.returncode, print_err_message=print_err_message)
from typing import Dict, List
import requests
from requests import Response, HTTPError
from ridi_django_oauth2.config import RidiOAuth2Config
from ridi_django_oauth2_lib.decorators.retry import RetryFailException, retry
from ridi_oauth2.introspector.constants import JWKKeyType, JWKUse
from ridi_oauth2.introspector.dtos import BaseJWKDto
from ridi_oauth2.introspector.exceptions import FailToLoadPublicKeyException, \
InvalidPublicKey, NotExistedKey
from ridi_oauth2.introspector.factories import JWKDtoFactory
class KeyHandler:
    """Fetches and caches JWK public keys used to verify access tokens.

    Keys are loaded per ``client_id`` from the OAuth2 key endpoint and
    memoized in a class-level cache until each key DTO expires.
    """

    # Class-level cache shared by all callers: {client_id: {kid: BaseJWKDto}}.
    _public_key_dtos = {}

    @classmethod
    def _get_memorized_key_dto(cls, client_id: str, kid: str) -> BaseJWKDto:
        # Returns None when either the client_id or the kid is unknown.
        return cls._public_key_dtos.get(client_id, {}).get(kid, None)

    @classmethod
    def get_public_key_by_kid(cls, client_id: str, kid: str):
        """Return the PEM-encoded public key for ``kid``, refreshing the cache
        from the key endpoint if the key is missing or expired.

        Raises ``NotExistedKey`` if the kid is still unknown after a refresh,
        ``InvalidPublicKey`` if the key is not an RSA/EC signing key, and
        ``FailToLoadPublicKeyException`` if the endpoint kept failing.
        """
        public_key_dto = cls._get_memorized_key_dto(client_id, kid)
        if not public_key_dto or public_key_dto.is_expired:
            public_key_dto = cls._reset_key_dtos(client_id, kid)
        cls._assert_valid_key(public_key_dto)
        return public_key_dto.public_key

    @staticmethod
    def _assert_valid_key(key: BaseJWKDto):
        # Only RSA/EC keys intended for signature verification are accepted.
        if not key:
            raise NotExistedKey
        if key.kty not in (JWKKeyType.RSA, JWKKeyType.EC) or key.use != JWKUse.SIG:
            raise InvalidPublicKey

    @classmethod
    def _reset_key_dtos(cls, client_id: str, kid: str) -> BaseJWKDto:
        # Re-fetch all keys for this client and merge them into the cache.
        try:
            keys = cls._get_valid_public_keys_by_client_id(client_id)
        except RetryFailException as e:
            raise FailToLoadPublicKeyException from e
        cls._memorize_key_dtos(client_id, keys)
        return cls._get_memorized_key_dto(client_id, kid)

    @classmethod
    def _memorize_key_dtos(cls, client_id: str, keys: List[BaseJWKDto]):
        # Merge freshly fetched keys into the existing per-client mapping,
        # overwriting entries with the same kid.
        key_dtos = cls._public_key_dtos.get(client_id, {})
        for key in keys:
            key_dtos[key.kid] = key
        cls._public_key_dtos[client_id] = key_dtos

    @staticmethod
    def _process_response(response: Response) -> Dict:
        # Raises requests.HTTPError on 4xx/5xx, which triggers the retry below.
        response.raise_for_status()
        return response.json()

    @classmethod
    @retry(retry_count=3, retriable_exceptions=(HTTPError, ))
    def _get_valid_public_keys_by_client_id(cls, client_id: str) -> List[BaseJWKDto]:
        # Fetch the JWK set for this client and convert each entry into its
        # concrete DTO (RSA or EC) via the factory.
        response = requests.request(
            method='GET',
            url=RidiOAuth2Config.get_key_url(),
            params={'client_id': client_id},
        )
        return [JWKDtoFactory.get_dto(key) for key in cls._process_response(response=response).get('keys')]
import typing
from base64 import urlsafe_b64decode
from datetime import datetime, timedelta
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePublicNumbers, SECP256R1
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers
from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
from ridi_django_oauth2_lib.utils.bytes import bytes_to_int
from ridi_oauth2.introspector.constants import JWK_EXPIRES_MIN, JWKCrv
class AccessTokenInfo:
    """Decoded access-token claims: subject, u_idx, expiry, client and scope."""

    def __init__(self, subject: str, u_idx: int, expire: int, client_id: str, scope: typing.List):
        self._subject = subject
        self._u_idx = u_idx
        # Keep both the raw UNIX timestamp and its datetime form.
        self._expire_timestamp = expire
        self._expire_date = datetime.fromtimestamp(expire)
        self._client_id = client_id
        self._scope = scope

    @staticmethod
    def from_dict(dictionary: typing.Dict):
        """Build an :class:`AccessTokenInfo` from a dict of decoded JWT claims."""
        return AccessTokenInfo(
            subject=dictionary['sub'],
            u_idx=dictionary['u_idx'],
            expire=dictionary['exp'],
            client_id=dictionary['client_id'],
            scope=dictionary['scope'],
        )

    @property
    def subject(self) -> str:
        return self._subject

    @property
    def u_idx(self) -> int:
        return self._u_idx

    @property
    def expire_timestamp(self) -> int:
        return self._expire_timestamp

    @property
    def expire_date(self) -> datetime:
        return self._expire_date

    @property
    def client_id(self) -> str:
        return self._client_id

    @property
    def scope(self) -> typing.List:
        return self._scope
class BaseJWKDto:
    """Wrapper around a raw JWK JSON entry with a local cache expiry time."""

    def __init__(self, json):
        self._json = json
        # Local cache lifetime; once passed, the key must be re-fetched.
        self.expires = datetime.now() + timedelta(minutes=JWK_EXPIRES_MIN)

    @property
    def kid(self) -> str:
        """Key ID."""
        return self._json.get('kid')

    @property
    def kty(self) -> str:
        """Key type (e.g. RSA or EC)."""
        return self._json.get('kty')

    @property
    def use(self) -> str:
        """Intended key use (e.g. signature)."""
        return self._json.get('use')

    @property
    def alg(self) -> str:
        """Algorithm identifier."""
        return self._json.get('alg')

    @property
    def is_expired(self) -> bool:
        """Whether this cached entry's local lifetime has passed."""
        return datetime.now() > self.expires
class JWKRSADto(BaseJWKDto):
    """RSA JWK: decodes the (n, e) parameters into a PEM public key string."""

    def __init__(self, json):
        super().__init__(json)
        modulus = bytes_to_int(urlsafe_b64decode(self.n))
        exponent = bytes_to_int(urlsafe_b64decode(self.e))
        key = RSAPublicNumbers(exponent, modulus).public_key(default_backend())
        # PEM-encoded SubjectPublicKeyInfo, stored as str.
        self.public_key = key.public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo).decode()

    @property
    def e(self) -> str:
        """Public exponent (base64url-encoded)."""
        return self._json.get('e')

    @property
    def n(self) -> str:
        """Modulus (base64url-encoded)."""
        return self._json.get('n')
class JWKECDto(BaseJWKDto):
    """Elliptic-curve JWK: decodes (x, y, crv) into a PEM public key string."""

    def __init__(self, json):
        super().__init__(json)
        point_x = bytes_to_int(urlsafe_b64decode(self.x))
        point_y = bytes_to_int(urlsafe_b64decode(self.y))
        numbers = EllipticCurvePublicNumbers(point_x, point_y, self._get_curve_instance())
        key = numbers.public_key(default_backend())
        # PEM-encoded SubjectPublicKeyInfo, stored as str.
        self.public_key = key.public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo).decode()

    def _get_curve_instance(self):
        # Only the P-256 curve is currently supported.
        if self.crv == JWKCrv.P256:
            return SECP256R1()
        raise NotImplementedError

    @property
    def crv(self) -> str:
        """Curve name."""
        return self._json.get('crv')

    @property
    def x(self) -> str:
        """X coordinate (base64url-encoded)."""
        return self._json.get('x')

    @property
    def y(self) -> str:
        """Y coordinate (base64url-encoded)."""
        return self._json.get('y')
import shutil
from pathlib import Path
import numpy as np
from keras import datasets, utils, callbacks, optimizers, losses
from keras.preprocessing.image import ImageDataGenerator
import ridurre
from example.cifar_10_resnet import resnet
from ridurre import model_complexity
# Recreate a clean training-logs directory on every run.
TRAIN_LOGS_FOLDER_PATH = Path("./train_logs")
if TRAIN_LOGS_FOLDER_PATH.is_dir():
    shutil.rmtree(str(TRAIN_LOGS_FOLDER_PATH))
TRAIN_LOGS_FOLDER_PATH.mkdir()
# Creating a ResNet20 model (ResNet v1 with depth=20) for CIFAR-10
model = resnet.resnet_v1(input_shape=(32, 32, 3), depth=20, num_classes=10)
def compile_model(my_model):
    """Compile *my_model* in place with Adam (lr=0.001) and categorical
    cross-entropy; also passed to the pruning routine below so the model
    can be re-compiled after each pruning step."""
    my_model.compile(optimizer=optimizers.Adam(lr=0.001), loss=losses.categorical_crossentropy, metrics=["accuracy"])
compile_model(model)
# Loading data
(x_train, y_train), (x_test, y_test) = datasets.cifar10.load_data()
# Data Transform: scale pixels to [0, 1], one-hot encode labels, and center
# both splits using the *training* mean (avoids test-set leakage).
x_train = x_train.astype(np.float32) / 255.0
y_train = utils.to_categorical(y_train)
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test = x_test.astype(np.float32) / 255.0
y_test = utils.to_categorical(y_test)
x_test -= x_train_mean
print("Train shape: X {0}, y: {1}".format(x_train.shape, y_train.shape))
print("Test shape: X {0}, y: {1}".format(x_test.shape, y_test.shape))
# Data Augmentation with Data Generator
data_generator = ImageDataGenerator(horizontal_flip=True, vertical_flip=True, rotation_range=20)
# Create callbacks: TensorBoard logs, parameter-count tracking, and
# per-epoch model checkpoints.
tensorboard_callback = callbacks.TensorBoard(log_dir=str(TRAIN_LOGS_FOLDER_PATH))
model_complexity_param = model_complexity.ModelParametersCallback(TRAIN_LOGS_FOLDER_PATH, verbose=1)
model_checkpoint_callback = callbacks.ModelCheckpoint(str(TRAIN_LOGS_FOLDER_PATH) + "/model_{epoch:02d}.h5",
                                                      save_best_only=False,
                                                      save_weights_only=False,
                                                      verbose=1)
# NOTE(review): this rebinding shadows the imported `keras.callbacks` module;
# it works because the module is not used again below, but a rename would be safer.
callbacks = [tensorboard_callback, model_complexity_param, model_checkpoint_callback]
# Train the model
FIRST_TRAIN_EPOCHS = 20
BATCH_SIZE = 32
STEPS_PER_EPOCH = len(x_train) // BATCH_SIZE
model.fit_generator(data_generator.flow(x_train, y_train, BATCH_SIZE),
                    epochs=FIRST_TRAIN_EPOCHS,
                    validation_data=(x_test, y_test),
                    callbacks=callbacks,
                    steps_per_epoch=STEPS_PER_EPOCH)
# Prune the model
def finetune_model(my_model, initial_epoch, finetune_epochs):
    """Fine-tune *my_model* between pruning steps (passed to ridurre below).

    `finetune_epochs` follows Keras's `epochs` semantics (absolute target
    epoch, not a count) and training resumes from `initial_epoch`. Uses the
    module-level `data_generator`, train/test arrays, and `callbacks` list.
    """
    my_model.fit_generator(data_generator.flow(x_train, y_train, BATCH_SIZE),
                           epochs=finetune_epochs,
                           validation_data=(x_test, y_test),
                           callbacks=callbacks,
                           initial_epoch=initial_epoch,
                           verbose=1,
                           steps_per_epoch=STEPS_PER_EPOCH)
# K-means filter pruning: clusters similar filters and removes redundant ones,
# fine-tuning (and re-compiling) the model after each pruning iteration.
pruning = ridurre.KMeansFilterPruning(0.9,
                                      compile_model,
                                      finetune_model,
                                      nb_finetune_epochs=5,
                                      maximum_pruning_percent=0.85,
                                      maximum_prune_iterations=10,
                                      nb_trained_for_epochs=FIRST_TRAIN_EPOCHS)
model, last_epoch_number = pruning.run_pruning(model)
# Train again for a reasonable number of epochs (not always necessary)
SECOND_TRAIN_EPOCHS = 20
model.fit_generator(data_generator.flow(x_train, y_train, BATCH_SIZE),
                    epochs=last_epoch_number + SECOND_TRAIN_EPOCHS,
                    validation_data=(x_test, y_test),
                    callbacks=callbacks,
                    steps_per_epoch=STEPS_PER_EPOCH,
                    initial_epoch=last_epoch_number)
from keras import layers, models, regularizers
def _resnet_layer(inputs,
                  num_filters=16,
                  kernel_size=3,
                  strides=1,
                  activation='relu',
                  batch_normalization=True,
                  conv_first=True):
    """Conv2D / BatchNormalization / Activation building block.

    Applies the three operations either as conv-bn-act (``conv_first=True``)
    or bn-act-conv (``conv_first=False``); batch norm and the activation are
    each optional.

    # Arguments
        inputs (tensor): input tensor from input image or previous layer
        num_filters (int): Conv2D number of filters
        kernel_size (int): Conv2D square kernel dimensions
        strides (int): Conv2D square stride dimensions
        activation (string): activation name (None to skip)
        batch_normalization (bool): whether to include batch normalization
        conv_first (bool): conv-bn-activation (True) or
            bn-activation-conv (False)

    # Returns
        x (tensor): tensor as input to the next layer
    """
    conv = layers.Conv2D(num_filters,
                         kernel_size=kernel_size,
                         strides=strides,
                         padding='same',
                         kernel_initializer='he_normal',
                         kernel_regularizer=regularizers.l2(1e-4))

    def _bn_act(tensor):
        # Optional batch normalization followed by an optional activation.
        if batch_normalization:
            tensor = layers.BatchNormalization()(tensor)
        if activation is not None:
            tensor = layers.Activation(activation)(tensor)
        return tensor

    if conv_first:
        return _bn_act(conv(inputs))
    return conv(_bn_act(inputs))
def resnet_v1(input_shape, depth, num_classes=10):
    """ResNet Version 1 Model builder [a]

    Stacks of 2 x (3 x 3) Conv2D-BN-ReLU
    Last ReLU is after the shortcut connection.
    At the beginning of each stage, the feature map size is halved (downsampled)
    by a convolutional layer with strides=2, while the number of filters is
    doubled. Within each stage, the layers have the same number of filters and
    the same feature map sizes.
    Features maps sizes:
    stage 0: 32x32, 16
    stage 1: 16x16, 32
    stage 2: 8x8,  64
    The Number of parameters is approx the same as Table 6 of [a]:
    ResNet20 0.27M
    ResNet32 0.46M
    ResNet44 0.66M
    ResNet56 0.85M
    ResNet110 1.7M

    # Arguments
        input_shape (tensor): shape of input image tensor
        depth (int): number of core convolutional layers
        num_classes (int): number of classes (CIFAR10 has 10)

    # Returns
        model (Model): Keras model instance
    """
    if (depth - 2) % 6 != 0:
        raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')
    # Start model definition.
    num_filters = 16
    num_res_blocks = int((depth - 2) / 6)
    inputs = layers.Input(shape=input_shape)
    x = _resnet_layer(inputs=inputs)
    # Instantiate the stack of residual units
    for stack in range(3):
        for res_block in range(num_res_blocks):
            strides = 1
            if stack > 0 and res_block == 0:  # first layer but not first stack
                strides = 2  # downsample
            y = _resnet_layer(inputs=x,
                              num_filters=num_filters,
                              strides=strides)
            # NOTE(review): the canonical Keras CIFAR example passes
            # activation=None here (ReLU only after the addition); this
            # variant applies ReLU before the addition as well — confirm
            # this is intentional.
            y = _resnet_layer(inputs=y,
                              num_filters=num_filters,
                              activation="relu")
            if stack > 0 and res_block == 0:  # first layer but not first stack
                # linear projection residual shortcut connection to match
                # changed dims
                x = _resnet_layer(inputs=x,
                                  num_filters=num_filters,
                                  kernel_size=1,
                                  strides=strides,
                                  activation="relu",
                                  batch_normalization=False)
            x = layers.add([x, y])
            x = layers.Activation('relu')(x)
        # Double the filter count as the feature map is halved at each stage.
        num_filters *= 2
    # Add classifier on top.
    # v1 does not use BN after last shortcut connection-ReLU
    x = layers.AveragePooling2D(pool_size=8)(x)
    y = layers.Flatten()(x)
    outputs = layers.Dense(num_classes, activation='softmax')(y)
    # Instantiate model.
    model = models.Model(inputs=inputs, outputs=outputs)
    return model
def resnet_v2(input_shape, depth, num_classes=10):
    """ResNet Version 2 Model builder [b]

    Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D or also known as
    bottleneck layer
    First shortcut connection per layer is 1 x 1 Conv2D.
    Second and onwards shortcut connection is identity.
    At the beginning of each stage, the feature map size is halved (downsampled)
    by a convolutional layer with strides=2, while the number of filter maps is
    doubled. Within each stage, the layers have the same number of filters and
    the same filter map sizes.
    Features maps sizes:
    conv1  : 32x32,  16
    stage 0: 32x32,  64
    stage 1: 16x16, 128
    stage 2:  8x8,  256

    # Arguments
        input_shape (tensor): shape of input image tensor
        depth (int): number of core convolutional layers
        num_classes (int): number of classes (CIFAR10 has 10)

    # Returns
        model (Model): Keras model instance
    """
    if (depth - 2) % 9 != 0:
        raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
    # Start model definition.
    num_filters_in = 16
    num_res_blocks = int((depth - 2) / 9)
    inputs = layers.Input(shape=input_shape)
    # v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths
    x = _resnet_layer(inputs=inputs,
                      num_filters=num_filters_in,
                      conv_first=True)
    # Instantiate the stack of residual units
    for stage in range(3):
        for res_block in range(num_res_blocks):
            activation = 'relu'
            batch_normalization = True
            strides = 1
            if stage == 0:
                # First stage widens the bottleneck output 4x.
                num_filters_out = num_filters_in * 4
                if res_block == 0:  # first layer and first stage
                    activation = None
                    batch_normalization = False
            else:
                num_filters_out = num_filters_in * 2
                if res_block == 0:  # first layer but not first stage
                    strides = 2  # downsample
            # bottleneck residual unit
            y = _resnet_layer(inputs=x,
                              num_filters=num_filters_in,
                              kernel_size=1,
                              strides=strides,
                              activation=activation,
                              batch_normalization=batch_normalization,
                              conv_first=False)
            y = _resnet_layer(inputs=y,
                              num_filters=num_filters_in,
                              conv_first=False)
            y = _resnet_layer(inputs=y,
                              num_filters=num_filters_out,
                              kernel_size=1,
                              conv_first=False)
            if res_block == 0:
                # linear projection residual shortcut connection to match
                # changed dims
                x = _resnet_layer(inputs=x,
                                  num_filters=num_filters_out,
                                  kernel_size=1,
                                  strides=strides,
                                  activation="relu",
                                  batch_normalization=False)
            x = layers.add([x, y])
        num_filters_in = num_filters_out
    # Add classifier on top.
    # v2 has BN-ReLU before Pooling
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.AveragePooling2D(pool_size=8)(x)
    y = layers.Flatten()(x)
    outputs = layers.Dense(num_classes,
                           activation='softmax',
                           kernel_initializer='he_normal')(y)
    # Instantiate model.
    model = models.Model(inputs=inputs, outputs=outputs)
    return model
import numpy as np
import pandas as pd
from numpy import linalg as LA
__author__ = 'José Antonio Duarte'
__credit__ = 'José Antonio Duarte'
__status__ = 'beta'
__version__ = '0.1.0'
__all__ = ['get_rie', 'returnsStandardization']
def returnsStandardization(returns):
    """Standardize a (T x N) returns DataFrame.

    Columns are de-meaned, each row is divided by its Euclidean norm across
    assets, and the result is rescaled by its overall standard deviation.
    """
    demeaned = returns - np.mean(returns, axis=0)
    row_norms = np.sqrt((demeaned ** 2).sum(axis=1))
    normalized = demeaned.divide(row_norms, axis=0)
    return normalized / np.std(normalized)
# The returns should be a DataFrame of size T X N without NULL values
def get_rie(returns, normalize=False, max_ones = True):
    """Compute the Rotationally Invariant Estimator (RIE) of the correlation
    matrix via eigenvalue cleaning (cf. Bun, Bouchaud & Potters, "Cleaning
    large correlation matrices: tools from random matrix theory").

    Parameters:
        returns: T x N DataFrame (T observations, N assets), no NULLs.
        normalize: if True, standardize the returns first.
        max_ones: if True, force a unit diagonal and clip entries above 1.

    Returns:
        N x N numpy array with the cleaned correlation matrix estimate.
    """
    def get_s_k(index_lambda, N):
        # Discrete Stieltjes transform evaluated at z_k, excluding the
        # eigenvalue currently being cleaned. NOTE: reads `z_k` and `lambdas`
        # from the enclosing scope via late binding — z_k is assigned below,
        # before this function is actually called.
        return 1/N * (sum(1/(z_k[index_lambda] - lambdas)) - 1/(z_k[index_lambda] - lambdas[index_lambda]))
    # T is the number of observations, N is the number of assets
    T, N = returns.shape
    RIE_estimator = np.zeros((N, N), dtype=float)
    if normalize:
        returns = returnsStandardization(returns)
    # Calculation of the sample correlation matrix
    E = np.corrcoef(returns.T)
    # The eigenvalues and eigenvectors of the returns are obtained
    # (eigh returns eigenvalues in ascending order; after the transpose,
    # each row of u_ks is one eigenvector)
    lambdas, u_ks = LA.eigh(E)
    u_ks = u_ks.T
    # Smallest eigenvalue, used to estimate the bulk edges below.
    n_lambda = lambdas[0]
    q = float(N/T)
    # Marchenko-Pastur variance and upper-edge estimates — presumably assumes
    # q < 1 (more observations than assets); verify for T <= N inputs.
    sigma_sq = (n_lambda)/(1 - np.sqrt(q))**2
    lambda_plus = n_lambda*((1+np.sqrt(q))/(1 - np.sqrt(q)))**2
    # Get z_k: eigenvalues shifted slightly off the real axis (eta = 1/sqrt(N))
    z_k = lambdas - (1j / np.sqrt(N))
    # Get s_k(z_k)
    s_k = list(map(lambda index_lambda: get_s_k(
        index_lambda, N), np.argsort(lambdas)))
    # Get \xi_k^{RIE}: raw cleaned eigenvalues
    xi_k = lambdas / np.abs(1 - q + q * z_k * s_k)**2
    # Get stieltjes g_{mp}(z): Marchenko-Pastur Stieltjes transform
    g_mp = (z_k + sigma_sq*(q-1) - (np.sqrt(z_k - n_lambda)
            * np.sqrt(z_k - lambda_plus)))/(2*q*z_k*sigma_sq)
    # Get gamma_k(z_k): correction factor for the smallest eigenvalues
    gamma_k = sigma_sq * ((np.abs(1 - q + q*z_k*g_mp)**2)/(lambdas))
    # Get \hat{xi}_k: apply the correction only where it inflates the eigenvalue
    xi_hat = list(map(lambda xi, gamma: xi *
                      gamma if gamma > 1 else xi, xi_k, gamma_k))
    # Get RIE: recombine the cleaned eigenvalues with the sample eigenvectors
    for xi, u_i in zip(xi_hat, u_ks):
        RIE_estimator += xi*(u_i.reshape(-1, 1) @ u_i.reshape(-1, 1).T)
    if max_ones:
        # Force a valid correlation matrix: unit diagonal, no entries above 1.
        np.fill_diagonal(RIE_estimator, 1)
        RIE_estimator[RIE_estimator >1] = 1
    return RIE_estimator
from __future__ import absolute_import
import logging
# logging.NullHandler is unavailable before Python 2.7 — fall back to a local
# no-op handler so importing this library never configures application logging.
try:
    from logging import NullHandler
except ImportError:
    # Create a NullHandler class in logging for python 2.6
    class NullHandler(logging.Handler):
        def emit(self, record):
            # Intentionally discard the record.
            pass
import socket
# threading may be missing on some minimal interpreters; degrade gracefully.
try:
    from threading import RLock
    from threading import Timer
except ImportError:
    RLock = None
    Timer = None
import time
from . import riemann_pb2
from .transport import UDPTransport, TCPTransport
# Library convention: attach a NullHandler so logs stay silent unless the
# application opts in.
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
class Client(object):
"""A client for sending events and querying a Riemann server.
Two sets of methods are provided - an API dealing directly with protocol
buffer objects and an extended API that takes and returns dictionaries
representing events.
Protocol buffer API:
- :py:meth:`.send_event`
- :py:meth:`.send_events`
- :py:meth:`.send_query`
Extended API:
- :py:meth:`.event`
- :py:meth:`.events`
- :py:meth:`.query`
Clients do not directly manage connections to a Riemann server - these are
managed by :py:class:`riemann_client.transport.Transport` instances, which
provide methods to read and write messages to the server. Client instances
can be used as a context manager, and will connect and disconnect the
transport when entering and exiting the context.
>>> with Client(transport) as client:
... # Calls transport.connect()
... client.query('true')
... # Calls transport.disconnect()
"""
def __init__(self, transport=None):
if transport is None:
transport = TCPTransport()
self.transport = transport
def __enter__(self):
self.transport.connect()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.transport.disconnect()
@staticmethod
def create_event(data):
"""Translates a dictionary of event attributes to an Event object
:param dict data: The attributes to be set on the event
:returns: A protocol buffer ``Event`` object
"""
event = riemann_pb2.Event()
event.host = socket.gethostname()
event.tags.extend(data.pop('tags', []))
for key, value in data.pop('attributes', {}).items():
attribute = event.attributes.add()
attribute.key, attribute.value = key, value
for name, value in data.items():
if value is not None:
setattr(event, name, value)
return event
def send_events(self, events):
"""Sends multiple events to Riemann in a single message
:param events: A list or iterable of ``Event`` objects
:returns: The response message from Riemann
"""
message = riemann_pb2.Msg()
for event in events:
message.events.add().MergeFrom(event)
return self.transport.send(message)
def send_event(self, event):
"""Sends a single event to Riemann
:param event: An ``Event`` protocol buffer object
:returns: The response message from Riemann
"""
return self.send_events((event,))
def events(self, *events):
"""Sends multiple events in a single message
>>> client.events({'service': 'riemann-client', 'state': 'awesome'})
:param events: event dictionaries for :py:func:`create_event`
:returns: The response message from Riemann
"""
return self.send_events(self.create_event(e) for e in events)
def event(self, **data):
"""Sends an event, using keyword arguments to create an Event
>>> client.event(service='riemann-client', state='awesome')
:param data: keyword arguments used for :py:func:`create_event`
:returns: The response message from Riemann
"""
return self.send_event(self.create_event(data))
@staticmethod
def create_dict(event):
"""Translates an Event object to a dictionary of event attributes
All attributes are included, so ``create_dict(create_event(input))``
may return more attributes than were present in the input.
:param event: A protocol buffer ``Event`` object
:returns: A dictionary of event attributes
"""
data = dict()
for descriptor, value in event.ListFields():
if descriptor.name == 'tags':
value = list(value)
elif descriptor.name == 'attributes':
value = dict(((a.key, a.value) for a in value))
data[descriptor.name] = value
return data
def send_query(self, query):
"""Sends a query to the Riemann server
:returns: The response message from Riemann
"""
message = riemann_pb2.Msg()
message.query.string = query
return self.transport.send(message)
def query(self, query):
"""Sends a query to the Riemann server
>>> client.query('true')
:returns: A list of event dictionaries taken from the response
:raises Exception: if used with a :py:class:`.UDPTransport`
"""
if isinstance(self.transport, UDPTransport):
raise Exception('Cannot query the Riemann server over UDP')
response = self.send_query(query)
return [self.create_dict(e) for e in response.events]
class QueuedClient(Client):
"""A Riemann client using a queue that can be used to batch send events.
A message object is used as a queue, with the :py:meth:`.send_event` and
:py:meth:`.send_events` methods adding new events to the message and the
:py:meth:`.flush` sending the message.
"""
def __init__(self, transport=None):
super(QueuedClient, self).__init__(transport)
self.clear_queue()
def flush(self):
"""Sends the waiting message to Riemann
:returns: The response message from Riemann
"""
response = self.transport.send(self.queue)
self.clear_queue()
return response
def send_event(self, event):
"""Adds a single event to the queued message
:returns: None - nothing has been sent to the Riemann server yet
"""
self.send_events((event,))
return None
def send_events(self, events):
"""Adds multiple events to the queued message
:returns: None - nothing has been sent to the Riemann server yet
"""
for event in events:
self.queue.events.add().MergeFrom(event)
return None
def clear_queue(self):
"""Resets the message/queue to a blank :py:class:`.Msg` object"""
self.queue = riemann_pb2.Msg()
if RLock and Timer: # noqa
class AutoFlushingQueuedClient(QueuedClient):
"""A Riemann client using a queue and a timer that will automatically
flush its contents if either:
- the queue size exceeds :param max_batch_size: or
- more than :param max_delay: has elapsed since the last flush and
the queue is non-empty.
if :param stay_connected: is False, then the transport will be
disconnected after each flush and reconnected at the beginning of
the next flush.
if :param clear_on_fail: is True, then the client will discard its
buffer after the second retry in the event of a socket error.
A message object is used as a queue, and the following methods are
given:
- :py:meth:`.send_event` - add a new event to the queue
- :py:meth:`.send_events` add a tuple of new events to the queue
- :py:meth:`.event` - add a new event to the queue from
keyword arguments
- :py:meth:`.events` - add new events to the queue from
dictionaries
- :py:meth:`.flush` - manually force flush the queue to the
transport
"""
def __init__(self, transport, max_delay=0.5, max_batch_size=100,
stay_connected=False, clear_on_fail=False):
super(AutoFlushingQueuedClient, self).__init__(transport)
self.stay_connected = stay_connected
self.clear_on_fail = clear_on_fail
self.max_delay = max_delay
self.max_batch_size = max_batch_size
self.lock = RLock()
self.event_counter = 0
self.last_flush = time.time()
self.timer = None
# start the timer
self.start_timer()
def connect(self):
"""Connect the transport if it is not already connected."""
if not self.is_connected():
self.transport.connect()
def is_connected(self):
"""Check whether the transport is connected."""
try:
# this will throw an exception whenever socket isn't connected
self.transport.socket.type
return True
except (AttributeError, RuntimeError, socket.error):
return False
def event(self, **data):
"""Enqueues an event, using keyword arguments to create an Event
>>> client.event(service='riemann-client', state='awesome')
:param data: keyword arguments used for :py:func:`create_event`
"""
self.send_events((self.create_event(data),))
def events(self, *events):
"""Enqueues multiple events in a single message
>>> client.events({'service': 'riemann-client',
>>> 'state': 'awesome'})
:param events: event dictionaries for :py:func:`create_event`
:returns: The response message from Riemann
"""
self.send_events(self.create_event(evd) for evd in events)
def send_events(self, events):
"""Enqueues multiple events
:param events: A list or iterable of ``Event`` objects
:returns: The response message from Riemann
"""
with self.lock:
for event in events:
self.queue.events.add().MergeFrom(event)
self.event_counter += 1
self.check_for_flush()
def flush(self):
"""Sends the events in the queue to Riemann in a single protobuf
message
:returns: The response message from Riemann
"""
response = None
with self.lock:
if not self.is_connected():
self.connect()
try:
response = super(AutoFlushingQueuedClient, self).flush()
except socket.error:
# log and retry
logger.warning("Socket error on flushing. "
"Attempting reconnect and retry...")
try:
self.transport.disconnect()
self.connect()
response = (
super(AutoFlushingQueuedClient, self).flush())
except Exception:
logger.warning("Socket error on flushing "
"second attempt. Batch discarded.")
self.transport.disconnect()
if self.clear_on_fail:
self.clear_queue()
self.event_counter = 0
if not self.stay_connected:
self.transport.disconnect()
self.last_flush = time.time()
self.start_timer()
return response
def check_for_flush(self):
"""Checks the conditions for flushing the queue"""
if (self.event_counter >= self.max_batch_size or
(time.time() - self.last_flush) >= self.max_delay):
self.flush()
def start_timer(self):
"""Cycle the timer responsible for periodically flushing the queue
"""
if self.timer:
self.timer.cancel()
self.timer = Timer(self.max_delay, self.check_for_flush)
self.timer.daemon = True
self.timer.start()
def stop_timer(self):
"""Stops the current timer
a :py:meth:`.flush` event will reactviate the timer
"""
self.timer.cancel()
__all__ = 'Client', 'QueuedClient', 'AutoFlushingQueuedClient' | /riemann-client-7.0.0rc1.tar.gz/riemann-client-7.0.0rc1/riemann_client/client.py | 0.877674 | 0.350032 | client.py | pypi |
from __future__ import absolute_import
import abc
import socket
import ssl
import struct
from . import riemann_pb2
# Default arguments
HOST = 'localhost'
PORT = 5555
TIMEOUT = None
def socket_recvall(socket, length, bufsize=4096):
"""A helper method to read of bytes from a socket to a maximum length"""
data = b""
while len(data) < length:
data += socket.recv(bufsize)
return data
class RiemannError(Exception):
"""Raised when the Riemann server returns an error message"""
pass
class Transport(object):
"""Abstract transport definition
Subclasses must implement the :py:meth:`.connect`, :py:meth:`.disconnect`
and :py:meth:`.send` methods.
Can be used as a context manager, which will call :py:meth:`.connect` on
entry and :py:meth:`.disconnect` on exit.
"""
__metaclass__ = abc.ABCMeta
def __enter__(self):
self.connect()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.disconnect()
@abc.abstractmethod
def connect(self):
pass
@abc.abstractmethod
def disconnect(self):
pass
@abc.abstractmethod
def send(self, message):
pass
class SocketTransport(Transport):
"""Provides common methods for Transports that use a sockets"""
def __init__(self, host=HOST, port=PORT):
self.host = host
self.port = port
@property
def address(self):
"""
:returns: A tuple describing the address to connect to
:rtype: (host, port)
"""
return self.host, self.port
@property
def socket(self):
"""Returns the socket after checking it has been created"""
if not hasattr(self, '_socket'):
raise RuntimeError("Transport has not been connected!")
return self._socket
@socket.setter
def socket(self, value):
self._socket = value
class UDPTransport(SocketTransport):
def connect(self):
"""Creates a UDP socket"""
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def disconnect(self):
"""Closes the socket"""
self.socket.close()
def send(self, message):
"""Sends a message, but does not return a response
:returns: None - can't receive a response over UDP
"""
self.socket.sendto(message.SerializeToString(), self.address)
return None
class TCPTransport(SocketTransport):
def __init__(self, host=HOST, port=PORT, timeout=TIMEOUT):
"""Communicates with Riemann over TCP
:param str host: The hostname to connect to
:param int port: The port to connect to
:param int timeout: The time in seconds to wait before raising an error
"""
super(TCPTransport, self).__init__(host, port)
self.timeout = timeout
def connect(self):
"""Connects to the given host"""
self.socket = socket.create_connection(self.address, self.timeout)
def disconnect(self):
"""Closes the socket"""
self.socket.close()
def send(self, message):
"""Sends a message to a Riemann server and returns it's response
:param message: The message to send to the Riemann server
:returns: The response message from Riemann
:raises RiemannError: if the server returns an error
"""
message = message.SerializeToString()
self.socket.sendall(struct.pack('!I', len(message)) + message)
length = struct.unpack('!I', self.socket.recv(4))[0]
response = riemann_pb2.Msg()
response.ParseFromString(socket_recvall(self.socket, length))
if not response.ok:
raise RiemannError(response.error)
return response
class TLSTransport(TCPTransport):
def __init__(self, host=HOST, port=PORT, timeout=TIMEOUT, ca_certs=None,
keyfile=None, certfile=None):
"""Communicates with Riemann over TCP + TLS
Options are the same as :py:class:`.TCPTransport` unless noted
:param str ca_certs: Path to a CA Cert bundle used to create the socket
:param str keyfile: Path to a client key file
:param str certfile: Path to a client certificate file
"""
super(TLSTransport, self).__init__(host, port, timeout)
self.ca_certs = ca_certs
self.keyfile = keyfile
self.certfile = certfile
def connect(self):
"""Connects using :py:meth:`TLSTransport.connect` and wraps with TLS"""
super(TLSTransport, self).connect()
self.socket = ssl.wrap_socket(
self.socket,
ssl_version=ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=self.ca_certs,
keyfile=self.keyfile,
certfile=self.certfile)
class BlankTransport(Transport):
"""A transport that collects events in a list, and has no connection
Used by ``--transport none``, which is useful for testing commands without
contacting a Riemann server. This is also used by the automated tests in
``riemann_client/tests/test_riemann_command.py``.
"""
def __init__(self, *args, **kwargs):
self.events = []
def connect(self):
"""Creates a list to hold messages"""
pass
def send(self, message):
"""Adds a message to the list, returning a fake 'ok' response
:returns: A response message with ``ok = True``
"""
for event in message.events:
self.events.append(event)
reply = riemann_pb2.Msg()
reply.ok = True
return reply
def disconnect(self):
"""Clears the list of messages"""
pass
def __len__(self):
return len(self.events)
__all__ = (
'RiemannError', 'SocketTransport', 'UDPTransport',
'TCPTransport', 'TLSTransport', 'BlankTransport',
) | /riemann-client-7.0.0rc1.tar.gz/riemann-client-7.0.0rc1/riemann_client/transport.py | 0.845081 | 0.220301 | transport.py | pypi |
from typing import List, Tuple, Union
# Payload type tags returned by _decode_length
LIST = False
BYTES = True
# defined by RLP standard
BYTES_OFFSET = 0x80  # base tag for bytestrings
LONG_BYTES_OFFSET = BYTES_OFFSET + 55  # 0xb7, base tag for long bytestrings
ARRAY_OFFSET = 0xc0  # base tag for lists
LONG_ARRAY_OFFSET = ARRAY_OFFSET + 55  # 0xf7, base tag for long lists
class RLPError(ValueError):
    """Raised when input cannot be RLP-encoded or RLP-decoded."""
    ...
def i2be_rlp_padded(
        number: int,
        length: int = 0,
        signed: bool = False) -> bytes:
    '''
    Serialize an int to big-endian (BE) bytes for RLP.

    Args:
        number: the integer to serialize
        length: byte width to pad to; 0 means use the minimal width
        signed: whether to use two's complement representation

    Returns:
        The BE byte representation; empty bytes for zero when no
        explicit length is requested
    '''
    width = length
    if width == 0:
        # reserve one extra bit for the sign when encoding signed values
        extra_bit = 1 if signed else 0
        width = (number.bit_length() + 7 + extra_bit) // 8
    # RLP encodes zero as the empty bytestring
    if number == 0 and width == 0:
        return b''
    return number.to_bytes(width, 'big', signed=signed)
def be2i_rlp(b: bytes, signed: bool = False) -> int:
    '''
    Deserialize big-endian bytes to an int.

    Args:
        b: big-endian bytes; empty bytes deserialize to 0
        signed: whether to interpret as two's complement

    Returns:
        The integer value
    '''
    # int.from_bytes already returns 0 for empty input, so the old
    # explicit b'' branch was redundant
    return int.from_bytes(b, 'big', signed=signed)
def encode(item: Union[List, bytes]) -> bytes:
    '''
    RLP-encode a bytestring or a (possibly nested) list of bytestrings.

    Args:
        item: bytes, or a list of encodable items

    Returns:
        The RLP encoding

    Raises:
        RLPError: if item is neither bytes nor a list
    '''
    if isinstance(item, bytes):
        # single bytes below 0x80 encode as themselves
        if len(item) == 1 and item[0] < BYTES_OFFSET:
            return item
        return _encode_length(len(item), BYTES_OFFSET) + item
    if isinstance(item, list):
        payload = b''.join(encode(i) for i in item)
        return _encode_length(len(payload), ARRAY_OFFSET) + payload
    # the previous version fell through and implicitly returned None,
    # deferring the failure to some later, confusing TypeError
    raise RLPError(
        'Cannot encode item of type {}'.format(type(item).__name__))
def _encode_length(length: int, offset: int) -> bytes:
    '''Encode a payload length with the appropriate RLP tag bytes.'''
    if length <= 55:
        # short form: the length folds directly into the tag byte
        return bytes([offset + length])
    if length >= 256 ** 8:
        raise RLPError('Bytestring is too long to encode')
    # long form: the tag encodes how many big-endian length bytes follow
    length_bytes = i2be_rlp_padded(number=length)
    tag = bytes([offset + 55 + len(length_bytes)])
    return tag + length_bytes
def decode_list(raw: bytes) -> List:
    '''
    Decode an RLP list payload into a list of items.

    Args:
        raw: the concatenated encodings of the list's items

    Returns:
        The decoded items (bytes or nested lists)

    Raises:
        RLPError: if an item's declared length overruns the input
    '''
    output = []
    remaining = raw[:]
    while len(remaining) > 0:
        (offset, data_len, _) = _decode_length(remaining)
        item_len = offset + data_len
        # slicing never raises IndexError, so the old try/except could
        # never fire and overruns decoded silently truncated data;
        # validate the declared length explicitly instead
        if item_len > len(remaining):
            raise RLPError('Malformatted bytestring. Overran input.')
        output.append(decode(remaining[:item_len]))
        remaining = remaining[item_len:]
    return output
def decode(raw: bytes) -> Union[List, bytes]:
    '''
    Decode an RLP encoding into bytes or a (possibly nested) list.

    Args:
        raw: the RLP-encoded input

    Returns:
        The decoded payload

    Raises:
        RLPError: if the declared payload length overruns the input
    '''
    if len(raw) == 0:
        return b''
    (offset, data_len, item_type) = _decode_length(raw)
    # slicing silently truncates rather than raising IndexError, so the
    # old try/except was dead code; check the declared length instead
    if offset + data_len > len(raw):
        raise RLPError('Malformatted bytestring. Overran input')
    payload = raw[offset: offset + data_len]
    if item_type == BYTES:
        return payload
    return decode_list(payload)
def _decode_length(raw: bytes) -> Tuple[int, int, bool]:
    '''
    Read the RLP tag at the start of raw.

    Args:
        raw: the RLP-encoded input

    Returns:
        (header offset, payload length, payload type), where the
        payload type is BYTES or LIST

    Raises:
        RLPError: if raw is empty
    '''
    try:
        tag = raw[0]
    except IndexError as e:
        raise RLPError('Malformatted bytestring. Null.') from e
    # single byte
    if tag < BYTES_OFFSET:
        return (0, 1, BYTES)
    # short bytestring
    if tag <= LONG_BYTES_OFFSET:
        bytes_len = tag - BYTES_OFFSET
        return (1, bytes_len, BYTES)
    # long bytestring: tags 0xb8-0xbf carry up to 8 length bytes.
    # The old bound (LONG_BYTES_OFFSET + 7) excluded tag 0xbf, which
    # then fell through and was misread as a list of negative length.
    if tag < ARRAY_OFFSET:
        enc_len_bytes = tag - LONG_BYTES_OFFSET
        enc_len = raw[1: 1 + enc_len_bytes]
        bytes_len = int.from_bytes(enc_len, 'big')
        return (1 + enc_len_bytes, bytes_len, BYTES)
    # short list
    if tag <= LONG_ARRAY_OFFSET:
        list_len = tag - ARRAY_OFFSET
        return (1, list_len, LIST)
    # long list
    enc_len_bytes = tag - LONG_ARRAY_OFFSET
    enc_len = raw[1: 1 + enc_len_bytes]
    list_len = int.from_bytes(enc_len, 'big')
    return (1 + enc_len_bytes, list_len, LIST)
from Cryptodome.Hash import keccak
import warnings
from typing import Callable, cast
from ether.ether_types import EthSig
# suppress load warning
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from eth_keys import datatypes as eth_ecdsa
def keccak256(msg: bytes) -> bytes:
    '''
    Compute Ethereum's keccak256 digest (the pre-standard Keccak,
    not NIST SHA3-256).

    Args:
        msg: the message to hash

    Returns:
        The 32-byte keccak256 digest
    '''
    hasher = keccak.new(digest_bits=256)
    hasher.update(msg)
    return hasher.digest()
def pow_mod(x: int, y: int, z: int) -> int:
    '''
    Modular exponentiation: (x ** y) mod z.

    Args:
        x: the base
        y: the non-negative exponent
        z: the modulus

    Returns:
        (x ** y) % z
    '''
    # the builtin 3-argument pow performs fast modular exponentiation
    # in C, replacing the hand-rolled square-and-multiply loop
    return pow(x, y, z)
def uncompress_pubkey(pubkey: bytes) -> bytes:
    '''
    Expand a 33-byte compressed secp256k1 pubkey to its 64-byte form.

    Args:
        pubkey: the compressed pubkey (02/03 parity byte plus x coord)

    Returns:
        The 64-byte concatenation of the x and y coordinates
    '''
    # secp256k1 field prime
    p = 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f
    parity = pubkey[0] - 2
    x = int.from_bytes(pubkey[1:], 'big')
    # curve equation: y^2 = x^3 + 7. Since p % 4 == 3, a square root
    # is a ** ((p + 1) // 4) mod p. Use the builtin 3-arg pow instead
    # of the hand-rolled pow_mod helper.
    a = (pow(x, 3, p) + 7) % p
    y = pow(a, (p + 1) // 4, p)
    # pick the root whose parity matches the prefix byte
    if y % 2 != parity:
        y = -y % p
    return x.to_bytes(32, 'big') + y.to_bytes(32, 'big')
def compress_pubkey(pubkey: bytes) -> bytes:
    '''Compress a 64- or 65-byte pubkey to its 33-byte representation.'''
    coords = pubkey[1:] if len(pubkey) == 65 else pubkey
    # 0x02 prefix for even y, 0x03 for odd y
    prefix = bytes([(coords[-1] & 1) + 2])
    return prefix + coords[:32]
def priv_to_pub(privkey: bytes) -> bytes:
    '''Derive the uncompressed public key for a private key.'''
    private = eth_ecdsa.PrivateKey(privkey)
    public = eth_ecdsa.PublicKey.from_private(private_key=private)
    return cast(bytes, public.to_bytes())
def pub_to_addr(pubkey: bytes) -> str:
    '''An Ethereum address is the last 20 bytes of keccak256(pubkey).'''
    digest = keccak256(pubkey)
    return '0x{}'.format(digest[-20:].hex())
def priv_to_addr(privkey: bytes) -> str:
    '''Compute the Ethereum address corresponding to a private key.'''
    pubkey = priv_to_pub(privkey)
    return pub_to_addr(pubkey)
def recover_pubkey(signature: EthSig, digest: bytes) -> bytes:
    '''
    Recover the uncompressed public key from a signature and digest.

    Args:
        signature: the (v, r, s) signature tuple
        digest: the message digest that was signed

    Returns:
        The uncompressed public key
    '''
    v, r, s = signature
    # quirk in the underlying eth library: it wants recovery id 0 when
    # v is odd and 1 when v is even, so flip the parity here
    sig = eth_ecdsa.Signature(vrs=((v + 1) % 2, r, s))
    pub = sig.recover_public_key_from_msg_hash(digest)
    return cast(bytes, pub.to_bytes())
def recover_address(signature: EthSig, digest: bytes) -> str:
    '''Recover the 0x-prefixed address that produced a signature.'''
    pubkey = recover_pubkey(signature, digest)
    return pub_to_addr(pubkey)
def _der_minimal_int(number: int) -> bytes:
if number < 0:
raise ValueError('Negative number in signature')
return number.to_bytes((number.bit_length() + 7) // 8, 'big')
def sig_to_der(signature: EthSig) -> bytes:
    '''
    Serialize the r and s of a (v, r, s) signature as DER.

    Layout: 0x30 | b1 | 0x02 | b2 | r | 0x02 | b3 | s
    where b1 is the length of the remaining data, b2 the length of r,
    and b3 the length of s.
    '''
    r_body = _der_minimal_int(signature[1])
    s_body = _der_minimal_int(signature[2])
    r_tlv = b'\x02' + bytes([len(r_body)]) + r_body
    s_tlv = b'\x02' + bytes([len(s_body)]) + s_body
    return b'\x30' + bytes([len(r_tlv) + len(s_tlv)]) + r_tlv + s_tlv
def sign_hash(digest: bytes, privkey: bytes) -> EthSig:
    '''Produce a (v, r, s) signature over a message digest.'''
    key = eth_ecdsa.PrivateKey(privkey)
    signature = key.sign_msg_hash(digest)
    return cast(EthSig, signature.vrs)
def sign(
        message: bytes,
        privkey: bytes,
        algo: Callable[[bytes], bytes] = keccak256) -> EthSig:
    '''Hash a message with algo, then sign the resulting digest.'''
    digest = algo(message)
    return sign_hash(digest, privkey)
def sign_message(
        message: bytes,
        privkey: bytes,
        algo: Callable[[bytes], bytes] = keccak256) -> EthSig:
    '''
    Sign a message using the Ethereum signed message format (EIP-191).

    Args:
        message: the raw message bytes
        privkey: the private key to sign with
        algo: the digest function (defaults to keccak256)

    Returns:
        The (v, r, s) signature tuple
    '''
    # EIP-191 personal_sign prefixes the message with its byte length;
    # the previous version omitted the length, and also silently
    # dropped the `algo` argument instead of forwarding it
    prefixed = b''.join([
        b'\x19Ethereum Signed Message:\n',
        str(len(message)).encode('ascii'),
        message])
    return sign(prefixed, privkey, algo)
from ether import abi, crypto
from typing import Any, cast, Dict, List
from ether.ether_types import EthABI, ParsedEtherEvent, UnparsedEtherEvent
def _make_topic0(event: Dict[str, Any]) -> str:
    '''
    Compute an event's topic0: the keccak256 of its signature.

    Args:
        event (dict): the event ABI entry

    Returns:
        (str): the event topic as 0x prepended hex
    '''
    signature = abi.make_signature(event).encode('utf8')
    return '0x' + crypto.keccak256(signature).hex()
def _match_topic0_to_event(
        event_topic: str,
        events: List[Dict[str, Any]]) -> Dict[str, Any]:
    '''
    Find the event ABI entry whose topic0 matches a topic string.

    Args:
        event_topic (str): the event's 0x prepended hex topic
        events (list): the candidate event ABI entries

    Returns:
        (dict): the matching event ABI entry

    Raises:
        ValueError: if no candidate matches
    '''
    candidates = (e for e in events if _make_topic0(e) == event_topic)
    match = next(candidates, None)
    if match is None:
        raise ValueError('Topic not found')
    return match
def _find_indexed(event: Dict[str, Any]) -> List[Dict[str, Any]]:
'''
Finds indexed arguments
Args:
event_topic (str): the event's 0x prepended hex topic
Returns:
(list): the indexed arguments
'''
return [t for t in event['inputs'] if t['indexed']]
def _find_unindexed(event: Dict[str, Any]) -> List[Dict[str, Any]]:
'''
Finds indexed arguments
Args:
event_topic (str): the event's 0x prepended hex topic
Returns:
(list): the unindexed arguments
'''
return [t for t in event['inputs'] if not t['indexed']]
def decode_event(
        encoded_event: UnparsedEtherEvent,
        contract_abi: EthABI) -> Dict[str, Any]:
    '''
    Decode an event log using the contract ABI.

    Args:
        encoded_event (dict): the etherscan/full node event dict
        contract_abi (dict): the abi as a dict (use json.loads)

    Returns:
        (dict): argument names mapped to decoded values, plus the
                event name under the 'event_name' key
    '''
    decoded: Dict[str, Any] = {}
    events = [entry for entry in contract_abi if entry['type'] == 'event']

    # match topic0 to the appropriate event interface
    event_abi = _match_topic0_to_event(encoded_event['topics'][0], events)

    # each indexed arg occupies one of the remaining topics
    indexed = _find_indexed(event_abi)
    for arg, topic in zip(indexed, encoded_event['topics'][1:]):
        decoded[arg['name']] = abi.decode(
            arg['type'], bytes.fromhex(topic[2:]))

    # unindexed args are packed together in the data field
    unindexed = _find_unindexed(event_abi)
    values = abi.decode_many(
        [arg['type'] for arg in unindexed],
        bytes.fromhex(encoded_event['data'][2:]))
    for arg, value in zip(unindexed, values):
        decoded[arg['name']] = value

    decoded['event_name'] = event_abi['name']
    return decoded
def parse_event_data(
        encoded_event: UnparsedEtherEvent,
        contract_abi: EthABI) -> ParsedEtherEvent:
    '''Return a copy of the event with its data field decoded.'''
    parsed = cast(ParsedEtherEvent, encoded_event.copy())
    parsed['data'] = decode_event(encoded_event, contract_abi)
    return parsed
from riemann_keys import utils
# The bitcoin base58 alphabet: omits 0, O, I, and l to avoid ambiguity
BASE58_ALPHABET = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
BASE58_BASE = len(BASE58_ALPHABET)
# Reverse mapping: alphabet byte value -> digit value
BASE58_LOOKUP = dict((c, i) for i, c in enumerate(BASE58_ALPHABET))
def encode(data: bytes, checksum: bool = True) -> str:
    """Convert binary to base58, optionally appending a hash256 checksum."""
    payload = data + utils.hash256(data)[:4] if checksum else data
    v, prefix = to_long(256, lambda x: x, iter(payload))
    encoded = from_long(v, prefix, BASE58_BASE, lambda d: BASE58_ALPHABET[d])
    return encoded.decode("utf8")
def decode(s: str, checksum: bool = True):
    """Convert base58 to binary, optionally verifying a hash256 checksum."""
    v, prefix = to_long(
        BASE58_BASE, lambda c: BASE58_LOOKUP[c], s.encode("utf8"))
    data = from_long(v, prefix, 256, lambda x: x)
    if not checksum:
        return data
    payload, check = data[:-4], data[-4:]
    if utils.hash256(payload)[:4] != check:
        raise ValueError("hashed base58 has bad checksum %s" % s)
    return payload
def encode_with_checksum(data: bytes):
    """
    Return the hashed_base58 form of data.

    A "hashed_base58" structure is a base58 integer (which looks like a
    string) with four bytes of hash data at the end.
    """
    return encode(data, checksum=True)
def decode_with_checksum(s: str):
    """
    Decode a hashed_base58 string to its binary data.

    Raises:
        ValueError: if s is not valid hashed_base58
    """
    return decode(s, checksum=True)
def has_checksum(base58: str):
    """Return True if and only if base58 is valid hashed_base58."""
    try:
        decode_with_checksum(base58)
        return True
    except ValueError:
        return False
def from_long(v: int, prefix: int, base: int, charset):
    """The inverse of to_long. Convert an integer to an arbitrary base.

    v: the integer value to convert
    prefix: the number of prefixed 0s to include
    base: the new base
    charset: a function converting a digit value to its printable byte
    """
    ba = bytearray()
    while v > 0:
        v, mod = divmod(v, base)
        # only the charset call belongs in the try: the old version
        # also wrapped divmod, so a divmod failure reached the handler
        # with `mod` unbound; also chain the cause instead of hiding it
        try:
            ba.append(charset(mod))
        except Exception as e:
            raise ValueError(
                "can't convert to character corresponding to %d" % mod
            ) from e
    ba.extend([charset(0)] * prefix)
    ba.reverse()
    return bytes(ba)
def to_long(base, lookup_f, s):
    """
    Convert an array to a (possibly bignum) integer, along with a
    prefix value counting how many leading zero digits it has.

    base: the source base
    lookup_f: converts an element of s to a value between 0 and base-1
    s: the value to convert

    Returns:
        (value, prefix)
    """
    prefix = 0
    v = 0
    for c in s:
        v *= base
        try:
            digit = lookup_f(c)
        except Exception:
            raise ValueError("bad character %s in string %s" % (c, s))
        v += digit
        if v == 0:
            prefix += 1
    return v, prefix
from typing import Any, List, Optional

from ledgerblue.comm import getDongle, Dongle
from ledgerblue.commException import CommException
from riemann import utils as rutils

from ledger import utils
class LedgerException(Exception):
    """Raised when an exchange with the Ledger device fails."""
    ...
class Ledger:
    '''
    A simple wrapper around the ledgerblue Dongle object.

    It provides sync and async context managers, as well as
    passthrough exchange functions. Entering the context opens the
    device connection; exiting closes it.
    '''

    # the open dongle connection, or None when disconnected
    # (the old annotation claimed a bare Dongle but assigned None)
    client: Optional[Dongle] = None
    # whether to pass debug=True to getDongle
    debug: bool

    def __init__(self, debug: bool = False):
        self.debug = debug
        self.client = None

    def open(self) -> None:
        '''Open a connection to the first attached Ledger device.'''
        self.client = getDongle(self.debug)

    def close(self) -> None:
        '''Close the device connection and drop the client.'''
        self.client.device.close()
        self.client = None

    def __enter__(self) -> 'Ledger':
        try:
            self.open()
            return self
        except CommException as e:
            # chain the underlying comm error instead of discarding it
            raise RuntimeError('No device found') from e

    async def __aenter__(self) -> 'Ledger':
        return self.__enter__()

    def __exit__(self, *args: Any) -> None:
        self.close()

    async def __aexit__(self, *args: Any) -> None:
        self.close()

    def exchange_sync(self, data: bytes):
        '''Send an APDU to the device and block for its response.'''
        try:
            return self.client.exchange(data)
        except Exception as e:
            # preserve the cause so the original traceback survives
            raise LedgerException(str(e)) from e

    async def exchange(self, data: bytes) -> bytes:
        '''Send an APDU to the device without blocking the event loop.'''
        return bytes(await utils.asyncify(self.exchange_sync, data))
def make_apdu(
        command: bytes,
        p1: bytes = b'\x00',
        p2: bytes = b'\x00',
        data: bytes = b'',
        response_len: int = 64) -> bytes:
    '''
    Build a smart card APDU frame for the Ledger.

    https://en.wikipedia.org/wiki/Smart_card_application_protocol_data_unit

    Raises:
        ValueError: if the resulting frame exceeds 64 bytes
    '''
    frame = b''.join([
        b'\xE0',                     # CLA
        command,                     # INS
        p1,                          # p1
        p2,                          # p2
        rutils.i2be(len(data)),      # LC
        data,
        rutils.i2be(response_len)])  # LE
    if len(frame) > 64:
        raise ValueError('APDU is too long')
    return frame
def derivation_path_to_apdu_data(path: List[int]) -> bytes:
    '''
    Serialize a derivation path (a list of integer indexes) for APDU.

    The blob is the number of derivations followed by each index as a
    4-byte big-endian integer.

    Raises:
        ValueError: if the path has more than 10 derivations
    '''
    if len(path) > 10:
        raise ValueError('Only 10 derivations allowed on Ledger')
    index_bytes = b''.join(rutils.i2be_padded(i, 4) for i in path)
    return bytes([len(path)]) + index_bytes
import asyncio
from functools import partial
from riemann import utils as rutils
from riemann.encoding import base58
from ledger.ledger_types import LedgerPubkey, LedgerXPub
from typing import Any, Awaitable, cast, Callable, List, Optional
# Offset added to a derivation index to mark it as hardened (BIP32)
BIP32_HARDEN = 0x80000000
# Extended-key version prefixes, per:
# https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format
VERSION_BYTES = {
    'mainnet': {
        'public': b'\x04\x88\xb2\x1e',
        'private': b'\x04\x88\xad\xe4',
    },
    'testnet': {
        'public': b'\x04\x35\x87\xcf',
        'private': b'\x04\x35\x83\x94',
    }
}
def asyncify(function: Callable, *args: Any, **kwargs: Any) -> Awaitable:
    '''
    Run a blocking callable in the event loop's default executor.

    Args:
        function: the blocking callable
        args, kwargs: arguments passed through to the callable

    Returns:
        An awaitable resolving to the callable's result
    '''
    bound = partial(function, *args, **kwargs)
    return asyncio.get_event_loop().run_in_executor(None, bound)
def parse_derivation(derivation_path: str) -> List[int]:
    '''
    Turn a derivation path (e.g. m/44h/0) into a list of integer
    indexes, e.g. [2147483692, 0].

    Args:
        derivation_path: the human-readable, /-separated derivation path

    Returns:
        The derivation path as a list of integer indexes

    Raises:
        ValueError: if the path does not start at the master node 'm'
    '''
    nodes = derivation_path.split('/')

    # If the first node is not m, error.
    # TODO: allow partial path knowledge
    if nodes[0] != 'm':
        raise ValueError('Bad path. Got: {}'.format(derivation_path))

    # iterate the nodes directly rather than via range(len(...))
    indexes = []
    for node in nodes[1:]:
        if node[-1] in ('h', "'"):  # support 0h and 0' conventions
            indexes.append(int(node[:-1]) + BIP32_HARDEN)
        else:
            indexes.append(int(node))
    return indexes
def compress_pubkey(pubkey: bytes) -> bytes:
    '''
    Compress a 64- or 65-byte pubkey to its 33-byte form.

    Raises:
        ValueError: if the pubkey is not 64 or 65 bytes
    '''
    coords = pubkey[1:] if len(pubkey) == 65 else pubkey
    if len(coords) != 64:
        raise ValueError('Pubkey must be 64 or 65 bytes')
    # prefix 0x02 for even y, 0x03 for odd y
    return bytes([(coords[-1] & 1) + 2]) + coords[:32]
def make_child_xpub(
        derivation: str,
        parent_or_none: Optional[LedgerXPub],
        child: LedgerXPub,
        mainnet: bool = True) -> str:
    '''
    Builds an xpub for a derived child using its parent and path.

    Args:
        derivation (str): the m-prefixed derivation path e.g. m/44h/0h/0h
        parent_or_none: the parent public key, or None for a master key
        child: the child public key
        mainnet (bool): whether to use mainnet version prefixes

    Returns:
        The base58-encoded xpub string
    '''
    indices = parse_derivation(derivation)

    # pick the appropriate xpub version bytes for the target network
    network = 'mainnet' if mainnet else 'testnet'
    prefix = VERSION_BYTES[network]['public']

    if parent_or_none is None:
        # master keys use a zeroed fingerprint and index at depth 0
        parent_fingerprint = b'\x00' * 4
        child_index = b'\x00' * 4
        depth = 0
    else:
        # xpubs commit to the first 4 bytes of hash160 of the
        # compressed parent pubkey
        parent = cast(LedgerPubkey, parent_or_none)
        compressed_parent_key = compress_pubkey(parent['pubkey'])
        parent_fingerprint = rutils.hash160(compressed_parent_key)[:4]
        child_index = indices[-1].to_bytes(4, byteorder='big')
        depth = len(indices)

    # assemble the xpub; xpubs always use compressed pubkeys
    xpub = b''.join([
        prefix,                         # version prefix
        bytes([depth]),                 # depth
        parent_fingerprint,             # parent's fingerprint
        child_index,                    # index
        child['chain_code'],            # chain code
        compress_pubkey(child['pubkey'])])  # pubkey (compressed)
    return base58.encode(xpub)
## Riemann: bitcoin transactions for humans
[](https://travis-ci.org/summa-tx/riemann)
[](https://coveralls.io/github/summa-tx/riemann)
### Purpose
`$ pip install riemann-tx`
Riemann is a **dependency-free Python3** library for creating **bitcoin-style
transactions**. It is **compatible with many chains** and **supports SegWit**.
Riemann aims to make it easy to create application-specific transactions. It
serializes and unserializes scripts from human-readable strings. It contains
a complete toolbox for transaction construction, as well as built-in support
for ~20 live networks and ~40 testnet or regtest nets.
Riemann is NOT a wallet. It does NOT handle keys or create signatures.
Riemann is NOT a protocol or RPC implementation. Riemann does NOT communicate
with anything. Ever. Riemann is NOT a Script VM. Riemann does NOT check the
validity of your scriptsigs.
Riemann is _almost_ stateless. Before calling functions, you select a
network. A list of supported networks is in `riemann/networks/__init__.py`.
Tests are based on real on-chain transactions, primarily from Bitcoin.
### Contributing
Please read CONTRIBUTING.md.
### Installation, Development & Running Tests
Install from pypi for use in your project:
```
pip3 install riemann-tx
```
Install to develop Riemann:
```
$ git clone git@github.com:summa-tx/riemann.git
$ cd riemann
$ virtualenv -p python3 venv
$ source venv/bin/activate
$ pip install -r requirements-test.txt
$ pip install -e .
$ tox
```
### Usage
At a low level, Riemann deals in byte-like objects. However, it provides
layers of abstractions on top of this. Notably, scripts are commonly
expressed as strings. In script strings, data (like pubkeys) is expressed in
unprefixed hex. For example, a P2PKH output script_pubkey might be expressed
as follows:
```Python
# Note that the PUSH0x14 for the pubkey is implied
"OP_DUP OP_HASH160 00112233445566778899AABBCCDDEEFF00112233 OP_EQUALVERIFY
OP_CHECKSIG"
```
`tx.tx` contains the data structures for the different pieces of a transaction.
It deals in bytes and bytearrays.
`tx.tx_builder` provides tools for constructing transactions. It accepts
human-readable inputs, like ints and human readable script strings wherever
possible, and returns serialized transactions.
`simple` contains a simplified interface to the tx_builder. It accepts
human-readable inputs.
Bitcoin mainnet is the default network. Select a network as follows:
```Python
import riemann
riemann.select_network('network_name')
```
When relevant, segwit is enabled by passing `witness=True`. Example:
`make_sh_address(script_string, witness=True)`. There are also convenience
functions that provide the same functionality, e.g.,
`make_p2wsh_address(script_string)`.
Data structures are IMMUTABLE. You can not (and definitely should not!) edit an
instance of any of the underlying classes. Instead, make a new instance, or use
the `copy` method. The `copy` method allows you to make a copy, and takes
arguments to override any specific attribute.
### Notes and Bitcoin gotchas:
* For convenience, we separate the script_sig into the stack_script and the
redeem_script. For PKH spends, the redeem script MUST BE `b''`.
* If there are any witnesses, all inputs must have a witness. The witness list
MUST be the same size as the input list.
* If all sequence numbers are set to max (0xFFFFFFFF), `lock_time` is
disregarded by consensus rules. For this reason, 0xFFFFFFFE is the default
sequence number in simple.py.
* Relative lock-time signaling uses a **different time format** than absolute
lock-time. See here: https://prestwi.ch/bitcoin-time-locks/
* Not all chains support OP_CHECKSEQUENCEVERIFY and relative lock-times
(lookin' at you Zcash).
* Replace-by-fee signaling is also communicated by sequence numbers. If any
sequence number is 0xFFFFFFFD or lower, then RBF is enabled. RBF is _NOT_ a
consensus feature.
* `lock_time` and `sequence` use different encodings for time.
```Python
# NB:
# script_sig -> Goes in TxIn.
# - Legacy only
# - Contains initial stack (stack_script)
# - Contains p2sh script (redeem_script)
# - Contains pubkey/script revelation
# stack_script -> Goes in script_sig
# - Legacy only
# - Contains script that makes initial stack
# script_pubkey -> Goes in TxOut
# - Also called pk_script, output_script
# - P2PKH: OP_DUP OP_HASH160 PUSH0x14 {pkh} OP_EQUALVERIFY OP_CHECKSIG
# - P2SH: OP_HASH160 {script_hash} OP_EQUAL
# - P2WPKH: OP_0 PUSH0x14 {pkh}
# - P2WSH: OP_0 PUSH0x20 {script_hash}
# WitnessStackItem -> Goes in InputWitness
# - Witness only
# - Contains a length-prefixed stack item
# InputWitness -> Goes in Witness
# - A stack associated with a specific input
# - If spending from p2wsh, the last item is a serialized script
# - If spending from p2wpkh, consists of [signature, pubkey]
```
# LICENSE
Riemann is released under the LGPL.
Riemann contains some code released under MIT and ISC licenses. The appropriate
license is included at the top of these files.
In particular:
* Base58 implementation from the excellent pycoin by Richard Kiss.
[Link](https://github.com/richardkiss/pycoin)
* Bech32 implementation from Pieter Wuille.
[Link](https://github.com/sipa/bech32/tree/master/ref/python)
* blake256 and blake2 implementation by Larry Bugbee.
[Link](http://www.seanet.com/~bugbee/crypto/blake/)
[Link](https://github.com/buggywhip/blake2_py)
| /riemann-tx-2.1.0.tar.gz/riemann-tx-2.1.0/README.md | 0.461502 | 0.915242 | README.md | pypi |
from riemann import tx
from riemann import utils
from riemann.tx import decred
from riemann.tx import tx_builder as tb
from riemann.encoding import addresses as addr
from typing import overload
def output(value: int, address: str) -> tx.TxOut:
    '''
    Build a TxOut paying `value` base units to `address`.

    Accepts base58 or bech32 addresses.
    '''
    script_pubkey = addr.to_output_script(address)
    return tb._make_output(utils.i2le_padded(value, 8), script_pubkey)
@overload
def outpoint(tx_id: str, index: int, tree: int) -> decred.DecredOutpoint:
    ...


@overload  # noqa: F811
def outpoint(tx_id: str, index: int) -> tx.Outpoint:
    ...


def outpoint(tx_id, index, tree=None):  # noqa: F811
    '''
    Build an Outpoint from a block-explorer txid string and output index.

    Some overloads are not documented by Sphinx
    hex_str, int, int -> Outpoint
    '''
    # explorers display txids big-endian; the wire format is little-endian
    return tb.make_outpoint(bytes.fromhex(tx_id)[::-1], index, tree)
@overload
def unsigned_input(
        outpoint: tx.Outpoint,
        sequence: int) -> tx.TxIn:
    ...


@overload  # noqa: F811
def unsigned_input(
        outpoint: decred.DecredOutpoint,
        sequence: int) -> decred.DecredTxIn:
    ...


@overload  # noqa: F811
def unsigned_input(
        outpoint: decred.DecredOutpoint) -> decred.DecredTxIn:
    ...


@overload  # noqa: F811
def unsigned_input(
        outpoint: tx.Outpoint) -> tx.TxIn:  # fixed: was DecredTxIn
    ...


def unsigned_input(outpoint, sequence=0xFFFFFFFE):  # noqa: F811
    '''
    Create an input with empty scripts, suitable for generating sighashes.

    Some overloads are not documented by Sphinx
    Outpoint, int -> TxIn

    Args:
        outpoint: the Outpoint (or DecredOutpoint) being spent
        sequence: the sequence number; defaults to 0xFFFFFFFE so that
            lock_time remains enforceable while RBF stays disabled

    Returns:
        an unsigned TxIn (DecredTxIn on Decred networks)
    '''
    return tb.make_legacy_input(
        outpoint=outpoint,
        stack_script=b'',
        redeem_script=b'',
        sequence=sequence)
def unsigned_legacy_tx(tx_ins, tx_outs, **kwargs):
    '''Create an unsigned transaction
    Use this to generate sighashes for unsigned TxIns
    Hint: set version to 2 if using sequence number relative time locks

    Args:
        tx_ins      list(TxIn instances): list of transaction inputs
        tx_outs     list(TxOut instances): list of transaction outputs

        **kwargs:
        version             (int): transaction version number (default 2)
        lock_time           (int): transaction locktime (default 0)
        expiry              (int): overwinter expiry time (default 0)
        tx_joinsplits       (list): list of joinsplits transactions
        joinsplit_pubkey    (bytes): joinsplit public key
        joinsplit_sig       (bytes): joinsplit signature

    Returns:
        (Tx instance): unsigned transaction
    '''
    # dict.get expresses the "kwarg if present, else default" pattern directly
    return tb.make_tx(
        version=kwargs.get('version', 2),
        tx_ins=tx_ins,
        tx_outs=tx_outs,
        lock_time=kwargs.get('lock_time', 0),
        expiry=kwargs.get('expiry', 0),
        tx_joinsplits=kwargs.get('tx_joinsplits', []),
        joinsplit_pubkey=kwargs.get('joinsplit_pubkey', []),
        joinsplit_sig=kwargs.get('joinsplit_sig', []))
def unsigned_witness_tx(tx_ins, tx_outs, **kwargs):
    '''Create an unsigned segwit transaction
    Use this to generate sighashes for unsigned TxIns
    Hint: set version to 2 if using sequence number relative time locks

    Args:
        tx_ins      list(TxIn instances): list of transaction inputs
        tx_outs     list(TxOut instances): list of transaction outputs

        **kwargs:
        version     (int): transaction version number (default 2)
        lock_time   (int): transaction locktime (default 0)

    Returns:
        (Tx instance): unsigned transaction with empty witness
    '''
    # every input gets an empty witness: by consensus, if any witness is
    # present all inputs must carry one
    return tb.make_tx(
        version=kwargs.get('version', 2),
        tx_ins=tx_ins,
        tx_outs=tx_outs,
        lock_time=kwargs.get('lock_time', 0),
        tx_witnesses=[tb.make_empty_witness() for _ in tx_ins])
import hashlib
import riemann
from riemann import blake256 as b256
def i2le(number: int) -> bytes:
    '''
    Serialize an int to minimal-width little-endian (LE) bytes.

    Args:
        number: the integer to serialize

    Returns:
        the LE encoding; zero encodes as a single zero byte
    '''
    byte_length = max(1, (number.bit_length() + 7) // 8)
    return number.to_bytes(byte_length, byteorder='little')
def i2le_padded(number: int, length: int) -> bytes:
    '''
    Serialize an int to little-endian bytes of a fixed width.

    Args:
        number: the integer to serialize
        length: the exact width of the result in bytes

    Returns:
        the zero-padded LE encoding
    '''
    return number.to_bytes(length, byteorder='little')
def i2le_script(number: int) -> str:
    '''
    Convert int to signed little endian (LE) hex for use within scripts.

    Args:
        number: int value to convert to bytes in LE format

    Returns:
        the hex-encoded signed LE number, minimally encoded

    Raises:
        ValueError: if the value needs more than 520 bytes (the consensus
            limit on the size of a script element)
    '''
    if number == 0:
        return '00'
    # Minimal two's-complement width: n fits in `l` bytes iff
    # (n if n >= 0 else ~n) needs at most 8*l - 1 bits.
    bits = (number if number >= 0 else ~number).bit_length()
    length = bits // 8 + 1
    if length > 520:
        raise ValueError(
            'Number cannot be expressed in 520 bytes or less')
    return number.to_bytes(length, byteorder='little', signed=True).hex()
def le2i(b: bytes, signed: bool = False) -> int:
    '''
    Parse an int from little-endian (LE) bytes.

    Args:
        b:      the LE-encoded bytes
        signed: interpret as two's complement when True

    Returns:
        the decoded integer
    '''
    return int.from_bytes(b, byteorder='little', signed=signed)
def be2i(b: bytes, signed: bool = False) -> int:
    '''
    Parse an int from big-endian (BE) bytes.

    Args:
        b:      the BE-encoded bytes
        signed: interpret as two's complement when True

    Returns:
        the decoded integer
    '''
    return int.from_bytes(b, byteorder='big', signed=signed)
def i2be(number: int) -> bytes:
    '''
    Serialize an int to minimal-width big-endian (BE) bytes.

    Args:
        number: the integer to serialize

    Returns:
        the BE encoding; zero encodes as a single zero byte
    '''
    byte_length = max(1, (number.bit_length() + 7) // 8)
    return number.to_bytes(byte_length, byteorder='big')
def i2be_padded(number: int, length: int) -> bytes:
    '''
    Serialize an int to big-endian bytes of a fixed width.

    Args:
        number: the integer to serialize
        length: the exact width of the result in bytes

    Returns:
        the zero-padded BE encoding
    '''
    return number.to_bytes(length, byteorder='big')
def change_endianness(b: bytes) -> bytes:
    '''Reverse a bytestring, flipping between LE and BE encodings.'''
    flipped = b[::-1]
    return flipped
def rmd160(msg_bytes: bytes) -> bytes:
    '''
    ripemd160 digest of a message.

    NOTE: relies on the OpenSSL build exposing ripemd160 via hashlib.new().
    '''
    digest = hashlib.new('ripemd160', msg_bytes)
    return digest.digest()
def sha256(msg_bytes: bytes) -> bytes:
    '''Single-SHA256 digest of a message.'''
    h = hashlib.sha256()
    h.update(msg_bytes)
    return h.digest()
def hash160(msg_bytes: bytes) -> bytes:
    '''
    rmd160 of the network's inner hash of a message.

    Decred uses blake256 as the inner hash; all other networks use sha256.
    '''
    if 'decred' in riemann.get_current_network_name():
        inner = blake256(msg_bytes)
    else:
        inner = sha256(msg_bytes)
    ripe = hashlib.new('ripemd160')
    ripe.update(inner)
    return ripe.digest()
def hash256(msg_bytes: bytes) -> bytes:
    '''
    Double network hash of a message.

    Decred uses blake256(blake256(x)); all other networks sha256(sha256(x)).
    '''
    if 'decred' in riemann.get_current_network_name():
        return blake256(blake256(msg_bytes))
    return sha256(sha256(msg_bytes))
def blake256(msg_bytes: bytes) -> bytes:
    '''blake256 (BLAKE-256, as used by Decred) digest of a message.'''
    digest = b256.blake_hash(msg_bytes)
    return digest
def blake2b(data: bytes = b'', **kwargs) -> bytes:
    '''blake2b digest of a message; kwargs pass through to hashlib.blake2b.'''
    return hashlib.blake2b(data, **kwargs).digest()
def blake2s(data: bytes = b'', **kwargs) -> bytes:
    '''blake2s digest of a message; kwargs pass through to hashlib.blake2s.'''
    return hashlib.blake2s(data, **kwargs).digest()
import riemann
from riemann import utils
from typing import Any, Optional, Union
# Any value this module treats as raw bytes
Byteslike = Union[bytes, bytearray, 'ByteData']

# Bitcoin signature-hash type flags (appended to signatures)
SIGHASH_ALL = 0x01           # commit to all inputs and outputs
SIGHASH_NONE = 0x02          # commit to inputs only
SIGHASH_SINGLE = 0x03        # commit to the output at the same index as the input
SIGHASH_FORKID = 0x40        # Bitcoin Cash/SV replay-protection flag
SIGHASH_ANYONECANPAY = 0x80  # modifier: commit to only this input
class ByteData():
    '''
    Wrapper class for byte-like data.
    Iterable and subscriptable (by iterating and subscribing to wrapped data)
    Can be made immutable.
    self._bytes is a bytearray object when mutable
    self._bytes is a bytes object when immutable
    Should be mostly transparent to the user.
    Can be treated like bytes or a bytearray in most cases.
    '''
    # Name-mangled to _ByteData__immutable, so each use refers to this
    # class's flag; set True by _make_immutable() to freeze the object.
    __immutable = False

    def __init__(self):
        # mutable backing store; swapped for bytes on _make_immutable()
        self._bytes = bytearray()

    def __iter__(self):
        '''Iterate the wrapped bytes (yields ints, like bytes).'''
        return iter(self._bytes)

    def __getitem__(self, val):
        '''Subscript/slice the wrapped bytes.'''
        return self._bytes[val]

    def __iadd__(self, other: Byteslike):
        '''
        ByteData, byte-like -> ByteData
        Define += operator.
        Extend self's bytes with other's bytes.
        '''
        if isinstance(other, bytes) or isinstance(other, bytearray):
            self._bytes.extend(other)
        elif isinstance(other, ByteData):
            self._bytes.extend(other._bytes)
        else:
            raise TypeError('unsupported operand type(s) for +=: '
                            '{} and {}'.format(type(self).__name__,
                                               type(other).__name__))
        return self

    def __ne__(self, other):
        '''
        ByteData, byte-like -> bool
        Define != operator.
        Compares self._bytes to other.
        NOTE(review): raises for unsupported types rather than returning
        NotImplemented, so comparisons against other types never fall back.
        '''
        if isinstance(other, bytes) or isinstance(other, bytearray):
            return self._bytes != other
        elif isinstance(other, ByteData):
            return self._bytes != other._bytes
        else:
            raise TypeError('Equality not supported for ByteData and {}.'
                            .format(type(other)))

    def __eq__(self, other):
        '''
        ByteData, byte-like -> bool
        Define == operator as the negation of !=.
        '''
        return not self != other

    def __len__(self):
        '''
        ByteData -> int
        Length of the wrapped bytes.
        '''
        return len(self._bytes)

    def __setattr__(self, key: str, value):
        '''Block all attribute writes once _make_immutable() has run.'''
        if self.__immutable:
            raise TypeError("%r cannot be written to." % self)
        object.__setattr__(self, key, value)

    def __format__(self, code):
        '''
        ByteData -> str
        Support format specs; 'x'/'X' render lower/upper-case hex,
        anything else is delegated to the wrapped bytes.
        '''
        if 'x' in code:
            return self.hex()
        if 'X' in code:
            return self.hex().upper()
        return self._bytes.__format__(code)

    def __repr__(self):
        '''
        ByteData -> str
        '''
        return '{}: {}'.format(type(self).__name__, self._bytes)

    def to_bytes(self) -> bytes:
        '''
        ByteData -> bytes
        Copy of the wrapped data as immutable bytes.
        '''
        return bytes(self._bytes)

    def hex(self) -> str:
        '''
        ByteData -> hex_string
        '''
        return self._bytes.hex()

    def _make_immutable(self):
        '''
        Prevents any future changes to the object
        '''
        # freeze the buffer first, then set the flag; __setattr__ would
        # reject both writes if the flag were set first
        self._bytes = bytes(self._bytes)
        self.__immutable = True

    def find(self, substring: Byteslike) -> int:
        '''
        byte-like -> int
        Finds the index of substring (-1 when absent, like bytes.find)
        '''
        if isinstance(substring, ByteData):
            substring = substring.to_bytes()
        return self._bytes.find(substring)

    @staticmethod
    def validate_bytes(data: Any, length: Optional[int] = 4):
        '''
        Raises ValueError if data is not bytes.
        Raises ValueError if len(data) is not length.
        Length may be None for unknown lengths (e.g. scripts).
        length=None will allow 0 length data.
        NB: the default expected length is 4 bytes.
        '''
        if (not isinstance(data, ByteData)
                and not isinstance(data, bytes)
                and not isinstance(data, bytearray)):
            raise ValueError('Expected byte-like object. '
                             'Got: {}'.format(type(data)))

        # allow any length
        if length is None:
            return

        if len(data) != length:
            raise ValueError('Expected byte-like object with length {}. '
                             'Got {} with length {}.'
                             .format(length, type(data), len(data)))

    @classmethod
    def from_hex(C, hex_string: str):
        '''Parse a hex string into a new instance of this class.'''
        return C.from_bytes(bytes.fromhex(hex_string))

    @classmethod
    def from_bytes(ByteData, byte_string: bytes) -> 'ByteData':
        # NB: the cls parameter is (unconventionally) named after the class
        ret = ByteData()
        ret += byte_string
        return ret
class VarInt(ByteData):
    '''
    Bitcoin-style variable-length integer (CompactSize).

    Values <= 0xfc are encoded raw in one byte; larger values get a
    one-byte prefix (0xfd/0xfe/0xff) followed by 2/4/8 LE bytes.

    Args:
        number: the non-negative integer to encode (max (2 ** 64) - 1)
        length: optional total encoded width (3, 5, or 9) to force a
            non-compact encoding, as seen in some legacy transactions.
            None (or 0) selects the minimal compact encoding.
    '''

    def __init__(self, number: int, length: Optional[int] = None):
        super().__init__()
        if number < 0x0:
            raise ValueError('VarInt cannot be less than 0. '
                             'Got: {}'.format(number))
        if number > 0xffffffffffffffff:
            raise ValueError('VarInt cannot be greater than (2 ** 64) - 1. '
                             'Got: {}'
                             .format(number))
        # Choose the prefix from whichever is wider: the value's natural
        # range or the explicitly requested width. Previously a small
        # number with an explicit width was zero-padded WITHOUT its
        # prefix (and a requested width larger than the natural one was
        # ignored), so non-compact VarInts did not round-trip through
        # from_bytes.
        if number <= 0xfc and length not in (3, 5, 9):
            pass  # no prefix
        elif number <= 0xffff and (length is None or length <= 3):
            self += bytes([0xfd])
            length = 3
        elif number <= 0xffffffff and (length is None or length <= 5):
            self += bytes([0xfe])
            length = 5
        else:
            self += bytes([0xff])
            length = 9
        self += utils.i2le(number)
        # zero-pad the numeric body out to the requested total width
        if length is not None:
            while len(self) < length:
                self += b'\x00'

        self.number = number
        self._make_immutable()

    def copy(self) -> 'VarInt':
        # NB: copies are always minimally (compact) encoded
        return VarInt(self.number)

    @classmethod
    def from_bytes(VarInt, byte_string: bytes) -> 'VarInt':
        '''
        byte-like -> VarInt
        accepts arbitrary length input, gets a VarInt off the front
        '''
        num = byte_string
        if num[0] <= 0xfc:
            num = num[0:1]
            non_compact = False
        elif num[0] == 0xfd:
            num = num[1:3]
            # high zero bytes mean the value would fit a shorter encoding
            non_compact = (num[-1:] == b'\x00')
        elif num[0] == 0xfe:
            num = num[1:5]
            non_compact = (num[-2:] == b'\x00\x00')
        elif num[0] == 0xff:
            num = num[1:9]
            non_compact = (num[-4:] == b'\x00\x00\x00\x00')
        if len(num) not in [1, 2, 4, 8]:
            raise ValueError('Malformed VarInt. Got: {}'
                             .format(byte_string.hex()))

        # Zcash Overwinter/Sapling consensus forbids non-compact VarInts
        if (non_compact
                and ('overwinter' in riemann.get_current_network_name()
                     or 'sapling' in riemann.get_current_network_name())):
            raise ValueError('VarInt must be compact. Got: {}'
                             .format(byte_string.hex()))

        ret = VarInt(
            utils.le2i(num),
            length=(len(num) + 1) if non_compact else 0)
        return ret
import riemann
from riemann import utils
from riemann.script import serialization
from riemann.tx import tx, decred, overwinter, sapling, sprout, zcash_shared
from typing import cast, List, Optional, overload
def make_sh_script_pubkey(script_bytes: bytes, witness: bool = False) -> bytes:
    '''
    Make a P2SH or P2WSH script pubkey from a serialized script. Does not
    support Compatibility p2wsh-via-p2sh output scripts.

    Args:
        script_bytes: The serialized redeem script or witness script.
        witness: Pass True to make a P2WSH script pubkey.

    Returns:
        The script pubkey containing the hash of the serialized script.
    '''
    script_pubkey = bytearray()
    if witness:
        # P2WSH commits to the sha256 of the witness script
        script_pubkey.extend(riemann.network.P2WSH_PREFIX)
        script_pubkey.extend(utils.sha256(script_bytes))
        return bytes(script_pubkey)
    # P2SH: OP_HASH160 PUSH0x14 {hash160(script)} OP_EQUAL
    script_pubkey.extend(b'\xa9\x14')
    script_pubkey.extend(utils.hash160(script_bytes))
    script_pubkey.extend(b'\x87')
    return bytes(script_pubkey)
def make_sh_output_script(script_string: str, witness: bool = False) -> bytes:
    '''
    Make a P2SH or P2WSH script pubkey from a human-readable script. Does not
    support Compatibility p2wsh-via-p2sh output scripts.

    Args:
        script_string: The human-readable redeem script or witness script.
        witness: Pass True to make a P2WSH script pubkey.

    Returns:
        The script pubkey containing the hash of the serialized script.

    Raises:
        ValueError: if witness is requested on a non-segwit network
    '''
    if witness and not riemann.network.SEGWIT:
        raise ValueError(
            'Network {} does not support witness scripts.'
            .format(riemann.get_current_network_name()))

    serialized = serialization.serialize(script_string)
    return make_sh_script_pubkey(serialized, witness=witness)
def make_pkh_output_script(pubkey: bytes, witness: bool = False) -> bytes:
    '''
    Makes a P2PKH or P2WPKH script pubkey from a raw public key. Does not
    support Compatibility p2wpkh-via-p2sh output scripts.

    Args:
        pubkey: The 33- or 65-byte public key.
        witness: Pass True to make a P2WPKH script pubkey.

    Returns:
        The script pubkey containing the hash of the pubkey.

    Raises:
        ValueError: if witness is requested on a non-segwit network, or
            pubkey is not a byte-like object
    '''
    if witness and not riemann.network.SEGWIT:
        raise ValueError(
            'Network {} does not support witness scripts.'
            .format(riemann.get_current_network_name()))

    # isinstance (rather than exact type() comparison) also accepts
    # bytes/bytearray subclasses
    if not isinstance(pubkey, (bytes, bytearray)):
        raise ValueError('Unknown pubkey format. '
                         'Expected bytes. Got: {}'.format(type(pubkey)))

    pubkey_hash = utils.hash160(pubkey)

    output_script = bytearray()
    if witness:
        output_script.extend(riemann.network.P2WPKH_PREFIX)
        output_script.extend(pubkey_hash)
    else:
        output_script.extend(b'\x76\xa9\x14')  # OP_DUP OP_HASH160 PUSH0x14
        output_script.extend(pubkey_hash)
        output_script.extend(b'\x88\xac')  # OP_EQUALVERIFY OP_CHECKSIG
    return bytes(output_script)
def make_p2sh_output_script(script_string: str) -> bytes:
    '''
    Make a P2SH script pubkey from a human-readable Script.

    Args:
        script_string: The human-readable redeem script.

    Returns:
        The P2SH script pubkey containing the hash of the serialized script.
    '''
    # non-witness wrapper; relies on make_sh_output_script's default
    return make_sh_output_script(script_string)
def make_p2pkh_output_script(pubkey: bytes) -> bytes:
    '''
    Makes a P2PKH script pubkey from a raw public key.

    Args:
        pubkey: The 33- or 65-byte public key.

    Returns:
        The P2PKH script pubkey containing the hash of the pubkey.
    '''
    # non-witness wrapper; relies on make_pkh_output_script's default
    return make_pkh_output_script(pubkey)
def make_p2wsh_output_script(script_string: str) -> bytes:
    '''
    Make a P2WSH script pubkey from a human-readable Script. Does not support
    Compatibility p2wsh-via-p2sh output scripts.

    Args:
        script_string: The human-readable witness script.

    Returns:
        The P2WSH script pubkey containing the hash of the serialized script.
    '''
    return make_sh_output_script(script_string=script_string, witness=True)
def make_p2wpkh_output_script(pubkey: bytes) -> bytes:
    '''
    Makes a P2WPKH script pubkey from a raw public key. Does not support
    Compatibility p2wpkh-via-p2sh output scripts.

    Args:
        pubkey: The 33- or 65-byte public key.

    Returns:
        The P2WPKH script pubkey containing the hash of the pubkey.
    '''
    return make_pkh_output_script(pubkey=pubkey, witness=True)
@overload
def _make_output(
        value: bytes,
        output_script: bytes,
        version: bytes) -> decred.DecredTxOut:
    ...  # pragma: nocover


@overload  # noqa: F811
def _make_output(
        value: bytes,
        output_script: bytes) -> tx.TxOut:
    ...  # pragma: nocover


def _make_output(  # noqa: F811
        value,
        output_script,
        version=None):
    '''
    Instantiates a TxOut from value and output script.

    Args:
        value: The 8-byte LE-encoded integer value of the output.
        output_script: The non-length-prepended output script.
        version: Only in Decred transactions, the output version.

    Returns:
        The TxOut object. A DecredTxOut on Decred networks.
    '''
    if 'decred' not in riemann.get_current_network_name():
        return tx.TxOut(value=value, output_script=output_script)
    return decred.DecredTxOut(
        value=value,
        version=cast(int, version),
        output_script=output_script)
def make_sh_output(
        value: int,
        output_script: str,
        witness: bool = False) -> tx.TxOut:
    '''
    Instantiates a P2SH or P2WSH TxOut from value and human-readable Script.

    Args:
        value: The integer value of the output in base units.
        output_script: The non-length-prepended human-readable Script.
        witness: Pass True to make a P2WSH script pubkey.

    Returns:
        A TxOut object
    '''
    script_pubkey = make_sh_output_script(output_script, witness)
    return _make_output(
        value=utils.i2le_padded(value, 8),
        output_script=script_pubkey)
def make_p2sh_output(value: int, output_script: str) -> tx.TxOut:
    '''
    Instantiates a P2SH TxOut from value and human-readable Script.

    Args:
        value: The integer value of the output in base units.
        output_script: The human-readable redeem script.

    Returns:
        A TxOut object paying a P2SH script pubkey.
    '''
    return make_sh_output(value=value, output_script=output_script,
                          witness=False)
def make_p2wsh_output(value: int, output_script: str) -> tx.TxOut:
    '''
    Instantiates a P2WSH TxOut from value and human-readable Script.

    Args:
        value: The integer value of the output in base units.
        output_script: The human-readable witness script.

    Returns:
        A TxOut object paying a P2WSH script pubkey.
    '''
    return make_sh_output(value=value, output_script=output_script,
                          witness=True)
def make_pkh_output(
        value: int,
        pubkey: bytes,
        witness: bool = False) -> tx.TxOut:
    '''
    Instantiates a P2PKH or P2WPKH TxOut from value and raw pubkey.

    Args:
        value: The integer value of the output in base units.
        pubkey: The 33- or 65-byte raw public key.
        witness: Pass True to make a P2WPKH script pubkey.

    Returns:
        A TxOut object
    '''
    script_pubkey = make_pkh_output_script(pubkey, witness)
    return _make_output(
        value=utils.i2le_padded(value, 8),
        output_script=script_pubkey)
def make_p2pkh_output(value: int, pubkey: bytes) -> tx.TxOut:
    '''
    Instantiates a P2PKH TxOut from value and raw pubkey.

    Args:
        value: The integer value of the output in base units.
        pubkey: The 33- or 65-byte raw public key.

    Returns:
        A TxOut object paying a P2PKH script pubkey
    '''
    return make_pkh_output(value=value, pubkey=pubkey, witness=False)
def make_p2wpkh_output(value: int, pubkey: bytes) -> tx.TxOut:
    '''
    Instantiates a P2WPKH TxOut from value and raw pubkey.

    Args:
        value: The integer value of the output in base units.
        pubkey: The 33- or 65-byte raw public key.

    Returns:
        A TxOut object paying a P2WPKH script pubkey
    '''
    return make_pkh_output(value=value, pubkey=pubkey, witness=True)
def make_op_return_output(data: bytes) -> tx.TxOut:
    '''
    Generates OP_RETURN output for data of up to 77 bytes. OP_RETURN outputs
    are data carriers with no impact on the UTXO set. They are commonly used
    to create on-chain commitments to some off-chain information. There are
    few consensus constraints on their content or structure, however they
    become non-standard above 77 bytes.

    Args:
        data (bytes): data to be included in output

    Returns:
        (TxOut): TxOut object with OP_RETURN output

    Raises:
        ValueError: if data exceeds 77 bytes
    '''
    if len(data) > 77:  # 77 bytes is the limit
        raise ValueError('Data is too long. Expected <= 77 bytes')

    pk_script = bytearray()
    pk_script.extend(b'\x6a')  # OP_RETURN

    # direct pushes top out at 75 bytes; longer data needs OP_PUSHDATA1
    if len(data) > 75:
        pk_script.extend(b'\x4c')  # OP_PUSHDATA1

    pk_script.append(len(data))  # one byte for length of data
    pk_script.extend(data)
    return _make_output(utils.i2le_padded(0, 8), pk_script)
def make_empty_witness() -> tx.InputWitness:
    '''
    Create an InputWitness with an empty stack. Useful for unsigned
    transactions, as well as Legacy inputs in Segwit transactions. By
    consensus, if any witness is present, all inputs must have a witness.
    '''
    return tx.InputWitness(stack=[])
def make_witness_stack_item(data: bytes) -> tx.WitnessStackItem:
    '''Wrap a bytestring in a WitnessStackItem object.'''
    stack_item = tx.WitnessStackItem(item=data)
    return stack_item
def make_witness(data_list: List[bytes]) -> tx.InputWitness:
    '''
    Make a witness stack from a list of bytestrings. Each bytestring is
    wrapped in a WitnessStackItem and placed into the InputWitness in order.
    '''
    stack_items = []
    for entry in data_list:
        stack_items.append(make_witness_stack_item(entry))
    return tx.InputWitness(stack=stack_items)
def make_decred_witness(
        value: bytes,
        height: bytes,
        index: bytes,
        stack_script: bytes,
        redeem_script: bytes) -> decred.DecredInputWitness:
    '''
    Build a DecredInputWitness. Decred has a unique witness structure that
    commits to the prevout's value, block height, and block index alongside
    the spending scripts.
    '''
    witness = decred.DecredInputWitness(
        value=value,
        height=height,
        index=index,
        stack_script=stack_script,
        redeem_script=redeem_script)
    return witness
@overload
def make_outpoint(
        tx_id_le: bytes, index: int, tree: int) -> decred.DecredOutpoint:
    ...  # pragma: nocover


@overload  # noqa: F811
def make_outpoint(tx_id_le: bytes, index: int) -> tx.Outpoint:
    ...  # pragma: nocover


def make_outpoint(tx_id_le, index, tree=None):  # noqa: F811
    '''
    Instantiate an Outpoint object from a transaction id and an index.

    Args:
        tx_id_le: The 32-byte LE hash of the transaction that created the
                  prevout being referenced.
        index: The index of the TxOut that created the prevout in its
               transaction's output vector
        tree: Only in Decred transactions. Specifies the commitment tree.

    Returns:
        An Outpoint object. On Decred networks, a DecredOutpoint.
    '''
    index_bytes = utils.i2le_padded(index, 4)
    if 'decred' in riemann.get_current_network_name():
        # default to the regular transaction tree when unspecified
        tree_bytes = b'\x00' if tree is None else utils.i2le_padded(tree, 1)
        return decred.DecredOutpoint(
            tx_id=tx_id_le, index=index_bytes, tree=tree_bytes)
    return tx.Outpoint(tx_id=tx_id_le, index=index_bytes)
def make_script_sig(stack_script: str, redeem_script: str) -> bytes:
    '''
    Make a serialized script sig from a human-readable stack script and
    redeem script. The redeem script is hex-serialized and appended to the
    stack script before the whole string is serialized.
    '''
    serialized_redeem = serialization.hex_serialize(redeem_script)
    return serialization.serialize(
        '{} {}'.format(stack_script, serialized_redeem))
@overload
def make_legacy_input(
        outpoint: decred.DecredOutpoint,
        stack_script: bytes,
        redeem_script: bytes,
        sequence: int) -> decred.DecredTxIn:
    ...  # pragma: nocover


@overload  # noqa: F811
def make_legacy_input(
        outpoint: tx.Outpoint,
        stack_script: bytes,
        redeem_script: bytes,
        sequence: int) -> tx.TxIn:
    ...  # pragma: nocover


def make_legacy_input(  # noqa: F811
        outpoint,
        stack_script,
        redeem_script,
        sequence):
    '''
    Make a legacy input. This supports creating Compatibility inputs by
    passing the witness program to `redeem_script` while passing an empty
    bytestring for `stack_script`.

    Args:
        outpoint: The Outpoint object
        stack_script: A serialized Script program that sets the initial stack
        redeem_script: A serialized Script program that is run on the stack
        sequence: The 4-byte LE-encoded sequence number

    Returns:
        A Legacy TxIn object.
    '''
    seq_bytes = utils.i2le_padded(sequence, 4)
    if 'decred' in riemann.get_current_network_name():
        # Decred carries scripts in its witness, not in the TxIn
        return decred.DecredTxIn(outpoint=outpoint, sequence=seq_bytes)
    return tx.TxIn(outpoint=outpoint,
                   stack_script=stack_script,
                   redeem_script=redeem_script,
                   sequence=seq_bytes)
@overload
def make_witness_input(
        outpoint: decred.DecredOutpoint,
        sequence: int) -> decred.DecredTxIn:
    ...  # pragma: nocover


@overload  # noqa: F811
def make_witness_input(
        outpoint: tx.Outpoint,
        sequence: int) -> tx.TxIn:
    ...  # pragma: nocover


def make_witness_input(outpoint, sequence):  # noqa: F811
    '''
    Make a Segwit input. This is clearly superior to `make_legacy_input` and
    you should use witness always.

    Args:
        outpoint: The Outpoint object
        sequence: The 4-byte LE-encoded sequence number

    Returns:
        A Segwit TxIn object.
    '''
    seq_bytes = utils.i2le_padded(sequence, 4)
    if 'decred' in riemann.get_current_network_name():
        return decred.DecredTxIn(outpoint=outpoint, sequence=seq_bytes)
    # witness inputs carry empty legacy scripts
    return tx.TxIn(outpoint=outpoint,
                   stack_script=b'',
                   redeem_script=b'',
                   sequence=seq_bytes)
def make_decred_input(
        outpoint: decred.DecredOutpoint,
        sequence: int) -> decred.DecredTxIn:
    '''Instantiate a DecredTxIn from an outpoint and a sequence number.'''
    seq_bytes = utils.i2le_padded(sequence, 4)
    return decred.DecredTxIn(outpoint=outpoint, sequence=seq_bytes)
# NB: dispatch between networks happens at runtime on the selected network
# name; the overloads below only describe the per-network signatures.
@overload
def make_tx(
        version: int,
        tx_ins: List[decred.DecredTxIn],
        tx_outs: List[decred.DecredTxOut],
        lock_time: int,
        expiry: int,
        tx_witnesses: List[decred.DecredInputWitness]) -> decred.DecredTx:
    ...  # pragma: nocover


@overload  # noqa: F811
def make_tx(
        version: int,
        tx_ins: List[tx.TxIn],
        tx_outs: List[tx.TxOut],
        lock_time: int,
        tx_joinsplits: List[zcash_shared.SproutJoinsplit],
        joinsplit_pubkey: Optional[bytes],
        joinsplit_sig: Optional[bytes],
        binding_sig: Optional[bytes]) -> sprout.SproutTx:
    ...  # pragma: nocover


@overload  # noqa: F811
def make_tx(
        tx_ins: List[tx.TxIn],
        tx_outs: List[tx.TxOut],
        lock_time: int,
        expiry: int,
        tx_joinsplits: List[zcash_shared.SproutJoinsplit],
        joinsplit_pubkey: Optional[bytes],
        joinsplit_sig: Optional[bytes],
        binding_sig: Optional[bytes]) -> overwinter.OverwinterTx:
    ...  # pragma: nocover


@overload  # noqa: F811
def make_tx(
        tx_ins: List[tx.TxIn],
        tx_outs: List[tx.TxOut],
        lock_time: int,
        expiry: int,
        value_balance: int,
        tx_shielded_spends: List[sapling.SaplingShieldedSpend],
        tx_shielded_outputs: List[sapling.SaplingShieldedOutput],
        tx_joinsplits: List[sapling.SaplingJoinsplit],
        joinsplit_pubkey: Optional[bytes],
        joinsplit_sig: Optional[bytes],
        binding_sig: Optional[bytes]) -> sapling.SaplingTx:
    ...  # pragma: nocover


@overload  # noqa: F811
def make_tx(
        version: int,
        tx_ins: List[tx.TxIn],
        tx_outs: List[tx.TxOut],
        lock_time: int,
        tx_witnesses: Optional[List[tx.InputWitness]] = None) -> tx.Tx:
    ...  # pragma: nocover


def make_tx(  # noqa: F811
        version,
        tx_ins,
        tx_outs,
        lock_time,
        expiry=None,
        value_balance=0,
        tx_shielded_spends=None,
        tx_shielded_outputs=None,
        tx_witnesses=None,
        tx_joinsplits=None,
        joinsplit_pubkey=None,
        joinsplit_sig=None,
        binding_sig=None):
    '''
    Instantiate a complete Tx object from its components.

    Args:
        version: The 4-byte LE-encoded version number.
        tx_ins: A list of TxIn objects.
        tx_outs: A list of TxOut objects.
        lock_time: The 4-byte LE-encoded lock_time number.
        expiry: Decred, Overwinter, and Sapling only. 4-byte LE expiry number.
        value_balance: Sapling only. An 8-byte LE number representing the net
                       change in shielded pool size as a result of this
                       transaction.
        tx_shielded_spends: Sapling only. An array of SaplingShieldedSpend.
        tx_shielded_outputs: Sapling only. An array of SaplingShieldedOutput.
        tx_witnesses: An array of InputWitness objects.
        tx_joinsplits: Sprout, Overwinter, and Sapling only. An array of
                       SproutJoinsplit or SaplingJoinsplit objects.
        joinsplit_pubkey: The joinsplit pubkey. See Zcash protocol docs.
        joinsplit_sig: The joinsplit signature. See Zcash protocol docs.
        binding_sig: The binding signature. See Zcash protocol docs.

    Returns:
        A Tx object. DecredTx if network is set to Decred. SproutTx if set to
        Zcash Sprout. OverwinterTx if set to Zcash Overwinter. SaplingTx if set
        to Zcash Sapling.
    '''
    n = riemann.get_current_network_name()
    if 'decred' in n:
        return decred.DecredTx(
            version=utils.i2le_padded(version, 4),
            tx_ins=tx_ins,
            tx_outs=tx_outs,
            lock_time=utils.i2le_padded(lock_time, 4),
            expiry=utils.i2le_padded(expiry, 4),
            # NOTE(review): this nests the witness list inside another list;
            # looks suspicious — confirm DecredTx expects [[witness, ...]]
            tx_witnesses=[tx_witnesses])
    # NB: sprout dispatch also requires joinsplits; without them the call
    # falls through to the plain Bitcoin-style Tx at the bottom
    if 'sprout' in n and tx_joinsplits is not None:
        return sprout.SproutTx(
            version=version,
            tx_ins=tx_ins,
            tx_outs=tx_outs,
            lock_time=utils.i2le_padded(lock_time, 4),
            tx_joinsplits=tx_joinsplits if tx_joinsplits is not None else [],
            joinsplit_pubkey=joinsplit_pubkey,
            joinsplit_sig=joinsplit_sig)
    if 'overwinter' in n:
        return overwinter.OverwinterTx(
            tx_ins=tx_ins,
            tx_outs=tx_outs,
            lock_time=utils.i2le_padded(lock_time, 4),
            expiry_height=utils.i2le_padded(expiry, 4),
            tx_joinsplits=tx_joinsplits if tx_joinsplits is not None else [],
            joinsplit_pubkey=joinsplit_pubkey,
            joinsplit_sig=joinsplit_sig)
    if 'sapling' in n:
        return sapling.SaplingTx(
            tx_ins=tx_ins,
            tx_outs=tx_outs,
            lock_time=utils.i2le_padded(lock_time, 4),
            expiry_height=utils.i2le_padded(expiry, 4),
            value_balance=utils.i2le_padded(value_balance, 8),
            tx_shielded_spends=tx_shielded_spends,
            tx_shielded_outputs=tx_shielded_outputs,
            tx_joinsplits=tx_joinsplits,
            joinsplit_pubkey=joinsplit_pubkey,
            joinsplit_sig=joinsplit_sig,
            binding_sig=binding_sig)
    # Bitcoin-style: only set the segwit flag when witnesses were provided
    flag = riemann.network.SEGWIT_TX_FLAG \
        if tx_witnesses is not None else None
    return tx.Tx(version=utils.i2le_padded(version, 4),
                 flag=flag,
                 tx_ins=tx_ins,
                 tx_outs=tx_outs,
                 tx_witnesses=tx_witnesses,
                 lock_time=utils.i2le_padded(lock_time, 4))
def length_prepend(byte_string: bytes) -> bytes:
    '''Prefix a bytestring with the VarInt encoding of its own length.'''
    marker = tx.VarInt(len(byte_string))
    return marker.to_bytes() + byte_string
import riemann
from riemann import utils
from riemann.tx import shared
class ZcashByteData(shared.ByteData):
    '''Base class for Zcash-specific serializable structures.

    Refuses to instantiate unless the currently selected riemann network
    is a Zcash variant.
    '''

    def __init__(self):
        network_name = riemann.get_current_network_name()
        if 'zcash' not in network_name:
            raise ValueError(
                'Zcash classes not supported by network {}. '
                'How did you get here?'.format(network_name))
        super().__init__()
class SproutZkproof(ZcashByteData):
pi_sub_a: bytes
pi_prime_sub_a: bytes
pi_sub_b: bytes
pi_prime_sub_b: bytes
pi_sub_c: bytes
pi_prime_sub_c: bytes
pi_sub_k: bytes
pi_sub_h: bytes
def __init__(self,
pi_sub_a: bytes,
pi_prime_sub_a: bytes,
pi_sub_b: bytes,
pi_prime_sub_b: bytes,
pi_sub_c: bytes,
pi_prime_sub_c: bytes,
pi_sub_k: bytes,
pi_sub_h: bytes):
super().__init__()
self.validate_bytes(pi_sub_a, 33)
self.validate_bytes(pi_prime_sub_a, 33)
self.validate_bytes(pi_sub_b, 65)
self.validate_bytes(pi_prime_sub_b, 33)
self.validate_bytes(pi_sub_c, 33)
self.validate_bytes(pi_prime_sub_c, 33)
self.validate_bytes(pi_sub_k, 33)
self.validate_bytes(pi_sub_h, 33)
self += pi_sub_a
self += pi_prime_sub_a
self += pi_sub_b
self += pi_prime_sub_b
self += pi_sub_c
self += pi_prime_sub_c
self += pi_sub_k
self += pi_sub_h
self.pi_sub_a = pi_sub_a
self.pi_prime_sub_a = pi_prime_sub_a
self.pi_sub_b = pi_sub_b
self.pi_prime_sub_b = pi_prime_sub_b
self.pi_sub_c = pi_sub_c
self.pi_prime_sub_c = pi_prime_sub_c
self.pi_sub_k = pi_sub_k
self.pi_sub_h = pi_sub_h
self._make_immutable()
@classmethod
def from_bytes(SproutZkproof, byte_string: bytes) -> 'SproutZkproof':
return SproutZkproof(
pi_sub_a=byte_string[0:33],
pi_prime_sub_a=byte_string[33:66],
pi_sub_b=byte_string[66:131],
pi_prime_sub_b=byte_string[131:164],
pi_sub_c=byte_string[164:197],
pi_prime_sub_c=byte_string[197:230],
pi_sub_k=byte_string[230:263],
pi_sub_h=byte_string[263:296])
class SproutJoinsplit(ZcashByteData):
    '''A Zcash Sprout JoinSplit description.

    Transfers value between the transparent pool and shielded notes.
    At most one of vpub_old / vpub_new may be non-zero.
    '''

    vpub_old: bytes
    vpub_new: bytes
    anchor: bytes
    nullifiers: bytes
    commitments: bytes
    ephemeral_key: bytes
    random_seed: bytes
    vmacs: bytes
    zkproof: SproutZkproof
    encoded_notes: bytes

    def __init__(self,
                 vpub_old: bytes,
                 vpub_new: bytes,
                 anchor: bytes,
                 nullifiers: bytes,
                 commitments: bytes,
                 ephemeral_key: bytes,
                 random_seed: bytes,
                 vmacs: bytes,
                 zkproof: SproutZkproof,
                 encoded_notes: bytes):
        super().__init__()
        if not isinstance(zkproof, SproutZkproof):
            raise ValueError(
                'Invalid zkproof. '
                'Expected instance of SproutZkproof. Got {}'
                .format(type(zkproof).__name__))
        # Value may flow into or out of the shielded pool, not both.
        if (utils.le2i(vpub_old) != 0 and utils.le2i(vpub_new) != 0):
            raise ValueError('vpub_old or vpub_new must be zero')
        # (attribute name, value, expected byte length) in wire order,
        # excluding the zkproof and trailing note ciphertexts.
        sized_fields = (
            ('vpub_old', vpub_old, 8),
            ('vpub_new', vpub_new, 8),
            ('anchor', anchor, 32),
            ('nullifiers', nullifiers, 64),
            ('commitments', commitments, 64),
            ('ephemeral_key', ephemeral_key, 32),
            ('random_seed', random_seed, 32),
            ('vmacs', vmacs, 64))
        for _, blob, expected_len in sized_fields:
            self.validate_bytes(blob, expected_len)
        self.validate_bytes(encoded_notes, 1202)
        for _, blob, _ in sized_fields:
            self += blob
        self += zkproof
        self += encoded_notes
        for attr_name, blob, _ in sized_fields:
            setattr(self, attr_name, blob)
        self.zkproof = zkproof
        self.encoded_notes = encoded_notes
        self._make_immutable()

    @classmethod
    def from_bytes(cls, byte_string: bytes) -> 'SproutJoinsplit':
        '''Parse a 1802-byte serialized JoinSplit description.'''
        widths = (8, 8, 32, 64, 64, 32, 32, 64)
        chunks = []
        cursor = 0
        for width in widths:
            chunks.append(byte_string[cursor:cursor + width])
            cursor += width
        proof = SproutZkproof.from_bytes(byte_string[304:600])
        return cls(*chunks,
                   zkproof=proof,
                   encoded_notes=byte_string[600:1802])
import riemann
from riemann import utils
from riemann.tx import shared
from typing import List, Optional
class DecredByteData(shared.ByteData):
    '''Base class for Decred-specific serializable structures.

    Refuses to instantiate unless the currently selected riemann network
    is a Decred variant.
    '''

    def __init__(self):
        network_name = riemann.get_current_network_name()
        if 'decred' not in network_name:
            raise ValueError(
                'Decred classes not supported by network {}. '
                'How did you get here?'.format(network_name))
        super().__init__()
class DecredOutpoint(DecredByteData):
    '''A reference to a previous Decred output.

    Wire format (37 bytes): 32-byte txid, 4-byte output index,
    1-byte tree selector.
    '''

    tx_id: bytes
    index: bytes
    tree: bytes

    def __init__(self, tx_id: bytes, index: bytes, tree: bytes):
        super().__init__()
        for blob, expected_len in ((tx_id, 32), (index, 4), (tree, 1)):
            self.validate_bytes(blob, expected_len)
        for blob in (tx_id, index, tree):
            self += blob
        self.tx_id = tx_id
        self.index = index
        self.tree = tree
        self._make_immutable()

    def copy(self,
             tx_id: Optional[bytes] = None,
             index: Optional[bytes] = None,
             tree: Optional[bytes] = None):
        '''Return a new outpoint, overriding any fields supplied.'''
        new_tx_id = self.tx_id if tx_id is None else tx_id
        new_index = self.index if index is None else index
        new_tree = self.tree if tree is None else tree
        return DecredOutpoint(
            tx_id=new_tx_id,
            index=new_index,
            tree=new_tree)

    @classmethod
    def from_bytes(cls, byte_string: bytes) -> 'DecredOutpoint':
        '''Parse a 37-byte serialized outpoint.'''
        return cls(
            tx_id=byte_string[:32],
            index=byte_string[32:36],
            tree=byte_string[36:37])
class DecredTxIn(DecredByteData):
    '''A Decred transaction input: a 37-byte outpoint plus a 4-byte
    sequence number. Script data lives in DecredInputWitness instead.
    '''

    outpoint: DecredOutpoint
    sequence: bytes

    def __init__(self, outpoint: DecredOutpoint, sequence: bytes):
        super().__init__()
        self.validate_bytes(outpoint, 37)
        self.validate_bytes(sequence, 4)
        for piece in (outpoint, sequence):
            self += piece
        self.outpoint = outpoint
        self.sequence = sequence
        self._make_immutable()

    def copy(self,
             outpoint: Optional[DecredOutpoint] = None,
             sequence: Optional[bytes] = None) -> 'DecredTxIn':
        '''Return a new DecredTxIn, overriding any fields supplied.'''
        new_outpoint = self.outpoint if outpoint is None else outpoint
        new_sequence = self.sequence if sequence is None else sequence
        return DecredTxIn(outpoint=new_outpoint, sequence=new_sequence)

    @classmethod
    def from_bytes(cls, byte_string: bytes) -> 'DecredTxIn':
        '''Parse a 41-byte serialized input.'''
        return cls(
            outpoint=DecredOutpoint.from_bytes(byte_string[:37]),
            sequence=byte_string[37:41])
class DecredTxOut(DecredByteData):
    '''A Decred transaction output.

    Wire format: 8-byte value, 2-byte script version, then a
    VarInt-length-prefixed output script.
    '''

    value: bytes
    version: bytes
    output_script: bytes

    def __init__(self, value: bytes, version: bytes, output_script: bytes):
        super().__init__()
        self.validate_bytes(value, 8)
        self.validate_bytes(version, 2)
        self.validate_bytes(output_script, None)
        script_marker = shared.VarInt(len(output_script))
        for piece in (value, version, script_marker, output_script):
            self += piece
        self.value = value
        self.version = version
        self.output_script_len = len(output_script)
        self.output_script = output_script
        self._make_immutable()

    def copy(self,
             value: Optional[bytes] = None,
             version: Optional[bytes] = None,
             output_script: Optional[bytes] = None) -> 'DecredTxOut':
        '''Return a new DecredTxOut, overriding any fields supplied.'''
        new_value = self.value if value is None else value
        new_version = self.version if version is None else version
        new_script = (self.output_script if output_script is None
                      else output_script)
        return DecredTxOut(
            value=new_value,
            version=new_version,
            output_script=new_script)

    @classmethod
    def from_bytes(cls, byte_string: bytes) -> 'DecredTxOut':
        '''Parse a serialized output. Only short (single-byte VarInt)
        scripts are supported.'''
        script_len = shared.VarInt.from_bytes(byte_string[10:])
        if script_len.number >= 0xfc:
            raise NotImplementedError(
                'No support for abnormally long pk_scripts.')
        script_start = 10 + len(script_len)
        script_end = script_start + script_len.number
        return cls(
            value=byte_string[:8],
            version=byte_string[8:10],
            output_script=byte_string[script_start:script_end])
class DecredInputWitness(DecredByteData):
    '''Per-input witness data for a Decred transaction.

    Carries the input's value, block height and index, plus the
    signature script (stack script + redeem script) as one
    VarInt-length-prefixed blob.
    '''

    value: bytes
    height: bytes
    index: bytes
    stack_script: bytes
    redeem_script: bytes

    def __init__(self,
                 value: bytes,
                 height: bytes,
                 index: bytes,
                 stack_script: bytes,
                 redeem_script: bytes):
        super().__init__()
        for blob, expected_len in ((value, 8),
                                   (height, 4),
                                   (index, 4),
                                   (stack_script, None),
                                   (redeem_script, None)):
            self.validate_bytes(blob, expected_len)
        combined_script = stack_script + redeem_script
        for piece in (value, height, index,
                      shared.VarInt(len(combined_script)),
                      stack_script, redeem_script):
            self += piece
        self.value = value
        self.height = height
        self.index = index
        self.script_len = len(combined_script)
        self.stack_script = stack_script
        self.redeem_script = redeem_script
        self.script_sig = combined_script
        self._make_immutable()

    def copy(self,
             value: Optional[bytes] = None,
             height: Optional[bytes] = None,
             index: Optional[bytes] = None,
             stack_script: Optional[bytes] = None,
             redeem_script: Optional[bytes] = None) -> 'DecredInputWitness':
        '''Return a new DecredInputWitness, overriding any fields
        supplied.'''
        new_value = self.value if value is None else value
        new_height = self.height if height is None else height
        new_index = self.index if index is None else index
        new_stack = (self.stack_script if stack_script is None
                     else stack_script)
        new_redeem = (self.redeem_script if redeem_script is None
                      else redeem_script)
        return DecredInputWitness(
            value=new_value,
            height=new_height,
            index=new_index,
            stack_script=new_stack,
            redeem_script=new_redeem)

    @classmethod
    def from_bytes(cls, byte_string: bytes) -> 'DecredInputWitness':
        raise NotImplementedError('TODO')
class DecredTx(DecredByteData):
    '''An immutable Decred transaction.

    Serialized layout: version, VarInt-counted inputs, VarInt-counted
    outputs, lock_time, expiry, then VarInt-counted witnesses (one per
    input). Unlike Bitcoin, Decred keeps signature scripts out of the
    inputs and in a separate witness list, and hashes with blake256
    rather than double-sha256. Decred's txid covers only the "prefix"
    serialization (no witness data).
    '''

    def __init__(self,
                 version: bytes,
                 tx_ins: List[DecredTxIn],
                 tx_outs: List[DecredTxOut],
                 lock_time: bytes,
                 expiry: bytes,
                 tx_witnesses: List[DecredInputWitness]):
        '''
        Args:
            version: 4 bytes, little-endian.
            tx_ins: non-empty list of DecredTxIn.
            tx_outs: non-empty list of DecredTxOut.
            lock_time: 4 bytes, little-endian.
            expiry: 4 bytes, little-endian.
            tx_witnesses: list of DecredInputWitness; presumably one per
                input — not enforced here, confirm with callers.
        Raises:
            ValueError: on bad field sizes, wrong element types, empty
                input/output lists, or a serialization over 100kB.
        '''
        super().__init__()
        # Fixed-width little-endian fields.
        self.validate_bytes(version, 4)
        self.validate_bytes(lock_time, 4)
        self.validate_bytes(expiry, 4)
        # A transaction needs at least one input and one output.
        if min(len(tx_ins), len(tx_outs)) == 0:
            raise ValueError('Too few inputs or outputs. Stop that.')
        for tx_in in tx_ins:
            if not isinstance(tx_in, DecredTxIn):
                raise ValueError(
                    'Invalid TxIn. '
                    'Expected instance of DecredTxIn. Got {}'
                    .format(type(tx_in).__name__))
        for tx_out in tx_outs:
            if not isinstance(tx_out, DecredTxOut):
                raise ValueError(
                    'Invalid TxOut. '
                    'Expected instance of DecredTxOut. Got {}'
                    .format(type(tx_out).__name__))
        for tx_witness in tx_witnesses:
            if not isinstance(tx_witness, DecredInputWitness):
                raise ValueError(
                    'Invalid TxWitness. '
                    'Expected instance of DecredInputWitness. Got {}'
                    .format(type(tx_witness).__name__))
        # Accumulate the full (type 0) serialization into self.
        self += version
        self += shared.VarInt(len(tx_ins))
        for tx_in in tx_ins:
            self += tx_in
        self += shared.VarInt(len(tx_outs))
        for tx_out in tx_outs:
            self += tx_out
        self += lock_time
        self += expiry
        self += shared.VarInt(len(tx_witnesses))
        for tx_witness in tx_witnesses:
            self += tx_witness
        self.version = version
        self.tx_ins = tx_ins
        self.tx_outs = tx_outs
        self.lock_time = lock_time
        self.expiry = expiry
        self.tx_witnesses = tx_witnesses
        # Size sanity limit: reject transactions over 100,000 bytes.
        if len(self) > 100000:
            raise ValueError(
                'Tx is too large. '
                'Expect less than 100kB. Got: {} bytes'.format(len(self)))
        # TODO: check this
        # txid is the blake256 of the prefix serialization only.
        self.tx_id_le = self.prefix_hash()
        self.tx_id = utils.change_endianness(self.tx_id_le)
        # Ignoring this for now, as it's only used for in-block merkle trees
        # self.tx_id_full_le = utils.blake256(self.tx_id_le
        #                                     + self.witness_hash())
        # self.tx_id_full = utils.change_endianness(self.tx_id_full_le)
        self._make_immutable()

    @classmethod
    def from_bytes(DecredTx, byte_string: bytes) -> 'DecredTx':
        '''Deserialization is not yet implemented.'''
        raise NotImplementedError('TODO')

    def prefix_hash(self) -> bytes:
        '''blake256 of the prefix serialization (the txid, little-endian).

        Returns the cached tx_id_le after construction; during __init__
        (before the attribute exists) it hashes the prefix directly.
        '''
        try:
            return self.tx_id_le  # Prevent redundant hashing
        except AttributeError:
            return utils.blake256(self.prefix())

    def witness_hash(self) -> bytes:
        '''blake256 of the witness-only serialization.'''
        return utils.blake256(self.witness())

    def witness_signing_hash(self) -> bytes:
        '''blake256 of the witness-signing serialization (scripts only).'''
        return utils.blake256(self.witness_signing())

    def prefix(self) -> bytes:
        '''Serialization type 1: everything except witness data.

        The on-wire version field is (version, serialization type), 2
        little-endian bytes each; only the low 2 version bytes are
        reused here, with the type forced to 1.
        '''
        data = DecredByteData()
        data += self.version[:2]
        data += b'\x01\x00'  # Serialization type 1 (prefix only)
        data += shared.VarInt(len(self.tx_ins))
        for tx_in in self.tx_ins:
            data += tx_in
        data += shared.VarInt(len(self.tx_outs))
        for tx_out in self.tx_outs:
            data += tx_out
        data += self.lock_time
        data += self.expiry
        return data.to_bytes()

    def witness(self) -> bytes:
        '''Serialization type 2: witness data only.'''
        data = DecredByteData()
        data += self.version[:2]
        data += b'\x02\x00'  # Serialization type 2 (witness only)
        data += shared.VarInt(len(self.tx_witnesses))
        for tx_witness in self.tx_witnesses:
            data += tx_witness
        return data.to_bytes()

    def witness_signing(self) -> bytes:
        '''Serialization type 3: witness scripts only.

        Each witness contributes just a VarInt script length and the
        script itself (value/height/index are omitted).
        '''
        data = DecredByteData()
        data += self.version[:2]
        data += b'\x03\x00'  # Serialization type 3 (witness signing)
        data += shared.VarInt(len(self.tx_witnesses))
        for tx_witness in self.tx_witnesses:
            data += shared.VarInt(tx_witness.script_len)
            data += tx_witness.script_sig
        return data.to_bytes()

    def calculate_fee(self) -> int:
        '''Return the tx fee: sum of witness input values minus sum of
        output values (both read as little-endian integers).'''
        return \
            sum([utils.le2i(w.value) for w in self.tx_witnesses]) \
            - sum([utils.le2i(o.value) for o in self.tx_outs])

    def copy(self, version=None, tx_ins=None, tx_outs=None,
             lock_time=None, expiry=None, tx_witnesses=None):
        '''Return a new DecredTx, overriding any fields supplied.'''
        return DecredTx(
            version=version if version is not None else self.version,
            tx_ins=tx_ins if tx_ins is not None else self.tx_ins,
            tx_outs=tx_outs if tx_outs is not None else self.tx_outs,
            lock_time=(lock_time if lock_time is not None
                       else self.lock_time),
            expiry=expiry if expiry is not None else self.expiry,
            tx_witnesses=(tx_witnesses if tx_witnesses is not None
                          else self.tx_witnesses))

    def sighash_none(self) -> bytes:
        '''Deliberately unimplemented.'''
        raise NotImplementedError('SIGHASH_NONE is a bad idea.')

    def _sighash_prep(self,
                      index: int,
                      script: Optional[bytes] = None) -> 'DecredTx':
        '''Return a copy with all witness scripts blanked except the
        input at `index`, whose stack_script is set to `script`
        (typically the prevout's pk_script or redeem script).
        '''
        copy_tx_witnesses = [w.copy(stack_script=b'', redeem_script=b'')
                             for w in self.tx_witnesses]
        copy_tx_witnesses[index] = \
            copy_tx_witnesses[index].copy(stack_script=script,
                                          redeem_script=b'')
        return self.copy(tx_witnesses=copy_tx_witnesses)

    def sighash_single(self,
                       index: int,
                       script: Optional[bytes] = None,
                       anyone_can_pay: bool = False) -> bytes:
        '''Compute the SIGHASH_SINGLE digest for the input at `index`.

        https://github.com/decred/dcrd/blob/master/txscript/script.go
        '''
        copy_tx = self._sighash_prep(
            index=index,
            script=script)
        try:
            # NOTE(review): this slice is immediately overwritten by the
            # comprehension below and appears to be dead code — confirm.
            copy_tx_outs = copy_tx.tx_outs[:index + 1]
            # Blank every output (max value, empty script) ...
            copy_tx_outs = [DecredTxOut(
                value=b'\xff' * 8,
                version=b'\x00\x00',
                output_script=b'')
                for _ in copy_tx.tx_ins]
            # ... then restore only the output paired with this input.
            copy_tx_outs[index] = copy_tx.tx_outs[index]
        except IndexError:
            # index >= len(tx_outs). Bitcoin would silently sign the
            # digest 1 here; refuse instead.
            raise NotImplementedError(
                'I refuse to implement the SIGHASH_SINGLE bug.')
        # Zero the sequence number of every input except the one signed.
        copy_tx_ins = [tx_in.copy(sequence=b'\x00\x00\x00\x00')
                       for tx_in in copy_tx.tx_ins]
        copy_tx_ins[index] = copy_tx.tx_ins[index]
        copy_tx = copy_tx.copy(tx_ins=copy_tx_ins, tx_outs=copy_tx_outs)
        if anyone_can_pay:
            return self._sighash_anyone_can_pay(
                index=index,
                copy_tx=copy_tx,
                sighash_type=shared.SIGHASH_SINGLE)
        return self._sighash_final_hashing(
            index=index,
            copy_tx=copy_tx,
            sighash_type=shared.SIGHASH_SINGLE)

    def sighash_all(self,
                    index: int,
                    script: Optional[bytes] = None,
                    anyone_can_pay: bool = False) -> bytes:
        '''Compute the SIGHASH_ALL digest for the input at `index`.

        https://gist.github.com/davecgh/b00ec6e11f73620c3deddf160353961c
        https://github.com/decred/dcrd/blob/master/txscript/script.go
        '''
        copy_tx = self._sighash_prep(index, script)
        if anyone_can_pay:
            return self._sighash_anyone_can_pay(
                index=index,
                copy_tx=copy_tx,
                sighash_type=shared.SIGHASH_ALL)
        return self._sighash_final_hashing(
            index=index,
            copy_tx=copy_tx,
            sighash_type=shared.SIGHASH_ALL)

    def _sighash_anyone_can_pay(self,
                                index: int,
                                copy_tx: 'DecredTx',
                                sighash_type: int) -> bytes:
        '''Apply the ANYONECANPAY modifier: blank every witness script
        except the signed input's, OR the flag into the type, and hash.
        '''
        copy_tx_witnesses = [
            w.copy(stack_script=b'', redeem_script=b'')
            for w in copy_tx.tx_witnesses]
        copy_tx_witnesses[index] = copy_tx.tx_witnesses[index]
        copy_tx = copy_tx.copy(tx_witnesses=copy_tx_witnesses)
        return self._sighash_final_hashing(
            index=index,
            copy_tx=copy_tx,
            sighash_type=sighash_type | shared.SIGHASH_ANYONECANPAY)

    def _sighash_final_hashing(
            self,
            index: int,
            copy_tx: 'DecredTx',
            sighash_type: int) -> bytes:
        '''Assemble and blake256 the final digest preimage:
        4-byte LE sighash type || prefix hash || witness-signing hash.
        '''
        sighash = DecredByteData()
        sighash += utils.i2le_padded(sighash_type, 4)
        sighash += copy_tx.prefix_hash()
        sighash += copy_tx.witness_signing_hash()
        return utils.blake256(sighash.to_bytes())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.