code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
from __future__ import annotations
from typing import Callable
import xarray as xr
from risus.engine import target_number, combat, single_action_conflict
def make_target_number_table(
        max_potency: int,
        n_faces: int = 6,
        **kwargs
) -> xr.DataArray:
    """Make a dataframe comparing potencies to target numbers.

    This table includes all the target numbers up to the maximum possible for
    the highest potency (that is, n_faces * max_potency).

    # Arguments:
    * `max_potency`: The highest potency to consider.
    * `n_faces`: How many faces the dice should have.

    Extra keyword arguments (e.g. `percent`, `explode`) are forwarded through
    `make_table` to `risus.engine.target_number`.

    # Returns:
    A DataFrame whose columns are the potencies, whose rows are the target
    numbers, and whose contents are the probability (as a percentage) that a
    cliché of that potency would beat that target number.

    # Examples:
    >>> make_target_number_table(4, percent=True).round(1).to_pandas().T
    attack_potency     1      2      3      4
    enemy_potency
    1              100.0  100.0  100.0  100.0
    2               83.3  100.0  100.0  100.0
    3               66.7   97.2  100.0  100.0
    4               50.0   91.7   99.5  100.0
    5               33.3   83.3   98.1   99.9
    6               16.7   72.2   95.4   99.6
    7                0.0   58.3   90.7   98.8
    8                0.0   41.7   83.8   97.3
    9                0.0   27.8   74.1   94.6
    10               0.0   16.7   62.5   90.3
    11               0.0    8.3   50.0   84.1
    12               0.0    2.8   37.5   76.1
    13               0.0    0.0   25.9   66.4
    14               0.0    0.0   16.2   55.6
    15               0.0    0.0    9.3   44.4
    16               0.0    0.0    4.6   33.6
    17               0.0    0.0    1.9   23.9
    18               0.0    0.0    0.5   15.9
    19               0.0    0.0    0.0    9.7
    20               0.0    0.0    0.0    5.4
    21               0.0    0.0    0.0    2.7
    22               0.0    0.0    0.0    1.2
    23               0.0    0.0    0.0    0.4
    24               0.0    0.0    0.0    0.1

    >>> make_target_number_table(4, n_faces=4, explode=True).round(3).to_pandas().T
    attack_potency    1      2      3      4
    enemy_potency
    1              1.00  1.000  1.000  1.000
    2              0.75  1.000  1.000  1.000
    3              0.50  0.938  1.000  1.000
    4              0.25  0.812  0.984  1.000
    5              0.00  0.625  0.938  0.996
    6              0.00  0.500  0.844  0.980
    7              0.00  0.406  0.734  0.941
    8              0.00  0.344  0.629  0.879
    9              0.00  0.000  0.547  0.801
    10             0.00  0.000  0.461  0.723
    11             0.00  0.000  0.379  0.645
    12             0.00  0.000  0.303  0.566
    13             0.00  0.000  0.000  0.486
    14             0.00  0.000  0.000  0.414
    15             0.00  0.000  0.000  0.350
    16             0.00  0.000  0.000  0.295
    """
    # y_axis is the largest total a potency-`max_potency` cliché can roll.
    return make_table(target_number, x_axis=max_potency, y_axis=n_faces*max_potency, n_faces=n_faces, **kwargs)
def make_combat_table(max_potency: int, **kwargs) -> xr.DataArray:
    """Tabulate `engine.combat` over a square grid of potencies.

    Rows are attacker potencies, columns enemy potencies; no helper is
    involved. Keyword arguments are forwarded to `risus.engine.combat`.

    # Examples:
    >>> make_combat_table(6, percent=True).round(1).to_pandas()
    enemy_potency       1      2      3     4     5     6
    attack_potency
    1                50.0    5.0    0.1   0.0   0.0   0.0
    2                95.0   50.0    8.2   0.3   0.0   0.0
    3                99.9   91.8   50.0  10.5   0.7   0.0
    4               100.0   99.7   89.5  50.0  12.2   1.1
    5               100.0  100.0   99.3  87.8  50.0  13.7
    6               100.0  100.0  100.0  98.9  86.3  50.0
    """
    side = max_potency
    # z_axis=0 means "no helper", so the helper dimension collapses away.
    return make_table(combat, x_axis=side, y_axis=side, z_axis=0, **kwargs)
def make_single_action_conflict_table(max_potency: int, **kwargs) -> xr.DataArray:
    """Tabulate `engine.single_action_conflict` over a square potency grid.

    Keyword arguments are forwarded to the engine (e.g. `n_faces`, `percent`).

    # Examples:
    >>> make_single_action_conflict_table(8, n_faces=8, percent=True).round(1).to_pandas()
    enemy_potency       1      2     3     4     5     6     7     8
    attack_potency
    1                50.0   11.6   1.7   0.2   0.0   0.0   0.0   0.0
    2                88.4   50.0  17.9   4.6   0.9   0.1   0.0   0.0
    3                98.3   82.1  50.0  22.0   7.4   2.0   0.5   0.1
    4                99.8   95.4  78.0  50.0  24.8   9.9   3.3   0.9
    5               100.0   99.1  92.6  75.2  50.0  27.0  12.0   4.6
    6               100.0   99.9  98.0  90.1  73.0  50.0  28.6  13.9
    7               100.0  100.0  99.5  96.7  88.0  71.4  50.0  30.0
    8               100.0  100.0  99.9  99.1  95.4  86.1  70.0  50.0
    """
    side = max_potency
    return make_table(
        single_action_conflict,
        x_axis=side,
        y_axis=side,
        **kwargs,
    )
def make_team_combat_table(
        attack_potency: int,
        helper_potency: int,
        enemy_potency: int,
        **kwargs) -> xr.DataArray:
    """Make a team combat table.

    # Arguments
    * `attack_potency`: The maximum player potency to consider.
    * `helper_potency`: The maximum helper potency to consider.
    * `enemy_potency`: The maximum enemy potency to consider.

    Also accepts keyword arguments as for `risus.engine`.

    # Returns
    A DataFrame of series whose first axis is the player potency, the
    second axis the enemy potency, and the third axis the helper potency.

    # Examples
    >>> from risus.engine import combat
    >>> pred = combat(attack_potency=6,helper_potency=8,enemy_potency=10)
    >>> tab = make_team_combat_table(attack_potency=6,helper_potency=8,enemy_potency=10).loc[6,10,8]
    >>> (pred == tab).values
    array(True)

    >>> (make_team_combat_table(6,0,6) == make_combat_table(6)).all().values
    array(True)
    """
    # Map the three caller-facing maxima onto make_table's generic axes:
    # x = attacker, y = enemy, z = helper.
    return make_table(
        combat,
        x_axis=attack_potency,
        y_axis=enemy_potency,
        z_axis=helper_potency,
        **kwargs,
    )
def make_table(
        compare_func: Callable[..., float],
        x_axis: int,
        y_axis: int,
        z_axis: int = 0,
        **kwargs
) -> xr.DataArray:
    """Make a victory table of arbitrary shape for a comparison function.

    # Arguments:
    * `compare_func`: The comparison function to use.
    * `x_axis`: The shape in the first axis (attacker potencies 1..x_axis).
    * `y_axis`: The shape in the second axis (enemy potencies 1..y_axis).
    * `z_axis`: The shape in the third axis (helper potencies 0..z_axis).

    Also accepts keyword arguments as for `risus.engine`.

    # Returns:
    A DataFrame of floats, or a DataFrame of series where z_axis > 0.

    # Examples:
    >>> from risus.engine import combat
    >>> make_table(combat, 6, 10, 0, percent=True, explode=True).round(1).to_pandas()
    enemy_potency       1      2      3     4     5     6     7     8    9    10
    attack_potency
    1                51.6    9.4    0.6   0.0   0.0   0.0   0.0   0.0  0.0   0.0
    2                95.2   57.6   17.1   2.4   0.2   0.0   0.0   0.0  0.0   0.0
    3                99.9   93.7   62.7  24.1   5.0   0.5   0.0   0.0  0.0   0.0
    4               100.0   99.8   93.5  67.3  30.8   8.3   1.3   0.1  0.0   0.0
    5               100.0  100.0   99.7  93.9  71.5  37.2  12.2   2.4  0.3   0.0
    6               100.0  100.0  100.0  99.6  94.5  75.2  43.3  16.5  4.0   0.6
    """
    attack_range = range(1, x_axis + 1)
    enemy_range = range(1, y_axis + 1)
    # Helper potencies start at 0 so "no helper" is always representable.
    helper_range = range(0, z_axis + 1)

    cube = [
        [
            [compare_func(attack_potency=attack,
                          helper_potency=helper,
                          enemy_potency=enemy,
                          **kwargs)
             for helper in helper_range]
            for enemy in enemy_range
        ]
        for attack in attack_range
    ]

    table = xr.DataArray(
        cube,
        coords=[attack_range, enemy_range, helper_range],
        dims=["attack_potency", "enemy_potency", "helper_potency"],
        name=f"{x_axis}x{y_axis}x{z_axis}-victory",
    )
    # Collapse singleton axes (notably helper when z_axis == 0).
    return table.squeeze(drop=True)
import datetime
import enum
import re
from typing import List, Optional
from pydantic import AnyUrl, EmailStr, Field, HttpUrl, datetime_parse, root_validator
from .common import BaseModel
# Validate zipcode is in 5 digit or 5 digit + 4 digit format
# e.g. 94612, 94612-1234
ZIPCODE_RE = re.compile(r"^[0-9]{5}(?:-[0-9]{4})?$")

# Validate that phone number is a valid US phone number.
# Less strict than spec so normalizers don't need to encode phone numbers exactly.
# Optionally allows a +1 country code and a trailing extension.
# e.g. (444) 444-4444, +1 (444) 444-4444, 444-444-4444 x123
US_PHONE_RE = re.compile(
    r"^(?:(?:\+?1\s*(?:[.-]\s*)?)?(?:\(([0-9]{3})\)|([0-9]{3}))\s*(?:[.-]\s*)?)?([0-9]{3})\s*(?:[.-]\s*)?([0-9]{4})(?:\s*(?:\#|x\.?|ext\.?|extension)\s*(\d+))?$"  # noqa: E501
)

# Lowercase alpha-numeric and underscores
# e.g. google_places
ENUM_VALUE_RE = re.compile(r"^[a-z0-9_]+$")

# Lowercase alpha-numeric and underscores with one colon
# e.g. az_arcgis:hsdg46sj
LOCATION_ID_RE = re.compile(r"^[a-z0-9_]+\:[a-zA-Z0-9_-]+$")

# Source ids can have anything but a space or a colon. Those must be replaced
# with another character (like a dash).
SOURCE_ID_RE = re.compile(r"^[^\s\:]+$")

# Max length for long text fields storing notes
NOTE_MAX_LENGTH = 2046
# Max length for normal string value fields
VALUE_MAX_LENGTH = 256
# Max length for short enum identifier fields
ENUM_MAX_LENGTH = 64
# Max length for id string fields
ID_MAX_LENGTH = 128
class StringDatetime(datetime.datetime):
    """Datetime field type that normalizes to an ISO-8601 string.

    Pydantic first parses the raw value into a ``datetime``, then
    ``validate`` re-serializes it, so the stored value is a string.
    """

    @classmethod
    def __get_validators__(cls):
        # Pydantic v1 hook: validators run in yield order.
        yield datetime_parse.parse_datetime
        yield cls.validate

    @classmethod
    def validate(cls, v: datetime.datetime) -> str:
        return v.isoformat()
class StringDate(datetime.date):
    """Date field type that normalizes to an ISO-8601 "YYYY-MM-DD" string."""

    @classmethod
    def __get_validators__(cls):
        # Pydantic v1 hook: parse the raw value to a date, then stringify.
        yield datetime_parse.parse_date
        yield cls.validate

    @classmethod
    def validate(cls, v: datetime.date) -> str:
        return v.isoformat()
class StringTime(datetime.time):
    """Time field type that normalizes to an ISO-8601 "HH:MM" string.

    Bug fix: this class previously subclassed ``datetime.date``, but its
    validators parse and format *times* (compare StringDate/StringDatetime),
    so the base class must be ``datetime.time``.
    """

    @classmethod
    def __get_validators__(cls):
        # Pydantic v1 hook: parse the raw value to a time, then stringify.
        yield datetime_parse.parse_time
        yield cls.validate

    @classmethod
    def validate(cls, v: datetime.time) -> str:
        # "minutes" truncates seconds/microseconds, yielding "HH:MM".
        return v.isoformat("minutes")
@enum.unique
class State(str, enum.Enum):
    """USPS two-letter codes for US states, DC, and territories."""

    ALABAMA = "AL"
    ALASKA = "AK"
    AMERICAN_SAMOA = "AS"
    ARIZONA = "AZ"
    ARKANSAS = "AR"
    CALIFORNIA = "CA"
    COLORADO = "CO"
    CONNECTICUT = "CT"
    DELAWARE = "DE"
    DISTRICT_OF_COLUMBIA = "DC"
    FLORIDA = "FL"
    GEORGIA = "GA"
    GUAM = "GU"
    HAWAII = "HI"
    IDAHO = "ID"
    ILLINOIS = "IL"
    INDIANA = "IN"
    IOWA = "IA"
    KANSAS = "KS"
    KENTUCKY = "KY"
    LOUISIANA = "LA"
    MAINE = "ME"
    MARYLAND = "MD"
    MASSACHUSETTS = "MA"
    MICHIGAN = "MI"
    MINNESOTA = "MN"
    MISSISSIPPI = "MS"
    MISSOURI = "MO"
    MONTANA = "MT"
    NEBRASKA = "NE"
    NEVADA = "NV"
    NEW_HAMPSHIRE = "NH"
    NEW_JERSEY = "NJ"
    NEW_MEXICO = "NM"
    NEW_YORK = "NY"
    NORTH_CAROLINA = "NC"
    NORTH_DAKOTA = "ND"
    NORTHERN_MARIANA_IS = "MP"
    OHIO = "OH"
    OKLAHOMA = "OK"
    OREGON = "OR"
    PENNSYLVANIA = "PA"
    PUERTO_RICO = "PR"
    RHODE_ISLAND = "RI"
    SOUTH_CAROLINA = "SC"
    SOUTH_DAKOTA = "SD"
    TENNESSEE = "TN"
    TEXAS = "TX"
    UTAH = "UT"
    VERMONT = "VT"
    VIRGINIA = "VA"
    VIRGIN_ISLANDS = "VI"
    WASHINGTON = "WA"
    WEST_VIRGINIA = "WV"
    WISCONSIN = "WI"
    WYOMING = "WY"
@enum.unique
class ContactType(str, enum.Enum):
    """Purpose of a Contact entry: general inquiries or booking."""

    GENERAL = "general"
    BOOKING = "booking"
@enum.unique
class DayOfWeek(str, enum.Enum):
    """Days of the week, plus a pseudo-day for public-holiday hours."""

    MONDAY = "monday"
    TUESDAY = "tuesday"
    WEDNESDAY = "wednesday"
    THURSDAY = "thursday"
    FRIDAY = "friday"
    SATURDAY = "saturday"
    SUNDAY = "sunday"
    PUBLIC_HOLIDAYS = "public_holidays"
@enum.unique
class WheelchairAccessLevel(str, enum.Enum):
    """How wheelchair-accessible a location is."""

    YES = "yes"  # there is wheelchair access, but level of service unknown
    FULL = "full"  # there is full wheelchair access
    PARTIAL = "partial"  # there is partial wheelchair access
    NO = "no"  # there is no wheelchair access
class Address(BaseModel):
    """
    {
        "street1": str,
        "street2": str,
        "city": str,
        "state": str as state initial e.g. CA,
        "zip": str,
    },
    """

    street1: Optional[str] = Field(max_length=VALUE_MAX_LENGTH)
    street2: Optional[str] = Field(max_length=VALUE_MAX_LENGTH)
    city: Optional[str] = Field(max_length=VALUE_MAX_LENGTH)
    state: Optional[State]
    # 5-digit or ZIP+4 format (see ZIPCODE_RE).
    zip: Optional[str] = Field(pattern=ZIPCODE_RE.pattern)
class Contact(BaseModel):
    """
    {
        "contact_type": str as contact type enum e.g. booking,
        "phone": str as (###) ###-###,
        "website": str,
        "email": str,
        "other": str,
    }
    """

    contact_type: Optional[ContactType]
    phone: Optional[str] = Field(pattern=US_PHONE_RE.pattern)
    website: Optional[HttpUrl]
    email: Optional[EmailStr]
    other: Optional[str] = Field(max_length=NOTE_MAX_LENGTH)

    @root_validator(skip_on_failure=True)
    @classmethod
    def validate_has_one_value(cls, values: dict) -> dict:
        # Exactly one of phone/website/email/other must be set per entry;
        # contact_type alone is not a valid Contact.
        oneof_fields = ["phone", "website", "email", "other"]
        has_values = [key for key in oneof_fields if values.get(key)]

        if len(has_values) > 1:
            raise ValueError(
                f"Multiple values specified in {', '.join(has_values)}. "
                "Only one value should be specified per Contact entry."
            )

        if not has_values:
            raise ValueError("No values specified for Contact.")

        return values
class OpenDate(BaseModel):
    """
    {
        "opens": str as iso8601 date,
        "closes": str as iso8601 date,
    }
    """

    opens: Optional[StringDate]
    closes: Optional[StringDate]

    @root_validator(skip_on_failure=True)
    @classmethod
    def validate_closes_after_opens(cls, values: dict) -> dict:
        # Only enforced when both ends are present; equal dates are allowed
        # (single-day opening). Values are ISO strings, which compare
        # correctly lexicographically.
        opens = values.get("opens")
        closes = values.get("closes")

        if opens and closes:
            if closes < opens:
                raise ValueError("Closes date must be after opens date")

        return values
class OpenHour(BaseModel):
    """
    {
        "day": str as day of week enum e.g. monday,
        "opens": str as 24h local time formatted as hh:mm,
        "closes": str as 24h local time formatted as hh:mm,
    }
    """

    day: DayOfWeek
    opens: StringTime
    closes: StringTime

    @root_validator(skip_on_failure=True)
    @classmethod
    def validate_closes_after_opens(cls, values: dict) -> dict:
        # Values are zero-padded "HH:MM" strings, so string comparison is
        # equivalent to time comparison. NOTE(review): hours that wrap past
        # midnight (e.g. 22:00-02:00) would be rejected here -- confirm
        # that is intended.
        opens = values.get("opens")
        closes = values.get("closes")

        if opens and closes:
            if closes < opens:
                raise ValueError("Closes time must be after opens time")

        return values
class Availability(BaseModel):
    """
    {
        "drop_in": bool,
        "appointments": bool,
    },
    """

    # Whether walk-ins are accepted.
    drop_in: Optional[bool]
    # Whether appointments are offered.
    appointments: Optional[bool]
class Access(BaseModel):
    """
    {
        "walk": bool,
        "drive": bool,
        "wheelchair": str,
    }
    """

    # Whether the location is reachable on foot.
    walk: Optional[bool]
    # Whether the location is reachable by car.
    drive: Optional[bool]
    wheelchair: Optional[WheelchairAccessLevel]
class Source(BaseModel):
    """
    {
        "source": str as source type enum e.g. vaccinespotter,
        "id": str as source defined id e.g. 7382088,
        "fetched_from_uri": str as uri where data was fetched from,
        "fetched_at": str as iso8601 utc datetime (when scraper ran),
        "published_at": str as iso8601 utc datetime (when source claims it updated),
        "data": {...parsed source data in source schema...},
    }
    """

    # Name of the upstream source (lowercase snake_case identifier).
    source: str = Field(pattern=ENUM_VALUE_RE.pattern, max_length=ENUM_MAX_LENGTH)
    # Source-defined record id; may not contain spaces or colons.
    id: str = Field(pattern=SOURCE_ID_RE.pattern, max_length=ID_MAX_LENGTH)
    fetched_from_uri: Optional[AnyUrl]
    fetched_at: Optional[StringDatetime]
    published_at: Optional[StringDatetime]
    # Raw record, in the source's own schema.
    data: dict
# TODO: accessibility accomodations as amenities
# "accomodationName": {
# "description": "name of the accessibility accomodation",
# "type": "string"
# },
# "accomodationDescription": {
# "description": "Description of the accomodation",
# "type": "string"
# },
# "accomodationIcon": {
# "description": " icon for the accessibility accomodation",
# "type": "string"
# }
class Amenity(BaseModel):
    """
    {
        "name": str, name of the amenity,
        "description": str description of the amenity,
        "icon": str as uri of icon provided with amenity,
    }
    """

    name: Optional[str] = Field(max_length=VALUE_MAX_LENGTH)
    description: Optional[str]
    icon: Optional[AnyUrl]
class RentCost(BaseModel):
    """
    {
        "notes": str, Any notes about the rent,
        "minCost": int, Minimum average cost of rent in cents per month,
        "maxCost": int, Maximum average cost of rent in cents per month,
    }
    """

    notes: Optional[str]
    minCost: Optional[int]
    maxCost: Optional[int]

    @root_validator(skip_on_failure=True)
    @classmethod
    def validate_max_greaterthan_min(cls, values: dict) -> dict:
        # Only enforced when both bounds are present. Compare against None
        # explicitly: a legitimate 0-cent bound is falsy and previously
        # caused the validation to be skipped entirely.
        low = values.get("minCost")
        high = values.get("maxCost")

        if low is not None and high is not None:
            if high < low:
                raise ValueError(
                    "Minimum rent cost must be less than maximum rent cost"
                )

        return values
class UtilityCosts(BaseModel):
    """
    {
        "electric": int cost of electric service in cents per month
        "water": int cost of water service in cents per month
        "gas": int cost of gas service in cents per month
        "sewer": int cost of sewer service in cents per month
        "internet": int cost of internet service in cents per month
    }
    """

    # All amounts are cents per month.
    electric: Optional[int]
    water: Optional[int]
    gas: Optional[int]
    sewer: Optional[int]
    internet: Optional[int]
class Appliances(BaseModel):
    """
    {
        "washingMachine": bool whether or not there is a washingMachine in the unit,
        "dryer": bool whether or not there is a dryer in the unit,
        "oven": bool whether or not there is a oven (put food inside) in the unit,
        "stove": bool whether or not there is a stove (put food on top) in the unit,
        "ovenAsRange": bool whether the oven and the range are part of the same appliance,
        "dishwasher": bool whether or not there is a dishwasher in the unit,
        "refrigerator": bool whether or not there is a refrigerator in the unit,
        "microwave": bool whether or not there is a microwave in the unit,
    }
    """

    # Candidate fields not yet modeled:
    # sink
    # garbageDisposal
    # island
    washingMachine: Optional[bool]
    dryer: Optional[bool]
    oven: Optional[bool]
    stove: Optional[bool]
    ovenAsRange: Optional[bool]
    dishwasher: Optional[bool]
    refrigerator: Optional[bool]
    microwave: Optional[bool]
class UnitType(BaseModel):
    """
    {
        "name": str, name of this unit type/model,
        "id": str as source defined id e.g. 7382088,
        "description": str description of this unit type/model,
        "shared": bool Is this unit shared with roommates?,
        "bedroomCount": int number of bedrooms,
        "bathroomCount": int number of bathrooms,
        "floorplanUrl": str as uri to floorplan document,
        "rent": object as RentCost,
        "appliances": object as Appliances what appliances does this unit have,
        "amenities": list of Amenity consisting of other amenities in the unit,
        "utilitiesCost": object as cost of each utility,
    }
    """

    name: Optional[str] = Field(max_length=VALUE_MAX_LENGTH)
    description: Optional[str]
    # Source-defined id; required, unlike the descriptive fields.
    id: str = Field(pattern=SOURCE_ID_RE.pattern, max_length=ID_MAX_LENGTH)
    shared: Optional[bool]
    bedroomCount: Optional[int]
    bathroomCount: Optional[int]
    floorplanUrl: Optional[AnyUrl]
    rent: Optional[RentCost]
    appliances: Optional[Appliances]
    amenities: Optional[List[Amenity]]
    utilitiesCost: Optional[UtilityCosts]
class NormalizedApartmentComplex(BaseModel):
    """Top-level normalized record for one apartment complex.

    The `id` is expected to be "<source_name>:<source_id>" (see
    `validate_id_source` below).
    """

    id: str = Field(max_length=ID_MAX_LENGTH)
    name: Optional[str] = Field(max_length=VALUE_MAX_LENGTH)
    address: Optional[Address]
    onRITCampus: Optional[bool]
    renewable: Optional[bool]
    contact: Optional[List[Contact]]
    # languages: Optional[List[str]]  # [str as ISO 639-1 code]
    opening_dates: Optional[List[OpenDate]]
    opening_hours: Optional[List[OpenHour]]
    availability: Optional[Availability]
    access: Optional[Access]
    links: Optional[List[AnyUrl]]
    description: Optional[str]
    subletPolicy: Optional[str]
    reletPolicy: Optional[str]
    imageUrl: Optional[AnyUrl]
    active: Optional[bool]
    amenities: Optional[List[Amenity]]
    unitTypes: Optional[List[UnitType]]
    source: Source

    @root_validator(skip_on_failure=True)
    @classmethod
    def validate_id_source(cls, values: dict) -> dict:
        # Best-effort check: only enforced when id, source, and source.source
        # are all present.
        loc_id = values.get("id")
        if not loc_id:
            return values

        source = values.get("source")
        if not source:
            return values

        source_name = source.source
        if not source_name:
            return values

        # The record id must be namespaced by the source, e.g. "az_arcgis:abc".
        if not loc_id.startswith(f"{source_name}:"):
            raise ValueError("Location ID must be prefixed with source name")

        return values
import logging
from contextlib import contextmanager
from unicodedata import normalize, category
from itertools import cycle, chain
from time import time
from json import JSONEncoder
logger = logging.getLogger(__name__)
class Node(object):
    """
    An utility structure. Has no meaning outside.

    Allows to specify single path showing how it branches, and by doing
    `unwrap` we get multiple lists for each possible variation.
    """

    def __init__(self, data=None):
        self.data = data
        self.children = []  # branch alternatives at this position
        self.next_node = None  # next element of the chain
        self.children_cycle = None  # round-robin iterator over children
        self.ref_count = 0  # accesses served for the current child so far
        self.depth = 0  # extra repeats before advancing to the next child
        self.current = None  # child currently being served

    def add_child(self, c):
        self.children.append(Node(c))
        # Restart the cycle so the new child participates.
        self.reset_cycle()

    def add_next(self, node):
        self.next_node = node

    @property
    def child(self):
        """Serve the current child, advancing the round-robin cycle after
        `depth + 1` accesses (immediately when depth == 0)."""
        # Corner case of 0 depth
        if self.depth == 0:
            result = self.current
            self.next_child()
            return result

        if self.ref_count >= self.depth:
            self.next_child()
            self.ref_count = 0
        else:
            self.ref_count += 1
        return self.current

    def next_child(self):
        self.current = next(self.children_cycle)

    def reset_cycle(self):
        # cycle() never exhausts, so `current` is always valid once set.
        self.children_cycle = cycle(self.children)
        self.current = next(self.children_cycle)

    def unwrap(self):
        """Yield one flat [data, ...] list per combination of branches.

        NOTE(review): with default depth == 0 the per-node cycles advance on
        every access, so nested branching relies on the caller configuring
        `depth` appropriately -- confirm against rita.macros usage.
        """
        # Total variations = product of branch counts along the chain.
        variants = 1
        current = self
        while current is not None:
            variants *= current.weight
            current = current.next_node
        logger.debug("Total variants: {}".format(variants))

        for i in range(0, variants):
            result = []
            current = self
            while current is not None:
                if current.data:
                    result.append(current.data)
                if len(current.children) > 0:
                    c = current.child
                    result.append(c.data)
                current = current.next_node
            yield result

    @property
    def weight(self):
        # Number of alternatives this node contributes (at least 1).
        if len(self.children) == 0:
            return 1
        else:
            return len(self.children)

    def __repr__(self):
        return "{data}[{children}] -> {next_node}".format(
            data=self.data,
            children=", ".join(map(str, self.children)),
            next_node=str(self.next_node)
        )
class SingletonMixin(object):
    """Mixin making every instantiation of the class return one shared
    instance.

    Each subclass should declare its own ``_instance = None`` if it wants
    a singleton separate from its parent's.
    """
    _instance = None

    def __new__(class_, *args, **kwargs):
        if not isinstance(class_._instance, class_):
            # Bug fix: object.__new__ must be called with the class only --
            # forwarding constructor args raises TypeError on Python 3
            # whenever the subclass defines __init__ with parameters.
            class_._instance = object.__new__(class_)
        return class_._instance
def deaccent(text):
    """Strip diacritical marks from *text*.

    Decomposes to NFD, drops combining marks (category "Mn"), and
    recomposes the remainder to NFC.
    """
    decomposed = normalize("NFD", text)
    kept = [ch for ch in decomposed if category(ch) != "Mn"]
    return normalize("NFC", "".join(kept))
def flatten(lst, shallow=False):
    """Expand callable items of *lst* and optionally chain one level.

    Contract (preserved from the original):
    * a list with more than one element is returned untouched unless
      *shallow* is set;
    * with *shallow*, each item is expanded (callables are invoked) but
      sub-lists are kept intact;
    * otherwise the expanded items are chained into a single iterator.
    """
    def expand(item):
        return item() if callable(item) else item

    if not shallow and len(lst) > 1:
        return lst

    expanded = map(expand, lst)
    if shallow:
        return expanded
    return chain(*expanded)
class ExtendedOp(object):
    """Wrapper around an operator string ("+", "*", "?", "!" or None) that
    also carries per-rule overrides for case sensitivity and local-regex
    mode. Copy-constructible from another ExtendedOp.
    """

    def __init__(self, op=None):
        if isinstance(op, ExtendedOp):
            # Copy-construct: inherit the wrapped operator and both overrides.
            self.op = op.op
            self.case_sensitive_override = op.case_sensitive_override
            self.local_regex_override = op.local_regex_override
        else:
            self.op = op
            self.case_sensitive_override = False
            self.local_regex_override = False

    @property
    def value(self):
        return self.op

    def empty(self):
        # Unset or whitespace-only operators count as empty.
        return self.op is None or not self.op.strip()

    def ignore_case(self, config):
        # A per-rule override forces case-sensitive matching regardless of
        # the global configuration.
        return False if self.case_sensitive_override else config.ignore_case

    def __str__(self):
        return self.op if self.op else ""

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        if type(other) == str:
            return self.op == other
        return (self.op == other.op
                and self.case_sensitive_override == other.case_sensitive_override
                and self.local_regex_override == other.local_regex_override)
class Timer(object):
    """Wall-clock timer that logs and returns elapsed milliseconds."""

    def __init__(self, title):
        self.title = title
        self.ts = time()

    def stop(self, debug=True):
        """Log "<title> took <N>ms" and return the elapsed milliseconds.

        Logged at DEBUG level by default, INFO when *debug* is False.
        """
        now = time()
        # Bug fix: convert to milliseconds *before* truncating. The previous
        # `int(now - self.ts) * 1000` rounded the elapsed seconds down first,
        # so every sub-second interval was reported as 0ms.
        delta = int((now - self.ts) * 1000)
        msg = "{} took {}ms".format(self.title, delta)
        if debug:
            logger.debug(msg)
        else:
            logger.info(msg)
        return delta
@contextmanager
def timer(title):
    """Context manager that logs how long the wrapped block took.

    Note: the timer is only stopped (and logged) when the block exits
    normally, matching the original behavior.
    """
    clock = Timer(title)
    yield
    clock.stop()
class RitaJSONEncoder(JSONEncoder):
    """JSON encoder aware of rita's helper objects."""

    def default(self, o):
        # An ExtendedOp serializes as its raw operator string; any other
        # object falls back to its attribute dictionary.
        if isinstance(o, ExtendedOp):
            return o.op
        return o.__dict__
import logging
import ply.yacc as yacc
from functools import partial
from rita.lexer import RitaLexer
from rita import macros
logger = logging.getLogger(__name__)
def stub(*args, **kwargs):
    """Placeholder result: accepts any arguments and produces nothing."""
    return None
def either(a, b):
    """Lazily yield both alternatives of a `|` (pipe) expression."""
    yield from (a, b)
def load_macro(name, config):
    """Resolve macro *name* to a callable bound to *config*.

    Built-in macros from `rita.macros` are bound eagerly; any other name
    falls back to a lazily-resolved lookup through the user-registered
    modules in ``config.modules``.
    """
    try:
        return partial(getattr(macros, name), config=config)
    except Exception:
        # Not a built-in macro -- fall through to the module lookup below.
        pass

    def lazy_load(*args, **kwargs):
        # Resolution is deferred to call time, so modules registered after
        # parsing are still considered. First module providing the name wins.
        logger.info(config.modules)
        for mod in config.modules:
            try:
                fn = getattr(mod, name)
                return fn(*args, **kwargs)
            except Exception as ex:
                logger.error(ex)
                continue

        raise RuntimeError("MACRO {} not loaded".format(name))
    return lazy_load
def var_wrapper(variable, config):
    """Wrap a variable reference so its value is read lazily at call time."""
    def resolve(*args, **kwargs):
        logger.debug("Variables: {}".format(config.variables))
        return config.get_variable(variable)
    return resolve
class RitaParser(object):
    """PLY-based parser for the rita DSL.

    IMPORTANT: the docstrings of the `p_*` methods below ARE the grammar
    rules consumed by ply.yacc -- they must not be edited as documentation.
    """
    tokens = RitaLexer.tokens
    # Operator precedence / associativity table consumed by ply.yacc.
    precedence = (
        ("nonassoc", "ARROW"),
        ("nonassoc", "PIPE"),
        ("nonassoc", "COMMA"),
        ("left", "EXEC"),
        ("left", "ASSIGN"),
        ("left", "RBRACKET", "LBRACKET", "LPAREN", "RPAREN"),
        ("left", "KEYWORD", "NAME", "LITERAL"),
        ("right", "MODIF_QMARK", "MODIF_STAR", "MODIF_PLUS"),
    )

    def __init__(self, config):
        self.config = config
        self.lexer = None
        self.parser = None

    def p_document(self, p):
        """
        DOCUMENT : MACRO_CHAIN
                 | MACRO_EXEC
                 | VARIABLE
        """
        logger.debug("Building initial document {}".format(p[1]))
        p[0] = [p[1]]

    def p_document_list(self, p):
        """
        DOCUMENT : DOCUMENT MACRO_CHAIN
                 | DOCUMENT MACRO_EXEC
                 | DOCUMENT VARIABLE
        """
        logger.debug("Extending document {}".format(p[2]))
        p[0] = p[1] + [p[2]]

    def p_macro_chain(self, p):
        " MACRO_CHAIN : MACRO ARROW MACRO "
        # lhs macro becomes a PATTERN argument of the rhs macro.
        logger.debug("Have {0} -> {1}".format(p[1], p[3]))
        p[0] = partial(
            p[3],
            macros.PATTERN(p[1], config=self.config),
            config=self.config
        )

    def p_macro_chain_from_array(self, p):
        " MACRO_CHAIN : ARRAY ARROW MACRO "
        # Same as above, but the lhs is a bracketed list of macros.
        logger.debug("Have {0} -> {1}".format(p[1], p[3]))
        p[0] = partial(
            p[3],
            macros.PATTERN(*p[1], config=self.config),
            config=self.config
        )

    def p_macro_exec(self, p):
        " MACRO_EXEC : EXEC MACRO "
        # Executed immediately for its side effects; the production itself
        # contributes nothing to the document.
        logger.debug("Exec {0}".format(p[2]))
        macros.EXEC(p[2], config=self.config)
        p[0] = stub

    def p_macro_w_modif(self, p):
        """
        MACRO : MACRO MODIF_PLUS
              | MACRO MODIF_STAR
              | MACRO MODIF_QMARK
              | MACRO EXEC
        """
        # Attach a quantifier-style modifier ("+", "*", "?", "!").
        logger.debug("Adding modifier to Macro {}".format(p[1]))
        fn = p[1]
        p[0] = partial(fn, op=p[2])

    def p_macro_wo_args(self, p):
        " MACRO : KEYWORD "
        fn = load_macro(p[1], config=self.config)
        logger.debug("Parsing macro (w/o args): {}".format(p[1]))
        p[0] = fn

    def p_macro_w_args(self, p):
        " MACRO : KEYWORD LPAREN ARGS RPAREN "
        logger.debug("Parsing macro: {0}, args: {1}".format(p[1], p[3]))
        fn = load_macro(p[1], config=self.config)
        p[0] = partial(fn, *p[3])

    def p_macro_from_array(self, p):
        """
        MACRO : KEYWORD ARRAY
              | KEYWORD ARG_ARRAY
        """
        logger.debug("Parsing macro: {0}, args: {1}".format(p[1], p[2]))
        fn = load_macro(p[1], config=self.config)
        p[0] = partial(fn, *p[2])

    def p_array(self, p):
        " ARRAY : LBRACKET ARGS RBRACKET "
        p[0] = p[2]

    def p_arg_array(self, p):
        " ARG_ARRAY : LPAREN ARGS RPAREN "
        p[0] = p[2]

    def p_variable(self, p):
        " VARIABLE_NAME : NAME "
        # Deferred lookup: the variable is resolved when the rule is built.
        p[0] = var_wrapper(p[1], self.config)

    def p_variable_from_args(self, p):
        " VARIABLE : NAME ASSIGN ARGS "
        # Single-value assignments store the scalar, not a 1-element list.
        if len(p[3]) == 1:
            macros.ASSIGN(p[1], p[3][0], config=self.config)
        else:
            macros.ASSIGN(p[1], p[3], config=self.config)
        p[0] = stub

    def p_either(self, p):
        " ARG : ARG PIPE ARG "
        p[0] = either(p[1], p[3])

    def p_arg_list(self, p):
        " ARGS : ARGS COMMA ARG "
        p[0] = p[1] + [p[3]]

    def p_args(self, p):
        " ARGS : ARG "
        p[0] = [p[1]]

    def p_arg(self, p):
        " ARG : LITERAL "
        p[0] = p[1]

    def p_arg_from_macro(self, p):
        " ARG : MACRO "
        p[0] = p[1]

    def p_arg_from_var(self, p):
        " ARG : VARIABLE_NAME "
        # Resolve immediately: argument positions want the value itself.
        p[0] = p[1]()

    def p_arg_from_array(self, p):
        " ARGS : ARRAY "
        p[0] = p[1]

    def p_error(self, p):
        # ply error hook; p is None at end-of-input errors.
        if p:
            logger.error("Syntax error at '{}'".format(p.value))
        else:
            logger.error("p is null")

    def build(self, **kwargs):
        """Construct the lexer and the yacc parser (call before parse())."""
        self.lexer = RitaLexer().build(**kwargs)
        self.parser = yacc.yacc(module=self, errorlog=logger, **kwargs)

    def parse(self, data):
        """Parse DSL source text into a list of rule-building callables."""
        if data.strip() == "":
            return []
        return self.parser.parse(r"{}".format(data), lexer=self.lexer, debug=logger)
import logging
import re
import json
from functools import partial
from itertools import groupby, chain
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Any, TYPE_CHECKING, Mapping, Callable
from rita.utils import ExtendedOp
from rita.types import Rules, Patterns
logger = logging.getLogger(__name__)
ParseFn = Callable[[Any, "SessionConfig", ExtendedOp], str]
if TYPE_CHECKING:
# We cannot simply import SessionConfig because of cyclic imports
from rita.config import SessionConfig
def apply_operator(syntax, op: ExtendedOp) -> str:
    """Attach a rule operator to an already-built regex group.

    "+", "*" and "?" are appended directly as regex quantifiers; "!"
    negates the group via a negative lookahead that still consumes a word.
    """
    if op.empty():
        return syntax
    elif str(op) == "!":  # A bit complicated one
        # Strip the group's outer parens before embedding it in the
        # lookahead, so "(foo)" becomes "((?!foo)\w+)".
        return (r"((?!{})\w+)".format(syntax
                                      .rstrip(")")
                                      .lstrip("(")))
    else:
        return syntax + str(op)
def any_of_parse(lst, config: "SessionConfig", op: ExtendedOp) -> str:
    """Build an alternation group matching any word in *lst*.

    Alternatives are sorted longest-first (ties alphabetical) so a shorter
    option never shadows a longer one in the regex alternation.
    """
    clause = r"((^|\s)(({0})\s?))".format("|".join(sorted(lst, key=lambda x: (-len(x), x))))
    return apply_operator(clause, op)
def regex_parse(r, config: "SessionConfig", op: ExtendedOp) -> str:
    """Wrap a raw regex rule in a group, honoring the local-regex override."""
    if op.local_regex_override:
        # Word-bounded interpretation (see local_regex_parse).
        return local_regex_parse(r, config, op)
    else:
        initial = "(" + r + r"\s?" + ")"
        return apply_operator(initial, op)
def local_regex_parse(r, config: "SessionConfig", op: ExtendedOp) -> str:
    """Interpret a regex rule as a within-word match.

    `^`/`$` anchors are translated into word-prefix / word-suffix semantics
    instead of line anchors; an unanchored pattern matches anywhere inside
    a word. The result is wrapped in \\b word boundaries.
    """
    if r[0] == "^" and r[-1] == "$":  # Fully strictly defined string?
        pattern = r[1:-1]
    elif r[0] == "^":  # We define start of the string
        pattern = r[1:] + r"\w*"
    elif r[-1] == "$":  # We define end of string
        pattern = r"\w*" + r[:-1]
    else:  # We define string inside word
        pattern = r"\w*" + r + r"\w*"

    initial = "(" + r"\b" + pattern + r"\b" + r"\s?" + ")"
    return apply_operator(initial, op)
def not_supported(key, *args, **kwargs) -> str:
    """Fail loudly for rule types the standalone regex engine cannot express."""
    message = "Rule '{0}' is not supported in standalone mode".format(key)
    raise RuntimeError(message)
def person_parse(config: "SessionConfig", op: ExtendedOp) -> str:
    """Crude PERSON heuristic: a single capitalized word."""
    return apply_operator(r"([A-Z]\w+\s?)", op)
def entity_parse(value, config: "SessionConfig", op: ExtendedOp) -> str:
    """Translate an ENTITY rule; only PERSON has a regex approximation."""
    if value == "PERSON":
        return person_parse(config, op=op)
    else:
        # Other entity labels require a real NER model (spaCy engine).
        return not_supported(value)
def punct_parse(_, config: "SessionConfig", op: ExtendedOp) -> str:
    """Match a single common punctuation character."""
    return apply_operator(r"([.,!;?:]\s?)", op)
def word_parse(value, config: "SessionConfig", op: ExtendedOp) -> str:
    """Match a literal word, followed by an optional space."""
    initial = r"({}\s?)".format(value)
    return apply_operator(initial, op)
def fuzzy_parse(r, config: "SessionConfig", op: ExtendedOp) -> str:
    """Match any of the provided variants, allowing trailing punctuation."""
    # TODO: build premutations
    return apply_operator(r"({0})[.,?;!]?".format("|".join(r)), op)
def phrase_parse(value, config: "SessionConfig", op: ExtendedOp) -> str:
    """Match a literal multi-word phrase, followed by an optional space."""
    return apply_operator(r"({}\s?)".format(value), op)
def nested_parse(values, config: "SessionConfig", op: ExtendedOp) -> str:
    """Recursively translate a nested rule group into one named regex group.

    The group is named "g<N>" with N drawn from the session config so that
    nested groups stay unique across the whole rule set.
    """
    from rita.macros import resolve_value
    (_, patterns) = rules_to_patterns("", [resolve_value(v, config=config)
                                          for v in values], config=config)
    return r"(?P<g{}>{})".format(config.new_nested_group_id(), "".join(patterns))
def any_parse(_, config: "SessionConfig", op: ExtendedOp) -> str:
    """Wildcard rule: match anything (delegates to regex_parse with .*)."""
    return regex_parse(r".*", config, op)
# Dispatch table mapping rule types produced by the grammar to their regex
# emitters. spaCy-only rule types (LEMMA, POS) raise at build time.
PARSERS: Mapping[str, ParseFn] = {
    "any_of": any_of_parse,
    "any": any_parse,
    "value": word_parse,
    "regex": regex_parse,
    "entity": entity_parse,
    "lemma": partial(not_supported, "LEMMA"),
    "pos": partial(not_supported, "POS"),
    "punct": punct_parse,
    "fuzzy": fuzzy_parse,
    "phrase": phrase_parse,
    "nested": nested_parse,
}
def rules_to_patterns(label: str, data: Patterns, config: "SessionConfig"):
    """Translate one labeled rule list into (label, [regex fragment, ...])."""
    logger.debug("data: {}".format(data))

    def gen():
        """
        Implicitly add spaces between rules
        """
        # NOTE(review): this generator currently yields items unchanged --
        # the first tuple specially, the rest as-is -- so no spacing is
        # actually inserted; looks vestigial, confirm before relying on it.
        if len(data) == 0:
            return

        yield data[0]
        for (t, d, op) in data[1:]:
            yield t, d, op

    return (
        label,
        [PARSERS[t](d, config, op) for (t, d, op) in gen()],
    )
class RuleExecutor(object):
    """Compiles labeled rule lists into named-group regexes and runs them
    concurrently over input text, keeping the longest match at each offset.
    """

    def __init__(self, patterns, config, regex_impl=re, max_workers=4):
        self.config = config
        # Pluggable regex module: stdlib `re` by default; a drop-in such as
        # the third-party `regex` package can be substituted.
        self.regex_impl = regex_impl
        self.patterns = [self.compile(label, rules)
                         for label, rules in patterns]
        self.raw_patterns = patterns
        self.max_workers = max_workers

    def compile(self, label, rules):
        """Join one rule list into a single compiled pattern named *label*."""
        flags = self.regex_impl.DOTALL
        if self.config.ignore_case:
            flags = flags | self.regex_impl.IGNORECASE
        # Wrap each fragment in a numbered group (s0, s1, ...) unless it is
        # already a named group (e.g. a nested pattern).
        indexed_rules = ["(?P<s{}>{})".format(i, r) if not r.startswith("(?P<") else r
                         for i, r in enumerate(rules)]
        regex_str = r"(?P<{0}>{1})".format(label, "".join(indexed_rules))
        try:
            return self.regex_impl.compile(regex_str, flags)
        except Exception as ex:
            # A broken rule is logged rather than taking down the rule set.
            # NOTE(review): the resulting None ends up in self.patterns and
            # would break _match_task if ever reached -- confirm upstream
            # rules are always compilable.
            logger.exception("Failed to compile: '{0}', Reason: \n{1}".format(regex_str, str(ex)))
            return None

    def _match_task(self, pattern, text, include_submatches):
        """Collect all matches of one compiled pattern (runs in a worker)."""
        def gen():
            for match in pattern.finditer(text):
                def submatches():
                    # Report every non-empty named group with its span.
                    for k, v in match.groupdict().items():
                        if not v or v.strip() == "":
                            continue
                        yield {
                            "key": k,
                            "text": v.strip(),
                            "start": match.start(k),
                            "end": match.end(k)
                        }

                yield {
                    "start": match.start(),
                    "end": match.end(),
                    "text": match.group().strip(),
                    "label": match.lastgroup,
                    "submatches": sorted(list(submatches()), key=lambda x: x["start"]) if include_submatches else []
                }
        return list(gen())

    def _results(self, text, include_submatches):
        # Fan the patterns out over a thread pool; completion order is
        # irrelevant because execute() re-sorts by match start.
        with ThreadPoolExecutor(self.max_workers) as executor:
            tasks = [executor.submit(self._match_task, p, text, include_submatches)
                     for p in self.patterns]
            for future in as_completed(tasks):
                yield future.result(timeout=1)

    def execute(self, text, include_submatches=True):
        """Yield matches sorted by position; when several patterns match at
        the same start offset, keep only the longest one."""
        results = sorted(chain(*self._results(text, include_submatches)), key=lambda x: x["start"])
        for k, g in groupby(results, lambda x: x["start"]):
            group = list(g)
            if len(group) == 1:
                yield group[0]
            else:
                data = sorted(group, key=lambda x: -x["end"])
                yield data[0]

    @staticmethod
    def load(path, regex_impl=re):
        """Rebuild an executor from a file of JSON-encoded pattern lines
        (the format written by save())."""
        from rita.config import SessionConfig
        config = SessionConfig()
        with open(path, "r") as f:
            patterns = [(obj["label"], obj["rules"])
                        for obj in map(json.loads, f.readlines())]
        return RuleExecutor(patterns, config, regex_impl=regex_impl)

    def save(self, path):
        """Persist the raw patterns as one JSON object per line."""
        with open(path, "w") as f:
            for pattern in self:
                f.write("{0}\n".format(json.dumps(pattern)))

    def __iter__(self):
        # Iterate raw (uncompiled) patterns as serializable dicts.
        for label, rules in self.raw_patterns:
            yield {"label": label, "rules": rules}
def compile_rules(rules: Rules, config: "SessionConfig", regex_impl=re, **kwargs) -> RuleExecutor:
    """Entry point for the standalone engine: translate parsed rules into
    regex patterns and wrap them in a RuleExecutor.

    Extra keyword arguments are accepted for interface parity with other
    engines but ignored here.
    """
    logger.info("Using standalone rule implementation")
    patterns = [rules_to_patterns(*group, config=config) for group in rules]
    executor = RuleExecutor(patterns, config, regex_impl=regex_impl)
    return executor
import json
import logging
from platform import system
from ctypes import (c_char_p, c_int, c_uint, c_long, Structure, cdll, POINTER)
from typing import Any, TYPE_CHECKING, Tuple, List, AnyStr
from rita.engine.translate_standalone import rules_to_patterns, RuleExecutor
from rita.types import Rules
logger = logging.getLogger(__name__)
field = Tuple[AnyStr, Any]
fields = List[field]
if TYPE_CHECKING:
# We cannot simply import SessionConfig because of cyclic imports
from rita.config import SessionConfig
class NamedRangeResult(Structure):
    """C struct for one named submatch: [start, end) offsets plus its group name."""

    _fields_ = [
        ("start", c_long),
        ("end", c_long),
        ("name", c_char_p),
    ]
class ResultEntity(Structure):
    """C struct for one top-level match: label, span and submatch count."""

    _fields_ = [
        ("label", c_char_p),
        ("start", c_long),
        ("end", c_long),
        ("sub_count", c_uint),
    ]
class Result(Structure):
    """C struct header for a batch of matches; holds only the match count."""

    _fields_ = [
        ("count", c_uint)
    ]
class Context(Structure):
    """Opaque handle to the compiled matcher state on the rust side.

    ``_fields_`` is intentionally empty: the struct contents are never
    read from Python, only passed back through the C API.
    """

    _fields_: fields = []
def load_lib():
    """Load the rita-rust shared library and declare its C ABI.

    Returns the ctypes library handle, or implicitly None (after logging an
    error) when the shared library cannot be found or loaded.
    """
    try:
        os_name = system()
        # Platform-specific shared-library naming conventions.
        if os_name == "Windows":
            lib = cdll.LoadLibrary("rita_rust.dll")
        elif os_name == "Darwin":
            lib = cdll.LoadLibrary("librita_rust.dylib")
        else:
            lib = cdll.LoadLibrary("librita_rust.so")
        # Declare argument/return types so ctypes marshals pointers correctly.
        lib.compile.restype = POINTER(Context)
        lib.execute.argtypes = [POINTER(Context), c_char_p]
        lib.execute.restype = POINTER(Result)
        lib.clean_env.argtypes = [POINTER(Context)]
        lib.clean_result.argtypes = [POINTER(Result)]
        lib.read_result.argtypes = [POINTER(Result), c_int]
        lib.read_result.restype = POINTER(ResultEntity)
        lib.read_submatch.argtypes = [POINTER(ResultEntity), c_int]
        lib.read_submatch.restype = POINTER(NamedRangeResult)
        return lib
    except Exception as ex:
        logger.error("Failed to load rita-rust library, reason: {}\n\n"
                     "Most likely you don't have required shared library to use it".format(ex))
class RustRuleExecutor(RuleExecutor):
    """RuleExecutor backed by the rita-rust shared library via ctypes."""

    def __init__(self, patterns, config: "SessionConfig"):
        self.config = config
        self.context = None  # pointer to the rust-side compiled state
        self.lib = load_lib()

        # One combined named-group regex string per (label, rules) group.
        self.patterns = [self._build_regex_str(label, rules)
                         for label, rules in patterns]

        self.compile()

    @staticmethod
    def _build_regex_str(label, rules):
        """Join rule fragments into one regex wrapped in a group named *label*.

        Fragments that are not already named groups get synthetic names
        (s0, s1, ...) so submatches can be read back by name later.
        """
        indexed_rules = ["(?P<s{}>{})".format(i, r) if not r.startswith("(?P<") else r
                         for i, r in enumerate(rules)]
        return r"(?P<{0}>{1})".format(label, "".join(indexed_rules))

    def compile(self):
        """Compile all patterns on the rust side; stores and returns the context."""
        # NOTE(review): flag 0 appears to mean case-insensitive on the rust
        # side — confirm against the rita-rust crate's API.
        flag = 0 if self.config.ignore_case else 1
        c_array = (c_char_p * len(self.patterns))(*list([p.encode("UTF-8") for p in self.patterns]))
        self.context = self.lib.compile(c_array, len(c_array), flag)
        return self.context

    def execute(self, text, include_submatches=True):
        """Yield match dicts for *text* by walking the rust-side result structs."""
        result_ptr = self.lib.execute(self.context, text.encode("UTF-8"))
        count = result_ptr[0].count
        for i in range(0, count):
            match_ptr = self.lib.read_result(result_ptr, i)
            match = match_ptr[0]
            matched_text = text[match.start:match.end].strip()

            def parse_subs():
                # Read the named submatch structs attached to this match,
                # skipping whitespace-only spans.
                k = match.sub_count
                for j in range(0, k):
                    s = self.lib.read_submatch(match_ptr, j)[0]
                    start = s.start
                    end = s.end
                    sub_text = text[start:end]
                    if sub_text.strip() == "":
                        continue
                    yield {
                        "text": sub_text.strip(),
                        "start": start,
                        "end": end,
                        "key": s.name.decode("UTF-8"),
                    }

            yield {
                "start": match.start,
                "end": match.end,
                "text": matched_text,
                "label": match.label.decode("UTF-8"),
                "submatches": list(parse_subs()) if include_submatches else []
            }

    def clean_context(self):
        """Release the rust-side compiled state."""
        self.lib.clean_env(self.context)

    @staticmethod
    def load(path):
        """Rebuild an executor from a JSON-lines rules file (see RuleExecutor.save)."""
        from rita.config import SessionConfig  # local import avoids a cyclic import
        config = SessionConfig()
        with open(path, "r") as f:
            patterns = [(obj["label"], obj["rules"])
                        for obj in map(json.loads, f.readlines())]
        return RustRuleExecutor(patterns, config)
def compile_rules(rules: Rules, config: "SessionConfig", **kwargs) -> RustRuleExecutor:
    """Compile parsed rule groups into a rust-backed RustRuleExecutor."""
    logger.info("Using rita-rust rule implementation")
    compiled = [rules_to_patterns(*group, config=config) for group in rules]
    return RustRuleExecutor(compiled, config)
import logging
from functools import partial
from typing import Any, TYPE_CHECKING, Mapping, Callable, Generator, AnyStr
from rita.utils import ExtendedOp
from rita.types import Rules, Patterns
logger = logging.getLogger(__name__)
SpacyPattern = Generator[Mapping[AnyStr, Any], None, None]
ParseFn = Callable[[Any, "SessionConfig", ExtendedOp], SpacyPattern]
if TYPE_CHECKING:
# We cannot simply import SessionConfig because of cyclic imports
from rita.config import SessionConfig
def any_of_parse(lst, config: "SessionConfig", op: ExtendedOp) -> SpacyPattern:
    """Match any single token from *lst*.

    Case-insensitive mode compares lowercased forms via LOWER. The
    case-sensitive branch now matches on TEXT: it previously used LOWER
    with un-lowercased items, so mixed-case entries could never match —
    and it was inconsistent with regex_parse/tag_parse, which use TEXT
    for the case-sensitive path.
    """
    if op.ignore_case(config):
        normalized = sorted([item.lower()
                             for item in lst])
        base = {"LOWER": {"IN": normalized}}
    else:
        base = {"TEXT": {"IN": sorted(lst)}}

    if not op.empty():
        base["OP"] = op.value
    yield base
def regex_parse(r, config: "SessionConfig", op: ExtendedOp) -> SpacyPattern:
    """Match one token against a regex (lowercased form when ignoring case)."""
    if op.ignore_case(config):
        pattern = {"LOWER": {"REGEX": r.lower()}}
    else:
        pattern = {"TEXT": {"REGEX": r}}

    if not op.empty():
        pattern["OP"] = op.value
    yield pattern
def fuzzy_parse(r, config: "SessionConfig", op: ExtendedOp) -> SpacyPattern:
    """Match any of the given variants (lowercased), allowing one trailing
    punctuation character.
    """
    # TODO: build permutations
    d = {"LOWER": {"REGEX": "({0})[.,?;!]?".format("|".join(r))}}
    if not op.empty():
        d["OP"] = op.value
    yield d
def generic_parse(tag, value, config: "SessionConfig", op: ExtendedOp) -> SpacyPattern:
    """Build a single-attribute token pattern, e.g. ``{"LEMMA": ...}``.

    A multi-element list becomes an ``{"IN": [...]}`` alternative. A
    single-element list is now unwrapped to its scalar: previously it was
    passed through as a raw one-element list, which is not a valid spaCy
    attribute value.
    """
    d = {}

    if isinstance(value, list):
        if len(value) > 1:
            value = {"IN": value}
        elif len(value) == 1:
            value = value[0]

    d[tag] = value
    if not op.empty():
        d["OP"] = op.value
    yield d
def entity_parse(value, config: "SessionConfig", op: ExtendedOp) -> SpacyPattern:
    """Match on spaCy entity type, defaulting the operator to "+".

    NOTE: mutates the passed-in *op* when it is empty.
    """
    tag = "ENT_TYPE"
    if op.empty():
        # Entities usually span multiple tokens, so default to one-or-more.
        op.op = "+"
    return generic_parse(tag, value, config, op)
def punct_parse(_, config: "SessionConfig", op: ExtendedOp) -> SpacyPattern:
    """Match a single punctuation token; the payload argument is ignored."""
    pattern = {"IS_PUNCT": True}
    if not op.empty():
        pattern["OP"] = op.value
    yield pattern
def any_parse(_, config: "SessionConfig", op: ExtendedOp) -> SpacyPattern:
    """Match any token at all; the payload argument is ignored."""
    pattern = {}
    if not op.empty():
        pattern["OP"] = op.value
    yield pattern
def phrase_parse(value, config: "SessionConfig", op: ExtendedOp) -> SpacyPattern:
    """
    Split a phrase into one token pattern per word (and per "-" separator).

    TODO: Does not support operators
    """
    # First separator ("-" before " ") found in the phrase, or None.
    splitter = next((s for s in ["-", " "]
                     if s in value), None)
    if splitter:
        buff = value.split(splitter)
        yield next(orth_parse(buff[0], config=config, op=ExtendedOp()))
        for b in buff[1:]:
            if splitter != " ":
                # "-" is tokenized as its own token, so emit it between words.
                yield next(orth_parse(splitter, config=config, op=ExtendedOp()))
            yield next(orth_parse(b, config=config, op=ExtendedOp()))
    else:
        # Single word: just one exact-text token.
        yield next(orth_parse(value, config=config, op=ExtendedOp()))
def tag_parse(values, config: "SessionConfig", op: ExtendedOp) -> SpacyPattern:
    """
    For generating POS/TAG patterns based on a Regex
    e.g. TAG("^NN|^JJ") for adjectives or nouns
    also deals with TAG_WORD for tag and word or tag and list
    """
    d = {"TAG": {"REGEX": values["tag"]}}
    if "word" in values:
        # Constrain the tagged token to one specific word.
        if op.ignore_case(config):
            d["LOWER"] = values["word"].lower()
        else:
            d["TEXT"] = values["word"]
    elif "list" in values:
        # Constrain the tagged token to any word from the list,
        # expressed as an anchored alternation regex.
        lst = values["list"]
        if op.ignore_case(config):
            normalized = sorted([item.lower()
                                 for item in lst])
            d["LOWER"] = {"REGEX": r"^({0})$".format("|".join(normalized))}
        else:
            d["TEXT"] = {"REGEX": r"^({0})$".format("|".join(sorted(lst)))}
    if not op.empty():
        d["OP"] = op.value
    yield d
def nested_parse(values, config: "SessionConfig", op: ExtendedOp) -> SpacyPattern:
    """Expand a nested branch into its flat token-pattern list.

    The operator argument is ignored; the nested group is compiled with an
    empty label and only its "pattern" part is returned.
    """
    from rita.macros import resolve_value  # local import avoids a cyclic import
    results = rules_to_patterns("", [resolve_value(v, config=config)
                                     for v in values], config=config)
    return results["pattern"]
def orth_parse(value, config: "SessionConfig", op: ExtendedOp) -> SpacyPattern:
    """Match one token by exact text (ORTH) or lowercased form (LOWER)."""
    # (A stray debug `print(op.case_sensitive_override)` was removed here.)
    d = {}
    if op.ignore_case(config):
        d["LOWER"] = value.lower()
    else:
        d["ORTH"] = value
    if not op.empty():
        d["OP"] = op.value
    yield d
# Dispatch table: rule keyword -> parser that expands it into token dicts.
# "value" and "orth" are aliases for the same exact-text parser.
PARSERS: Mapping[str, ParseFn] = {
    "any_of": any_of_parse,
    "any": any_parse,
    "value": orth_parse,
    "regex": regex_parse,
    "entity": entity_parse,
    "lemma": partial(generic_parse, "LEMMA"),
    "pos": partial(generic_parse, "POS"),
    "punct": punct_parse,
    "fuzzy": fuzzy_parse,
    "phrase": phrase_parse,
    "tag": tag_parse,
    "nested": nested_parse,
    "orth": orth_parse,
}
def rules_to_patterns(label: str, data: Patterns, config: "SessionConfig"):
    """Translate one rule group into a spaCy EntityRuler-style pattern dict.

    Each (parser-name, payload, operator) triple in *data* is expanded by
    the matching PARSERS entry into zero or more token dicts.
    """
    logger.debug(data)
    return {
        "label": label,
        "pattern": [p
                    for (t, d, op) in data
                    for p in PARSERS[t](d, config, ExtendedOp(op))],
    }
def compile_rules(rules: Rules, config: "SessionConfig", **kwargs):
    """Compile parsed rule groups into spaCy EntityRuler pattern dicts."""
    logger.info("Using spaCy rules implementation")
    return [
        rules_to_patterns(group_label, group_patterns, config=config)
        for group_label, group_patterns in rules
    ]
rithm
=====
[](https://github.com/lycantropos/rithm/actions/workflows/ci.yml "Github Actions")
[](https://codecov.io/gh/lycantropos/rithm "Codecov")
[](https://github.com/lycantropos/rithm/blob/master/LICENSE "License")
[](https://badge.fury.io/py/rithm "PyPI")
[](https://crates.io/crates/rithm "crates.io")
In what follows `python` is an alias for `python3.7` or `pypy3.7`
or any later version (`python3.8`, `pypy3.8` and so on).
Installation
------------
Install the latest `pip` & `setuptools` packages versions
```bash
python -m pip install --upgrade pip setuptools
```
### User
Download and install the latest stable version from `PyPI` repository
```bash
python -m pip install --upgrade rithm
```
### Developer
Download the latest version from `GitHub` repository
```bash
git clone https://github.com/lycantropos/rithm.git
cd rithm
```
Install dependencies
```bash
python -m pip install -r requirements.txt
```
Install
```bash
python setup.py install
```
Usage
-----
### Python
#### Arbitrary precision integer
With setup
```python
>>> from rithm import Int
```
we can:
- construct
```python
>>> Int()
rithm.Int(0)
>>> Int(9)
rithm.Int(9)
>>> Int('9')
rithm.Int(9)
>>> Int('0b1_001', 2)
rithm.Int(9)
>>> Int('0o11', 8)
rithm.Int(9)
>>> Int('0x9', 16)
rithm.Int(9)
>>> Int('1_001', 2)
rithm.Int(9)
>>> Int('0o11', 8)
rithm.Int(9)
>>> Int('9', 16)
rithm.Int(9)
>>> Int(9.99)
rithm.Int(9)
```
- compare
```python
>>> Int(9) == Int(9)
True
>>> Int(9) >= Int(9)
True
>>> Int(9) > Int(8)
True
>>> Int(9) <= Int(9)
True
>>> Int(9) < Int(10)
True
```
- calculate
```python
>>> abs(Int(-9))
rithm.Int(9)
>>> Int(4) + Int(5)
rithm.Int(9)
>>> Int(9) & Int(11)
rithm.Int(9)
>>> Int(19) // Int(2)
rithm.Int(9)
>>> ~Int(-10)
rithm.Int(9)
>>> Int(19) % Int(10)
rithm.Int(9)
>>> Int(3) * Int(3)
rithm.Int(9)
>>> -Int(-9)
rithm.Int(9)
>>> Int(1) | Int(8)
rithm.Int(9)
>>> Int(3) ** Int(2)
rithm.Int(9)
>>> Int(5) << Int(1)
rithm.Int(10)
>>> Int(5) >> Int(1)
rithm.Int(2)
>>> Int(25) - Int(16)
rithm.Int(9)
>>> Int(18) / Int(2)
rithm.Fraction(rithm.Int(9), rithm.Int(1))
>>> Int(2) ^ Int(11)
rithm.Int(9)
```
#### Exact fraction
With setup
```python
>>> from rithm import Fraction
```
we can:
- construct
```python
>>> Fraction()
rithm.Fraction(rithm.Int(0), rithm.Int(1))
>>> Fraction(1)
rithm.Fraction(rithm.Int(1), rithm.Int(1))
>>> Fraction(1, 2)
rithm.Fraction(rithm.Int(1), rithm.Int(2))
>>> Fraction(50, 100)
rithm.Fraction(rithm.Int(1), rithm.Int(2))
>>> Fraction(0.5)
rithm.Fraction(rithm.Int(1), rithm.Int(2))
```
- compare
```python
>>> Fraction(1, 2) == Fraction(1, 2)
True
>>> Fraction(1, 2) >= Fraction(1, 2)
True
>>> Fraction(1, 2) > Fraction(1, 3)
True
>>> Fraction(1, 2) < Fraction(2, 3)
True
>>> Fraction(1, 2) != Fraction(1, 3)
True
```
- calculate
```python
>>> abs(Fraction(-1, 2))
rithm.Fraction(rithm.Int(1), rithm.Int(2))
>>> Fraction(1, 3) + Fraction(1, 6)
rithm.Fraction(rithm.Int(1), rithm.Int(2))
>>> Fraction(3, 2) // Fraction(1)
rithm.Int(1)
>>> Fraction(3, 2) % Fraction(1)
rithm.Fraction(rithm.Int(1), rithm.Int(2))
>>> Fraction(1, 3) * Fraction(3, 2)
rithm.Fraction(rithm.Int(1), rithm.Int(2))
>>> -Fraction(-1, 2)
rithm.Fraction(rithm.Int(1), rithm.Int(2))
>>> Fraction(1, 2) ** 2
rithm.Fraction(rithm.Int(1), rithm.Int(4))
>>> Fraction(3, 2) - Fraction(1)
rithm.Fraction(rithm.Int(1), rithm.Int(2))
>>> Fraction(1, 3) / Fraction(2, 3)
rithm.Fraction(rithm.Int(1), rithm.Int(2))
```
### Rust
#### Arbitrary precision integer
```rust
/// With setup
use std::convert::TryFrom;
use traiter::numbers::{
Abs, DivEuclid, FromStrRadix, Pow, RemEuclid, Zeroable
};
use rithm::big_int;
#[cfg(target_arch = "x86")]
type Digit = u16;
#[cfg(not(target_arch = "x86"))]
type Digit = u32;
const BINARY_SHIFT: usize = (Digit::BITS - 1) as usize;
const _: () = assert!(big_int::is_valid_shift::<Digit, BINARY_SHIFT>());
type BigInt = big_int::BigInt<Digit, '_', BINARY_SHIFT>;
/// we can:
/// - construct
assert_eq!(BigInt::zero(), 0);
assert_eq!(BigInt::from(9), 9);
assert_eq!(BigInt::try_from("9").unwrap(), 9);
assert_eq!(BigInt::try_from("0b1_001").unwrap(), 9);
assert_eq!(BigInt::try_from("0o11").unwrap(), 9);
assert_eq!(BigInt::try_from("0x9").unwrap(), 9);
assert_eq!(BigInt::from_str_radix("1_001", 2).unwrap(), 9);
assert_eq!(BigInt::from_str_radix("11", 8).unwrap(), 9);
assert_eq!(BigInt::from_str_radix("9", 16).unwrap(), 9);
assert_eq!(BigInt::try_from(9.99).unwrap(), 9);
/// - compare
assert!(BigInt::from(9) == BigInt::from(9));
assert!(BigInt::from(9) >= BigInt::from(9));
assert!(BigInt::from(9) > BigInt::from(8));
assert!(BigInt::from(9) <= BigInt::from(9));
assert!(BigInt::from(9) < BigInt::from(10));
/// - calculate
assert_eq!(BigInt::from(-9).abs(), 9);
assert_eq!(BigInt::from(4) + BigInt::from(5), 9);
assert_eq!(BigInt::from(9) & BigInt::from(11), 9);
assert_eq!(BigInt::from(1) | BigInt::from(8), 9);
assert_eq!(BigInt::from(2) ^ BigInt::from(11), 9);
assert_eq!(BigInt::from(19) / BigInt::from(2), 9);
assert_eq!(BigInt::from(19).div_euclid(BigInt::from(2)), 9);
assert_eq!(BigInt::from(3) * BigInt::from(3), 9);
assert_eq!(-BigInt::from(-9), 9);
assert_eq!(!BigInt::from(-10), 9);
assert_eq!(BigInt::from(3).pow(BigInt::from(2)), 9);
assert_eq!(BigInt::from(19) % BigInt::from(10), 9);
assert_eq!(BigInt::from(19).rem_euclid(BigInt::from(10)), 9);
assert_eq!(BigInt::from(5) << 1, 10);
assert_eq!(BigInt::from(5) >> 1, 2);
assert_eq!(BigInt::from(25) - BigInt::from(16), 9);
```
#### Exact fraction
```rust
/// With setup
use std::convert::TryFrom;
use traiter::numbers::{Abs, DivEuclid, Pow, RemEuclid, Unitary, Zeroable};
use rithm::fraction;
type Fraction = fraction::Fraction<i8>;
/// we can:
/// - construct
assert_eq!(Fraction::zero(), 0);
assert_eq!(Fraction::one(), 1);
assert_eq!(Fraction::new(1, 2), Some(Fraction::from(1) / 2));
assert_eq!(Fraction::new(50, 100), Fraction::new(1, 2));
assert_eq!(Fraction::try_from(0.5).unwrap(), Fraction::new(1, 2).unwrap());
/// - compare
assert!(Fraction::new(1, 2).unwrap() == Fraction::new(1, 2).unwrap());
assert!(Fraction::new(1, 2).unwrap() >= Fraction::new(1, 2).unwrap());
assert!(Fraction::new(1, 2).unwrap() > Fraction::new(1, 3).unwrap());
assert!(Fraction::new(1, 2).unwrap() <= Fraction::new(1, 2).unwrap());
assert!(Fraction::new(1, 2).unwrap() < Fraction::new(2, 3).unwrap());
assert!(Fraction::new(1, 2).unwrap() != Fraction::new(1, 3).unwrap());
/// - calculate
assert_eq!(Fraction::new(-1, 2).unwrap().abs(), Fraction::new(1, 2).unwrap());
assert_eq!(Fraction::new(1, 3).unwrap() + Fraction::new(1, 6).unwrap(),
Fraction::new(1, 2).unwrap());
assert_eq!(Fraction::new(1, 3).unwrap() / Fraction::new(2, 3).unwrap(),
Fraction::new(1, 2).unwrap());
assert_eq!(Fraction::new(3, 2).unwrap().div_euclid(Fraction::from(1)), 1);
assert_eq!(Fraction::new(1, 3).unwrap() * Fraction::new(3, 2).unwrap(),
Fraction::new(1, 2).unwrap());
assert_eq!(-Fraction::new(-1, 2).unwrap(), Fraction::new(1, 2).unwrap());
assert_eq!(Fraction::new(1, 2).unwrap().pow(2), Fraction::new(1, 4).unwrap());
assert_eq!(Fraction::new(3, 2).unwrap() % Fraction::from(1),
Fraction::new(1, 2).unwrap());
assert_eq!(Fraction::new(3, 2).unwrap().rem_euclid(Fraction::from(1)),
Fraction::new(1, 2).unwrap());
assert_eq!(Fraction::new(3, 2).unwrap() - Fraction::from(1),
Fraction::new(1, 2).unwrap());
```
Development
-----------
### Bumping version
#### Preparation
Install
[bump2version](https://github.com/c4urself/bump2version#installation).
#### Pre-release
Choose which version number category to bump following [semver
specification](http://semver.org/).
Test bumping version
```bash
bump2version --dry-run --verbose $CATEGORY
```
where `$CATEGORY` is the target version number category name, possible
values are `patch`/`minor`/`major`.
Bump version
```bash
bump2version --verbose $CATEGORY
```
This will set version to `major.minor.patch-alpha`.
#### Release
Test bumping version
```bash
bump2version --dry-run --verbose release
```
Bump version
```bash
bump2version --verbose release
```
This will set version to `major.minor.patch`.
### Running tests
Install dependencies
```bash
python -m pip install -r requirements-tests.txt
```
Plain
```bash
pytest
```
Inside `Docker` container:
- with `CPython`
```bash
docker-compose --file docker-compose.cpython.yml up
```
- with `PyPy`
```bash
docker-compose --file docker-compose.pypy.yml up
```
`Bash` script:
- with `CPython`
```bash
./run-tests.sh
```
or
```bash
./run-tests.sh cpython
```
- with `PyPy`
```bash
./run-tests.sh pypy
```
`PowerShell` script:
- with `CPython`
```powershell
.\run-tests.ps1
```
or
```powershell
.\run-tests.ps1 cpython
```
- with `PyPy`
```powershell
.\run-tests.ps1 pypy
```
| /rithm-10.0.0.tar.gz/rithm-10.0.0/README.md | 0.654895 | 0.928344 | README.md | pypi |
from feature_engine.encoding import OrdinalEncoder, RareLabelEncoder
from feature_engine.imputation import (
AddMissingIndicator,
CategoricalImputer,
MeanMedianImputer,
)
from feature_engine.selection import DropFeatures
from feature_engine.transformation import LogTransformer
from feature_engine.wrappers import SklearnTransformerWrapper
from sklearn.linear_model import Lasso
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Binarizer, MinMaxScaler
from regression_model.config.core import config
from regression_model.processing import features as pp
# End-to-end house-price pipeline: imputation -> temporal features ->
# variable transformation -> ordinal mappings -> categorical encoding ->
# scaling -> Lasso regression. All variable groups and hyperparameters
# come from config.model_config.
price_pipe = Pipeline(
    [
        # ===== IMPUTATION =====
        # impute categorical variables with string missing
        (
            "missing_imputation",
            CategoricalImputer(
                imputation_method="missing",
                variables=config.model_config.categorical_vars_with_na_missing,
            ),
        ),
        (
            "frequent_imputation",
            CategoricalImputer(
                imputation_method="frequent",
                variables=config.model_config.categorical_vars_with_na_frequent,
            ),
        ),
        # add missing indicator
        (
            "missing_indicator",
            AddMissingIndicator(variables=config.model_config.numerical_vars_with_na),
        ),
        # impute numerical variables with the mean
        (
            "mean_imputation",
            MeanMedianImputer(
                imputation_method="mean",
                variables=config.model_config.numerical_vars_with_na,
            ),
        ),
        # == TEMPORAL VARIABLES ====
        (
            "elapsed_time",
            pp.TemporalVariableTransformer(
                variables=config.model_config.temporal_vars,
                reference_variable=config.model_config.ref_var,
            ),
        ),
        # the reference year is consumed above and no longer needed
        ("drop_features", DropFeatures(features_to_drop=[config.model_config.ref_var])),
        # ==== VARIABLE TRANSFORMATION =====
        ("log", LogTransformer(variables=config.model_config.numericals_log_vars)),
        (
            "binarizer",
            SklearnTransformerWrapper(
                transformer=Binarizer(threshold=0),
                variables=config.model_config.binarize_vars,
            ),
        ),
        # === mappers === (ordinal string categories -> integers)
        (
            "mapper_qual",
            pp.Mapper(
                variables=config.model_config.qual_vars,
                mappings=config.model_config.qual_mappings,
            ),
        ),
        (
            "mapper_exposure",
            pp.Mapper(
                variables=config.model_config.exposure_vars,
                mappings=config.model_config.exposure_mappings,
            ),
        ),
        (
            "mapper_finish",
            pp.Mapper(
                variables=config.model_config.finish_vars,
                mappings=config.model_config.finish_mappings,
            ),
        ),
        (
            "mapper_garage",
            pp.Mapper(
                variables=config.model_config.garage_vars,
                mappings=config.model_config.garage_mappings,
            ),
        ),
        # == CATEGORICAL ENCODING
        # group infrequent categories before target-ordered encoding
        (
            "rare_label_encoder",
            RareLabelEncoder(
                tol=0.01, n_categories=1, variables=config.model_config.categorical_vars
            ),
        ),
        # encode categorical variables using the target mean
        (
            "categorical_encoder",
            OrdinalEncoder(
                encoding_method="ordered",
                variables=config.model_config.categorical_vars,
            ),
        ),
        ("scaler", MinMaxScaler()),
        (
            "Lasso",
            Lasso(
                alpha=config.model_config.alpha,
                random_state=config.model_config.random_state,
            ),
        ),
    ]
) | /rithousingpackage-0.0.4-py3-none-any.whl/regression_model/pipeline.py | 0.704364 | 0.202956 | pipeline.py | pypi |
from pathlib import Path
from typing import Dict, List, Sequence
from pydantic import BaseModel
from strictyaml import YAML, load
import regression_model
# Project Directories
PACKAGE_ROOT = Path(regression_model.__file__).resolve().parent
ROOT = PACKAGE_ROOT.parent
CONFIG_FILE_PATH = PACKAGE_ROOT / "config.yml"
DATASET_DIR = PACKAGE_ROOT / "datasets"
TRAINED_MODEL_DIR = PACKAGE_ROOT / "trained_models"
class AppConfig(BaseModel):
    """
    Application-level config.
    """

    package_name: str  # installable package name
    training_data_file: str  # training data filename
    test_data_file: str  # test data filename
    pipeline_save_file: str  # filename prefix for the persisted pipeline
class ModelConfig(BaseModel):
    """
    All configuration relevant to model
    training and feature engineering.
    """

    target: str  # name of the target column
    variables_to_rename: Dict  # raw -> python-safe column names
    features: List[str]  # feature set fed to the pipeline
    test_size: float
    random_state: int
    alpha: float  # Lasso regularization strength
    # imputation groups
    categorical_vars_with_na_frequent: List[str]
    categorical_vars_with_na_missing: List[str]
    numerical_vars_with_na: List[str]
    # temporal features and their reference year variable
    temporal_vars: List[str]
    ref_var: str
    # transformation groups
    numericals_log_vars: Sequence[str]
    binarize_vars: Sequence[str]
    # ordinal-mapping groups and their category -> int mappings
    qual_vars: List[str]
    exposure_vars: List[str]
    finish_vars: List[str]
    garage_vars: List[str]
    categorical_vars: Sequence[str]
    qual_mappings: Dict[str, int]
    exposure_mappings: Dict[str, int]
    garage_mappings: Dict[str, int]
    finish_mappings: Dict[str, int]
class Config(BaseModel):
    """Master config object combining app-level and model-level settings."""

    app_config: AppConfig
    model_config: ModelConfig
def find_config_file() -> Path:
    """Locate the configuration file.

    Raises:
        FileNotFoundError: if the packaged config.yml is missing. (This is
        narrower than the generic ``Exception`` raised before, but still a
        subclass of it, so existing handlers keep working.)
    """
    if CONFIG_FILE_PATH.is_file():
        return CONFIG_FILE_PATH
    raise FileNotFoundError(f"Config not found at {CONFIG_FILE_PATH!r}")
def fetch_config_from_yaml(cfg_path: Path = None) -> YAML:
    """Parse YAML containing the package configuration.

    Falls back to the packaged config.yml when no path is given.
    ``find_config_file`` raises when that file is missing, so the previous
    unreachable "did not find config file" branch has been removed.
    """
    if not cfg_path:
        cfg_path = find_config_file()

    with open(cfg_path, "r") as conf_file:
        return load(conf_file.read())
def create_and_validate_config(parsed_config: YAML = None) -> Config:
    """Run validation on config values."""
    if parsed_config is None:
        parsed_config = fetch_config_from_yaml()

    # specify the data attribute from the strictyaml YAML type.
    # The YAML is one flat mapping; each sub-config model pulls the fields
    # it declares out of the same dict.
    _config = Config(
        app_config=AppConfig(**parsed_config.data),
        model_config=ModelConfig(**parsed_config.data),
    )

    return _config
config = create_and_validate_config() | /rithousingpackage-0.0.4-py3-none-any.whl/regression_model/config/core.py | 0.815049 | 0.348091 | core.py | pypi |
from typing import List, Optional, Tuple
import numpy as np
import pandas as pd
from pydantic import BaseModel, ValidationError
from regression_model.config.core import config
def drop_na_inputs(*, input_data: pd.DataFrame) -> pd.DataFrame:
    """Drop rows with NA in features the pipeline has no imputer for.

    Features covered by one of the configured imputation groups are left
    alone; any other feature containing NA triggers a row-wise dropna on
    that feature.
    """
    validated_data = input_data.copy()

    # Features whose missing values the pipeline already knows how to impute.
    # Built once as a set instead of re-concatenating three lists and doing a
    # linear scan for every feature.
    imputed_vars = set(
        config.model_config.categorical_vars_with_na_frequent
        + config.model_config.categorical_vars_with_na_missing
        + config.model_config.numerical_vars_with_na
    )
    new_vars_with_na = [
        var
        for var in config.model_config.features
        if var not in imputed_vars and validated_data[var].isnull().sum() > 0
    ]
    validated_data.dropna(subset=new_vars_with_na, inplace=True)

    return validated_data
def validate_inputs(*, input_data: pd.DataFrame) -> Tuple[pd.DataFrame, Optional[dict]]:
    """Check model inputs for unprocessable values.

    Returns the filtered feature frame plus pydantic error JSON (or None).
    NOTE(review): mutates *input_data* in place (the column renames and the
    MSSubClass cast) — callers see those changes; confirm that is intended.
    """
    # convert syntax error field names (beginning with numbers)
    input_data.rename(columns=config.model_config.variables_to_rename, inplace=True)
    input_data["MSSubClass"] = input_data["MSSubClass"].astype("O")
    relevant_data = input_data[config.model_config.features].copy()
    validated_data = drop_na_inputs(input_data=relevant_data)
    errors = None

    try:
        # replace numpy nans so that pydantic can validate
        MultipleHouseDataInputs(
            inputs=validated_data.replace({np.nan: None}).to_dict(orient="records")
        )
    except ValidationError as error:
        errors = error.json()

    return validated_data, errors
class HouseDataInputSchema(BaseModel):
    """Per-row schema for a single house record.

    Every field is Optional: presence/NA handling is delegated to the
    pipeline's imputers and to drop_na_inputs. Field names mirror the raw
    dataset columns, except the three renamed ones noted at the bottom
    (original names start with digits, which are not valid identifiers).
    """

    Alley: Optional[str]
    BedroomAbvGr: Optional[int]
    BldgType: Optional[str]
    BsmtCond: Optional[str]
    BsmtExposure: Optional[str]
    BsmtFinSF1: Optional[float]
    BsmtFinSF2: Optional[float]
    BsmtFinType1: Optional[str]
    BsmtFinType2: Optional[str]
    BsmtFullBath: Optional[float]
    BsmtHalfBath: Optional[float]
    BsmtQual: Optional[str]
    BsmtUnfSF: Optional[float]
    CentralAir: Optional[str]
    Condition1: Optional[str]
    Condition2: Optional[str]
    Electrical: Optional[str]
    EnclosedPorch: Optional[int]
    ExterCond: Optional[str]
    ExterQual: Optional[str]
    Exterior1st: Optional[str]
    Exterior2nd: Optional[str]
    Fence: Optional[str]
    FireplaceQu: Optional[str]
    Fireplaces: Optional[int]
    Foundation: Optional[str]
    FullBath: Optional[int]
    Functional: Optional[str]
    GarageArea: Optional[float]
    GarageCars: Optional[float]
    GarageCond: Optional[str]
    GarageFinish: Optional[str]
    GarageQual: Optional[str]
    GarageType: Optional[str]
    GarageYrBlt: Optional[float]
    GrLivArea: Optional[int]
    HalfBath: Optional[int]
    Heating: Optional[str]
    HeatingQC: Optional[str]
    HouseStyle: Optional[str]
    Id: Optional[int]
    KitchenAbvGr: Optional[int]
    KitchenQual: Optional[str]
    LandContour: Optional[str]
    LandSlope: Optional[str]
    LotArea: Optional[int]
    LotConfig: Optional[str]
    LotFrontage: Optional[float]
    LotShape: Optional[str]
    LowQualFinSF: Optional[int]
    MSSubClass: Optional[int]
    MSZoning: Optional[str]
    MasVnrArea: Optional[float]
    MasVnrType: Optional[str]
    MiscFeature: Optional[str]
    MiscVal: Optional[int]
    MoSold: Optional[int]
    Neighborhood: Optional[str]
    OpenPorchSF: Optional[int]
    OverallCond: Optional[int]
    OverallQual: Optional[int]
    PavedDrive: Optional[str]
    PoolArea: Optional[int]
    PoolQC: Optional[str]
    RoofMatl: Optional[str]
    RoofStyle: Optional[str]
    SaleCondition: Optional[str]
    SaleType: Optional[str]
    ScreenPorch: Optional[int]
    Street: Optional[str]
    TotRmsAbvGrd: Optional[int]
    TotalBsmtSF: Optional[float]
    Utilities: Optional[str]
    WoodDeckSF: Optional[int]
    YearBuilt: Optional[int]
    YearRemodAdd: Optional[int]
    YrSold: Optional[int]
    FirstFlrSF: Optional[int]  # renamed
    SecondFlrSF: Optional[int]  # renamed
    ThreeSsnPortch: Optional[int]  # renamed
class MultipleHouseDataInputs(BaseModel):
    """Batch wrapper so pydantic can validate a whole list of house records."""

    inputs: List[HouseDataInputSchema]
from datetime import timedelta
from pathlib import Path
import numpy as np
import torch
from ritm_annotation.data.datasets import (
BerkeleyDataset,
DavisDataset,
GrabCutDataset,
PascalVocDataset,
SBDEvaluationDataset,
)
from ritm_annotation.utils.serialization import load_model
def get_time_metrics(all_ious, elapsed_time):
    """Return (seconds per click, seconds per image) for an evaluation run.

    *all_ious* holds one per-click IoU sequence per image.
    """
    total_images = len(all_ious)
    total_clicks = sum(len(ious) for ious in all_ious)
    return elapsed_time / total_clicks, elapsed_time / total_images
def load_is_model(checkpoint, device, **kwargs):
    """Load an interactive-segmentation model (or ensemble) checkpoint.

    *checkpoint* is either a path to a torch checkpoint or an already-loaded
    state dict. A list checkpoint is treated as an ensemble: returns
    (first_model, all_models); otherwise returns the single model.
    """
    if isinstance(checkpoint, (str, Path)):
        state_dict = torch.load(checkpoint, map_location="cpu")
    else:
        state_dict = checkpoint

    if isinstance(state_dict, list):
        # Build each ensemble member exactly once; previously the first
        # entry was deserialized twice (once alone, once in the list).
        models = [
            load_single_is_model(x, device, **kwargs) for x in state_dict
        ]
        return models[0], models
    else:
        return load_single_is_model(state_dict, device, **kwargs)
def load_single_is_model(state_dict, device, **kwargs):
    """Instantiate one frozen, eval-mode model from a checkpoint dict.

    The dict must contain "config" (model architecture spec) and
    "state_dict" (weights); missing weight keys are tolerated
    (strict=False).
    """
    model = load_model(state_dict["config"], **kwargs)
    model.load_state_dict(state_dict["state_dict"], strict=False)

    # Inference only: freeze all parameters.
    for param in model.parameters():
        param.requires_grad = False
    model.to(device)
    model.eval()

    return model
def get_dataset(dataset_name, cfg):
    """Instantiate the evaluation dataset registered under *dataset_name*.

    Returns None for an unknown name. Factories are lazy so only the
    requested dataset is constructed.
    """
    factories = {
        "GrabCut": lambda: GrabCutDataset(cfg.GRABCUT_PATH),
        "Berkeley": lambda: BerkeleyDataset(cfg.BERKELEY_PATH),
        "DAVIS": lambda: DavisDataset(cfg.DAVIS_PATH),
        "SBD": lambda: SBDEvaluationDataset(cfg.SBD_PATH),
        "SBD_Train": lambda: SBDEvaluationDataset(cfg.SBD_PATH, split="train"),
        "PascalVOC": lambda: PascalVocDataset(cfg.PASCALVOC_PATH, split="test"),
        "COCO_MVal": lambda: DavisDataset(cfg.COCO_MVAL_PATH),
    }
    factory = factories.get(dataset_name)
    return factory() if factory is not None else None
def get_iou(gt_mask, pred_mask, ignore_label=-1):
    """Intersection-over-union of *pred_mask* against the object pixels of
    *gt_mask*, excluding pixels labeled *ignore_label*.
    """
    valid = gt_mask != ignore_label
    gt_obj = gt_mask == 1

    inter = np.logical_and(np.logical_and(pred_mask, gt_obj), valid).sum()
    union = np.logical_and(np.logical_or(pred_mask, gt_obj), valid).sum()
    return inter / union
def compute_noc_metric(all_ious, iou_thrs, max_clicks=20):
    """Mean number of clicks needed to reach each IoU threshold.

    Returns (noc_list, over_max_list): per threshold, the mean click count
    (images that never reach the threshold count as *max_clicks*) and how
    many images failed to reach it.
    """
    def _get_noc(iou_arr, iou_thr):
        vals = iou_arr >= iou_thr
        return np.argmax(vals) + 1 if np.any(vals) else max_clicks

    noc_list = []
    over_max_list = []
    for iou_thr in iou_thrs:
        # `np.int` was removed in NumPy 1.24; the builtin `int` is the
        # supported spelling of the same dtype.
        scores_arr = np.array(
            [_get_noc(iou_arr, iou_thr) for iou_arr in all_ious], dtype=int
        )

        score = scores_arr.mean()
        over_max = (scores_arr == max_clicks).sum()

        noc_list.append(score)
        over_max_list.append(over_max)

    return noc_list, over_max_list
def find_checkpoint(weights_folder, checkpoint_name):
    """Resolve a checkpoint spec to a concrete .pth path (as a string).

    Accepts "model:checkpoint" (checkpoint searched inside the single
    directory matching ``model*``), an explicit ".pth" filename/path, or a
    checkpoint-name prefix searched recursively. Asserts the match is
    unique.
    """
    weights_folder = Path(weights_folder)
    if ":" in checkpoint_name:
        model_name, checkpoint_name = checkpoint_name.split(":")

        models_candidates = [
            x for x in weights_folder.glob(f"{model_name}*") if x.is_dir()
        ]
        assert len(models_candidates) == 1
        model_folder = models_candidates[0]
    else:
        model_folder = weights_folder

    if checkpoint_name.endswith(".pth"):
        # Explicit filename: use it as-is if it exists, otherwise look
        # inside the weights folder.
        if Path(checkpoint_name).exists():
            checkpoint_path = checkpoint_name
        else:
            checkpoint_path = weights_folder / checkpoint_name
    else:
        # Prefix search, recursive under the selected model folder.
        model_checkpoints = list(model_folder.rglob(f"{checkpoint_name}*.pth"))
        assert len(model_checkpoints) == 1
        checkpoint_path = model_checkpoints[0]

    return str(checkpoint_path)
def get_results_table(
    noc_list,
    over_max_list,
    brs_type,
    dataset_name,
    mean_spc,
    elapsed_time,
    n_clicks=20,
    model_name=None,
):
    """Format evaluation results as an ASCII table, returned as
    (header, row) strings.

    NOTE(review): the optional title line uses the gettext-style ``_()``
    helper, which is not imported in this module — presumably installed
    globally (e.g. gettext.install); confirm, otherwise passing
    model_name raises NameError.
    """
    table_header = (
        f'|{"BRS Type":^13}|{"Dataset":^11}|'
        f'{"NoC@80%":^9}|{"NoC@85%":^9}|{"NoC@90%":^9}|'
        f'{">="+str(n_clicks)+"@85%":^9}|{">="+str(n_clicks)+"@90%":^9}|'
        f'{"SPC,s":^7}|{"Time":^9}|'
    )
    row_width = len(table_header)

    header = (
        _("Eval results for model: {model_name}\n").format(
            model_name=model_name
        )
        if model_name is not None
        else ""
    )
    header += "-" * row_width + "\n"
    header += table_header + "\n" + "-" * row_width

    eval_time = str(timedelta(seconds=int(elapsed_time)))

    table_row = f"|{brs_type:^13}|{dataset_name:^11}|"
    table_row += f"{noc_list[0]:^9.2f}|"
    # Cells for thresholds that were not evaluated render as "?".
    table_row += f"{noc_list[1]:^9.2f}|" if len(noc_list) > 1 else f'{"?":^9}|'
    table_row += f"{noc_list[2]:^9.2f}|" if len(noc_list) > 2 else f'{"?":^9}|'
    table_row += (
        f"{over_max_list[1]:^9}|" if len(noc_list) > 1 else f'{"?":^9}|'
    )
    table_row += (
        f"{over_max_list[2]:^9}|" if len(noc_list) > 2 else f'{"?":^9}|'
    )
    table_row += f"{mean_spc:^7.3f}|{eval_time:^9}|"

    return header, table_row | /ritm_annotation-0.3.0.tar.gz/ritm_annotation-0.3.0/ritm_annotation/inference/utils.py | 0.683314 | 0.357399 | utils.py | pypi |
from copy import deepcopy
import cv2
import numpy as np
class Clicker(object):
    """Tracks user clicks for interactive segmentation.

    When a ground-truth mask is supplied, the clicker can also simulate an
    oracle user: each new click is placed at the point deepest inside the
    largest error region of the current prediction.
    """

    def __init__(
        self,
        gt_mask=None,
        init_clicks=None,
        ignore_label=-1,
        click_indx_offset=0,
    ):
        self.click_indx_offset = click_indx_offset
        if gt_mask is not None:
            self.gt_mask = gt_mask == 1
            self.not_ignore_mask = gt_mask != ignore_label
        else:
            self.gt_mask = None

        self.reset_clicks()
        if init_clicks is not None:
            for click in init_clicks:
                self.add_click(click)

    def make_next_click(self, pred_mask):
        """Simulate and record the next oracle click for *pred_mask*."""
        assert self.gt_mask is not None
        click = self._get_next_click(pred_mask)
        self.add_click(click)

    def get_clicks(self, clicks_limit=None):
        """Return the first *clicks_limit* clicks (all when None)."""
        return self.clicks_list[:clicks_limit]

    def _get_next_click(self, pred_mask, padding=True):
        """Pick the next click via distance transforms of the error masks.

        The label (positive/negative) follows whichever error type —
        false negative vs false positive — has the deeper interior point.
        """
        fn_mask = np.logical_and(
            np.logical_and(self.gt_mask, np.logical_not(pred_mask)),
            self.not_ignore_mask,
        )
        fp_mask = np.logical_and(
            np.logical_and(np.logical_not(self.gt_mask), pred_mask),
            self.not_ignore_mask,
        )

        if padding:
            # Pad so error regions touching the border are not treated as
            # unbounded by the distance transform.
            fn_mask = np.pad(fn_mask, ((1, 1), (1, 1)), "constant")
            fp_mask = np.pad(fp_mask, ((1, 1), (1, 1)), "constant")

        fn_mask_dt = cv2.distanceTransform(
            fn_mask.astype(np.uint8), cv2.DIST_L2, 0
        )
        fp_mask_dt = cv2.distanceTransform(
            fp_mask.astype(np.uint8), cv2.DIST_L2, 0
        )

        if padding:
            fn_mask_dt = fn_mask_dt[1:-1, 1:-1]
            fp_mask_dt = fp_mask_dt[1:-1, 1:-1]

        # Never place a new click on an already-clicked pixel.
        fn_mask_dt = fn_mask_dt * self.not_clicked_map
        fp_mask_dt = fp_mask_dt * self.not_clicked_map

        fn_max_dist = np.max(fn_mask_dt)
        fp_max_dist = np.max(fp_mask_dt)

        is_positive = fn_max_dist > fp_max_dist
        if is_positive:
            coords_y, coords_x = np.where(
                fn_mask_dt == fn_max_dist
            )  # coords is [y, x]
        else:
            coords_y, coords_x = np.where(
                fp_mask_dt == fp_max_dist
            )  # coords is [y, x]

        return Click(
            is_positive=is_positive, coords=(coords_y[0], coords_x[0])
        )

    def add_click(self, click):
        """Record *click*, assigning it the next sequential index."""
        coords = click.coords

        click.indx = (
            self.click_indx_offset + self.num_pos_clicks + self.num_neg_clicks
        )
        if click.is_positive:
            self.num_pos_clicks += 1
        else:
            self.num_neg_clicks += 1

        self.clicks_list.append(click)
        if self.gt_mask is not None:
            self.not_clicked_map[coords[0], coords[1]] = False

    def _remove_last_click(self):
        click = self.clicks_list.pop()
        coords = click.coords

        if click.is_positive:
            self.num_pos_clicks -= 1
        else:
            self.num_neg_clicks -= 1

        if self.gt_mask is not None:
            self.not_clicked_map[coords[0], coords[1]] = True

    def reset_clicks(self):
        """Forget all clicks (keeps the ground-truth mask, if any)."""
        if self.gt_mask is not None:
            # `np.bool` was removed in NumPy 1.24; the builtin `bool` is the
            # supported spelling of the same dtype.
            self.not_clicked_map = np.ones_like(self.gt_mask, dtype=bool)

        self.num_pos_clicks = 0
        self.num_neg_clicks = 0

        self.clicks_list = []

    def get_state(self):
        """Deep copy of the current click list (for save/restore)."""
        return deepcopy(self.clicks_list)

    def set_state(self, state):
        """Replace all clicks with the ones from a previous get_state()."""
        self.reset_clicks()
        for click in state:
            self.add_click(click)

    def __len__(self):
        return len(self.clicks_list)
class Click:
    """A single user click: polarity, (row, col) coordinates and an index."""

    def __init__(self, is_positive, coords, indx=None):
        # True marks foreground, False background.
        self.is_positive = is_positive
        # (row, col) position of the click.
        self.coords = coords
        # Sequential index assigned by the Clicker; None until added.
        self.indx = indx

    @property
    def coords_and_indx(self):
        """Return the coordinates and index flattened into one tuple."""
        return tuple(self.coords) + (self.indx,)

    def copy(self, **kwargs):
        """Return a deep copy, optionally overriding attributes via kwargs."""
        duplicate = deepcopy(self)
        for attr_name, attr_value in kwargs.items():
            setattr(duplicate, attr_name, attr_value)
        return duplicate
import math
from typing import List
import numpy as np
import torch
from ritm_annotation.inference.clicker import Click
from .base import BaseTransform
class Crops(BaseTransform):
    """Splits an oversized image into overlapping fixed-size crops.

    ``transform`` tiles the image (and shifts the clicks into each crop's
    local coordinates); ``inv_transform`` stitches the per-crop probability
    maps back together, averaging where crops overlap. Images that already
    fit inside ``crop_size`` are passed through untouched.
    """

    def __init__(self, crop_size=(320, 480), min_overlap=0.2):
        super().__init__()
        self.crop_height, self.crop_width = crop_size
        # Minimum fraction of a crop that must overlap its neighbour.
        self.min_overlap = min_overlap
        # Crop grid offsets (set by transform); None while no tiling is active.
        self.x_offsets = None
        self.y_offsets = None
        # Per-pixel number of covering crops, used to average the overlaps.
        self._counts = None

    def transform(self, image_nd, clicks_lists: List[List[Click]]):
        """Tile a (1, C, H, W) image into crops; returns (crops, clicks)."""
        # Only a single image with a single click list is supported.
        assert image_nd.shape[0] == 1 and len(clicks_lists) == 1
        image_height, image_width = image_nd.shape[2:4]
        self._counts = None
        # Small images need no tiling; _counts=None signals pass-through.
        if image_height < self.crop_height or image_width < self.crop_width:
            return image_nd, clicks_lists
        self.x_offsets = get_offsets(
            image_width, self.crop_width, self.min_overlap
        )
        self.y_offsets = get_offsets(
            image_height, self.crop_height, self.min_overlap
        )
        self._counts = np.zeros((image_height, image_width))
        image_crops = []
        # NOTE: crop order (rows outer, columns inner) must match the order
        # used when stitching in inv_transform below.
        for dy in self.y_offsets:
            for dx in self.x_offsets:
                self._counts[
                    dy : dy + self.crop_height, dx : dx + self.crop_width
                ] += 1
                image_crop = image_nd[
                    :, :, dy : dy + self.crop_height, dx : dx + self.crop_width
                ]
                image_crops.append(image_crop)
        image_crops = torch.cat(image_crops, dim=0)  # type: ignore
        self._counts = torch.tensor(
            self._counts, device=image_nd.device, dtype=torch.float32
        )
        clicks_list = clicks_lists[0]
        clicks_lists = []
        # Re-express every click in each crop's local coordinate frame.
        for dy in self.y_offsets:
            for dx in self.x_offsets:
                crop_clicks = [
                    x.copy(coords=(x.coords[0] - dy, x.coords[1] - dx))
                    for x in clicks_list
                ]
                clicks_lists.append(crop_clicks)
        return image_crops, clicks_lists

    def inv_transform(self, prob_map):
        """Stitch per-crop probability maps back into one full-image map."""
        if self._counts is None:
            return prob_map
        new_prob_map = torch.zeros(
            (1, 1, *self._counts.shape),
            dtype=prob_map.dtype,
            device=prob_map.device,
        )
        crop_indx = 0
        for dy in self.y_offsets:
            for dx in self.x_offsets:
                new_prob_map[
                    0, 0, dy : dy + self.crop_height, dx : dx + self.crop_width
                ] += prob_map[crop_indx, 0]
                crop_indx += 1
        # Average the accumulated probabilities over overlapping crops.
        new_prob_map = torch.div(new_prob_map, self._counts)
        return new_prob_map

    def get_state(self):
        """Return the tiling state for later restoration."""
        return self.x_offsets, self.y_offsets, self._counts

    def set_state(self, state):
        """Restore a state captured by :meth:`get_state`."""
        self.x_offsets, self.y_offsets, self._counts = state

    def reset(self):
        """Forget the current tiling (called for a new image)."""
        self.x_offsets = None
        self.y_offsets = None
        self._counts = None
def get_offsets(length, crop_size, min_overlap_ratio=0.2):
    """Compute start offsets of overlapping crops covering one axis.

    Chooses the smallest number of crops of size ``crop_size`` that covers
    ``length`` pixels while neighbouring crops overlap by at least
    ``min_overlap_ratio`` of the crop size. The last crop is shifted left
    if needed so it never runs past the end of the axis.
    """
    if length == crop_size:
        return [0]

    ratio = length / crop_size
    # Minimal crop count satisfying the overlap constraint.
    num_crops = math.ceil((ratio - min_overlap_ratio) / (1 - min_overlap_ratio))
    # Actual overlap (in pixels) once the crop count is fixed.
    overlap_ratio = (num_crops - ratio) / (num_crops - 1)
    overlap_width = int(crop_size * overlap_ratio)

    step = crop_size - overlap_width
    offsets = [0]
    while len(offsets) < num_crops:
        candidate = offsets[-1] + step
        # Clamp so the final crop stays inside the axis.
        offsets.append(min(candidate, length - crop_size))
    return offsets
from typing import List
import torch
from ritm_annotation.inference.clicker import Click
from ritm_annotation.utils.misc import (
clamp_bbox,
expand_bbox,
get_bbox_from_mask,
get_bbox_iou,
)
from .base import BaseTransform
class ZoomIn(BaseTransform):
    """Crops the image to an expanded ROI around the current object and
    rescales it to ``target_size``, focusing the network on the object.

    The ROI is derived from the previous prediction (thresholded at
    ``prob_thresh``); it is recomputed when a positive click falls outside
    it or when it drifts too far (IoU below ``recompute_thresh_iou``) from
    the last one. Zoom-in is skipped for the first ``skip_clicks`` clicks.
    """

    def __init__(
        self,
        target_size=400,
        skip_clicks=1,
        expansion_ratio=1.4,
        min_crop_size=200,
        recompute_thresh_iou=0.5,
        prob_thresh=0.50,
    ):
        super().__init__()
        self.target_size = target_size
        self.min_crop_size = min_crop_size
        self.skip_clicks = skip_clicks
        self.expansion_ratio = expansion_ratio
        self.recompute_thresh_iou = recompute_thresh_iou
        self.prob_thresh = prob_thresh
        self._input_image_shape = None
        self._prev_probs = None
        self._object_roi = None
        self._roi_image = None

    def transform(self, image_nd, clicks_lists: List[List[Click]]):
        """Crop/rescale the image to the object ROI and remap the clicks."""
        assert image_nd.shape[0] == 1 and len(clicks_lists) == 1
        self.image_changed = False
        clicks_list = clicks_lists[0]
        # Zoom-in is disabled until enough clicks have been made.
        if len(clicks_list) <= self.skip_clicks:
            return image_nd, clicks_lists
        self._input_image_shape = image_nd.shape
        current_object_roi = None
        # Derive the ROI from the previous (thresholded) prediction.
        if self._prev_probs is not None:
            current_pred_mask = (self._prev_probs > self.prob_thresh)[0, 0]
            if current_pred_mask.sum() > 0:
                current_object_roi = get_object_roi(
                    current_pred_mask,
                    clicks_list,
                    self.expansion_ratio,
                    self.min_crop_size,
                )
        if current_object_roi is None:
            if self.skip_clicks >= 0:
                return image_nd, clicks_lists
            else:
                # skip_clicks < 0: fall back to the whole image as the ROI.
                current_object_roi = (
                    0,
                    image_nd.shape[2] - 1,
                    0,
                    image_nd.shape[3] - 1,
                )
        # Recompute the crop when the ROI is new, a positive click left it,
        # or it moved too far from the previous one.
        update_object_roi = False
        if self._object_roi is None:
            update_object_roi = True
        elif not check_object_roi(self._object_roi, clicks_list):
            update_object_roi = True
        elif (
            get_bbox_iou(current_object_roi, self._object_roi)
            < self.recompute_thresh_iou
        ):
            update_object_roi = True
        if update_object_roi:
            self._object_roi = current_object_roi
            self.image_changed = True
        self._roi_image = get_roi_image_nd(
            image_nd, self._object_roi, self.target_size
        )
        tclicks_lists = [self._transform_clicks(clicks_list)]
        return self._roi_image.to(image_nd.device), tclicks_lists

    def inv_transform(self, prob_map):
        """Paste the ROI probability map back into full-image coordinates."""
        if self._object_roi is None:
            self._prev_probs = prob_map.cpu().numpy()
            return prob_map
        assert prob_map.shape[0] == 1
        rmin, rmax, cmin, cmax = self._object_roi
        # Resize the crop-space prediction back to the ROI's pixel size.
        prob_map = torch.nn.functional.interpolate(
            prob_map,
            size=(rmax - rmin + 1, cmax - cmin + 1),
            mode="bilinear",
            align_corners=True,
        )
        if self._prev_probs is not None:
            new_prob_map = torch.zeros(
                *self._prev_probs.shape,
                device=prob_map.device,
                dtype=prob_map.dtype
            )
            new_prob_map[:, :, rmin : rmax + 1, cmin : cmax + 1] = prob_map
        else:
            new_prob_map = prob_map
        self._prev_probs = new_prob_map.cpu().numpy()
        return new_prob_map

    def check_possible_recalculation(self):
        """True when zoom-in was skipped but could now produce a real ROI."""
        if (
            self._prev_probs is None
            or self._object_roi is not None
            or self.skip_clicks > 0
        ):
            return False
        pred_mask = (self._prev_probs > self.prob_thresh)[0, 0]
        if pred_mask.sum() > 0:
            possible_object_roi = get_object_roi(
                pred_mask, [], self.expansion_ratio, self.min_crop_size
            )
            image_roi = (
                0,
                self._input_image_shape[2] - 1,
                0,
                self._input_image_shape[3] - 1,
            )
            # Only worth recalculating if the object ROI is notably smaller
            # than the whole image.
            if get_bbox_iou(possible_object_roi, image_roi) < 0.50:
                return True
        return False

    def get_state(self):
        """Snapshot all internal state (ROI image moved to CPU)."""
        roi_image = (
            self._roi_image.cpu() if self._roi_image is not None else None
        )
        return (
            self._input_image_shape,
            self._object_roi,
            self._prev_probs,
            roi_image,
            self.image_changed,
        )

    def set_state(self, state):
        """Restore a snapshot produced by :meth:`get_state`."""
        (
            self._input_image_shape,
            self._object_roi,
            self._prev_probs,
            self._roi_image,
            self.image_changed,
        ) = state

    def reset(self):
        """Drop all per-image state (called for a new image)."""
        self._input_image_shape = None
        self._object_roi = None
        self._prev_probs = None
        self._roi_image = None
        self.image_changed = False

    def _transform_clicks(self, clicks_list):
        """Map clicks from full-image coordinates into ROI-crop pixels."""
        if self._object_roi is None:
            return clicks_list
        rmin, rmax, cmin, cmax = self._object_roi
        crop_height, crop_width = self._roi_image.shape[2:]
        transformed_clicks = []
        for click in clicks_list:
            new_r = crop_height * (click.coords[0] - rmin) / (rmax - rmin + 1)
            new_c = crop_width * (click.coords[1] - cmin) / (cmax - cmin + 1)
            transformed_clicks.append(click.copy(coords=(new_r, new_c)))
        return transformed_clicks
def get_object_roi(pred_mask, clicks_list, expansion_ratio, min_crop_size):
    """Compute an expanded, image-clamped bounding box around the predicted
    object, forcing every positive click to be part of the mask first."""
    mask = pred_mask.copy()
    for click in clicks_list:
        if not click.is_positive:
            continue
        row, col = int(click.coords[0]), int(click.coords[1])
        mask[row, col] = 1
    height, width = mask.shape[0], mask.shape[1]
    bbox = expand_bbox(get_bbox_from_mask(mask), expansion_ratio, min_crop_size)
    return clamp_bbox(bbox, 0, height - 1, 0, width - 1)
def get_roi_image_nd(image_nd, object_roi, target_size):
rmin, rmax, cmin, cmax = object_roi
height = rmax - rmin + 1
width = cmax - cmin + 1
if isinstance(target_size, tuple):
new_height, new_width = target_size
else:
scale = target_size / max(height, width)
new_height = int(round(height * scale))
new_width = int(round(width * scale))
with torch.no_grad():
roi_image_nd = image_nd[:, :, rmin : rmax + 1, cmin : cmax + 1]
roi_image_nd = torch.nn.functional.interpolate(
roi_image_nd,
size=(new_height, new_width),
mode="bilinear",
align_corners=True,
)
return roi_image_nd
def check_object_roi(object_roi, clicks_list):
    """Return True iff every positive click lies inside the ROI.

    Rows are checked against [object_roi[0], object_roi[1]) and columns
    against [object_roi[2], object_roi[3]); negative clicks are ignored.
    """
    rmin, rmax, cmin, cmax = object_roi
    for click in clicks_list:
        if not click.is_positive:
            continue
        row, col = click.coords[0], click.coords[1]
        if not (rmin <= row < rmax and cmin <= col < cmax):
            return False
    return True
import numpy as np
import torch
import torch.nn.functional as F
from scipy.optimize import fmin_l_bfgs_b
from .base import BasePredictor
class BRSBasePredictor(BasePredictor):
    """Base class for Backpropagating Refinement Scheme (BRS) predictors.

    Adds to BasePredictor an optimization functor (``opt_functor``) that the
    subclasses run with L-BFGS-B after each click to refine either the
    network input or intermediate features until the prediction agrees with
    the user clicks.

    :param model: interactive segmentation network.
    :param device: torch device used for inference.
    :param opt_functor: callable minimized by scipy's fmin_l_bfgs_b.
    :param optimize_after_n_clicks: run the optimizer only once more than
        this many clicks have been made.
    """

    def __init__(
        self, model, device, opt_functor, optimize_after_n_clicks=1, **kwargs
    ):
        super().__init__(model, device, **kwargs)
        self.optimize_after_n_clicks = optimize_after_n_clicks
        self.opt_functor = opt_functor
        self.opt_data = None  # flat vector of optimized parameters
        self.input_data = None  # cached per-image network input/features

    def set_input_image(self, image):
        """Set a new image and drop the per-image optimization caches."""
        super().set_input_image(image)
        self.opt_data = None
        self.input_data = None

    def _get_clicks_maps_nd(self, clicks_lists, image_shape, radius=1):
        """Rasterize clicks into per-sample positive/negative masks.

        Each click is drawn as a (2*radius+1)-sided square. Returns two
        float32 tensors of shape (batch, 1, H, W) on ``self.device``.
        """
        pos_clicks_map = np.zeros(
            (len(clicks_lists), 1) + image_shape, dtype=np.float32
        )
        neg_clicks_map = np.zeros(
            (len(clicks_lists), 1) + image_shape, dtype=np.float32
        )
        for list_indx, clicks_list in enumerate(clicks_lists):
            for click in clicks_list:
                y, x = click.coords
                y, x = int(round(y)), int(round(x))
                # Clamp the square's start to 0: a negative start would be
                # interpreted as an index from the end of the axis and the
                # slice would come out empty, silently dropping clicks near
                # the top/left border. The end is clamped by slicing itself.
                y1, x1 = max(y - radius, 0), max(x - radius, 0)
                y2, x2 = y + radius + 1, x + radius + 1
                if click.is_positive:
                    pos_clicks_map[list_indx, 0, y1:y2, x1:x2] = True
                else:
                    neg_clicks_map[list_indx, 0, y1:y2, x1:x2] = True
        with torch.no_grad():
            pos_clicks_map = torch.from_numpy(pos_clicks_map).to(self.device)
            neg_clicks_map = torch.from_numpy(neg_clicks_map).to(self.device)
        return pos_clicks_map, neg_clicks_map

    def get_states(self):
        """Snapshot transform states and the optimizer parameter vector."""
        return {
            "transform_states": self._get_transform_states(),
            "opt_data": self.opt_data,
        }

    def set_states(self, states):
        """Restore a snapshot produced by :meth:`get_states`."""
        self._set_transform_states(states["transform_states"])
        self.opt_data = states["opt_data"]
class FeatureBRSPredictor(BRSBasePredictor):
    """f-BRS predictor for DeepLab-style models.

    Instead of optimizing the network input, f-BRS optimizes per-channel
    scale and bias applied to intermediate backbone features, so only the
    part of the network after the insertion point has to be re-run inside
    the L-BFGS loop. ``insertion_mode`` selects where the scale/bias is
    inserted ("after_c4", "after_aspp" or "after_deeplab").
    """

    def __init__(
        self,
        model,
        device,
        opt_functor,
        insertion_mode="after_deeplab",
        **kwargs
    ):
        super().__init__(model, device, opt_functor=opt_functor, **kwargs)
        self.insertion_mode = insertion_mode
        self._c1_features = None
        # Channel count at the insertion point: the optimized vector holds
        # one scale and one bias per channel.
        if self.insertion_mode == "after_deeplab":
            self.num_channels = model.feature_extractor.ch
        elif self.insertion_mode == "after_c4":
            self.num_channels = model.feature_extractor.aspp_in_channels
        elif self.insertion_mode == "after_aspp":
            self.num_channels = model.feature_extractor.ch + 32
        else:
            raise NotImplementedError

    def _get_prediction(self, image_nd, clicks_lists, is_image_changed):
        """Run the network head with L-BFGS-optimized feature scale/bias."""
        points_nd = self.get_points_nd(clicks_lists)
        pos_mask, neg_mask = self._get_clicks_maps_nd(
            clicks_lists, image_nd.shape[2:]
        )
        num_clicks = len(clicks_lists[0])
        bs = image_nd.shape[0] // 2 if self.with_flip else image_nd.shape[0]
        # (Re)allocate the flat scale/bias vector when the batch changes.
        if (
            self.opt_data is None
            or self.opt_data.shape[0] // (2 * self.num_channels) != bs
        ):
            self.opt_data = np.zeros(
                (bs * 2 * self.num_channels), dtype=np.float32
            )
        # Recompute the cached backbone features only when necessary.
        if (
            num_clicks <= self.net_clicks_limit
            or is_image_changed
            or self.input_data is None
        ):
            self.input_data = self._get_head_input(image_nd, points_nd)

        def get_prediction_logits(scale, bias):
            # Apply per-channel scale/bias to the cached features and run
            # only the remaining part of the network.
            scale = scale.view(bs, -1, 1, 1)
            bias = bias.view(bs, -1, 1, 1)
            if self.with_flip:
                # The second half of the batch is the flipped copy; reuse
                # the same parameters for it.
                scale = scale.repeat(2, 1, 1, 1)
                bias = bias.repeat(2, 1, 1, 1)
            scaled_backbone_features = self.input_data * scale
            scaled_backbone_features = scaled_backbone_features + bias
            if self.insertion_mode == "after_c4":
                x = self.net.feature_extractor.aspp(scaled_backbone_features)
                x = F.interpolate(
                    x,
                    mode="bilinear",
                    size=self._c1_features.size()[2:],
                    align_corners=True,
                )
                x = torch.cat((x, self._c1_features), dim=1)
                scaled_backbone_features = self.net.feature_extractor.head(x)
            elif self.insertion_mode == "after_aspp":
                scaled_backbone_features = self.net.feature_extractor.head(
                    scaled_backbone_features
                )
            pred_logits = self.net.head(scaled_backbone_features)
            pred_logits = F.interpolate(
                pred_logits,
                size=image_nd.size()[2:],
                mode="bilinear",
                align_corners=True,
            )
            return pred_logits

        self.opt_functor.init_click(
            get_prediction_logits, pos_mask, neg_mask, self.device
        )
        if num_clicks > self.optimize_after_n_clicks:
            opt_result = fmin_l_bfgs_b(
                func=self.opt_functor,
                x0=self.opt_data,
                **self.opt_functor.optimizer_params
            )
            self.opt_data = opt_result[0]
        with torch.no_grad():
            if self.opt_functor.best_prediction is not None:
                opt_pred_logits = self.opt_functor.best_prediction
            else:
                # Optimizer was not run (few clicks): evaluate once with
                # the current parameters.
                opt_data_nd = torch.from_numpy(self.opt_data).to(self.device)
                opt_vars, _ = self.opt_functor.unpack_opt_params(opt_data_nd)
                opt_pred_logits = get_prediction_logits(*opt_vars)
        return opt_pred_logits

    def _get_head_input(self, image_nd, points):
        """Compute and return backbone features at the insertion point."""
        with torch.no_grad():
            image_nd, prev_mask = self.net.prepare_input(image_nd)
            coord_features = self.net.get_coord_features(
                image_nd, prev_mask, points
            )
            if self.net.rgb_conv is not None:
                x = self.net.rgb_conv(
                    torch.cat((image_nd, coord_features), dim=1)
                )
                additional_features = None
            elif hasattr(self.net, "maps_transform"):
                x = image_nd
                additional_features = self.net.maps_transform(coord_features)
            if (
                self.insertion_mode == "after_c4"
                or self.insertion_mode == "after_aspp"
            ):
                # c3 is unused here; c1 feeds the skip connection.
                c1, _, c3, c4 = self.net.feature_extractor.backbone(
                    x, additional_features
                )
                c1 = self.net.feature_extractor.skip_project(c1)
                if self.insertion_mode == "after_aspp":
                    x = self.net.feature_extractor.aspp(c4)
                    x = F.interpolate(
                        x,
                        size=c1.size()[2:],
                        mode="bilinear",
                        align_corners=True,
                    )
                    x = torch.cat((x, c1), dim=1)
                    backbone_features = x
                else:
                    backbone_features = c4
                # Kept for reuse inside get_prediction_logits ("after_c4").
                self._c1_features = c1
            else:
                backbone_features = self.net.feature_extractor(
                    x, additional_features
                )[0]
        return backbone_features
class HRNetFeatureBRSPredictor(BRSBasePredictor):
    """f-BRS predictor for HRNet+OCR models.

    ``insertion_mode`` "A" optimizes per-channel scale/bias on the
    concatenated HRNet stage features (before the OCR module); mode "C"
    applies it to the OCR output features, right before the classification
    head.
    """

    def __init__(
        self, model, device, opt_functor, insertion_mode="A", **kwargs
    ):
        super().__init__(model, device, opt_functor=opt_functor, **kwargs)
        self.insertion_mode = insertion_mode
        self._c1_features = None
        # Channel count at the insertion point (one scale + one bias each).
        if self.insertion_mode == "A":
            # HRNet concatenates four streams of widths 1/2/4/8 x width.
            self.num_channels = sum(
                k * model.feature_extractor.width for k in [1, 2, 4, 8]
            )
        elif self.insertion_mode == "C":
            self.num_channels = 2 * model.feature_extractor.ocr_width
        else:
            raise NotImplementedError

    def _get_prediction(self, image_nd, clicks_lists, is_image_changed):
        """Run the OCR/cls head with L-BFGS-optimized feature scale/bias."""
        points_nd = self.get_points_nd(clicks_lists)
        pos_mask, neg_mask = self._get_clicks_maps_nd(
            clicks_lists, image_nd.shape[2:]
        )
        num_clicks = len(clicks_lists[0])
        bs = image_nd.shape[0] // 2 if self.with_flip else image_nd.shape[0]
        # (Re)allocate the flat scale/bias vector when the batch changes.
        if (
            self.opt_data is None
            or self.opt_data.shape[0] // (2 * self.num_channels) != bs
        ):
            self.opt_data = np.zeros(
                (bs * 2 * self.num_channels), dtype=np.float32
            )
        # Recompute the cached backbone features only when necessary.
        if (
            num_clicks <= self.net_clicks_limit
            or is_image_changed
            or self.input_data is None
        ):
            self.input_data = self._get_head_input(image_nd, points_nd)

        def get_prediction_logits(scale, bias):
            # Apply per-channel scale/bias to the cached features and run
            # only the remaining part of the network.
            scale = scale.view(bs, -1, 1, 1)
            bias = bias.view(bs, -1, 1, 1)
            if self.with_flip:
                # The second half of the batch is the flipped copy; reuse
                # the same parameters for it.
                scale = scale.repeat(2, 1, 1, 1)
                bias = bias.repeat(2, 1, 1, 1)
            scaled_backbone_features = self.input_data * scale
            scaled_backbone_features = scaled_backbone_features + bias
            if self.insertion_mode == "A":
                if self.net.feature_extractor.ocr_width > 0:
                    # Run the full OCR pipeline on the modified features.
                    out_aux = self.net.feature_extractor.aux_head(
                        scaled_backbone_features
                    )
                    feats = self.net.feature_extractor.conv3x3_ocr(
                        scaled_backbone_features
                    )
                    context = self.net.feature_extractor.ocr_gather_head(
                        feats, out_aux
                    )
                    feats = self.net.feature_extractor.ocr_distri_head(
                        feats, context
                    )
                else:
                    feats = scaled_backbone_features
                pred_logits = self.net.feature_extractor.cls_head(feats)
            elif self.insertion_mode == "C":
                pred_logits = self.net.feature_extractor.cls_head(
                    scaled_backbone_features
                )
            else:
                raise NotImplementedError
            pred_logits = F.interpolate(
                pred_logits,
                size=image_nd.size()[2:],
                mode="bilinear",
                align_corners=True,
            )
            return pred_logits

        self.opt_functor.init_click(
            get_prediction_logits, pos_mask, neg_mask, self.device
        )
        if num_clicks > self.optimize_after_n_clicks:
            opt_result = fmin_l_bfgs_b(
                func=self.opt_functor,
                x0=self.opt_data,
                **self.opt_functor.optimizer_params
            )
            self.opt_data = opt_result[0]
        with torch.no_grad():
            if self.opt_functor.best_prediction is not None:
                opt_pred_logits = self.opt_functor.best_prediction
            else:
                # Optimizer was not run (few clicks): evaluate once with
                # the current parameters.
                opt_data_nd = torch.from_numpy(self.opt_data).to(self.device)
                opt_vars, _ = self.opt_functor.unpack_opt_params(opt_data_nd)
                opt_pred_logits = get_prediction_logits(*opt_vars)
        return opt_pred_logits

    def _get_head_input(self, image_nd, points):
        """Compute and return the features at the insertion point."""
        with torch.no_grad():
            image_nd, prev_mask = self.net.prepare_input(image_nd)
            coord_features = self.net.get_coord_features(
                image_nd, prev_mask, points
            )
            if self.net.rgb_conv is not None:
                x = self.net.rgb_conv(
                    torch.cat((image_nd, coord_features), dim=1)
                )
                additional_features = None
            elif hasattr(self.net, "maps_transform"):
                x = image_nd
                additional_features = self.net.maps_transform(coord_features)
            feats = self.net.feature_extractor.compute_hrnet_feats(
                x, additional_features
            )
            if self.insertion_mode == "A":
                backbone_features = feats
            elif self.insertion_mode == "C":
                # Mode "C" caches the OCR output instead, so only the
                # classification head runs inside the optimization loop.
                out_aux = self.net.feature_extractor.aux_head(feats)
                feats = self.net.feature_extractor.conv3x3_ocr(feats)
                context = self.net.feature_extractor.ocr_gather_head(
                    feats, out_aux
                )
                backbone_features = self.net.feature_extractor.ocr_distri_head(
                    feats, context
                )
            else:
                raise NotImplementedError
        return backbone_features
class InputBRSPredictor(BRSBasePredictor):
    """Original BRS: optimizes an additive correction to the network input.

    ``optimize_target`` selects what receives the correction: "rgb" (the
    input image), "dmaps" (the click distance maps) or "all" (the fused
    first-convolution output). A full forward pass runs on every
    optimization step, which makes this slower than the f-BRS variants.
    """

    def __init__(
        self, model, device, opt_functor, optimize_target="rgb", **kwargs
    ):
        super().__init__(model, device, opt_functor=opt_functor, **kwargs)
        self.optimize_target = optimize_target

    def _get_prediction(self, image_nd, clicks_lists, is_image_changed):
        """Run the full network with an L-BFGS-optimized input correction."""
        points_nd = self.get_points_nd(clicks_lists)
        pos_mask, neg_mask = self._get_clicks_maps_nd(
            clicks_lists, image_nd.shape[2:]
        )
        num_clicks = len(clicks_lists[0])
        # (Re)allocate the correction tensor for a new image/batch size.
        if self.opt_data is None or is_image_changed:
            if self.optimize_target == "dmaps":
                # One channel per distance map; the previous-mask channel
                # (if present) is excluded from optimization.
                opt_channels = (
                    self.net.coord_feature_ch - 1
                    if self.net.with_prev_mask
                    else self.net.coord_feature_ch
                )
            else:
                opt_channels = 3
            bs = (
                image_nd.shape[0] // 2 if self.with_flip else image_nd.shape[0]
            )
            self.opt_data = torch.zeros(
                (bs, opt_channels, image_nd.shape[2], image_nd.shape[3]),
                device=self.device,
                dtype=torch.float32,
            )

        def get_prediction_logits(opt_bias):
            # Full forward pass with opt_bias added at the chosen point.
            input_image, prev_mask = self.net.prepare_input(image_nd)
            dmaps = self.net.get_coord_features(
                input_image, prev_mask, points_nd
            )
            if self.optimize_target == "rgb":
                input_image = input_image + opt_bias
            elif self.optimize_target == "dmaps":
                if self.net.with_prev_mask:
                    # Channel 0 is the previous mask - leave it untouched.
                    dmaps[:, 1:, :, :] = dmaps[:, 1:, :, :] + opt_bias
                else:
                    dmaps = dmaps + opt_bias
            if self.net.rgb_conv is not None:
                x = self.net.rgb_conv(torch.cat((input_image, dmaps), dim=1))
                if self.optimize_target == "all":
                    x = x + opt_bias
                coord_features = None
            elif hasattr(self.net, "maps_transform"):
                x = input_image
                coord_features = self.net.maps_transform(dmaps)
            pred_logits = self.net.backbone_forward(
                x, coord_features=coord_features
            )["instances"]
            pred_logits = F.interpolate(
                pred_logits,
                size=image_nd.size()[2:],
                mode="bilinear",
                align_corners=True,
            )
            return pred_logits

        self.opt_functor.init_click(
            get_prediction_logits,
            pos_mask,
            neg_mask,
            self.device,
            shape=self.opt_data.shape,
        )
        if num_clicks > self.optimize_after_n_clicks:
            # L-BFGS-B works on a flat float64 vector; reshape afterwards.
            opt_result = fmin_l_bfgs_b(
                func=self.opt_functor,
                x0=self.opt_data.cpu().numpy().ravel(),
                **self.opt_functor.optimizer_params
            )
            self.opt_data = (
                torch.from_numpy(opt_result[0])
                .view(self.opt_data.shape)
                .to(self.device)
            )
        with torch.no_grad():
            if self.opt_functor.best_prediction is not None:
                opt_pred_logits = self.opt_functor.best_prediction
            else:
                # Optimizer was not run (few clicks): evaluate once with
                # the current correction.
                opt_vars, _ = self.opt_functor.unpack_opt_params(self.opt_data)
                opt_pred_logits = get_prediction_logits(*opt_vars)
        return opt_pred_logits
import torch
import torch.nn.functional as F
from torchvision import transforms
from ritm_annotation.inference.transforms import (
AddHorizontalFlip,
LimitLongestSide,
SigmoidForPred,
)
class BasePredictor(object):
    """Runs an interactive segmentation network on click input.

    Wraps a model with a chain of reversible input transforms (zoom-in,
    longest-side limiting, a final sigmoid and optional horizontal-flip
    test-time augmentation), keeps the previous prediction to feed back
    into models trained with a prior mask, and can switch between several
    click-count-specialized models.
    """

    def __init__(
        self,
        model,
        device,
        net_clicks_limit=None,
        with_flip=False,
        zoom_in=None,
        max_size=None,
        **kwargs
    ):
        self.with_flip = with_flip
        self.net_clicks_limit = net_clicks_limit
        self.original_image = None
        self.device = device
        self.zoom_in = zoom_in
        self.prev_prediction = None
        self.model_indx = 0
        self.click_models = None
        self.net_state_dict = None
        # A tuple means (main model, list of per-click-count models).
        if isinstance(model, tuple):
            self.net, self.click_models = model
        else:
            self.net = model
        self.to_tensor = transforms.ToTensor()
        # Transform order matters: inv_transform is applied in reverse.
        self.transforms = [zoom_in] if zoom_in is not None else []
        if max_size is not None:
            self.transforms.append(LimitLongestSide(max_size=max_size))
        self.transforms.append(SigmoidForPred())
        if with_flip:
            self.transforms.append(AddHorizontalFlip())

    def set_input_image(self, image):
        """Load a new image, reset transforms and the previous prediction."""
        image_nd = self.to_tensor(image)
        for transform in self.transforms:
            transform.reset()
        self.original_image = image_nd.to(self.device)
        if len(self.original_image.shape) == 3:
            self.original_image = self.original_image.unsqueeze(0)
        # Start from an all-zero prior mask of shape (1, 1, H, W).
        self.prev_prediction = torch.zeros_like(
            self.original_image[:, :1, :, :]
        )

    def get_prediction(self, clicker, prev_mask=None):
        """Return the (H, W) probability map for the clicker's clicks."""
        clicks_list = clicker.get_clicks()
        # Pick the model specialized for the current number of clicks.
        if self.click_models is not None:
            model_indx = (
                min(
                    clicker.click_indx_offset + len(clicks_list),
                    len(self.click_models),
                )
                - 1
            )
            if model_indx != self.model_indx:
                self.model_indx = model_indx
                self.net = self.click_models[model_indx]
        input_image = self.original_image
        if prev_mask is None:
            prev_mask = self.prev_prediction
        # Models trained with a prior mask take it as an extra channel.
        if hasattr(self.net, "with_prev_mask") and self.net.with_prev_mask:
            input_image = torch.cat((input_image, prev_mask), dim=1)
        image_nd, clicks_lists, is_image_changed = self.apply_transforms(
            input_image, [clicks_list]
        )
        pred_logits = self._get_prediction(
            image_nd, clicks_lists, is_image_changed
        )
        prediction = F.interpolate(
            pred_logits,
            mode="bilinear",
            align_corners=True,
            size=image_nd.size()[2:],
        )
        # Undo the transforms in reverse order of application.
        for t in reversed(self.transforms):
            prediction = t.inv_transform(prediction)
        # Zoom-in may decide a better crop is now possible: redo the pass.
        if (
            self.zoom_in is not None
            and self.zoom_in.check_possible_recalculation()
        ):
            return self.get_prediction(clicker)
        self.prev_prediction = prediction
        return prediction.cpu().numpy()[0, 0]

    def _get_prediction(self, image_nd, clicks_lists, is_image_changed):
        """Plain forward pass; overridden by the BRS predictors."""
        points_nd = self.get_points_nd(clicks_lists)
        return self.net(image_nd, points_nd)["instances"]

    def _get_transform_states(self):
        """Collect the per-transform states, in transform order."""
        return [x.get_state() for x in self.transforms]

    def _set_transform_states(self, states):
        """Restore per-transform states (must match transform count)."""
        assert len(states) == len(self.transforms)
        for state, transform in zip(states, self.transforms):
            transform.set_state(state)

    def apply_transforms(self, image_nd, clicks_lists):
        """Apply all transforms; report whether any changed the image."""
        is_image_changed = False
        for t in self.transforms:
            image_nd, clicks_lists = t.transform(image_nd, clicks_lists)
            is_image_changed |= t.image_changed
        return image_nd, clicks_lists, is_image_changed

    def get_points_nd(self, clicks_lists):
        """Pack clicks into a (batch, 2*K, 3) tensor of (y, x, indx) rows.

        The first K rows of each sample are positive clicks, the last K
        negative; missing slots are padded with (-1, -1, -1).
        """
        total_clicks = []
        num_pos_clicks = [
            sum(x.is_positive for x in clicks_list)
            for clicks_list in clicks_lists
        ]
        num_neg_clicks = [
            len(clicks_list) - num_pos
            for clicks_list, num_pos in zip(clicks_lists, num_pos_clicks)
        ]
        num_max_points = max(num_pos_clicks + num_neg_clicks)
        if self.net_clicks_limit is not None:
            num_max_points = min(self.net_clicks_limit, num_max_points)
        # Always emit at least one (possibly padded) point per polarity.
        num_max_points = max(1, num_max_points)
        for clicks_list in clicks_lists:
            clicks_list = clicks_list[: self.net_clicks_limit]
            pos_clicks = [
                click.coords_and_indx
                for click in clicks_list
                if click.is_positive
            ]
            pos_clicks = pos_clicks + (num_max_points - len(pos_clicks)) * [
                (-1, -1, -1)
            ]
            neg_clicks = [
                click.coords_and_indx
                for click in clicks_list
                if not click.is_positive
            ]
            neg_clicks = neg_clicks + (num_max_points - len(neg_clicks)) * [
                (-1, -1, -1)
            ]
            total_clicks.append(pos_clicks + neg_clicks)
        return torch.tensor(total_clicks, device=self.device)

    def get_states(self):
        """Snapshot transform states and the previous prediction."""
        return {
            "transform_states": self._get_transform_states(),
            "prev_prediction": self.prev_prediction.clone(),
        }

    def set_states(self, states):
        """Restore a snapshot produced by :meth:`get_states`."""
        self._set_transform_states(states["transform_states"])
        self.prev_prediction = states["prev_prediction"]
import torch
from ritm_annotation.model.losses import SigmoidBinaryCrossEntropyLoss
class BRSMaskLoss(torch.nn.Module):
    """Click-consistency loss used during BRS optimization.

    Penalizes the squared deviation of predicted probabilities from 1 at
    positive clicks and from 0 at negative clicks, each term normalized by
    the corresponding click-mask area.
    """

    def __init__(self, eps=1e-5):
        super().__init__()
        # Guards the normalizing denominators when a click mask is empty.
        self._eps = eps

    def forward(self, result, pos_mask, neg_mask):
        """Return (loss, max positive residual, max negative residual)."""
        pos_residual = (1 - result) * pos_mask
        neg_residual = result * neg_mask

        pos_term = torch.sum(pos_residual**2) / (torch.sum(pos_mask) + self._eps)
        neg_term = torch.sum(neg_residual**2) / (torch.sum(neg_mask) + self._eps)
        loss = pos_term + neg_term

        # Worst-case per-pixel residuals, used by the caller's stop check.
        with torch.no_grad():
            max_pos_residual = torch.abs(pos_residual).max().item()
            max_neg_residual = torch.abs(neg_residual).max().item()
        return loss, max_pos_residual, max_neg_residual
class OracleMaskLoss(torch.nn.Module):
    """Oracle BRS loss that optimizes directly against the ground truth.

    Used for upper-bound experiments: instead of click consistency the
    prediction is compared to the known ground-truth mask with a sigmoid
    BCE loss. ``self.predictor`` must be assigned externally so the loss
    can crop the ground truth to the predictor's current ROI.
    """

    def __init__(self):
        super().__init__()
        self.gt_mask = None
        self.loss = SigmoidBinaryCrossEntropyLoss(from_sigmoid=True)
        self.predictor = None  # assigned by the owning predictor
        self.history = []  # recent loss values, used to detect convergence

    def set_gt_mask(self, gt_mask):
        """Set the ground-truth mask and restart convergence tracking."""
        self.gt_mask = gt_mask
        self.history = []

    def forward(self, result, pos_mask, neg_mask):
        """Return (loss, 1.0, 1.0); the click masks are ignored.

        Returns zeros once the loss has plateaued, which triggers the
        caller's early-stop criterion.
        """
        gt_mask = self.gt_mask.to(result.device)
        if self.predictor.object_roi is not None:
            # Crop and resize the ground truth to the predictor's zoom-in
            # region so it aligns with the prediction.
            r1, r2, c1, c2 = self.predictor.object_roi[:4]
            gt_mask = gt_mask[:, :, r1 : r2 + 1, c1 : c2 + 1]
            gt_mask = torch.nn.functional.interpolate(
                gt_mask, result.size()[2:], mode="bilinear", align_corners=True
            )
        if result.shape[0] == 2:
            # Presumably the second batch entry is the horizontally flipped
            # image - mirror the ground truth to match. TODO confirm.
            gt_mask_flipped = torch.flip(gt_mask, dims=[3])
            gt_mask = torch.cat([gt_mask, gt_mask_flipped], dim=0)
        loss = self.loss(result, gt_mask)
        self.history.append(loss.detach().cpu().numpy()[0])
        # Loss barely changed over the last 5 evaluations: converged.
        if (
            len(self.history) > 5
            and abs(self.history[-5] - self.history[-1]) < 1e-5
        ):
            return 0, 0, 0
        return loss, 1.0, 1.0
import numpy as np
import torch
from ritm_annotation.model.metrics import _compute_iou
from .brs_losses import BRSMaskLoss
class BaseOptimizer:
    """Objective functor for scipy's ``fmin_l_bfgs_b`` used by BRS.

    ``init_click`` binds the current network forward pass and click masks;
    ``__call__`` then maps a flat float64 parameter vector to
    ``[loss_value, gradient]`` as required by the L-BFGS-B interface.
    Subclasses define how the flat vector is unpacked via
    :meth:`unpack_opt_params`.

    :param optimizer_params: keyword arguments forwarded to fmin_l_bfgs_b.
    :param prob_thresh: probability threshold for the binary mask and the
        early-stop residual test.
    :param reg_weight: L2 regularization weight on the parameters.
    :param min_iou_diff: stop when consecutive masks have IoU above
        ``1 - min_iou_diff``.
    :param brs_loss: click-consistency loss; defaults to ``BRSMaskLoss()``.
    :param with_flip: the batch holds a horizontally flipped copy.
    :param flip_average: average predictions over the flipped pair before
        computing the loss.
    """

    def __init__(
        self,
        optimizer_params,
        prob_thresh=0.49,
        reg_weight=1e-3,
        min_iou_diff=0.01,
        brs_loss=None,
        with_flip=False,
        flip_average=False,
        **kwargs
    ):
        # Build the default loss lazily: a default argument of
        # BRSMaskLoss() would create one shared instance at class
        # definition time (mutable-default pitfall).
        self.brs_loss = brs_loss if brs_loss is not None else BRSMaskLoss()
        self.optimizer_params = optimizer_params
        self.prob_thresh = prob_thresh
        self.reg_weight = reg_weight
        self.min_iou_diff = min_iou_diff
        self.with_flip = with_flip
        self.flip_average = flip_average
        self.best_prediction = None
        self._get_prediction_logits = None
        self._opt_shape = None
        self._best_loss = None
        self._click_masks = None
        self._last_mask = None
        self.device = None

    def init_click(
        self, get_prediction_logits, pos_mask, neg_mask, device, shape=None
    ):
        """Bind the forward-pass closure and click masks for a new click."""
        self.best_prediction = None
        self._get_prediction_logits = get_prediction_logits
        self._click_masks = (pos_mask, neg_mask)
        self._opt_shape = shape
        self._last_mask = None
        self.device = device

    def __call__(self, x):
        """Evaluate loss and gradient at the flat parameter vector ``x``.

        Returns ``[f, grad]``; the gradient is zeroed when the early-stop
        criteria fire (all click residuals below threshold, or the binary
        mask stopped changing between evaluations).
        """
        opt_params = torch.from_numpy(x).float().to(self.device)
        opt_params.requires_grad_(True)
        with torch.enable_grad():
            opt_vars, reg_loss = self.unpack_opt_params(opt_params)
            result_before_sigmoid = self._get_prediction_logits(*opt_vars)
            result = torch.sigmoid(result_before_sigmoid)
            pos_mask, neg_mask = self._click_masks
            if self.with_flip and self.flip_average:
                # Average straight and flipped predictions; keep only the
                # click masks of the straight half.
                result, result_flipped = torch.chunk(result, 2, dim=0)
                result = 0.5 * (result + torch.flip(result_flipped, dims=[3]))
                pos_mask, neg_mask = (
                    pos_mask[: result.shape[0]],
                    neg_mask[: result.shape[0]],
                )
            loss, f_max_pos, f_max_neg = self.brs_loss(
                result, pos_mask, neg_mask
            )
            loss = loss + reg_loss
        f_val = loss.detach().cpu().numpy()
        # Track the best (lowest-loss) logits seen during optimization.
        if self.best_prediction is None or f_val < self._best_loss:
            self.best_prediction = result_before_sigmoid.detach()
            self._best_loss = f_val
        # All clicks already satisfied: stop with a zero gradient.
        if f_max_pos < (1 - self.prob_thresh) and f_max_neg < self.prob_thresh:
            return [f_val, np.zeros_like(x)]
        current_mask = result > self.prob_thresh
        if self._last_mask is not None and self.min_iou_diff > 0:
            diff_iou = _compute_iou(current_mask, self._last_mask)
            # Mask barely changed since the last evaluation: converged.
            if len(diff_iou) > 0 and diff_iou.mean() > 1 - self.min_iou_diff:
                return [f_val, np.zeros_like(x)]
        self._last_mask = current_mask
        loss.backward()
        # np.float64 instead of the np.float alias, which was deprecated
        # in NumPy 1.20 and removed in 1.24 (this line used to crash there).
        f_grad = opt_params.grad.cpu().numpy().ravel().astype(np.float64)
        return [f_val, f_grad]

    def unpack_opt_params(self, opt_params):
        """Split ``opt_params`` into forward-pass args and a reg term."""
        raise NotImplementedError
class InputOptimizer(BaseOptimizer):
    """BRS optimizer whose parameters form an additive correction over the
    network input (RGB image or click distance maps)."""

    def unpack_opt_params(self, opt_params):
        """Reshape the flat vector into the input-correction tensor and
        compute its L2 regularization term."""
        bias = opt_params.view(self._opt_shape)
        if self.with_flip:
            # Append a mirrored copy for the flipped half of the batch.
            mirrored = torch.flip(bias, dims=[3])
            bias = torch.cat([bias, mirrored], dim=0)
        reg_loss = self.reg_weight * torch.sum(bias**2)
        return (bias,), reg_loss
class ScaleBiasOptimizer(BaseOptimizer):
    """BRS optimizer whose parameters are per-channel scale and bias
    applied to intermediate backbone features (f-BRS)."""

    def __init__(self, *args, scale_act=None, reg_bias_weight=10.0, **kwargs):
        super().__init__(*args, **kwargs)
        # Optional squashing nonlinearity applied to the scale half.
        self.scale_act = scale_act
        # Bias terms are regularized this much harder than scales.
        self.reg_bias_weight = reg_bias_weight

    def unpack_opt_params(self, opt_params):
        """Split the flat vector into (1 + scale, bias) and compute the
        weighted L2 regularization of both halves."""
        scale, bias = torch.chunk(opt_params, 2, dim=0)
        reg_loss = self.reg_weight * (
            torch.sum(scale**2) + self.reg_bias_weight * torch.sum(bias**2)
        )
        # Optionally squash the raw scales before the residual 1 + scale.
        activations = {"tanh": torch.tanh, "sin": torch.sin}
        act = activations.get(self.scale_act)
        if act is not None:
            scale = act(scale)
        return (1 + scale, bias), reg_loss
from ritm_annotation.inference.transforms import ZoomIn
from ritm_annotation.model.is_hrnet_model import HRNetModel
from .base import BasePredictor
from .brs import (
FeatureBRSPredictor,
HRNetFeatureBRSPredictor,
InputBRSPredictor,
)
from .brs_functors import InputOptimizer, ScaleBiasOptimizer
def get_predictor(
    net,
    brs_mode,
    device,
    prob_thresh=0.49,
    with_flip=True,
    zoom_in_params=dict(),  # NOTE(review): mutable default; read-only here, but None disables zoom-in entirely
    predictor_params=None,
    brs_opt_func_params=None,
    lbfgs_params=None,
):
    """Build an interactive-segmentation predictor for the requested BRS mode.

    Supported modes: "NoBRS", "f-BRS-A"/"f-BRS-B"/"f-BRS-C" (feature-space
    backpropagating refinement) and "RGB-BRS"/"DistMap-BRS" (input-space
    refinement). User-supplied ``*_params`` dicts override the defaults
    assembled below.
    """
    # Defaults for the L-BFGS optimizer used by all BRS variants.
    lbfgs_params_ = {
        "m": 20,
        "factr": 0,
        "pgtol": 1e-8,
        "maxfun": 20,
    }
    predictor_params_ = {"optimize_after_n_clicks": 1}

    # zoom_in_params=None disables the ZoomIn transform altogether;
    # an (empty) dict builds it with the corresponding (default) settings.
    if zoom_in_params is not None:
        zoom_in = ZoomIn(**zoom_in_params)
    else:
        zoom_in = None

    if lbfgs_params is not None:
        lbfgs_params_.update(lbfgs_params)
    # Always derived after the update so maxiter tracks an overridden maxfun.
    lbfgs_params_["maxiter"] = 2 * lbfgs_params_["maxfun"]

    if brs_opt_func_params is None:
        brs_opt_func_params = dict()

    if isinstance(net, (list, tuple)):
        assert brs_mode == "NoBRS", _(
            "Multi-stage models support only NoBRS mode."
        )

    if brs_mode == "NoBRS":
        # Plain feed-forward predictor without any refinement.
        if predictor_params is not None:
            predictor_params_.update(predictor_params)
        predictor = BasePredictor(
            net,
            device,
            zoom_in=zoom_in,
            with_flip=with_flip,
            **predictor_params_
        )
    elif brs_mode.startswith("f-BRS"):
        # Feature-space BRS: optimize scale/bias of intermediate features.
        predictor_params_.update(
            {
                "net_clicks_limit": 8,
            }
        )
        if predictor_params is not None:
            predictor_params_.update(predictor_params)

        # Map the mode letter to the feature-insertion point in the network.
        insertion_mode = {
            "f-BRS-A": "after_c4",
            "f-BRS-B": "after_aspp",
            "f-BRS-C": "after_deeplab",
        }[brs_mode]

        opt_functor = ScaleBiasOptimizer(
            prob_thresh=prob_thresh,
            with_flip=with_flip,
            optimizer_params=lbfgs_params_,
            **brs_opt_func_params
        )

        if isinstance(net, HRNetModel):
            FeaturePredictor = HRNetFeatureBRSPredictor
            # HRNet uses its own insertion-point naming scheme.
            insertion_mode = {
                "after_c4": "A",
                "after_aspp": "A",
                "after_deeplab": "C",
            }[insertion_mode]
        else:
            FeaturePredictor = FeatureBRSPredictor

        predictor = FeaturePredictor(
            net,
            device,
            opt_functor=opt_functor,
            with_flip=with_flip,
            insertion_mode=insertion_mode,
            zoom_in=zoom_in,
            **predictor_params_
        )
    elif brs_mode == "RGB-BRS" or brs_mode == "DistMap-BRS":
        # Input-space BRS: optimize either the RGB image or the click
        # distance maps fed to the network.
        use_dmaps = brs_mode == "DistMap-BRS"

        predictor_params_.update(
            {
                "net_clicks_limit": 5,
            }
        )
        if predictor_params is not None:
            predictor_params_.update(predictor_params)

        opt_functor = InputOptimizer(
            prob_thresh=prob_thresh,
            with_flip=with_flip,
            optimizer_params=lbfgs_params_,
            **brs_opt_func_params
        )

        predictor = InputBRSPredictor(
            net,
            device,
            optimize_target="dmaps" if use_dmaps else "rgb",
            opt_functor=opt_functor,
            with_flip=with_flip,
            zoom_in=zoom_in,
            **predictor_params_
        )
    else:
        raise NotImplementedError

    return predictor
import logging
import random
from pathlib import Path
import cv2
from albumentations.augmentations.geometric import longest_max_size
from ritm_annotation.data.base import ISDataset
from ritm_annotation.data.sample import DSample
from ritm_annotation.utils.exp_imports.default import *
logger = logging.getLogger(__name__)
def get_train_augmentator(model_cfg):
    """Training-time augmentation pipeline for crops of model_cfg.crop_size.

    Random resize, horizontal flip, padding up to the crop size, random crop
    and photometric jitter (brightness/contrast and RGB shift).
    """
    crop_size = model_cfg.crop_size
    return Compose(
        [
            UniformRandomResize(scale_range=(0.75, 1.40)),
            HorizontalFlip(),
            # border_mode=0 pads with constant zeros.
            PadIfNeeded(
                min_height=crop_size[0], min_width=crop_size[1], border_mode=0
            ),
            RandomCrop(*crop_size),
            RandomBrightnessContrast(
                brightness_limit=(-0.25, 0.25),
                contrast_limit=(-0.15, 0.4),
                p=0.75,
            ),
            RGBShift(
                r_shift_limit=10, g_shift_limit=10, b_shift_limit=10, p=0.75
            ),
        ],
        p=1.0,
    )
def get_val_augmentator(model_cfg):
    """Validation-time pipeline: only pad and crop to model_cfg.crop_size.

    No photometric or geometric jitter, so validation stays comparable
    across epochs.
    """
    crop_size = model_cfg.crop_size
    return Compose(
        [
            PadIfNeeded(
                min_height=crop_size[0], min_width=crop_size[1], border_mode=0
            ),
            RandomCrop(*crop_size),
        ],
        p=1.0,
    )
def get_points_sampler(model_cfg):
    """Click sampler used during training.

    Builds a MultiPointSampler limited to model_cfg.num_max_points clicks,
    with geometric decay (gamma=0.8) over the number of clicks and a 15%
    chance of merging up to two objects into one target.
    """
    return MultiPointSampler(
        model_cfg.num_max_points,
        prob_gamma=0.8,
        merge_objects_prob=0.15,
        max_num_merged_objects=2,
    )
class AnnotationDataset(ISDataset):
    """Interactive-segmentation dataset over an image dir and a mask dir.

    Layout: for each image ``images_path/<name>`` there is a directory
    ``masks_path/<name>/`` containing one or more binary mask files; one
    mask is drawn at random per sample. Samples are split 80/20 into
    train/val deterministically (by sorted name).
    """

    def __init__(
        self,
        images_path: Path,
        masks_path: Path,
        split="train",
        dry_run=False,
        # the idea here is to resize the image to speed up data ingestion and training # noqa:E501
        max_bigger_dimension=None,
        **kwargs,
    ):
        super(AnnotationDataset, self).__init__(**kwargs)
        self.images_path = images_path
        self.masks_path = masks_path
        self.max_bigger_dimension = max_bigger_dimension
        self.dataset_samples = []
        if not dry_run:
            # Scan the mask root; each valid sample is a directory whose name
            # matches an existing image file and contains at least one mask.
            for item in masks_path.iterdir():
                image_file = images_path / item.name
                if not item.is_dir():
                    # Logger.warn is a deprecated alias; use warning().
                    logger.warning(
                        _(
                            "AnnotationDataset: found impurities: {item}"
                        ).format(item=item)
                    )
                    continue
                if not (image_file.exists() and image_file.is_file()):
                    logger.warning(
                        _("Found mask for {item_name} but not image").format(
                            item_name=item.name
                        )
                    )
                    continue
                # Keep only samples that have at least one mask file.
                if any(item.iterdir()):
                    self.dataset_samples.append(item.name)
        # Sorted names make the train/val split deterministic across runs.
        self.dataset_samples.sort()
        total_amount = len(self.dataset_samples)
        train_amount = int(total_amount * 0.8)
        val_amount = total_amount - train_amount
        if split == "train":
            self.dataset_samples = self.dataset_samples[:train_amount]
        elif split == "val":
            self.dataset_samples = self.dataset_samples[-val_amount:]
        else:
            raise ValueError(_("split must be either train or val"))

    def get_sample(self, index: int) -> DSample:
        """Load the image and one randomly chosen mask for sample ``index``.

        The mask is binarized (any non-zero pixel -> object id 1); the
        optional ``max_bigger_dimension`` resize is applied consistently to
        both image (bilinear) and mask (nearest-exact).
        """
        item = self.dataset_samples[index]
        image_path = self.images_path / item
        masks_path = self.masks_path / item
        image = cv2.imread(str(image_path))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if self.max_bigger_dimension is not None:
            image = longest_max_size(
                image, self.max_bigger_dimension, cv2.INTER_LINEAR
            )
        # `_` is reserved for gettext in this module, hence `_rest`.
        (h, w, *_rest) = image.shape
        mask_path = random.choice(list(masks_path.iterdir()))
        gt_mask = cv2.imread(str(mask_path), 0)
        if self.max_bigger_dimension is not None:
            gt_mask = longest_max_size(
                gt_mask, self.max_bigger_dimension, cv2.INTER_NEAREST_EXACT
            )
        gt_mask[gt_mask > 0] = 1
        gt_mask = gt_mask.astype("int32")
        logger.debug(
            _("Processed item {index}: '{item}' (shape: ({w}, {h})").format(
                index=index, item=item, w=w, h=h
            )
        )
        return DSample(image, gt_mask, objects_ids=[1], sample_id=index)
from ritm_annotation.utils.exp_imports.default import *
MODEL_NAME = "sbd_hrnet18"
def init_model(cfg, dry_run=False):
    """Build the HRNet-W18 + OCR interactive segmentation model.

    Returns the model (moved to cfg.device, Xavier-initialized) and an
    edict with the training hyper-parameters used by get_trainer.
    With dry_run=True the ImageNet-pretrained backbone weights are not
    loaded from disk.
    """
    model_cfg = edict()
    model_cfg.crop_size = (320, 480)  # (height, width) of training crops
    model_cfg.num_max_points = 24  # max clicks sampled per object
    model_cfg.default_num_epochs = 220

    model = HRNetModel(
        width=18,
        ocr_width=64,
        with_aux_output=True,
        use_leaky_relu=True,
        use_rgb_conv=False,
        use_disks=True,
        norm_radius=5,
        with_prev_mask=True,
    )

    model.to(cfg.device)
    model.apply(initializer.XavierGluon(rnd_type="gaussian", magnitude=2.0))
    if not dry_run:
        model.feature_extractor.load_pretrained_weights(
            cfg.IMAGENET_PRETRAINED_MODELS.HRNETV2_W18
        )
    return model, model_cfg
def get_trainer(model, cfg, model_cfg, dry_run=False, no_dataset=False):
    """Assemble the ISTrainer for the SBD + HRNet18 iterative-mask setup.

    Builds losses, train/val augmentation pipelines, the click sampler, the
    SBD datasets (skipped with no_dataset=True), the Adam optimizer and the
    epoch-milestone LR schedule.
    """
    # Fall back to the reference batch size when none was configured.
    cfg.batch_size = 28 if cfg.batch_size < 1 else cfg.batch_size
    cfg.val_batch_size = cfg.batch_size
    crop_size = model_cfg.crop_size

    # Main mask loss plus an auxiliary BCE head with a smaller weight.
    loss_cfg = edict()
    loss_cfg.instance_loss = NormalizedFocalLossSigmoid(alpha=0.5, gamma=2)
    loss_cfg.instance_loss_weight = 1.0
    loss_cfg.instance_aux_loss = SigmoidBinaryCrossEntropyLoss()
    loss_cfg.instance_aux_loss_weight = 0.4

    train_augmentator = Compose(
        [
            UniformRandomResize(scale_range=(0.75, 1.25)),
            Flip(),
            RandomRotate90(),
            ShiftScaleRotate(
                shift_limit=0.03,
                scale_limit=0,
                rotate_limit=(-3, 3),
                border_mode=0,
            p=0.75,
            ),
            PadIfNeeded(
                min_height=crop_size[0], min_width=crop_size[1], border_mode=0
            ),
            RandomCrop(*crop_size),
            RandomBrightnessContrast(
                brightness_limit=(-0.25, 0.25),
                contrast_limit=(-0.15, 0.4),
                p=0.75,
            ),
            RGBShift(
                r_shift_limit=10, g_shift_limit=10, b_shift_limit=10, p=0.75
            ),
        ],
        p=1.0,
    )

    # Validation keeps only geometric resize/pad/crop — no photometric jitter.
    val_augmentator = Compose(
        [
            UniformRandomResize(scale_range=(0.75, 1.25)),
            PadIfNeeded(
                min_height=crop_size[0], min_width=crop_size[1], border_mode=0
            ),
            RandomCrop(*crop_size),
        ],
        p=1.0,
    )

    points_sampler = MultiPointSampler(
        model_cfg.num_max_points,
        prob_gamma=0.80,
        merge_objects_prob=0.15,
        max_num_merged_objects=2,
    )

    if no_dataset:
        trainset = None
        valset = None
    else:
        trainset = SBDDataset(
            cfg.SBD_PATH,
            split="train",
            augmentator=train_augmentator,
            min_object_area=80,
            keep_background_prob=0.01,
            points_sampler=points_sampler,
            # Precomputed per-sample difficulty weights bias the sampling.
            samples_scores_path="./assets/sbd_samples_weights.pkl",
            samples_scores_gamma=1.25,
            dry_run=dry_run,
        )

        valset = SBDDataset(
            cfg.SBD_PATH,
            split="val",
            augmentator=val_augmentator,
            min_object_area=80,
            points_sampler=points_sampler,
            epoch_len=500,
            dry_run=dry_run,
        )

    optimizer_params = {"lr": 5e-4, "betas": (0.9, 0.999), "eps": 1e-8}

    # Decay LR by 10x at epochs 200 and 215 (of 220 total).
    lr_scheduler = partial(
        torch.optim.lr_scheduler.MultiStepLR, milestones=[200, 215], gamma=0.1
    )
    return ISTrainer(
        model,
        cfg,
        model_cfg,
        loss_cfg,
        trainset,
        valset,
        optimizer="adam",
        optimizer_params=optimizer_params,
        lr_scheduler=lr_scheduler,
        # Save every 5 epochs at first, then every epoch from epoch 100 on.
        checkpoint_interval=[(0, 5), (100, 1)],
        image_dump_interval=3000,
        metrics=[AdaptiveIoU()],
        max_interactive_points=model_cfg.num_max_points,
        max_num_next_clicks=3,
        dry_run=dry_run,
    )
import math
import random
from functools import lru_cache
import cv2
import numpy as np
from .sample import DSample
class BasePointSampler:
    """Abstract interface for click (point) samplers.

    Subclasses pick a target object from a sample and then draw positive /
    negative clicks for it. The chosen ground-truth mask is cached on the
    instance between the two steps.
    """

    def __init__(self):
        self._selected_mask = None
        self._selected_masks = None

    def sample_object(self, sample: DSample):
        """Choose the target object(s) within ``sample``."""
        raise NotImplementedError

    def sample_points(self):
        """Draw clicks for the previously selected object."""
        raise NotImplementedError

    @property
    def selected_mask(self):
        # Must be set via sample_object() / the setter before reading.
        assert self._selected_mask is not None
        return self._selected_mask

    @selected_mask.setter
    def selected_mask(self, mask):
        # Stored as float32 with a leading channel axis of size 1.
        self._selected_mask = mask[np.newaxis, :].astype(np.float32)
class MultiPointSampler(BasePointSampler):
    """Samples positive and negative clicks for one training sample.

    Positive clicks land on the selected object(s); negative clicks are
    drawn from three strategies ("bg" = anywhere in the background,
    "other" = on other objects, "border" = a dilated band around the
    object) mixed according to the given probabilities. The number of
    clicks follows a geometric-decay distribution with parameter
    ``prob_gamma``. Each click is emitted as ``[row, col, order]`` and the
    result is padded with ``(-1, -1, -1)`` to a fixed length.
    """

    def __init__(
        self,
        max_num_points,
        prob_gamma=0.7,
        expand_ratio=0.1,
        positive_erode_prob=0.9,
        positive_erode_iters=3,
        negative_bg_prob=0.1,
        negative_other_prob=0.4,
        negative_border_prob=0.5,
        merge_objects_prob=0.0,
        max_num_merged_objects=2,
        use_hierarchy=False,
        soft_targets=False,
        first_click_center=False,
        only_one_first_click=False,
        sfc_inner_k=1.7,
        sfc_full_inner_prob=0.0,
    ):
        super().__init__()
        self.max_num_points = max_num_points
        self.expand_ratio = expand_ratio
        self.positive_erode_prob = positive_erode_prob
        self.positive_erode_iters = positive_erode_iters
        self.merge_objects_prob = merge_objects_prob
        self.use_hierarchy = use_hierarchy
        self.soft_targets = soft_targets
        # "sfc" = sample-first-click: optionally bias the first positive
        # click toward the object center (see get_point_candidates).
        self.first_click_center = first_click_center
        self.only_one_first_click = only_one_first_click
        self.sfc_inner_k = sfc_inner_k
        self.sfc_full_inner_prob = sfc_full_inner_prob

        # -1 means "merge up to max_num_points objects".
        if max_num_merged_objects == -1:
            max_num_merged_objects = max_num_points
        self.max_num_merged_objects = max_num_merged_objects

        self.neg_strategies = ["bg", "other", "border"]
        self.neg_strategies_prob = [
            negative_bg_prob,
            negative_other_prob,
            negative_border_prob,
        ]
        # The three negative-strategy probabilities must form a distribution.
        assert math.isclose(sum(self.neg_strategies_prob), 1.0)

        # Click-count distributions: positives need at least one click,
        # negatives may be absent (hence max_num_points + 1 outcomes).
        self._pos_probs = generate_probs(max_num_points, gamma=prob_gamma)
        self._neg_probs = generate_probs(max_num_points + 1, gamma=prob_gamma)
        self._neg_masks = None

    def sample_object(self, sample: DSample):
        """Pick target object(s) in ``sample`` and precompute the positive
        mask(s) plus the per-strategy negative mask pools."""
        if len(sample) == 0:
            # No objects at all: empty target, every pixel is background.
            bg_mask = sample.get_background_mask()
            self.selected_mask = np.zeros_like(bg_mask, dtype=np.float32)
            self._selected_masks = [[]]
            self._neg_masks = {
                strategy: bg_mask for strategy in self.neg_strategies
            }
            self._neg_masks["required"] = []
            return

        gt_mask, pos_masks, neg_masks = self._sample_mask(sample)
        binary_gt_mask = gt_mask > 0.5 if self.soft_targets else gt_mask > 0

        self.selected_mask = gt_mask
        self._selected_masks = pos_masks

        neg_mask_bg = np.logical_not(binary_gt_mask)
        neg_mask_border = self._get_border_mask(binary_gt_mask)
        if len(sample) <= len(self._selected_masks):
            # All objects were selected; "other objects" degenerates to bg.
            neg_mask_other = neg_mask_bg
        else:
            neg_mask_other = np.logical_and(
                np.logical_not(sample.get_background_mask()),
                np.logical_not(binary_gt_mask),
            )

        self._neg_masks = {
            "bg": neg_mask_bg,
            "other": neg_mask_other,
            "border": neg_mask_border,
            # Masks that MUST each receive a negative click (hierarchy mode).
            "required": neg_masks,
        }

    def _sample_mask(self, sample: DSample):
        """Select one or (with merge_objects_prob) several root objects and
        union their masks into a single ground-truth target."""
        root_obj_ids = sample.root_objects

        if len(root_obj_ids) > 1 and random.random() < self.merge_objects_prob:
            max_selected_objects = min(
                len(root_obj_ids), self.max_num_merged_objects
            )
            num_selected_objects = np.random.randint(
                2, max_selected_objects + 1
            )
            random_ids = random.sample(root_obj_ids, num_selected_objects)
        else:
            random_ids = [random.choice(root_obj_ids)]

        gt_mask = None
        pos_segments = []
        neg_segments = []
        for obj_id in random_ids:
            (
                obj_gt_mask,
                obj_pos_segments,
                obj_neg_segments,
            ) = self._sample_from_masks_layer(obj_id, sample)
            if gt_mask is None:
                gt_mask = obj_gt_mask
            else:
                # max keeps soft targets correct; acts as union for binary.
                gt_mask = np.maximum(gt_mask, obj_gt_mask)

            pos_segments.extend(obj_pos_segments)
            neg_segments.extend(obj_neg_segments)

        pos_masks = [self._positive_erode(x) for x in pos_segments]
        neg_masks = [self._positive_erode(x) for x in neg_segments]

        return gt_mask, pos_masks, neg_masks

    def _sample_from_masks_layer(self, obj_id, sample: DSample):
        """Return (gt_mask, positive masks, required negative masks) for one
        object, optionally descending the object hierarchy."""
        objs_tree = sample._objects

        if not self.use_hierarchy:
            node_mask = sample.get_object_mask(obj_id)
            gt_mask = (
                sample.get_soft_object_mask(obj_id)
                if self.soft_targets
                else node_mask
            )
            return gt_mask, [node_mask], []

        def _select_node(node_id):
            # Randomly walk down the hierarchy, stopping at a leaf or
            # with probability 0.5 at each level.
            node_info = objs_tree[node_id]
            if not node_info["children"] or random.random() < 0.5:
                return node_id
            return _select_node(random.choice(node_info["children"]))

        selected_node = _select_node(obj_id)
        node_info = objs_tree[selected_node]
        node_mask = sample.get_object_mask(selected_node)
        gt_mask = (
            sample.get_soft_object_mask(selected_node)
            if self.soft_targets
            else node_mask
        )
        pos_mask = node_mask.copy()

        negative_segments = []
        if (
            node_info["parent"] is not None
            and node_info["parent"] in objs_tree
        ):
            # The parent region outside this node must get a negative click.
            parent_mask = sample.get_object_mask(node_info["parent"])
            negative_segments.append(
                np.logical_and(parent_mask, np.logical_not(node_mask))
            )

        # Tiny children (<10% of the node area) never get positive clicks.
        for child_id in node_info["children"]:
            if objs_tree[child_id]["area"] / node_info["area"] < 0.10:
                child_mask = sample.get_object_mask(child_id)
                pos_mask = np.logical_and(pos_mask, np.logical_not(child_mask))

        if node_info["children"]:
            # Randomly carve out up to 3 children from the target, turning
            # them into required negative regions.
            max_disabled_children = min(len(node_info["children"]), 3)
            num_disabled_children = np.random.randint(
                0, max_disabled_children + 1
            )
            disabled_children = random.sample(
                node_info["children"], num_disabled_children
            )

            for child_id in disabled_children:
                child_mask = sample.get_object_mask(child_id)
                pos_mask = np.logical_and(pos_mask, np.logical_not(child_mask))
                if self.soft_targets:
                    soft_child_mask = sample.get_soft_object_mask(child_id)
                    gt_mask = np.minimum(gt_mask, 1.0 - soft_child_mask)
                else:
                    gt_mask = np.logical_and(
                        gt_mask, np.logical_not(child_mask)
                    )
                negative_segments.append(child_mask)

        return gt_mask, [pos_mask], negative_segments

    def sample_points(self):
        """Draw the final click list: positives first, then negatives."""
        assert self._selected_mask is not None
        pos_points = self._multi_mask_sample_points(
            self._selected_masks,
            is_negative=[False] * len(self._selected_masks),
            with_first_click=self.first_click_center,
        )

        # Mix the three negative strategies according to their probabilities;
        # "required" regions are sampled unconditionally in addition.
        neg_strategy = [
            (self._neg_masks[k], prob)
            for k, prob in zip(self.neg_strategies, self.neg_strategies_prob)
        ]
        neg_masks = self._neg_masks["required"] + [neg_strategy]
        neg_points = self._multi_mask_sample_points(
            neg_masks,
            is_negative=[False] * len(self._neg_masks["required"]) + [True],
        )

        return pos_points + neg_points

    def _multi_mask_sample_points(
        self, selected_masks, is_negative, with_first_click=False
    ):
        """Sample clicks for several masks and pad to max_num_points."""
        selected_masks = selected_masks[: self.max_num_points]

        each_obj_points = [
            self._sample_points(
                mask,
                is_negative=is_negative[i],
                with_first_click=with_first_click,
            )
            for i, mask in enumerate(selected_masks)
        ]
        each_obj_points = [x for x in each_obj_points if len(x) > 0]

        points = []
        if len(each_obj_points) == 1:
            points = each_obj_points[0]
        elif len(each_obj_points) > 1:
            if self.only_one_first_click:
                each_obj_points = each_obj_points[:1]

            # Keep one guaranteed click per object...
            points = [obj_points[0] for obj_points in each_obj_points]

            # ...then sample the remaining clicks from the union of all
            # masks, each mask weighted equally.
            aggregated_masks_with_prob = []
            for indx, x in enumerate(selected_masks):
                if (
                    isinstance(x, (list, tuple))
                    and x
                    and isinstance(x[0], (list, tuple))
                ):
                    # x is already a (mask, prob) strategy list; rescale.
                    for t, prob in x:
                        aggregated_masks_with_prob.append(
                            (t, prob / len(selected_masks))
                        )
                else:
                    aggregated_masks_with_prob.append(
                        (x, 1.0 / len(selected_masks))
                    )

            other_points_union = self._sample_points(
                aggregated_masks_with_prob, is_negative=True
            )
            if len(other_points_union) + len(points) <= self.max_num_points:
                points.extend(other_points_union)
            else:
                points.extend(
                    random.sample(
                        other_points_union, self.max_num_points - len(points)
                    )
                )

        # Pad with sentinel clicks so every sample has a fixed-length list.
        if len(points) < self.max_num_points:
            points.extend([(-1, -1, -1)] * (self.max_num_points - len(points)))

        return points

    def _sample_points(self, mask, is_negative=False, with_first_click=False):
        """Sample a geometric-decay-distributed number of clicks in ``mask``.

        ``mask`` is either a boolean array or a list of (mask, prob) pairs
        from which the mask is chosen per click.
        """
        if is_negative:
            num_points = np.random.choice(
                np.arange(self.max_num_points + 1), p=self._neg_probs
            )
        else:
            # At least one positive click.
            num_points = 1 + np.random.choice(
                np.arange(self.max_num_points), p=self._pos_probs
            )

        indices_probs = None
        if isinstance(mask, (list, tuple)):
            indices_probs = [x[1] for x in mask]
            indices = [(np.argwhere(x), prob) for x, prob in mask]
            if indices_probs:
                assert math.isclose(sum(indices_probs), 1.0)
        else:
            indices = np.argwhere(mask)

        points = []
        for j in range(num_points):
            first_click = with_first_click and j == 0 and indices_probs is None

            if first_click:
                # Bias the first click toward the object center.
                point_indices = get_point_candidates(
                    mask,
                    k=self.sfc_inner_k,
                    full_prob=self.sfc_full_inner_prob,
                )
            elif indices_probs:
                point_indices_indx = np.random.choice(
                    np.arange(len(indices)), p=indices_probs
                )
                point_indices = indices[point_indices_indx][0]
            else:
                point_indices = indices

            num_indices = len(point_indices)
            if num_indices > 0:
                # The third component encodes click order: 0 marks the
                # special centered first click, 100 an ordinary click.
                point_indx = 0 if first_click else 100
                click = point_indices[
                    np.random.randint(0, num_indices)
                ].tolist() + [point_indx]
                points.append(click)

        return points

    def _positive_erode(self, mask):
        """Randomly erode a mask so clicks avoid object boundaries; keep the
        original if erosion would (almost) wipe the mask out."""
        if random.random() > self.positive_erode_prob:
            return mask

        kernel = np.ones((3, 3), np.uint8)
        eroded_mask = cv2.erode(
            mask.astype(np.uint8), kernel, iterations=self.positive_erode_iters
        ).astype(bool)

        if eroded_mask.sum() > 10:
            return eroded_mask
        else:
            return mask

    def _get_border_mask(self, mask):
        """Band of background pixels around ``mask``; dilation radius scales
        with the square root of the object area."""
        expand_r = int(np.ceil(self.expand_ratio * np.sqrt(mask.sum())))
        kernel = np.ones((3, 3), np.uint8)
        expanded_mask = cv2.dilate(
            mask.astype(np.uint8), kernel, iterations=expand_r
        )
        expanded_mask[mask.astype(bool)] = 0
        return expanded_mask
@lru_cache(maxsize=None)
def generate_probs(max_num_points, gamma):
    """Return a normalized geometric-decay probability vector.

    Entry ``i`` is proportional to ``gamma ** i`` (built by repeated
    multiplication); the result sums to 1 and is cached per
    (max_num_points, gamma) pair.
    """
    weights = np.empty(max_num_points, dtype=float)
    value = 1.0
    for idx in range(max_num_points):
        weights[idx] = value
        value *= gamma
    return weights / weights.sum()
def get_point_candidates(obj_mask, k=1.7, full_prob=0.0):
    """Candidate locations for a center-biased first click on ``obj_mask``.

    With probability ``full_prob`` the whole mask is returned unchanged
    (callers must handle both return forms). Otherwise an L2 distance
    transform is computed; with k > 0 the coordinates of the inner region
    (distance > max/k) are returned, with k <= 0 a single coordinate is
    drawn with probability proportional to the distance value.
    """
    if full_prob > 0 and random.random() < full_prob:
        return obj_mask

    # Pad by one pixel so pixels on the image border get a finite distance,
    # then crop the padding back off the transform.
    padded_mask = np.pad(obj_mask, ((1, 1), (1, 1)), "constant")

    dt = cv2.distanceTransform(padded_mask.astype(np.uint8), cv2.DIST_L2, 0)[
        1:-1, 1:-1
    ]
    if k > 0:
        inner_mask = dt > dt.max() / k
        return np.argwhere(inner_mask)
    else:
        prob_map = dt.flatten()
        # The 1e-6 floor only guards division by zero for an empty mask;
        # NOTE(review): np.random.choice requires the probs to sum to 1.
        prob_map /= max(prob_map.sum(), 1e-6)
        click_indx = np.random.choice(len(prob_map), p=prob_map)
        click_coords = np.unravel_index(click_indx, dt.shape)
        return np.array([click_coords])
import pickle
import random
import numpy as np
import torch
from torchvision import transforms
from .points_sampler import MultiPointSampler
from .sample import DSample
class ISDataset(torch.utils.data.dataset.Dataset):
def __init__(
self,
augmentator=None,
points_sampler=MultiPointSampler(max_num_points=12),
min_object_area=0,
keep_background_prob=0.0,
with_image_info=False,
samples_scores_path=None,
samples_scores_gamma=1.0,
epoch_len=-1,
dry_run=False,
):
super(ISDataset, self).__init__()
self.epoch_len = epoch_len
self.augmentator = augmentator
self.min_object_area = min_object_area
self.keep_background_prob = keep_background_prob
self.points_sampler = points_sampler
self.with_image_info = with_image_info
self.samples_precomputed_scores = self._load_samples_scores(
samples_scores_path, samples_scores_gamma, dry_run=dry_run
)
self.to_tensor = transforms.ToTensor()
self.dataset_samples = None
self.dry_run = dry_run
def __getitem__(self, index):
if self.samples_precomputed_scores is not None:
index = np.random.choice(
self.samples_precomputed_scores["indices"],
p=self.samples_precomputed_scores["probs"],
)
else:
if self.epoch_len > 0:
index = random.randrange(0, len(self.dataset_samples))
sample = self.get_sample(index)
sample = self.augment_sample(sample)
sample.remove_small_objects(self.min_object_area)
self.points_sampler.sample_object(sample)
points = np.array(self.points_sampler.sample_points())
mask = self.points_sampler.selected_mask
output = {
"images": self.to_tensor(sample.image),
"points": points.astype(np.float32),
"instances": mask,
}
if self.with_image_info:
output["image_info"] = sample.sample_id
return output
def augment_sample(self, sample) -> DSample:
if self.augmentator is None:
return sample
valid_augmentation = False
while not valid_augmentation:
sample.augment(self.augmentator)
keep_sample = (
self.keep_background_prob < 0.0
or random.random() < self.keep_background_prob
)
valid_augmentation = len(sample) > 0 or keep_sample
return sample
def get_sample(self, index) -> DSample:
raise NotImplementedError
def __len__(self):
if self.epoch_len > 0:
return self.epoch_len
else:
return self.get_samples_number()
def get_samples_number(self):
return len(self.dataset_samples)
@staticmethod
def _load_samples_scores(
samples_scores_path, samples_scores_gamma, dry_run=False
):
if samples_scores_path is None or (not dry_run):
return None
with open(samples_scores_path, "rb") as f:
images_scores = pickle.load(f)
probs = np.array(
[(1.0 - x[2]) ** samples_scores_gamma for x in images_scores]
)
probs /= probs.sum()
samples_scores = {
"indices": [x[0] for x in images_scores],
"probs": probs,
}
print(
_("Loaded {num_weights} weights with gamma={gamma}").format(
num_weights=len(probs), gamma=samples_scores_gamma
)
)
return samples_scores | /ritm_annotation-0.3.0.tar.gz/ritm_annotation-0.3.0/ritm_annotation/data/base.py | 0.829527 | 0.281578 | base.py | pypi |
import random
import cv2
import numpy as np
from albumentations import DualTransform, ImageOnlyTransform
from albumentations.augmentations import functional as F
from albumentations.augmentations.geometric import functional as FG
from albumentations.core.serialization import SERIALIZABLE_REGISTRY
from albumentations.core.transforms_interface import to_tuple
from ritm_annotation.utils.misc import (
clamp_bbox,
expand_bbox,
get_bbox_from_mask,
get_labels_with_sizes,
)
class UniformRandomResize(DualTransform):
    """Albumentations transform that rescales image, mask and keypoints by a
    single factor drawn uniformly from ``scale_range``."""

    def __init__(
        self,
        scale_range=(0.9, 1.1),
        interpolation=cv2.INTER_LINEAR,
        always_apply=False,
        p=1,
    ):
        super().__init__(always_apply, p)
        self.scale_range = scale_range
        self.interpolation = interpolation

    def get_params_dependent_on_targets(self, params):
        # One scale per call, so all targets stay geometrically consistent.
        scale = random.uniform(*self.scale_range)
        height = int(round(params["image"].shape[0] * scale))
        width = int(round(params["image"].shape[1] * scale))
        return {"new_height": height, "new_width": width}

    def apply(
        self,
        img,
        new_height=0,
        new_width=0,
        interpolation=cv2.INTER_LINEAR,
        **params
    ):
        return FG.resize(
            img,
            height=new_height,
            width=new_width,
            interpolation=interpolation,
        )

    def apply_to_keypoint(self, keypoint, new_height=0, new_width=0, **params):
        scale_x = new_width / params["cols"]
        scale_y = new_height / params["rows"]
        return F.keypoint_scale(keypoint, scale_x, scale_y)

    def get_transform_init_args_names(self):
        return "scale_range", "interpolation"

    @property
    def targets_as_params(self):
        # The sampled output size depends on the input image shape.
        return ["image"]
class ZoomIn(DualTransform):
    """Albumentations transform that crops around one randomly chosen object
    (area > ``min_area``) and resizes the crop to (height, width).

    The object's bounding box is expanded by ``expansion_ratio``, jittered
    by ``bbox_jitter`` and clamped to the image. In the mask, every other
    object is zeroed so only the selected object survives. If no suitable
    object exists, the input passes through unchanged unless
    ``always_resize`` is set.
    """

    def __init__(
        self,
        height,
        width,
        bbox_jitter=0.1,
        expansion_ratio=1.4,
        min_crop_size=200,
        min_area=100,
        always_resize=False,
        always_apply=False,
        p=0.5,
    ):
        super(ZoomIn, self).__init__(always_apply, p)
        self.height = height
        self.width = width
        self.bbox_jitter = to_tuple(bbox_jitter)
        self.expansion_ratio = expansion_ratio
        self.min_crop_size = min_crop_size
        self.min_area = min_area
        self.always_resize = always_resize

    def apply(self, img, selected_object, bbox, **params):
        if selected_object is None:
            # Nothing selected: optional plain resize, otherwise identity.
            if self.always_resize:
                img = FG.resize(img, height=self.height, width=self.width)
            return img

        rmin, rmax, cmin, cmax = bbox
        img = img[rmin : rmax + 1, cmin : cmax + 1]
        img = FG.resize(img, height=self.height, width=self.width)

        return img

    def apply_to_mask(self, mask, selected_object, bbox, **params):
        if selected_object is None:
            if self.always_resize:
                mask = FG.resize(
                    mask,
                    height=self.height,
                    width=self.width,
                    interpolation=cv2.INTER_NEAREST,
                )
            return mask

        rmin, rmax, cmin, cmax = bbox
        mask = mask[rmin : rmax + 1, cmin : cmax + 1]
        if isinstance(selected_object, tuple):
            # Multi-layer mask: keep only the selected id in its layer.
            layer_indx, mask_id = selected_object
            obj_mask = mask[:, :, layer_indx] == mask_id
            new_mask = np.zeros_like(mask)
            new_mask[:, :, layer_indx][obj_mask] = mask_id
        else:
            # Single-layer mask: zero out all other object ids.
            obj_mask = mask == selected_object
            new_mask = mask.copy()
            new_mask[np.logical_not(obj_mask)] = 0

        new_mask = FG.resize(
            new_mask,
            height=self.height,
            width=self.width,
            interpolation=cv2.INTER_NEAREST,
        )
        return new_mask

    def get_params_dependent_on_targets(self, params):
        instances = params["mask"]
        is_mask_layer = len(instances.shape) > 2

        # Collect candidate objects large enough to zoom into.
        candidates = []
        if is_mask_layer:
            for layer_indx in range(instances.shape[2]):
                labels, areas = get_labels_with_sizes(
                    instances[:, :, layer_indx]
                )
                candidates.extend(
                    [
                        (layer_indx, obj_id)
                        for obj_id, area in zip(labels, areas)
                        if area > self.min_area
                    ]
                )
        else:
            labels, areas = get_labels_with_sizes(instances)
            candidates = [
                obj_id
                for obj_id, area in zip(labels, areas)
                if area > self.min_area
            ]

        selected_object = None
        bbox = None
        if candidates:
            selected_object = random.choice(candidates)
            if is_mask_layer:
                layer_indx, mask_id = selected_object
                obj_mask = instances[:, :, layer_indx] == mask_id
            else:
                obj_mask = instances == selected_object

            bbox = get_bbox_from_mask(obj_mask)

            if isinstance(self.expansion_ratio, tuple):
                expansion_ratio = random.uniform(*self.expansion_ratio)
            else:
                expansion_ratio = self.expansion_ratio

            # Expand, jitter, then clamp the box back inside the image.
            bbox = expand_bbox(bbox, expansion_ratio, self.min_crop_size)
            bbox = self._jitter_bbox(bbox)
            bbox = clamp_bbox(
                bbox, 0, obj_mask.shape[0] - 1, 0, obj_mask.shape[1] - 1
            )

        return {"selected_object": selected_object, "bbox": bbox}

    def _jitter_bbox(self, bbox):
        # Shift each edge by a random fraction of the box size.
        rmin, rmax, cmin, cmax = bbox
        height = rmax - rmin + 1
        width = cmax - cmin + 1
        rmin = int(rmin + random.uniform(*self.bbox_jitter) * height)
        rmax = int(rmax + random.uniform(*self.bbox_jitter) * height)
        cmin = int(cmin + random.uniform(*self.bbox_jitter) * width)
        cmax = int(cmax + random.uniform(*self.bbox_jitter) * width)

        return rmin, rmax, cmin, cmax

    def apply_to_bbox(self, bbox, **params):
        raise NotImplementedError

    def apply_to_keypoint(self, keypoint, **params):
        raise NotImplementedError

    @property
    def targets_as_params(self):
        # Object selection depends on the instance mask.
        return ["mask"]

    def get_transform_init_args_names(self):
        return (
            "height",
            "width",
            "bbox_jitter",
            "expansion_ratio",
            "min_crop_size",
            "min_area",
            "always_resize",
        )
def remove_image_only_transforms(sdict):
    """Strip image-only transforms from a serialized albumentations pipeline.

    Recursively walks the serialized dict, dropping every transform whose
    class is an ImageOnlyTransform (they don't affect masks and so are
    irrelevant when replaying the pipeline on masks). Mutates and returns
    ``sdict``.
    """
    if "transforms" not in sdict:
        return sdict

    kept = []
    for child in sdict["transforms"]:
        child_cls = SERIALIZABLE_REGISTRY[child["__class_fullname__"]]
        if "transforms" in child:
            # Nested composition: filter it recursively.
            kept.append(remove_image_only_transforms(child))
            continue
        if issubclass(child_cls, ImageOnlyTransform):
            continue
        kept.append(child)

    sdict["transforms"] = kept
    return sdict
from copy import deepcopy
import numpy as np
from albumentations import ReplayCompose
from ritm_annotation.data.transforms import remove_image_only_transforms
from ritm_annotation.utils.misc import get_labels_with_sizes
class DSample:
    """One dataset sample: an image plus encoded instance masks.

    Masks are stored as an (H, W, L) integer array where each layer L maps
    pixel values to object ids. ``_objects`` keeps per-object bookkeeping
    ({"parent", "mapping": (layer, id), "children", "area"}), supporting an
    optional object hierarchy and ignored regions. Augmentation is
    reversible via a cached copy of the original data.
    """

    def __init__(
        self,
        image,
        encoded_masks,
        objects=None,
        objects_ids=None,
        ignore_ids=None,
        sample_id=None,
    ):
        self.image = image
        self.sample_id = sample_id

        # Normalize a 2-D mask to a single-layer (H, W, 1) array.
        if len(encoded_masks.shape) == 2:
            encoded_masks = encoded_masks[:, :, np.newaxis]
        self._encoded_masks = encoded_masks
        self._ignored_regions = []

        if objects_ids is not None:
            if not objects_ids or not isinstance(objects_ids[0], tuple):
                # Plain ids are only valid for a single-layer mask; wrap
                # them into (layer, id) mappings.
                assert encoded_masks.shape[2] == 1
                objects_ids = [(0, obj_id) for obj_id in objects_ids]

            self._objects = dict()
            for indx, obj_mapping in enumerate(objects_ids):
                self._objects[indx] = {
                    "parent": None,
                    "mapping": obj_mapping,
                    "children": [],
                }

            if ignore_ids:
                if isinstance(ignore_ids[0], tuple):
                    self._ignored_regions = ignore_ids
                else:
                    self._ignored_regions = [
                        (0, region_id) for region_id in ignore_ids
                    ]
        else:
            # Caller supplied a prebuilt objects dict (possibly a hierarchy).
            self._objects = deepcopy(objects)

        self._augmented = False
        self._soft_mask_aug = None
        # Snapshot for reset_augmentation().
        self._original_data = (
            self.image,
            self._encoded_masks,
            deepcopy(self._objects),
        )

    def augment(self, augmentator):
        """Apply an albumentations augmentator to image and masks in place.

        Any previous augmentation is undone first. When the augmentator
        returns a replay record, a mask-only replay is kept so soft object
        masks can be re-derived later.
        """
        self.reset_augmentation()
        aug_output = augmentator(image=self.image, mask=self._encoded_masks)
        self.image = aug_output["image"]
        self._encoded_masks = aug_output["mask"]

        aug_replay = aug_output.get("replay", None)
        if aug_replay:
            # Replay-based soft masks are not supported with ignore regions.
            assert len(self._ignored_regions) == 0
            mask_replay = remove_image_only_transforms(aug_replay)
            self._soft_mask_aug = ReplayCompose._restore_for_replay(
                mask_replay
            )

        self._compute_objects_areas()
        # Objects cropped out by the augmentation disappear entirely.
        self.remove_small_objects(min_area=1)

        self._augmented = True

    def reset_augmentation(self):
        """Restore the un-augmented image, masks and object table."""
        if not self._augmented:
            return
        orig_image, orig_masks, orig_objects = self._original_data
        self.image = orig_image
        self._encoded_masks = orig_masks
        self._objects = deepcopy(orig_objects)
        self._augmented = False
        self._soft_mask_aug = None

    def remove_small_objects(self, min_area):
        """Drop every object whose pixel area is below ``min_area``."""
        if self._objects and "area" not in list(self._objects.values())[0]:
            self._compute_objects_areas()

        for obj_id, obj_info in list(self._objects.items()):
            if obj_info["area"] < min_area:
                self._remove_object(obj_id)

    def get_object_mask(self, obj_id):
        """Binary int32 mask of one object; ignored regions are set to -1."""
        layer_indx, mask_id = self._objects[obj_id]["mapping"]
        obj_mask = (self._encoded_masks[:, :, layer_indx] == mask_id).astype(
            np.int32
        )
        if self._ignored_regions:
            for layer_indx, mask_id in self._ignored_regions:
                ignore_mask = self._encoded_masks[:, :, layer_indx] == mask_id
                obj_mask[ignore_mask] = -1

        return obj_mask

    def get_soft_object_mask(self, obj_id):
        """Soft (float in [0, 1]) mask of one object, obtained by replaying
        the geometric augmentation on the original binary mask."""
        assert self._soft_mask_aug is not None
        original_encoded_masks = self._original_data[1]
        layer_indx, mask_id = self._objects[obj_id]["mapping"]
        obj_mask = (
            original_encoded_masks[:, :, layer_indx] == mask_id
        ).astype(np.float32)
        obj_mask = self._soft_mask_aug(
            image=obj_mask, mask=original_encoded_masks
        )["image"]
        return np.clip(obj_mask, 0, 1)

    def get_background_mask(self):
        """Boolean mask of pixels belonging to no object in any layer."""
        return np.max(self._encoded_masks, axis=2) == 0

    @property
    def objects_ids(self):
        return list(self._objects.keys())

    @property
    def gt_mask(self):
        # Only meaningful for single-object samples.
        assert len(self._objects) == 1
        return self.get_object_mask(self.objects_ids[0])

    @property
    def root_objects(self):
        """Ids of objects at the top of the hierarchy (no parent)."""
        return [
            obj_id
            for obj_id, obj_info in self._objects.items()
            if obj_info["parent"] is None
        ]

    def _compute_objects_areas(self):
        """Recompute per-object pixel areas from the (possibly augmented)
        encoded masks; mask values with no matching object are erased."""
        inverse_index = {
            node["mapping"]: node_id for node_id, node in self._objects.items()
        }
        ignored_regions_keys = set(self._ignored_regions)

        for layer_indx in range(self._encoded_masks.shape[2]):
            objects_ids, objects_areas = get_labels_with_sizes(
                self._encoded_masks[:, :, layer_indx]
            )
            for obj_id, obj_area in zip(objects_ids, objects_areas):
                inv_key = (layer_indx, obj_id)
                if inv_key in ignored_regions_keys:
                    continue
                try:
                    self._objects[inverse_index[inv_key]]["area"] = obj_area
                    del inverse_index[inv_key]
                except KeyError:
                    # Mask value without a registered object: wipe it.
                    layer = self._encoded_masks[:, :, layer_indx]
                    layer[layer == obj_id] = 0
                    self._encoded_masks[:, :, layer_indx] = layer

        # Objects whose pixels vanished (e.g. cropped away) get area 0.
        for obj_id in inverse_index.values():
            self._objects[obj_id]["area"] = 0

    def _remove_object(self, obj_id):
        """Delete an object, re-parenting its children to its parent."""
        obj_info = self._objects[obj_id]
        obj_parent = obj_info["parent"]
        for child_id in obj_info["children"]:
            self._objects[child_id]["parent"] = obj_parent

        if obj_parent is not None:
            parent_children = self._objects[obj_parent]["children"]
            parent_children = [x for x in parent_children if x != obj_id]
            self._objects[obj_parent]["children"] = (
                parent_children + obj_info["children"]
            )

        del self._objects[obj_id]

    def __len__(self):
        return len(self._objects)
import pickle as pkl
from pathlib import Path
import cv2
import numpy as np
from ritm_annotation.data.base import ISDataset
from ritm_annotation.data.sample import DSample
class PascalVocDataset(ISDataset):
    """Pascal VOC instance-segmentation dataset.

    Expects the standard VOCdevkit layout with ``JPEGImages``,
    ``SegmentationObject`` and ``ImageSets/Segmentation`` subfolders.
    """
    def __init__(self, dataset_path, split="train", dry_run=False, **kwargs):
        """Load the sample list for *split*.

        ``test`` samples come from a pre-built pickle (image names plus
        per-image instance ids); other splits read the plain text lists.
        With ``dry_run`` no files are read and the dataset is empty.
        """
        super().__init__(**kwargs)
        assert split in {"train", "val", "trainval", "test"}
        self.dataset_path = Path(dataset_path)
        self._images_path = self.dataset_path / "JPEGImages"
        self._insts_path = self.dataset_path / "SegmentationObject"
        self.dataset_split = split
        if dry_run:
            self.dataset_samples = []
        else:
            if split == "test":
                with open(
                    self.dataset_path / "ImageSets/Segmentation/test.pickle",
                    "rb",
                ) as f:
                    self.dataset_samples, self.instance_ids = pkl.load(f)
            else:
                with open(
                    self.dataset_path / f"ImageSets/Segmentation/{split}.txt",
                    "r",
                ) as f:
                    self.dataset_samples = [
                        name.strip() for name in f.readlines()
                    ]
    def get_sample(self, index) -> DSample:
        """Read one image plus its instance mask and wrap them in a DSample."""
        sample_id = self.dataset_samples[index]
        image_path = str(self._images_path / f"{sample_id}.jpg")
        mask_path = str(self._insts_path / f"{sample_id}.png")
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        instances_mask = cv2.imread(mask_path)
        instances_mask = cv2.cvtColor(
            instances_mask, cv2.COLOR_BGR2GRAY
        ).astype(np.int32)
        if self.dataset_split == "test":
            # Keep only the requested instance; gray value 220 marks
            # "ignore" pixels in the VOC masks.
            instance_id = self.instance_ids[index]
            mask = np.zeros_like(instances_mask)
            mask[instances_mask == 220] = 220  # ignored area
            mask[instances_mask == instance_id] = 1
            objects_ids = [1]
            instances_mask = mask
        else:
            objects_ids = np.unique(instances_mask)  # type: ignore
            objects_ids = [x for x in objects_ids if x != 0 and x != 220]
        return DSample(
            image,
            instances_mask,
            objects_ids=objects_ids,
            ignore_ids=[220],
            sample_id=index,
        ) | /ritm_annotation-0.3.0.tar.gz/ritm_annotation-0.3.0/ritm_annotation/data/datasets/pascalvoc.py | 0.455925 | 0.290402 | pascalvoc.py | pypi
import json
import pickle
import random
from copy import deepcopy
from pathlib import Path
import cv2
import numpy as np
from ritm_annotation.data.base import ISDataset
from ritm_annotation.data.sample import DSample
class CocoLvisDataset(ISDataset):
    """Combined COCO+LVIS dataset with per-image mask hierarchies."""
    def __init__(
        self,
        dataset_path,
        split="train",
        stuff_prob=0.0,
        allow_list_name=None,
        anno_file="hannotation.pickle",
        dry_run=False,
        **kwargs,
    ):
        """Load (image_id, annotation) pairs for *split*.

        ``stuff_prob`` is the probability of keeping "stuff" (non-instance)
        masks in a sample; ``allow_list_name`` optionally restricts the
        dataset to a JSON list of allowed image ids.
        """
        super(CocoLvisDataset, self).__init__(**kwargs)
        dataset_path = Path(dataset_path)
        self._split_path = dataset_path / split
        self.split = split
        self._images_path = self._split_path / "images"
        self._masks_path = self._split_path / "masks"
        self.stuff_prob = stuff_prob
        if not dry_run:
            with open(self._split_path / anno_file, "rb") as f:
                self.dataset_samples = sorted(pickle.load(f).items())
        else:
            self.dataset_samples = []
        if allow_list_name is not None:
            allow_list_path = self._split_path / allow_list_name
            with open(allow_list_path, "r") as f:
                allow_images_ids = json.load(f)
            allow_images_ids = set(allow_images_ids)
            self.dataset_samples = [
                sample
                for sample in self.dataset_samples
                if sample[0] in allow_images_ids
            ]
    def get_sample(self, index) -> DSample:
        """Decode an image and its packed mask layers into a DSample."""
        image_id, sample = self.dataset_samples[index]
        image_path = self._images_path / f"{image_id}.jpg"
        image = cv2.imread(str(image_path))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        packed_masks_path = self._masks_path / f"{image_id}.pickle"
        with open(packed_masks_path, "rb") as f:
            encoded_layers, objs_mapping = pickle.load(f)
        layers = [
            cv2.imdecode(x, cv2.IMREAD_UNCHANGED) for x in encoded_layers
        ]
        layers = np.stack(layers, axis=2)
        instances_info = deepcopy(sample["hierarchy"])
        for inst_id, inst_info in list(instances_info.items()):
            if inst_info is None:
                inst_info = {"children": [], "parent": None, "node_level": 0}
                instances_info[inst_id] = inst_info
            inst_info["mapping"] = objs_mapping[inst_id]
        if self.stuff_prob > 0 and random.random() < self.stuff_prob:
            # Keep stuff regions as extra root objects.
            for inst_id in range(
                sample["num_instance_masks"], len(objs_mapping)
            ):
                instances_info[inst_id] = {
                    "mapping": objs_mapping[inst_id],
                    "parent": None,
                    "children": [],
                }
        else:
            # Drop stuff regions: zero their pixels out of the mask layers.
            for inst_id in range(
                sample["num_instance_masks"], len(objs_mapping)
            ):
                layer_indx, mask_id = objs_mapping[inst_id]
                layers[:, :, layer_indx][  # type: ignore
                    layers[:, :, layer_indx] == mask_id  # type: ignore
                ] = 0
        return DSample(image, layers, objects=instances_info) | /ritm_annotation-0.3.0.tar.gz/ritm_annotation-0.3.0/ritm_annotation/data/datasets/coco_lvis.py | 0.422028 | 0.167866 | coco_lvis.py | pypi
from pathlib import Path
import cv2
import numpy as np
from ritm_annotation.data.base import ISDataset
from ritm_annotation.data.sample import DSample
class ImagesDirDataset(ISDataset):
    """Dataset backed by parallel ``images``/``masks`` directories.

    A mask either shares its image's stem or uses ``<stem>_<idx>`` to
    attach multiple masks to one image.
    """
    def __init__(
        self,
        dataset_path,
        images_dir_name="images",
        masks_dir_name="masks",
        **kwargs
    ):
        super(ImagesDirDataset, self).__init__(**kwargs)
        self.dataset_path = Path(dataset_path)
        self._images_path = self.dataset_path / images_dir_name
        self._insts_path = self.dataset_path / masks_dir_name
        images_list = [x for x in sorted(self._images_path.glob("*.*"))]
        samples = {x.stem: {"image": x, "masks": []} for x in images_list}
        for mask_path in self._insts_path.glob("*.*"):
            mask_name = mask_path.stem
            if mask_name in samples:
                samples[mask_name]["masks"].append(mask_path)
                continue
            # Fall back to the "<image_stem>_<index>" naming convention.
            mask_name_split = mask_name.split("_")
            if mask_name_split[-1].isdigit():
                mask_name = "_".join(mask_name_split[:-1])
                assert mask_name in samples
                samples[mask_name]["masks"].append(mask_path)
        for x in samples.values():
            assert len(x["masks"]) > 0, x["image"]
        self.dataset_samples = [v for k, v in sorted(samples.items())]
    def get_sample(self, index) -> DSample:
        """Build a multi-layer DSample: gray 128 marks ignore, >128 object."""
        sample = self.dataset_samples[index]
        image_path = str(sample["image"])
        objects = []
        ignored_regions = []
        masks = []
        for indx, mask_path in enumerate(sample["masks"]):
            gt_mask = cv2.imread(str(mask_path))[:, :, 0].astype(np.int32)
            instances_mask = np.zeros_like(gt_mask)
            instances_mask[gt_mask == 128] = 2  # ignored region
            instances_mask[gt_mask > 128] = 1  # object pixels
            masks.append(instances_mask)
            objects.append((indx, 1))
            ignored_regions.append((indx, 2))
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return DSample(
            image,
            np.stack(masks, axis=2),
            objects_ids=objects,
            ignore_ids=ignored_regions,
            sample_id=index,
        ) | /ritm_annotation-0.3.0.tar.gz/ritm_annotation-0.3.0/ritm_annotation/data/datasets/images_dir.py | 0.712432 | 0.331282 | images_dir.py | pypi
import json
import random
from collections import defaultdict
from pathlib import Path
import cv2
import numpy as np
from ritm_annotation.data.base import ISDataset
from ritm_annotation.data.sample import DSample
class LvisDataset(ISDataset):
    """LVIS dataset; instance masks are rasterized from polygon annotations."""
    def __init__(
        self,
        dataset_path,
        split="train",
        max_overlap_ratio=0.5,
        dry_run=False,
        **kwargs,
    ):
        """Index LVIS annotations by image id and keep annotated images.

        ``max_overlap_ratio`` drops instances that overlap already-placed
        instances (in either direction) by more than this fraction.
        """
        super(LvisDataset, self).__init__(**kwargs)
        dataset_path = Path(dataset_path)
        train_categories_path = dataset_path / "train_categories.json"
        self._train_path = dataset_path / "train"
        self._val_path = dataset_path / "val"
        self.split = split
        self.max_overlap_ratio = max_overlap_ratio
        if not dry_run:
            with open(
                dataset_path / split / f"lvis_{self.split}.json", "r"
            ) as f:
                json_annotation = json.loads(f.read())
        else:
            json_annotation = dict(annotations=[], images=[])
        self.annotations = defaultdict(list)
        for x in json_annotation["annotations"]:
            self.annotations[x["image_id"]].append(x)
        if not train_categories_path.exists():
            self.generate_train_categories(
                dataset_path, train_categories_path, dry_run=dry_run
            )
        self.dataset_samples = [
            x
            for x in json_annotation["images"]
            if len(self.annotations[x["id"]]) > 0
        ]
    def get_sample(self, index) -> DSample:
        """Compose an instance map, skipping heavily overlapping instances."""
        image_info = self.dataset_samples[index]
        image_id, image_url = image_info["id"], image_info["coco_url"]
        image_filename = image_url.split("/")[-1]
        image_annotations = self.annotations[image_id]
        random.shuffle(image_annotations)
        # LVISv1 splits do not match older LVIS splits (some images in val may come from COCO train2017) # noqa: E501
        if "train2017" in image_url:
            image_path = self._train_path / "images" / image_filename
        else:
            image_path = self._val_path / "images" / image_filename
        image = cv2.imread(str(image_path))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        instances_mask = None
        instances_area: dict[int, int] = defaultdict(int)
        objects_ids = []
        for indx, obj_annotation in enumerate(image_annotations):
            mask = self.get_mask_from_polygon(obj_annotation, image)
            object_mask = mask > 0
            object_area = object_mask.sum()
            if instances_mask is None:
                instances_mask = np.zeros_like(object_mask, dtype=np.int32)
            # Fraction of each already-placed instance hidden by the new one.
            overlap_ids = np.bincount(instances_mask[object_mask].flatten())
            overlap_areas = [
                overlap_area / instances_area[inst_id]
                for inst_id, overlap_area in enumerate(overlap_ids)
                if overlap_area > 0 and inst_id > 0
            ]
            # Fraction of the new instance hidden by existing ones.
            overlap_ratio = (
                np.logical_and(object_mask, instances_mask > 0).sum()
                / object_area
            )
            if overlap_areas:
                overlap_ratio = max(overlap_ratio, max(overlap_areas))
            if overlap_ratio > self.max_overlap_ratio:
                continue
            instance_id = indx + 1
            instances_mask[object_mask] = instance_id
            instances_area[instance_id] = object_area
            objects_ids.append(instance_id)
        return DSample(image, instances_mask, objects_ids=objects_ids)
    @staticmethod
    def get_mask_from_polygon(annotation, image):
        """Rasterize an annotation's polygon list into a binary int32 mask."""
        mask = np.zeros(image.shape[:2], dtype=np.int32)
        for contour_points in annotation["segmentation"]:
            contour_points = np.array(contour_points).reshape((-1, 2))
            contour_points = np.round(contour_points).astype(np.int32)[
                np.newaxis, :
            ]
            cv2.fillPoly(mask, contour_points, 1)
        return mask
    @staticmethod
    def generate_train_categories(
        dataset_path, train_categories_path, dry_run=False
    ):
        """Extract the category list from the train annotations into a JSON."""
        if not dry_run:
            with open(dataset_path / "train/lvis_train.json", "r") as f:
                annotation = json.load(f)
        else:
            annotation = dict(categories=[])
        with open(train_categories_path, "w") as f:
            json.dump(annotation["categories"], f, indent=1) | /ritm_annotation-0.3.0.tar.gz/ritm_annotation-0.3.0/ritm_annotation/data/datasets/lvis.py | 0.493164 | 0.194884 | lvis.py | pypi
import os
import pickle as pkl
import random
from pathlib import Path
import cv2
import numpy as np
from ritm_annotation.data.base import ISDataset
from ritm_annotation.data.sample import DSample
class OpenImagesDataset(ISDataset):
    """Open Images dataset; one randomly chosen instance mask per sample."""
    def __init__(self, dataset_path, split="train", dry_run=False, **kwargs):
        super().__init__(**kwargs)
        assert split in {"train", "val", "test"}
        self.dataset_path = Path(dataset_path)
        self._split_path = self.dataset_path / split
        self._images_path = self._split_path / "images"
        self._masks_path = self._split_path / "masks"
        self.dataset_split = split
        clean_anno_path = (
            self._split_path
            / f"{split}-annotations-object-segmentation_clean.pkl"
        )
        if os.path.exists(clean_anno_path):
            with clean_anno_path.open("rb") as f:
                annotations = pkl.load(f)
        elif dry_run:
            annotations = dict(image_id_to_masks=[], dataset_samples=[])
        else:
            # NOTE(review): "_" is assumed to be a gettext translation
            # function installed into builtins elsewhere — confirm.
            raise RuntimeError(
                _("Can't find annotations at {anno_path}").format(
                    anno_path=clean_anno_path
                )
            )
        self.image_id_to_masks = annotations["image_id_to_masks"]
        self.dataset_samples = annotations["dataset_samples"]
    def get_sample(self, index) -> DSample:
        """Read an image and one random mask; resize both to a common size."""
        image_id = self.dataset_samples[index]
        image_path = str(self._images_path / f"{image_id}.jpg")
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        mask_paths = self.image_id_to_masks[image_id]
        # select random mask for an image
        mask_path = str(self._masks_path / random.choice(mask_paths))
        instances_mask = cv2.imread(mask_path)
        instances_mask = cv2.cvtColor(instances_mask, cv2.COLOR_BGR2GRAY)
        instances_mask[instances_mask > 0] = 1
        instances_mask = instances_mask.astype(np.int32)
        # Image and mask may disagree in size; shrink both to the overlap.
        min_width = min(image.shape[1], instances_mask.shape[1])
        min_height = min(image.shape[0], instances_mask.shape[0])
        if image.shape[0] != min_height or image.shape[1] != min_width:
            image = cv2.resize(
                image, (min_width, min_height), interpolation=cv2.INTER_LINEAR
            )
        if (
            instances_mask.shape[0] != min_height
            or instances_mask.shape[1] != min_width
        ):
            instances_mask = cv2.resize(
                instances_mask,
                (min_width, min_height),
                interpolation=cv2.INTER_NEAREST,
            )
        object_ids = [1] if instances_mask.sum() > 0 else []
        return DSample(
            image, instances_mask, objects_ids=object_ids, sample_id=index
        ) | /ritm_annotation-0.3.0.tar.gz/ritm_annotation-0.3.0/ritm_annotation/data/datasets/openimages.py | 0.451327 | 0.270985 | openimages.py | pypi
import os
import pickle as pkl
import random
from pathlib import Path
import cv2
import numpy as np
from ritm_annotation.data.base import ISDataset
from ritm_annotation.data.sample import DSample
from ritm_annotation.utils.misc import get_labels_with_sizes
class ADE20kDataset(ISDataset):
    """ADE20k dataset; one randomly chosen mask layer per sample."""
    def __init__(
        self,
        dataset_path,
        split="train",
        stuff_prob=0.0,
        dry_run=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        assert split in {"train", "val"}
        self.dataset_path = Path(dataset_path)
        self.dataset_split = split
        self.dataset_split_folder = (
            "training" if split == "train" else "validation"
        )
        self.stuff_prob = stuff_prob
        anno_path = (
            self.dataset_path / f"{split}-annotations-object-segmentation.pkl"
        )
        if os.path.exists(anno_path):
            with anno_path.open("rb") as f:
                annotations = pkl.load(f)
        elif dry_run:
            annotations = {}
        else:
            # NOTE(review): "_" is assumed to be a gettext translation
            # function installed into builtins elsewhere — confirm.
            raise RuntimeError(
                _("Can't find annotations at {anno_path}").format(
                    anno_path=anno_path
                )
            )
        self.annotations = annotations
        self.dataset_samples = list(annotations.keys())
    def get_sample(self, index) -> DSample:
        """Read an image plus a random mask layer; optionally drop stuff."""
        image_id = self.dataset_samples[index]
        sample_annos = self.annotations[image_id]
        image_path = str(
            self.dataset_path / sample_annos["folder"] / f"{image_id}.jpg"
        )
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # select random mask for an image
        layer = random.choice(sample_annos["layers"])
        mask_path = str(
            self.dataset_path / sample_annos["folder"] / layer["mask_name"]
        )
        instances_mask = cv2.imread(mask_path, cv2.IMREAD_UNCHANGED)[
            :, :, 0
        ]  # the B channel holds instances
        instances_mask = instances_mask.astype(np.int32)
        object_ids, _ = get_labels_with_sizes(instances_mask)
        if (self.stuff_prob <= 0) or (random.random() > self.stuff_prob):
            # remove stuff objects
            for i, object_id in enumerate(object_ids):
                if i in layer["stuff_instances"]:
                    instances_mask[instances_mask == object_id] = 0
            object_ids, _ = get_labels_with_sizes(instances_mask)
        return DSample(
            image, instances_mask, objects_ids=object_ids, sample_id=index
        ) | /ritm_annotation-0.3.0.tar.gz/ritm_annotation-0.3.0/ritm_annotation/data/datasets/ade20k.py | 0.408749 | 0.176885 | ade20k.py | pypi
import pickle as pkl
from pathlib import Path
import cv2
import numpy as np
from scipy.io import loadmat
from ritm_annotation.data.base import ISDataset
from ritm_annotation.data.sample import DSample
from ritm_annotation.utils.misc import (
get_bbox_from_mask,
get_labels_with_sizes,
)
class SBDDataset(ISDataset):
    """SBD training dataset; filters degenerate ("buggy") instance masks."""
    def __init__(
        self,
        dataset_path,
        split="train",
        buggy_mask_thresh=0.08,
        dry_run=False,
        **kwargs,
    ):
        super(SBDDataset, self).__init__(**kwargs)
        assert split in {"train", "val"}
        self.dataset_path = Path(dataset_path)
        self.dataset_split = split
        self._images_path = self.dataset_path / "img"
        self._insts_path = self.dataset_path / "inst"
        self._buggy_objects = dict()  # cache: sample index -> buggy obj ids
        self._buggy_mask_thresh = buggy_mask_thresh
        if dry_run:
            self.dataset_samples = []
        else:
            with open(self.dataset_path / f"{split}.txt", "r") as f:
                self.dataset_samples = [x.strip() for x in f.readlines()]
    def get_sample(self, index):
        """Load one image and its instance mask (from the MATLAB .mat file)."""
        image_name = self.dataset_samples[index]
        image_path = str(self._images_path / f"{image_name}.jpg")
        inst_info_path = str(self._insts_path / f"{image_name}.mat")
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        instances_mask = loadmat(str(inst_info_path))["GTinst"][0][0][
            0
        ].astype(np.int32)
        instances_mask = self.remove_buggy_masks(index, instances_mask)
        instances_ids, _ = get_labels_with_sizes(instances_mask)
        return DSample(
            image, instances_mask, objects_ids=instances_ids, sample_id=index
        )
    def remove_buggy_masks(self, index, instances_mask):
        """Zero out objects whose mask fills too little of its bounding box.

        Per-sample results are cached in ``self._buggy_objects``. Mutates
        *instances_mask* in place and also returns it.
        """
        if self._buggy_mask_thresh > 0.0:
            buggy_image_objects = self._buggy_objects.get(index, None)
            if buggy_image_objects is None:
                buggy_image_objects = []
                instances_ids, _ = get_labels_with_sizes(instances_mask)
                for obj_id in instances_ids:
                    obj_mask = instances_mask == obj_id
                    mask_area = obj_mask.sum()
                    bbox = get_bbox_from_mask(obj_mask)
                    bbox_area = (bbox[1] - bbox[0] + 1) * (
                        bbox[3] - bbox[2] + 1
                    )
                    obj_area_ratio = mask_area / bbox_area
                    if obj_area_ratio < self._buggy_mask_thresh:
                        buggy_image_objects.append(obj_id)
                self._buggy_objects[index] = buggy_image_objects
            for obj_id in buggy_image_objects:
                instances_mask[instances_mask == obj_id] = 0
        return instances_mask
class SBDEvaluationDataset(ISDataset):
    """SBD evaluation dataset: one (image, instance) pair per sample."""
    def __init__(self, dataset_path, split="val", dry_run=False, **kwargs):
        super(SBDEvaluationDataset, self).__init__(**kwargs)
        assert split in {"train", "val"}
        self.dataset_path = Path(dataset_path)
        self.dataset_split = split
        self._images_path = self.dataset_path / "img"
        self._insts_path = self.dataset_path / "inst"
        if dry_run:
            self.dataset_samples = []
        else:
            with open(self.dataset_path / f"{split}.txt", "r") as f:
                self.dataset_samples = [x.strip() for x in f.readlines()]
            self.dataset_samples = self.get_sbd_images_and_ids_list()
    def get_sample(self, index) -> DSample:
        """Load one image and binarize the mask of the chosen instance."""
        image_name, instance_id = self.dataset_samples[index]
        image_path = str(self._images_path / f"{image_name}.jpg")
        inst_info_path = str(self._insts_path / f"{image_name}.mat")
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        instances_mask = loadmat(str(inst_info_path))["GTinst"][0][0][
            0
        ].astype(np.int32)
        instances_mask[instances_mask != instance_id] = 0
        instances_mask[instances_mask > 0] = 1
        return DSample(image, instances_mask, objects_ids=[1], sample_id=index)
    def get_sbd_images_and_ids_list(self):
        """Enumerate (image, instance_id) pairs, cached in a pickle file."""
        pkl_path = (
            self.dataset_path / f"{self.dataset_split}_images_and_ids_list.pkl"
        )
        if pkl_path.exists():
            with open(str(pkl_path), "rb") as fp:
                images_and_ids_list = pkl.load(fp)
        else:
            images_and_ids_list = []
            for sample in self.dataset_samples:
                inst_info_path = str(self._insts_path / f"{sample}.mat")
                instances_mask = loadmat(str(inst_info_path))["GTinst"][0][0][
                    0
                ].astype(np.int32)
                instances_ids, _ = get_labels_with_sizes(instances_mask)
                for instances_id in instances_ids:
                    images_and_ids_list.append((sample, instances_id))
            with open(str(pkl_path), "wb") as fp:
                pkl.dump(images_and_ids_list, fp)
        return images_and_ids_list | /ritm_annotation-0.3.0.tar.gz/ritm_annotation-0.3.0/ritm_annotation/data/datasets/sbd.py | 0.446495 | 0.214712 | sbd.py | pypi
import json
import random
from pathlib import Path
import cv2
import numpy as np
from ritm_annotation.data.base import ISDataset
from ritm_annotation.data.sample import DSample
class CocoDataset(ISDataset):
    """COCO panoptic-segmentation dataset."""
    def __init__(
        self,
        dataset_path,
        split="train",
        stuff_prob=0.0,
        dry_run=False,
        **kwargs,
    ):
        super(CocoDataset, self).__init__(**kwargs)
        self.split = split
        self.dataset_path = Path(dataset_path)
        self.stuff_prob = stuff_prob
        self.dry_run = dry_run
        self.load_samples()
    def load_samples(self):
        """Read the panoptic annotation JSON and split things/stuff ids."""
        annotation_path = (
            self.dataset_path / "annotations" / f"panoptic_{self.split}.json"
        )
        self.labels_path = (
            self.dataset_path / "annotations" / f"panoptic_{self.split}"
        )
        self.images_path = self.dataset_path / self.split
        if not self.dry_run:
            with open(annotation_path, "r") as f:
                annotation = json.load(f)
        else:
            annotation = dict(annotations=[], categories=[])
        self.dataset_samples = annotation["annotations"]
        self._categories = annotation["categories"]
        self._stuff_labels = [
            x["id"] for x in self._categories if x["isthing"] == 0
        ]
        self._things_labels = [
            x["id"] for x in self._categories if x["isthing"] == 1
        ]
        self._things_labels_set = set(self._things_labels)
        self._stuff_labels_set = set(self._stuff_labels)
    def get_sample(self, index) -> DSample:
        """Decode a panoptic label image into an instance id map."""
        dataset_sample = self.dataset_samples[index]
        image_path = self.images_path / self.get_image_name(
            dataset_sample["file_name"]
        )
        label_path = self.labels_path / dataset_sample["file_name"]
        image = cv2.imread(str(image_path))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        label = cv2.imread(str(label_path), cv2.IMREAD_UNCHANGED).astype(
            np.int32
        )
        # Panoptic segment ids are encoded across the three color channels.
        label = (
            256 * 256 * label[:, :, 0] + 256 * label[:, :, 1] + label[:, :, 2]
        )
        instance_map = np.full_like(label, 0)
        things_ids = []
        stuff_ids = []
        for segment in dataset_sample["segments_info"]:
            class_id = segment["category_id"]
            obj_id = segment["id"]
            if class_id in self._things_labels_set:
                if segment["iscrowd"] == 1:
                    continue
                things_ids.append(obj_id)
            else:
                stuff_ids.append(obj_id)
            instance_map[label == obj_id] = obj_id
        if self.stuff_prob > 0 and random.random() < self.stuff_prob:
            instances_ids = things_ids + stuff_ids
        else:
            # Drop stuff segments from the instance map.
            instances_ids = things_ids
            for stuff_id in stuff_ids:
                instance_map[instance_map == stuff_id] = 0
        return DSample(image, instance_map, objects_ids=instances_ids)
    @classmethod
    def get_image_name(cls, panoptic_name):
        """Map a panoptic PNG file name to its RGB JPEG file name."""
        return panoptic_name.replace(".png", ".jpg") | /ritm_annotation-0.3.0.tar.gz/ritm_annotation-0.3.0/ritm_annotation/data/datasets/coco.py | 0.430746 | 0.185652 | coco.py | pypi
import inspect
from copy import deepcopy
from functools import wraps
import torch.nn as nn
def serialize(init):
    """Decorator for ``__init__`` that records the constructor arguments.

    Stores a ``_config`` dict on the instance with the fully qualified
    class name plus every parameter (explicit or default), so the model
    can later be re-created by :func:`load_model`.
    """
    parameters = list(inspect.signature(init).parameters)
    @wraps(init)
    def new_init(self, *args, **kwargs):
        params = deepcopy(kwargs)
        # Map positional args onto their parameter names (skip "self").
        for pname, value in zip(parameters[1:], args):
            params[pname] = value
        config = {"class": get_classname(self.__class__), "params": dict()}
        specified_params = set(params.keys())
        # Fill in defaults for everything the caller did not pass.
        for pname, param in get_default_params(self.__class__).items():
            if pname not in params:
                params[pname] = param.default
        for name, value in list(params.items()):
            param_type = "builtin"
            if inspect.isclass(value):
                param_type = "class"
                value = get_classname(value)
            config["params"][name] = {
                "type": param_type,
                "value": value,
                "specified": name in specified_params,
            }
        setattr(self, "_config", config)
        init(self, *args, **kwargs)
    return new_init
def load_model(config, **kwargs):
    """Instantiate a model from a config produced by :func:`serialize`.

    The legacy "isegm" package prefix is rewritten to "ritm_annotation";
    ``kwargs`` override any recorded constructor arguments.
    """
    model_class = get_class_from_str(
        config["class"].replace("isegm", "ritm_annotation")
    )
    model_default_params = get_default_params(model_class)
    model_args = dict()
    for pname, param in config["params"].items():
        value = param["value"]
        if param["type"] == "class":
            value = get_class_from_str(value)
        if pname not in model_default_params and not param["specified"]:
            continue
        assert pname in model_default_params
        # Skip values that merely repeat the current class defaults.
        if (
            not param["specified"]
            and model_default_params[pname].default == value
        ):
            continue
        model_args[pname] = value
    model_args.update(kwargs)
    return model_class(**model_args)
def get_config_repr(config):
    """Render a serialized model config as a human-readable string."""
    lines = ["Model: {}\n".format(config["class"])]
    for name, entry in config["params"].items():
        shown = entry["value"]
        if entry["type"] == "class":
            # Show only the bare class name, not the full dotted path.
            shown = shown.split(".")[-1]
        suffix = "" if entry["specified"] else " (default)"
        lines.append("{:<22} = {:<12}{}\n".format(name, str(shown), suffix))
    return "".join(lines)
def get_default_params(some_class):
    """Collect defaulted ``__init__`` parameters across the class MRO.

    The first class in the MRO that defines a parameter wins;
    ``nn.Module`` and ``object`` are skipped.
    """
    defaults = dict()
    for klass in some_class.mro():
        if klass in (nn.Module, object):
            continue
        signature = inspect.signature(klass.__init__)
        for name, param in signature.parameters.items():
            if param.default != param.empty and name not in defaults:
                defaults[name] = param
    return defaults
def get_classname(cls):
    """Fully qualified "<module>.<qualname>" name of *cls*.

    The module prefix is omitted for the Python 2 "__builtin__" module.
    """
    qualname = cls.__qualname__
    module = cls.__module__
    if module is None or module == "__builtin__":
        return qualname
    return f"{module}.{qualname}"
def get_class_from_str(class_str):
    """Import and return the object named by a dotted path string."""
    components = class_str.split(".")
    mod = __import__(".".join(components[:-1]))
    # __import__ returns the top-level package; walk down attribute by
    # attribute to reach the target object.
    for comp in components[1:]:
        mod = getattr(mod, comp)
    return mod | /ritm_annotation-0.3.0.tar.gz/ritm_annotation-0.3.0/ritm_annotation/utils/serialization.py | 0.710628 | 0.218993 | serialization.py | pypi
import importlib
import logging
import numpy as np
import torch
logger = logging.getLogger(__name__)
def get_dims_with_exclusion(dim, exclude=None):
    """Axis indices ``[0, dim)`` with *exclude* removed when given.

    Raises ValueError if *exclude* is out of range (list.remove semantics).
    """
    axes = [*range(dim)]
    if exclude is not None:
        axes.remove(exclude)
    return axes
def save_checkpoint(
    net, checkpoints_path, epoch=None, prefix="", multi_gpu=False
):
    """Save model weights plus its serialized config under *checkpoints_path*.

    Without *epoch* the file is named ``last_checkpoint.pth``; *prefix*,
    when non-empty, is prepended to the file name. ``multi_gpu`` unwraps
    a DataParallel-style ``.module`` before saving.
    """
    if epoch is None:
        checkpoint_name = "last_checkpoint.pth"
    else:
        checkpoint_name = f"{epoch:03d}.pth"
    if prefix:
        checkpoint_name = f"{prefix}_{checkpoint_name}"
    if not checkpoints_path.exists():
        checkpoints_path.mkdir(parents=True)
    checkpoint_path = checkpoints_path / checkpoint_name
    # NOTE(review): "_" is assumed to be a gettext translation function
    # installed into builtins elsewhere — confirm.
    logger.debug(
        _("Save checkpoint to {checkpoint_path}").format(
            checkpoint_path=str(checkpoint_path)
        )
    )
    net = net.module if multi_gpu else net
    torch.save(
        {"state_dict": net.state_dict(), "config": net._config},
        str(checkpoint_path),
    )
def get_bbox_from_mask(mask):
    """Tight bounding box (rmin, rmax, cmin, cmax), inclusive, of the
    nonzero pixels of a 2-D mask. Raises IndexError on an empty mask."""
    row_idx = np.flatnonzero(mask.any(axis=1))
    col_idx = np.flatnonzero(mask.any(axis=0))
    return row_idx[0], row_idx[-1], col_idx[0], col_idx[-1]
def expand_bbox(bbox, expand_ratio, min_crop_size=None):
    """Scale an inclusive (rmin, rmax, cmin, cmax) box about its center.

    The new height/width are ``expand_ratio`` times the old ones, clamped
    from below by *min_crop_size* when given. Edges are rounded to ints.
    """
    rmin, rmax, cmin, cmax = bbox
    row_center = 0.5 * (rmin + rmax)
    col_center = 0.5 * (cmin + cmax)
    new_h = expand_ratio * (rmax - rmin + 1)
    new_w = expand_ratio * (cmax - cmin + 1)
    if min_crop_size is not None:
        new_h = max(new_h, min_crop_size)
        new_w = max(new_w, min_crop_size)
    half_h = 0.5 * new_h
    half_w = 0.5 * new_w
    return (
        int(round(row_center - half_h)),
        int(round(row_center + half_h)),
        int(round(col_center - half_w)),
        int(round(col_center + half_w)),
    )
def clamp_bbox(bbox, rmin, rmax, cmin, cmax):
    """Clip a (rmin, rmax, cmin, cmax) box to the given limits."""
    r0, r1, c0, c1 = bbox
    r0 = max(rmin, r0)
    r1 = min(rmax, r1)
    c0 = max(cmin, c0)
    c1 = min(cmax, c1)
    return r0, r1, c0, c1
def get_bbox_iou(b1, b2):
    """Approximate IoU of two (rmin, rmax, cmin, cmax) boxes as the
    product of the per-axis segment IoUs."""
    row_iou = get_segments_iou(b1[:2], b2[:2])
    col_iou = get_segments_iou(b1[2:4], b2[2:4])
    return row_iou * col_iou
def get_segments_iou(s1, s2):
    """IoU of two 1-D inclusive (start, end) segments."""
    a, b = s1
    c, d = s2
    overlap = min(b, d) - max(a, c) + 1
    intersection = overlap if overlap > 0 else 0
    # Lower-bound the union to avoid division by zero on degenerate input.
    union = max(1e-6, max(b, d) - min(a, c) + 1)
    return intersection / union
def get_labels_with_sizes(x):
    """Unique nonzero labels of integer array *x* with their pixel counts.

    Returns (labels, sizes) as two parallel Python lists.
    """
    counts = np.bincount(x.flatten())
    labels = [lbl for lbl in np.nonzero(counts)[0].tolist() if lbl != 0]
    return labels, counts[labels].tolist()
def ignore_params_then_call(func):
    """Wrap *func* so the wrapper accepts (and discards) any arguments.

    Useful for plugging zero-argument callbacks into APIs that insist on
    passing positional/keyword arguments to them.
    """
    from functools import wraps

    @wraps(func)  # preserve __name__/__doc__ of the wrapped callable
    def ret(*args, **kwargs):
        return func()

    return ret
def load_module(script_path, module_name="module"):
    """Dynamically import a Python file and return the loaded module."""
    # NOTE(review): "_" is assumed to be a gettext translation function
    # installed into builtins elsewhere — confirm.
    logger.debug(
        _("Loading module '{module_path}'...").format(module_path=script_path)
    )
    spec = importlib.util.spec_from_file_location(module_name, script_path)
    assert spec is not None, _(
        "Can't import module at '{module_path}'"
    ).format(module_path=script_path)
    model_script = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(model_script)
    return model_script | /ritm_annotation-0.3.0.tar.gz/ritm_annotation-0.3.0/ritm_annotation/utils/misc.py | 0.45423 | 0.21264 | misc.py | pypi
from functools import lru_cache
import cv2
import numpy as np
def visualize_instances(
    imask,
    bg_color=255,
    boundaries_color=None,
    boundaries_width=1,
    boundaries_alpha=0.8,
):
    """Colorize an instance index mask; optionally blend object boundaries.

    *imask* is an integer instance map (0 = background); *bg_color*
    overrides the background palette entry; when *boundaries_color* is
    given, boundaries of width *boundaries_width* are alpha-blended in
    with weight *boundaries_alpha*.
    """
    num_objects = imask.max() + 1
    # Copy the palette: get_palette() is lru_cached and returns a shared
    # array, so writing palette[0] below would corrupt the cached palette
    # for every other caller.
    palette = get_palette(num_objects).copy()
    if bg_color is not None:
        palette[0] = bg_color
    result = palette[imask].astype(np.uint8)
    if boundaries_color is not None:
        boundaries_mask = get_boundaries(
            imask, boundaries_width=boundaries_width
        )
        tresult = result.astype(np.float32)
        tresult[boundaries_mask] = boundaries_color
        tresult = tresult * boundaries_alpha + (1 - boundaries_alpha) * result
        result = tresult.astype(np.uint8)
    return result
@lru_cache(maxsize=16)
def get_palette(num_cls):
    """Pascal-VOC style color palette as a (num_cls, 3) int32 array.

    Each label's octal digits are spread across the RGB channels, most
    significant bit first. Results are cached (and therefore shared).
    """
    colors = np.zeros((num_cls, 3), dtype=np.int32)
    for idx in range(num_cls):
        label = idx
        shift = 7
        while label > 0:
            for channel in range(3):
                colors[idx, channel] |= ((label >> channel) & 1) << shift
            shift -= 1
            label >>= 3
    return colors
def visualize_mask(mask, num_cls):
    """Colorize an integer label mask with the shared palette.

    NOTE(review): mutates *mask* in place (-1 entries become 0) — callers
    that reuse the mask afterwards should pass a copy.
    """
    palette = get_palette(num_cls)
    mask[mask == -1] = 0
    return palette[mask].astype(np.uint8)
def visualize_proposals(
    proposals_info, point_color=(255, 0, 0), point_radius=1
):
    """Render a proposal probability map with candidate points drawn on top.

    *proposals_info* is a (proposal_map, colors, candidates) triple; the
    candidates are (row, col) coordinates.
    """
    proposal_map, colors, candidates = proposals_info
    proposal_map = draw_probmap(proposal_map)
    for x, y in candidates:
        proposal_map = cv2.circle(
            proposal_map, (y, x), point_radius, point_color, -1
        )
    return proposal_map
def draw_probmap(x):
    """Map a [0, 1] probability map to a color heatmap image."""
    return cv2.applyColorMap((x * 255).astype(np.uint8), cv2.COLORMAP_HOT)
def draw_points(image, points, color, radius=3):
    """Draw filled circles at (row, col) points on a copy of *image*.

    Points with a negative row are skipped. A third component per point,
    when present, selects a radius bucket (0 -> 8, 1 -> 6, 2 -> 4, else 2).
    """
    image = image.copy()
    for p in points:
        if p[0] < 0:
            continue
        if len(p) == 3:
            pradius = {0: 8, 1: 6, 2: 4}[p[2]] if p[2] < 3 else 2
        else:
            pradius = radius
        image = cv2.circle(image, (int(p[1]), int(p[0])), pradius, color, -1)
    return image
def draw_instance_map(x, palette=None):
    """Colorize an instance index map using *palette* (shared one by default)."""
    num_colors = x.max() + 1
    if palette is None:
        palette = get_palette(num_colors)
    return palette[x].astype(np.uint8)
def blend_mask(image, mask, alpha=0.6):
    """Alpha-blend a colorized instance mask over *image*."""
    # Shift -1 ("unknown") labels to 0 without mutating the caller's mask.
    if mask.min() == -1:
        mask = mask.copy() + 1
    imap = draw_instance_map(mask)
    result = (image * (1 - alpha) + alpha * imap).astype(np.uint8)
    return result
def get_boundaries(instances_masks, boundaries_width=1):
    """Boolean mask of per-object boundary pixels.

    For every nonzero object id, erode its mask by *boundaries_width*
    iterations of a 3x3 elliptic kernel and mark the removed pixels.
    """
    boundaries = np.zeros(
        (instances_masks.shape[0], instances_masks.shape[1]), dtype=bool
    )
    # The structuring element is loop-invariant: build it once.
    # (np.bool was removed in NumPy 1.24; use the builtin bool dtype.)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    for obj_id in np.unique(instances_masks.flatten()):
        if obj_id == 0:  # background carries no boundary
            continue
        obj_mask = instances_masks == obj_id
        inner_mask = cv2.erode(
            obj_mask.astype(np.uint8), kernel, iterations=boundaries_width
        ).astype(bool)
        obj_boundary = np.logical_xor(
            obj_mask, np.logical_and(inner_mask, obj_mask)
        )
        boundaries = np.logical_or(boundaries, obj_boundary)
    return boundaries
def draw_with_blend_and_clicks(
    img,
    mask=None,
    alpha=0.6,
    clicks_list=None,
    pos_color=(0, 255, 0),
    neg_color=(255, 0, 0),
    radius=4,
):
    """Overlay a segmentation mask and click markers on an image.

    Clicks are objects with ``coords`` and ``is_positive`` attributes;
    positive clicks use *pos_color*, negative ones *neg_color*.
    """
    result = img.copy()
    if mask is not None:
        palette = get_palette(np.max(mask) + 1)
        rgb_mask = palette[mask.astype(np.uint8)]
        mask_region = (mask > 0).astype(np.uint8)
        # Blend only inside the mask region; the background stays as-is.
        result = (
            result * (1 - mask_region[:, :, np.newaxis])
            + (1 - alpha) * mask_region[:, :, np.newaxis] * result
            + alpha * rgb_mask
        )
        result = result.astype(np.uint8)
        # result = (result * (1 - alpha) + alpha * rgb_mask).astype(np.uint8)
    if clicks_list is not None and len(clicks_list) > 0:
        pos_points = [
            click.coords for click in clicks_list if click.is_positive
        ]
        neg_points = [
            click.coords for click in clicks_list if not click.is_positive
        ]
        result = draw_points(result, pos_points, pos_color, radius=radius)
        result = draw_points(result, neg_points, neg_color, radius=radius)
    return result | /ritm_annotation-0.3.0.tar.gz/ritm_annotation-0.3.0/ritm_annotation/utils/vis.py | 0.603932 | 0.350408 | vis.py | pypi
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from ritm_annotation.utils import misc
class NormalizedFocalLossSigmoid(nn.Module):
def __init__(
self,
axis=-1,
alpha=0.25,
gamma=2,
max_mult=-1,
eps=1e-12,
from_sigmoid=False,
detach_delimeter=True,
batch_axis=0,
weight=None,
size_average=True,
ignore_label=-1,
):
super(NormalizedFocalLossSigmoid, self).__init__()
self._axis = axis
self._alpha = alpha
self._gamma = gamma
self._ignore_label = ignore_label
self._weight = weight if weight is not None else 1.0
self._batch_axis = batch_axis
self._from_logits = from_sigmoid
self._eps = eps
self._size_average = size_average
self._detach_delimeter = detach_delimeter
self._max_mult = max_mult
self._k_sum = 0
self._m_max = 0
def forward(self, pred, label):
one_hot = label > 0.5
sample_weight = label != self._ignore_label
if not self._from_logits:
pred = torch.sigmoid(pred)
alpha = torch.where(
one_hot,
self._alpha * sample_weight,
(1 - self._alpha) * sample_weight,
)
pt = torch.where(
sample_weight, 1.0 - torch.abs(label - pred), torch.ones_like(pred)
)
beta = (1 - pt) ** self._gamma
sw_sum = torch.sum(sample_weight, dim=(-2, -1), keepdim=True)
beta_sum = torch.sum(beta, dim=(-2, -1), keepdim=True)
mult = sw_sum / (beta_sum + self._eps)
if self._detach_delimeter:
mult = mult.detach()
beta = beta * mult
if self._max_mult > 0:
beta = torch.clamp_max(beta, self._max_mult)
with torch.no_grad():
ignore_area = (
torch.sum(
label == self._ignore_label,
dim=tuple(range(1, label.dim())),
)
.cpu()
.numpy()
)
sample_mult = (
torch.mean(mult, dim=tuple(range(1, mult.dim()))).cpu().numpy()
)
if np.any(ignore_area == 0):
self._k_sum = (
0.9 * self._k_sum
+ 0.1 * sample_mult[ignore_area == 0].mean()
)
beta_pmax, _ = torch.flatten(beta, start_dim=1).max(dim=1)
beta_pmax = beta_pmax.mean().item()
self._m_max = 0.8 * self._m_max + 0.2 * beta_pmax
loss = (
-alpha
* beta
* torch.log(
torch.min(
pt + self._eps,
torch.ones(1, dtype=torch.float).to(pt.device),
)
)
)
loss = self._weight * (loss * sample_weight)
if self._size_average:
bsum = torch.sum(
sample_weight,
dim=misc.get_dims_with_exclusion(
sample_weight.dim(), self._batch_axis
),
)
loss = torch.sum(
loss,
dim=misc.get_dims_with_exclusion(loss.dim(), self._batch_axis),
) / (bsum + self._eps)
else:
loss = torch.sum(
loss,
dim=misc.get_dims_with_exclusion(loss.dim(), self._batch_axis),
)
return loss
def log_states(self, sw, name, global_step):
    """Write the running normalizer/beta statistics to a summary writer."""
    for suffix, stat in (("_k", self._k_sum), ("_m", self._m_max)):
        sw.add_scalar(tag=name + suffix, value=stat, global_step=global_step)
class FocalLoss(nn.Module):
    """Sigmoid focal loss (Lin et al., "Focal Loss for Dense Object
    Detection") with support for an ignore label.

    Pixels whose label equals ``ignore_label`` contribute zero loss.
    """

    def __init__(
        self,
        axis=-1,
        alpha=0.25,
        gamma=2,
        from_logits=False,
        batch_axis=0,
        weight=None,
        num_class=None,
        eps=1e-9,
        size_average=True,
        scale=1.0,
        ignore_label=-1,
    ):
        super(FocalLoss, self).__init__()
        self._axis = axis
        self._alpha = alpha  # class-balance weight for the positive class
        self._gamma = gamma  # focusing exponent
        self._ignore_label = ignore_label
        self._weight = weight if weight is not None else 1.0
        self._batch_axis = batch_axis

        self._scale = scale  # final multiplier on the reduced loss
        self._num_class = num_class
        self._from_logits = from_logits
        self._eps = eps
        self._size_average = size_average

    def forward(self, pred, label, sample_weight=None):
        # NOTE(review): the ``sample_weight`` argument is immediately
        # overwritten by the ignore-label mask below, so it has no effect —
        # confirm whether callers rely on passing it.
        one_hot = label > 0.5
        sample_weight = label != self._ignore_label

        if not self._from_logits:
            pred = torch.sigmoid(pred)

        # alpha for positives, (1 - alpha) for negatives; zero where ignored.
        alpha = torch.where(
            one_hot,
            self._alpha * sample_weight,
            (1 - self._alpha) * sample_weight,
        )
        # pt = probability of the correct class; 1 for ignored pixels.
        pt = torch.where(
            sample_weight, 1.0 - torch.abs(label - pred), torch.ones_like(pred)
        )

        beta = (1 - pt) ** self._gamma

        # Clamp pt + eps at 1 so the log stays non-positive.
        loss = (
            -alpha
            * beta
            * torch.log(
                torch.min(
                    pt + self._eps,
                    torch.ones(1, dtype=torch.float).to(pt.device),
                )
            )
        )
        loss = self._weight * (loss * sample_weight)

        if self._size_average:
            tsum = torch.sum(
                sample_weight,
                dim=misc.get_dims_with_exclusion(
                    label.dim(), self._batch_axis
                ),
            )
            loss = torch.sum(
                loss,
                dim=misc.get_dims_with_exclusion(loss.dim(), self._batch_axis),
            ) / (tsum + self._eps)
        else:
            loss = torch.sum(
                loss,
                dim=misc.get_dims_with_exclusion(loss.dim(), self._batch_axis),
            )

        return self._scale * loss
class SoftIoU(nn.Module):
    """Soft (differentiable) IoU loss for binary segmentation maps."""

    def __init__(self, from_sigmoid=False, ignore_label=-1):
        super().__init__()
        self._from_sigmoid = from_sigmoid
        self._ignore_label = ignore_label

    def forward(self, pred, label):
        """Return ``1 - soft_IoU`` per sample, masking the ignore label."""
        label = label.view(pred.size())
        valid = label != self._ignore_label
        if not self._from_sigmoid:
            pred = torch.sigmoid(pred)

        intersection = torch.sum(pred * label * valid, dim=(1, 2, 3))
        union = torch.sum(torch.max(pred, label) * valid, dim=(1, 2, 3))
        return 1.0 - intersection / (union + 1e-8)
class SigmoidBinaryCrossEntropyLoss(nn.Module):
    """Binary cross-entropy with ignore-label masking.

    Accepts raw logits by default; pass ``from_sigmoid=True`` when ``pred``
    already holds probabilities.
    """

    def __init__(
        self, from_sigmoid=False, weight=None, batch_axis=0, ignore_label=-1
    ):
        super(SigmoidBinaryCrossEntropyLoss, self).__init__()
        self._from_sigmoid = from_sigmoid
        self._ignore_label = ignore_label
        self._weight = 1.0 if weight is None else weight
        self._batch_axis = batch_axis

    def forward(self, pred, label):
        label = label.view(pred.size())
        valid = label != self._ignore_label
        label = torch.where(valid, label, torch.zeros_like(label))

        if self._from_sigmoid:
            eps = 1e-12
            loss = -(
                label * torch.log(pred + eps)
                + (1.0 - label) * torch.log(1.0 - pred + eps)
            )
        else:
            # Numerically stable BCE-with-logits formulation.
            loss = (
                torch.relu(pred) - pred * label + F.softplus(-torch.abs(pred))
            )

        loss = self._weight * (loss * valid)
        reduce_dims = misc.get_dims_with_exclusion(loss.dim(), self._batch_axis)
        return torch.mean(loss, dim=reduce_dims)
import numpy as np
import torch
from ritm_annotation.utils import misc
class TrainMetric(object):
    """Abstract base class for metrics tracked during training.

    ``pred_outputs`` / ``gt_outputs`` name the model-output and ground-truth
    entries this metric consumes.
    """

    def __init__(self, pred_outputs, gt_outputs):
        self.pred_outputs = pred_outputs
        self.gt_outputs = gt_outputs

    def update(self, *args, **kwargs):
        """Accumulate statistics from one batch."""
        raise NotImplementedError

    def get_epoch_value(self):
        """Return the aggregated metric value for the current epoch."""
        raise NotImplementedError

    def reset_epoch_stats(self):
        """Clear the per-epoch accumulators."""
        raise NotImplementedError

    def log_states(self, sw, tag_prefix, global_step):
        """Optionally write internal state to a summary writer (no-op here)."""
        return None

    @property
    def name(self):
        return type(self).__name__
class AdaptiveIoU(TrainMetric):
    """IoU metric whose binarization threshold adapts during training.

    Every update probes the thresholds {t - step, t, t + step}, keeps the
    one with the best mean batch IoU, and moves the running threshold
    toward it with an exponential moving average.
    """

    def __init__(
        self,
        init_thresh=0.4,
        thresh_step=0.025,
        thresh_beta=0.99,
        iou_beta=0.9,
        ignore_label=-1,
        from_logits=True,
        pred_output="instances",
        gt_output="instances",
    ):
        super().__init__(pred_outputs=(pred_output,), gt_outputs=(gt_output,))
        self._ignore_label = ignore_label
        self._from_logits = from_logits
        self._iou_thresh = init_thresh
        self._thresh_step = thresh_step
        self._thresh_beta = thresh_beta  # EMA factor for the threshold
        self._iou_beta = iou_beta  # EMA factor for the IoU estimate
        self._ema_iou = 0.0
        self._epoch_iou_sum = 0.0
        self._epoch_batch_count = 0

    def update(self, pred, gt):
        gt_mask = gt > 0.5
        if self._from_logits:
            pred = torch.sigmoid(pred)

        gt_mask_area = torch.sum(gt_mask, dim=(1, 2)).detach().cpu().numpy()
        # Skip batches with no foreground at all: IoU is undefined there.
        if np.all(gt_mask_area == 0):
            return

        ignore_mask = gt == self._ignore_label
        max_iou = _compute_iou(
            pred > self._iou_thresh, gt_mask, ignore_mask
        ).mean()
        best_thresh = self._iou_thresh
        # Probe one step below and one step above the current threshold.
        for t in [
            best_thresh - self._thresh_step,
            best_thresh + self._thresh_step,
        ]:
            temp_iou = _compute_iou(pred > t, gt_mask, ignore_mask).mean()
            if temp_iou > max_iou:
                max_iou = temp_iou
                best_thresh = t

        self._iou_thresh = (
            self._thresh_beta * self._iou_thresh
            + (1 - self._thresh_beta) * best_thresh
        )
        self._ema_iou = (
            self._iou_beta * self._ema_iou + (1 - self._iou_beta) * max_iou
        )
        self._epoch_iou_sum += max_iou
        self._epoch_batch_count += 1

    def get_epoch_value(self):
        # Mean of the best per-batch IoUs seen since the last reset.
        if self._epoch_batch_count > 0:
            return self._epoch_iou_sum / self._epoch_batch_count
        else:
            return 0.0

    def reset_epoch_stats(self):
        self._epoch_iou_sum = 0.0
        self._epoch_batch_count = 0

    def log_states(self, sw, tag_prefix, global_step):
        sw.add_scalar(
            tag=tag_prefix + "_ema_iou",
            value=self._ema_iou,
            global_step=global_step,
        )
        sw.add_scalar(
            tag=tag_prefix + "_iou_thresh",
            value=self._iou_thresh,
            global_step=global_step,
        )

    @property
    def iou_thresh(self):
        return self._iou_thresh
def _compute_iou(pred_mask, gt_mask, ignore_mask=None, keep_ignore=False):
    """Per-sample IoU between boolean masks.

    Samples with an empty union are dropped from the result unless
    ``keep_ignore`` is set, in which case they are kept with value -1.
    """
    if ignore_mask is not None:
        pred_mask = torch.where(
            ignore_mask, torch.zeros_like(pred_mask), pred_mask
        )

    dims = misc.get_dims_with_exclusion(gt_mask.dim(), 0)
    union_t = torch.mean((pred_mask | gt_mask).float(), dim=dims)
    inter_t = torch.mean((pred_mask & gt_mask).float(), dim=dims)
    union = union_t.detach().cpu().numpy()
    intersection = inter_t.detach().cpu().numpy()

    nonzero = union > 0
    iou = intersection[nonzero] / union[nonzero]
    if not keep_ignore:
        return iou
    result = np.full_like(intersection, -1)
    result[nonzero] = iou
    return result
import numpy as np
import torch
from torch import nn as nn
import ritm_annotation.model.initializer as initializer
def select_activation_function(activation):
    """Resolve an activation spec to something usable as a layer factory.

    Strings "relu"/"softplus" (case-insensitive) map to the corresponding
    ``nn`` classes; an ``nn.Module`` instance is returned unchanged.
    Anything else raises ``ValueError``.
    """
    if isinstance(activation, str):
        name = activation.lower()
        if name == "relu":
            return nn.ReLU
        if name == "softplus":
            return nn.Softplus
    elif isinstance(activation, nn.Module):
        return activation
    raise ValueError(
        _("Unknown activation type {activation}").format(activation=activation)
    )
class BilinearConvTranspose2d(nn.ConvTranspose2d):
    """Transposed convolution whose weights start as a bilinear
    upsampling kernel (see ``initializer.Bilinear``)."""

    def __init__(self, in_channels, out_channels, scale, groups=1):
        self.scale = scale
        kernel = 2 * scale - scale % 2
        super().__init__(
            in_channels,
            out_channels,
            kernel_size=kernel,
            stride=scale,
            padding=1,
            groups=groups,
            bias=False,
        )
        self.apply(
            initializer.Bilinear(
                scale=scale, in_channels=in_channels, groups=groups
            )
        )
class DistMaps(nn.Module):
    """Encode user clicks as per-pixel distance maps (or binary disks).

    The output has two channels per sample: one map built from the first
    half of the click list and one from the second half (positive /
    negative clicks — confirm against the caller's point encoding).
    """

    def __init__(
        self, norm_radius, spatial_scale=1.0, cpu_mode=False, use_disks=False
    ):
        super(DistMaps, self).__init__()
        self.spatial_scale = spatial_scale
        self.norm_radius = norm_radius
        self.cpu_mode = cpu_mode
        self.use_disks = use_disks
        if self.cpu_mode:
            # Cython implementation of the distance transform (CPU path only).
            from ritm_annotation.utils.cython import get_dist_maps

            self._get_dist_maps = get_dist_maps

    def get_coord_features(self, points, batchsize, rows, cols):
        """Build the click-encoding tensor of spatial size rows x cols.

        ``points`` rows are (coord, coord, order) triples; entries whose
        coordinates are negative are treated as invalid/padding clicks.
        """
        if self.cpu_mode:
            coords = []
            for i in range(batchsize):
                norm_delimeter = (
                    1.0
                    if self.use_disks
                    else self.spatial_scale * self.norm_radius
                )
                coords.append(
                    self._get_dist_maps(
                        points[i].cpu().float().numpy(),
                        rows,
                        cols,
                        norm_delimeter,
                    )
                )
            coords = (
                torch.from_numpy(np.stack(coords, axis=0))
                .to(points.device)
                .float()
            )
        else:
            num_points = points.shape[1] // 2
            points = points.view(-1, points.size(2))
            points, points_order = torch.split(points, [2, 1], dim=1)

            # A click is invalid when both coordinates are negative padding.
            invalid_points = torch.max(points, dim=1, keepdim=False)[0] < 0
            row_array = torch.arange(
                start=0,
                end=rows,
                step=1,
                dtype=torch.float32,
                device=points.device,
            )
            col_array = torch.arange(
                start=0,
                end=cols,
                step=1,
                dtype=torch.float32,
                device=points.device,
            )

            # NOTE(review): relies on the legacy default 'ij' indexing of
            # torch.meshgrid; newer torch versions emit a warning here.
            coord_rows, coord_cols = torch.meshgrid(row_array, col_array)
            coords = (
                torch.stack((coord_rows, coord_cols), dim=0)
                .unsqueeze(0)
                .repeat(points.size(0), 1, 1, 1)
            )

            add_xy = (points * self.spatial_scale).view(
                points.size(0), points.size(1), 1, 1
            )
            coords.add_(-add_xy)
            if not self.use_disks:
                coords.div_(self.norm_radius * self.spatial_scale)
            # Squared (optionally normalized) distance to the click.
            coords.mul_(coords)

            coords[:, 0] += coords[:, 1]
            coords = coords[:, :1]

            # Huge distance for invalid clicks so they never win the min.
            coords[invalid_points, :, :, :] = 1e6

            coords = coords.view(-1, num_points, 1, rows, cols)
            coords = coords.min(dim=1)[
                0
            ]  # -> (bs * num_masks * 2) x 1 x h x w
            coords = coords.view(-1, 2, rows, cols)

            if self.use_disks:
                # Binary disk of radius norm_radius around each click.
                coords = (
                    coords <= (self.norm_radius * self.spatial_scale) ** 2
                ).float()
            else:
                coords.sqrt_().mul_(2).tanh_()

        return coords

    def forward(self, x, coords):
        # The spatial size of the maps is taken from the feature tensor x.
        return self.get_coord_features(
            coords, x.shape[0], x.shape[2], x.shape[3]
        )
class ScaleLayer(nn.Module):
    """Multiply the input by a single learnable (non-negative) scalar.

    The parameter is stored divided by ``lr_mult`` and re-multiplied in the
    forward pass, which effectively scales its learning rate by lr_mult.
    """

    def __init__(self, init_value=1.0, lr_mult=1):
        super().__init__()
        self.lr_mult = lr_mult
        stored = init_value / lr_mult
        self.scale = nn.Parameter(
            torch.full((1,), stored, dtype=torch.float32)
        )

    def forward(self, x):
        return x * torch.abs(self.scale * self.lr_mult)
class BatchImageNormalize:
    """Channel-wise (x - mean) / std for NCHW image batches.

    The input tensor is cloned, so callers keep their original data.
    """

    def __init__(self, mean, std, dtype=torch.float):
        # Broadcastable (1, C, 1, 1) buffers.
        self.mean = torch.as_tensor(mean, dtype=dtype)[None, :, None, None]
        self.std = torch.as_tensor(std, dtype=dtype)[None, :, None, None]

    def __call__(self, tensor):
        mean = self.mean.to(tensor.device)
        std = self.std.to(tensor.device)
        result = tensor.clone()
        result.sub_(mean).div_(std)
        return result
import numpy as np
import torch
import torch.nn as nn
class Initializer(object):
    """Base weight initializer, applied with ``module.apply(init)``.

    Normalization layers get gamma/beta initialization; every other module
    with ``weight``/``bias`` gets the generic weight/bias initialization.
    With ``local_init=True`` a module is marked so it is initialized at
    most once.
    """

    def __init__(self, local_init=True, gamma=None):
        self.local_init = local_init
        self.gamma = gamma

    def __call__(self, m):
        # Skip modules already handled by a previous initializer pass.
        if getattr(m, "__initialized", False):
            return

        norm_types = (
            nn.BatchNorm1d,
            nn.BatchNorm2d,
            nn.BatchNorm3d,
            nn.InstanceNorm1d,
            nn.InstanceNorm2d,
            nn.InstanceNorm3d,
            nn.GroupNorm,
            nn.SyncBatchNorm,
        )
        if isinstance(m, norm_types) or "BatchNorm" in m.__class__.__name__:
            if m.weight is not None:
                self._init_gamma(m.weight.data)
            if m.bias is not None:
                self._init_beta(m.bias.data)
        else:
            weight = getattr(m, "weight", None)
            if weight is not None:
                self._init_weight(weight.data)
            bias = getattr(m, "bias", None)
            if bias is not None:
                self._init_bias(bias.data)

        if self.local_init:
            # Bypass nn.Module.__setattr__ bookkeeping for the marker flag.
            object.__setattr__(m, "__initialized", True)

    def _init_weight(self, data):
        nn.init.uniform_(data, -0.07, 0.07)

    def _init_bias(self, data):
        nn.init.constant_(data, 0)

    def _init_gamma(self, data):
        if self.gamma is None:
            nn.init.constant_(data, 1.0)
        else:
            nn.init.normal_(data, 1.0, self.gamma)

    def _init_beta(self, data):
        nn.init.constant_(data, 0)
class Bilinear(Initializer):
    """Initializer that fills (transposed) convolution weights with a
    bilinear upsampling kernel."""

    def __init__(self, scale, groups, in_channels, **kwargs):
        super().__init__(**kwargs)
        self.scale = scale
        self.groups = groups
        self.in_channels = in_channels

    def _init_weight(self, data):
        """Place the bilinear kernel on the matching channel pairs."""
        kernel = self.get_bilinear_kernel(self.scale)
        weight = torch.zeros_like(data)
        for channel in range(self.in_channels):
            # With groups == 1 the kernel goes on the diagonal; for grouped
            # convs each group has a single input channel (index 0).
            target = channel if self.groups == 1 else 0
            weight[channel, target] = kernel
        data[:] = weight

    @staticmethod
    def get_bilinear_kernel(scale):
        """Return the 2-D bilinear upsampling kernel for ``scale``."""
        kernel_size = 2 * scale - scale % 2
        half = (kernel_size + 1) // 2
        center = half - 0.5 * (1 + kernel_size % 2)

        og = np.ogrid[:kernel_size, :kernel_size]
        kernel = (1 - np.abs(og[0] - center) / half) * (
            1 - np.abs(og[1] - center) / half
        )

        return torch.tensor(kernel, dtype=torch.float32)
class XavierGluon(Initializer):
    """Xavier/Glorot initializer with MXNet-Gluon style options.

    ``factor_type`` picks the fan used for scaling ("avg", "in" or "out");
    ``rnd_type`` chooses a uniform or gaussian draw.
    """

    def __init__(
        self, rnd_type="uniform", factor_type="avg", magnitude=3, **kwargs
    ):
        super().__init__(**kwargs)
        self.rnd_type = rnd_type
        self.factor_type = factor_type
        self.magnitude = float(magnitude)

    def _init_weight(self, arr):
        fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(arr)

        if self.factor_type == "avg":
            factor = (fan_in + fan_out) / 2.0
        elif self.factor_type in ("in", "out"):
            factor = fan_in if self.factor_type == "in" else fan_out
        else:
            raise ValueError(_("Incorrect factor type"))
        scale = np.sqrt(self.magnitude / factor)

        if self.rnd_type == "uniform":
            nn.init.uniform_(arr, -scale, scale)
        elif self.rnd_type == "gaussian":
            nn.init.normal_(arr, 0, scale)
        else:
            raise ValueError(_("Unknown random type"))
import numpy as np
import torch
import torch.nn as nn
from ritm_annotation.model.modifiers import LRMult
from ritm_annotation.model.ops import BatchImageNormalize, DistMaps, ScaleLayer
class ISModel(nn.Module):
    """Base class for interactive segmentation models.

    Normalizes the input image, converts user clicks (and optionally the
    previous mask) into extra coordinate channels, and feeds everything to
    a backbone that subclasses provide via ``backbone_forward``.
    """

    def __init__(
        self,
        use_rgb_conv=True,
        with_aux_output=False,
        norm_radius=260,
        use_disks=False,
        cpu_dist_maps=False,
        clicks_groups=None,
        with_prev_mask=False,
        use_leaky_relu=False,
        binary_prev_mask=False,
        conv_extend=False,
        norm_layer=nn.BatchNorm2d,
        norm_mean_std=([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ):
        super().__init__()
        self.with_aux_output = with_aux_output
        self.clicks_groups = clicks_groups
        self.with_prev_mask = with_prev_mask
        self.binary_prev_mask = binary_prev_mask
        self.normalization = BatchImageNormalize(
            norm_mean_std[0], norm_mean_std[1]
        )

        # Two coordinate channels per click group, plus one channel for the
        # previous mask when that feature is enabled.
        self.coord_feature_ch = 2
        if clicks_groups is not None:
            self.coord_feature_ch *= len(clicks_groups)

        if self.with_prev_mask:
            self.coord_feature_ch += 1

        if use_rgb_conv:
            # Fuse RGB and coordinate channels back into a 3-channel input.
            rgb_conv_layers = [
                nn.Conv2d(
                    in_channels=3 + self.coord_feature_ch,
                    out_channels=6 + self.coord_feature_ch,
                    kernel_size=1,
                ),
                norm_layer(6 + self.coord_feature_ch),
                nn.LeakyReLU(negative_slope=0.2)
                if use_leaky_relu
                else nn.ReLU(inplace=True),
                nn.Conv2d(
                    in_channels=6 + self.coord_feature_ch,
                    out_channels=3,
                    kernel_size=1,
                ),
            ]
            self.rgb_conv = nn.Sequential(*rgb_conv_layers)
        elif conv_extend:
            # Feed coordinate features to the backbone separately through a
            # single strided conv with a reduced learning rate.
            self.rgb_conv = None
            self.maps_transform = nn.Conv2d(
                in_channels=self.coord_feature_ch,
                out_channels=64,
                kernel_size=3,
                stride=2,
                padding=1,
            )
            self.maps_transform.apply(LRMult(0.1))
        else:
            self.rgb_conv = None
            mt_layers = [
                nn.Conv2d(
                    in_channels=self.coord_feature_ch,
                    out_channels=16,
                    kernel_size=1,
                ),
                nn.LeakyReLU(negative_slope=0.2)
                if use_leaky_relu
                else nn.ReLU(inplace=True),
                nn.Conv2d(
                    in_channels=16,
                    out_channels=64,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                ),
                ScaleLayer(init_value=0.05, lr_mult=1),
            ]
            self.maps_transform = nn.Sequential(*mt_layers)

        if self.clicks_groups is not None:
            # One DistMaps encoder per click-radius group.
            self.dist_maps = nn.ModuleList()
            for click_radius in self.clicks_groups:
                self.dist_maps.append(
                    DistMaps(
                        norm_radius=click_radius,
                        spatial_scale=1.0,
                        cpu_mode=cpu_dist_maps,
                        use_disks=use_disks,
                    )
                )
        else:
            self.dist_maps = DistMaps(
                norm_radius=norm_radius,
                spatial_scale=1.0,
                cpu_mode=cpu_dist_maps,
                use_disks=use_disks,
            )

    def forward(self, image, points):
        """Run the model; the returned dict's 'instances' (and optional
        'instances_aux') maps are upsampled to the input resolution."""
        image, prev_mask = self.prepare_input(image)
        coord_features = self.get_coord_features(image, prev_mask, points)

        if self.rgb_conv is not None:
            x = self.rgb_conv(torch.cat((image, coord_features), dim=1))
            outputs = self.backbone_forward(x)
        else:
            coord_features = self.maps_transform(coord_features)
            outputs = self.backbone_forward(image, coord_features)

        outputs["instances"] = nn.functional.interpolate(
            outputs["instances"],
            size=image.size()[2:],
            mode="bilinear",
            align_corners=True,
        )
        if self.with_aux_output:
            outputs["instances_aux"] = nn.functional.interpolate(
                outputs["instances_aux"],
                size=image.size()[2:],
                mode="bilinear",
                align_corners=True,
            )
        return outputs

    def prepare_input(self, image):
        """Split off the previous-mask channels (if any) and normalize RGB."""
        prev_mask = None
        if self.with_prev_mask:
            prev_mask = image[:, 3:, :, :]
            image = image[:, :3, :, :]
            if self.binary_prev_mask:
                prev_mask = (prev_mask > 0.5).float()

        image = self.normalization(image)
        return image, prev_mask

    def backbone_forward(self, image, coord_features=None):
        # Subclasses implement the actual segmentation backbone.
        raise NotImplementedError

    def get_coord_features(self, image, prev_mask, points):
        """Convert clicks (and the previous mask) into feature channels."""
        if self.clicks_groups is not None:
            # First group gets 2 clicks, middle groups 1 each, last group
            # takes the remainder (-1 means "all remaining points").
            points_groups = split_points_by_order(
                points,
                groups=(2,) + (1,) * (len(self.clicks_groups) - 2) + (-1,),
            )
            coord_features = [
                dist_map(image, pg)
                for dist_map, pg in zip(self.dist_maps, points_groups)
            ]
            coord_features = torch.cat(coord_features, dim=1)
        else:
            coord_features = self.dist_maps(image, points)

        if prev_mask is not None:
            coord_features = torch.cat((prev_mask, coord_features), dim=1)

        return coord_features
def split_points_by_order(tpoints: torch.Tensor, groups):
    """Distribute encoded click points into per-group tensors.

    ``tpoints`` has shape (batch, 2 * num_points, 3): the first half of
    dim 1 are positive clicks, the second half negative; each row is
    (coord, coord, group_id) with group_id < 0 meaning padding.  A group
    size of -1 in ``groups`` means "all remaining points".  Negative
    clicks assigned to group 0 are redirected to the last group.

    Returns a list of tensors, one per group, shaped (batch, 2 * size, 3)
    and padded with -1; negative clicks are placed in the second half of
    each group tensor.
    """
    points = tpoints.cpu().numpy()
    num_groups = len(groups)
    bs = points.shape[0]
    num_points = points.shape[1] // 2

    groups = [x if x > 0 else num_points for x in groups]
    group_points = [
        np.full((bs, 2 * x, 3), -1, dtype=np.float32) for x in groups
    ]

    # Fix: np.int was removed in NumPy 1.24; use an explicit dtype.
    # Column 0 counts placed positives, column 1 starts at the group size
    # so negatives fill the second half of each group tensor.
    last_point_indx_group = np.zeros((bs, num_groups, 2), dtype=np.int64)
    for group_indx, group_size in enumerate(groups):
        last_point_indx_group[:, group_indx, 1] = group_size

    for bindx in range(bs):
        for pindx in range(2 * num_points):
            point = points[bindx, pindx, :]
            group_id = int(point[2])
            if group_id < 0:
                continue

            is_negative = int(pindx >= num_points)
            if group_id >= num_groups or (
                group_id == 0 and is_negative
            ):  # disable negative first click
                group_id = num_groups - 1

            new_point_indx = last_point_indx_group[
                bindx, group_id, is_negative
            ]
            last_point_indx_group[bindx, group_id, is_negative] += 1

            group_points[group_id][bindx, new_point_indx, :] = point

    group_points = [
        torch.tensor(x, dtype=tpoints.dtype, device=tpoints.device)
        for x in group_points
    ]

    return group_points
import torch
import torch._utils
import torch.nn as nn
import torch.nn.functional as F
class SpatialGather_Module(nn.Module):
    """Aggregate pixel features into per-class context vectors.

    Each class's context is the softmax-weighted average of the pixel
    features, weighted by that class's (scaled) probability map.
    """

    def __init__(self, cls_num=0, scale=1):
        super(SpatialGather_Module, self).__init__()
        self.cls_num = cls_num
        self.scale = scale

    def forward(self, feats, probs):
        batch_size = probs.size(0)
        num_classes = probs.size(1)

        probs = probs.view(batch_size, num_classes, -1)
        probs = F.softmax(self.scale * probs, dim=2)  # batch x k x hw
        pixel_feats = feats.view(batch_size, feats.size(1), -1)
        pixel_feats = pixel_feats.permute(0, 2, 1)  # batch x hw x c

        context = torch.matmul(probs, pixel_feats)  # batch x k x c
        return context.permute(0, 2, 1).unsqueeze(3)  # batch x c x k x 1
class SpatialOCR_Module(nn.Module):
    """OCR module: refine per-pixel features with object-region context.

    The attended context is concatenated with the input features and
    projected back to ``out_channels`` with a 1x1 conv + norm + dropout.
    """

    def __init__(
        self,
        in_channels,
        key_channels,
        out_channels,
        scale=1,
        dropout=0.1,
        norm_layer=nn.BatchNorm2d,
        align_corners=True,
    ):
        super(SpatialOCR_Module, self).__init__()
        self.object_context_block = ObjectAttentionBlock2D(
            in_channels, key_channels, scale, norm_layer, align_corners
        )
        self.conv_bn_dropout = nn.Sequential(
            nn.Conv2d(
                2 * in_channels,  # context concatenated with input feats
                out_channels,
                kernel_size=1,
                padding=0,
                bias=False,
            ),
            nn.Sequential(norm_layer(out_channels), nn.ReLU(inplace=True)),
            nn.Dropout2d(dropout),
        )

    def forward(self, feats, proxy_feats):
        context = self.object_context_block(feats, proxy_feats)
        fused = torch.cat([context, feats], 1)
        return self.conv_bn_dropout(fused)
class ObjectAttentionBlock2D(nn.Module):
    """
    The basic implementation for object context block
    Input:
        N X C X H X W
    Parameters:
        in_channels : the dimension of the input feature map
        key_channels : the dimension after the key/query transform
        scale : choose the scale to downsample the input feature maps (save memory cost)
        bn_type : specify the bn type
    Return:
        N X C X H X W
    """  # noqa: E501

    def __init__(
        self,
        in_channels,
        key_channels,
        scale=1,
        norm_layer=nn.BatchNorm2d,
        align_corners=True,
    ):
        super(ObjectAttentionBlock2D, self).__init__()
        self.scale = scale
        self.in_channels = in_channels
        self.key_channels = key_channels
        self.align_corners = align_corners

        # Optional spatial downsampling of the pixel features (memory saver).
        self.pool = nn.MaxPool2d(kernel_size=(scale, scale))
        # Query transform applied to pixel features (two 1x1 conv blocks).
        self.f_pixel = nn.Sequential(
            nn.Conv2d(
                in_channels=self.in_channels,
                out_channels=self.key_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            nn.Sequential(
                norm_layer(self.key_channels), nn.ReLU(inplace=True)
            ),
            nn.Conv2d(
                in_channels=self.key_channels,
                out_channels=self.key_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            nn.Sequential(
                norm_layer(self.key_channels), nn.ReLU(inplace=True)
            ),
        )
        # Key transform applied to the object (proxy) features.
        self.f_object = nn.Sequential(
            nn.Conv2d(
                in_channels=self.in_channels,
                out_channels=self.key_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            nn.Sequential(
                norm_layer(self.key_channels), nn.ReLU(inplace=True)
            ),
            nn.Conv2d(
                in_channels=self.key_channels,
                out_channels=self.key_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            nn.Sequential(
                norm_layer(self.key_channels), nn.ReLU(inplace=True)
            ),
        )
        # Value transform applied to the object (proxy) features.
        self.f_down = nn.Sequential(
            nn.Conv2d(
                in_channels=self.in_channels,
                out_channels=self.key_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            nn.Sequential(
                norm_layer(self.key_channels), nn.ReLU(inplace=True)
            ),
        )
        # Output projection back to in_channels.
        self.f_up = nn.Sequential(
            nn.Conv2d(
                in_channels=self.key_channels,
                out_channels=self.in_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            nn.Sequential(norm_layer(self.in_channels), nn.ReLU(inplace=True)),
        )

    def forward(self, x, proxy):
        batch_size, h, w = x.size(0), x.size(2), x.size(3)
        if self.scale > 1:
            x = self.pool(x)

        # Flatten spatial dims: query is (N, hw, key), key is (N, key, k),
        # value is (N, k, key).
        query = self.f_pixel(x).view(batch_size, self.key_channels, -1)
        query = query.permute(0, 2, 1)
        key = self.f_object(proxy).view(batch_size, self.key_channels, -1)
        value = self.f_down(proxy).view(batch_size, self.key_channels, -1)
        value = value.permute(0, 2, 1)

        # Scaled dot-product attention over the object regions.
        sim_map = torch.matmul(query, key)
        sim_map = (self.key_channels**-0.5) * sim_map
        sim_map = F.softmax(sim_map, dim=-1)

        # add bg context ...
        context = torch.matmul(sim_map, value)
        context = context.permute(0, 2, 1).contiguous()
        context = context.view(batch_size, self.key_channels, *x.size()[2:])
        context = self.f_up(context)
        if self.scale > 1:
            # Restore the original spatial resolution after pooling.
            context = F.interpolate(
                input=context,
                size=(h, w),
                mode="bilinear",
                align_corners=self.align_corners,
            )

        return context
import os
import numpy as np
import torch
import torch._utils
import torch.nn as nn
import torch.nn.functional as F
from .ocr import SpatialGather_Module, SpatialOCR_Module
from .resnetv1b import BasicBlockV1b, BottleneckV1b
relu_inplace = True
class HighResolutionModule(nn.Module):
    """One multi-resolution HRNet stage: parallel branches plus fusion.

    Each branch processes its own resolution with a residual tower;
    ``fuse_layers`` then exchange information across resolutions at the
    end of the module.
    """

    def __init__(
        self,
        num_branches,
        blocks,
        num_blocks,
        num_inchannels,
        num_channels,
        fuse_method,
        multi_scale_output=True,
        norm_layer=nn.BatchNorm2d,
        align_corners=True,
    ):
        super(HighResolutionModule, self).__init__()
        self._check_branches(
            num_branches, num_blocks, num_inchannels, num_channels
        )

        self.num_inchannels = num_inchannels
        self.fuse_method = fuse_method
        self.num_branches = num_branches
        self.norm_layer = norm_layer
        self.align_corners = align_corners

        self.multi_scale_output = multi_scale_output

        self.branches = self._make_branches(
            num_branches, blocks, num_blocks, num_channels
        )
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(inplace=relu_inplace)

    def _check_branches(
        self, num_branches, num_blocks, num_inchannels, num_channels
    ):
        """Validate that all per-branch configuration lists agree in length."""
        if num_branches != len(num_blocks):
            error_msg = "NUM_BRANCHES({}) <> NUM_BLOCKS({})".format(
                num_branches, len(num_blocks)
            )
            raise ValueError(error_msg)

        if num_branches != len(num_channels):
            error_msg = "NUM_BRANCHES({}) <> NUM_CHANNELS({})".format(
                num_branches, len(num_channels)
            )
            raise ValueError(error_msg)

        if num_branches != len(num_inchannels):
            error_msg = "NUM_BRANCHES({}) <> NUM_INCHANNELS({})".format(
                num_branches, len(num_inchannels)
            )
            raise ValueError(error_msg)

    def _make_one_branch(
        self, branch_index, block, num_blocks, num_channels, stride=1
    ):
        """Residual tower for a single resolution branch."""
        downsample = None
        if (
            stride != 1
            or self.num_inchannels[branch_index]
            != num_channels[branch_index] * block.expansion
        ):
            # Projection shortcut when shape or stride changes.
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.num_inchannels[branch_index],
                    num_channels[branch_index] * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False,
                ),
                self.norm_layer(num_channels[branch_index] * block.expansion),
            )

        layers = []
        layers.append(
            block(
                self.num_inchannels[branch_index],
                num_channels[branch_index],
                stride,
                downsample=downsample,
                norm_layer=self.norm_layer,
            )
        )
        self.num_inchannels[branch_index] = (
            num_channels[branch_index] * block.expansion
        )
        for i in range(1, num_blocks[branch_index]):
            layers.append(
                block(
                    self.num_inchannels[branch_index],
                    num_channels[branch_index],
                    norm_layer=self.norm_layer,
                )
            )

        return nn.Sequential(*layers)

    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        branches = []
        for i in range(num_branches):
            branches.append(
                self._make_one_branch(i, block, num_blocks, num_channels)
            )

        return nn.ModuleList(branches)

    def _make_fuse_layers(self):
        """Build cross-resolution fusion layers.

        fuse_layers[i][j] maps branch j's output to branch i's resolution:
        j > i uses a 1x1 channel projection (upsampling happens at forward
        time), j == i is identity (None), j < i chains stride-2 3x3 convs.
        """
        if self.num_branches == 1:
            return None

        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    fuse_layer.append(
                        nn.Sequential(
                            nn.Conv2d(
                                in_channels=num_inchannels[j],
                                out_channels=num_inchannels[i],
                                kernel_size=1,
                                bias=False,
                            ),
                            self.norm_layer(num_inchannels[i]),
                        )
                    )
                elif j == i:
                    fuse_layer.append(None)
                else:
                    conv3x3s = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            # Last conv in the chain switches to the target
                            # channel count and has no trailing ReLU.
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(
                                nn.Sequential(
                                    nn.Conv2d(
                                        num_inchannels[j],
                                        num_outchannels_conv3x3,
                                        kernel_size=3,
                                        stride=2,
                                        padding=1,
                                        bias=False,
                                    ),
                                    self.norm_layer(num_outchannels_conv3x3),
                                )
                            )
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(
                                nn.Sequential(
                                    nn.Conv2d(
                                        num_inchannels[j],
                                        num_outchannels_conv3x3,
                                        kernel_size=3,
                                        stride=2,
                                        padding=1,
                                        bias=False,
                                    ),
                                    self.norm_layer(num_outchannels_conv3x3),
                                    nn.ReLU(inplace=relu_inplace),
                                )
                            )
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)

    def get_num_inchannels(self):
        return self.num_inchannels

    def forward(self, x):
        """x: list with one tensor per branch; returns the fused list."""
        if self.num_branches == 1:
            return [self.branches[0](x[0])]

        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])

        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
            for j in range(1, self.num_branches):
                if i == j:
                    y = y + x[j]
                elif j > i:
                    # Project channels, then upsample to branch i's size.
                    width_output = x[i].shape[-1]
                    height_output = x[i].shape[-2]
                    y = y + F.interpolate(
                        self.fuse_layers[i][j](x[j]),
                        size=[height_output, width_output],
                        mode="bilinear",
                        align_corners=self.align_corners,
                    )
                else:
                    y = y + self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))

        return x_fuse
class HighResolutionNet(nn.Module):
def __init__(
    self,
    width,
    num_classes,
    ocr_width=256,
    small=False,
    norm_layer=nn.BatchNorm2d,
    align_corners=True,
):
    """Build the HRNet backbone and, optionally, the OCR head.

    width: base channel count of the highest-resolution branch; the other
        branches use 2x, 4x and 8x this width.
    ocr_width: width of the OCR head; 0 disables the OCR head entirely.
    small: use shallower stages (fewer residual blocks and modules).
    """
    super(HighResolutionNet, self).__init__()
    self.norm_layer = norm_layer
    self.width = width
    self.ocr_width = ocr_width
    self.align_corners = align_corners

    # Stem: two stride-2 3x3 convs bring the input to 1/4 resolution.
    self.conv1 = nn.Conv2d(
        3, 64, kernel_size=3, stride=2, padding=1, bias=False
    )
    self.bn1 = norm_layer(64)
    self.conv2 = nn.Conv2d(
        64, 64, kernel_size=3, stride=2, padding=1, bias=False
    )
    self.bn2 = norm_layer(64)
    self.relu = nn.ReLU(inplace=relu_inplace)

    num_blocks = 2 if small else 4

    # Stage 1: single-branch bottleneck tower.
    stage1_num_channels = 64
    self.layer1 = self._make_layer(
        BottleneckV1b, 64, stage1_num_channels, blocks=num_blocks
    )
    stage1_out_channel = BottleneckV1b.expansion * stage1_num_channels

    # Stage 2: two parallel branches.
    self.stage2_num_branches = 2
    num_channels = [width, 2 * width]
    num_inchannels = [
        num_channels[i] * BasicBlockV1b.expansion
        for i in range(len(num_channels))
    ]
    self.transition1 = self._make_transition_layer(
        [stage1_out_channel], num_inchannels
    )
    self.stage2, pre_stage_channels = self._make_stage(
        BasicBlockV1b,
        num_inchannels=num_inchannels,
        num_modules=1,
        num_branches=self.stage2_num_branches,
        num_blocks=2 * [num_blocks],
        num_channels=num_channels,
    )

    # Stage 3: three parallel branches.
    self.stage3_num_branches = 3
    num_channels = [width, 2 * width, 4 * width]
    num_inchannels = [
        num_channels[i] * BasicBlockV1b.expansion
        for i in range(len(num_channels))
    ]
    self.transition2 = self._make_transition_layer(
        pre_stage_channels, num_inchannels
    )
    self.stage3, pre_stage_channels = self._make_stage(
        BasicBlockV1b,
        num_inchannels=num_inchannels,
        num_modules=3 if small else 4,
        num_branches=self.stage3_num_branches,
        num_blocks=3 * [num_blocks],
        num_channels=num_channels,
    )

    # Stage 4: four parallel branches.
    self.stage4_num_branches = 4
    num_channels = [width, 2 * width, 4 * width, 8 * width]
    num_inchannels = [
        num_channels[i] * BasicBlockV1b.expansion
        for i in range(len(num_channels))
    ]
    self.transition3 = self._make_transition_layer(
        pre_stage_channels, num_inchannels
    )
    self.stage4, pre_stage_channels = self._make_stage(
        BasicBlockV1b,
        num_inchannels=num_inchannels,
        num_modules=2 if small else 3,
        num_branches=self.stage4_num_branches,
        num_blocks=4 * [num_blocks],
        num_channels=num_channels,
    )

    # Heads operate on the concatenation of all branch outputs.
    last_inp_channels = int(np.sum(pre_stage_channels))
    if self.ocr_width > 0:
        # OCR head: 3x3 conv -> gather class context -> distribute -> cls.
        ocr_mid_channels = 2 * self.ocr_width
        ocr_key_channels = self.ocr_width

        self.conv3x3_ocr = nn.Sequential(
            nn.Conv2d(
                last_inp_channels,
                ocr_mid_channels,
                kernel_size=3,
                stride=1,
                padding=1,
            ),
            norm_layer(ocr_mid_channels),
            nn.ReLU(inplace=relu_inplace),
        )
        self.ocr_gather_head = SpatialGather_Module(num_classes)

        self.ocr_distri_head = SpatialOCR_Module(
            in_channels=ocr_mid_channels,
            key_channels=ocr_key_channels,
            out_channels=ocr_mid_channels,
            scale=1,
            dropout=0.05,
            norm_layer=norm_layer,
            align_corners=align_corners,
        )
        self.cls_head = nn.Conv2d(
            ocr_mid_channels,
            num_classes,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=True,
        )
        # Auxiliary head used to produce the soft object regions for OCR.
        self.aux_head = nn.Sequential(
            nn.Conv2d(
                last_inp_channels,
                last_inp_channels,
                kernel_size=1,
                stride=1,
                padding=0,
            ),
            norm_layer(last_inp_channels),
            nn.ReLU(inplace=relu_inplace),
            nn.Conv2d(
                last_inp_channels,
                num_classes,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=True,
            ),
        )
    else:
        # Plain classification head without OCR refinement.
        self.cls_head = nn.Sequential(
            nn.Conv2d(
                last_inp_channels,
                last_inp_channels,
                kernel_size=3,
                stride=1,
                padding=1,
            ),
            norm_layer(last_inp_channels),
            nn.ReLU(inplace=relu_inplace),
            nn.Conv2d(
                last_inp_channels,
                num_classes,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=True,
            ),
        )
def _make_transition_layer(
    self, num_channels_pre_layer, num_channels_cur_layer
):
    """Build the adapters between two consecutive HRNet stages.

    Existing branches get a 3x3 conv only when their channel counts
    change (otherwise None = identity); each newly created, coarser
    branch is derived from the coarsest previous branch through a chain
    of stride-2 3x3 convs.
    """
    num_branches_cur = len(num_channels_cur_layer)
    num_branches_pre = len(num_channels_pre_layer)

    transition_layers = []
    for i in range(num_branches_cur):
        if i < num_branches_pre:
            if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                # Channel adapter for an existing branch.
                transition_layers.append(
                    nn.Sequential(
                        nn.Conv2d(
                            num_channels_pre_layer[i],
                            num_channels_cur_layer[i],
                            kernel_size=3,
                            stride=1,
                            padding=1,
                            bias=False,
                        ),
                        self.norm_layer(num_channels_cur_layer[i]),
                        nn.ReLU(inplace=relu_inplace),
                    )
                )
            else:
                transition_layers.append(None)
        else:
            # New branch: downsample the coarsest previous output.
            conv3x3s = []
            for j in range(i + 1 - num_branches_pre):
                inchannels = num_channels_pre_layer[-1]
                outchannels = (
                    num_channels_cur_layer[i]
                    if j == i - num_branches_pre
                    else inchannels
                )
                conv3x3s.append(
                    nn.Sequential(
                        nn.Conv2d(
                            inchannels,
                            outchannels,
                            kernel_size=3,
                            stride=2,
                            padding=1,
                            bias=False,
                        ),
                        self.norm_layer(outchannels),
                        nn.ReLU(inplace=relu_inplace),
                    )
                )
            transition_layers.append(nn.Sequential(*conv3x3s))

    return nn.ModuleList(transition_layers)
def _make_layer(self, block, inplanes, planes, blocks, stride=1):
    """Stack ``blocks`` residual units of the given ``block`` type.

    A 1x1 projection shortcut is added when the first unit changes the
    spatial stride or channel count.
    """
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            nn.Conv2d(
                inplanes,
                planes * block.expansion,
                kernel_size=1,
                stride=stride,
                bias=False,
            ),
            self.norm_layer(planes * block.expansion),
        )

    units = [
        block(
            inplanes,
            planes,
            stride,
            downsample=downsample,
            norm_layer=self.norm_layer,
        )
    ]
    inplanes = planes * block.expansion
    units.extend(
        block(inplanes, planes, norm_layer=self.norm_layer)
        for _ in range(blocks - 1)
    )

    return nn.Sequential(*units)
def _make_stage(
self,
block,
num_inchannels,
num_modules,
num_branches,
num_blocks,
num_channels,
fuse_method="SUM",
multi_scale_output=True,
):
modules = []
for i in range(num_modules):
# multi_scale_output is only used last module
if not multi_scale_output and i == num_modules - 1:
reset_multi_scale_output = False
else:
reset_multi_scale_output = True
modules.append(
HighResolutionModule(
num_branches,
block,
num_blocks,
num_inchannels,
num_channels,
fuse_method,
reset_multi_scale_output,
norm_layer=self.norm_layer,
align_corners=self.align_corners,
)
)
num_inchannels = modules[-1].get_num_inchannels()
return nn.Sequential(*modules), num_inchannels
def forward(self, x, additional_features=None):
feats = self.compute_hrnet_feats(x, additional_features)
if self.ocr_width > 0:
out_aux = self.aux_head(feats)
feats = self.conv3x3_ocr(feats)
context = self.ocr_gather_head(feats, out_aux)
feats = self.ocr_distri_head(feats, context)
out = self.cls_head(feats)
return [out, out_aux]
else:
return [self.cls_head(feats), None]
def compute_hrnet_feats(self, x, additional_features):
x = self.compute_pre_stage_features(x, additional_features)
x = self.layer1(x)
x_list = []
for i in range(self.stage2_num_branches):
if self.transition1[i] is not None:
x_list.append(self.transition1[i](x))
else:
x_list.append(x)
y_list = self.stage2(x_list)
x_list = []
for i in range(self.stage3_num_branches):
if self.transition2[i] is not None:
if i < self.stage2_num_branches:
x_list.append(self.transition2[i](y_list[i]))
else:
x_list.append(self.transition2[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage3(x_list)
x_list = []
for i in range(self.stage4_num_branches):
if self.transition3[i] is not None:
if i < self.stage3_num_branches:
x_list.append(self.transition3[i](y_list[i]))
else:
x_list.append(self.transition3[i](y_list[-1]))
else:
x_list.append(y_list[i])
x = self.stage4(x_list)
return self.aggregate_hrnet_features(x)
def compute_pre_stage_features(self, x, additional_features):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
if additional_features is not None:
x = x + additional_features
x = self.conv2(x)
x = self.bn2(x)
return self.relu(x)
def aggregate_hrnet_features(self, x):
# Upsampling
x0_h, x0_w = x[0].size(2), x[0].size(3)
x1 = F.interpolate(
x[1],
size=(x0_h, x0_w),
mode="bilinear",
align_corners=self.align_corners,
)
x2 = F.interpolate(
x[2],
size=(x0_h, x0_w),
mode="bilinear",
align_corners=self.align_corners,
)
x3 = F.interpolate(
x[3],
size=(x0_h, x0_w),
mode="bilinear",
align_corners=self.align_corners,
)
return torch.cat([x[0], x1, x2, x3], 1)
def load_pretrained_weights(self, pretrained_path=""):
model_dict = self.state_dict()
if not os.path.exists(pretrained_path):
print()
print(
_('File "{file}" does not exist.').format(file=pretrained_path)
)
print(
_(
"You need to specify the correct path to the pre-trained weights.\n" # noqa: E501
"You can download the weights for HRNet from the repository:\n"
"https://github.com/HRNet/HRNet-Image-Classification"
)
)
exit(1)
pretrained_dict = torch.load(
pretrained_path, map_location={"cuda:0": "cpu"}
)
pretrained_dict = {
k.replace("last_layer", "aux_head").replace("model.", ""): v
for k, v in pretrained_dict.items()
}
pretrained_dict = {
k: v for k, v in pretrained_dict.items() if k in model_dict.keys()
}
model_dict.update(pretrained_dict)
self.load_state_dict(model_dict) | /ritm_annotation-0.3.0.tar.gz/ritm_annotation-0.3.0/ritm_annotation/model/modeling/hrnet_ocr.py | 0.842313 | 0.341363 | hrnet_ocr.py | pypi |
import torch
import torch.nn as nn
GLUON_RESNET_TORCH_HUB = "rwightman/pytorch-pretrained-gluonresnet"
class BasicBlockV1b(nn.Module):
    """Two-conv residual block (ResNet v1b basic variant)."""

    expansion = 1

    def __init__(
        self,
        inplanes,
        planes,
        stride=1,
        dilation=1,
        downsample=None,
        previous_dilation=1,
        norm_layer=nn.BatchNorm2d,
    ):
        super(BasicBlockV1b, self).__init__()
        # First conv carries the stride/dilation of the block.
        self.conv1 = nn.Conv2d(
            inplanes,
            planes,
            kernel_size=3,
            stride=stride,
            padding=dilation,
            dilation=dilation,
            bias=False,
        )
        self.bn1 = norm_layer(planes)
        # Second conv keeps resolution and uses the previous dilation.
        self.conv2 = nn.Conv2d(
            planes,
            planes,
            kernel_size=3,
            stride=1,
            padding=previous_dilation,
            dilation=previous_dilation,
            bias=False,
        )
        self.bn2 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Projection shortcut when the block changes shape, else identity.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + identity)
class BottleneckV1b(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet v1b)."""

    expansion = 4

    def __init__(
        self,
        inplanes,
        planes,
        stride=1,
        dilation=1,
        downsample=None,
        previous_dilation=1,
        norm_layer=nn.BatchNorm2d,
    ):
        super(BottleneckV1b, self).__init__()
        # Channel reduction.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = norm_layer(planes)
        # Spatial conv carries stride and dilation.
        self.conv2 = nn.Conv2d(
            planes,
            planes,
            kernel_size=3,
            stride=stride,
            padding=dilation,
            dilation=dilation,
            bias=False,
        )
        self.bn2 = norm_layer(planes)
        # Channel expansion back to planes * 4.
        self.conv3 = nn.Conv2d(
            planes, planes * self.expansion, kernel_size=1, bias=False
        )
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Projection shortcut when the block changes shape, else identity.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + identity)
class ResNetV1b(nn.Module):
    """Pre-trained ResNetV1b Model, which produces the strides of 8 featuremaps at conv5.
    Parameters
    ----------
    block : Block
        Class for the residual block. Options are BasicBlockV1, BottleneckV1.
    layers : list of int
        Numbers of layers in each block
    classes : int, default 1000
        Number of classification classes.
    dilated : bool, default False
        Applying dilation strategy to pretrained ResNet yielding a stride-8 model,
        typically used in Semantic Segmentation.
    norm_layer : object
        Normalization layer used (default: :class:`nn.BatchNorm2d`)
    deep_stem : bool, default False
        Whether to replace the 7x7 conv1 with 3 3x3 convolution layers.
    avg_down : bool, default False
        Whether to use average pooling for projection skip connection between stages/downsample.
    final_drop : float, default 0.0
        Dropout ratio before the final classification layer.
    Reference:
        - He, Kaiming, et al. "Deep residual learning for image recognition."
        Proceedings of the IEEE conference on computer vision and pattern recognition. 2016.
        - Yu, Fisher, and Vladlen Koltun. "Multi-scale context aggregation by dilated convolutions."
    """  # noqa: E501

    def __init__(
        self,
        block,
        layers,
        classes=1000,
        dilated=True,
        deep_stem=False,
        stem_width=32,
        avg_down=False,
        final_drop=0.0,
        norm_layer=nn.BatchNorm2d,
    ):
        # Running channel count consumed/updated by each _make_layer call.
        self.inplanes = stem_width * 2 if deep_stem else 64
        super(ResNetV1b, self).__init__()
        if not deep_stem:
            # Classic single 7x7 stem.
            self.conv1 = nn.Conv2d(
                3, 64, kernel_size=7, stride=2, padding=3, bias=False
            )
        else:
            # "v1s" stem: three 3x3 convs replace the single 7x7 conv.
            self.conv1 = nn.Sequential(
                nn.Conv2d(
                    3,
                    stem_width,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    bias=False,
                ),
                norm_layer(stem_width),
                nn.ReLU(True),
                nn.Conv2d(
                    stem_width,
                    stem_width,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=False,
                ),
                norm_layer(stem_width),
                nn.ReLU(True),
                nn.Conv2d(
                    stem_width,
                    2 * stem_width,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=False,
                ),
            )
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(True)
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
        self.layer1 = self._make_layer(
            block, 64, layers[0], avg_down=avg_down, norm_layer=norm_layer
        )
        self.layer2 = self._make_layer(
            block,
            128,
            layers[1],
            stride=2,
            avg_down=avg_down,
            norm_layer=norm_layer,
        )
        if dilated:
            # Keep stride at 8 and widen the receptive field with dilation.
            self.layer3 = self._make_layer(
                block,
                256,
                layers[2],
                stride=1,
                dilation=2,
                avg_down=avg_down,
                norm_layer=norm_layer,
            )
            self.layer4 = self._make_layer(
                block,
                512,
                layers[3],
                stride=1,
                dilation=4,
                avg_down=avg_down,
                norm_layer=norm_layer,
            )
        else:
            self.layer3 = self._make_layer(
                block,
                256,
                layers[2],
                stride=2,
                avg_down=avg_down,
                norm_layer=norm_layer,
            )
            self.layer4 = self._make_layer(
                block,
                512,
                layers[3],
                stride=2,
                avg_down=avg_down,
                norm_layer=norm_layer,
            )
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.drop = None
        if final_drop > 0.0:
            self.drop = nn.Dropout(final_drop)
        self.fc = nn.Linear(512 * block.expansion, classes)

    def _make_layer(
        self,
        block,
        planes,
        blocks,
        stride=1,
        dilation=1,
        avg_down=False,
        norm_layer=nn.BatchNorm2d,
    ):
        """Stack ``blocks`` residual units, handling shortcut projection,
        the avg-pool downsample variant and the dilation schedule."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = []
            if avg_down:
                # v1b trick: average-pool before the 1x1 projection so the
                # projection conv always has stride 1.
                if dilation == 1:
                    downsample.append(
                        nn.AvgPool2d(
                            kernel_size=stride,
                            stride=stride,
                            ceil_mode=True,
                            count_include_pad=False,
                        )
                    )
                else:
                    downsample.append(
                        nn.AvgPool2d(
                            kernel_size=1,
                            stride=1,
                            ceil_mode=True,
                            count_include_pad=False,
                        )
                    )
                downsample.extend(
                    [
                        nn.Conv2d(
                            self.inplanes,
                            out_channels=planes * block.expansion,
                            kernel_size=1,
                            stride=1,
                            bias=False,
                        ),
                        norm_layer(planes * block.expansion),
                    ]
                )
                downsample = nn.Sequential(*downsample)
            else:
                downsample = nn.Sequential(
                    nn.Conv2d(
                        self.inplanes,
                        out_channels=planes * block.expansion,
                        kernel_size=1,
                        stride=stride,
                        bias=False,
                    ),
                    norm_layer(planes * block.expansion),
                )
        layers = []
        # The first unit of a dilated stage uses half the stage dilation.
        if dilation in (1, 2):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    stride,
                    dilation=1,
                    downsample=downsample,
                    previous_dilation=dilation,
                    norm_layer=norm_layer,
                )
            )
        elif dilation == 4:
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    stride,
                    dilation=2,
                    downsample=downsample,
                    previous_dilation=dilation,
                    norm_layer=norm_layer,
                )
            )
        else:
            # BUGFIX: the named placeholder requires a keyword argument;
            # the old positional .format(dilation) raised KeyError instead
            # of producing the message.
            raise RuntimeError(
                _("=> unknown dilation size: {dilation}").format(
                    dilation=dilation
                )
            )
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    dilation=dilation,
                    previous_dilation=dilation,
                    norm_layer=norm_layer,
                )
            )
        return nn.Sequential(*layers)

    def forward(self, x):
        """Standard classification forward pass: stem, 4 stages,
        global average pooling, optional dropout, linear classifier."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        if self.drop is not None:
            x = self.drop(x)
        x = self.fc(x)
        return x
def _safe_state_dict_filtering(orig_dict, model_dict_keys):
filtered_orig_dict = {}
for k, v in orig_dict.items():
if k in model_dict_keys:
filtered_orig_dict[k] = v
else:
print(_("[ERROR] Failed to load <{k}> in backbone").format(k=k))
return filtered_orig_dict
def resnet34_v1b(pretrained=False, **kwargs):
    """Build a ResNet-34 v1b backbone.

    When ``pretrained`` is true, Gluon ImageNet weights are fetched via
    torch.hub and merged into the freshly constructed model.
    """
    model = ResNetV1b(BasicBlockV1b, [3, 4, 6, 3], **kwargs)
    if not pretrained:
        return model
    model_dict = model.state_dict()
    hub_state = torch.hub.load(
        GLUON_RESNET_TORCH_HUB, "gluon_resnet34_v1b", pretrained=True
    ).state_dict()
    model_dict.update(_safe_state_dict_filtering(hub_state, model_dict.keys()))
    model.load_state_dict(model_dict)
    return model
def resnet50_v1s(pretrained=False, **kwargs):
    """Build a ResNet-50 v1s (deep-stem) backbone.

    When ``pretrained`` is true, Gluon ImageNet weights are fetched via
    torch.hub and merged into the freshly constructed model.
    """
    model = ResNetV1b(
        BottleneckV1b, [3, 4, 6, 3], deep_stem=True, stem_width=64, **kwargs
    )
    if not pretrained:
        return model
    model_dict = model.state_dict()
    hub_state = torch.hub.load(
        GLUON_RESNET_TORCH_HUB, "gluon_resnet50_v1s", pretrained=True
    ).state_dict()
    model_dict.update(_safe_state_dict_filtering(hub_state, model_dict.keys()))
    model.load_state_dict(model_dict)
    return model
def resnet101_v1s(pretrained=False, **kwargs):
    """Build a ResNet-101 v1s (deep-stem) backbone.

    When ``pretrained`` is true, Gluon ImageNet weights are fetched via
    torch.hub and merged into the freshly constructed model.
    """
    model = ResNetV1b(
        BottleneckV1b, [3, 4, 23, 3], deep_stem=True, stem_width=64, **kwargs
    )
    if not pretrained:
        return model
    model_dict = model.state_dict()
    hub_state = torch.hub.load(
        GLUON_RESNET_TORCH_HUB, "gluon_resnet101_v1s", pretrained=True
    ).state_dict()
    model_dict.update(_safe_state_dict_filtering(hub_state, model_dict.keys()))
    model.load_state_dict(model_dict)
    return model
def resnet152_v1s(pretrained=False, **kwargs):
    """Build a ResNet-152 v1s (deep-stem) backbone.

    When ``pretrained`` is true, Gluon ImageNet weights are fetched via
    torch.hub and merged into the freshly constructed model.
    """
    model = ResNetV1b(
        BottleneckV1b, [3, 8, 36, 3], deep_stem=True, stem_width=64, **kwargs
    )
    if not pretrained:
        return model
    model_dict = model.state_dict()
    hub_state = torch.hub.load(
        GLUON_RESNET_TORCH_HUB, "gluon_resnet152_v1s", pretrained=True
    ).state_dict()
    model_dict.update(_safe_state_dict_filtering(hub_state, model_dict.keys()))
    model.load_state_dict(model_dict)
    return model
import torch.nn as nn
from ritm_annotation.model import ops
class ConvHead(nn.Module):
    """Plain convolutional head: ``num_layers`` conv+ReLU(+norm) stages
    followed by a final 1x1 projection to ``out_channels``."""

    def __init__(
        self,
        out_channels,
        in_channels=32,
        num_layers=1,
        kernel_size=3,
        padding=1,
        norm_layer=nn.BatchNorm2d,
    ):
        super(ConvHead, self).__init__()
        stages = []
        for _ in range(num_layers):
            stages.append(
                nn.Conv2d(
                    in_channels, in_channels, kernel_size, padding=padding
                )
            )
            stages.append(nn.ReLU())
            # Identity keeps the Sequential layout stable when no
            # normalization layer is requested.
            stages.append(
                nn.Identity() if norm_layer is None else norm_layer(in_channels)
            )
        stages.append(nn.Conv2d(in_channels, out_channels, 1, padding=0))
        self.convhead = nn.Sequential(*stages)

    def forward(self, *inputs):
        return self.convhead(inputs[0])
class SepConvHead(nn.Module):
    """Head built from depthwise-separable convs, ending in a 1x1 conv.

    An optional dropout layer can be inserted right after the separable
    conv selected by ``dropout_indx``.
    """

    def __init__(
        self,
        num_outputs,
        in_channels,
        mid_channels,
        num_layers=1,
        kernel_size=3,
        padding=1,
        dropout_ratio=0.0,
        dropout_indx=0,
        norm_layer=nn.BatchNorm2d,
    ):
        super(SepConvHead, self).__init__()
        stages = []
        for idx in range(num_layers):
            stages.append(
                SeparableConv2d(
                    in_channels=in_channels if idx == 0 else mid_channels,
                    out_channels=mid_channels,
                    dw_kernel=kernel_size,
                    dw_padding=padding,
                    norm_layer=norm_layer,
                    activation="relu",
                )
            )
            if dropout_ratio > 0 and dropout_indx == idx:
                stages.append(nn.Dropout(dropout_ratio))
        stages.append(
            nn.Conv2d(
                in_channels=mid_channels,
                out_channels=num_outputs,
                kernel_size=1,
                padding=0,
            )
        )
        self.layers = nn.Sequential(*stages)

    def forward(self, *inputs):
        return self.layers(inputs[0])
class SeparableConv2d(nn.Module):
    """Depthwise conv followed by a pointwise 1x1 conv, then an optional
    normalization layer and an activation chosen by name."""

    def __init__(
        self,
        in_channels,
        out_channels,
        dw_kernel,
        dw_padding,
        dw_stride=1,
        activation=None,
        use_bias=False,
        norm_layer=None,
    ):
        super(SeparableConv2d, self).__init__()
        activation_cls = ops.select_activation_function(activation)
        # groups=in_channels makes the first conv depthwise.
        depthwise = nn.Conv2d(
            in_channels,
            in_channels,
            kernel_size=dw_kernel,
            stride=dw_stride,
            padding=dw_padding,
            bias=use_bias,
            groups=in_channels,
        )
        pointwise = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=1,
            stride=1,
            bias=use_bias,
        )
        norm = (
            nn.Identity() if norm_layer is None else norm_layer(out_channels)
        )
        self.body = nn.Sequential(depthwise, pointwise, norm, activation_cls())

    def forward(self, x):
        return self.body(x)
(function($){

/**
 * The bgiframe is chainable and applies the iframe hack to get
 * around zIndex issues in IE6. It will only apply itself in IE6
 * and adds a class to the iframe called 'bgiframe'. The iframe
 * is appended as the first child of the matched element(s)
 * with a tabIndex and zIndex of -1.
 *
 * By default the plugin will take borders, sized with pixel units,
 * into account. If a different unit is used for the border's width,
 * then you will need to use the top and left settings as explained below.
 *
 * NOTICE: This plugin has been reported to cause performance problems
 * when used on elements that change properties (like width, height and
 * opacity) a lot in IE6. Most of these problems have been caused by
 * the expressions used to calculate the elements width, height and
 * borders. Some have reported it is due to the opacity filter. All
 * these settings can be changed if needed as explained below.
 *
 * @example $('div').bgiframe();
 * @before <div><p>Paragraph</p></div>
 * @result <div><iframe class="bgiframe".../><p>Paragraph</p></div>
 *
 * @param Map settings Optional settings to configure the iframe.
 * @option String|Number top The iframe must be offset to the top
 * 		by the width of the top border. This should be a negative
 *      number representing the border-top-width. If a number
 *      is used here, pixels will be assumed. Otherwise, be sure
 *		to specify a unit. An expression could also be used.
 * 		By default the value is "auto" which will use an expression
 * 		to get the border-top-width if it is in pixels.
 * @option String|Number left The iframe must be offset to the left
 * 		by the width of the left border. This should be a negative
 *      number representing the border-left-width. If a number
 *      is used here, pixels will be assumed. Otherwise, be sure
 *		to specify a unit. An expression could also be used.
 * 		By default the value is "auto" which will use an expression
 * 		to get the border-left-width if it is in pixels.
 * @option String|Number width This is the width of the iframe. If
 *		a number is used here, pixels will be assumed. Otherwise, be sure
 * 		to specify a unit. An expression could also be used.
 * 		By default the value is "auto" which will use an expression
 * 		to get the offsetWidth.
 * @option String|Number height This is the height of the iframe. If
 *		a number is used here, pixels will be assumed. Otherwise, be sure
 * 		to specify a unit. An expression could also be used.
 * 		By default the value is "auto" which will use an expression
 * 		to get the offsetHeight.
 * @option Boolean opacity This is a boolean representing whether or not
 * 		to use opacity. If set to true, the opacity of 0 is applied. If
 * 		set to false, the opacity filter is not applied. Default: true.
 * @option String src This setting is provided so that one could change
 * 		the src of the iframe to whatever they need.
 * 		Default: "javascript:false;"
 *
 * @name bgiframe
 * @type jQuery
 * @cat Plugins/bgiframe
 * @author Brandon Aaron (brandon.aaron@gmail.com || http://brandonaaron.net)
 */
// NOTE(review): relies on $.browser, which was removed in jQuery 1.9 —
// this plugin only works with legacy jQuery (<= 1.8).
$.fn.bgIframe = $.fn.bgiframe = function(s) {
	// This is only for IE6
	if ( $.browser.msie && /6.0/.test(navigator.userAgent) ) {
		s = $.extend({
			top     : 'auto', // auto == .currentStyle.borderTopWidth
			left    : 'auto', // auto == .currentStyle.borderLeftWidth
			width   : 'auto', // auto == offsetWidth
			height  : 'auto', // auto == offsetHeight
			opacity : true,
			src     : 'javascript:false;'
		}, s || {});
		// prop: append 'px' to bare numbers, pass strings through as-is.
		var prop = function(n){return n&&n.constructor==Number?n+'px':n;},
		    html = '<iframe class="bgiframe"frameborder="0"tabindex="-1"src="'+s.src+'"'+
		               'style="display:block;position:absolute;z-index:-1;'+
			               (s.opacity !== false?'filter:Alpha(Opacity=\'0\');':'')+
					       'top:'+(s.top=='auto'?'expression(((parseInt(this.parentNode.currentStyle.borderTopWidth)||0)*-1)+\'px\')':prop(s.top))+';'+
					       'left:'+(s.left=='auto'?'expression(((parseInt(this.parentNode.currentStyle.borderLeftWidth)||0)*-1)+\'px\')':prop(s.left))+';'+
					       'width:'+(s.width=='auto'?'expression(this.parentNode.offsetWidth+\'px\')':prop(s.width))+';'+
					       'height:'+(s.height=='auto'?'expression(this.parentNode.offsetHeight+\'px\')':prop(s.height))+';'+
					'"/>';
		return this.each(function() {
			// createElement with a full tag string is an old-IE-only API;
			// acceptable here because this branch only runs in IE6.
			if ( $('> iframe.bgiframe', this).length == 0 )
				this.insertBefore( document.createElement(html), this.firstChild );
		});
	}
	return this;
};

})(jQuery);
from . import Item
from . import ResourceCoefficient
class WorkProduction():
    """Calculate work productivity based on parameters
    Sources:
    http://wiki.rivalregions.com/Work_formulas
    """

    # Input parameters (class-level defaults, overridden per instance)
    resource = None
    user_level = 0
    work_exp = 0
    factory_level = 0
    resource_max = 0
    department_bonus = 0
    wage_percentage = 100
    tax_rate = 0
    profit_share = 0

    # Results computed by calculate()
    _withdrawn_points = 0
    _productivity = 0
    _wage = 0
    _tax = 0
    _factory_profit = 0

    def __init__(self, item=None):
        """Initialize WorkProduction"""
        if item is not None and not isinstance(item, Item):
            raise TypeError
        self.resource = item

    def print_settings(self):
        """Print the settings"""
        lines = (
            "Resource: %16s\n" % (self.resource.name),
            "user_level: %16s\n" % (self.user_level),
            "work_exp: %16s\n" % (self.work_exp),
            "factory_level: %16s\n" % (self.factory_level),
            "resource_max: %16s\n" % (self.resource_max),
            "dep_bonus: %16s\n" % (self.department_bonus),
            "wage_percentage: %16s\n" % (self.wage_percentage),
            "tax_rate: %16s\n" % (self.tax_rate),
        )
        print("".join(lines))

    def _scaled(self, value, energy):
        """Scale a per-10-energy value to the given energy amount."""
        return value * energy / 10

    def productivity(self, energy=10):
        """Return productivity"""
        return self._scaled(self._productivity, energy)

    def withdrawn_points(self, energy=10):
        """Return withdrawn points"""
        return self._scaled(self._withdrawn_points, energy)

    def wage(self, energy=10):
        """Return wage"""
        return self._scaled(self._wage, energy)

    def tax(self, energy=10):
        """Return tax"""
        return self._scaled(self._tax, energy)

    def state_tax(self, energy=10):
        """Return state tax"""
        return self._scaled(
            self._tax / 100 * (100 - self.profit_share), energy
        )

    def autonomy_tax(self, energy=10):
        """Return autonomy tax"""
        return self._scaled(self._tax / 100 * self.profit_share, energy)

    def factory_profit(self, energy=10):
        """Return factory profit"""
        return self._scaled(self._factory_profit, energy)

    def calculate(self):
        """Calculate productivity"""
        coefficient = ResourceCoefficient(
            self.resource, self.resource_max
        ).calculate()
        productivity = 0.2 * \
            pow(self.user_level, 0.8) * \
            coefficient * \
            pow(self.factory_level, 0.8) * \
            pow(self.work_exp / 10, 0.6)
        # Deprecated nation bonus, then department bonus.
        productivity *= 1.2
        productivity *= (1 + self.department_bonus / 100)
        self._productivity = productivity
        self._withdrawn_points = productivity / 40000000
        # Per-resource output scaling.
        adjust = {
            6: lambda value: value * 4,
            15: lambda value: value / 1000,
            21: lambda value: value / 5,
            24: lambda value: value / 1000,
        }.get(self.resource.item_id)
        if adjust is not None:
            self._productivity = adjust(self._productivity)
        # Split into tax, factory profit and worker wage.
        self._tax = self._productivity / 100 * self.tax_rate
        self._wage = self._productivity - self._tax
        self._factory_profit = self._wage / 100 * (100 - self.wage_percentage)
        self._wage = self._wage - self._factory_profit
rivalcfg: Configure SteelSeries gaming mice
===========================================
|Github| |Discord| |PYPI Version| |Github Actions| |Black| |License|
Rivalcfg is a **Python library** and a **CLI utility program** that allows you
to configure SteelSeries gaming mice on Linux and Windows (probably works on
BSD and Mac OS too, but not tested).
I first created this program to configure my Rival 100 and the original Rival
mice, then I added support for other Rival devices thanks to contributors.
Today this project aims to support any SteelSeries gaming mice (Rival,
Sensei,...).
**IMPORTANT:** This is unofficial software. It was made by reverse
engineering devices and is neither supported nor approved by SteelSeries.
.. figure:: https://flozz.github.io/rivalcfg/_images/steelseries_mice.jpg
:alt: SteelSeries Gaming Mice
If you have any trouble running this software, please open an issue on Github:
* https://github.com/flozz/rivalcfg/issues
Documentation
-------------
Main topics:
* `Requirements <https://flozz.github.io/rivalcfg/requirements.html>`_
* `Installing Rivalcfg <https://flozz.github.io/rivalcfg/install.html>`_
* `Documentation of supported devices <https://flozz.github.io/rivalcfg/devices/index.html>`_
* `FAQ <https://flozz.github.io/rivalcfg/faq.html>`_
* `Contributing <https://flozz.github.io/rivalcfg/contributing.html>`_ (please read before opening issues and PRs 😀️)
... and more at:
* https://flozz.github.io/rivalcfg/
Supported Devices
-----------------
.. devices-list-start
SteelSeries Aerox 3:
+--------------------------------------------------------------+-----------+
| SteelSeries Aerox 3 | 1038:1836 |
+--------------------------------------------------------------+-----------+
SteelSeries Aerox 3 Wireless:
+--------------------------------------------------------------+-----------+
| SteelSeries Aerox 3 Wireless (wired mode) | 1038:183a |
+--------------------------------------------------------------+-----------+
| SteelSeries Aerox 3 Wireless (2.4 GHz wireless mode) | 1038:1838 |
+--------------------------------------------------------------+-----------+
SteelSeries Aerox 5 Wireless:
+------------------------------------------------------------------------+-----------+
| SteelSeries Aerox 5 Wireless (wired mode) | 1038:1854 |
+------------------------------------------------------------------------+-----------+
| SteelSeries Aerox 5 Wireless Destiny 2 Edition (wired mode) | 1038:185e |
+------------------------------------------------------------------------+-----------+
| SteelSeries Aerox 5 Wireless Diablo IV Edition (wired mode) | 1038:1862 |
+------------------------------------------------------------------------+-----------+
| SteelSeries Aerox 5 Wireless (2.4 GHz wireless mode) | 1038:1852 |
+------------------------------------------------------------------------+-----------+
| SteelSeries Aerox 5 Wireless Destiny 2 Edition (2.4 GHz wireless mode) | 1038:185c |
+------------------------------------------------------------------------+-----------+
| SteelSeries Aerox 5 Wireless Diablo IV Edition (2.4 GHz wireless mode) | 1038:1860 |
+------------------------------------------------------------------------+-----------+
SteelSeries Aerox 9 Wireless:
+--------------------------------------------------------------+-----------+
| SteelSeries Aerox 9 Wireless (wired mode) | 1038:185a |
+--------------------------------------------------------------+-----------+
| SteelSeries Aerox 9 Wireless (2.4 GHz wireless mode) | 1038:1858 |
+--------------------------------------------------------------+-----------+
SteelSeries Kana v2:
+--------------------------------------------------------------+-----------+
| SteelSeries Kana v2 | 1038:137a |
+--------------------------------------------------------------+-----------+
SteelSeries Kinzu v2:
+--------------------------------------------------------------+-----------+
| SteelSeries Kinzu v2 | 1038:1366 |
+--------------------------------------------------------------+-----------+
| SteelSeries Kinzu v2 | 1038:1378 |
+--------------------------------------------------------------+-----------+
SteelSeries Prime:
+--------------------------------------------------------------+-----------+
| SteelSeries Prime | 1038:182e |
+--------------------------------------------------------------+-----------+
| SteelSeries Prime Rainbow 6 Siege Black Ice Edition | 1038:182a |
+--------------------------------------------------------------+-----------+
| SteelSeries Prime CS:GO Neo Noir Edition | 1038:1856 |
+--------------------------------------------------------------+-----------+
SteelSeries Prime Wireless:
+--------------------------------------------------------------+-----------+
| SteelSeries Prime Wireless (wired mode) | 1038:1842 |
+--------------------------------------------------------------+-----------+
| SteelSeries Prime Mini Wireless (wired mode) | 1038:184a |
+--------------------------------------------------------------+-----------+
| SteelSeries Prime Wireless (2.4 GHz wireless mode) | 1038:1840 |
+--------------------------------------------------------------+-----------+
| SteelSeries Prime Mini Wireless (2.4 GHz wireless mode) | 1038:1848 |
+--------------------------------------------------------------+-----------+
SteelSeries Rival 100 / SteelSeries Rival 105:
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 100 | 1038:1702 |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 100 (Dell China) | 1038:170a |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 100 Dota 2 Edition (retail) | 1038:170b |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 100 Dota 2 Edition (Lenovo) | 1038:170c |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 105 | 1038:1814 |
+--------------------------------------------------------------+-----------+
SteelSeries Rival 110 / SteelSeries Rival 106:
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 110 | 1038:1729 |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 106 | 1038:1816 |
+--------------------------------------------------------------+-----------+
SteelSeries Rival 3:
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 3 | 1038:1824 |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 3 (firmware v0.37.0.0) | 1038:184c |
+--------------------------------------------------------------+-----------+
SteelSeries Rival 300 / SteelSeries Rival:
+--------------------------------------------------------------+-----------+
| SteelSeries Rival | 1038:1384 |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival Dota 2 Edition | 1038:1392 |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 300 | 1038:1710 |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 300 Fallout 4 Edition | 1038:1712 |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 300 Evil Geniuses Edition | 1038:171c |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 300 CS:GO Fade Edition | 1038:1394 |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 300 CS:GO Hyper Beast Edition | 1038:171a |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 300 CS:GO Fade Edition (stm32) | 1038:1716 |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 300 Acer Predator Edition | 1038:1714 |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 300 HP OMEN Edition | 1038:1718 |
+--------------------------------------------------------------+-----------+
SteelSeries Rival 300S:
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 300S | 1038:1810 |
+--------------------------------------------------------------+-----------+
SteelSeries Rival 310:
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 310 | 1038:1720 |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 310 CS:GO Howl Edition | 1038:171e |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 310 PUBG Edition | 1038:1736 |
+--------------------------------------------------------------+-----------+
SteelSeries Rival 3 Wireless:
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 3 Wireless (2.4 GHz mode) | 1038:1830 |
+--------------------------------------------------------------+-----------+
SteelSeries Rival 500:
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 500 | 1038:170e |
+--------------------------------------------------------------+-----------+
SteelSeries Rival 600:
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 600 | 1038:1724 |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 600 Dota 2 Edition | 1038:172e |
+--------------------------------------------------------------+-----------+
SteelSeries Rival 650 Wireless:
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 650 Wireless (wired mode) | 1038:172b |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 650 Wireless (2.4 GHz wireless mode) | 1038:1726 |
+--------------------------------------------------------------+-----------+
SteelSeries Rival 700 / SteelSeries Rival 710:
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 700 | 1038:1700 |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 710 | 1038:1730 |
+--------------------------------------------------------------+-----------+
SteelSeries Rival 95 / SteelSeries Rival 100 PC Bang:
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 95 | 1038:1706 |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 95 MSI Edition | 1038:1707 |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 95 PC Bang | 1038:1704 |
+--------------------------------------------------------------+-----------+
| SteelSeries Rival 100 PC Bang | 1038:1708 |
+--------------------------------------------------------------+-----------+
SteelSeries Sensei 310:
+--------------------------------------------------------------+-----------+
| SteelSeries Sensei 310 | 1038:1722 |
+--------------------------------------------------------------+-----------+
SteelSeries Sensei [RAW]:
+--------------------------------------------------------------+-----------+
| SteelSeries Sensei [RAW] | 1038:1369 |
+--------------------------------------------------------------+-----------+
| SteelSeries Sensei [RAW] Diablo III Edition | 1038:1362 |
+--------------------------------------------------------------+-----------+
| SteelSeries Sensei [RAW] Guild Wars 2 Edition | 1038:136d |
+--------------------------------------------------------------+-----------+
| SteelSeries Sensei [RAW] CoD Black Ops II Edition | 1038:136f |
+--------------------------------------------------------------+-----------+
| SteelSeries Sensei [RAW] World of Tanks Edition | 1038:1380 |
+--------------------------------------------------------------+-----------+
| SteelSeries Sensei [RAW] Heroes of the Storm Edition | 1038:1390 |
+--------------------------------------------------------------+-----------+
SteelSeries Sensei TEN:
+--------------------------------------------------------------+-----------+
| SteelSeries Sensei TEN | 1038:1832 |
+--------------------------------------------------------------+-----------+
| SteelSeries Sensei TEN CS:GO Neon Rider Edition | 1038:1834 |
+--------------------------------------------------------------+-----------+
.. devices-list-end
Supporting this project
-----------------------
Wanna support this project?
* `☕️ Buy me a coffee <https://www.buymeacoffee.com/flozz>`__,
* `❤️ sponsor me on Github <https://github.com/sponsors/flozz>`__,
* `💵️ or give me a tip on PayPal <https://www.paypal.me/0xflozz>`__.
Changelog
---------
* **[NEXT]** (changes on ``master`` that have not been released yet):
* Nothing yet :)
* **v4.10.0:**
* Added Prime Mini Wireless support (@intersectRaven, #210, #207)
* Code quality: more robust type comparison (@flozz)
* **v4.9.1:**
* Improved device profile generation (@airblast-dev, #211)
* Fixed battery level reported at 630% when the mouse is off
(@LennardKittner, #187, #212)
* Unrecognized arguments error when no device is found (@LennardKittner,
#213, #214)
* **v4.9.0:**
  * Added Aerox 5 Wireless Destiny 2 Edition support (@flozz, #205)
* Added Aerox 5 Wireless Diablo IV Edition support (@flozz, #206)
* Updated HIDAPI to v0.14 to fix a macOS Ventura issue (@flozz, #200)
* Removed the default lighting option for the Prime mouse (reported not
working and not needed on this device) (@flozz, #196)
* **v4.8.0:**
* Improved CLI startup time (@gryzus24, #194)
* Added default lighting setting to following devices (@flozz, #191, #195):
* Aerox 3
* Aerox 3 Wireless
* Aerox 5 Wireless
* Aerox 9 Wireless
* Prime
* Prime Wireless
* Added Python 3.11 support (@flozz)
* **4.7.0:**
* Add initial Aerox 9 Wireless support (#188)
* Add Aerox 5 Wireless support (#184)
* Fix inverted firmware version minor and major digits
  * Fix crash when reading battery level of a device in sleep mode
* Improve udev rules reloading
* Rival 100: Rivalcfg is now able to read the firmware version of this device
(#179, @staticssleever668)
* **4.6.0:**
* Add support for the Prime Wireless mouse (#172)
* Aerox 3 Wireless support improved:
* Sleep timer support implemented
* Dim timer support implemented
* Brightness removed to support Dim timer (it is still possible to dim the
LED by setting darker colors)
* Fix a crash when printing debug information with udev rules not installed
* Remove Python 2.7 compatibility code
* **4.5.0:**
* Do not try to open devices when not needed (#170)
* Add support for SteelSeries Prime Rainbow 6 Siege Black Ice Edition
(1038:182A)
* Add support for SteelSeries Prime CS:GO Neo Noir Edition (1038:1856)
* Add initial support for the Rival 3 Wireless mouse (#146)
* Add initial support for the Rival 650 mouse (#112)
* **4.4.0:**
* Add Prime support (#169, @sephiroth99)
* Add Aerox 3 (non wireless version) support (#156)
* Add Aerox 3 Wireless support (#167)
* Save devices settings on disk
* Add Black (code formatter)
* Drop Python 3.5 support
* **WARNING:** This version will be the last one to support Python 2.7
* **4.3.0:**
* Fixes Sensei TEN default config (#158)
* Adds the ``--print-udev`` to generate udev rules and print them to ``stdout`` (#157)
* CLI: Displays a usage message when no argument was given (#152)
* CLI: Write udev warning message to ``stderr`` instead of ``stdout``
* Adds a ``--print-debug`` option to display various information
* Adds a ``--firmware-version`` option to display the firmware version of some devices
* Rivalcfg can now read the firmware version of the following devices:
* Rival 3
* Rival 300
* Rival 310
* Rival 500
* Rival 700 / 710
* Sensei 310
* Sensei TEN
* **4.2.0:**
* Rival 3: support of firmware v0.37.0.0 (#147)
* Support of the Sensei TEN (1038:1832)
* Support of the Sensei TEN CS:GO Neon Rider Edition (1038:1834)
* Rival 500:
* Handles color shift
* Handles button mapping
* **4.1.0:**
* Support of the Rival 300S
* Rival 310 support improved:
* Support of button mapping
* Sensei 310 support improved:
* Support of button mapping
* Rival 3 support improved:
* Colors can now be defined separately
* Button mapping support implemented
* Light effects support implemented
* **4.0.0:**
* Full rewrite of most parts of the software
* Mice are now grouped by families to reduce code duplication
* Improved udev support on Linux:
* Dynamically generate udev rules instead of maintaining a static file
* Automatically check that the rules file is up to date
* Adds a command to update udev rules
* Improved testing:
* Better coverage
* Test the device output to avoid regressions
* Improved documentation:
* A Sphinx documentation was added instead of stacking everything in the
README
* Each device family now have its own documentation page to make it easier
to understand
* Python APIs are now documented
* A document was added to help contribute
* Installation instructions were updated to recommend using Python 3
* New devices support was added:
* Support of the Rival 100 Dota 2 Edition (retail version) (#17)
* Support of the Rival 300 Fallout 4 Edition (#44)
* Support of the Rival 310 CS:GO Howl Edition (#113)
* Support of the Rival 3 (#111)
* Support of the Rival 300 Evil Geniuses Edition
* Support of the Rival 95 MSI Edition
* Support of the Rival 95 PC Bang
* Support of the Rival 100 PC Bang
* Support of the Rival 100 (Dell China)
* Support of the Rival 600 Dota 2 Edition
* Support of the Rival 106 (#84, @SethDusek)
* Some devices gained a better support:
* Rival 300 / Original Rival family
* Support of buttons mapping
* Rival 700 / 710
* Support of gradients / Color shift (#129, @nixtux)
* A generic support of mouse buttons mapping was added (rewriting of what was
originally done for the Sensei [RAW]). The following devices now support
it:
* Rival 300 / Original Rival family
* Sensei [RAW] family
* Regressions:
The following things were removed for this release:
* Sensei Ten: this mouse needs more work to be added back.
* Colorshift of the Rival 500: this feature needs more work to be added back.
Older changelog entries were moved to the `CHANGELOG.rst
<https://github.com/flozz/rivalcfg/blob/master/CHANGELOG.rst>`_ file.
.. |Github| image:: https://img.shields.io/github/stars/flozz/rivalcfg?label=Github&logo=github
:target: https://github.com/flozz/rivalcfg
.. |Discord| image:: https://img.shields.io/badge/chat-Discord-8c9eff?logo=discord&logoColor=ffffff
:target: https://discord.gg/P77sWhuSs4
.. |PYPI Version| image:: https://img.shields.io/pypi/v/rivalcfg?logo=python&logoColor=f1f1f1
:target: https://pypi.org/project/rivalcfg/
.. |Github Actions| image:: https://github.com/flozz/rivalcfg/actions/workflows/python-ci.yml/badge.svg
:target: https://github.com/flozz/rivalcfg/actions
.. |Black| image:: https://img.shields.io/badge/code%20style-black-000000.svg
:target: https://black.readthedocs.io/en/stable/
.. |License| image:: https://img.shields.io/github/license/flozz/rivalcfg
:target: https://github.com/flozz/rivalcfg/blob/master/LICENSE
| /rivalcfg-4.10.0.tar.gz/rivalcfg-4.10.0/README.rst | 0.521471 | 0.685088 | README.rst | pypi |
# RIVALGAN 
[Background](#background)
[The Dataset](#the-dataset)
[Implementation Overview](#implementation-overview) <br/>
[Usage](#usage)<br/>
[Visualizing the Data Augmentation Process](#visualizing-the-data-augmentation-process)<br/>
[GitHub Folder Structure](#github-folder-structure)<br/>
[Setup script](#setup-script)<br/>
[Requirements](#requirements)
------------
## Background
Imbalanced data sets for Machine Learning classification problems could be difficult to solve.
For example suppose we have two classes in our data set where the majority class is more than 90% of the dataset
and the minority is less than 1% but we are more interested in identifying instances of the minority class.
Most of the ML classifiers will reach an accuracy of 90% or more but this is not useful for our intended case.
A more properly calibrated method may achieve a lower accuracy, but would have a higher true positive rate (or recall).
A lot of critical real-data sets are imbalanced by nature like in credit card fraud detection or in the
Health Care Industry due to the scarcity of data. Traditional ways to fix imbalanced datasets are either
oversampling instances of the minority class or undersampling instances of the majority class (SMOTE).
Instead, to tackle these issues, GANs are used to learn from the real data and generate samples to augment
the training dataset. A GAN is composed of a generator and a discriminator (both are deep neural networks).
The generator will try its best to make a sample by learning from a dataset and the discriminator will learn
to predict if a given sample is generated by the generator or if it is from the original training dataset.
This indication is then used by Generator in turn to generate data as close to the real data so as to fool
the discriminator.
------------
## The Dataset
The dataset contains 284,807 transactions that occurred in two days, where we have 492 frauds.
The dataset is highly unbalanced, the positive class (frauds) account for 0.172% of all transactions.
Each of these observations corresponds to an individual transaction.
A binary response variable 'Class' indicates whether or not this transaction is a fraud.
The dataset contains 30 features:
* V1, V2, ... V28 which are anonymized features and uncorrelated after PCA transformations.
* 'Time' and 'Amount', which have not been transformed.
------------
## Process Overview and Tech Stack

------------
## Implementation Overview
This code trains a generative adversarial network to generate new synthetic data related to credit card fraud csv file.
It can also read data from any other csv files but the file will need to be transformed so the class variable to predict is
clearly identified. The code provides an api to visualize the synthetic data, compare the data distributions
between the real and the augmented data. It also allows training different classifiers (LogisticRegression, SVM,
RandomForest, XGBoost) and comparing their performance on the real and augmented datasets. The synthetic data could
be generated using either SMOTE or GANs. Different GANs architectures are proposed (Vanilla GAN,
Wasserstein GAN, Improved Wasserstein GAN, Least Squares GAN). Finally a random n-classes data set for classification
problems is provided and the decision boundaries are plotted on the real and augmented datasets.
------------
## Usage
``` bash
$ python pipeline -h
usage: pipeline.py [-h]
[--CLASSIFIER {Logit,LinearSVC,RandomForest,SGDClassifier,SVC}]
[--SAMPLER {SMOTE,SMOTETomek}]
[--AUGMENTED_DATA_SIZE AUGMENTED_DATA_SIZE]
[--TOTAL_TRAINING_STEPS TOTAL_TRAINING_STEPS]
[--GEN_FILENAME GEN_FILENAME]
[--train_classifier TRAIN_CLASSIFIER]
[--classifier_scores CLASSIFIER_SCORES]
[--generate_data GENERATE_DATA]
[--compute_learning_curves COMPUTE_LEARNING_CURVES]
[--aug_model_scores AUG_MODEL_SCORES]
[--plot_augmented_learning_curves PLOT_AUGMENTED_LEARNING_CURVES]
[--generate_distribution_plots GENERATE_DISTRIBUTION_PLOTS]
[--compare_scores COMPARE_SCORES]
[--random_dataset RANDOM_DATASET]
[--retrieve_real_data_generated_data RETRIEVE_REAL_DATA_GENERATED_DATA]
```
## API
Examples
``` python
from pipeline import *
pipeline = Pipeline()
data = pipeline.read_process_data()
pipeline.run_train_classifier()
pipeline.run_classifier_scores_report()
dargs = {
'AUGMENTED_DATA_SIZE':5000,
'TOTAL_TRAINING_STEPS': 1000,
'GAN_NAME':'VGAN'}
pipeline.set_configuration(dargs)
pipeline.run_train_gan()
pipeline.compare_classifier_gan_scores()
pipeline.generate_distribution_plots()
pipeline.plot_augmented_learning_curves()
```
------------
#### Output
```text
------------- Reading data --------------
Loading data from /home/ubuntu/insight/data/creditcard.engineered.pkl
Shape of the data=(284807, 31)
Head:
Time V1 V2 V3 V4 V5 V6 \
0 -2.495776 -0.760474 -0.059825 1.778510 0.998741 -0.282036 0.366454
1 -2.495776 0.645665 0.177226 0.108889 0.326641 0.047566 -0.064642
2 -2.495729 -0.759673 -0.946238 1.240864 0.277228 -0.418463 1.425391
V7 V8 V9 ... V21 V22 V23 \
0 0.234118 0.091669 0.343867 ... -0.027953 0.392914 -0.259567
1 -0.078505 0.077453 -0.237661 ... -0.405091 -0.908272 0.228784
2 0.775964 0.247431 -1.420257 ... 0.456138 1.094031 2.092428
V24 V25 V26 V27 V28 Amount Class
0 0.111992 0.253257 -0.396610 0.399584 -0.090140 1.130025 0
1 -0.569582 0.329670 0.267951 -0.031113 0.069997 -1.138642 0
2 -1.155079 -0.649083 -0.291089 -0.171222 -0.263354 1.695499 0
[3 rows x 31 columns]
Number of frauds in training data: 379 out of 213605 cases (0.1774303036% fraud)
Number of frauds in test data: 113 out of 71202 cases (0.1587034072% fraud)
Number of features=30
------------- Training classifier --------------
Training 30 features with classifier SGDClassifier
Time elapsed to train: 0:00:00.34
Saving SGDClassifier in /home/ubuntu/insight/cache/SGDClassifier_Fraud.pkl
No sampler to train
------------- Baseline scores --------------
Baseline classifier SGDClassifier
Loading classifier SGDClassifier from file /home/ubuntu/insight/cache/SGDClassifier_Fraud.pkl
Predicting 30 features
Classification Report:
pre rec spe f1 geo iba sup
0 1.00 0.96 0.91 0.98 0.93 0.88 71089
1 0.03 0.91 0.96 0.06 0.93 0.87 113
avg / total 1.00 0.96 0.91 0.98 0.93 0.88 71202
Accuracy score: 0.9578523075194517
Precision score: 0.911504424778761
Recall score: 0.03329023917259211
F1 score: 0.06423448705955721
Confusion Matrix:
[[68098 2991]
[ 10 103]]
------------- Training GAN and generating synthetic data --------------
Training WGAN total_steps=1000, #generatedData=5000
Step: 0
Generator loss: 0.6404243111610413 | discriminator loss: 1.3558526039123535
Step: 100
Generator loss: 0.3018853962421417 | discriminator loss: 1.5490034818649292
```
------------
## Visualizing the Data Augmentation Process
<p align="left"><img width=60% src="https://github.com/InsightDataCommunity/Rival-AI.NY-18C/blob/master/images/5KPrecisionRecallCurves.png"></p>
<p align="center"><img width=60% src="https://github.com/InsightDataCommunity/Rival-AI.NY-18C/blob/master/images/DataDistributionComparison100K.png"></p>
<p align="center"><img width=60% src="https://github.com/InsightDataCommunity/Rival-AI.NY-18C/blob/master/images/ScoresWithIncreasingFakeData.png"></p>
<p align="center"><img width=60% src="https://github.com/InsightDataCommunity/Rival-AI.NY-18C/blob/master/images/OVOSVMAugData.png"></p>
------------
## GitHub Folder Structure
```text
.
|-- ./rivalgan <- Source code for use in this project.
|-- ./reports <- Generated analysis as text files.
|-- ./generated_data <- Generated data.
|-- ./scripts <- Helper scripts to use the API.
|-- ./notebooks <- Jupyter notebooks.
|-- ./images <- Generated graphics and figures to be used in reporting.
`-- ./README.md <- The top-level README for this project.
```
------------
## Setup script
A setup script is available to be used to generate RIVALGAN as a package.
------------
## Requirements
See requirements.txt file in the same directory of this README.md | /rivalgan-0.4.tar.gz/rivalgan-0.4/README.md | 0.588889 | 0.939858 | README.md | pypi |
from django.contrib.contenttypes.models import ContentType
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK, HTTP_400_BAD_REQUEST
from river.core.workflowregistry import workflow_registry
from river.models import Workflow, State
import river_admin
from river_admin.views import get, post, delete, StateDto
from river_admin.views.serializers import WorkflowStateFieldDto, CreateWorkflowDto, WorkflowDto, \
TransitionMetaDto, TransitionDto, WorkflowMetadataDto
@get(r'^workflow/get/(?P<pk>\w+)/$')
def get_it(request, pk):
    """Return a single workflow, serialized as a WorkflowDto; 404 when absent."""
    instance = get_object_or_404(Workflow.objects.all(), pk=pk)
    return Response(WorkflowDto(instance).data, status=HTTP_200_OK)
@get(r'^workflow/list/$')
def list_it(request):
    """List workflows whose content type still resolves to an installed model class."""
    usable = [
        wf for wf in Workflow.objects.all()
        if wf.content_type.model_class() is not None
    ]
    return Response(WorkflowDto(usable, many=True).data, status=HTTP_200_OK)
@post(r'^workflow/create/$')
def create_it(request):
    """Create a workflow from the posted payload; 400 with errors on invalid input."""
    dto = CreateWorkflowDto(data=request.data)
    if not dto.is_valid():
        return Response(dto.errors, status=HTTP_400_BAD_REQUEST)
    workflow = dto.save()
    return Response({"id": workflow.id}, status=HTTP_200_OK)
@delete(r'^workflow/delete/(?P<pk>\w+)/$')
def delete_it(request, pk):
    """Delete the workflow identified by ``pk``; 404 when it does not exist."""
    get_object_or_404(Workflow.objects.all(), pk=pk).delete()
    return Response(status=HTTP_200_OK)
@get(r'^workflow/state-field/list/$')
def list_available_state_fields(request):
    """List (content_type, field_name) pairs registered with river that have no Workflow yet."""
    result = []
    for class_id, field_names in workflow_registry.workflows.items():
        model_class = workflow_registry.class_index[class_id]
        content_type = ContentType.objects.get_for_model(model_class)
        # Only offer fields that are not already bound to a workflow.
        result.extend(
            {"content_type": content_type, "field_name": field_name}
            for field_name in field_names
            if not Workflow.objects.filter(content_type=content_type, field_name=field_name).exists()
        )
    return Response(WorkflowStateFieldDto(result, many=True).data, status=HTTP_200_OK)
@get(r'^workflow/state/list/(?P<workflow_id>\w+)/$')
def list_states(request, workflow_id):
    """List every state of a workflow: its initial state plus all transition endpoints."""
    workflow = get_object_or_404(Workflow.objects.all(), pk=workflow_id)
    state_ids = {workflow.initial_state.pk}
    pairs = workflow.transition_metas.values_list("source_state__pk", "destination_state__pk")
    for source_pk, destination_pk in pairs:
        state_ids.update((source_pk, destination_pk))
    return Response(StateDto(State.objects.filter(pk__in=state_ids), many=True).data, status=HTTP_200_OK)
@get(r'^workflow/transition-meta/list/(?P<workflow_id>\w+)/$')
def list_transition_meta(request, workflow_id):
    """List every transition meta defined on the given workflow."""
    workflow = get_object_or_404(Workflow.objects.all(), pk=workflow_id)
    metas = workflow.transition_metas.all()
    return Response(TransitionMetaDto(metas, many=True).data, status=HTTP_200_OK)
@get(r'^workflow/transition/list/(?P<workflow_id>\w+)/$')
def list_transitions(request, workflow_id):
    """List every concrete transition instance of the given workflow."""
    workflow = get_object_or_404(Workflow.objects.all(), pk=workflow_id)
    transitions = workflow.transitions.all()
    return Response(TransitionDto(transitions, many=True).data, status=HTTP_200_OK)
@get(r'^workflow/object/list/(?P<workflow_pk>\w+)/$')
def list_workflow_objects(request, workflow_pk):
    """List the objects flowing through a workflow plus the column headers to display for them."""
    workflow = get_object_or_404(Workflow.objects.all(), pk=workflow_pk)
    model_class = workflow.content_type.model_class()
    # Fall back to a DefaultRiverAdmin when no admin was registered for this model/field pair.
    registered_admin = river_admin.site.get(model_class, workflow.field_name, river_admin.DefaultRiverAdmin.of(model_class, workflow.field_name))
    return Response({"headers": registered_admin.admin_list_displays, "workflow_objects": list(registered_admin.get_objects())}, status=HTTP_200_OK)
@get(r'^workflow/metadata/$')
def get_workflow_metadata(request):
    """Return id/name/icon metadata for every workflow whose model class still exists."""
    workflows = []
    for workflow in Workflow.objects.all():
        model_class = workflow.content_type.model_class()
        if not model_class:
            # Stale content type (model removed) — skip it.
            continue
        admin = river_admin.site.get(
            model_class,
            workflow.field_name,
            river_admin.DefaultRiverAdmin.of(model_class, workflow.field_name),
        )
        workflows.append({"id": workflow.id, "name": admin.admin_name, "icon": admin.admin_icon})
    return Response(WorkflowMetadataDto(workflows, many=True).data, status=HTTP_200_OK)
from django.contrib.auth.models import Permission, Group
from django.contrib.contenttypes.models import ContentType
from rest_framework import serializers
from river.models import Function, OnApprovedHook, State, TransitionApprovalMeta, \
OnTransitHook, TransitionMeta, Transition, Workflow, TransitionApproval, DONE, CANCELLED
# Custom user model support: resolve the project's active user model instead of
# importing django.contrib.auth.models.User directly.
from django.contrib.auth import get_user_model
User = get_user_model()
from river.models.hook import AFTER, BEFORE
# CONTENT TYPE
class ContentTypeDto(serializers.ModelSerializer):
    """Serializes a Django ``ContentType`` (app label and model name)."""
    class Meta:
        model = ContentType
        fields = ['id', 'app_label', 'model']
# AUTH
class LoginDto(serializers.Serializer):
    """Credentials payload for the login endpoint.

    Subclasses plain ``Serializer``: the original subclassed ``ModelSerializer``
    without declaring a ``Meta`` with a ``model``, which makes DRF raise an
    ``AssertionError`` as soon as the serializer's fields are accessed.
    """
    username = serializers.CharField()
    password = serializers.CharField()
class UserDto(serializers.ModelSerializer):
    """Public profile fields of the active user model."""
    class Meta:
        model = User
        fields = ['id', 'username', 'first_name', 'last_name', 'email']
class PermissionDto(serializers.ModelSerializer):
    """Serializes a Django auth ``Permission``."""
    class Meta:
        model = Permission
        fields = ['id', 'content_type', 'name', 'codename']
class GroupDto(serializers.ModelSerializer):
    """Serializes a Django auth ``Group``."""
    class Meta:
        model = Group
        fields = ['id', 'name']
# STATE
class StateDto(serializers.ModelSerializer):
    """Read representation of a river ``State``."""
    class Meta:
        model = State
        fields = ['id', 'label', "slug", "description"]
class CreateStateDto(serializers.ModelSerializer):
    """Write serializer for creating a ``State``; only the label is accepted."""
    class Meta:
        model = State
        fields = ['label']
# WORKFLOW
class WorkflowDto(serializers.ModelSerializer):
    """Serializes a ``Workflow`` with nested content type / initial state and
    the display name/icon of its registered river admin, when one exists."""
    admin_name = serializers.SerializerMethodField(allow_null=True)
    admin_icon = serializers.SerializerMethodField(allow_null=True)
    content_type = ContentTypeDto()
    initial_state = StateDto()
    class Meta:
        model = Workflow
        fields = ['id', 'content_type', 'initial_state', 'field_name', 'admin_name', 'admin_icon']
    def get_admin_name(self, obj):
        # Imported locally to avoid a circular import with river_admin.
        from river_admin import site
        model_class = obj.content_type.model_class()
        registered_admin = site.get(model_class, obj.field_name)
        # NOTE(review): other code reads `admin_name`/`admin_icon` on the
        # registered admin; here `.name`/`.icon` are used — confirm attribute names.
        return registered_admin.name if registered_admin else None
    def get_admin_icon(self, obj):
        from river_admin import site
        model_class = obj.content_type.model_class()
        registered_admin = site.get(model_class, obj.field_name)
        return registered_admin.icon if registered_admin else None
class WorkflowMetadataDto(serializers.Serializer):
    """Lightweight workflow descriptor: id, display name and icon."""
    id = serializers.CharField()
    name = serializers.CharField()
    icon = serializers.CharField()
class WorkflowStateFieldDto(serializers.Serializer):
    """A (content type, field name) pair identifying a river state field on a model."""
    content_type = ContentTypeDto()
    field_name = serializers.CharField()
class CreateWorkflowDto(serializers.ModelSerializer):
    """Write serializer for creating a ``Workflow``."""
    class Meta:
        model = Workflow
        fields = ["content_type", "field_name", "initial_state"]
# TRANSITION META
class TransitionMetaDto(serializers.ModelSerializer):
    """Read representation of a ``TransitionMeta`` (source -> destination edge)."""
    class Meta:
        model = TransitionMeta
        fields = ['id', 'workflow', 'source_state', 'destination_state']
class CreateTransitionMetaDto(serializers.ModelSerializer):
    """Write serializer for creating a ``TransitionMeta``."""
    class Meta:
        model = TransitionMeta
        fields = ["workflow", "source_state", "destination_state"]
# TRANSITION APPROVAL META
class TransitionApprovalMetaDto(serializers.ModelSerializer):
    """Read representation of an approval rule, with nested permissions and groups."""
    permissions = PermissionDto(many=True)
    groups = GroupDto(many=True)
    class Meta:
        model = TransitionApprovalMeta
        fields = ['id', 'workflow', 'transition_meta', 'permissions', 'groups', 'priority']
class CreateTransitionApprovalMetaDto(serializers.ModelSerializer):
    """Write serializer for creating a ``TransitionApprovalMeta``."""
    class Meta:
        model = TransitionApprovalMeta
        fields = ['workflow', 'transition_meta', 'permissions', 'groups', 'priority']
class RePrioritizeTransitionApprovalMetaDto(serializers.Serializer):
    """Payload for changing the priority of an existing approval rule."""
    transition_approval_meta_id = serializers.CharField()
    priority = serializers.IntegerField()
# FUNCTION
class FunctionDto(serializers.ModelSerializer):
    """Read representation of a hook callback ``Function`` (name, body, version, timestamps)."""
    class Meta:
        model = Function
        fields = ['id', 'name', 'body', 'version', 'date_created', 'date_updated']
class CreateFunctionDto(serializers.ModelSerializer):
    """Write serializer for creating a ``Function``."""
    class Meta:
        model = Function
        fields = ['name', 'body']
class UpdateFunctionDto(serializers.ModelSerializer):
    """Write serializer for updating a ``Function``'s name and body."""
    class Meta:
        model = Function
        fields = ['name', 'body']
# TRANSITION HOOK
class TransitionHookDto(serializers.ModelSerializer):
    """Read representation of an on-transit hook, with its callback function nested."""
    callback_function = FunctionDto()
    class Meta:
        model = OnTransitHook
        fields = ['id', 'callback_function', 'transition_meta', 'transition', 'object_id']
class CreateTransitionHookDto(serializers.ModelSerializer):
    """Write serializer for creating an on-transit hook; only BEFORE is accepted as hook type."""
    hook_type = serializers.ChoiceField(choices=[BEFORE], default=BEFORE)
    class Meta:
        model = OnTransitHook
        fields = ['workflow', 'callback_function', 'transition_meta', 'transition', 'object_id', 'content_type', 'hook_type']
# APPROVAL HOOK
class ApprovalHookDto(serializers.ModelSerializer):
    """Read representation of an on-approved hook, with its callback function nested."""
    callback_function = FunctionDto()
    class Meta:
        model = OnApprovedHook
        fields = ['id', 'callback_function', 'transition_approval_meta', 'transition_approval', 'object_id']
class CreateApprovalHookDto(serializers.ModelSerializer):
    """Write serializer for creating an on-approved hook; only AFTER is accepted as hook type."""
    hook_type = serializers.ChoiceField(choices=[AFTER], default=AFTER)
    class Meta:
        model = OnApprovedHook
        fields = ['workflow', 'callback_function', 'transition_approval_meta', 'transition_approval', 'object_id', 'content_type', 'hook_type']
# TRANSITION
class TransitionDto(serializers.ModelSerializer):
    """Serializes a concrete ``Transition``, flagging done/cancelled status."""
    is_done = serializers.SerializerMethodField()
    is_cancelled = serializers.SerializerMethodField()
    class Meta:
        model = Transition
        fields = ['id', 'workflow', 'source_state', 'destination_state', 'iteration', 'meta', 'object_id', 'is_done', 'is_cancelled']
    def get_is_done(self, obj):
        # True once the transition has reached the DONE status.
        return obj.status == DONE
    def get_is_cancelled(self, obj):
        # True when the transition was cancelled.
        return obj.status == CANCELLED
class TransitionApprovalDto(serializers.ModelSerializer):
    """Serializes a ``TransitionApproval`` with nested approver, permissions and groups."""
    transactioner = UserDto()
    permissions = PermissionDto(many=True)
    groups = GroupDto(many=True)
    class Meta:
        model = TransitionApproval
        fields = ['id', 'workflow', 'transition', 'permissions', 'groups', 'priority', 'status', 'transactioner', 'meta', 'object_id']
# WORKFLOW OBJECT
class WorkflowObjectStateDto(serializers.Serializer):
    """The (iteration, state) pair describing where a workflow object currently is."""
    iteration = serializers.IntegerField()
    state = StateDto()
from django.contrib.auth.models import Group
from django.core.management.base import BaseCommand
from django.db import transaction
from river.models import TransitionApprovalMeta
from examples.shipping_example.models import Shipping
INITIALIZED = "Initialized"
SHIPPED = "Shipped"
ARRIVED = "Arrived"
RETURN_INITIALIZED = "Return Initialized"
RETURNED = "Returned"
RE_INITIALIZED = "Re-Initialized"
REFUNDED = "Refunded"
CLOSED = "Closed"
class Command(BaseCommand):
    help = 'Bootstrapping database with necessary items'
    @transaction.atomic()
    def handle(self, *args, **options):
        """Seed the shipping example workflow: actor groups, states, transitions
        and approval rules. Runs in one transaction; existing transition data of
        the workflow is wiped and re-created, so re-running is safe."""
        from river.models import State, Workflow, TransitionMeta
        from django.contrib.contenttypes.models import ContentType
        # Actor groups that will approve the transitions below.
        warehouse_attendant, _ = Group.objects.update_or_create(name="Warehouse Attendant")
        delivery_person, _ = Group.objects.update_or_create(name="Delivery Person")
        courier_company_attendant, _ = Group.objects.update_or_create(name="Courier Company Attendant")
        finance_manager, _ = Group.objects.update_or_create(name="Finance Person")
        shipping_content_type = ContentType.objects.get_for_model(Shipping)
        # Workflow states, from initial to terminal.
        initialized_state, _ = State.objects.update_or_create(label=INITIALIZED, defaults={"description": "First step of the workflow"})
        shipped_state, _ = State.objects.update_or_create(label=SHIPPED, defaults={"description": "When the goods are physically shipped and it is confirmed by the courier company"})
        arrived_state, _ = State.objects.update_or_create(label=ARRIVED, defaults={"description": "The goods are arrived on the customer end"})
        return_initialized_state, _ = State.objects.update_or_create(label=RETURN_INITIALIZED, defaults={"description": "The customer wanted to return the goods and initialized the return at the courier company office"})
        returned_state, _ = State.objects.update_or_create(label=RETURNED, defaults={"description": "The returned goods have been arrived on the warehouse"})
        re_initialized_state, _ = State.objects.update_or_create(label=RE_INITIALIZED, defaults={"description": "There was a mistake with the shipment and the it corrected and re-initialized"})
        refunded_state, _ = State.objects.update_or_create(label=REFUNDED, defaults={"description": "The purchase has been refunded"})
        closed_state, _ = State.objects.update_or_create(label=CLOSED, defaults={"description": "The final step of the workflow"})
        # Reuse the existing workflow on Shipping.shipping_status when present.
        workflow = Shipping.river.shipping_status.workflow \
            or Workflow.objects.create(content_type=shipping_content_type, field_name="shipping_status", initial_state=initialized_state)
        # Wipe prior transition data so the script can be re-run cleanly
        # (delete order matters: instances before their metas).
        workflow.transition_approvals.all().delete()
        workflow.transitions.all().delete()
        workflow.transition_approval_metas.all().delete()
        workflow.transition_metas.all().delete()
        # The allowed state-to-state edges of the workflow.
        initialized_to_shipped, _ = TransitionMeta.objects.get_or_create(workflow=workflow, source_state=initialized_state, destination_state=shipped_state)
        shipped_to_arrived, _ = TransitionMeta.objects.get_or_create(workflow=workflow, source_state=shipped_state, destination_state=arrived_state)
        arrived_to_closed, _ = TransitionMeta.objects.get_or_create(workflow=workflow, source_state=arrived_state, destination_state=closed_state)
        arrived_to_return_initialized, _ = TransitionMeta.objects.get_or_create(workflow=workflow, source_state=arrived_state, destination_state=return_initialized_state)
        return_initialized_to_returned, _ = TransitionMeta.objects.get_or_create(workflow=workflow, source_state=return_initialized_state, destination_state=returned_state)
        returned_to_re_initialized, _ = TransitionMeta.objects.get_or_create(workflow=workflow, source_state=returned_state, destination_state=re_initialized_state)
        returned_to_refunded, _ = TransitionMeta.objects.get_or_create(workflow=workflow, source_state=returned_state, destination_state=refunded_state)
        refunded_to_closed, _ = TransitionMeta.objects.get_or_create(workflow=workflow, source_state=refunded_state, destination_state=closed_state)
        re_initialized_to_shipped, _ = TransitionMeta.objects.get_or_create(workflow=workflow, source_state=re_initialized_state, destination_state=shipped_state)
        # Approval rules per edge; priority orders multi-step sign-off.
        initialized_to_shipped_rule_1, _ = TransitionApprovalMeta.objects.get_or_create(workflow=workflow, transition_meta=initialized_to_shipped, priority=0)
        initialized_to_shipped_rule_1.groups.set([warehouse_attendant])
        initialized_to_shipped_rule_2, _ = TransitionApprovalMeta.objects.get_or_create(workflow=workflow, transition_meta=initialized_to_shipped, priority=1)
        initialized_to_shipped_rule_2.groups.set([courier_company_attendant])
        shipped_to_arrived_rule_1, _ = TransitionApprovalMeta.objects.get_or_create(workflow=workflow, transition_meta=shipped_to_arrived, priority=0)
        shipped_to_arrived_rule_1.groups.set([delivery_person])
        shipped_to_arrived_rule_2, _ = TransitionApprovalMeta.objects.get_or_create(workflow=workflow, transition_meta=shipped_to_arrived, priority=1)
        shipped_to_arrived_rule_2.groups.set([courier_company_attendant])
        arrived_to_closed_rule, _ = TransitionApprovalMeta.objects.get_or_create(workflow=workflow, transition_meta=arrived_to_closed, priority=0)
        arrived_to_closed_rule.groups.set([finance_manager])
        arrived_to_return_initialized_rule, _ = TransitionApprovalMeta.objects.get_or_create(workflow=workflow, transition_meta=arrived_to_return_initialized, priority=0)
        arrived_to_return_initialized_rule.groups.set([courier_company_attendant])
        return_initialized_to_returned_rule, _ = TransitionApprovalMeta.objects.get_or_create(workflow=workflow, transition_meta=return_initialized_to_returned, priority=0)
        return_initialized_to_returned_rule.groups.set([warehouse_attendant])
        returned_to_re_initialized_rule, _ = TransitionApprovalMeta.objects.get_or_create(workflow=workflow, transition_meta=returned_to_re_initialized, priority=0)
        returned_to_re_initialized_rule.groups.set([warehouse_attendant])
        re_initialized_to_shipped_rule_1, _ = TransitionApprovalMeta.objects.get_or_create(workflow=workflow, transition_meta=re_initialized_to_shipped, priority=0)
        re_initialized_to_shipped_rule_1.groups.set([warehouse_attendant])
        re_initialized_to_shipped_rule_2, _ = TransitionApprovalMeta.objects.get_or_create(workflow=workflow, transition_meta=re_initialized_to_shipped, priority=1)
        re_initialized_to_shipped_rule_2.groups.set([courier_company_attendant])
        returned_to_refunded_rule, _ = TransitionApprovalMeta.objects.get_or_create(workflow=workflow, transition_meta=returned_to_refunded, priority=0)
        returned_to_refunded_rule.groups.set([finance_manager])
        refunded_to_closed_rule, _ = TransitionApprovalMeta.objects.get_or_create(workflow=workflow, transition_meta=refunded_to_closed, priority=0)
        refunded_to_closed_rule.groups.set([finance_manager])
        self.stdout.write(self.style.SUCCESS('Successfully bootstrapped the db '))
import importlib
import sys
import pluggy
# pluggy markers used to declare the hook specifications of the two plugin
# namespaces: test generators ("generator") and devices under test ("dut").
gen_hookspec = pluggy.HookspecMarker("generator")
dut_hookspec = pluggy.HookspecMarker("dut")
class RandomGeneratorSpec(object):
    """Hook specification for random test generator plugins.

    Plugins implement three stages that RiVer Core calls in order:
    ``pre_gen`` (setup), ``gen`` (test generation) and ``post_gen`` (cleanup).
    """
    @gen_hookspec
    def pre_gen(self, spec_config, output_dir):
        """
        This stage is used to configure the generator, check and install
        dependencies, download artifacts, create necessary directories, parse and capture the plugin
        specific parameters present in the ``config.ini``, etc.
        It runs before random generation of ASM tests begins.
        :param spec_config: Plugin specific parameters and general parameters captured from the original input `config.ini`
        :param output_dir: absolute path of the ``work_dir`` where the tests and artifacts need to be generated/placed
        :type spec_config: dict
        :type output_dir: str
        """
    @gen_hookspec
    def gen(self, module_dir, output_dir):
        """
        This stage is where the actual tests are generated. RiVer Core uses
        the inherent pytest framework to run parallelized commands. Using pytest,
        enables using default report templates which are quite verbose and helpful in
        debugging as well.
        The major output of this stage is a test-list YAML which
        follows the syntax/schema mentioned in :ref:`Test List Format<testlist>`.
        This test list captures all the information about the tests and necessary
        collaterals required to compile each test. By adopting a standard test-list
        format, we inherently allow any source of tests to be integrated into
        RiVer Core as a generator plugin as long as a valid test list is created.
        :param module_dir: absolute path of the module directory
        :param output_dir: absolute path of the ``work_dir`` where the tests and artifacts need to be generated/placed
        :type module_dir: str
        :type output_dir: str
        :return: Test List basically containing the info about the tests generated and required compiler options as per the Test List Format
        :rtype: dict
        """
    @gen_hookspec
    def post_gen(self, output_dir):
        """
        This stage is called after all the tests are generated and can
        be used to post-process the tests, validate the tests, profile the tests, remove
        unwanted artifacts, etc.
        :param output_dir: absolute path of the ``work_dir`` where the tests and artifacts need to be generated/placed
        :type output_dir: str
        """
### NOTE: the regression list is expanded into pytest parametrized test cases.
### The simulate_test fixture in pytest invokes the compile-spec, model and DUT plugins.
# DUT Class Specification
class DuTSpec(object):
    """Hook specification for DUT (design-under-test) plugins.

    A DUT plugin compiles the generated tests and simulates them on the
    target.  The hooks are invoked in order:
    ``init`` -> ``build`` -> ``run`` -> ``post_run`` (``merge_db`` is used
    for coverage-database merging).
    """

    @dut_hookspec
    def init(self, ini_config, test_list, work_dir, coverage_config,
             plugin_path):
        """Capture configuration from ``config.ini`` and set up the environment.

        If a core generator is the target, this stage can be used to
        configure it, generate an instance, build the relevant toolchain and
        set simulator arguments such as coverage and verbosity.  The test
        list is also available at this stage and must be captured and stored
        by the plugin for future use.

        :param ini_config: Plugin specific configuration dictionary.
        :param test_list: Path to the Test List YAML generated by the
            Generator Plugin
        :param work_dir: Path to the directory where the output (files, logs,
            binaries, etc.) will be generated
        :param coverage_config: Configuration options for coverage.
        :param plugin_path: Path to the plugin module to be loaded
        :type ini_config: dict
        :type test_list: str
        :type work_dir: str
        :type coverage_config: dict
        :type plugin_path: str
        """

    @dut_hookspec
    def build(self):
        """Create a Makefile or script to compile each test and simulate it
        on the target.

        A typical use case is to create a makefile-target for each test that
        needs to be run on the target.
        """

    @dut_hookspec
    def run(self, module_dir):
        """Run the tests on the DUT.

        It is recommended to run the tests in parallel; RiVer Core uses the
        inherent pytest framework to run terminal commands in parallel.  This
        stage generates all the simulation artifacts: signature file,
        execution logs, test binary, target executable binary, coverage
        database, simulation logs, etc.

        :param module_dir: Path to the module to be loaded.
        :type module_dir: str
        :return: Location of the JSON report generated by pytest, used for
            the final HTML report.
        :rtype: str
        """

    @dut_hookspec
    def post_run(self, test_dict, config):
        """Clean up after pass/fail results have been captured.

        This stage can be used to remove unwanted artifacts (elfs, hexfiles,
        objdumps, logs, ...) which are no longer of consequence; one may
        choose to delete artifacts only for passed tests and retain them for
        failures (the pass/fail result is captured in the test-list itself).
        It can further be used to merge coverage databases of all runs, rank
        the tests and generate reports.  Entirely optional — the plugin
        defines what "clean-up" means here.

        :param test_dict: The test-list YAML
        :param config: ``config.ini`` configuration options
        :type test_dict: dict
        :type config: dict
        """

    @dut_hookspec
    def merge_db(self, db_files, output_db, config):
        """Merge several coverage databases into one.

        :param db_files: List of coverage files detected.
        :param output_db: Final output name
        :param config: Config file for RiVer Core
        :type db_files: list
        :type output_db: str
        :type config: str
        :return: HTML files generated by the merge
        :rtype: list
        """
import logging
import colorlog
# a theme is just a dict of strings to represent each level;
# the padded labels are what ``%(styledname)s`` expands to in the
# colourised console format string used by Log below.
THEME = {
    logging.CRITICAL: " critical ",
    logging.ERROR: " error ",
    logging.WARNING: " command ",
    logging.INFO: " info ",
    logging.DEBUG: " debug "
}
class Log:
    """
    Colourised console logger that also mirrors all records into the file
    ``river_core.log`` (via ``logging.basicConfig``).

    this class holds all the logic; see the end of the script to
    see how it's instantiated in order to have the line
    "from zenlog import log" work
    """
    # Accepted spellings for each level, used by _parse_level()/level().
    aliases = {
        logging.CRITICAL: ("critical", "crit", "c", "fatal"),
        logging.ERROR: ("error", "err", "e"),
        logging.WARNING: ("warning", "warn", "w"),
        logging.INFO: ("info", "inf", "nfo", "i"),
        logging.DEBUG: ("debug", "dbg", "d")
    }

    def __init__(self, lvl=logging.DEBUG, format=None):
        # NOTE(review): the `format` parameter shadows the builtin of the
        # same name; it is part of the public signature so it is kept.
        self._lvl = lvl
        if not format:
            # Default console format: coloured, padded level label + message.
            format = " %(log_color)s%(styledname)-8s%(reset)s | %(log_color)s%(message)s%(reset)s"
        self.format = format
        # File sink: basicConfig attaches a FileHandler on the root logger;
        # filemode='w' truncates river_core.log on every run.
        logging.basicConfig(filename="river_core.log",
                            format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                            filemode='w')
        logging.root.setLevel(self._lvl)
        # Console sink: a coloured StreamHandler on a dedicated child logger.
        self.formatter = colorlog.ColoredFormatter(self.format)
        self.stream = logging.StreamHandler()
        self.stream.setLevel(self._lvl)
        self.stream.setFormatter(self.formatter)
        self.logger = logging.getLogger('pythonConfig')
        self.logger.setLevel(self._lvl)
        self.logger.addHandler(self.stream)
        self.theme = THEME
        self.extra = {"styledname": self.theme[self._lvl]}

    # the magic happens here: we use the "extra" argument documented in
    # https://docs.python.org/2/library/logging.html#logging.Logger.debug
    # to inject new items into the logging.LogRecord objects
    # we also create our convenience methods here
    def critical(self, message, *args, **kwargs):
        """Log *message* at CRITICAL level, one record per line so every
        line of a multi-line message keeps its styled level prefix."""
        for line in str(message).splitlines():
            self.logger.critical(
                line,
                extra={"styledname": self.theme[logging.CRITICAL]},
                *args,
                **kwargs)

    crit = c = fatal = critical

    def error(self, message, *args, **kwargs):
        """Log *message* at ERROR level (one record per line)."""
        for line in str(message).splitlines():
            self.logger.error(line,
                              extra={"styledname": self.theme[logging.ERROR]},
                              *args,
                              **kwargs)

    err = e = error

    def warn(self, message, *args, **kwargs):
        """Log *message* at WARNING level (one record per line).

        Note: the WARNING theme label is " command " (see THEME)."""
        for line in str(message).splitlines():
            self.logger.warning(
                line,
                extra={"styledname": self.theme[logging.WARNING]},
                *args,
                **kwargs)

    warning = w = warn

    def info(self, message, *args, **kwargs):
        """Log *message* at INFO level (one record per line)."""
        for line in str(message).splitlines():
            self.logger.info(line,
                             extra={"styledname": self.theme[logging.INFO]},
                             *args,
                             **kwargs)

    inf = nfo = i = info

    def debug(self, message, *args, **kwargs):
        """Log *message* at DEBUG level (one record per line)."""
        for line in str(message).splitlines():
            self.logger.debug(line,
                              extra={"styledname": self.theme[logging.DEBUG]},
                              *args,
                              **kwargs)

    dbg = d = debug

    # other convenience functions to set the global logging level
    def _parse_level(self, lvl):
        """Map a level constant or any alias string to the logging constant.

        Raises TypeError for unrecognized input."""
        for log_level in self.aliases:
            if lvl == log_level or lvl in self.aliases[log_level]:
                return log_level
        raise TypeError("Unrecognized logging level: %s" % lvl)

    def level(self, lvl=None):
        '''Get or set the logging level.

        With no argument, returns the current level; otherwise *lvl* may be
        a logging constant or any alias listed in ``aliases`` and is applied
        to the stream handler, the named logger and the root logger.'''
        if not lvl:
            return self._lvl
        self._lvl = self._parse_level(lvl)
        self.stream.setLevel(self._lvl)
        self.logger.setLevel(self._lvl)
        logging.root.setLevel(self._lvl)
logger = Log() | /river_core-1.4.0.tar.gz/river_core-1.4.0/river_core/log.py | 0.45641 | 0.225843 | log.py | pypi |
import math
from river import metrics
__all__ = ["PrevalenceThreshold"]
class PrevalenceThreshold(metrics.base.BinaryMetric):
    r"""Prevalence Threshold (PT).

    The prevalence threshold [^1] is the point — expressed purely as a
    function of a test's sensitivity (TPR) and specificity (TNR) — beyond
    which the positive predictive value starts dropping sharply relative to
    the underlying prevalence.  It is computed as [^2]

    $$
    \phi_e = \frac{\sqrt{TPR(1 - TNR)} + TNR - 1}{TPR + TNR - 1}
    $$

    with

    $$
    TPR = \frac{TP}{P} = \frac{TP}{TP + FN}, TNR = \frac{TN}{N} = \frac{TN}{TN + FP}
    $$

    Parameters
    ----------
    cm
        Optional shared confusion matrix; sharing one instance between
        several metrics reduces both storage and computation time.
    pos_val
        Value to treat as "positive".

    Examples
    --------

    >>> from river import metrics

    >>> y_true = [False, False, False, True, True, True]
    >>> y_pred = [False, False, True, True, False, True]

    >>> metric = metrics.PrevalenceThreshold()
    >>> for yt, yp in zip(y_true, y_pred):
    ...     print(metric.update(yt, yp).get())
    0.0
    0.0
    1.0
    0.36602540378443876
    0.44948974278317827
    0.41421356237309503

    >>> metric
    PrevalenceThreshold: 0.414214

    References
    ----------
    [^1]: Balayla, J. (2020). Prevalence threshold ($\phi$_e) and the geometry of screening curves.
    PLOS ONE, 15(10), e0240215. DOI: 10.1371/journal.pone.0240215
    [^2]: Wikipedia contributors. (2021, March 19). Sensitivity and specificity.
    In Wikipedia, The Free Encyclopedia,
    from https://en.wikipedia.org/w/index.php?title=Sensitivity_and_specificity&oldid=1013004476

    """

    def get(self):
        """Return the current prevalence threshold (0.0 when undefined)."""
        tp = self.cm.true_positives(self.pos_val)
        fn = self.cm.false_negatives(self.pos_val)
        tn = self.cm.true_negatives(self.pos_val)
        fp = self.cm.false_positives(self.pos_val)

        def rate(hits, misses):
            # A class with no observations yet contributes a rate of 0
            # instead of raising.
            try:
                return hits / (hits + misses)
            except ZeroDivisionError:
                return 0.0

        tpr = rate(tp, fn)  # sensitivity
        tnr = rate(tn, fp)  # specificity
        try:
            return (math.sqrt(tpr * (1 - tnr)) + tnr - 1) / (tpr + tnr - 1)
        except (ZeroDivisionError, ValueError):
            # Degenerate case: TPR + TNR == 1 (an uninformative test).
            return 0.0
from river import metrics
class KappaM(metrics.base.MultiClassMetric):
    r"""Kappa-M score.

    Compares the evaluated classifier's performance against the majority
    class classifier:

    $$
    \kappa_{m} = (p_o - p_e) / (1 - p_e)
    $$

    where $p_o$ is the empirical probability of agreement on the label
    assigned to any sample (prequential accuracy), and $p_e$ is
    the prequential accuracy of the `majority classifier`.

    Parameters
    ----------
    cm
        Optional shared confusion matrix; sharing one instance between
        several metrics reduces both storage and computation time.

    Examples
    --------

    >>> from river import metrics

    >>> y_true = ['cat', 'ant', 'cat', 'cat', 'ant', 'bird', 'cat', 'ant', 'cat', 'cat', 'ant']
    >>> y_pred = ['ant', 'ant', 'cat', 'cat', 'ant', 'cat', 'ant', 'ant', 'cat', 'cat', 'ant']

    >>> metric = metrics.KappaM()
    >>> for yt, yp in zip(y_true, y_pred):
    ...     metric = metric.update(yt, yp)

    >>> metric
    KappaM: 0.25

    References
    ----------
    [1^]: A. Bifet et al. "Efficient online evaluation of big data stream classifiers."
    In Proceedings of the 21th ACM SIGKDD international conference on knowledge discovery
    and data mining, pp. 59-68. ACM, 2015.

    """

    def get(self):
        n = self.cm.n_samples
        # Prequential accuracy of the evaluated model (0 while no samples).
        p0 = self.cm.sum_diag / n if n else 0
        try:
            # Prequential accuracy of the majority-class baseline.
            pe = self.cm.weight_majority_classifier / n
            return (p0 - pe) / (1.0 - pe)
        except ZeroDivisionError:
            # Either no samples yet, or the baseline is perfect (pe == 1).
            return 0.0
class KappaT(metrics.base.MultiClassMetric):
    r"""Kappa-T score.

    The Kappa-T measures the temporal correlation between samples.
    It is defined as

    $$
    \kappa_{t} = (p_o - p_e) / (1 - p_e)
    $$

    where $p_o$ is the empirical probability of agreement on the label
    assigned to any sample (prequential accuracy), and $p_e$ is
    the prequential accuracy of the `no-change classifier` that predicts
    only using the last class seen by the classifier.

    Parameters
    ----------
    cm
        This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a
        confusion matrix reduces the amount of storage and computation time.

    Examples
    --------

    >>> from river import metrics

    >>> y_true = ['cat', 'ant', 'cat', 'cat', 'ant', 'bird']
    >>> y_pred = ['ant', 'ant', 'cat', 'cat', 'ant', 'cat']

    >>> metric = metrics.KappaT()
    >>> for yt, yp in zip(y_true, y_pred):
    ...     metric = metric.update(yt, yp)

    >>> metric
    KappaT: 0.6

    References
    ----------
    [^1]: A. Bifet et al. (2013). "Pitfalls in benchmarking data stream classification
    and how to avoid them." Proc. of the European Conference on Machine Learning
    and Principles and Practice of Knowledge Discovery in Databases (ECMLPKDD'13),
    Springer LNAI 8188, p. 465-479.

    """

    def get(self):
        """Return the current Kappa-T value (0.0 when undefined)."""
        # Prequential accuracy of the evaluated classifier.
        try:
            p0 = self.cm.sum_diag / self.cm.n_samples
        except ZeroDivisionError:
            p0 = 0
        # Prequential accuracy of the no-change baseline; the except covers
        # both an empty stream and a perfect baseline (pe == 1).
        try:
            pe = self.cm.weight_no_change_classifier / self.cm.n_samples
            return (p0 - pe) / (1.0 - pe)
        except ZeroDivisionError:
            return 0.0
import math
from scipy.special import factorial
from river import metrics
__all__ = ["Q0", "Q2"]
class Q0(metrics.base.MultiClassMetric):
    r"""Q0 index.

    Dom's Q0 measure [^2] scores a clustering solution by the conditional
    entropy of the true classes given the hypothesised clusters, which only
    captures homogeneity.  To also account for completeness, Dom adds a model
    cost term derived from a coding-theory argument; the overall quality is
    the sum of the data's conditional entropy cost and the model cost.

    The motivation is an appeal to parsimony: given identical conditional
    entropies H(C|K), the clustering solution with the fewest clusters should
    be preferred.

    The Q0 measure can be calculated using the following formula [^1]

    $$
    Q_0(C, K) = H(C|K) + \frac{1}{n} \sum_{k=1}^{|K|} \log \binom{h(c) + |C| - 1}{|C| - 1}.
    $$

    Due to the complexity of the formula, this metric and its associated normalized version (Q2)
    is one order of magnitude slower than most other implemented metrics.

    Parameters
    ----------
    cm
        This parameter allows sharing the same confusion
        matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage
        and computation time.

    Examples
    --------

    >>> from river import metrics

    >>> y_true = [1, 1, 2, 2, 3, 3]
    >>> y_pred = [1, 1, 1, 2, 2, 2]

    >>> metric = metrics.Q0()
    >>> for yt, yp in zip(y_true, y_pred):
    ...     print(metric.update(yt, yp).get())
    0.0
    0.0
    0.9182958340544896
    1.208582260960826
    1.4479588303902937
    1.3803939544277863

    >>> metric
    Q0: 1.380394

    References
    ----------
    [^1]: Andrew Rosenberg and Julia Hirschberg (2007).
    V-Measure: A conditional entropy-based external cluster evaluation measure.
    Proceedings of the 2007 Joing Conference on Empirical Methods in Natural Language
    Processing and Computational Natural Language Learning, pp. 410 - 420,
    Prague, June 2007. URL: https://www.aclweb.org/anthology/D07-1043.pdf.
    [^2]: Byron E. Dom. 2001. An information-theoretic external
    cluster-validity measure. Technical Report RJ10219, IBM, October.

    """

    def __init__(self, cm=None):
        super().__init__(cm)

    @staticmethod
    def binomial_coeff(n, k):
        """Binomial coefficient n-choose-k via scipy factorials.

        scipy's ``factorial`` evaluates to 0 for negative arguments, so an
        out-of-range coefficient becomes 0 (its log then raises ValueError,
        which callers skip) rather than raising here."""
        denominator = factorial(k) * factorial(n - k)
        return factorial(n) / denominator

    def get(self):
        n = self.cm.n_samples
        h_c_given_k = 0.0  # conditional entropy H(C|K)
        model_cost = 0.0  # sum of log binomial coefficients
        # Number of non-empty hypothesised clusters (columns with mass).
        n_true_clusters = sum(
            1 for weight in self.cm.sum_col.values() if weight > 0
        )
        for col in self.cm.classes:
            col_total = self.cm.sum_col[col]
            for row in self.cm.classes:
                cell = self.cm[row][col]
                try:
                    h_c_given_k -= cell / n * math.log(cell / col_total, 2)
                except (ValueError, ZeroDivisionError):
                    # Empty cells/columns contribute nothing.
                    pass
            try:
                model_cost += math.log(
                    self.binomial_coeff(
                        col_total + n_true_clusters - 1, n_true_clusters - 1
                    )
                )
            except ValueError:
                pass
        return h_c_given_k + model_cost / n
class Q2(Q0):
    r"""Q2 index.

    Q2 is Dom's [^2] normalized version of the Q0 index.  It ranges over
    $(0, 1]$ [^1], greater scores indicating a more preferred clustering.

    The Q2 index can be calculated as follows [^1]

    $$
    Q2(C, K) = \frac{\frac{1}{n} \sum_{c=1}^{|C|} \log \binom{h(c) + |C| - 1}{|C| - 1} }{Q_0(C, K)}
    $$

    where $C$ is the target partition, $K$ is the hypothesized partition and $h(k)$ is
    the size of cluster $k$.

    Due to the complexity of the formula, this metric is one order of magnitude slower than
    its original version (Q0) and most other implemented metrics.

    Parameters
    ----------
    cm
        This parameter allows sharing the same confusion
        matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage
        and computation time.

    Examples
    --------

    >>> from river import metrics

    >>> y_true = [1, 1, 2, 2, 3, 3]
    >>> y_pred = [1, 1, 1, 2, 2, 2]

    >>> metric = metrics.Q2()
    >>> for yt, yp in zip(y_true, y_pred):
    ...     print(metric.update(yt, yp).get())
    0.0
    0.0
    0.0
    0.4545045563529578
    0.39923396953448914
    0.3979343306829813

    >>> metric
    Q2: 0.397934

    References
    ----------
    [^1]: Andrew Rosenberg and Julia Hirschberg (2007).
    V-Measure: A conditional entropy-based external cluster evaluation measure.
    Proceedings of the 2007 Joing Conference on Empirical Methods in Natural Language
    Processing and Computational Natural Language Learning, pp. 410 - 420,
    Prague, June 2007. URL: https://www.aclweb.org/anthology/D07-1043.pdf.
    [^2]: Byron E. Dom. 2001. An information-theoretic external
    cluster-validity measure. Technical Report RJ10219, IBM, October.

    """

    def __init__(self, cm=None):
        super().__init__(cm)

    def get(self):
        # Denominator: the un-normalized Q0 score.
        q0 = super().get()
        # Number of non-empty hypothesised clusters (columns with mass).
        n_true_clusters = sum(
            1 for weight in self.cm.sum_col.values() if weight > 0
        )
        numerator = 0.0
        for label in self.cm.classes:
            try:
                numerator += math.log(
                    self.binomial_coeff(
                        self.cm.sum_row[label] + n_true_clusters - 1,
                        n_true_clusters - 1,
                    )
                )
            except ValueError:
                pass
        try:
            return (numerator / self.cm.n_samples) / q0
        except ZeroDivisionError:
            return 0.0
import math
from river import metrics
__all__ = ["VariationInfo"]
class VariationInfo(metrics.base.MultiClassMetric):
    r"""Variation of Information.

    Variation of Information (VI) [^1] [^2] is an information-based
    clustering measure, presented as a *distance* between two partitions of
    the same data — it therefore does not distinguish between hypothesised
    and target clustering.  Useful properties:

    * VI satisifes the metric axioms

    * VI is convexly additive. This means that, if a cluster is split, the distance
    from the new cluster to the original is the distance induced by the split times
    the size of the cluster. This guarantees that all changes to the metrics are "local".

    * VI is not affected by the number of data points in the cluster. However, it is bounded
    by the logarithm of the maximum number of clusters in true and predicted labels.

    The Variation of Information is calculated using the following formula

    $$
    VI(C, K) = H(C) + H(K) - 2 H(C, K) = H(C|K) + H(K|C)
    $$

    The bound of the variation of information [^3] can be written in terms of the number of elements,
    $VI(C, K) \leq \log(n)$, or with respect to the maximum number of clusters $K^*$,
    $VI(C, K) \leq 2 \log(K^*)$.

    Parameters
    ----------
    cm
        This parameter allows sharing the same confusion
        matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage
        and computation time.

    Examples
    --------

    >>> from river import metrics

    >>> y_true = [1, 1, 2, 2, 3, 3]
    >>> y_pred = [1, 1, 1, 2, 2, 2]

    >>> metric = metrics.VariationInfo()
    >>> for yt, yp in zip(y_true, y_pred):
    ...     print(metric.update(yt, yp).get())
    0.0
    0.0
    0.9182958340544896
    1.1887218755408673
    1.3509775004326938
    1.2516291673878228

    >>> metric
    VariationInfo: 1.251629

    References
    ----------
    [^1]: Andrew Rosenberg and Julia Hirschberg (2007).
    V-Measure: A conditional entropy-based external cluster evaluation measure.
    Proceedings of the 2007 Joing Conference on Empirical Methods in Natural Language
    Processing and Computational Natural Language Learning, pp. 410 - 420,
    Prague, June 2007.
    [^2]: Marina Meila and David Heckerman. 2001.
    An experimental comparison of model-based clustering methods.
    Mach. Learn., 42(1/2):9–29.
    [^3]: Wikipedia contributors. (2021, February 18).
    Variation of information. In Wikipedia, The Free Encyclopedia,
    from https://en.wikipedia.org/w/index.php?title=Variation_of_information&oldid=1007562715

    """

    def __init__(self, cm=None):
        super().__init__(cm)

    def get(self):
        n = self.cm.n_samples

        def entropy_term(joint, marginal):
            # One -p * log2(joint/marginal) contribution; cells with no mass
            # (log of 0 or division by 0) contribute nothing.
            try:
                return -(joint / n) * math.log(joint / marginal, 2)
            except (ValueError, ZeroDivisionError):
                return 0.0

        h_c_given_k = 0.0
        h_k_given_c = 0.0
        for i in self.cm.classes:
            for j in self.cm.classes:
                h_c_given_k += entropy_term(self.cm[j][i], self.cm.sum_col[i])
                h_k_given_c += entropy_term(self.cm[i][j], self.cm.sum_row[i])
        # VI(C, K) = H(C|K) + H(K|C)
        return h_c_given_k + h_k_given_c
import math
from river import utils
from . import base
class XieBeni(base.ClusteringMetric):
    """Xie-Beni index (XB).

    The Xie-Beni index [^1] has the form of (Compactness)/(Separation), which defines the
    inter-cluster separation as the minimum squared distance between cluster centers,
    and the intra-cluster compactness as the mean squared distance between each data
    object and its cluster centers. The smaller the value of XB, the better the
    clustering quality.

    Examples
    --------

    >>> from river import cluster
    >>> from river import stream
    >>> from river import metrics

    >>> X = [
    ...     [1, 2],
    ...     [1, 4],
    ...     [1, 0],
    ...     [4, 2],
    ...     [4, 4],
    ...     [4, 0],
    ...     [-2, 2],
    ...     [-2, 4],
    ...     [-2, 0]
    ... ]

    >>> k_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)
    >>> metric = metrics.cluster.XieBeni()

    >>> for x, _ in stream.iter_array(X):
    ...     k_means = k_means.learn_one(x)
    ...     y_pred = k_means.predict_one(x)
    ...     metric = metric.update(x, y_pred, k_means.centers)

    >>> metric
    XieBeni: 0.397043

    References
    ----------
    [^1]: X. L. Xie, G. Beni (1991). A validity measure for fuzzy clustering. In: IEEE
    Transactions on Pattern Analysis and Machine Intelligence 13(8), 841 - 847.
    DOI: 10.1109/34.85677
    """

    def __init__(self):
        super().__init__()
        self._ssw = 0  # sum of squared point-to-center distances
        self._minimum_separation = 0  # min squared distance between centers
        self._total_points = 0

    @staticmethod
    def _find_minimum_separation(centers):
        """Return the smallest pairwise squared distance between centers."""
        minimum_separation = math.inf
        n_centers = max(centers) + 1
        for i in range(n_centers):
            for j in range(i + 1, n_centers):
                separation_ij = utils.math.minkowski_distance(centers[i], centers[j], 2)
                if separation_ij < minimum_separation:
                    minimum_separation = separation_ij
        return minimum_separation

    def update(self, x, y_pred, centers, sample_weight=1.0):
        """Account for one point assigned to cluster ``y_pred``."""
        squared_distance = utils.math.minkowski_distance(centers[y_pred], x, 2)
        self._ssw += squared_distance
        self._total_points += 1
        # The separation is a property of the current centers, so it is
        # recomputed and *assigned*, not accumulated.
        self._minimum_separation = self._find_minimum_separation(centers)
        return self

    def revert(self, x, y_pred, centers, sample_weight=1.0):
        """Undo the effect of one previously seen point."""
        squared_distance = utils.math.minkowski_distance(centers[y_pred], x, 2)
        self._ssw -= squared_distance
        self._total_points -= 1
        # Bug fix: the previous code *subtracted* the freshly computed
        # separation (driving the stored value to 0 or below and corrupting
        # get()), whereas `update` assigns it.  Mirror `update` and assign.
        self._minimum_separation = self._find_minimum_separation(centers)
        return self

    def get(self):
        """Return the XB index (math.inf while it is undefined)."""
        try:
            return self._ssw / (self._total_points * self._minimum_separation)
        except ZeroDivisionError:
            return math.inf

    @property
    def bigger_is_better(self):
        return False
import math
from river import utils
from . import base
__all__ = ["MSSTD", "RMSSTD"]
class MSSTD(base.ClusteringMetric):
    """Mean Squared Standard Deviation.

    This is the pooled sample variance of all the attributes, which measures
    only the compactness of found clusters.

    Examples
    --------

    >>> from river import cluster
    >>> from river import stream
    >>> from river import metrics

    >>> X = [
    ...     [1, 2],
    ...     [1, 4],
    ...     [1, 0],
    ...     [4, 2],
    ...     [4, 4],
    ...     [4, 0],
    ...     [-2, 2],
    ...     [-2, 4],
    ...     [-2, 0]
    ... ]

    >>> k_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)
    >>> metric = metrics.cluster.MSSTD()

    >>> for x, _ in stream.iter_array(X):
    ...     k_means = k_means.learn_one(x)
    ...     y_pred = k_means.predict_one(x)
    ...     metric = metric.update(x, y_pred, k_means.centers)

    >>> metric
    MSSTD: 2.635708

    References
    ----------
    [^1]: Halkidi, M., Batistakis, Y. and Vazirgiannis, M. (2001). On Clustering Validation Techniques.
    Journal of Intelligent Information Systems, 17, 107 - 145.
    DOI: 10.1023/a:1012801612483.
    """

    def __init__(self):
        super().__init__()
        self._ssq = 0  # sum of squared point-to-center distances
        self._total_points = 0
        self._total_clusters = 0
        self._dim = 0  # dimensionality of the observed points

    def update(self, x, y_pred, centers, sample_weight=1.0):
        """Account for one point assigned to cluster ``y_pred``."""
        self._ssq += utils.math.minkowski_distance(centers[y_pred], x, 2)
        self._total_points += 1
        # The cluster count is simply the current number of centers; the old
        # `+= len(centers) - self._total_clusters` computed the same value.
        self._total_clusters = len(centers)
        self._dim = len(x)
        return self

    def revert(self, x, y_pred, centers, sample_weight=1.0):
        """Undo the effect of one previously seen point."""
        self._ssq -= utils.math.minkowski_distance(centers[y_pred], x, 2)
        self._total_points -= 1
        # Bug fix: the previous code did
        # `_total_clusters -= len(centers) - _total_clusters`, i.e.
        # 2*T - len(centers), which is wrong whenever the number of centers
        # has changed.  The count must simply track the current centers.
        self._total_clusters = len(centers)
        return self

    def get(self):
        """Return the pooled variance (math.inf while it is undefined)."""
        try:
            return self._ssq / (self._dim * (self._total_points - self._total_clusters))
        except ZeroDivisionError:
            return math.inf

    @property
    def bigger_is_better(self):
        return False
class RMSSTD(MSSTD):
    """Root Mean Squared Standard Deviation.

    The square root of the pooled sample variance of all the attributes
    (see `MSSTD`); like its parent it measures only the compactness of the
    found clusters.

    Examples
    --------

    >>> from river import cluster
    >>> from river import stream
    >>> from river import metrics

    >>> X = [
    ...     [1, 2],
    ...     [1, 4],
    ...     [1, 0],
    ...     [4, 2],
    ...     [4, 4],
    ...     [4, 0],
    ...     [-2, 2],
    ...     [-2, 4],
    ...     [-2, 0]
    ... ]

    >>> k_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)
    >>> metric = metrics.cluster.RMSSTD()

    >>> for x, _ in stream.iter_array(X):
    ...     k_means = k_means.learn_one(x)
    ...     y_pred = k_means.predict_one(x)
    ...     metric = metric.update(x, y_pred, k_means.centers)

    >>> metric
    RMSSTD: 1.623486

    References
    ----------
    [^1]: Halkidi, M., Batistakis, Y. and Vazirgiannis, M. (2001). On Clustering Validation Techniques.
    Journal of Intelligent Information Systems, 17, 107 - 145.
    DOI: 10.1023/a:1012801612483.
    """

    def get(self):
        # Square root of the pooled variance computed by MSSTD.
        pooled_variance = super().get()
        return pooled_variance ** 0.5
import math
from river import metrics
from . import base
from .ssw import SSW
class BIC(base.ClusteringMetric):
    r"""Bayesian Information Criterion (BIC).

    In statistics, the Bayesian Information Criterion (BIC) [^1], or Schwarz Information
    Criterion (SIC), is a criterion for model selection among a finite set of models;
    the model with the highest BIC is preferred. It is based, in part, on the likelihood
    function and is closely related to the Akaike Information Criterion (AIC).

    Let

    * k being the number of clusters,

    * $n_i$ being the number of points within each cluster, $n_1 + n_2 + ... + n_k = n$,

    * $d$ being the dimension of the clustering problem.

    Then, the variance of the clustering solution will be calculated as

    $$
    \hat{\sigma}^2 = \frac{1}{(n - m) \times d} \sum_{i = 1}^n \lVert x_i - c_j \rVert^2.
    $$

    The maximum likelihood function, used in the BIC version of `River`, would be

    $$
    \hat{l}(D) = \sum_{i = 1}^k n_i \log(n_i) - n \log n - \frac{n_i \times d}{2} \times \log(2 \pi \hat{\sigma}^2) - \frac{(n_i - 1) \times d}{2},
    $$

    and the BIC will then be calculated as

    $$
    BIC = \hat{l}(D) - 0.5 \times k \times log(n) \times (d+1).
    $$

    Using the previously mentioned maximum likelihood function, the higher the BIC value, the
    better the clustering solution is. Moreover, the BIC calculated will always be less than 0 [^2].

    Examples
    --------

    >>> from river import cluster
    >>> from river import stream
    >>> from river import metrics

    >>> X = [
    ...     [1, 2],
    ...     [1, 4],
    ...     [1, 0],
    ...     [4, 2],
    ...     [4, 4],
    ...     [4, 0],
    ...     [-2, 2],
    ...     [-2, 4],
    ...     [-2, 0]
    ... ]

    >>> k_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)
    >>> metric = metrics.cluster.BIC()

    >>> for x, _ in stream.iter_array(X):
    ...     k_means = k_means.learn_one(x)
    ...     y_pred = k_means.predict_one(x)
    ...     metric = metric.update(x, y_pred, k_means.centers)

    >>> metric
    BIC: -30.060416

    References
    ----------
    [^1]: Wikipedia contributors. (2020, December 14). Bayesian information criterion.
    In Wikipedia, The Free Encyclopedia,
    from https://en.wikipedia.org/w/index.php?title=Bayesian_information_criterion&oldid=994127616
    [^2]: BIC Notes, https://github.com/bobhancock/goxmeans/blob/master/doc/BIC_notes.pdf
    """

    def __init__(self):
        super().__init__()
        # Within-cluster sum of squares, maintained incrementally.
        self._ssw = SSW()
        # Per-cluster point counts, keyed by predicted cluster label.
        self._n_points_by_clusters = {}
        self._n_clusters = 0
        # Dimensionality of the observed points.
        self._dim = 0
        # NOTE(review): `_initialized` is never set to True, so `_dim` is
        # refreshed on every `update` call — harmless as long as the point
        # dimensionality is constant.
        self._initialized = False

    def update(self, x, y_pred, centers, sample_weight=1.0):
        """Account for one point assigned to cluster ``y_pred``."""
        if not self._initialized:
            self._dim = len(x)
        self._ssw.update(x, y_pred, centers, sample_weight)
        # EAFP increment of the per-cluster count.
        try:
            self._n_points_by_clusters[y_pred] += 1
        except KeyError:
            self._n_points_by_clusters[y_pred] = 1
        self._n_clusters = len(centers)
        return self

    def revert(self, x, y_pred, centers, sample_weight=1.0):
        """Undo the effect of one previously seen point."""
        self._ssw.revert(x, y_pred, centers, sample_weight)
        self._n_points_by_clusters[y_pred] -= 1
        self._n_clusters = len(centers)
        return self

    def get(self):
        """Return the current BIC (-inf while it is undefined)."""
        BIC = 0
        total_points = sum(self._n_points_by_clusters.values())
        # Pooled variance of the solution (sigma^2 in the class docstring).
        try:
            variance = (
                1 / (total_points - self._n_clusters) / self._dim * self._ssw.get()
            )
        except ZeroDivisionError:
            return -math.inf
        # Model-complexity penalty: 0.5 * k * log(n) * (d + 1).
        const_term = 0.5 * self._n_clusters * math.log(total_points) * (self._dim + 1)
        # Per-cluster log-likelihood terms; empty clusters (log of 0) and a
        # non-positive variance are skipped via the ValueError guard.
        for i in self._n_points_by_clusters:
            try:
                BIC += (
                    self._n_points_by_clusters[i]
                    * math.log(self._n_points_by_clusters[i])
                    - self._n_points_by_clusters[i] * math.log(total_points)
                    - (self._n_points_by_clusters[i] * self._dim)
                    / 2
                    * math.log(2 * math.pi * variance)
                    - (self._n_points_by_clusters[i] - 1) * self._dim / 2
                )
            except ValueError:
                continue
        BIC -= const_term
        return BIC

    @property
    def bigger_is_better(self):
        return True
import math
from river import metrics
from . import base
from .ssb import SSB
from .ssw import SSW
__all__ = ["CalinskiHarabasz", "Hartigan", "WB"]
class CalinskiHarabasz(base.ClusteringMetric):
    """Calinski-Harabasz index (CH).

    The Calinski-Harabasz index (CH) index measures the criteria simultaneously
    with the help of average between and within cluster sum of squares.

    * The **numerator** reflects the degree of separation in the way of how much centers are spread.

    * The **denominator** corresponds to compactness, to reflect how close the in-cluster objects
    are gathered around the cluster center.

    Examples
    --------

    >>> from river import cluster
    >>> from river import stream
    >>> from river import metrics

    >>> X = [
    ...     [1, 2],
    ...     [1, 4],
    ...     [1, 0],
    ...     [4, 2],
    ...     [4, 4],
    ...     [4, 0],
    ...     [-2, 2],
    ...     [-2, 4],
    ...     [-2, 0]
    ... ]

    >>> k_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)
    >>> metric = metrics.cluster.CalinskiHarabasz()

    >>> for x, _ in stream.iter_array(X):
    ...     k_means = k_means.learn_one(x)
    ...     y_pred = k_means.predict_one(x)
    ...     metric = metric.update(x, y_pred, k_means.centers)

    >>> metric
    CalinskiHarabasz: 6.922666

    References
    ----------
    [^1]: Calinski, T., Harabasz, J.-A. (1974). A Dendrite Method for Cluster Analysis.
    Communications in Statistics 3(1), 1 - 27. DOI: 10.1080/03610927408827101
    """

    def __init__(self):
        super().__init__()
        self._ssb = SSB()  # between-cluster sum of squares (separation)
        self._ssw = SSW()  # within-cluster sum of squares (compactness)
        self._n_clusters = 0
        self._n_points = 0

    def update(self, x, y_pred, centers, sample_weight=1.0):
        """Account for one point assigned to cluster ``y_pred``."""
        self._n_points += 1
        self._n_clusters = len(centers)
        self._ssb.update(x, y_pred, centers, sample_weight)
        self._ssw.update(x, y_pred, centers, sample_weight)
        return self

    def revert(self, x, y_pred, centers, sample_weight=1.0):
        """Undo the effect of one previously seen point."""
        self._n_points -= 1
        self._n_clusters = len(centers)
        self._ssb.revert(x, y_pred, centers, sample_weight)
        self._ssw.revert(x, y_pred, centers, sample_weight)
        return self

    def get(self):
        """Return the CH index (-inf while it is undefined)."""
        between = self._ssb.get()
        within = self._ssw.get()
        k, n = self._n_clusters, self._n_points
        try:
            return (between / (k - 1)) / (within / (n - k))
        except ZeroDivisionError:
            # Fewer than two clusters, n == k, or zero within-cluster spread.
            return -math.inf

    @property
    def bigger_is_better(self):
        return True
class Hartigan(base.ClusteringMetric):
    """Hartigan Index (H - Index)

    Hartigan Index (H - Index) [^1] is a sum-of-square based index [^2], which is
    equal to the negative log of the division of SSW (Sum-of-Squares Within Clusters)
    by SSB (Sum-of-Squares Between Clusters).

    The higher the Hartigan index, the higher the clustering quality is.

    Examples
    --------

    >>> from river import cluster
    >>> from river import stream
    >>> from river import metrics

    >>> X = [
    ...     [1, 2],
    ...     [1, 4],
    ...     [1, 0],
    ...     [4, 2],
    ...     [4, 4],
    ...     [4, 0],
    ...     [-2, 2],
    ...     [-2, 4],
    ...     [-2, 0]
    ... ]

    >>> k_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)
    >>> metric = metrics.cluster.Hartigan()

    >>> for x, _ in stream.iter_array(X):
    ...     k_means = k_means.learn_one(x)
    ...     y_pred = k_means.predict_one(x)
    ...     metric = metric.update(x, y_pred, k_means.centers)

    >>> metric
    Hartigan: 0.836189

    References
    ----------
    [^1]: Hartigan JA (1975). Clustering Algorithms. John Wiley & Sons, Inc.,
    New York, NY, USA. ISBN 047135645X.
    [^2]: Q. Zhao, M. Xu, and P. Franti, "Sum-of-squares based cluster validity index
    and significance analysis," in Adaptive and Natural Computing Algorithms,
    M. Kolehmainen, P. Toivanen, and B. Beliczynski, Eds.
    Berlin, Germany: Springer, 2009, pp. 313–322.

    """

    def __init__(self):
        super().__init__()
        # Running between- and within-cluster sums of squares.
        self._ssb = metrics.cluster.SSB()
        self._ssw = metrics.cluster.SSW()

    def update(self, x, y_pred, centers, sample_weight=1.0):
        """Incorporate one point into the underlying SSB/SSW statistics."""
        self._ssb.update(x, y_pred, centers, sample_weight)
        self._ssw.update(x, y_pred, centers, sample_weight)
        return self

    def revert(self, x, y_pred, centers, sample_weight=1.0):
        """Undo a previous `update` for the given point."""
        self._ssb.revert(x, y_pred, centers, sample_weight)
        self._ssw.revert(x, y_pred, centers, sample_weight)
        return self

    def get(self):
        """Return -log(SSW / SSB), handling both degenerate cases."""
        try:
            return -math.log(self._ssw.get() / self._ssb.get())
        except ZeroDivisionError:
            # SSB == 0: the ratio diverges, -log(inf) -> -inf (worst score).
            return -math.inf
        except ValueError:
            # SSW == 0: perfectly compact clusters, -log(0+) -> +inf (best score).
            # Bug fix: math.log raises ValueError on 0, which previously escaped
            # and crashed the metric instead of reporting a value.
            return math.inf

    @property
    def bigger_is_better(self):
        return True
class WB(base.ClusteringMetric):
"""WB Index
WB Index is a simple sum-of-square method, calculated by dividing the within
cluster sum-of-squares by the between cluster sum-of-squares. Its effect is emphasized
by multiplying the number of clusters. The advantages of the proposed method are
that one can determine the number of clusters by minimizing the WB value, without
relying on any knee point detection, and this metric is straightforward to implement.
The lower the WB index, the higher the clustering quality is.
Examples
--------
>>> from river import cluster
>>> from river import stream
>>> from river import metrics
>>> X = [
... [1, 2],
... [1, 4],
... [1, 0],
... [4, 2],
... [4, 4],
... [4, 0],
... [-2, 2],
... [-2, 4],
... [-2, 0]
... ]
>>> k_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)
>>> metric = metrics.cluster.WB()
>>> for x, _ in stream.iter_array(X):
... k_means = k_means.learn_one(x)
... y_pred = k_means.predict_one(x)
... metric = metric.update(x, y_pred, k_means.centers)
>>> metric
WB: 1.300077
References
----------
[^1]: Q. Zhao, M. Xu, and P. Franti, "Sum-of-squares based cluster validity index
and significance analysis," in Adaptive and Natural Computing Algorithms,
M. Kolehmainen, P. Toivanen, and B. Beliczynski, Eds.
Berlin, Germany: Springer, 2009, pp. 313–322.
"""
    def __init__(self):
        super().__init__()
        # Running between- and within-cluster sums of squares.
        self._ssb = metrics.cluster.SSB()
        self._ssw = metrics.cluster.SSW()
        self._n_clusters = 0

    def update(self, x, y_pred, centers, sample_weight=1.0):
        """Incorporate one point into the underlying SSB/SSW statistics."""
        self._ssb.update(x, y_pred, centers, sample_weight)
        self._ssw.update(x, y_pred, centers, sample_weight)
        self._n_clusters = len(centers)
        return self

    def revert(self, x, y_pred, centers, sample_weight=1.0):
        """Undo a previous `update` for the given point."""
        self._ssb.revert(x, y_pred, centers, sample_weight)
        self._ssw.revert(x, y_pred, centers, sample_weight)
        self._n_clusters = len(centers)
        return self

    def get(self):
        """Return WB = n_clusters * SSW / SSB; `math.inf` when SSB is zero."""
        try:
            return self._n_clusters * self._ssw.get() / self._ssb.get()
        except ZeroDivisionError:
            # No between-cluster scatter yet (e.g. before any update); inf is the
            # worst possible value since smaller is better for WB.
            return math.inf
@property
def bigger_is_better(self):
return False | /river_extra-0.14.0-py3-none-any.whl/river_extra/metrics/cluster/ssq_based.py | 0.916393 | 0.672963 | ssq_based.py | pypi |
import abc
import numbers
import typing
from river import base, stats, utils
from river.base.typing import FeatureName
__all__ = ["ClusteringMetric"]
class ClusteringMetric(abc.ABC):
    """Mother class of all internal clustering metrics.

    An internal metric is maintained incrementally: `update` feeds it one sample
    (the point `x`, its predicted cluster `y_pred`, and the current cluster
    `centers`), `revert` undoes such a contribution, and `get` returns the
    metric's current value.
    """

    # Define the format specification used for string representation.
    _fmt = ",.6f"  # Use commas to separate big numbers and show 6 decimals

    @abc.abstractmethod
    def update(self, x, y_pred, centers, sample_weight=1.0) -> "ClusteringMetric":
        """Update the metric with one sample; returns `self` for chaining."""

    @abc.abstractmethod
    def revert(self, x, y_pred, centers, sample_weight=1.0) -> "ClusteringMetric":
        """Revert the metric, undoing a previous `update`; returns `self`."""

    @abc.abstractmethod
    def get(self) -> float:
        """Return the current value of the metric."""

    @property
    @abc.abstractmethod
    def bigger_is_better(self) -> bool:
        """Indicates if a high value is better than a low one or not."""

    def works_with(self, model: base.Estimator) -> bool:
        """Indicates whether or not a metric can work with a given model."""
        return utils.inspect.isclusterer(model)

    def __repr__(self):
        """Returns the class name along with the current value of the metric."""
        # NOTE(review): rstrip("0") strips trailing zeros from the fixed-point
        # representation but leaves a dangling "." for integral values
        # (e.g. 1.000000 -> "1.") — confirm this rendering is intended.
        return f"{self.__class__.__name__}: {self.get():{self._fmt}}".rstrip("0")
class MeanClusteringMetric(ClusteringMetric):
    """Many metrics are just running averages. This is a utility class that avoids repeating
    tedious stuff throughout the module for such metrics.

    Subclasses implement `_eval`, which maps a single sample to a number; the
    metric's value is the (weighted) running mean of those numbers.
    """

    def __init__(self):
        # Running weighted mean of the per-sample evaluations.
        self._mean = stats.Mean()

    @abc.abstractmethod
    def _eval(
        self,
        x: typing.Dict[FeatureName, numbers.Number],
        y_pred: numbers.Number,
        centers,
        sample_weight=1.0,
    ):
        """Evaluate one sample; the result is folded into the running mean."""
        pass

    def update(
        self,
        x: typing.Dict[FeatureName, numbers.Number],
        y_pred: numbers.Number,
        centers,
        sample_weight=1.0,
    ):
        """Fold one sample's evaluation into the running mean."""
        self._mean.update(x=self._eval(x, y_pred, centers), w=sample_weight)
        return self

    def revert(
        self,
        x: typing.Dict[FeatureName, numbers.Number],
        y_pred: numbers.Number,
        centers,
        sample_weight=1.0,
    ):
        """Remove one sample's evaluation from the running mean."""
        self._mean.revert(x=self._eval(x, y_pred, centers), w=sample_weight)
        return self
def get(self):
return self._mean.get() | /river_extra-0.14.0-py3-none-any.whl/river_extra/metrics/cluster/base.py | 0.888378 | 0.512266 | base.py | pypi |
import math
from river import stats, utils
from . import base
class R2(base.ClusteringMetric):
"""R-Squared
R-Squared (RS) [^1] is the complement of the ratio of sum of squared distances between objects
in different clusters to the total sum of squares. It is an intuitive and simple formulation
of measuring the differences between clusters.
The maximum value of R-Squared is 1, which means that the higher the index, the better
the clustering results.
Examples
--------
>>> from river import cluster
>>> from river import stream
>>> from river import metrics
>>> X = [
... [1, 2],
... [1, 4],
... [1, 0],
... [4, 2],
... [4, 4],
... [4, 0],
... [-2, 2],
... [-2, 4],
... [-2, 0]
... ]
>>> k_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)
>>> metric = metrics.cluster.R2()
>>> for x, _ in stream.iter_array(X):
... k_means = k_means.learn_one(x)
... y_pred = k_means.predict_one(x)
... metric = metric.update(x, y_pred, k_means.centers)
>>> metric
R2: 0.509203
References
----------
[^1]: Halkidi, M., Vazirgiannis, M., & Batistakis, Y. (2000). Quality Scheme Assessment in the
Clustering Process. Principles Of Data Mining And Knowledge Discovery, 265-276.
DOI: 10.1007/3-540-45372-5_26
"""
    def __init__(self):
        super().__init__()
        # Per-feature running mean of all points seen so far (the data's center).
        self._center_all_points = {}
        # Sum of squared distances from the points to the overall data center.
        self._ssq_point_center = 0
        # Sum of squared distances from the points to their cluster centers.
        self._ssq_point_cluster_centers = 0
        # NOTE(review): the two attributes below are written here but never read
        # anywhere in this class — possibly leftovers; confirm before removing.
        self._cluster_variance = {}
        self._centers = {}
        self._initialized = False

    def update(self, x, y_pred, centers, sample_weight=1.0):
        """Incorporate one point into the running sums."""
        if not self._initialized:
            # Lazily create one running mean per feature, keyed like `x`.
            self._center_all_points = {i: stats.Mean() for i in x}
            self._initialized = True
        for i in self._center_all_points:
            self._center_all_points[i].update(x[i], w=sample_weight)
        center_all_points = {
            i: self._center_all_points[i].get() for i in self._center_all_points
        }
        squared_distance_center = utils.math.minkowski_distance(x, center_all_points, 2)
        squared_distance_cluster_center = utils.math.minkowski_distance(
            x, centers[y_pred], 2
        )
        self._ssq_point_center += squared_distance_center
        self._ssq_point_cluster_centers += squared_distance_cluster_center
        return self

    def revert(self, x, y_pred, centers, sample_weight=1.0):
        """Undo a previous `update` for the given point.

        Assumes at least one `update` happened before; otherwise
        `_center_all_points` is still empty.
        """
        for i in self._center_all_points:
            self._center_all_points[i].update(x[i], w=-sample_weight)
        center_all_points = {
            i: self._center_all_points[i].get() for i in self._center_all_points
        }
        squared_distance_center = utils.math.minkowski_distance(x, center_all_points, 2)
        squared_distance_cluster_center = utils.math.minkowski_distance(
            x, centers[y_pred], 2
        )
        self._ssq_point_center -= squared_distance_center
        self._ssq_point_cluster_centers -= squared_distance_cluster_center
        return self

    def get(self):
        """Return 1 - SS(points, cluster centers) / SS(points, data center)."""
        try:
            return 1 - self._ssq_point_cluster_centers / self._ssq_point_center
        except ZeroDivisionError:
            # No scatter around the data center yet.
            return -math.inf
@property
def bigger_is_better(self):
return True | /river_extra-0.14.0-py3-none-any.whl/river_extra/metrics/cluster/r2.py | 0.94502 | 0.706209 | r2.py | pypi |
from river import stats, utils
from . import base
class SSB(base.ClusteringMetric):
"""Sum-of-Squares Between Clusters (SSB).
The Sum-of-Squares Between Clusters is the weighted mean of the squares of distances
between cluster centers to the mean value of the whole dataset.
Examples
--------
>>> from river import cluster
>>> from river import stream
>>> from river import metrics
>>> X = [
... [1, 2],
... [1, 4],
... [1, 0],
... [4, 2],
... [4, 4],
... [4, 0],
... [-2, 2],
... [-2, 4],
... [-2, 0]
... ]
>>> k_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)
>>> metric = metrics.cluster.SSB()
>>> for x, _ in stream.iter_array(X):
... k_means = k_means.learn_one(x)
... y_pred = k_means.predict_one(x)
... metric = metric.update(x, y_pred, k_means.centers)
>>> metric
SSB: 8.109389
References
----------
[^1]: Q. Zhao, M. Xu, and P. Franti, "Sum-of-squares based cluster validity index
and significance analysis," in Adaptive and Natural Computing Algorithms,
M. Kolehmainen, P. Toivanen, and B. Beliczynski, Eds.
Berlin, Germany: Springer, 2009, pp. 313–322.
"""
    def __init__(self):
        super().__init__()
        # Per-feature running mean of all points (the overall data center).
        self._center_all_points = {}
        self._n_points = 0
        # Number of (unweighted) points assigned to each cluster label.
        self._n_points_by_clusters = {}
        # Squared distance of each cluster center to the overall data center.
        self._squared_distances = {}
        self._initialized = False

    def update(self, x, y_pred, centers, sample_weight=1.0):
        """Incorporate one point and refresh the center-to-center distances."""
        if not self._initialized:
            # Lazily create one running mean per feature, keyed like `x`.
            self._center_all_points = {i: stats.Mean() for i in x}
            self._initialized = True
        for i in self._center_all_points:
            self._center_all_points[i].update(x[i], w=sample_weight)
        center_all_points = {
            i: self._center_all_points[i].get() for i in self._center_all_points
        }
        self._n_points += 1
        try:
            self._n_points_by_clusters[y_pred] += 1
        except KeyError:
            # First point observed for this cluster label.
            self._n_points_by_clusters[y_pred] = 1
        for i in centers:
            self._squared_distances[i] = utils.math.minkowski_distance(
                centers[i], center_all_points, 2
            )
        return self

    def revert(self, x, y_pred, centers, sample_weight=1.0):
        """Undo a previous `update` for the given point."""
        for i in self._center_all_points:
            self._center_all_points[i].update(x[i], w=-sample_weight)
        center_all_points = {
            i: self._center_all_points[i].get() for i in self._center_all_points
        }
        self._n_points -= 1
        self._n_points_by_clusters[y_pred] -= 1
        for i in centers:
            self._squared_distances[i] = utils.math.minkowski_distance(
                centers[i], center_all_points, 2
            )
        return self

    def get(self):
        """Return the weighted between-cluster sum of squares."""
        ssb = 0
        for i in self._n_points_by_clusters:
            try:
                ssb += (
                    1
                    / self._n_points
                    * self._n_points_by_clusters[i]
                    * self._squared_distances[i]
                )
            except ZeroDivisionError:
                # No points counted (everything reverted): contribute nothing.
                ssb += 0
        return ssb
@property
def bigger_is_better(self):
return True | /river_extra-0.14.0-py3-none-any.whl/river_extra/metrics/cluster/ssb.py | 0.946621 | 0.672224 | ssb.py | pypi |
import math
from river import utils
from . import base
class DaviesBouldin(base.ClusteringMetric):
"""Davies-Bouldin index (DB).
    The Davies-Bouldin index (DB) [^1] is an old but still widely used internal validation measure.
DB uses intra-cluster variance and inter-cluster center distance to find the worst partner
cluster, i.e., the closest most scattered one for each cluster. Thus, minimizing DB gives
us the optimal number of clusters.
Examples
--------
>>> from river import cluster
>>> from river import stream
>>> from river import metrics
>>> X = [
... [1, 2],
... [1, 4],
... [1, 0],
... [4, 2],
... [4, 4],
... [4, 0],
... [-2, 2],
... [-2, 4],
... [-2, 0]
... ]
>>> k_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)
>>> metric = metrics.cluster.DaviesBouldin()
>>> for x, _ in stream.iter_array(X):
... k_means = k_means.learn_one(x)
... y_pred = k_means.predict_one(x)
... metric = metric.update(x, y_pred, k_means.centers)
>>> metric
DaviesBouldin: 0.22583
References
----------
[^1]: David L., D., Don, B. (1979). A Cluster Separation Measure. In: IEEE
Transactions on Pattern Analysis and Machine Intelligence (PAMI) 1(2), 224 - 227.
DOI: 10.1109/TPAMI.1979.4766909
"""
    def __init__(self):
        super().__init__()
        # NOTE(review): despite the name, this accumulates the distances of the
        # points to their *own* cluster center (an intra-cluster quantity).
        self._inter_cluster_distances = {}
        self._n_points_by_clusters = {}
        self._total_points = 0  # NOTE(review): never read nor updated — appears unused.
        self._centers = {}

    def update(self, x, y_pred, centers, sample_weight=1.0):
        """Add one point's distance to its assigned cluster center."""
        distance = math.sqrt(utils.math.minkowski_distance(centers[y_pred], x, 2))
        if y_pred not in self._inter_cluster_distances:
            # First point observed for this cluster label.
            self._inter_cluster_distances[y_pred] = distance
            self._n_points_by_clusters[y_pred] = 1
        else:
            self._inter_cluster_distances[y_pred] += distance
            self._n_points_by_clusters[y_pred] += 1
        self._centers = centers
        return self

    def revert(self, x, y_pred, centers, sample_weight=1.0):
        """Undo a previous `update` for the given point."""
        distance = math.sqrt(utils.math.minkowski_distance(centers[y_pred], x, 2))
        self._inter_cluster_distances[y_pred] -= distance
        self._n_points_by_clusters[y_pred] -= 1
        self._centers = centers
        return self

    def get(self):
        """Return the current index value.

        Assumes cluster labels are the contiguous integers 0..n_clusters-1.

        NOTE(review): this takes the single largest pairwise "partner" ratio and
        divides it by the number of clusters, whereas the classical DB formulation
        averages each cluster's worst-partner ratio — confirm the deviation is
        intended (the docstring's doctest pins the current behaviour).
        """
        max_partner_clusters_index = -math.inf
        n_clusters = len(self._inter_cluster_distances)
        for i in range(n_clusters):
            for j in range(i + 1, n_clusters):
                distance_ij = math.sqrt(
                    utils.math.minkowski_distance(self._centers[i], self._centers[j], 2)
                )
                ij_partner_cluster_index = (
                    self._inter_cluster_distances[i] / self._n_points_by_clusters[i]
                    + self._inter_cluster_distances[j] / self._n_points_by_clusters[j]
                ) / distance_ij
                if ij_partner_cluster_index > max_partner_clusters_index:
                    max_partner_clusters_index = ij_partner_cluster_index
        try:
            return max_partner_clusters_index / n_clusters
        except ZeroDivisionError:
            # No clusters seen yet; inf is the worst value (smaller is better).
            return math.inf
@property
def bigger_is_better(self):
return False | /river_extra-0.14.0-py3-none-any.whl/river_extra/metrics/cluster/daviesbouldin.py | 0.912199 | 0.651466 | daviesbouldin.py | pypi |
import math
from river import metrics, utils
from . import base
__all__ = ["BallHall", "Cohesion", "SSW", "Xu"]
class SSW(base.MeanClusteringMetric):
    """Sum-of-Squares Within Clusters (SSW).

    Mean of sum of squared distances from data points to their assigned cluster centroids.
    The smaller the better: lower values mean more compact clusters (this class
    reports `bigger_is_better = False`; the previous docstring's "bigger the
    better" contradicted that).

    Examples
    --------

    >>> from river import cluster
    >>> from river import stream
    >>> from river import metrics

    >>> X = [
    ...     [1, 2],
    ...     [1, 4],
    ...     [1, 0],
    ...     [4, 2],
    ...     [4, 4],
    ...     [4, 0],
    ...     [-2, 2],
    ...     [-2, 4],
    ...     [-2, 0]
    ... ]

    >>> k_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)
    >>> metric = metrics.cluster.SSW()

    >>> for x, _ in stream.iter_array(X):
    ...     k_means = k_means.learn_one(x)
    ...     y_pred = k_means.predict_one(x)
    ...     metric = metric.update(x, y_pred, k_means.centers)

    >>> metric
    SSW: 3.514277

    References
    ----------
    [^1]: Bifet, A. et al. (2018). "Machine Learning for Data Streams".
    DOI: 10.7551/mitpress/10654.001.0001.

    """

    @property
    def bigger_is_better(self):
        return False

    def _eval(self, x, y_pred, centers):
        # Squared Euclidean distance from the point to its assigned centroid.
        return utils.math.minkowski_distance(centers[y_pred], x, 2)
class Cohesion(base.MeanClusteringMetric):
    """Average distance between the points and their assigned cluster centroids.

    Lower values indicate more compact clusters, so the smaller the better.

    Examples
    --------

    >>> from river import cluster
    >>> from river import stream
    >>> from river import metrics

    >>> X = [
    ...     [1, 2],
    ...     [1, 4],
    ...     [1, 0],
    ...     [4, 2],
    ...     [4, 4],
    ...     [4, 0],
    ...     [-2, 2],
    ...     [-2, 4],
    ...     [-2, 0]
    ... ]

    >>> k_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)
    >>> metric = metrics.cluster.Cohesion()

    >>> for x, _ in stream.iter_array(X):
    ...     k_means = k_means.learn_one(x)
    ...     y_pred = k_means.predict_one(x)
    ...     metric = metric.update(x, y_pred, k_means.centers)

    >>> metric
    Cohesion: 1.682748

    References
    ----------
    [^1]: Bifet, A. et al. (2018). "Machine Learning for Data Streams".
    DOI: 10.7551/mitpress/10654.001.0001.

    """

    @property
    def bigger_is_better(self):
        return False

    def _eval(self, x, y_pred, centers):
        # Euclidean distance from the point to its assigned centroid.
        squared = utils.math.minkowski_distance(centers[y_pred], x, 2)
        return math.sqrt(squared)
class BallHall(base.ClusteringMetric):
    """Ball-Hall index

    Ball-Hall index is a sum-of-squares based index. It is calculated by
    dividing the mean Sum-of-Squares Within clusters (SSW) by the number of
    generated clusters. (The previous docstring said "between clusters", which
    contradicted the implementation below.)

    The index is usually used to evaluate the number of clusters by the following
    criteria: the maximum value of the successive difference is determined as the
    optimal number of clusters.

    Examples
    --------

    >>> from river import cluster
    >>> from river import stream
    >>> from river import metrics

    >>> X = [
    ...     [1, 2],
    ...     [1, 4],
    ...     [1, 0],
    ...     [4, 2],
    ...     [4, 4],
    ...     [4, 0],
    ...     [-2, 2],
    ...     [-2, 4],
    ...     [-2, 0]
    ... ]

    >>> k_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)
    >>> metric = metrics.cluster.BallHall()

    >>> for x, _ in stream.iter_array(X):
    ...     k_means = k_means.learn_one(x)
    ...     y_pred = k_means.predict_one(x)
    ...     metric = metric.update(x, y_pred, k_means.centers)

    >>> metric
    BallHall: 1.171426

    References
    ----------
    [^1]: Ball, G.H., Hubert, L.J.: ISODATA, A novel method of data analysis
    and pattern classification (Tech. Rep. NTIS No. AD 699616).
    Standford Research Institute, Menlo Park (1965)

    """

    def __init__(self):
        # Consistency fix: every sibling metric in this module initialises its
        # base class; this one previously skipped super().__init__().
        super().__init__()
        self._ssw = metrics.cluster.SSW()
        self._n_clusters = 0

    def update(self, x, y_pred, centers, sample_weight=1.0):
        """Incorporate one point into the underlying SSW statistic."""
        self._ssw.update(x, y_pred, centers, sample_weight)
        self._n_clusters = len(centers)
        return self

    def revert(self, x, y_pred, centers, sample_weight=1.0):
        """Undo a previous `update` for the given point."""
        self._ssw.revert(x, y_pred, centers, sample_weight)
        self._n_clusters = len(centers)
        return self

    def get(self):
        """Return SSW / n_clusters; `math.inf` before any cluster is seen."""
        try:
            return self._ssw.get() / self._n_clusters
        except ZeroDivisionError:
            # No clusters yet; inf is the worst value since smaller is better.
            return math.inf

    @property
    def bigger_is_better(self):
        return False
class Xu(base.ClusteringMetric):
"""Xu Index
Xu Index is among the most complicated sum-of-squares based metrics [^1].
It is calculated based on the Sum-of-Squares Within Clusters (SSW), total
    number of points, number of clusters, and the dimension of the clustering problem.
The lower the Xu index, the higher the clustering quality is.
Examples
--------
>>> from river import cluster
>>> from river import stream
>>> from river import metrics
>>> X = [
... [1, 2],
... [1, 4],
... [1, 0],
... [4, 2],
... [4, 4],
... [4, 0],
... [-2, 2],
... [-2, 4],
... [-2, 0]
... ]
>>> k_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)
>>> metric = metrics.cluster.Xu()
>>> for x, _ in stream.iter_array(X):
... k_means = k_means.learn_one(x)
... y_pred = k_means.predict_one(x)
... metric = metric.update(x, y_pred, k_means.centers)
>>> metric
Xu: -2.73215
References
----------
[^1]: Q. Zhao, M. Xu, and P. Franti, "Sum-of-squares based cluster validity index
and significance analysis," in Adaptive and Natural Computing Algorithms,
M. Kolehmainen, P. Toivanen, and B. Beliczynski, Eds.
Berlin, Germany: Springer, 2009, pp. 313–322.
"""
    def __init__(self):
        super().__init__()
        self._ssw = metrics.cluster.SSW()
        self._n_points = 0
        self._n_clusters = 0
        # Dimensionality of the stream, meant to be fixed by the first point.
        self._dim = 0
        self._initialized = False
def update(self, x, y_pred, centers, sample_weight=1.0):
if not self._initialized:
self._dim = len(x)
if len(x) == self._dim:
self._ssw.update(x, y_pred, centers, sample_weight)
self._n_points += 1
self._n_clusters = len(centers)
return self
    def revert(self, x, y_pred, centers, sample_weight=1.0):
        """Undo a previous `update`; ignores points of mismatched dimensionality."""
        if len(x) == self._dim:
            self._ssw.revert(x, y_pred, centers, sample_weight)
            self._n_points -= 1
            self._n_clusters = len(centers)
        return self
def get(self):
try:
return self._dim * math.log(
math.sqrt(
self._ssw.get() / (self._dim * (self._n_points * self._n_points))
)
) + math.log(self._n_clusters)
except ZeroDivisionError:
return math.inf
@property
def bigger_is_better(self):
return False | /river_extra-0.14.0-py3-none-any.whl/river_extra/metrics/cluster/ssw.py | 0.919697 | 0.67178 | ssw.py | pypi |
import math
from river import stats, utils
from . import base
class SD(base.ClusteringMetric):
"""The SD validity index (SD).
The SD validity index (SD) [^1] is a more recent clustering validation measure. It is composed of
two terms:
* Scat(NC) stands for the scattering within clusters,
* Dis(NC) stands for the dispersion between clusters.
Like DB and SB, SD measures the compactness with variance of clustered objects and separation
with distance between cluster centers, but uses them in a different way. The smaller the value
of SD, the better.
In the original formula for SD validation index, the ratio between the maximum and the actual
number of clusters is taken into account. However, due to the fact that metrics are updated in
an incremental fashion, this ratio will be automatically set to default as 1.
Examples
--------
>>> from river import cluster
>>> from river import stream
>>> from river import metrics
>>> X = [
... [1, 2],
... [1, 4],
... [1, 0],
... [4, 2],
... [4, 4],
... [4, 0],
... [-2, 2],
... [-2, 4],
... [-2, 0]
... ]
>>> k_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)
>>> metric = metrics.cluster.SD()
>>> for x, _ in stream.iter_array(X):
... k_means = k_means.learn_one(x)
... y_pred = k_means.predict_one(x)
... metric = metric.update(x, y_pred, k_means.centers)
>>> metric
SD: 2.339016
References
----------
[^1]: Halkidi, M., Vazirgiannis, M., & Batistakis, Y. (2000). Quality Scheme Assessment in the
Clustering Process. Principles Of Data Mining And Knowledge Discovery, 265-276.
DOI: 10.1007/3-540-45372-5_26
"""
    def __init__(self):
        super().__init__()
        # NOTE(review): written here but never read in this class — confirm leftover.
        self._center_all_points = {}
        # Per-feature running variance over all points / per cluster label.
        self._overall_variance = {}
        self._cluster_variance = {}
        self._centers = {}
        self._initialized = False
@staticmethod
def _calculate_dispersion_nc(centers):
min_distance_clusters = math.inf
max_distance_clusters = -math.inf
sum_inverse_distances = 0
n_clusters = len(centers)
for i in range(n_clusters):
for j in range(i + 1, n_clusters):
distance_ij = math.sqrt(
utils.math.minkowski_distance(centers[i], centers[j], 2)
)
if distance_ij > max_distance_clusters:
max_distance_clusters = distance_ij
if distance_ij < min_distance_clusters:
min_distance_clusters = distance_ij
sum_inverse_distances += 1 / distance_ij
try:
return (
max_distance_clusters / min_distance_clusters
) * sum_inverse_distances
except ZeroDivisionError:
return math.inf
@staticmethod
def _norm(x):
origin = {i: 0 for i in x}
return math.sqrt(utils.math.minkowski_distance(x, origin, 2))
    def update(self, x, y_pred, centers, sample_weight=1.0):
        """Incorporate one point into the overall and per-cluster variances."""
        if not self._initialized:
            # Lazily create one running variance per feature, keyed like `x`.
            self._overall_variance = {i: stats.Var() for i in x}
            self._initialized = True
        if y_pred not in self._cluster_variance:
            # First point observed for this cluster label.
            self._cluster_variance[y_pred] = {i: stats.Var() for i in x}
        for i in x:
            self._cluster_variance[y_pred][i].update(x[i], w=sample_weight)
            self._overall_variance[i].update(x[i], w=sample_weight)
        self._centers = centers
        return self

    def revert(self, x, y_pred, centers, sample_weight=1.0):
        """Undo a previous `update`; assumes the point's cluster was seen before."""
        for i in x:
            self._overall_variance[i].update(x[i], w=-sample_weight)
            self._cluster_variance[y_pred][i].update(x[i], w=-sample_weight)
        self._centers = centers
        return self
def get(self):
dispersion_nc = self._calculate_dispersion_nc(self._centers)
overall_variance = {
i: self._overall_variance[i].get() for i in self._overall_variance
}
cluster_variance = {}
for i in self._cluster_variance:
cluster_variance[i] = {
j: self._cluster_variance[i][j].get() for j in self._cluster_variance[i]
}
scat_nc = 0
for i in cluster_variance:
scat_nc += self._norm(cluster_variance[i]) / self._norm(overall_variance)
try:
return scat_nc + dispersion_nc
except ZeroDivisionError:
return math.inf
@property
def bigger_is_better(self):
return False | /river_extra-0.14.0-py3-none-any.whl/river_extra/metrics/cluster/sd_validation.py | 0.910679 | 0.779343 | sd_validation.py | pypi |
import math
from river import stats, utils
from . import base
class IIndex(base.ClusteringMetric):
"""I-Index (I).
I-Index (I) [^1] adopts the maximum distance between cluster centers. It also shares the type of
formulation numerator-separation/denominator-compactness. For compactness, the distance from
a data point to its cluster center is also used like CH.
Examples
--------
>>> from river import cluster
>>> from river import stream
>>> from river import metrics
>>> X = [
... [1, 2],
... [1, 4],
... [1, 0],
... [4, 2],
... [4, 4],
... [4, 0],
... [-2, 2],
... [-2, 4],
... [-2, 0]
... ]
>>> k_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)
>>> metric = metrics.cluster.IIndex()
>>> for x, _ in stream.iter_array(X):
... k_means = k_means.learn_one(x)
... y_pred = k_means.predict_one(x)
... metric = metric.update(x, y_pred, k_means.centers)
>>> metric
IIndex: 6.836566
References
--------
[^1]: Maulik, U., Bandyopadhyay, S. (2002). Performance evaluation of some clustering algorithms and
validity indices. In: IEEE Transactions on Pattern Analysis and Machine Intelligence 24(12)
1650 - 1654. DOI: 10.1109/TPAMI.2002.1114856
"""
    def __init__(self):
        super().__init__()
        # Per-feature running mean of all points (the overall data center).
        self._center_all_points = {}
        # NOTE(review): despite the `_ssq_*` names, these hold sums of plain
        # (square-rooted) distances, not squared ones — confirm intended.
        self._ssq_points_cluster_centers = 0
        self._ssq_points_center = 0
        self._furthest_cluster_distance = 0
        self._n_clusters = 0
        self._dim = 0
        # Per-sample contributions, exposed so callers can pass them back to
        # `revert` via its `correction` argument.
        self.sample_correction = {}
        self._initialized = False
@staticmethod
def _find_furthest_cluster_distance(centers):
n_centers = len(centers)
max_distance = -math.inf
for i in range(n_centers):
for j in range(i + 1, n_centers):
distance_ij = math.sqrt(
utils.math.minkowski_distance(centers[i], centers[j], 2)
)
if distance_ij > max_distance:
max_distance = distance_ij
return max_distance
    def update(self, x, y_pred, centers, sample_weight=1.0):
        """Incorporate one point and refresh the furthest center-to-center distance."""
        self._furthest_cluster_distance = self._find_furthest_cluster_distance(centers)
        if not self._initialized:
            # Lazily create one running mean per feature, keyed like `x`.
            self._center_all_points = {i: stats.Mean() for i in x}
            self._dim = len(x)
            self._initialized = True
        for i in self._center_all_points:
            self._center_all_points[i].update(x[i], w=sample_weight)
        center_all_points = {
            i: self._center_all_points[i].get() for i in self._center_all_points
        }
        distance_point_cluster_center = math.sqrt(
            utils.math.minkowski_distance(centers[y_pred], x, 2)
        )
        distance_point_center = math.sqrt(
            utils.math.minkowski_distance(center_all_points, x, 2)
        )
        self._ssq_points_cluster_centers += distance_point_cluster_center
        self._ssq_points_center += distance_point_center
        self._n_clusters = len(centers)
        # To trace back: callers may pass this dict as `correction` to `revert`.
        self.sample_correction = {
            "distance_point_cluster_center": distance_point_cluster_center,
            "distance_point_center": distance_point_center,
        }
        return self

    def revert(self, x, y_pred, centers, sample_weight=1.0, correction=None):
        """Undo a previous `update`, using the `sample_correction` dict it produced.

        NOTE(review): the extra `correction` parameter deviates from the base
        class `revert` signature, and the default `None` makes the subscript
        operations below raise TypeError — confirm callers always pass it.
        """
        self._furthest_cluster_distance = self._find_furthest_cluster_distance(centers)
        for i in self._center_all_points:
            self._center_all_points[i].update(x[i], w=-sample_weight)
        self._ssq_points_cluster_centers -= correction["distance_point_cluster_center"]
        self._ssq_points_center -= correction["distance_point_center"]
        self._n_clusters = len(centers)
        self._dim = len(x)
        return self

    def get(self):
        """Return the I-Index raised to the power of the data dimensionality."""
        try:
            return (
                1
                / self._n_clusters
                * self._ssq_points_center
                / self._ssq_points_cluster_centers
                * self._furthest_cluster_distance
            ) ** self._dim
        except ZeroDivisionError:
            # No clusters, or zero compactness denominator, yet.
            return -math.inf
@property
def bigger_is_better(self):
return True | /river_extra-0.14.0-py3-none-any.whl/river_extra/metrics/cluster/i_index.py | 0.901929 | 0.552962 | i_index.py | pypi |
import math
from river import stats, utils
from . import base
__all__ = ["GD43", "GD53"]
class GD43(base.ClusteringMetric):
    r"""Generalized Dunn's index 43 (GD43).

    The Generalized Dunn's indices comprise a set of 17 variants of the original
    Dunn's index devised to address sensitivity to noise in the latter. The formula
    of this index is given by:

    $$
    GD_{rs} = \frac{\min_{i \neq j} [\delta_r (\omega_i, \omega_j)]}{\max_k [\Delta_s (\omega_k)]},
    $$

    where $\delta_r(.)$ is a measure of separation, and $\Delta_s(.)$ is a measure of compactness,
    the parameters $r$ and $s$ index the measures' formulations. In particular, when employing
    Euclidean distance, GD43 is formulated using:

    $$
    \delta_4 (\omega_i, \omega_j) = \lVert v_i - v_j \rVert_2,
    $$

    and

    $$
    \Delta_3 (\omega_k) = \frac{2 \times CP_1^2 (v_k, \omega_k)}{n_k}.
    $$

    Examples
    --------

    >>> from river import cluster
    >>> from river import stream
    >>> from river import metrics

    >>> X = [
    ...     [1, 2],
    ...     [1, 4],
    ...     [1, 0],
    ...     [4, 2],
    ...     [4, 4],
    ...     [4, 0],
    ...     [-2, 2],
    ...     [-2, 4],
    ...     [-2, 0]
    ... ]

    >>> k_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)
    >>> metric = metrics.cluster.GD43()

    >>> for x, _ in stream.iter_array(X):
    ...     k_means = k_means.learn_one(x)
    ...     y_pred = k_means.predict_one(x)
    ...     metric = metric.update(x, y_pred, k_means.centers)

    >>> metric
    GD43: 0.731369

    References
    ----------
    [^1]: J. Bezdek and N. Pal, "Some new indexes of cluster validity,"
    IEEE Trans. Syst., Man, Cybern. B, vol. 28, no. 3, pp. 301–315, Jun. 1998.

    """

    def __init__(self):
        super().__init__()
        self._minimum_separation = 0
        # One running mean of point-to-centroid distances per cluster label.
        self._avg_cp_by_clusters = {}

    @staticmethod
    def _find_minimum_separation(centers):
        """Smallest pairwise Euclidean distance between cluster centers.

        Assumes integer cluster labels 0..max(centers).
        """
        minimum_separation = math.inf
        n_centers = max(centers) + 1
        for i in range(n_centers):
            for j in range(i + 1, n_centers):
                separation_ij = math.sqrt(
                    utils.math.minkowski_distance(centers[i], centers[j], 2)
                )
                if separation_ij < minimum_separation:
                    minimum_separation = separation_ij
        return minimum_separation

    def update(self, x, y_pred, centers, sample_weight=1.0):
        """Incorporate one point into its cluster's compactness statistic."""
        self._minimum_separation = self._find_minimum_separation(centers)
        distance = math.sqrt(utils.math.minkowski_distance(centers[y_pred], x, 2))
        # Create the running mean lazily on the first point of a cluster (the
        # previous if/else duplicated the update call in both branches).
        if y_pred not in self._avg_cp_by_clusters:
            self._avg_cp_by_clusters[y_pred] = stats.Mean()
        self._avg_cp_by_clusters[y_pred].update(distance, w=sample_weight)
        return self

    def revert(self, x, y_pred, centers, sample_weight=1.0):
        """Undo a previous `update` for the given point."""
        self._minimum_separation = self._find_minimum_separation(centers)
        distance = math.sqrt(utils.math.minkowski_distance(centers[y_pred], x, 2))
        self._avg_cp_by_clusters[y_pred].update(distance, w=-sample_weight)
        return self

    def get(self):
        """Return min separation / (2 * max average compactness)."""
        avg_cp_by_clusters = {
            i: self._avg_cp_by_clusters[i].get() for i in self._avg_cp_by_clusters
        }
        try:
            return self._minimum_separation / (2 * max(avg_cp_by_clusters.values()))
        except (ZeroDivisionError, ValueError):
            # ValueError: no cluster statistics yet (max() of an empty sequence);
            # this previously escaped as an uncaught exception. ZeroDivisionError:
            # all clusters are perfectly compact.
            # NOTE(review): -inf on zero compactness is the *worst* score even
            # though the index mathematically diverges to +inf — confirm intended.
            return -math.inf

    @property
    def bigger_is_better(self):
        return True
class GD53(base.ClusteringMetric):
    r"""Generalized Dunn's index 53 (GD53).

    The Generalized Dunn's indices comprise a set of 17 variants of the original
    Dunn's index devised to address sensitivity to noise in the latter. The formula
    of this index is given by:

    $$
    GD_{rs} = \frac{\min_{i \neq j} [\delta_r (\omega_i, \omega_j)]}{\max_k [\Delta_s (\omega_k)]},
    $$

    where $\delta_r(.)$ is a measure of separation, and $\Delta_s(.)$ is a measure of compactness,
    the parameters $r$ and $s$ index the measures' formulations. In particular, when employing
    Euclidean distance, GD53 is formulated using:

    $$
    \delta_5 (\omega_i, \omega_j) = \frac{CP_1^2 (v_i, \omega_i) + CP_1^2 (v_j, \omega_j)}{n_i + n_j},
    $$

    and

    $$
    \Delta_3 (\omega_k) = \frac{2 \times CP_1^2 (v_k, \omega_k)}{n_k}.
    $$

    Examples
    --------
    >>> from river import cluster
    >>> from river import stream
    >>> from river import metrics

    >>> X = [
    ...     [1, 2],
    ...     [1, 4],
    ...     [1, 0],
    ...     [4, 2],
    ...     [4, 4],
    ...     [4, 0],
    ...     [-2, 2],
    ...     [-2, 4],
    ...     [-2, 0]
    ... ]

    >>> k_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)
    >>> metric = metrics.cluster.GD53()

    >>> for x, _ in stream.iter_array(X):
    ...     k_means = k_means.learn_one(x)
    ...     y_pred = k_means.predict_one(x)
    ...     metric = metric.update(x, y_pred, k_means.centers)

    >>> metric
    GD53: 0.158377

    References
    ----------
    [^1]: J. Bezdek and N. Pal, "Some new indexes of cluster validity,"
    IEEE Trans. Syst., Man, Cybern. B, vol. 28, no. 3, pp. 301–315, Jun. 1998.
    """

    def __init__(self):
        super().__init__()
        # Smallest pairwise separation between cluster centers.
        self._minimum_separation = 0
        # Sum of point-to-center distances per cluster label.
        self._cp_by_clusters = {}
        # Number of points assigned to each cluster label.
        self._n_points_by_clusters = {}
        self._n_clusters = 0

    @staticmethod
    def _find_minimum_separation(centers):
        """Return the smallest pairwise separation between cluster centers.

        NOTE(review): unlike GD43's version, no square root is taken here, so
        this is the *squared* Euclidean distance — confirm the asymmetry with
        GD43 is intended.
        """
        minimum_separation = math.inf
        # Assumes `centers` is keyed by contiguous integer labels 0..k-1.
        n_centers = max(centers) + 1
        for i in range(n_centers):
            for j in range(i + 1, n_centers):
                separation_ij = utils.math.minkowski_distance(centers[i], centers[j], 2)
                if separation_ij < minimum_separation:
                    minimum_separation = separation_ij
        return minimum_separation

    def update(self, x, y_pred, centers, sample_weight=1.0):
        """Account for a new sample `x` assigned to cluster `y_pred`."""
        self._minimum_separation = self._find_minimum_separation(centers)
        distance = math.sqrt(utils.math.minkowski_distance(centers[y_pred], x, 2))
        try:
            self._cp_by_clusters[y_pred] += distance
            self._n_points_by_clusters[y_pred] += 1
        except KeyError:
            # First sample observed for this cluster label.
            self._cp_by_clusters[y_pred] = distance
            self._n_points_by_clusters[y_pred] = 1
        self._n_clusters = len(centers)
        return self

    def revert(self, x, y_pred, centers, sample_weight=1.0):
        """Undo the effect of a previous `update` call for the same sample."""
        self._minimum_separation = self._find_minimum_separation(centers)
        distance = math.sqrt(utils.math.minkowski_distance(centers[y_pred], x, 2))
        self._cp_by_clusters[y_pred] -= distance
        self._n_points_by_clusters[y_pred] -= 1
        self._n_clusters = len(centers)
        return self

    def get(self):
        """Return the current GD53 value (or ``-inf`` while undefined)."""
        # Smallest delta_5 over all cluster pairs observed so far.
        min_delta_5 = math.inf
        for i in range(self._n_clusters):
            for j in range(i + 1, self._n_clusters):
                try:
                    delta_5 = (self._cp_by_clusters[i] + self._cp_by_clusters[j]) / (
                        self._n_points_by_clusters[i] + self._n_points_by_clusters[j]
                    )
                except KeyError:
                    # At least one of the clusters has no assigned points yet.
                    continue
                if delta_5 < min_delta_5:
                    min_delta_5 = delta_5
        try:
            # NOTE(review): the docstring formula puts the separation measure in
            # the numerator and the compactness measure in the denominator,
            # whereas this divides min(delta_5) by the minimum separation —
            # verify the intended orientation of the ratio.
            return min_delta_5 / self._minimum_separation
        except ZeroDivisionError:
            return -math.inf

    @property
    def bigger_is_better(self):
        # Larger values indicate better clustering solutions.
        return True
import math
from river import stats, utils
from . import base
class PS(base.ClusteringMetric):
    r"""Partition Separation (PS).

    The PS index [^1] was originally developed for fuzzy clustering. This index
    only comprises a measure of separation between prototypes. Although classified
    as a batch clustering validity index (CVI), it can be readily used to evaluate
    the partitions identified by unsupervised incremental learners that model clusters
    using centroids.

    Larger values of PS indicate better clustering solutions.

    The PS value is given by

    $$
    PS = \sum_{i=1}^k PS_i,
    $$

    where

    $$
    PS_i = \frac{n_i}{\max_j n_j} - exp \left[ - \frac{\min_{i \neq j} (\lVert v_i - v_j \rVert_2^2)}{\beta_T} \right],
    $$

    $$
    \beta_T = \frac{1}{k} \sum_{l=1}^k \lVert v_l - \bar{v} \rVert_2 ^2,
    $$

    and

    $$
    \bar{v} = \frac{1}{k} \sum_{l=1}^k v_l.
    $$

    Examples
    --------
    >>> from river import cluster
    >>> from river import stream
    >>> from river import metrics

    >>> X = [
    ...     [1, 2],
    ...     [1, 4],
    ...     [1, 0],
    ...     [4, 2],
    ...     [4, 4],
    ...     [4, 0],
    ...     [-2, 2],
    ...     [-2, 4],
    ...     [-2, 0]
    ... ]

    >>> k_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)
    >>> metric = metrics.cluster.PS()

    >>> for x, _ in stream.iter_array(X):
    ...     k_means = k_means.learn_one(x)
    ...     y_pred = k_means.predict_one(x)
    ...     metric = metric.update(x, y_pred, k_means.centers)

    >>> metric
    PS: 1.336026

    References
    ----------
    [^1]: E. Lughofer, "Extensions of vector quantization for incremental clustering,"
    Pattern Recognit., vol. 41, no. 3, pp. 995–1011, Mar. 2008.
    """

    def __init__(self):
        super().__init__()
        # Smallest pairwise separation between cluster centers.
        self._minimum_separation = 0
        # Feature-wise running mean of the cluster prototypes (the "center of
        # centers" \bar{v}).
        self._center_centers = {}
        # Number of points assigned to each cluster label.
        self._n_points_by_cluster = {}
        # Normalization term beta_T from the PS formula.
        self._beta_t = 0
        self._n_clusters = 0

    @staticmethod
    def _find_minimum_separation(centers):
        """Return the smallest pairwise separation between cluster centers.

        Assumes `centers` is keyed by contiguous integer labels 0..k-1.
        """
        minimum_separation = math.inf
        n_centers = max(centers) + 1
        for i in range(n_centers):
            for j in range(i + 1, n_centers):
                separation_ij = utils.math.minkowski_distance(centers[i], centers[j], 2)
                if separation_ij < minimum_separation:
                    minimum_separation = separation_ij
        return minimum_separation

    def _refresh_separation_stats(self, x, centers, sample_weight):
        """Recompute the separation terms shared by `update` and `revert`.

        Refreshes the minimum inter-center separation, the mean prototype
        \bar{v} (keyed by the features of `x`), and the normalization term
        beta_T. These depend only on the current `centers`, so the logic is
        identical for both update and revert (it was previously duplicated).
        """
        self._minimum_separation = self._find_minimum_separation(centers)
        self._center_centers = {i: stats.Mean() for i in x}
        for i in self._center_centers:
            for j in centers:
                self._center_centers[i].update(centers[j][i], w=sample_weight)
        center_centers = {
            i: self._center_centers[i].get() for i in self._center_centers
        }
        beta_t = stats.Mean()
        for i in centers:
            beta_t.update(utils.math.minkowski_distance(centers[i], center_centers, 2))
        self._beta_t = beta_t.get()

    def update(self, x, y_pred, centers, sample_weight=1.0):
        """Account for a new sample `x` assigned to cluster `y_pred`."""
        self._refresh_separation_stats(x, centers, sample_weight)
        try:
            self._n_points_by_cluster[y_pred] += 1
        except KeyError:
            # First sample observed for this cluster label.
            self._n_points_by_cluster[y_pred] = 1
        self._n_clusters = len(centers)
        return self

    def revert(self, x, y_pred, centers, sample_weight=1.0):
        """Undo the effect of a previous `update` call for the same sample."""
        self._refresh_separation_stats(x, centers, sample_weight)
        self._n_points_by_cluster[y_pred] -= 1
        self._n_clusters = len(centers)
        return self

    def get(self):
        """Return the current PS value (or ``-inf`` while undefined).

        The sum over clusters of n_i / max_j(n_j) collapses to
        sum(counts) / max(counts), and the exponential term is identical for
        every cluster, hence the factor `self._n_clusters`.
        """
        try:
            return sum(self._n_points_by_cluster.values()) / max(
                self._n_points_by_cluster.values()
            ) - self._n_clusters * math.exp(-self._minimum_separation / self._beta_t)
        except (ZeroDivisionError, ValueError):
            # No points observed yet (ValueError from max) or beta_T == 0.
            return -math.inf

    @property
    def bigger_is_better(self):
        # Larger values indicate better clustering solutions.
        return True
import random
import typing
from river.tree.nodes.htc_nodes import LeafMajorityClass
from river.tree.nodes.htr_nodes import LeafAdaptive, LeafMean, LeafModel
from river.tree.nodes.leaf import HTLeaf
class ETLeaf(HTLeaf):
    """The Extra Tree leaves change the way in which the splitters are updated
    (by using subsets of features).

    Parameters
    ----------
    stats
        Initial class observations.
    depth
        The depth of the node.
    splitter
        The numeric attribute observer algorithm used to monitor target statistics
        and perform split attempts.
    max_features
        Number of attributes per subset for each node split.
    seed
        Seed to ensure reproducibility.
    kwargs
        Other parameters passed to the learning node.
    """

    def __init__(self, stats, depth, splitter, max_features, seed, **kwargs):
        super().__init__(stats, depth, splitter, **kwargs)

        self.max_features = max_features
        self.seed = seed
        self._rng = random.Random(self.seed)
        # Lazily initialized from the first instance this leaf observes.
        self.feature_indices = []

    def _iter_features(self, x) -> typing.Iterable:
        """Yield (feature id, value) pairs from the monitored random subset."""
        # Only a random subset of the features is monitored
        if not self.feature_indices:
            self.feature_indices = self._sample_features(x, self.max_features)

        for att_id in self.feature_indices:
            # First check if the feature is available in the current instance
            if att_id in x:
                yield att_id, x[att_id]

    def _sample_features(self, x, max_features):
        """Draw the random feature subset monitored by this leaf.

        The subset size is capped by the number of features actually present
        in `x`; previously, an instance carrying fewer than `max_features`
        features made `random.sample` raise a `ValueError`.
        """
        features = list(x.keys())
        return self._rng.sample(features, min(max_features, len(features)))
class ETLeafMajorityClass(ETLeaf, LeafMajorityClass):
    """Extra-Trees leaf that predicts the majority class."""

    # Pass-through constructor: behavior is fully defined by the parents.
    def __init__(self, stats, depth, splitter, max_features, seed, **kwargs):
        super().__init__(stats, depth, splitter, max_features, seed, **kwargs)
class ETLeafMean(ETLeaf, LeafMean):
    """Extra-Trees regression leaf that predicts the target mean."""

    # Pass-through constructor: behavior is fully defined by the parents.
    def __init__(self, stats, depth, splitter, max_features, seed, **kwargs):
        super().__init__(stats, depth, splitter, max_features, seed, **kwargs)
class ETLeafModel(ETLeaf, LeafModel):
    """Extra-Trees regression leaf that predicts with an embedded leaf model."""

    # Pass-through constructor: behavior is fully defined by the parents.
    def __init__(self, stats, depth, splitter, max_features, seed, **kwargs):
        super().__init__(stats, depth, splitter, max_features, seed, **kwargs)
class ETLeafAdaptive(ETLeaf, LeafAdaptive):
    """Extra-Trees regression leaf that adaptively picks mean or model output."""

    # Pass-through constructor: behavior is fully defined by the parents.
    def __init__(self, stats, depth, splitter, max_features, seed, **kwargs):
        super().__init__(stats, depth, splitter, max_features, seed, **kwargs)
import abc
import collections
import random
import sys
from river import stats
from river.tree.splitter import Splitter
from river.tree.utils import BranchFactory
class RandomSplitter(Splitter):
    """Attribute observer that evaluates a single random split threshold.

    The first `buffer_size` observations are buffered to estimate the
    feature's range; a threshold is then drawn uniformly at random inside
    that range and every observation is routed to branch 0 (value <=
    threshold) or branch 1 (value > threshold). Subclasses define how the
    per-branch target statistics are maintained via `_update_stats`.
    """

    def __init__(self, seed, buffer_size):
        super().__init__()
        self.seed = seed
        self.buffer_size = buffer_size
        self.threshold = None
        # Per-branch target statistics; initialized by subclasses.
        self.stats = None

        self._rng = random.Random(self.seed)
        # Holds (att_val, target_val, sample_weight) triples until the
        # threshold is drawn; set to None afterwards.
        self._buffer = []

    def __deepcopy__(self, memo):
        """Change the behavior of deepcopy to allow copies to have a different rng."""
        seed = self._rng.randint(0, sys.maxsize)
        new = self.__class__(seed=seed, buffer_size=self.buffer_size)
        return new

    @abc.abstractmethod
    def _update_stats(self, branch, target_val, sample_weight):
        pass

    def cond_proba(self, att_val, class_val) -> float:
        """This attribute observer does not support probability density estimation."""
        raise NotImplementedError

    def update(self, att_val, target_val, sample_weight) -> "Splitter":
        """Route one observation to a branch, drawing the threshold if needed."""
        if self.threshold is None:
            if len(self._buffer) < self.buffer_size:
                self._buffer.append((att_val, target_val, sample_weight))
                return self

            # Buffer is full: draw the random threshold inside the observed
            # range and replay the buffered samples into the branch stats.
            mn = min(self._buffer, key=lambda t: t[0])[0]
            mx = max(self._buffer, key=lambda t: t[0])[0]
            self.threshold = self._rng.uniform(mn, mx)

            for a, t, w in self._buffer:
                self._update_stats(0 if a <= self.threshold else 1, t, w)
            self._buffer = None
            # Fall through so the sample that triggered the threshold draw is
            # also counted — it was previously discarded by an early return.

        self._update_stats(0 if att_val <= self.threshold else 1, target_val, sample_weight)
        return self

    def best_evaluated_split_suggestion(self, criterion, pre_split_dist, att_idx, binary_only):
        """Return the single candidate binary split on the random threshold."""
        post_split_dist = [self.stats[0], self.stats[1]]
        merit = criterion.merit_of_split(pre_split_dist, post_split_dist)
        return BranchFactory(
            merit=merit,
            feature=att_idx,
            split_info=self.threshold,
            children_stats=post_split_dist,
        )
class ClassRandomSplitter(RandomSplitter):
    """Random splitter for classification targets (per-branch class counts)."""

    def __init__(self, seed, buffer_size):
        super().__init__(seed, buffer_size)
        # Branch 0: values <= threshold; branch 1: values > threshold.
        self.stats = {0: collections.Counter(), 1: collections.Counter()}

    def _update_stats(self, branch, target_val, sample_weight):
        # Accumulate the sample weight for the observed class label.
        self.stats[branch][target_val] += sample_weight
class RegRandomSplitter(RandomSplitter):
    """Random splitter for regression targets (per-branch running variance)."""

    def __init__(self, seed, buffer_size):
        super().__init__(seed, buffer_size)
        # One running-variance tracker per branch (0: <= threshold, 1: >).
        self.stats = {branch: stats.Var() for branch in (0, 1)}

    def _update_stats(self, branch, target_val, sample_weight):
        self.stats[branch].update(target_val, sample_weight)

    @property
    def is_target_class(self) -> bool:
        # Regression splitter: the target is numeric, not a class label.
        return False
import abc
import collections
import copy
import math
import random
import sys
import typing
from river import base, drift, metrics, tree
from ..tree.nodes.et_nodes import ETLeafAdaptive, ETLeafMean, ETLeafModel
from ..tree.splitter import RegRandomSplitter
class ExtraTrees(base.Ensemble, metaclass=abc.ABCMeta):
    """Base class for the online Extra Trees ensembles.

    Handles the ensemble bookkeeping: member creation, instance resampling,
    warning/drift detection, per-tree performance tracking, and run-time
    statistics. Concrete subclasses define how individual trees are built
    (`_new_member`) and which signal feeds the drift detectors
    (`_drift_input`).
    """

    _FEATURES_SQRT = "sqrt"
    _FEATURES_LOG2 = "log2"
    _FEATURES_RANDOM = "random"

    _BAGGING = "bagging"
    _SUBBAGGING = "subbagging"

    _DETECTION_ALL = "all"
    _DETECTION_DROP = "drop"
    _DETECTION_OFF = "off"

    def __init__(
        self,
        n_models: int,
        max_features: typing.Union[bool, str, int],
        resampling_strategy: typing.Optional[str],
        resampling_rate: typing.Union[int, float],
        detection_mode: str,
        warning_detector: base.DriftDetector,
        drift_detector: base.DriftDetector,
        max_depth: typing.Union[str, int],
        randomize_tree_depth: bool,
        track_metric: typing.Union[metrics.base.MultiClassMetric, metrics.base.RegressionMetric],
        disable_weighted_vote: bool,
        split_buffer_size: int,
        seed: int,
    ):
        self.data = []
        self.n_models = n_models
        self.max_features = max_features

        if resampling_strategy not in [None, self._BAGGING, self._SUBBAGGING]:
            raise ValueError(f"Invalid resampling strategy: {resampling_strategy}")
        self.resampling_strategy = resampling_strategy

        if self.resampling_strategy == self._BAGGING:
            if resampling_rate < 1:
                raise ValueError(
                    "'resampling_rate' must be an integer greater than or"
                    "equal to 1, when resample_strategy='bagging'."
                )
            # Cast to integer (online bagging using poisson sampling)
            resampling_rate = int(resampling_rate)
        elif self.resampling_strategy == self._SUBBAGGING:
            if not 0 < resampling_rate <= 1:
                raise ValueError(
                    "resampling_rate must be a float in the interval (0, 1],"
                    "when resampling_strategy='subbagging'."
                )
        # Always store the (possibly cast) rate: previously the attribute was
        # only set when a resampling strategy was selected, which left it
        # missing for cloning/inspection when resampling_strategy is None.
        self.resampling_rate = resampling_rate

        if detection_mode not in [
            self._DETECTION_ALL,
            self._DETECTION_DROP,
            self._DETECTION_OFF,
        ]:
            raise ValueError(
                f"Invalid drift detection mode. Valid values are: '{self._DETECTION_ALL}',"
                f" {self._DETECTION_DROP}, and '{self._DETECTION_OFF}'."
            )
        self.detection_mode = detection_mode

        self.warning_detector = (
            warning_detector if warning_detector is not None else drift.ADWIN(delta=0.01)
        )
        self.drift_detector = (
            drift_detector if drift_detector is not None else drift.ADWIN(delta=0.001)
        )

        self.max_depth = max_depth
        self.randomize_tree_depth = randomize_tree_depth
        self.track_metric = track_metric
        self.disable_weighted_vote = disable_weighted_vote
        self.split_buffer_size = split_buffer_size
        self.seed = seed

        # The predictive performance of each tree
        self._perfs: typing.List = []
        # Keep a running estimate of the sum of performances
        self._perf_sum: float = 0

        # Number of times a tree will use each instance to learn from it
        self._weight_sampler = self.__weight_sampler_factory()

        # General statistics
        # Counter of the number of instances each ensemble member has processed (instance weights
        # are not accounted for, just the number of instances)
        self._sample_counter = collections.Counter()
        # Total of samples processed by the Extra Trees ensemble
        self._total_instances: float = 0
        # Number of warnings triggered
        self._n_warnings = collections.Counter()
        # Number of drifts detected
        self._n_drifts = collections.Counter()
        # Number of tree swaps
        self._n_tree_swaps = collections.Counter()
        # Alternate trees trained in the background after a warning signal.
        self._background_trees = {}

        # Initialize drift detectors and select the detection mode procedure
        if self.detection_mode == self._DETECTION_ALL:
            self._warn_detectors = {i: self.warning_detector.clone() for i in range(self.n_models)}
            self._drift_detectors = {i: self.drift_detector.clone() for i in range(self.n_models)}
        elif self.detection_mode == self._DETECTION_DROP:
            self._warn_detectors = {}
            self._drift_detectors = {i: self.drift_detector.clone() for i in range(self.n_models)}
        else:  # detection_mode: "off"
            self._warn_detectors = {}
            self._drift_detectors = {}

        self._detect = self.__detection_mode_factory()

        # Set the rng
        self._rng = random.Random(seed)

    @abc.abstractmethod
    def _new_member(
        self, max_features, max_depth, seed
    ) -> typing.Union[base.Classifier, base.Regressor]:
        """Build a new ensemble member with the given configuration."""

    @abc.abstractmethod
    def _drift_input(self, y, y_hat) -> typing.Union[int, float]:
        """Map a (true, predicted) pair to the value fed to the detectors."""

    def _calculate_tree_depth(self) -> float:
        """Return the maximum depth for a new tree (possibly randomized)."""
        if self.max_depth is None:
            return math.inf
        if not self.randomize_tree_depth:
            return self.max_depth
        else:  # Randomize tree depth
            return self._rng.randint(1, self.max_depth if not math.isinf(self.max_depth) else 9999)

    def _calculate_max_features(self, n_features) -> int:
        """Resolve `self.max_features` to a concrete feature-subset size."""
        if self.max_features == self._FEATURES_RANDOM:
            # Generate a random integer
            return self._rng.randint(2, n_features)
        else:
            if self.max_features == self._FEATURES_SQRT:
                max_feat = round(math.sqrt(n_features))
            elif self.max_features == self._FEATURES_LOG2:
                max_feat = round(math.log2(n_features))
            elif isinstance(self.max_features, int):
                # Use the user-supplied absolute count. (Bug fix: this branch
                # previously assigned `n_features`, silently ignoring the
                # integer value documented as "consider max_features per split".)
                max_feat = self.max_features
            elif isinstance(self.max_features, float):
                # Consider 'max_features' as a percentage
                max_feat = int(self.max_features * n_features)
            elif self.max_features is None:
                max_feat = n_features
            else:
                raise AttributeError(
                    f"Invalid max_features: {self.max_features}.\n"
                    f"Valid options are: int [2, M], float (0., 1.],"
                    f" {self._FEATURES_SQRT}, {self._FEATURES_LOG2}"
                )
            # Sanity checks
            # max_feat is negative, use max_feat + n
            if max_feat < 0:
                max_feat += n_features
            # max_feat <= 0
            # (m can be negative if max_feat is negative and abs(max_feat) > n),
            # use max_features = 1
            if max_feat <= 0:
                max_feat = 1
            # max_feat > n, then use n
            if max_feat > n_features:
                max_feat = n_features

            return max_feat

    def _init_trees(self, n_features: int):
        """Lazily create the ensemble members and their metric trackers."""
        for i in range(self.n_models):
            self.data.append(
                self._new_member(
                    max_features=self._calculate_max_features(n_features),
                    max_depth=self._calculate_tree_depth(),
                    seed=self._rng.randint(0, sys.maxsize),  # randomly creates a new seed
                )
            )
            self._perfs.append(copy.deepcopy(self.track_metric))

    # TODO check if it can be pickled
    def __weight_sampler_factory(self):
        """Return the callable that decides each instance's training weight."""
        def constant_sampler():
            return 1

        def bagging_sampler():
            return self._poisson_sample(self.resampling_rate)

        def subbagging_sampler():
            return 1 if self._rng.random() <= self.resampling_rate else 0

        if self.resampling_strategy == self._BAGGING:
            return bagging_sampler
        elif self.resampling_strategy == self._SUBBAGGING:
            return subbagging_sampler
        else:
            return constant_sampler

    # TODO check if there is a more elegant solution
    def __detection_mode_factory(self):
        """Return the (drift, warning) detection routine for the chosen mode."""
        def detection_mode_all(drift_detector, warning_detector, detector_input):
            in_warning = warning_detector.update(detector_input).drift_detected
            in_drift = drift_detector.update(detector_input).drift_detected
            return in_drift, in_warning

        def detection_mode_drop(drift_detector, warning_detector, detector_input):
            in_drift = drift_detector.update(detector_input).drift_detected
            return in_drift, False

        def detection_mode_off(drift_detector, warning_detector, detector_input):
            return False, False

        if self.detection_mode == self._DETECTION_ALL:
            return detection_mode_all
        elif self.detection_mode == self._DETECTION_DROP:
            return detection_mode_drop
        else:
            return detection_mode_off

    def learn_one(self, x, y):
        """Update every ensemble member with one (x, y) instance."""
        if not self.models:
            self._init_trees(len(x))

        self._total_instances += 1
        trained = []
        for i, model in enumerate(self.models):
            # Test-then-train: the prequential error feeds the detectors.
            y_hat = model.predict_one(x)
            in_drift, in_warning = self._detect(
                self._drift_detectors.get(i),
                self._warn_detectors.get(i),
                self._drift_input(y, y_hat),
            )

            if in_warning:
                # Start training an alternate tree in the background.
                self._background_trees[i] = self._new_member(
                    max_features=self._calculate_max_features(len(x)),
                    max_depth=self._calculate_tree_depth(),
                    seed=self._rng.randint(0, sys.maxsize),  # randomly creates a new seed
                )
                # Reset the warning detector
                self._warn_detectors[i] = self.warning_detector.clone()
                # Update statistics
                self._n_warnings.update([i])

            # Drift detected: time to change (swap or reset) the affected tree
            if in_drift:
                if i in self._background_trees:
                    self.data[i] = self._background_trees[i]
                    del self._background_trees[i]
                    self._n_tree_swaps.update([i])
                else:
                    self.data[i] = self._new_member(
                        max_features=self._calculate_max_features(len(x)),
                        max_depth=self._calculate_tree_depth(),
                        seed=self._rng.randint(0, sys.maxsize),  # randomly creates a new seed
                    )
                # Reset the drift detector
                self._drift_detectors[i] = self.drift_detector.clone()
                # Update statistics
                self._n_drifts.update([i])

                # Also reset tree's error estimates
                self._perf_sum -= self._perfs[i].get()
                self._perfs[i] = copy.deepcopy(self.track_metric)
                self._perf_sum += self._perfs[i].get()
                # And the number of observations of the new model
                self._sample_counter[i] = 0

            # Remove the old performance estimate
            self._perf_sum -= self._perfs[i].get()
            # Update metric
            self._perfs[i].update(y, y_hat)
            # Add the new performance estimate
            self._perf_sum += self._perfs[i].get()

            # Define the weight of the instance
            w = self._weight_sampler()
            if w == 0:  # Skip model update if w is zero
                continue

            model.learn_one(x, y, sample_weight=w)
            if i in self._background_trees:
                self._background_trees[i].learn_one(x, y, sample_weight=w)
            trained.append(i)

        # Increase by one the count of instances observed by each trained model
        self._sample_counter.update(trained)
        return self

    def _poisson_sample(self, lambda_value) -> int:
        """Helper function to sample from poisson distributions without relying on numpy."""
        # Knuth's algorithm: multiply uniforms until falling below exp(-lambda).
        l_val = math.exp(-lambda_value)
        k = 0
        p = 1
        while p > l_val:
            k += 1
            p *= self._rng.random()
        return k - 1

    # Properties
    @property
    def n_warnings(self) -> collections.Counter:
        """The number of warnings detected per ensemble member."""
        return self._n_warnings

    @property
    def n_drifts(self) -> collections.Counter:
        """The number of concept drifts detected per ensemble member."""
        return self._n_drifts

    @property
    def n_tree_swaps(self) -> collections.Counter:
        """The number of performed alternate tree swaps.

        Not applicable if the warning detectors are disabled.
        """
        return self._n_tree_swaps

    @property
    def total_instances(self) -> float:
        """The total number of instances processed by the ensemble."""
        return self._total_instances

    @property
    def instances_per_tree(self) -> collections.Counter:
        """The number of instances processed by each one of the current forest members.

        Each time a concept drift is detected, the count corresponding to the affected tree is
        reset.
        """
        return self._sample_counter
class ETRegressor(tree.HoeffdingTreeRegressor):
    """Extra Tree regressor.

    This is the base-estimator of the Extra Trees regressor.
    This variant of the Hoeffding Tree regressor includes the `max_features` parameter,
    which defines the number of randomly selected features to be considered at each split.
    It also evaluates split candidates randomly.
    """

    def __init__(
        self,
        max_features,
        grace_period,
        max_depth,
        delta,
        tau,
        leaf_prediction,
        leaf_model,
        model_selector_decay,
        nominal_attributes,
        min_samples_split,
        binary_split,
        max_size,
        memory_estimate_period,
        stop_mem_management,
        remove_poor_attrs,
        merit_preprune,
        split_buffer_size,
        seed,
    ):
        self.max_features = max_features
        self.split_buffer_size = split_buffer_size
        self.seed = seed
        # Source of per-node seeds: every new learning node derives its own
        # seed from this rng so leaves sample different feature subspaces.
        self._rng = random.Random(self.seed)

        super().__init__(
            grace_period=grace_period,
            max_depth=max_depth,
            delta=delta,
            tau=tau,
            leaf_prediction=leaf_prediction,
            leaf_model=leaf_model,
            model_selector_decay=model_selector_decay,
            nominal_attributes=nominal_attributes,
            # Numeric features are monitored with the randomized splitter,
            # which tests a single random threshold per feature.
            splitter=RegRandomSplitter(
                seed=self._rng.randint(0, sys.maxsize),
                buffer_size=self.split_buffer_size,
            ),
            min_samples_split=min_samples_split,
            binary_split=binary_split,
            max_size=max_size,
            memory_estimate_period=memory_estimate_period,
            stop_mem_management=stop_mem_management,
            remove_poor_attrs=remove_poor_attrs,
            merit_preprune=merit_preprune,
        )

    def _new_learning_node(self, initial_stats=None, parent=None):  # noqa
        """Create a new learning node.

        The type of learning node depends on the tree configuration.
        """
        if parent is not None:
            depth = parent.depth + 1
        else:
            depth = 0

        # Generate a random seed for the new learning node
        seed = self._rng.randint(0, sys.maxsize)

        leaf_model = None
        if self.leaf_prediction in {self._MODEL, self._ADAPTIVE}:
            if parent is None:
                leaf_model = copy.deepcopy(self.leaf_model)
            else:
                # Warm-start the child model from the parent's leaf model.
                leaf_model = copy.deepcopy(parent._leaf_model)  # noqa

        if self.leaf_prediction == self._TARGET_MEAN:
            return ETLeafMean(
                initial_stats,
                depth,
                self.splitter,
                self.max_features,
                seed,
            )
        elif self.leaf_prediction == self._MODEL:
            return ETLeafModel(
                initial_stats,
                depth,
                self.splitter,
                self.max_features,
                seed,
                leaf_model=leaf_model,
            )
        else:  # adaptive learning node
            new_adaptive = ETLeafAdaptive(
                initial_stats,
                depth,
                self.splitter,
                self.max_features,
                seed,
                leaf_model=leaf_model,
            )
            # Carry over the parent's error trackers so the mean-vs-model
            # selection does not restart from scratch.
            if parent is not None and isinstance(parent, ETLeafAdaptive):
                new_adaptive._fmse_mean = parent._fmse_mean  # noqa
                new_adaptive._fmse_model = parent._fmse_model  # noqa

            return new_adaptive
class ExtraTreesRegressor(ExtraTrees, base.Regressor):
    """Online Extra Trees regressor.

    The online Extra Trees[^1] ensemble takes some steps further into randomization when
    compared to Adaptive Random Forests (ARF). A subspace of the feature space is considered
    at each split attempt, as ARF does, and online bagging or subbagging can also be
    (optionally) used. Nonetheless, Extra Trees randomizes the split candidates evaluated by each
    leaf node (just a single split is tested by numerical feature, which brings significant
    speedups to the ensemble), and might also randomize the maximum depth of the forest members,
    as well as the size of the feature subspace processed by each of its trees' leaves.

    Parameters
    ----------
    n_models
        The number of trees in the ensemble.
    max_features
        Max number of attributes for each node split.</br>
        - If int, then consider `max_features` at each split.</br>
        - If float, then `max_features` is a percentage and `int(max_features * n_features)`
        features are considered per split.</br>
        - If "sqrt", then `max_features=sqrt(n_features)`.</br>
        - If "log2", then `max_features=log2(n_features)`.</br>
        - If "random", then `max_features` will assume a different random number in the interval
        `[2, n_features]` for each tree leaf.</br>
        - If None, then `max_features=n_features`.
    resampling_strategy
        The chosen instance resampling strategy:</br>
        - If `None`, no resampling will be done and the trees will process all instances.
        - If `'bagging'`, online bagging will be performed (sampling with replacement).
        - If `'subbagging'`, online subbagging will be performed (sampling without replacement).
    resampling_rate
        Only valid if `resampling_strategy` is not None. Controls the parameters of the resampling
        strategy.</br>
        - If `resampling_strategy='bagging'`, must be an integer greater than or equal to 1 that
        parameterizes the poisson distribution used to simulate bagging in online learning
        settings. It acts as the lambda parameter of Oza Bagging and Leveraging Bagging.</br>
        - If `resampling_strategy='subbagging'`, must be a float in the interval $(0, 1]$ that
        controls the chance of each instance being used by a tree for learning.
    detection_mode
        The concept drift detection mode in which the forest operates. Valid values are:</br>
        - "all": creates both warning and concept drift detectors. If a warning is detected,
        an alternate tree starts being trained in the background. If the warning trigger escalates
        to a concept drift, the affected tree is replaced by the alternate tree.</br>
        - "drop": only the concept drift detectors are created. If a drift is detected, the
        affected tree is dropped and replaced by a new tree.</br>
        - "off": disables the concept drift adaptation capabilities. The forest will act as if
        the processed stream is stationary.
    warning_detector
        The detector that will be used to trigger concept drift warnings.
    drift_detector
        The detector used to detect concept drifts.
    max_depth
        The maximum depth the ensemble members might reach. If `None`, the trees will grow
        indefinitely.
    randomize_tree_depth
        Whether or not randomize the maximum depth of each tree in the ensemble. If `max_depth`
        is provided, it is going to act as an upper bound to generate the maximum depth for each
        tree.
    track_metric
        The performance metric used to weight predictions.
    disable_weighted_vote
        Defines whether or not to use predictions weighted by each trees' prediction performance.
    split_buffer_size
        Defines the size of the buffer used by the tree splitters when determining the feature
        range and a random split point in this interval.
    seed
        Random seed to support reproducibility.
    grace_period
        [*Tree parameter*] Number of instances a leaf should observe between
        split attempts.
    delta
        [*Tree parameter*] Allowed error in split decision, a value closer to 0
        takes longer to decide.
    tau
        [*Tree parameter*] Threshold below which a split will be forced to break
        ties.
    leaf_prediction
        [*Tree parameter*] Prediction mechanism used at leaves.</br>
        - 'mean' - Target mean</br>
        - 'model' - Uses the model defined in `leaf_model`</br>
        - 'adaptive' - Chooses between 'mean' and 'model' dynamically</br>
    leaf_model
        [*Tree parameter*] The regression model used to provide responses if
        `leaf_prediction='model'`. If not provided, an instance of
        `river.linear_model.LinearRegression` with the default hyperparameters
        is used.
    model_selector_decay
        [*Tree parameter*] The exponential decaying factor applied to the learning models'
        squared errors, that are monitored if `leaf_prediction='adaptive'`. Must be
        between `0` and `1`. The closer to `1`, the more importance is going to
        be given to past observations. On the other hand, if its value
        approaches `0`, the recent observed errors are going to have more
        influence on the final decision.
    nominal_attributes
        [*Tree parameter*] List of Nominal attributes. If empty, then assume that
        all attributes are numerical.
    min_samples_split
        [*Tree parameter*] The minimum number of samples every branch resulting from a split
        candidate must have to be considered valid.
    binary_split
        [*Tree parameter*] If True, only allow binary splits.
    max_size
        [*Tree parameter*] Maximum memory (MB) consumed by the tree.
    memory_estimate_period
        [*Tree parameter*] Number of instances between memory consumption checks.
    stop_mem_management
        [*Tree parameter*] If True, stop growing as soon as memory limit is hit.
    remove_poor_attrs
        [*Tree parameter*] If True, disable poor attributes to reduce memory usage.
    merit_preprune
        [*Tree parameter*] If True, enable merit-based tree pre-pruning.

    Notes
    -----
    As the Online Extra Trees change the way in which Hoeffding Trees perform split attempts
    and monitor numerical input features, some of the parameters of the vanilla Hoeffding Tree
    algorithms are not available.

    Examples
    --------
    >>> from river import datasets
    >>> from river import evaluate
    >>> from river import metrics
    >>> from river_extra import ensemble

    >>> dataset = datasets.synth.Friedman(seed=42).take(2000)

    >>> model = ensemble.ExtraTreesRegressor(
    ...     n_models=3,
    ...     seed=42
    ... )

    >>> metric = metrics.RMSE()

    >>> evaluate.progressive_val_score(dataset, model, metric)
    RMSE: 3.24238

    References
    ----------
    [^1]: Mastelini, S. M., Nakano, F. K., Vens, C., & de Leon Ferreira, A. C. P. (2022).
    Online Extra Trees Regressor. IEEE Transactions on Neural Networks and Learning Systems.
    """

    def __init__(
        self,
        n_models: int = 10,
        max_features: typing.Union[bool, str, int] = "random",
        resampling_strategy: typing.Optional[str] = None,
        resampling_rate: typing.Union[int, float] = 0.5,
        detection_mode: str = "all",
        warning_detector: base.DriftDetector = None,
        drift_detector: base.DriftDetector = None,
        max_depth: typing.Union[str, int] = None,
        randomize_tree_depth: bool = False,
        track_metric: metrics.base.RegressionMetric = metrics.MAE(),
        disable_weighted_vote: bool = True,
        split_buffer_size: int = 5,
        seed: int = None,
        grace_period: int = 50,
        delta: float = 0.01,
        tau: float = 0.05,
        leaf_prediction: str = "model",
        leaf_model: base.Regressor = None,
        model_selector_decay: float = 0.95,
        nominal_attributes: list = None,
        min_samples_split: int = 5,
        binary_split: bool = False,
        max_size: int = 500,
        memory_estimate_period: int = 2_000_000,
        stop_mem_management: bool = False,
        remove_poor_attrs: bool = False,
        merit_preprune: bool = True,
    ):
        super().__init__(
            n_models=n_models,
            max_features=max_features,
            resampling_strategy=resampling_strategy,
            resampling_rate=resampling_rate,
            detection_mode=detection_mode,
            warning_detector=warning_detector,
            drift_detector=drift_detector,
            max_depth=max_depth,
            randomize_tree_depth=randomize_tree_depth,
            track_metric=track_metric,
            disable_weighted_vote=disable_weighted_vote,
            split_buffer_size=split_buffer_size,
            seed=seed,
        )

        # Tree parameters
        self.grace_period = grace_period
        self.delta = delta
        self.tau = tau
        self.leaf_prediction = leaf_prediction
        self.leaf_model = leaf_model
        self.model_selector_decay = model_selector_decay
        self.nominal_attributes = nominal_attributes
        self.min_samples_split = min_samples_split
        self.binary_split = binary_split
        self.max_size = max_size
        self.memory_estimate_period = memory_estimate_period
        self.stop_mem_management = stop_mem_management
        self.remove_poor_attrs = remove_poor_attrs
        self.merit_preprune = merit_preprune

    def _new_member(self, max_features, max_depth, seed):
        """Build a new ETRegressor configured with the stored tree parameters."""
        return ETRegressor(
            max_features=max_features,
            grace_period=self.grace_period,
            max_depth=max_depth,
            delta=self.delta,
            tau=self.tau,
            leaf_prediction=self.leaf_prediction,
            leaf_model=self.leaf_model,
            model_selector_decay=self.model_selector_decay,
            nominal_attributes=self.nominal_attributes,
            min_samples_split=self.min_samples_split,
            binary_split=self.binary_split,
            max_size=self.max_size,
            memory_estimate_period=self.memory_estimate_period,
            stop_mem_management=self.stop_mem_management,
            remove_poor_attrs=self.remove_poor_attrs,
            merit_preprune=self.merit_preprune,
            split_buffer_size=self.split_buffer_size,
            seed=seed,
        )

    def _drift_input(self, y, y_hat) -> typing.Union[int, float]:
        # The absolute error feeds the drift/warning detectors.
        return abs(y - y_hat)

    def predict_one(self, x: dict) -> base.typing.RegTarget:
        """Predict the target for `x` by aggregating the members' predictions."""
        if not self.models:
            self._init_trees(len(x))
            return 0

        if not self.disable_weighted_vote:
            preds = []
            weights = []
            for perf, model in zip(self._perfs, self.models):
                preds.append(model.predict_one(x))
                weights.append(perf.get())

            sum_weights = sum(weights)
            if sum_weights != 0:
                if self.track_metric.bigger_is_better:
                    preds = [(w / sum_weights) * pred for w, pred in zip(weights, preds)]
                else:
                    # Loss-like metric: invert the weights so smaller errors
                    # yield larger voting weights. The epsilon avoids division
                    # by zero for trees with a perfect score.
                    weights = [(1 + 1e-8) / (w + 1e-8) for w in weights]
                    sum_weights = sum(weights)
                    preds = [(w / sum_weights) * pred for w, pred in zip(weights, preds)]
                return sum(preds)
            # All weights are zero: fall back to the plain mean. (Bug fix: this
            # path previously fell off the end of the method and returned None.)
            return sum(preds) / len(preds)

        preds = [model.predict_one(x) for model in self.models]
        return sum(preds) / len(preds)
from __future__ import annotations
import collections
import inspect
import typing
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from river import base
__all__ = ["PyTorch2RiverBase", "PyTorch2RiverRegressor", "PyTorch2RiverClassifier"]
class PyTorch2RiverBase(base.Estimator, base.MultiOutputMixin):
    """An estimator that integrates neural Networks from PyTorch.

    Base class for the PyTorch compatibility wrappers: owns the lazily
    built network (``net_``), the loss instance, and the optimizer.
    Subclasses provide the task-specific learn/predict logic.
    """
    def __init__(
        self,
        build_fn,
        loss_fn: typing.Type[torch.nn.modules.loss._Loss],
        optimizer_fn: typing.Type[torch.optim.Optimizer] = torch.optim.Adam,
        learning_rate=1e-3,
        seed=42,
        **net_params,
    ):
        """
        Parameters
        ----------
        build_fn
            Callable that receives ``n_features`` (plus any matching
            entries of ``net_params``) and returns a ``torch.nn.Module``.
        loss_fn
            Loss *class* (not instance); instantiated once here.
        optimizer_fn
            Optimizer *class*; instantiated lazily in ``_init_net``.
        learning_rate
            Learning rate handed to the optimizer.
        seed
            Seed applied to the global torch and numpy RNGs.
        net_params
            Extra keyword arguments, filtered by signature and forwarded
            to ``build_fn``.
        """
        self.build_fn = build_fn
        self.loss_fn = loss_fn
        self.loss = loss_fn()
        self.optimizer_fn = optimizer_fn
        self.learning_rate = learning_rate
        self.net_params = net_params
        self.seed = seed
        # NOTE(review): seeding mutates *global* RNG state as a side effect
        # of construction.
        torch.manual_seed(seed)
        np.random.seed(seed)
        # The trained network lives in `net_` (set lazily by `_init_net`);
        # `net` is only ever assigned by PyTorch2RiverClassifier.
        self.net = None
    @classmethod
    def _unit_test_params(cls):
        """Parameters used by river's generic estimator test suite."""
        def build_torch_linear_regressor(n_features):
            # Single linear layer with a sigmoid squashing the output.
            net = torch.nn.Sequential(
                torch.nn.Linear(n_features, 1), torch.nn.Sigmoid()
            )
            return net
        yield {
            "build_fn": build_torch_linear_regressor,
            "loss_fn": torch.nn.MSELoss,
            "optimizer_fn": torch.optim.SGD,
        }
    @classmethod
    def _unit_test_skips(self):
        """Indicates which checks to skip during unit testing.

        Most estimators pass the full test suite. However, in some cases,
        some estimators might not be able to pass certain checks.
        """
        return {
            "check_pickling",
            "check_shuffle_features_no_impact",
            "check_emerging_features",
            "check_disappearing_features",
            "check_predict_proba_one",
            "check_predict_proba_one_binary",
        }
    def _learn_one(self, x: torch.Tensor, y: torch.Tensor):
        """Run one optimization step on a single (batch of) example(s)."""
        self.net_.zero_grad()
        y_pred = self.net_(x)
        loss = self.loss(y_pred, y)
        loss.backward()
        self.optimizer.step()
    def learn_one(self, x: dict, y: base.typing.ClfTarget) -> "PyTorch2RiverBase":
        """Update the model with a set of features `x` and a label `y`.

        Parameters
        ----------
        x
            A dictionary of features.
        y
            A label.

        Returns
        -------
        self
        """
        if not hasattr(self, "net_"):
            # Lazy initialization: the input width is only known once the
            # first example arrives.
            self._init_net(n_features=len(list(x.values())))
        x = torch.Tensor([list(x.values())])
        y = torch.Tensor([[y]])
        self._learn_one(x=x, y=y)
        return self
    def _filter_torch_params(self, fn, override=None):
        """Filters `torch_params` and returns those in `fn`'s arguments.

        Parameters
        ----------
        fn
            arbitrary function
        override
            dictionary, values to override `torch_params`

        Returns
        -------
        res
            dictionary containing variables present in both `net_params`
            and `fn`'s arguments
        """
        override = override or {}
        res = {}
        for name, value in self.net_params.items():
            args = list(inspect.signature(fn).parameters)
            if name in args:
                res.update({name: value})
        res.update(override)
        return res
    def _init_net(self, n_features):
        """Build the wrapped network and its optimizer for `n_features` inputs."""
        self.net_: nn.Module = self.build_fn(
            n_features=n_features, **self._filter_torch_params(self.build_fn)
        )
        # Only optimizers with learning rate as parameter are supported, needs to be fixed
        self.optimizer = self.optimizer_fn(self.net_.parameters(), self.learning_rate)
class PyTorch2RiverClassifier(PyTorch2RiverBase, base.Classifier):
    """A river classifier that integrates neural Networks from PyTorch.

    Parameters
    ----------
    build_fn
    loss_fn
    optimizer_fn
    learning_rate
    net_params

    Examples
    --------
    >>> from river import compat
    >>> from river import datasets
    >>> from river import evaluate
    >>> from river import metrics
    >>> from river import preprocessing
    >>> from torch import nn
    >>> from torch import optim
    >>> from torch import manual_seed
    >>> _ = manual_seed(0)
    >>> def build_torch_mlp_classifier(n_features):
    ...     net = nn.Sequential(
    ...         nn.Linear(n_features, 5),
    ...         nn.Linear(5, 5),
    ...         nn.Linear(5, 5),
    ...         nn.Linear(5, 5),
    ...         nn.Linear(5, 1),
    ...         nn.Sigmoid()
    ...     )
    ...     return net
    ...
    >>> model = compat.PyTorch2RiverClassifier(
    ...     build_fn= build_torch_mlp_classifier,
    ...     loss_fn=nn.BCELoss,
    ...     optimizer_fn=optim.Adam,
    ...     learning_rate=1e-3
    ... )
    >>> dataset = datasets.Phishing()
    >>> metric = metrics.Accuracy()
    >>> evaluate.progressive_val_score(dataset=dataset, model=model, metric=metric)
    Accuracy: 74.38%
    """
    def __init__(
        self,
        build_fn,
        loss_fn: typing.Type[torch.nn.modules.loss._Loss],
        optimizer_fn: typing.Type[torch.optim.Optimizer] = torch.optim.Adam,
        learning_rate=1e-3,
        **net_params,
    ):
        # Running count of observed class labels; insertion order defines
        # the output-unit ordering assumed in `predict_proba_one`.
        self.classes: typing.Counter = collections.Counter()
        self.n_classes = 1
        super().__init__(
            build_fn=build_fn,
            loss_fn=loss_fn,
            optimizer_fn=optimizer_fn,
            learning_rate=learning_rate,
            **net_params,
        )
    def _update_classes(self):
        """Grow the output layer to match the number of classes seen so far."""
        self.n_classes = len(self.classes)
        layers = list(self.net_.children())
        # Get last trainable layer
        # NOTE(review): the loop re-reads layers[i] before decrementing, so
        # the first iteration re-checks the same layer — confirm intended.
        i = -1
        layer_to_convert = layers[i]
        while not hasattr(layer_to_convert, "weight"):
            layer_to_convert = layers[i]
            i -= 1
        if i == -1:
            i = -2
        # Get first Layers
        new_net = list(self.net_.children())[: i + 1]
        new_layer = torch.nn.Linear(
            in_features=layer_to_convert.in_features, out_features=self.n_classes
        )
        # Copy the original weights back
        with torch.no_grad():
            new_layer.weight[:-1, :] = layer_to_convert.weight
            new_layer.weight[-1:, :] = torch.mean(layer_to_convert.weight, 0)
        # Append new Layer
        new_net.append(new_layer)
        # Add non trainable layers
        if i + 1 < -1:
            for layer in layers[i + 2 :]:
                new_net.append(layer)
        # NOTE(review): the rebuilt network is stored in `self.net`, but the
        # optimizer below and all forward passes use `self.net_` — the
        # expanded output layer appears to never take effect; confirm.
        self.net = torch.nn.Sequential(*new_net)
        self.optimizer = self.optimizer_fn(self.net_.parameters(), self.learning_rate)
    def learn_one(self, x: dict, y: base.typing.ClfTarget) -> "PyTorch2RiverClassifier":
        """Update the classifier with one labelled example."""
        self.classes.update([y])
        # check if model is initialized
        # NOTE(review): `_init_net` sets `net_`, not `net`, so this stays
        # True until `_update_classes` assigns `net` — confirm intended.
        if self.net is None:
            self._init_net(len(list(x.values())))
        # check last layer and update if needed
        if len(self.classes) != self.n_classes:
            self._update_classes()
        # training process: one-hot encode the observed label
        proba = {c: 0.0 for c in self.classes}
        proba[y] = 1.0
        x: list[float] = list(x.values())  # type: ignore[no-redef]
        y: list[float] = list(proba.values())  # type: ignore[no-redef]
        x = torch.Tensor([x])
        y = torch.Tensor([y])
        self._learn_one(x=x, y=y)
        return self
    def predict_proba_one(self, x: dict) -> typing.Dict[base.typing.ClfTarget, float]:
        """Return a class -> probability mapping for a single example."""
        if not hasattr(self, "net_"):
            self._init_net(len(list(x.values())))
        x = torch.Tensor(list(x.values()))
        yp = self.net_(x).detach().numpy()
        proba = {c: 0.0 for c in self.classes}
        # assumes the network emits one output unit per observed class, in
        # the insertion order of `self.classes` — TODO confirm.
        for idx, val in enumerate(self.classes):
            proba[val] = yp[idx]
        return proba
    def predict_proba_many(self, X: pd.DataFrame) -> pd.DataFrame:
        """Return per-class probabilities for every row of `X`."""
        if not hasattr(self, "net_"):
            self._init_net(len(X.columns))
        x = torch.Tensor(list(X.to_numpy()))
        yp = self.net_(x).detach().numpy()
        proba = {c: [0.0] * len(X) for c in self.classes}
        # NOTE(review): `yp[idx]` selects the idx-th *row* (sample), not the
        # idx-th output column; `yp[:, idx]` looks intended — confirm.
        for idx, val in enumerate(self.classes):
            proba[val] = yp[idx]
        return pd.DataFrame(proba)
class PyTorch2RiverRegressor(PyTorch2RiverBase, base.MiniBatchRegressor):  # type: ignore[misc]
    """Compatibility layer from PyTorch to River for regression.

    Parameters
    ----------
    build_fn
    loss_fn
    optimizer_fn
    learning_rate
    net_params

    Examples
    --------
    >>> from river import compat
    >>> from river import datasets
    >>> from river import evaluate
    >>> from river import metrics
    >>> from river import preprocessing
    >>> from torch import nn
    >>> from torch import optim
    >>> _ = torch.manual_seed(0)
    >>> dataset = datasets.TrumpApproval()
    >>> def build_torch_mlp_regressor(n_features):
    ...     net = nn.Sequential(
    ...         nn.Linear(n_features, 5),
    ...         nn.Linear(5, 5),
    ...         nn.Linear(5, 5),
    ...         nn.Linear(5, 5),
    ...         nn.Linear(5, 1)
    ...     )
    ...     return net
    ...
    >>> model = compat.PyTorch2RiverRegressor(
    ...     build_fn= build_torch_mlp_regressor,
    ...     loss_fn=nn.MSELoss,
    ...     optimizer_fn=optim.Adam,
    ... )
    >>> metric = metrics.MAE()
    >>> metric = evaluate.progressive_val_score(dataset=dataset, model=model, metric=metric)
    >>> round(metric.get(), 2)
    78.98
    """
    def __init__(
        self,
        build_fn,
        loss_fn: typing.Type[torch.nn.modules.loss._Loss],
        optimizer_fn: typing.Type[torch.optim.Optimizer],
        learning_rate=1e-3,
        **net_params,
    ):
        super().__init__(
            build_fn=build_fn,
            loss_fn=loss_fn,
            optimizer_fn=optimizer_fn,
            learning_rate=learning_rate,
            **net_params,
        )
    def learn_many(self, X: pd.DataFrame, y: List):
        """Update the model with a mini-batch of examples.

        Parameters
        ----------
        X
            A DataFrame of features, one example per row.
        y
            The corresponding target values.

        Returns
        -------
        self
        """
        if not hasattr(self, "net_"):
            self._init_net(n_features=len(X.columns))
        x = torch.Tensor(X.to_numpy())
        # Bug fix: shape (n, 1) to line up with the network's per-row
        # outputs; `torch.Tensor([y])` produced shape (1, n), which
        # silently broadcast against the (n, 1) predictions in the loss.
        y = torch.Tensor(y).unsqueeze(-1)
        self._learn_one(x=x, y=y)
        return self
    def predict_one(self, x):
        """Predict the target value for a single example."""
        if not hasattr(self, "net_"):
            self._init_net(len(x))
        x = torch.Tensor(list(x.values()))
        return self.net_(x).item()
    def predict_many(self, X: pd.DataFrame) -> pd.Series:
        """Predict the target value for every row of `X`.

        Returns
        -------
        pd.Series
            One predicted value per input row.
        """
        if not hasattr(self, "net_"):
            self._init_net(len(X.columns))
        x = torch.Tensor(X.to_numpy())
        # Bug fix: `.item()` only works on single-element tensors and
        # raised for batches with more than one row; flatten the batch of
        # predictions into one value per row instead.
        return pd.Series(self.net_(x).detach().numpy().flatten())
import re
from .auth_credentials import AuthCredentials
from .cargo_client import CargoClient
from .errors import CantFindMatchHistory
from .esports_lookup_cache import EsportsLookupCache
from .gamepedia_client import GamepediaClient
from .site import Site
# Short names of the wikis hosted on the Gamepedia esports farm. Names not in
# this list are assumed to already be fully-qualified (see
# `EsportsClient.get_wiki`, which appends '-esports' to most entries here).
ALL_ESPORTS_WIKIS = ['lol', 'halo', 'smite', 'vg', 'rl', 'pubg', 'fortnite',
                     'apexlegends', 'fifa', 'gears', 'nba2k', 'paladins', 'siege',
                     'splatoon2', 'legendsofruneterra',
                     'default-loadout', 'commons', 'teamfighttactics', 'valorant']
class EsportsClient(GamepediaClient):
    """
    Functions for connecting to and editing specifically to Gamepedia esports wikis.

    If not using an esports wiki, please use GamepediaSite instead.
    """
    ALL_ESPORTS_WIKIS = ALL_ESPORTS_WIKIS
    cargo_client: CargoClient = None
    client: Site = None
    wiki: str = None

    def __init__(self, wiki: str, client: Site = None,
                 credentials: AuthCredentials = None, stg: bool = False,
                 cache: EsportsLookupCache = None,
                 **kwargs):
        """
        Create a site object.

        :param wiki: Name of a wiki
        :param client: WikiClient object. If this is provided, SessionManager will not be used.
        :param credentials: Optional. Provide if you want a logged-in session.
        :param stg: if it's a staging wiki or not
        :param cache: Optional. Reuse an existing lookup cache (e.g. one shared across clients).
        """
        self.wiki = self.get_wiki(wiki)
        super().__init__(wiki, credentials=credentials, stg=stg, client=client, **kwargs)
        if cache:
            self.cache = cache
        else:
            self.cache = EsportsLookupCache(self.client, cargo_client=self.cargo_client)

    @staticmethod
    def get_wiki(wiki):
        """Normalize a short wiki name to its full wiki name.

        'lol' and 'teamfighttactics' are their own wikis, and names not in
        ALL_ESPORTS_WIKIS are assumed to already be fully qualified;
        everything else gets an '-esports' suffix.
        """
        if wiki in ['lol', 'teamfighttactics'] or wiki not in ALL_ESPORTS_WIKIS:
            return wiki
        return wiki + '-esports'

    def setup_tables(self, tables):
        """Create the Cargo declaration templates for `tables` and declare the tables."""
        if isinstance(tables, str):
            tables = [tables]
        summary = "Setting up Cargo declaration"
        for table in tables:
            tl_page = self.client.pages['Template:{}/CargoDec'.format(table)]
            doc_page = self.client.pages['Template:{}/CargoDec/doc'.format(table)]
            self.save(
                tl_page,
                '{{Declare|doc={{{1|}}}}}<noinclude>{{documentation}}</noinclude>',
                summary=summary
            )
            self.save(doc_page, '{{Cargodoc}}', summary=summary)
            tl_page.touch()
        self.create_tables(tables)
        # Touch the declaration pages again so they pick up the new tables.
        for table in tables:
            self.client.pages['Template:{}/CargoDec'.format(table)].touch()

    def create_tables(self, tables):
        """Create the Cargo tables behind `tables` (no replacement tables)."""
        self.recreate_tables(tables, replacement=False)

    def recreate_tables(self, tables, replacement=True):
        """(Re)create the Cargo tables behind `tables`.

        :param replacement: If True, build replacement tables instead of creating directly.
        """
        if isinstance(tables, str):
            tables = [tables]
        templates = ['{}/CargoDec'.format(_) for _ in tables]
        self.cargo_client.recreate(templates, replacement=replacement)

    def get_one_data_page(self, event, i):
        """
        Find one data page for an event

        :param event: Overview Page of an event
        :param i: the ith page to return
        :return: a Page object of a single data page
        """
        if i == 1:
            return self.client.pages['Data:' + event]
        return self.client.pages['Data:{}/{}'.format(event, str(i))]

    def data_pages(self, event):
        """
        Find all the data pages for an event.

        :param event: Overview Page of event
        :return: generator of data pages
        """
        event = self.cache.get_target(event)
        i = 1
        data_page = self.get_one_data_page(event, i)
        while data_page.exists:
            yield data_page
            i += 1
            data_page = self.get_one_data_page(event, i)

    def query_riot_mh(self, riot_mh):
        """Look up event, team, and patch metadata for a Riot match-history URL.

        :param riot_mh: a full Riot match-details URL
        :return: the first matching row (Event, Blue, Red, Patch)
        :raises CantFindMatchHistory: if the URL cannot be parsed or no game matches
        """
        match = re.search(r'match-details/(.+?)(&tab=.*)?$', riot_mh)
        # Bug fix: an unparseable URL previously raised TypeError via
        # `None[1]` instead of the domain error that callers expect.
        if match is None or match[1] is None:
            raise CantFindMatchHistory
        to_search = '%{}%'.format(match[1])
        result = self.cargo_client.query(
            tables="MatchScheduleGame=MSG, Tournaments=T, MatchSchedule=MS",
            join_on="MSG.OverviewPage=T.OverviewPage, MSG.UniqueMatch=MS.UniqueMatch",
            fields="T.StandardName=Event, MSG.Blue=Blue, MSG.Red=Red,MS.Patch=Patch",
            where="MSG.MatchHistory LIKE\"{}\"".format(to_search)
        )
        if len(result) == 0:
            raise CantFindMatchHistory
        return result[0]

    def query_qq_mh(self, qq_id):
        """Look up event and patch metadata for a QQ match id.

        :raises CantFindMatchHistory: if no match is found
        """
        result = self.cargo_client.query(
            tables="MatchSchedule=MS, Tournaments=T",
            join_on="MS.OverviewPage=T.OverviewPage",
            fields="MS.Patch=Patch, T.StandardName=Event",
            where="MS.QQ=\"{}\"".format(qq_id)
        )
        if len(result) == 0:
            raise CantFindMatchHistory
        return result[0]

    def query_wp_mh(self, wp_id):
        """Look up event and patch metadata for a Wanplus match id.

        :raises CantFindMatchHistory: if no match is found
        """
        result = self.cargo_client.query(
            tables="MatchSchedule=MS, Tournaments=T",
            join_on="MS.OverviewPage=T.OverviewPage",
            fields="MS.Patch=Patch, T.StandardName=Event",
            where="MS.WanplusId=\"{}\"".format(wp_id)
        )
        if len(result) == 0:
            raise CantFindMatchHistory
        return result[0]
from pytz import timezone, utc
from datetime import datetime
class WikiTime(object):
    """
    Reduce a timestamp to the very simplified time-zone model used by
    Leaguepedia and the other esports wikis.

    That model knows only three zones — PST, CET, and KST — and the wiki
    itself knows nothing about daylight savings. Instead, every timestamp
    must be accompanied by DST metadata with three possible values:
    'yes' (both CET and PST are in DST), 'spring' or fall (PST is in DST
    but CET is not), and 'no' (neither is). KST never observes DST. For
    simplicity, even in the fall we report 'spring'.

    Instances expose, per zone, the localized datetime (`*_object`), the
    date string (`*_date`), and the time string (`*_time`), plus `dst`.
    """
    pst = timezone('America/Los_Angeles')
    cet = timezone('Europe/Berlin')
    kst = timezone('Asia/Seoul')

    def __init__(self, timestamp: datetime, tz: timezone = utc):
        """
        Create a wiki date-time object.

        :param timestamp: a datetime object
        :param tz: optional, a timezone. if not provided, utc will be assumed.
        """
        if timestamp.tzinfo is None:
            timestamp = tz.localize(timestamp)
        # Localize once per supported zone and derive the wiki-facing
        # date/time strings from each localized datetime.
        for zone_name in ('pst', 'cet', 'kst'):
            localized = timestamp.astimezone(getattr(self, zone_name))
            setattr(self, zone_name + '_object', localized)
            setattr(self, zone_name + '_date', localized.strftime('%Y-%m-%d'))
            setattr(self, zone_name + '_time', localized.strftime('%H:%M'))
        self.dst = self._determine_dst()

    def _determine_dst(self):
        """Map the PST/CET DST states onto the wiki's 'yes'/'spring'/'no' flag."""
        # A zero timedelta (no DST offset) is falsy.
        pst_active = bool(self.pst_object.dst())
        cet_active = bool(self.cet_object.dst())
        if pst_active:
            return 'yes' if cet_active else 'spring'
        return 'no'
import json
import re
from unidecode import unidecode
from .errors import EsportsCacheKeyError
from .site import Site
from .cargo_client import CargoClient
class EsportsLookupCache(object):
    """Session-level cache of expensive wiki lookups: -names lookup modules,
    redirect targets, per-event team tricodes, and player disambiguations."""
    def __init__(self, site: Site, cargo_client: CargoClient = None):
        self.site = site
        self.cargo_client = cargo_client
        # filename -> decoded -names lookup module (see `_get_json_lookup`)
        self.cache = {}
        # page title -> redirect target
        # NOTE(review): read in `get_target` but never populated within this
        # class — presumably filled in elsewhere; confirm.
        self.redirect_cache = {}
        # event -> {lowercased tricode -> team link}
        self.event_tricode_cache = {}
        # event -> {team -> {lowercased player id -> disambiguation suffix}}
        self.event_playername_cache = {}
    def clear(self):
        """Drop all cached data."""
        self.cache = {}
        self.redirect_cache = {}
        self.event_tricode_cache = {}
        self.event_playername_cache = {}
    def _get_json_lookup(self, filename):
        """
        Returns a json representation of the requested file, querying the site to retrieve it if needed

        :param filename: The name of the file to return, e.g. "Champion" or "Role"
        :return: A json object representing the lookup file
        """
        if filename in self.cache:
            return self.cache[filename]
        # this compartmentalization is in place for Module:Teamnames, whose halfway point is somewhere in the middle
        # of the letter T because of `Team` so a-s not a-m
        dict1 = self._get_one_encoded_json(filename, 'include_match=^[a-s].*')
        dict2 = self._get_one_encoded_json(filename, 'exclude_match=^[a-s].*')
        self.cache[filename] = {**dict1, **dict2}
        return self.cache[filename]
    def _get_one_encoded_json(self, filename, mask):
        """Expand `{{JsonEncode}}` on-wiki for one slice of the lookup file and decode the result."""
        result = self.site.api(
            'expandtemplates',
            prop='wikitext',
            text='{{{{JsonEncode|{}|{}}}}}'.format(filename, mask)
        )
        return json.loads(result['expandtemplates']['wikitext'])
    def get(self, filename, key, length, allow_fallback=False):
        """
        Returns the `length` variant (e.g. "long" or "link") of the lookup of `key` in the
        requested file. Assumes the file has the same structure as the -names modules on
        Leaguepedia.

        :param filename: "Champion", "Role", etc. - the name of the file
        :param key: The lookup key, e.g. "Morde"
        :param length: The variant of the value to return, e.g. "long" or "link"
        :param allow_fallback: Whether or not to fallback to returning the key if it's missing in the lookup
        :return: Correct lookup value provided, or None if it's not found
        """
        file = self._get_json_lookup(filename)
        if key is None:
            return None
        key = key.lower()
        if key not in file:
            if allow_fallback:
                return key
            return None
        value_table = file[key]
        # A string entry is an alias pointing at the canonical key.
        if isinstance(value_table, str):
            key = value_table
            value_table = file[value_table]
        if length not in value_table:
            raise EsportsCacheKeyError(filename, key, length, value_table)
        return value_table[length]
    def get_target(self, title):
        """
        Returns the redirect target of a title of a wiki page, using the cached value
        when one is available.

        :param title: Title of a page on the wiki
        :return: Redirect target of the title
        """
        title = title.replace('_', ' ')
        if title in self.redirect_cache:
            return self.redirect_cache[title]
        # NOTE(review): the resolved target is not written back into
        # `redirect_cache` here, so cache misses hit the network every
        # time — confirm whether that is intended.
        return self.site.pages[title].resolve_redirect().name
    def get_team_from_event_tricode(self, event, tricode):
        """
        Determines the full name of a team based on its tricode, assuming tricode matches the short name on the wiki
        and that tricodes are unique within the provided event

        :param event: Event within which to restrict the lookup
        :param tricode: Official tricode of the team, must match wiki teamshort
        :return: Wiki teamlinkname
        """
        event = self.get_target(event)
        tricode = tricode.lower()
        result = self._get_team_from_event_tricode_raw(event, tricode)
        if result is not None:
            return result
        # Cache miss: populate the event's tricode table once, then retry.
        self._populate_event_tricodes(event)
        return self._get_team_from_event_tricode_raw(event, tricode)
    def _get_team_from_event_tricode_raw(self, event, tricode):
        """Cache-only lookup; returns None when the event or tricode is unknown."""
        if event in self.event_tricode_cache:
            if tricode in self.event_tricode_cache[event]:
                return self.event_tricode_cache[event][tricode]
        return None
    def _populate_event_tricodes(self, event):
        """Query Cargo for the event's rosters and fill `event_tricode_cache[event]`."""
        result = self.cargo_client.query(
            tables="TournamentRosters=Ros,TeamRedirects=TRed,Teams",
            join_on="Ros.Team=TRed.AllName,TRed._pageName=Teams.OverviewPage",
            where='Ros.OverviewPage="{}"'.format(event),
            fields="Ros.Team=Team, COALESCE(Ros.Short,Teams.Short)=Short"
        )
        d = {}
        for item in result:
            team = item['Team']
            link = self.unescape(self.get('Team', team, 'link', allow_fallback=True))
            short = self.unescape(item['Short'])
            # Fall back to the -names module when Cargo has no short name.
            if short == '':
                short = self.get('Team', team, 'short')
            if short is not None and short != '':
                d[short.lower()] = link
        self.event_tricode_cache[event] = d
    @staticmethod
    def unescape(string):
        """Undo the HTML-ampersand escaping Cargo applies to returned values."""
        return string.replace('&amp;', '&')
    def get_disambiguated_player_from_event(self, event, team, player):
        """
        Returns the disambiguated ID of the player based on the team they were playing on in the event.

        For performance, the first time a team & event are queried, a single network call will be made
        to retrieve all possible player names for that event.
        These will be stored in a three-layer dictionary keyed first by event, then by team,
        then finally by player ID (not disambiguated), and ultimately yielding disambiguated player ID.
        Because it's possible for a player to rename mid-event, we will just include every lifetime ID
        the player has ever had, so that future requests in the same session can use other IDs.
        The current request will use the ID requested by the current function call.

        This method has a failure chance if two players on the same team historically shared an ID.
        We could attempt to mitigate this failure chance by checking position.
        TODO: Add support for low-priority disambiguation table, this will also mitigate this possibility

        :param event: will be resolved as a redirect if needed
        :param team: can be a tricode if needed
        :param player: the current player ID to return the disambiguated name of
        :return: the disambiguated form of the player param
        """
        event = self.get_target(event)
        # we'll keep all player keys lowercase
        player_lookup = unidecode(player).lower()
        team = self.get('Team', team, 'link', allow_fallback=True)
        disambiguation = self._get_player_from_event_and_team_raw(event, team, player_lookup)
        if disambiguation is not None:
            return player + disambiguation
        # Cache miss: populate the event's player table once, then retry.
        self._populate_event_team_players(event)
        disambiguation = self._get_player_from_event_and_team_raw(event, team, player_lookup)
        if disambiguation is not None:
            return player + disambiguation
        return None
    def _get_player_from_event_and_team_raw(self, event, team, player_lookup):
        """Cache-only lookup of a player's disambiguation suffix; None on a miss."""
        if event in self.event_playername_cache:
            if team in self.event_playername_cache[event]:
                if player_lookup in self.event_playername_cache[event][team]:
                    return self.event_playername_cache[event][team][player_lookup]
        return None
    def _populate_event_team_players(self, event):
        """Query Cargo for all player IDs in the event and fill `event_playername_cache[event]`."""
        result = self.cargo_client.query(
            tables="TournamentPlayers=TP,PlayerRedirects=PR1,PlayerRedirects=PR2,LowPriorityRedirects=LPR",
            join_on="TP.Player=PR1.AllName,PR1.OverviewPage=PR2.OverviewPage,PR2.AllName=LPR._pageName",
            where="TP.OverviewPage=\"{}\" AND LPR.IsLowPriority IS NULL".format(event),
            fields="TP.Team=Team,PR2.AllName=DisambiguatedName,PR2.ID=ID,TP.Player=TournamentName,PR2.OverviewPage=CurrentName",
            limit='max'
        )
        d = {}
        for item in result:
            if item['Team'] not in d:
                d[item['Team']] = {}
            team_entry = d[item['Team']]
            # Prefer the current (accent-matching) name when it is the same
            # page as the disambiguated one.
            if unidecode(item['CurrentName']) == unidecode(item['DisambiguatedName']):
                item['DisambiguatedName'] = item['CurrentName']
            # The suffix is whatever follows the bare ID, e.g. " (NA Team)".
            disambiguation = re.sub(r'^' + re.escape(item['ID']), '', item['DisambiguatedName'])
            key = unidecode(item['ID']).lower()
            # A non-empty suffix wins over a previously stored empty one.
            if key not in team_entry or disambiguation != '':
                team_entry[key] = disambiguation
        self.event_playername_cache[event] = d
import abc
import collections
import inspect
from typing import Any, Callable, Deque, Optional, Type, Union, cast
import torch
from river import base
from river_torch.utils import get_loss_fn, get_optim_fn
class DeepEstimator(base.Estimator):
    """
    Abstract base class that implements basic functionality of
    River-compatible PyTorch wrappers.

    Parameters
    ----------
    module
        Torch Module class that builds the model to be wrapped.
        The class should accept parameter `n_features` so that the returned
        model's input shape can be determined based on the number of
        features in the initial training example.
    loss_fn
        Loss function to be used for training the wrapped model. Can be a
        loss function provided by `torch.nn.functional` or one of the
        following: 'mse', 'l1', 'cross_entropy', 'binary_crossentropy',
        'smooth_l1', 'kl_div'.
    optimizer_fn
        Optimizer to be used for training the wrapped model.
        Can be an optimizer class provided by `torch.optim` or one of the
        following: "adam", "adam_w", "sgd", "rmsprop", "lbfgs".
    lr
        Learning rate of the optimizer.
    device
        Device to run the wrapped model on. Can be "cpu" or "cuda".
    seed
        Random seed to be used for training the wrapped model.
    **kwargs
        Parameters to be passed to the `Module` or the `optimizer`.
    """

    def __init__(
        self,
        module: Type[torch.nn.Module],
        loss_fn: Union[str, Callable] = "mse",
        optimizer_fn: Union[str, Callable] = "sgd",
        lr: float = 1e-3,
        device: str = "cpu",
        seed: int = 42,
        **kwargs,
    ):
        super().__init__()
        self.module_cls = module
        # Placeholder until `initialize_module` runs; the cast keeps type
        # checkers happy without an Optional annotation.
        self.module: torch.nn.Module = cast(torch.nn.Module, None)
        self.loss_fn = get_loss_fn(loss_fn)
        self.optimizer_fn = get_optim_fn(optimizer_fn)
        self.lr = lr
        self.device = device
        self.kwargs = kwargs
        self.seed = seed
        self.module_initialized = False
        # NOTE: seeds the *global* torch RNG as a construction side effect.
        torch.manual_seed(seed)

    @abc.abstractmethod
    def learn_one(self, x: dict, y: Optional[Any]) -> "DeepEstimator":
        """
        Performs one step of training with a single example.

        Parameters
        ----------
        x
            Input example.
        y
            Target value.

        Returns
        -------
        DeepEstimator
            The estimator itself.
        """
        raise NotImplementedError

    def _filter_kwargs(self, fn: Callable, override=None, **kwargs) -> dict:
        """Filters `kwargs` and returns those in `fn`'s arguments.

        Parameters
        ----------
        fn
            Arbitrary function
        override
            Dictionary, values to override the filtered keyword arguments

        Returns
        -------
        dict
            Dictionary containing the entries of `kwargs` that appear in
            `fn`'s signature, updated with `override`.
        """
        override = override or {}
        # Compute the signature once instead of once per keyword.
        accepted = set(inspect.signature(fn).parameters)
        res = {name: value for name, value in kwargs.items() if name in accepted}
        res.update(override)
        return res

    def initialize_module(self, **kwargs):
        """
        Build (or adopt) the wrapped module and create its optimizer.

        Parameters
        ----------
        kwargs : dict
            The keyword arguments used to initialize the module class.
            Can be an empty dict.
        """
        if isinstance(self.module_cls, torch.nn.Module):
            # Robustness: a ready-built module instance was supplied
            # directly; previously `self.module` was left as None and the
            # `.to(...)` call below crashed.
            self.module = self.module_cls
        else:
            # Bug fix: `kwargs` used to be passed positionally, landing in
            # `_filter_kwargs`'s `override` parameter and bypassing the
            # signature filter entirely.
            self.module = self.module_cls(
                **self._filter_kwargs(self.module_cls, **kwargs)
            )
        self.module.to(self.device)
        self.optimizer = self.optimizer_fn(
            self.module.parameters(), lr=self.lr
        )
        self.module_initialized = True
class RollingDeepEstimator(DeepEstimator):
    """
    Abstract base class for River-compatible PyTorch wrappers that keep a
    rolling window of past examples, letting the wrapped model base its
    predictions on several consecutive observations.

    Parameters
    ----------
    module
        Torch Module class that builds the model to be wrapped; must accept
        a parameter `n_features` so the input shape can be determined from
        the first training example.
    loss_fn
        Loss used for training: a callable from `torch.nn.functional` or
        one of 'mse', 'l1', 'cross_entropy', 'binary_crossentropy',
        'smooth_l1', 'kl_div'.
    optimizer_fn
        Optimizer used for training: a `torch.optim` class or one of
        "adam", "adam_w", "sgd", "rmsprop", "lbfgs".
    lr
        Learning rate of the optimizer.
    device
        Device to run the wrapped model on; "cpu" or "cuda".
    seed
        Random seed used when training the wrapped model.
    window_size
        Number of previous examples kept in the rolling window.
    append_predict
        Whether inputs passed for prediction are appended to the window.
    **kwargs
        Extra parameters forwarded to the `Module` or the `optimizer`.
    """

    def __init__(
        self,
        module: Type[torch.nn.Module],
        loss_fn: Union[str, Callable] = "mse",
        optimizer_fn: Union[str, Callable] = "sgd",
        lr: float = 1e-3,
        device: str = "cpu",
        seed: int = 42,
        window_size: int = 10,
        append_predict: bool = False,
        **kwargs,
    ):
        super().__init__(
            module=module,
            loss_fn=loss_fn,
            optimizer_fn=optimizer_fn,
            lr=lr,
            device=device,
            seed=seed,
            **kwargs,
        )
        self.append_predict = append_predict
        self.window_size = window_size
        # Bounded deque: the oldest example falls off automatically once
        # the window is full.
        self._x_window: Deque = collections.deque(maxlen=window_size)
        self._batch_i = 0
from typing import Callable, List, Type, Union
import pandas as pd
import torch
from river import base
from river.base.typing import RegTarget
from river_torch.base import DeepEstimator
from river_torch.utils.tensor_conversion import (
df2tensor,
dict2tensor,
float2tensor,
)
class _TestModule(torch.nn.Module):
def __init__(self, n_features):
super().__init__()
self.dense0 = torch.nn.Linear(n_features, 10)
self.nonlin = torch.nn.ReLU()
self.dropout = torch.nn.Dropout(0.5)
self.dense1 = torch.nn.Linear(10, 5)
self.output = torch.nn.Linear(5, 1)
self.softmax = torch.nn.Softmax(dim=-1)
def forward(self, X, **kwargs):
X = self.nonlin(self.dense0(X))
X = self.dropout(X)
X = self.nonlin(self.dense1(X))
X = self.softmax(self.output(X))
return X
class Regressor(DeepEstimator, base.Regressor):
"""
Wrapper for PyTorch regression models that enables
compatibility with River.
Parameters
----------
module
Torch Module that builds the autoencoder to be wrapped.
The Module should accept parameter `n_features` so that the
returned model's input shape can be determined based on the number
of features in the initial training example.
loss_fn
Loss function to be used for training the wrapped model.
Can be a loss function provided by `torch.nn.functional` or one of
the following: 'mse', 'l1', 'cross_entropy', 'binary_crossentropy',
'smooth_l1', 'kl_div'.
optimizer_fn
Optimizer to be used for training the wrapped model.
Can be an optimizer class provided by `torch.optim` or one of the
following: "adam", "adam_w", "sgd", "rmsprop", "lbfgs".
lr
Learning rate of the optimizer.
device
Device to run the wrapped model on. Can be "cpu" or "cuda".
seed
Random seed to be used for training the wrapped model.
**kwargs
Parameters to be passed to the `Module` or the `optimizer`.
Examples
--------
"""
def __init__(
self,
module: Type[torch.nn.Module],
loss_fn: Union[str, Callable] = "mse",
optimizer_fn: Union[str, Callable] = "sgd",
lr: float = 1e-3,
device: str = "cpu",
seed: int = 42,
**kwargs,
):
super().__init__(
module=module,
loss_fn=loss_fn,
device=device,
optimizer_fn=optimizer_fn,
lr=lr,
seed=seed,
**kwargs,
)
@classmethod
def _unit_test_params(cls):
"""
Returns a dictionary of parameters to be used for unit
testing the respective class.
Yields
-------
dict
Dictionary of parameters to be used for unit testing the
respective class.
"""
yield {
"module": _TestModule,
"loss_fn": "l1",
"optimizer_fn": "sgd",
}
@classmethod
def _unit_test_skips(cls) -> set:
"""
Indicates which checks to skip during unit testing.
Most estimators pass the full test suite. However, in some cases,
some estimators might not
be able to pass certain checks.
Returns
-------
set
Set of checks to skip during unit testing.
"""
return {
"check_shuffle_features_no_impact",
"check_emerging_features",
"check_disappearing_features",
"check_predict_proba_one",
"check_predict_proba_one_binary",
}
    def learn_one(self, x: dict, y: RegTarget) -> "Regressor":
        """
        Performs one step of training with a single example.

        Parameters
        ----------
        x
            Input example.
        y
            Target value.

        Returns
        -------
        Regressor
            The regressor itself.
        """
        # Lazily build the torch module once the number of input features
        # is known from the first example.
        if not self.module_initialized:
            self.kwargs["n_features"] = len(x)
            self.initialize_module(**self.kwargs)
        x_t = dict2tensor(x, self.device)
        y = float2tensor(y, device=self.device)
        self._learn(x_t, y)
        return self
    def _learn(self, x: torch.Tensor, y: torch.Tensor):
        # One optimization step: forward pass, loss, backward, update.
        self.module.train()
        self.optimizer.zero_grad()
        y_pred = self.module(x)
        loss = self.loss_fn(y_pred, y)
        loss.backward()
        self.optimizer.step()
    def predict_one(self, x: dict) -> RegTarget:
        """
        Predicts the target value for a single example.

        Parameters
        ----------
        x
            Input example.

        Returns
        -------
        RegTarget
            Predicted target value.
        """
        if not self.module_initialized:
            self.kwargs["n_features"] = len(x)
            self.initialize_module(**self.kwargs)
        x_t = dict2tensor(x, self.device)
        self.module.eval()
        # NOTE(review): no torch.no_grad()/inference_mode here, so the
        # forward pass still builds an autograd graph — wasteful; confirm.
        return self.module(x_t).item()
    def learn_many(self, X: pd.DataFrame, y: List) -> "Regressor":
        """
        Performs one step of training with a batch of examples.

        Parameters
        ----------
        X
            Input examples.
        y
            Target values.

        Returns
        -------
        Regressor
            The regressor itself.
        """
        if not self.module_initialized:
            self.kwargs["n_features"] = len(X.columns)
            self.initialize_module(**self.kwargs)
        X_t = df2tensor(X, device=self.device)
        y_t = torch.tensor(y, device=self.device, dtype=torch.float32)
        self._learn(X_t, y_t)
        return self
def predict_many(self, X: pd.DataFrame) -> List:
"""
Predicts the target value for a batch of examples.
Parameters
----------
x
Input examples.
Returns
-------
List
Predicted target values.
"""
if not self.module_initialized:
self.kwargs["n_features"] = len(X.columns)
self.initialize_module(**self.kwargs)
X = df2tensor(X, device=self.device)
self.module.eval()
return self.module(X).detach().tolist() | /river_torch-0.1.2.tar.gz/river_torch-0.1.2/river_torch/regression/regressor.py | 0.958673 | 0.647798 | regressor.py | pypi |
from typing import Any, Callable, List, Type, Union
import pandas as pd
import torch
from river.base.typing import RegTarget
from river_torch.base import RollingDeepEstimator
from river_torch.regression import Regressor
from river_torch.utils.tensor_conversion import (
deque2rolling_tensor,
float2tensor,
)
class _TestLSTM(torch.nn.Module):
def __init__(self, n_features):
super().__init__()
self.hidden_size = 1
self.lstm = torch.nn.LSTM(
input_size=n_features, hidden_size=self.hidden_size, num_layers=1
)
def forward(self, X, **kwargs):
# lstm with input, hidden, and internal state
output, (hn, cn) = self.lstm(X)
hn = hn.view(-1, self.hidden_size)
return hn
class RollingRegressor(RollingDeepEstimator, Regressor):
    """
    Wrapper that feeds a sliding window of the most recent examples to the
    wrapped PyTorch regression model.

    Parameters
    ----------
    module
        Torch Module that builds the network to be wrapped.
        The Module should accept parameter `n_features` so that the returned
        model's input shape can be determined based on the number of features
        in the initial training example.
    loss_fn
        Loss function to be used for training the wrapped model. Can be a
        loss function provided by `torch.nn.functional` or one of the
        following: 'mse', 'l1', 'cross_entropy', 'binary_crossentropy',
        'smooth_l1', 'kl_div'.
    optimizer_fn
        Optimizer to be used for training the wrapped model. Can be an
        optimizer class provided by `torch.optim` or one of the following:
        "adam", "adam_w", "sgd", "rmsprop", "lbfgs".
    lr
        Learning rate of the optimizer.
    device
        Device to run the wrapped model on. Can be "cpu" or "cuda".
    seed
        Random seed to be used for training the wrapped model.
    window_size
        Number of recent examples to be fed to the wrapped model at each step.
    append_predict
        Whether to append inputs passed for prediction to the rolling window.
    **kwargs
        Parameters to be passed to the `Module` or the `optimizer`.
    """

    def __init__(
        self,
        module: Type[torch.nn.Module],
        loss_fn: Union[str, Callable] = "mse",
        optimizer_fn: Union[str, Callable] = "sgd",
        lr: float = 1e-3,
        window_size: int = 10,
        append_predict: bool = False,
        device: str = "cpu",
        seed: int = 42,
        **kwargs,
    ):
        # Pure forwarding constructor; the rolling window itself is managed
        # by the RollingDeepEstimator base class.
        super().__init__(
            module=module,
            loss_fn=loss_fn,
            device=device,
            optimizer_fn=optimizer_fn,
            lr=lr,
            window_size=window_size,
            append_predict=append_predict,
            seed=seed,
            **kwargs,
        )

    @classmethod
    def _unit_test_params(cls):
        """
        Yields dictionaries of parameters to be used for unit testing
        the respective class.

        Yields
        -------
        dict
            Dictionary of parameters to be used for unit testing
            the respective class.
        """
        yield {
            "module": _TestLSTM,
            "loss_fn": "mse",
            "optimizer_fn": "sgd",
            "lr": 1e-3,
        }

    @classmethod
    def _unit_test_skips(cls) -> set:
        """
        Indicates which checks to skip during unit testing.

        Most estimators pass the full test suite. However, in some cases,
        some estimators might not be able to pass certain checks.

        Returns
        -------
        set
            Set of checks to skip during unit testing.
        """
        return {
            "check_shuffle_features_no_impact",
            "check_emerging_features",
            "check_disappearing_features",
            "check_predict_proba_one",
            "check_predict_proba_one_binary",
        }

    def learn_one(self, x: dict, y: RegTarget) -> "RollingRegressor":
        """
        Performs one step of training with the sliding
        window of the most recent examples.

        Parameters
        ----------
        x
            Input example.
        y
            Target value.

        Returns
        -------
        RollingRegressor
            The regressor itself.
        """
        if not self.module_initialized:
            self.kwargs["n_features"] = len(x)
            self.initialize_module(**self.kwargs)
        self._x_window.append(list(x.values()))
        # Training only starts once the window has been filled.
        if len(self._x_window) == self.window_size:
            x_t = deque2rolling_tensor(self._x_window, device=self.device)
            y_t = float2tensor(y, device=self.device)
            self._learn(x_t, y_t)
        return self

    def learn_many(
        self, X: pd.DataFrame, y: List[Any]
    ) -> "RollingDeepEstimator":
        """
        Performs one step of training with a batch of examples.

        Parameters
        ----------
        X
            Input examples.
        y
            Target values.

        Returns
        -------
        RollingDeepEstimator
            The regressor itself.
        """
        if not self.module_initialized:
            self.kwargs["n_features"] = len(X.columns)
            self.initialize_module(**self.kwargs)
        # Each dataframe row becomes one entry of the rolling window.
        self._x_window.extend(X.values.tolist())
        if len(self._x_window) == self.window_size:
            x_t = deque2rolling_tensor(self._x_window, device=self.device)
            y_t = torch.unsqueeze(torch.tensor(y, device=self.device), 1)
            self._learn(x_t, y_t)
        return self

    def predict_one(self, x: dict) -> RegTarget:
        """
        Predicts the target value for the current sliding
        window of most recent examples.

        Parameters
        ----------
        x
            Input example.

        Returns
        -------
        RegTarget
            Predicted target value; 0.0 while the window is not yet full.
        """
        res = 0.0
        if not self.module_initialized:
            self.kwargs["n_features"] = len(x)
            self.initialize_module(**self.kwargs)
        if len(self._x_window) == self.window_size:
            self.module.eval()
            # Predict on a copy so the stored window is only mutated when
            # append_predict is set.
            x_win = self._x_window.copy()
            x_win.append(list(x.values()))
            x_t = deque2rolling_tensor(x_win, device=self.device)
            res = self.module(x_t).detach().numpy().item()
        if self.append_predict:
            self._x_window.append(list(x.values()))
        return res

    def predict_many(self, X: pd.DataFrame) -> List:
        """
        Predicts the target values for a batch of examples based on the
        current sliding window.

        Parameters
        ----------
        X
            Input examples.

        Returns
        -------
        List
            Predicted target values; zeros while the window is not yet full.
        """
        res = [0.0] * len(X)
        if not self.module_initialized:
            self.kwargs["n_features"] = len(X.columns)
            self.initialize_module(**self.kwargs)
        x_win = self._x_window.copy()
        x_win.extend(X.values.tolist())
        if len(x_win) == self.window_size:
            self.module.eval()
            x_t = deque2rolling_tensor(x_win, device=self.device)
            res = self.module(x_t).detach().tolist()
        if self.append_predict:
            self._x_window.extend(X.values.tolist())
        return res
import math
from typing import Any, Callable, Type, Union
import pandas as pd
import torch
from river import stats, utils
from scipy.special import ndtr
from river_torch.anomaly import ae
from river_torch.utils import dict2tensor
class ProbabilityWeightedAutoencoder(ae.Autoencoder):
    r"""
    Wrapper for PyTorch autoencoder models for anomaly detection that
    reduces the employed learning rate based on an outlier probability
    estimate of the input example as well as a threshold probability
    `skip_threshold`. Given the probability estimate $p_{out}$, the adjusted
    learning rate $lr_{adj}$ is $lr \cdot (1 - \frac{p_{out}}{skip\_threshold})$;
    if the outlier probability exceeds the threshold, the factor becomes
    negative and the gradient step is effectively reversed.

    Parameters
    ----------
    module
        Torch Module that builds the autoencoder to be wrapped.
        The Module should accept parameter `n_features` so that the returned
        model's input shape can be determined based on the number of features
        in the initial training example.
    loss_fn
        Loss function to be used for training the wrapped model.
        Can be a loss function provided by `torch.nn.functional` or one of
        the following: 'mse', 'l1', 'cross_entropy', 'binary_crossentropy',
        'smooth_l1', 'kl_div'.
    optimizer_fn
        Optimizer to be used for training the wrapped model.
        Can be an optimizer class provided by `torch.optim` or one of the
        following: "adam", "adam_w", "sgd", "rmsprop", "lbfgs".
    lr
        Base learning rate of the optimizer.
    skip_threshold
        Threshold probability to use as a reference for the reduction
        of the base learning rate.
    device
        Device to run the wrapped model on. Can be "cpu" or "cuda".
    seed
        Random seed to be used for training the wrapped model.
    window_size
        Window size for the rolling mean/variance of recent losses.
    **kwargs
        Parameters to be passed to the `module` function
        aside from `n_features`.

    Examples
    --------
    >>> from river_torch.anomaly import ProbabilityWeightedAutoencoder
    >>> from river import metrics
    >>> from river.datasets import CreditCard
    >>> from torch import nn, manual_seed
    >>> import math
    >>> from river.compose import Pipeline
    >>> from river.preprocessing import MinMaxScaler

    >>> _ = manual_seed(42)
    >>> dataset = CreditCard().take(5000)
    >>> metric = metrics.ROCAUC(n_thresholds=50)

    >>> class MyAutoEncoder(torch.nn.Module):
    ...     def __init__(self, n_features, latent_dim=3):
    ...         super(MyAutoEncoder, self).__init__()
    ...         self.linear1 = nn.Linear(n_features, latent_dim)
    ...         self.nonlin = torch.nn.LeakyReLU()
    ...         self.linear2 = nn.Linear(latent_dim, n_features)
    ...         self.sigmoid = nn.Sigmoid()
    ...
    ...     def forward(self, X, **kwargs):
    ...         X = self.linear1(X)
    ...         X = self.nonlin(X)
    ...         X = self.linear2(X)
    ...         return self.sigmoid(X)

    >>> ae = ProbabilityWeightedAutoencoder(module=MyAutoEncoder, lr=0.005)
    >>> scaler = MinMaxScaler()
    >>> model = Pipeline(scaler, ae)

    >>> for x, y in dataset:
    ...     score = model.score_one(x)
    ...     model = model.learn_one(x=x)
    ...     metric = metric.update(y, score)
    ...
    >>> print(f"ROCAUC: {metric.get():.4f}")
    ROCAUC: 0.8599
    """

    def __init__(
        self,
        module: Type[torch.nn.Module],
        loss_fn: Union[str, Callable] = "mse",
        optimizer_fn: Union[str, Callable] = "sgd",
        lr: float = 1e-3,
        device: str = "cpu",
        seed: int = 42,
        skip_threshold: float = 0.9,
        window_size=250,
        **kwargs,
    ):
        super().__init__(
            module=module,
            loss_fn=loss_fn,
            optimizer_fn=optimizer_fn,
            lr=lr,
            device=device,
            seed=seed,
            **kwargs,
        )
        self.window_size = window_size
        self.skip_threshold = skip_threshold
        # Rolling statistics of recent losses, used to estimate the outlier
        # probability of each new example's reconstruction error.
        self.rolling_mean = utils.Rolling(
            stats.Mean(), window_size=window_size
        )
        self.rolling_var = utils.Rolling(stats.Var(), window_size=window_size)

    def learn_one(
        self, x: dict, y: Any = None, **kwargs
    ) -> "ProbabilityWeightedAutoencoder":
        """
        Performs one step of training with a single example,
        scaling the employed learning rate based on the outlier
        probability estimate of the input example.

        Parameters
        ----------
        x
            Input example.
        y
            Ignored; present for API compatibility.
        **kwargs
            Ignored keyword arguments.

        Returns
        -------
        ProbabilityWeightedAutoencoder
            The autoencoder itself.
        """
        if not self.module_initialized:
            self.kwargs["n_features"] = len(x)
            self.initialize_module(**self.kwargs)
        x_t = dict2tensor(x, device=self.device)

        self.module.train()
        x_pred = self.module(x_t)
        loss = self.loss_fn(x_pred, x_t)
        self._apply_loss(loss)
        return self

    def _apply_loss(self, loss):
        # Scale the loss by the outlier probability of its magnitude, then
        # take a regular optimization step on the scaled loss.
        losses_numpy = loss.detach().numpy()
        # Statistics from *previous* examples are fetched before the rolling
        # windows are updated with the current loss values.
        mean = self.rolling_mean.get()
        var = self.rolling_var.get() if self.rolling_var.get() > 0 else 1
        if losses_numpy.ndim == 0:
            self.rolling_mean.update(losses_numpy)
            self.rolling_var.update(losses_numpy)
        else:
            # Bug fix: the running statistics were previously updated with
            # the loop *index* (`range(len(...))`) instead of the loss
            # values themselves.
            for loss_numpy in losses_numpy:
                self.rolling_mean.update(loss_numpy)
                self.rolling_var.update(loss_numpy)

        loss_scaled = (losses_numpy - mean) / math.sqrt(var)
        # Outlier probability via the standard normal CDF of the scaled loss.
        prob = ndtr(loss_scaled)
        loss = (
            torch.tensor((self.skip_threshold - prob) / self.skip_threshold)
            * loss
        )
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

    def learn_many(self, X: pd.DataFrame) -> "ProbabilityWeightedAutoencoder":
        """
        Performs one step of training with a batch of examples.

        Parameters
        ----------
        X
            Input batch of examples.

        Returns
        -------
        ProbabilityWeightedAutoencoder
            The autoencoder itself.
        """
        if not self.module_initialized:
            self.kwargs["n_features"] = len(X.columns)
            self.initialize_module(**self.kwargs)
        # NOTE(review): converts the frame via its dict representation;
        # `df2tensor` would be the more direct route — confirm equivalence.
        X = dict2tensor(X.to_dict(), device=self.device)

        self.module.train()
        x_pred = self.module(X)
        loss = torch.mean(
            self.loss_fn(x_pred, X, reduction="none"),
            dim=list(range(1, X.dim())),
        )
        self._apply_loss(loss)
        return self
import abc

import numpy as np
from river import base, utils
from river.anomaly import HalfSpaceTrees
from river.anomaly.base import AnomalyDetector
from river.stats import Max, Mean, Min
class AnomalyScaler(base.Wrapper, AnomalyDetector):
    """Wrapper around an anomaly detector that scales the output of the model
    to account for drift in the wrapped model's anomaly scores.

    Parameters
    ----------
    anomaly_detector
        Anomaly detector to be wrapped.
    """

    def __init__(self, anomaly_detector: AnomalyDetector):
        self.anomaly_detector = anomaly_detector

    @classmethod
    def _unit_test_params(self) -> dict:
        """
        Returns a dictionary of parameters to be used for unit testing
        the respective class.

        Returns
        -------
        dict
            Dictionary of parameters to be used for unit testing the
            respective class.
        """
        # NOTE(review): the classmethod parameter is named `self` rather
        # than the conventional `cls`.
        return {"anomaly_detector": HalfSpaceTrees()}

    @classmethod
    def _unit_test_skips(self) -> set:
        """
        Indicates which checks to skip during unit testing.

        Most estimators pass the full test suite. However, in some cases,
        some estimators might not be able to pass certain checks.

        Returns
        -------
        set
            Set of checks to skip during unit testing.
        """
        return {
            "check_shuffle_features_no_impact",
            "check_emerging_features",
            "check_disappearing_features",
            "check_predict_proba_one",
            "check_predict_proba_one_binary",
        }

    @property
    def _wrapped_model(self):
        # Exposes the wrapped detector under river's wrapper protocol.
        return self.anomaly_detector

    @abc.abstractmethod
    def score_one(self, *args) -> float:
        """Return a scaled anomaly score based on raw score provided by
        the wrapped anomaly detector.

        A high score is indicative of an anomaly. A low score corresponds
        to a normal observation.

        Parameters
        ----------
        *args
            Depends on whether the underlying anomaly detector
            is supervised or not.

        Returns
        -------
        A scaled anomaly score. Larger values indicate
        more anomalous examples.
        """

    def learn_one(self, *args) -> "AnomalyScaler":
        """
        Update the underlying anomaly detector.

        Parameters
        ----------
        *args
            Depends on whether the underlying anomaly detector
            is supervised or not.

        Returns
        -------
        AnomalyScaler
            The model itself.
        """
        self.anomaly_detector.learn_one(*args)
        return self

    @abc.abstractmethod
    def score_many(self, *args) -> np.ndarray:
        """Return scaled anomaly scores based on raw score provided by
        the wrapped anomaly detector.

        A high score is indicative of an anomaly. A low score corresponds
        to a normal observation.

        Parameters
        ----------
        *args
            Depends on whether the underlying anomaly detector is
            supervised or not.

        Returns
        -------
        Scaled anomaly scores. Larger values indicate more anomalous examples.
        """
class AnomalyStandardScaler(AnomalyScaler):
    """
    Wrapper around an anomaly detector that standardizes the model's output
    using incremental mean and variance metrics.

    Parameters
    ----------
    anomaly_detector
        The anomaly detector to wrap.
    with_std
        Whether to use standard deviation for scaling.
    rolling
        Choose whether the metrics are rolling metrics or not.
    window_size
        The window size used for the metrics if rolling==True.
    """

    def __init__(
        self,
        anomaly_detector: AnomalyDetector,
        with_std: bool = True,
        rolling: bool = True,
        window_size: int = 250,
    ):
        super().__init__(anomaly_detector)
        self.rolling = rolling
        self.window_size = window_size
        # Running mean of raw scores and running mean of squared raw scores;
        # together they give the variance via E[X^2] - E[X]^2.
        self.mean = (
            utils.Rolling(Mean(), self.window_size) if self.rolling else Mean()
        )
        self.sq_mean = (
            utils.Rolling(Mean(), self.window_size) if self.rolling else Mean()
        )
        self.with_std = with_std

    def score_one(self, *args):
        """
        Return a scaled anomaly score based on raw score provided by the
        wrapped anomaly detector. Larger values indicate more
        anomalous examples.

        Parameters
        ----------
        *args
            Depends on whether the underlying anomaly detector
            is supervised or not.

        Returns
        -------
        A scaled anomaly score. Larger values indicate more
        anomalous examples.
        """
        raw_score = self.anomaly_detector.score_one(*args)
        mean = self.mean.update(raw_score).get()
        if self.with_std:
            # NOTE(review): E[X^2] - E[X]^2 can come out slightly negative
            # due to floating-point error (making the sqrt below fail) and
            # is zero for the first observation — confirm intended handling.
            var = (
                self.sq_mean.update(raw_score**2).get() - mean**2
            )
            score = (raw_score - mean) / var**0.5
        else:
            score = raw_score - mean

        return score
class AnomalyMeanScaler(AnomalyScaler):
    """Wrapper around an anomaly detector that scales the model's output
    by the incremental mean of previous scores.

    Parameters
    ----------
    anomaly_detector
        The anomaly detector to wrap.
    rolling
        Choose whether the metrics are rolling metrics or not.
    window_size
        The window size used for mean computation if rolling==True.
    """

    def __init__(
        self,
        anomaly_detector: AnomalyDetector,
        rolling: bool = True,
        window_size=250,
    ):
        super().__init__(anomaly_detector=anomaly_detector)
        self.rolling = rolling
        self.window_size = window_size
        self.mean = (
            utils.Rolling(Mean(), self.window_size) if self.rolling else Mean()
        )

    def score_one(self, *args):
        """
        Return a scaled anomaly score based on raw score provided by the
        wrapped anomaly detector. Larger values indicate more
        anomalous examples.

        Parameters
        ----------
        *args
            Depends on whether the underlying anomaly detector is
            supervised or not.

        Returns
        -------
        A scaled anomaly score. Larger values indicate more
        anomalous examples.
        """
        raw_score = self.anomaly_detector.score_one(*args)
        # The mean is updated with the current score before scaling, so the
        # very first score maps to 1.0. NOTE(review): a zero mean would
        # raise ZeroDivisionError — confirm raw scores are positive.
        mean = self.mean.update(raw_score).get()
        score = raw_score / mean

        return score
class AnomalyMinMaxScaler(AnomalyScaler):
    """Wrapper around an anomaly detector that scales the model's output to
    $[0, 1]$ using rolling min and max metrics.

    Parameters
    ----------
    anomaly_detector
        The anomaly detector to wrap.
    rolling
        Choose whether the metrics are rolling metrics or not.
    window_size
        The window size used for the metrics if rolling==True.
    """

    def __init__(
        self,
        anomaly_detector: AnomalyDetector,
        rolling: bool = True,
        window_size: int = 250,
    ):
        super().__init__(anomaly_detector)
        self.rolling = rolling
        self.window_size = window_size
        self.min = (
            utils.Rolling(Min(), self.window_size) if self.rolling else Min()
        )
        # Bug fix: the running maximum was previously tracked with a `Min`
        # statistic, which makes the (max - min) range collapse to zero.
        self.max = (
            utils.Rolling(Max(), self.window_size) if self.rolling else Max()
        )

    def score_one(self, *args):
        """
        Return a scaled anomaly score based on raw score provided by the
        wrapped anomaly detector. Larger values indicate more
        anomalous examples.

        Parameters
        ----------
        *args
            Depends on whether the underlying anomaly detector is
            supervised or not.

        Returns
        -------
        A scaled anomaly score in $[0, 1]$. Larger values indicate more
        anomalous examples.
        """
        raw_score = self.anomaly_detector.score_one(*args)
        min_score = self.min.update(raw_score).get()
        max_score = self.max.update(raw_score).get()
        # Guard against a degenerate range (e.g. the very first
        # observation), which would otherwise raise ZeroDivisionError.
        if max_score == min_score:
            return 0.0
        return (raw_score - min_score) / (max_score - min_score)
from typing import Any, Callable, List, Type, Union
import numpy as np
import pandas as pd
import torch
from river import anomaly
from torch import nn
from river_torch.base import RollingDeepEstimator
from river_torch.utils.tensor_conversion import deque2rolling_tensor
class _TestLSTMAutoencoder(nn.Module):
    # Minimal LSTM encoder/decoder pair used by the unit-test parameter
    # factory of `RollingAutoencoder`.

    def __init__(
        self, n_features, hidden_size=10, n_layers=1, batch_first=False
    ):
        super().__init__()
        self.n_features = n_features
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.batch_first = batch_first
        # Axis that indexes time steps, depending on the layout convention.
        self.time_axis = 1 if batch_first else 0
        self.encoder = nn.LSTM(
            input_size=n_features,
            hidden_size=hidden_size,
            num_layers=n_layers,
            batch_first=batch_first,
        )
        self.decoder = nn.LSTM(
            input_size=hidden_size,
            hidden_size=n_features,
            num_layers=n_layers,
            batch_first=batch_first,
        )

    def forward(self, x):
        # Encode the window; only the final hidden state is kept.
        _, (h, _) = self.encoder(x)
        # NOTE(review): the reshape to (1, 1, -1) assumes batch size 1, and
        # the concatenation below only type-checks when
        # hidden_size == n_features — confirm against how the unit tests
        # construct this module.
        h = h[-1].view(1, 1, -1)
        x_flipped = torch.flip(x[1:], dims=[self.time_axis])
        input = torch.cat((h, x_flipped), dim=self.time_axis)
        x_hat, _ = self.decoder(input)
        x_hat = torch.flip(x_hat, dims=[self.time_axis])

        return x_hat
class RollingAutoencoder(RollingDeepEstimator, anomaly.base.AnomalyDetector):
    """
    Wrapper for PyTorch autoencoder models that uses the network's
    reconstruction error for scoring the anomalousness of a given example.
    The class also features a rolling window to allow the model to make
    predictions based on the reconstructability of multiple previous
    examples.

    Parameters
    ----------
    module
        Torch module that builds the autoencoder to be wrapped.
        The module should accept inputs with shape
        `(window_size, batch_size, n_features)`. It should also
        feature a parameter `n_features` used to adapt the network to the
        number of features in the initial training example.
    loss_fn
        Loss function to be used for training the wrapped model. Can be a
        loss function provided by `torch.nn.functional` or one of the
        following: 'mse', 'l1', 'cross_entropy', 'binary_crossentropy',
        'smooth_l1', 'kl_div'.
    optimizer_fn
        Optimizer to be used for training the wrapped model. Can be an
        optimizer class provided by `torch.optim` or one of the following:
        "adam", "adam_w", "sgd", "rmsprop", "lbfgs".
    lr
        Learning rate of the optimizer.
    device
        Device to run the wrapped model on. Can be "cpu" or "cuda".
    seed
        Random seed to be used for training the wrapped model.
    window_size
        Size of the rolling window used for storing previous examples.
    append_predict
        Whether to append inputs passed for prediction to the rolling window.
    **kwargs
        Parameters to be passed to the `Module` or the `optimizer`.
    """

    def __init__(
        self,
        module: Type[torch.nn.Module],
        loss_fn: Union[str, Callable] = "mse",
        optimizer_fn: Union[str, Callable] = "sgd",
        lr: float = 1e-3,
        device: str = "cpu",
        seed: int = 42,
        window_size: int = 10,
        append_predict: bool = False,
        **kwargs,
    ):
        super().__init__(
            module=module,
            loss_fn=loss_fn,
            optimizer_fn=optimizer_fn,
            lr=lr,
            device=device,
            seed=seed,
            window_size=window_size,
            append_predict=append_predict,
            **kwargs,
        )

    @classmethod
    def _unit_test_params(cls):
        """
        Yields dictionaries of parameters to be used for unit testing
        the respective class.

        Yields
        -------
        dict
            Dictionary of parameters to be used for unit testing
            the respective class.
        """
        yield {
            "module": _TestLSTMAutoencoder,
            "loss_fn": "mse",
            "optimizer_fn": "sgd",
        }

    @classmethod
    def _unit_test_skips(cls) -> set:
        """
        Indicates which checks to skip during unit testing.

        Most estimators pass the full test suite. However, in some cases,
        some estimators might not be able to pass certain checks.

        Returns
        -------
        set
            Set of checks to skip during unit testing.
        """
        return {
            "check_shuffle_features_no_impact",
            "check_emerging_features",
            "check_disappearing_features",
            "check_predict_proba_one",
            "check_predict_proba_one_binary",
        }

    def _learn(self, x: torch.Tensor):
        # One optimization step on the reconstruction loss.
        self.module.train()
        x_pred = self.module(x)
        loss = self.loss_fn(x_pred, x)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

    def learn_one(self, x: dict, y: Any = None) -> "RollingAutoencoder":
        """
        Performs one step of training with a single example.

        Parameters
        ----------
        x
            Input example.
        y
            Should be None; present for API compatibility.

        Returns
        -------
        RollingAutoencoder
            The estimator itself.
        """
        if not self.module_initialized:
            self.kwargs["n_features"] = len(x)
            self.initialize_module(**self.kwargs)

        self._x_window.append(list(x.values()))
        # Training only starts once the window has been filled.
        if len(self._x_window) == self.window_size:
            x_t = deque2rolling_tensor(self._x_window, device=self.device)
            self._learn(x=x_t)
        return self

    def learn_many(self, X: pd.DataFrame, y=None) -> "RollingAutoencoder":
        """
        Performs one step of training with a batch of examples.

        Parameters
        ----------
        X
            Input batch of examples.
        y
            Should be None

        Returns
        -------
        RollingAutoencoder
            The estimator itself.
        """
        if not self.module_initialized:
            self.kwargs["n_features"] = len(X.columns)
            self.initialize_module(**self.kwargs)

        # Bug fix: the rows were previously appended as one nested list,
        # making the whole batch occupy a single window slot; extend
        # row-wise instead (consistent with RollingRegressor.learn_many).
        self._x_window.extend(X.values.tolist())
        if len(self._x_window) == self.window_size:
            X_t = deque2rolling_tensor(self._x_window, device=self.device)
            self._learn(x=X_t)
        return self

    def score_one(self, x: dict) -> float:
        """
        Returns an anomaly score for the window extended by the provided
        example, in the form of the autoencoder's reconstruction error.

        Parameters
        ----------
        x
            Input example.

        Returns
        -------
        float
            Anomaly score; 0.0 while the window is not yet full.
        """
        res = 0.0
        if not self.module_initialized:
            self.kwargs["n_features"] = len(x)
            self.initialize_module(**self.kwargs)

        if len(self._x_window) == self.window_size:
            # Score on a copy so the stored window is only mutated when
            # append_predict is set.
            x_win = self._x_window.copy()
            x_win.append(list(x.values()))
            x_t = deque2rolling_tensor(x_win, device=self.device)
            self.module.eval()
            x_pred = self.module(x_t)
            loss = self.loss_fn(x_pred, x_t)
            res = loss.item()

        if self.append_predict:
            self._x_window.append(list(x.values()))
        return res

    def score_many(self, X: pd.DataFrame) -> List[Any]:
        """
        Returns anomaly scores for the window extended by the provided
        batch, in the form of per-example reconstruction errors.

        Parameters
        ----------
        X
            Input batch of examples.

        Returns
        -------
        List[Any]
            Anomaly scores; zeros while the window is not yet full.
        """
        if not self.module_initialized:
            self.kwargs["n_features"] = len(X.columns)
            self.initialize_module(**self.kwargs)
        x_win = self._x_window.copy()
        # Bug fix: extend row-wise rather than appending the nested batch
        # as a single element (see learn_many).
        x_win.extend(X.values.tolist())
        if self.append_predict:
            self._x_window.extend(X.values.tolist())

        # Bug fix: the length check previously inspected `self._x_window`,
        # ignoring the rows just added to the local copy `x_win`.
        if len(x_win) == self.window_size:
            self.module.eval()
            X_t = deque2rolling_tensor(x_win, device=self.device)
            x_pred = self.module(X_t)
            # Bug fix: the reconstruction error was computed between the
            # prediction and itself (always zero); compare against the
            # input instead.
            loss = torch.mean(
                self.loss_fn(x_pred, X_t, reduction="none"),
                dim=list(range(1, x_pred.dim())),
            )
            losses = loss.detach().numpy()
            if len(losses) < len(X):
                losses = np.pad(losses, (len(X) - len(losses), 0))
            return losses.tolist()
        else:
            return np.zeros(len(X)).tolist()
from typing import Any, Callable, Type, Union
import numpy as np
import pandas as pd
import torch
from river.anomaly.base import AnomalyDetector
from torch import nn
from river_torch.base import DeepEstimator
from river_torch.utils import dict2tensor
from river_torch.utils.tensor_conversion import df2tensor
class _TestAutoencoder(torch.nn.Module):
def __init__(self, n_features, latent_dim=3):
super().__init__()
self.linear1 = nn.Linear(n_features, latent_dim)
self.nonlin = torch.nn.LeakyReLU()
self.linear2 = nn.Linear(latent_dim, n_features)
def forward(self, X, **kwargs):
X = self.linear1(X)
X = self.nonlin(X)
X = self.linear2(X)
return nn.functional.sigmoid(X)
class Autoencoder(DeepEstimator, AnomalyDetector):
    """
    Wrapper for PyTorch autoencoder models that uses the network's
    reconstruction error for scoring the anomalousness of a given example.

    Parameters
    ----------
    module
        Torch Module that builds the autoencoder to be wrapped.
        The Module should accept parameter `n_features` so that the returned
        model's input shape can be determined based on the number of features
        in the initial training example.
    loss_fn
        Loss function to be used for training the wrapped model. Can be a
        loss function provided by `torch.nn.functional` or one of the
        following: 'mse', 'l1', 'cross_entropy', 'binary_crossentropy',
        'smooth_l1', 'kl_div'.
    optimizer_fn
        Optimizer to be used for training the wrapped model. Can be an
        optimizer class provided by `torch.optim` or one of the following:
        "adam", "adam_w", "sgd", "rmsprop", "lbfgs".
    lr
        Learning rate of the optimizer.
    device
        Device to run the wrapped model on. Can be "cpu" or "cuda".
    seed
        Random seed to be used for training the wrapped model.
    **kwargs
        Parameters to be passed to the `torch.Module` class
        aside from `n_features`.

    Examples
    --------
    >>> from river_torch.anomaly import Autoencoder
    >>> from river import metrics
    >>> from river.datasets import CreditCard
    >>> from torch import nn
    >>> import math
    >>> from river.compose import Pipeline
    >>> from river.preprocessing import MinMaxScaler

    >>> dataset = CreditCard().take(5000)
    >>> metric = metrics.ROCAUC(n_thresholds=50)

    >>> class MyAutoEncoder(torch.nn.Module):
    ...     def __init__(self, n_features, latent_dim=3):
    ...         super(MyAutoEncoder, self).__init__()
    ...         self.linear1 = nn.Linear(n_features, latent_dim)
    ...         self.nonlin = torch.nn.LeakyReLU()
    ...         self.linear2 = nn.Linear(latent_dim, n_features)
    ...         self.sigmoid = nn.Sigmoid()
    ...
    ...     def forward(self, X, **kwargs):
    ...         X = self.linear1(X)
    ...         X = self.nonlin(X)
    ...         X = self.linear2(X)
    ...         return self.sigmoid(X)

    >>> ae = Autoencoder(module=MyAutoEncoder, lr=0.005)
    >>> scaler = MinMaxScaler()
    >>> model = Pipeline(scaler, ae)

    >>> for x, y in dataset:
    ...     score = model.score_one(x)
    ...     model = model.learn_one(x=x)
    ...     metric = metric.update(y, score)
    ...
    >>> print(f"ROCAUC: {metric.get():.4f}")
    ROCAUC: 0.7447
    """

    def __init__(
        self,
        module: Type[torch.nn.Module],
        loss_fn: Union[str, Callable] = "mse",
        optimizer_fn: Union[str, Callable] = "sgd",
        lr: float = 1e-3,
        device: str = "cpu",
        seed: int = 42,
        **kwargs,
    ):
        # Pure forwarding constructor; lazy module construction is handled
        # by the DeepEstimator base class.
        super().__init__(
            module=module,
            loss_fn=loss_fn,
            optimizer_fn=optimizer_fn,
            lr=lr,
            device=device,
            seed=seed,
            **kwargs,
        )

    @classmethod
    def _unit_test_params(cls):
        """
        Yields dictionaries of parameters to be used for unit testing
        the respective class.

        Yields
        -------
        dict
            Dictionary of parameters to be used for unit testing
            the respective class.
        """
        yield {
            "module": _TestAutoencoder,
            "loss_fn": "mse",
            "optimizer_fn": "sgd",
        }

    @classmethod
    def _unit_test_skips(self) -> set:
        """
        Indicates which checks to skip during unit testing.

        Most estimators pass the full test suite. However, in some cases,
        some estimators might not be able to pass certain checks.

        Returns
        -------
        set
            Set of checks to skip during unit testing.
        """
        return {
            "check_shuffle_features_no_impact",
            "check_emerging_features",
            "check_disappearing_features",
            "check_predict_proba_one",
            "check_predict_proba_one_binary",
        }

    def learn_one(self, x: dict, y: Any = None, **kwargs) -> "Autoencoder":
        """
        Performs one step of training with a single example.

        Parameters
        ----------
        x
            Input example.
        y
            Ignored; present for API compatibility.
        **kwargs
            Ignored keyword arguments.

        Returns
        -------
        Autoencoder
            The model itself.
        """
        if not self.module_initialized:
            self.kwargs["n_features"] = len(x)
            self.initialize_module(**self.kwargs)
        return self._learn(dict2tensor(x, device=self.device))

    def _learn(self, x: torch.Tensor) -> "Autoencoder":
        # One optimization step on the reconstruction loss. Gradients are
        # cleared after the step, so each call starts from zero.
        self.module.train()
        x_pred = self.module(x)
        loss = self.loss_fn(x_pred, x)
        loss.backward()
        self.optimizer.step()
        self.optimizer.zero_grad()
        return self

    def score_one(self, x: dict) -> float:
        """
        Returns an anomaly score for the provided example in the form of
        the autoencoder's reconstruction error.

        Parameters
        ----------
        x
            Input example.

        Returns
        -------
        float
            Anomaly score for the given example. Larger values indicate
            more anomalous examples.
        """
        if not self.module_initialized:
            self.kwargs["n_features"] = len(x)
            self.initialize_module(**self.kwargs)

        x_t = dict2tensor(x, device=self.device)
        self.module.eval()
        with torch.inference_mode():
            x_pred = self.module(x_t)
        loss = self.loss_fn(x_pred, x_t).item()
        return loss

    def learn_many(self, X: pd.DataFrame) -> "Autoencoder":
        """
        Performs one step of training with a batch of examples.

        Parameters
        ----------
        X
            Input batch of examples.

        Returns
        -------
        Autoencoder
            The model itself.
        """
        if not self.module_initialized:
            self.kwargs["n_features"] = len(X.columns)
            self.initialize_module(**self.kwargs)
        X = df2tensor(X, device=self.device)
        return self._learn(X)

    def score_many(self, X: pd.DataFrame) -> np.ndarray:
        """
        Returns an anomaly score for the provided batch of examples in
        the form of the autoencoder's reconstruction error.

        Parameters
        ----------
        X
            Input batch of examples.

        Returns
        -------
        np.ndarray
            Anomaly scores for the given batch of examples. Larger values
            indicate more anomalous examples.
        """
        if not self.module_initialized:
            self.kwargs["n_features"] = len(X.columns)
            self.initialize_module(**self.kwargs)
        X = df2tensor(X, device=self.device)

        self.module.eval()
        with torch.inference_mode():
            X_pred = self.module(X)
        # Per-example loss: mean over all non-batch dimensions.
        loss = torch.mean(
            self.loss_fn(X_pred, X, reduction="none"),
            dim=list(range(1, X.dim())),
        )
        score = loss.cpu().detach().numpy()
        return score
from typing import Callable, Union
import torch
import torch.nn.functional as F
from torch import nn, optim
# Mapping from activation-function name to the corresponding nn.Module class.
ACTIVATION_FNS = {
    "selu": nn.SELU,
    "relu": nn.ReLU,
    "leaky_relu": nn.LeakyReLU,
    "gelu": nn.GELU,
    "tanh": nn.Tanh,
    "sigmoid": nn.Sigmoid,
    "elu": nn.ELU,
    "linear": nn.Identity,
}
# Mapping from loss-function name to the corresponding functional loss.
LOSS_FNS = {
    "mse": F.mse_loss,
    "l1": F.l1_loss,
    "smooth_l1": F.smooth_l1_loss,
    "binary_cross_entropy": F.binary_cross_entropy,
    "cross_entropy": F.cross_entropy,
    "kl_div": F.kl_div,
    "huber": F.huber_loss,
    "binary_cross_entropy_with_logits": F.binary_cross_entropy_with_logits,
}
# Mapping from optimizer name to the corresponding torch.optim class.
OPTIMIZER_FNS = {
    "adam": optim.Adam,
    "adam_w": optim.AdamW,
    "sgd": optim.SGD,
    "rmsprop": optim.RMSprop,
    "lbfgs": optim.LBFGS,
}
# Mapping from initialisation-scheme name to the corresponding in-place
# nn.init function.
INIT_FNS = {
    "uniform": nn.init.uniform_,
    "normal": nn.init.normal_,
    "xavier_uniform": nn.init.xavier_uniform_,
    "xavier_normal": nn.init.xavier_normal_,
    "kaiming_uniform": nn.init.kaiming_uniform_,
    "kaiming_normal": nn.init.kaiming_normal_,
}


def get_init_fn(init_fn):
    """Return a weight-initialisation closure for the requested scheme.

    Parameters
    ----------
    init_fn
        Name of the initialisation function. Should be one of the keys of
        ``INIT_FNS``; unknown names fall back to ``xavier_uniform``.

    Returns
    -------
    Callable
        A function ``(weight, activation_fn) -> Tensor`` that initialises
        ``weight`` in place with the requested scheme.
    """
    # BUG FIX: the fallback used to be the *string* "xavier_uniform",
    # which would later be called as a function and raise a TypeError.
    init_fn_ = INIT_FNS.get(init_fn, nn.init.xavier_uniform_)
    if init_fn.startswith("xavier"):

        def result(weight, activation_fn):
            # Xavier schemes take a gain derived from the activation.
            return init_fn_(weight, gain=nn.init.calculate_gain(activation_fn))

    elif init_fn.startswith("kaiming"):

        def result(weight, activation_fn):
            # Kaiming schemes take the activation's nonlinearity name.
            return init_fn_(weight, nonlinearity=activation_fn)

    # BUG FIX: "uniform" previously returned the constant 0 and never
    # initialised the weights; it now falls through to the generic case
    # below and applies nn.init.uniform_.
    else:

        def result(weight, activation_fn):
            return init_fn_(weight)

    return result
# Template for errors raised when a parameter is neither a known string
# key nor an object of the expected type.
BASE_PARAM_ERROR = "Unknown {}: {}. A valid string or {} is required."
def get_activation_fn(activation_fn: Union[str, Callable]) -> Callable:
    """Resolve *activation_fn* to an ``nn.Module`` class.

    Parameters
    ----------
    activation_fn
        The activation function to fetch. Can be a string key of
        ``ACTIVATION_FNS`` or an ``nn.Module`` class.

    Returns
    -------
    Callable
        The class of the requested activation function.

    Raises
    ------
    ValueError
        If the string is unknown or the object is not an nn.Module class.
    """
    err = ValueError(
        BASE_PARAM_ERROR.format(
            "activation function", activation_fn, "nn.Module"
        )
    )
    if isinstance(activation_fn, str):
        if activation_fn not in ACTIVATION_FNS:
            raise err
        return ACTIVATION_FNS[activation_fn]
    # Instantiate once to verify the object really is a module class.
    if not isinstance(activation_fn(), nn.Module):
        raise err
    return activation_fn
def get_optim_fn(optim_fn: Union[str, Callable]):
    """Resolve *optim_fn* to a ``torch.optim.Optimizer`` class.

    Parameters
    ----------
    optim_fn
        The optimizer to fetch. Can be a string key of ``OPTIMIZER_FNS``
        or an optimizer class.

    Returns
    -------
    Callable
        The class of the requested optimizer.

    Raises
    ------
    ValueError
        If the string is unknown or the object is not an optimizer class.
    """
    err = ValueError(
        BASE_PARAM_ERROR.format("optimizer", optim_fn, "nn.Module")
    )
    if isinstance(optim_fn, str):
        if optim_fn not in OPTIMIZER_FNS:
            raise err
        return OPTIMIZER_FNS[optim_fn]
    # Instantiate once with a dummy parameter to verify the interface.
    if not isinstance(
        optim_fn(params=[torch.empty(1)], lr=1e-3), torch.optim.Optimizer
    ):
        raise err
    return optim_fn
def get_loss_fn(loss_fn: Union[str, Callable]):
    """Resolve *loss_fn* to a loss function.

    Parameters
    ----------
    loss_fn
        The loss function to fetch. Can be a string key of ``LOSS_FNS``
        or a callable.

    Returns
    -------
    Callable
        The requested loss function.

    Raises
    ------
    ValueError
        If the string is unknown or the object is not callable.
    """
    err = ValueError(
        BASE_PARAM_ERROR.format("loss function", loss_fn, "function")
    )
    if isinstance(loss_fn, str):
        if loss_fn not in LOSS_FNS:
            raise err
        return LOSS_FNS[loss_fn]
    if not callable(loss_fn):
        raise err
    return loss_fn
from typing import Deque, Dict, Optional, Union
import numpy as np
import pandas as pd
import torch
from ordered_set import OrderedSet
from river import base
from river.base.typing import ClfTarget, RegTarget
def dict2tensor(
    x: dict, device: str = "cpu", dtype: torch.dtype = torch.float32
) -> torch.Tensor:
    """
    Convert a feature dictionary into a single-row tensor.

    Parameters
    ----------
    x
        Dictionary of feature values.
    device
        Device.
    dtype
        Dtype.

    Returns
    -------
    torch.Tensor
        Tensor of shape ``(1, len(x))`` holding the dict values in
        insertion order.
    """
    values = list(x.values())
    return torch.tensor([values], device=device, dtype=dtype)
def float2tensor(
    y: Union[float, int, RegTarget], device="cpu", dtype=torch.float32
) -> torch.Tensor:
    """
    Wrap a scalar target into a ``(1, 1)`` tensor.

    Parameters
    ----------
    y
        Scalar target value.
    device
        Device.
    dtype
        Dtype.

    Returns
    -------
    torch.Tensor
        Tensor of shape ``(1, 1)`` containing ``y``.
    """
    return torch.tensor(y, device=device, dtype=dtype).reshape(1, 1)
def deque2rolling_tensor(
    window: Deque,
    device="cpu",
    dtype=torch.float32,
) -> torch.Tensor:
    """
    Convert a rolling window of examples into a tensor.

    Parameters
    ----------
    window
        Rolling window of per-example feature lists.
    device
        Device.
    dtype
        Dtype.

    Returns
    -------
    torch.Tensor
        Tensor of shape ``(len(window), 1, n_features)`` — a length-1
        batch dimension is inserted at axis 1.
    """
    stacked = torch.tensor(window, device=device, dtype=dtype)
    return stacked.unsqueeze(1)
def df2tensor(
    X: pd.DataFrame, device="cpu", dtype=torch.float32
) -> torch.Tensor:
    """
    Convert a dataframe's values into a 2-D tensor.

    Parameters
    ----------
    X
        Dataframe of examples.
    device
        Device.
    dtype
        Dtype.

    Returns
    -------
    torch.Tensor
        Tensor of shape ``(n_rows, n_columns)``.
    """
    array = X.to_numpy()
    return torch.tensor(array, device=device, dtype=dtype)
def labels2onehot(
    y: Union[base.typing.ClfTarget, pd.Series],
    classes: OrderedSet[base.typing.ClfTarget],
    n_classes: Optional[int] = None,
    device="cpu",
    dtype=torch.float32,
) -> torch.Tensor:
    """
    One-hot encode a single label or a series of labels.

    Parameters
    ----------
    y
        Label or series of labels.
    classes
        Ordered collection of known classes; a label's position in it
        determines the hot column.
    n_classes
        Number of output columns; defaults to ``len(classes)``.
    device
        Device.
    dtype
        Dtype.

    Returns
    -------
    torch.Tensor
        Tensor of shape ``(len(y), n_classes)`` for a series, or
        ``(1, n_classes)`` for a single label.
    """
    if n_classes is None:
        n_classes = len(classes)
    if isinstance(y, pd.Series):
        onehot = torch.zeros(len(y), n_classes, device=device, dtype=dtype)
        for row, label in enumerate(y):
            idx = classes.index(label)
            # Indices beyond n_classes are silently dropped (caller may
            # cap the output width below the number of known classes).
            if isinstance(idx, int) and idx < n_classes:
                onehot[row, idx] = 1
    else:
        onehot = torch.zeros(1, n_classes, device=device, dtype=dtype)
        idx = classes.index(y)
        if isinstance(idx, int) and idx < n_classes:
            onehot[0, idx] = 1
    return onehot
def output2proba(
    preds: torch.Tensor, classes: OrderedSet, with_logits=False
) -> Union[Dict[ClfTarget, float], List[Dict[ClfTarget, float]]]:
    """
    Convert raw model outputs into per-class probability dictionaries.

    Parameters
    ----------
    preds
        Model outputs of shape ``(n_examples, n_outputs)``.
    classes
        Observed class labels, ordered to match the output columns.
    with_logits
        If True, turn logits into probabilities first: softmax for
        multi-output models, sigmoid for a single output.

    Returns
    -------
    A dict mapping class to probability for a single example, or a list
    of such dicts for a batch.
    """
    if with_logits:
        # BUG FIX: the single-logit case previously hit the softmax branch
        # (shape[-1] >= 1 is always true for non-empty outputs), which maps
        # any lone logit to probability 1.0. Single outputs need sigmoid.
        if preds.shape[-1] > 1:
            preds = torch.softmax(preds, dim=-1)
        else:
            preds = torch.sigmoid(preds)
    preds_np = preds.detach().numpy()
    # A single output column encodes P(first class); add its complement.
    if preds_np.shape[1] == 1:
        preds_np = np.hstack((preds_np, 1 - preds_np))
    # Pad the labels with placeholders when the network has more outputs
    # than classes observed so far.
    n_unobserved_classes = preds_np.shape[1] - len(classes)
    if n_unobserved_classes > 0:
        classes = classes.copy()
        for i in range(n_unobserved_classes):
            classes.append(f"unobserved {i}")
    if preds_np.shape[0] == 1:
        return dict(zip(classes, preds_np[0]))
    # BUG FIX: the batch case previously wrapped the list of dicts in
    # dict(...), which raises ValueError; return the list directly.
    return [dict(zip(classes, pred)) for pred in preds_np]
import math
import warnings
from typing import Callable, Dict, List, Type, Union, cast
import pandas as pd
import torch
from ordered_set import OrderedSet
from river import base
from river.base.typing import ClfTarget
from torch import nn
from torch.utils.hooks import RemovableHandle
from river_torch.base import DeepEstimator
from river_torch.utils.hooks import ForwardOrderTracker, apply_hooks
from river_torch.utils.tensor_conversion import (
df2tensor,
dict2tensor,
labels2onehot,
output2proba,
)
class _TestModule(torch.nn.Module):
    """Minimal two-layer softmax network used in unit tests."""

    def __init__(self, n_features):
        super().__init__()
        # Keep the layer-creation order stable so seeded runs reproduce.
        self.dense0 = torch.nn.Linear(n_features, 5)
        self.nonlin = torch.nn.ReLU()
        self.dense1 = torch.nn.Linear(5, 2)
        self.softmax = torch.nn.Softmax(dim=-1)

    def forward(self, X, **kwargs):
        hidden = self.nonlin(self.dense0(X))
        logits = self.nonlin(self.dense1(hidden))
        return self.softmax(logits)
class Classifier(DeepEstimator, base.Classifier):
    """
    Wrapper for PyTorch classification models that automatically handles
    increases in the number of classes by adding output neurons in case
    the number of observed classes exceeds the current
    number of output neurons.

    Parameters
    ----------
    module
        Torch Module that builds the autoencoder to be wrapped.
        The Module should accept parameter `n_features` so that the
        returned model's input shape can be determined based on the number
        of features in the initial training example.
    loss_fn
        Loss function to be used for training the wrapped model. Can be a
        loss function provided by `torch.nn.functional` or one of the
        following: 'mse', 'l1', 'cross_entropy',
        'binary_cross_entropy_with_logits', 'binary_crossentropy',
        'smooth_l1', 'kl_div'.
    optimizer_fn
        Optimizer to be used for training the wrapped model.
        Can be an optimizer class provided by `torch.optim` or one of the
        following: "adam", "adam_w", "sgd", "rmsprop", "lbfgs".
    lr
        Learning rate of the optimizer.
    output_is_logit
        Whether the module produces logits as output. If true, either
        softmax or sigmoid is applied to the outputs when predicting.
    is_class_incremental
        Whether the classifier should adapt to the appearance of
        previously unobserved classes by adding an unit to the output
        layer of the network. This works only if the last trainable
        layer is an nn.Linear layer. Note also, that output activation
        functions can not be adapted, meaning that a binary classifier
        with a sigmoid output can not be altered to perform multi-class
        predictions.
    device
        Device to run the wrapped model on. Can be "cpu" or "cuda".
    seed
        Random seed to be used for training the wrapped model.
    **net_params
        Parameters to be passed to the `build_fn` function aside from
        `n_features`.

    Examples
    --------
    >>> from river import metrics, preprocessing, compose, datasets
    >>> from river_torch import classification
    >>> from torch import nn
    >>> from torch import manual_seed
    >>> _ = manual_seed(42)
    >>> class MyModule(nn.Module):
    ...     def __init__(self, n_features):
    ...         super(MyModule, self).__init__()
    ...         self.dense0 = nn.Linear(n_features,5)
    ...         self.nonlin = nn.ReLU()
    ...         self.dense1 = nn.Linear(5, 2)
    ...         self.softmax = nn.Softmax(dim=-1)
    ...
    ...     def forward(self, X, **kwargs):
    ...         X = self.nonlin(self.dense0(X))
    ...         X = self.nonlin(self.dense1(X))
    ...         X = self.softmax(X)
    ...         return X
    >>> model_pipeline = compose.Pipeline(
    ...     preprocessing.StandardScaler,
    ...     Classifier(module=MyModule,
    ...                loss_fn="binary_cross_entropy",
    ...                optimizer_fn='adam')
    ... )
    >>> dataset = datasets.Phishing()
    >>> metric = metrics.Accuracy()
    >>> for x, y in dataset:
    ...     y_pred = model_pipeline.predict_one(x)  # make a prediction
    ...     metric = metric.update(y, y_pred)  # update the metric
    ...     model_pipeline = model_pipeline.learn_one(x,y)
    >>> print(f'Accuracy: {metric.get()}')
    Accuracy: 0.6728
    """

    def __init__(
        self,
        module: Type[torch.nn.Module],
        loss_fn: Union[str, Callable] = "binary_cross_entropy_with_logits",
        optimizer_fn: Union[str, Callable] = "sgd",
        lr: float = 1e-3,
        output_is_logit: bool = True,
        is_class_incremental: bool = False,
        device: str = "cpu",
        seed: int = 42,
        **kwargs,
    ):
        super().__init__(
            loss_fn=loss_fn,
            optimizer_fn=optimizer_fn,
            module=module,
            device=device,
            lr=lr,
            seed=seed,
            **kwargs,
        )
        self.observed_classes: OrderedSet[ClfTarget] = OrderedSet()
        self.output_layer: nn.Module
        self.output_is_logit = output_is_logit
        self.is_class_incremental = is_class_incremental
        self._supported_output_layers: List[Type[nn.Module]] = [nn.Linear]

    @classmethod
    def _unit_test_params(cls):
        """
        Returns a dictionary of parameters to be used for unit testing the
        respective class.

        Yields
        -------
        dict
            Dictionary of parameters to be used for unit testing the
            respective class.
        """
        yield {
            "module": _TestModule,
            "loss_fn": "binary_cross_entropy_with_logits",
            "optimizer_fn": "sgd",
        }

    @classmethod
    def _unit_test_skips(cls) -> set:
        """
        Indicates which checks to skip during unit testing.
        Most estimators pass the full test suite.
        However, in some cases, some estimators might not
        be able to pass certain checks.

        Returns
        -------
        set
            Set of checks to skip during unit testing.
        """
        return {
            "check_shuffle_features_no_impact",
            "check_emerging_features",
            "check_disappearing_features",
            "check_predict_proba_one",
            "check_predict_proba_one_binary",
        }

    def _learn(self, x: torch.Tensor, y: Union[ClfTarget, List[ClfTarget]]):
        # Single gradient step on one (batched) example.
        self.module.train()
        self.optimizer.zero_grad()
        y_pred = self.module(x)
        n_classes = y_pred.shape[-1]
        y = labels2onehot(
            y=y,
            classes=self.observed_classes,
            n_classes=n_classes,
            device=self.device,
        )
        loss = self.loss_fn(y_pred, y)
        loss.backward()
        self.optimizer.step()
        return self

    def learn_one(self, x: dict, y: ClfTarget, **kwargs) -> "Classifier":
        """
        Performs one step of training with a single example.

        Parameters
        ----------
        x
            Input example.
        y
            Target value.

        Returns
        -------
        Classifier
            The classifier itself.
        """
        # check if model is initialized
        if not self.module_initialized:
            self.kwargs["n_features"] = len(x)
            self.initialize_module(**self.kwargs)
        x_t = dict2tensor(x, device=self.device)
        # check last layer
        self.observed_classes.add(y)
        if self.is_class_incremental:
            self._adapt_output_dim()
        return self._learn(x=x_t, y=y)

    def predict_proba_one(self, x: dict) -> Dict[ClfTarget, float]:
        """
        Predict the probability of each label given the input.

        Parameters
        ----------
        x
            Input example.

        Returns
        -------
        Dict[ClfTarget, float]
            Dictionary of probabilities for each label.
        """
        if not self.module_initialized:
            self.kwargs["n_features"] = len(x)
            self.initialize_module(**self.kwargs)
        x_t = dict2tensor(x, device=self.device)
        self.module.eval()
        y_pred = self.module(x_t)
        return output2proba(
            y_pred, self.observed_classes, self.output_is_logit
        )

    def learn_many(self, X: pd.DataFrame, y: pd.Series) -> "Classifier":
        """
        Performs one step of training with a batch of examples.

        Parameters
        ----------
        X
            Input examples.
        y
            Target values.

        Returns
        -------
        Classifier
            The classifier itself.
        """
        # check if model is initialized
        if not self.module_initialized:
            self.kwargs["n_features"] = len(X.columns)
            self.initialize_module(**self.kwargs)
        X = df2tensor(X, device=self.device)
        self.observed_classes.update(y)
        if self.is_class_incremental:
            self._adapt_output_dim()
        return self._learn(x=X, y=y.tolist())

    def predict_proba_many(self, X: pd.DataFrame) -> pd.DataFrame:
        """
        Predict the probability of each label given the input.

        Parameters
        ----------
        X
            Input examples.

        Returns
        -------
        pd.DataFrame
            DataFrame of probabilities for each label.
        """
        if not self.module_initialized:
            self.kwargs["n_features"] = len(X.columns)
            self.initialize_module(**self.kwargs)
        X_t = df2tensor(X, device=self.device)
        self.module.eval()
        y_preds = self.module(X_t)
        # BUG FIX: `pd.Dataframe` does not exist (AttributeError at
        # runtime); the constructor is `pd.DataFrame`.
        return pd.DataFrame(output2proba(y_preds, self.observed_classes))

    def _adapt_output_dim(self):
        # Grow the output layer when more classes have been observed than
        # the layer currently supports (binary stays at one output unit).
        out_features_target = (
            len(self.observed_classes) if len(self.observed_classes) > 2 else 1
        )
        n_classes_to_add = out_features_target - self.output_layer.out_features
        if n_classes_to_add > 0:
            self._add_output_features(n_classes_to_add)

    def _add_output_features(self, n_classes_to_add: int) -> None:
        """
        Adds output dimensions to the model by adding new rows of weights to
        the existing weights of the last layer.

        Parameters
        ----------
        n_classes_to_add
            Number of output dimensions to add.
        """
        # New rows start from the mean of the existing output weights.
        new_weights = (
            torch.mean(cast(torch.Tensor, self.output_layer.weight), dim=0)
            .unsqueeze(1)
            .T
        )
        if n_classes_to_add > 1:
            new_weights = (
                new_weights.unsqueeze(1)
                .T.repeat(1, n_classes_to_add, 1)
                .squeeze()
            )
        self.output_layer.weight = nn.parameter.Parameter(
            torch.cat(
                [
                    cast(torch.Tensor, self.output_layer.weight),
                    cast(torch.Tensor, new_weights),
                ],
                dim=0,
            )
        )
        if self.output_layer.bias is not None:
            new_bias = torch.empty(n_classes_to_add)
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(
                self.output_layer.weight
            )
            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
            nn.init.uniform_(new_bias, -bound, bound)
            self.output_layer.bias = nn.parameter.Parameter(
                torch.cat(
                    [
                        cast(torch.Tensor, self.output_layer.bias),
                        cast(torch.Tensor, new_bias),
                    ],
                    dim=0,
                )
            )
        # BUG FIX: out_features is a plain int; the previous
        # `+= torch.Tensor([n_classes_to_add])` silently turned it into a
        # tensor, breaking the int arithmetic in _adapt_output_dim.
        self.output_layer.out_features += n_classes_to_add
        # Re-create the optimizer so it tracks the new parameters.
        self.optimizer = self.optimizer_fn(
            self.module.parameters(), lr=self.lr
        )

    def find_output_layer(self, n_features: int):
        # Trace one dummy forward pass to find the last supported layer.
        handles: List[RemovableHandle] = []
        tracker = ForwardOrderTracker()
        apply_hooks(module=self.module, hook=tracker, handles=handles)
        x_dummy = torch.empty((1, n_features), device=self.device)
        self.module(x_dummy)
        for h in handles:
            h.remove()
        if tracker.ordered_modules and isinstance(
            tracker.ordered_modules[-1], tuple(self._supported_output_layers)
        ):
            self.output_layer = tracker.ordered_modules[-1]
        else:
            warnings.warn(
                "The model will not be able to adapt its output to new "
                "classes since no linear layer output layer was found."
            )
            self.is_class_incremental = False

    def initialize_module(self, **kwargs):
        super().initialize_module(**kwargs)
        self.find_output_layer(n_features=kwargs["n_features"])
import math
from typing import Callable, Dict, List, Type, Union
import pandas as pd
import torch
from river.base.typing import ClfTarget
from torch import nn
from river_torch.base import RollingDeepEstimator
from river_torch.classification import Classifier
from river_torch.utils.tensor_conversion import (
deque2rolling_tensor,
output2proba,
)
class _TestLSTM(torch.nn.Module):
    """Minimal LSTM classifier used in unit tests."""

    def __init__(self, n_features, hidden_size=2):
        super().__init__()
        self.n_features = n_features
        self.hidden_size = hidden_size
        self.lstm = torch.nn.LSTM(
            input_size=n_features, hidden_size=hidden_size, num_layers=1
        )
        self.softmax = torch.nn.Softmax(dim=-1)

    def forward(self, X, **kwargs):
        # Only the final hidden state is used for classification.
        _, (hidden, _) = self.lstm(X)
        flat = hidden.view(-1, self.hidden_size)
        return self.softmax(flat)
class RollingClassifier(Classifier, RollingDeepEstimator):
    """
    Wrapper that feeds a sliding window of the most recent examples to the
    wrapped PyTorch classification model. The class also automatically handles
    increases in the number of classes by adding output neurons in case the
    number of observed classes exceeds the current number of output neurons.

    Parameters
    ----------
    module
        Torch Module that builds the autoencoder to be wrapped.
        The Module should accept parameter `n_features` so that the returned
        model's input shape can be determined based on the number of features
        in the initial training example.
    loss_fn
        Loss function to be used for training the wrapped model. Can be a
        loss function provided by `torch.nn.functional` or one of the
        following: 'mse', 'l1', 'cross_entropy', 'binary_crossentropy',
        'smooth_l1', 'kl_div'.
    optimizer_fn
        Optimizer to be used for training the wrapped model. Can be an
        optimizer class provided by `torch.optim` or one of the following:
        "adam", "adam_w", "sgd", "rmsprop", "lbfgs".
    lr
        Learning rate of the optimizer.
    output_is_logit
        Whether the module produces logits as output. If true, either
        softmax or sigmoid is applied to the outputs when predicting.
    is_class_incremental
        Whether the classifier should adapt to the appearance of previously
        unobserved classes by adding an unit to the output
        layer of the network. This works only if the last trainable layer
        is an nn.Linear layer. Note also, that output activation functions
        can not be adapted, meaning that a binary classifier with a sigmoid
        output can not be altered to perform multi-class predictions.
    device
        Device to run the wrapped model on. Can be "cpu" or "cuda".
    seed
        Random seed to be used for training the wrapped model.
    window_size
        Number of recent examples to be fed to the wrapped model at each step.
    append_predict
        Whether to append inputs passed for prediction to the rolling window.
    **kwargs
        Parameters to be passed to the `build_fn`
        function aside from `n_features`.

    Examples
    --------
    >>> from river_torch.classification import RollingClassifier
    >>> from river import metrics, datasets, compose, preprocessing
    >>> import torch
    >>> class MyModule(torch.nn.Module):
    ...
    ...     def __init__(self, n_features, hidden_size=1):
    ...         super().__init__()
    ...         self.n_features=n_features
    ...         self.hidden_size = hidden_size
    ...         self.lstm = torch.nn.LSTM(input_size=n_features,
    ...                                   hidden_size=hidden_size,
    ...                                   batch_first=False,
    ...                                   num_layers=1,
    ...                                   bias=False)
    ...         self.softmax = torch.nn.Softmax(dim=-1)
    ...
    ...     def forward(self, X, **kwargs):
    ...         output, (hn, cn) = self.lstm(X)
    ...         hn = hn.view(-1, self.lstm.hidden_size)
    ...         return self.softmax(hn)
    >>> dataset = datasets.Keystroke()
    >>> metric = metrics.Accuracy()
    >>> optimizer_fn = torch.optim.SGD
    >>> model_pipeline = preprocessing.StandardScaler()
    >>> model_pipeline |= RollingClassifier(
    ...     module=MyModule,
    ...     loss_fn="binary_cross_entropy",
    ...     optimizer_fn=torch.optim.SGD,
    ...     window_size=20,
    ...     lr=1e-2,
    ...     append_predict=True,
    ...     is_class_incremental=True
    ... )
    >>> for x, y in dataset.take(5000):
    ...     y_pred = model_pipeline.predict_one(x)  # make a prediction
    ...     metric = metric.update(y, y_pred)  # update the metric
    ...     model = model_pipeline.learn_one(x, y)  # make the model learn
    >>> print(f'Accuracy: {metric.get()}')
    Accuracy: 0.4552
    """

    def __init__(
        self,
        module: Type[torch.nn.Module],
        loss_fn: Union[str, Callable] = "binary_cross_entropy",
        optimizer_fn: Union[str, Callable] = "sgd",
        lr: float = 1e-3,
        output_is_logit: bool = True,
        is_class_incremental: bool = False,
        device: str = "cpu",
        seed: int = 42,
        window_size: int = 10,
        append_predict: bool = False,
        **kwargs,
    ):
        super().__init__(
            module=module,
            loss_fn=loss_fn,
            optimizer_fn=optimizer_fn,
            lr=lr,
            output_is_logit=output_is_logit,
            is_class_incremental=is_class_incremental,
            device=device,
            seed=seed,
            window_size=window_size,
            append_predict=append_predict,
            **kwargs,
        )
        self._supported_output_layers: List[Type[nn.Module]] = [
            nn.Linear,
            nn.LSTM,
            nn.RNN,
        ]

    @classmethod
    def _unit_test_params(cls):
        """
        Returns a dictionary of parameters to be used for unit testing
        the respective class.

        Yields
        -------
        dict
            Dictionary of parameters to be used for unit testing
            the respective class.
        """
        yield {
            "module": _TestLSTM,
            "optimizer_fn": "sgd",
            "lr": 1e-3,
        }

    @classmethod
    def _unit_test_skips(cls) -> set:
        """
        Indicates which checks to skip during unit testing.
        Most estimators pass the full test suite. However,
        in some cases, some estimators might not
        be able to pass certain checks.

        Returns
        -------
        set
            Set of checks to skip during unit testing.
        """
        return {
            "check_shuffle_features_no_impact",
            "check_emerging_features",
            "check_disappearing_features",
            "check_predict_proba_one",
            "check_predict_proba_one_binary",
        }

    def learn_one(
        self, x: dict, y: ClfTarget, **kwargs
    ) -> "RollingClassifier":
        """
        Performs one step of training with the most recent training examples
        stored in the sliding window.

        Parameters
        ----------
        x
            Input example.
        y
            Target value.

        Returns
        -------
        Classifier
            The classifier itself.
        """
        if not self.module_initialized:
            self.kwargs["n_features"] = len(x)
            self.initialize_module(**self.kwargs)
        self._x_window.append(list(x.values()))
        # check last layer
        self.observed_classes.add(y)
        if self.is_class_incremental:
            self._adapt_output_dim()
        # training process — only train once the window is full.
        if len(self._x_window) == self.window_size:
            x_t = deque2rolling_tensor(self._x_window, device=self.device)
            return self._learn(x=x_t, y=y)
        return self

    def predict_proba_one(self, x: dict) -> Dict[ClfTarget, float]:
        """
        Predict the probability of each label given the most recent examples
        stored in the sliding window.

        Parameters
        ----------
        x
            Input example.

        Returns
        -------
        Dict[ClfTarget, float]
            Dictionary of probabilities for each label.
        """
        if not self.module_initialized:
            self.kwargs["n_features"] = len(x)
            self.initialize_module(**self.kwargs)
        if len(self._x_window) == self.window_size:
            self.module.eval()
            x_win = self._x_window.copy()
            x_win.append(list(x.values()))
            x_t = deque2rolling_tensor(x_win, device=self.device)
            y_pred = self.module(x_t)
            proba = output2proba(y_pred, self.observed_classes)
        else:
            # Window not yet full: fall back to a uniform distribution.
            proba = self._get_default_proba()
        if self.append_predict:
            self._x_window.append(list(x.values()))
        return proba

    def learn_many(self, X: pd.DataFrame, y: pd.Series) -> "RollingClassifier":
        """
        Performs one step of training with the most recent training examples
        stored in the sliding window.

        Parameters
        ----------
        X
            Input examples.
        y
            Target values.

        Returns
        -------
        Classifier
            The classifier itself.
        """
        # check if model is initialized
        if not self.module_initialized:
            self.kwargs["n_features"] = len(X.columns)
            self.initialize_module(**self.kwargs)
        self._x_window.extend(X.values.tolist())
        self.observed_classes.update(y)
        if self.is_class_incremental:
            self._adapt_output_dim()
        if len(self._x_window) == self.window_size:
            X_t = deque2rolling_tensor(self._x_window, device=self.device)
            self._learn(x=X_t, y=y.tolist())
        return self

    def predict_proba_many(self, X: pd.DataFrame) -> pd.DataFrame:
        """
        Predict the probability of each label given the most recent examples

        Parameters
        ----------
        X

        Returns
        -------
        pd.DataFrame
            DataFrame of probabilities for each label.
        """
        if not self.module_initialized:
            self.kwargs["n_features"] = len(X.columns)
            self.initialize_module(**self.kwargs)
        x_win = self._x_window.copy()
        x_win.extend(X.values.tolist())
        if len(x_win) == self.window_size:
            self.module.eval()
            x_t = deque2rolling_tensor(x_win, device=self.device)
            probas = self.module(x_t).detach().tolist()
            if len(probas) < len(X):
                default_proba = self._get_default_proba()
                probas = [default_proba] * (len(X) - len(probas)) + probas
        else:
            default_proba = self._get_default_proba()
            probas = [default_proba] * len(X)
        return pd.DataFrame(probas)

    def _get_default_proba(self):
        # Uniform distribution over the classes observed so far; empty
        # dict when nothing has been observed yet.
        if len(self.observed_classes) > 0:
            mean_proba = (
                1 / len(self.observed_classes)
                if len(self.observed_classes) != 0
                else 0.0
            )
            proba = {c: mean_proba for c in self.observed_classes}
        else:
            proba = {c: 1.0 for c in self.observed_classes}
        return proba

    def _adapt_output_dim(self):
        out_features_target = (
            len(self.observed_classes) if len(self.observed_classes) > 2 else 1
        )
        if isinstance(self.output_layer, nn.Linear):
            n_classes_to_add = (
                out_features_target - self.output_layer.out_features
            )
            if n_classes_to_add > 0:
                mean_input_weights = torch.empty(
                    n_classes_to_add, self.output_layer.in_features
                )
                nn.init.kaiming_uniform_(mean_input_weights, a=math.sqrt(5))
                self.output_layer.weight = nn.parameter.Parameter(
                    torch.cat(
                        [self.output_layer.weight, mean_input_weights], dim=0
                    )
                )
                if self.output_layer.bias is not None:
                    new_bias = torch.empty(n_classes_to_add)
                    fan_in, _ = nn.init._calculate_fan_in_and_fan_out(
                        self.output_layer.weight
                    )
                    bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
                    nn.init.uniform_(new_bias, -bound, bound)
                    self.output_layer.bias = nn.parameter.Parameter(
                        torch.cat([self.output_layer.bias, new_bias], dim=0)
                    )
                self.output_layer.out_features += n_classes_to_add
        elif isinstance(self.output_layer, nn.LSTM):
            n_classes_to_add = (
                out_features_target - self.output_layer.hidden_size
            )
            if n_classes_to_add > 0:
                assert (
                    not self.output_layer.bidirectional
                ), "Bidirectional LSTM not supported"
                assert (
                    self.output_layer.num_layers >= 1
                ), "Multi-layer LSTM not supported"
                # LSTM weights pack the four gates (i, f, g, o) along dim 0.
                w_ii, w_if, w_ig, w_io = torch.chunk(
                    self.output_layer.weight_ih_l0, chunks=4, dim=0
                )
                w_hi, w_hf, w_hg, w_ho = torch.chunk(
                    self.output_layer.weight_hh_l0, chunks=4, dim=0
                )
                input_weights = [w_ii, w_if, w_ig, w_io]
                hidden_weights = [w_hi, w_hf, w_hg, w_ho]
                # New rows/columns start from the mean of existing weights.
                mean_input_weights = [
                    torch.mean(w, dim=0).unsqueeze(1).T for w in input_weights
                ]
                mean_hidden_weights_dim_0 = [
                    torch.mean(w, dim=0).unsqueeze(0) for w in hidden_weights
                ]
                mean_hidden_weights_dim_1 = [
                    torch.mean(w, dim=1).unsqueeze(1) for w in hidden_weights
                ]
                if n_classes_to_add > 1:
                    mean_input_weights = [
                        w.repeat(n_classes_to_add, 1)
                        for w in mean_input_weights
                    ]
                    mean_hidden_weights_dim_0 = [
                        w.repeat(n_classes_to_add, 1)
                        for w in mean_hidden_weights_dim_0
                    ]
                    mean_hidden_weights_dim_1 = [
                        w.repeat(1, n_classes_to_add)
                        for w in mean_hidden_weights_dim_1
                    ]
                self.output_layer.weight_ih_l0 = nn.parameter.Parameter(
                    torch.cat(
                        [
                            input_weights[0],
                            mean_input_weights[0],
                            input_weights[1],
                            mean_input_weights[1],
                            input_weights[2],
                            mean_input_weights[2],
                            input_weights[3],
                            mean_input_weights[3],
                        ],
                        dim=0,
                    )
                )
                self.output_layer.weight_hh_l0 = nn.parameter.Parameter(
                    torch.cat(
                        [
                            torch.cat(
                                [
                                    hidden_weights[0],
                                    mean_hidden_weights_dim_0[0],
                                    hidden_weights[1],
                                    mean_hidden_weights_dim_0[1],
                                    hidden_weights[2],
                                    mean_hidden_weights_dim_0[2],
                                    hidden_weights[3],
                                    mean_hidden_weights_dim_0[3],
                                ],
                                dim=0,
                            ),
                            torch.cat(
                                [
                                    mean_hidden_weights_dim_1[0],
                                    torch.normal(
                                        0,
                                        0.5,
                                        size=(
                                            n_classes_to_add,
                                            n_classes_to_add,
                                        ),
                                    ),
                                    mean_hidden_weights_dim_1[1],
                                    torch.normal(
                                        0,
                                        0.5,
                                        size=(
                                            n_classes_to_add,
                                            n_classes_to_add,
                                        ),
                                    ),
                                    mean_hidden_weights_dim_1[2],
                                    torch.normal(
                                        0,
                                        0.5,
                                        size=(
                                            n_classes_to_add,
                                            n_classes_to_add,
                                        ),
                                    ),
                                    mean_hidden_weights_dim_1[3],
                                    torch.normal(
                                        0,
                                        0.5,
                                        size=(
                                            n_classes_to_add,
                                            n_classes_to_add,
                                        ),
                                    ),
                                ],
                                dim=0,
                            ),
                        ],
                        dim=1,
                    )
                )
                if self.output_layer.bias:
                    new_bias_hh_l0 = torch.empty(n_classes_to_add * 4)
                    new_bias_ih_l0 = torch.empty(n_classes_to_add * 4)
                    # BUG FIX: the new bias entries were previously left as
                    # uninitialized memory from torch.empty; initialize them
                    # like Classifier._add_output_features does.
                    bound = 1 / math.sqrt(self.output_layer.hidden_size)
                    nn.init.uniform_(new_bias_hh_l0, -bound, bound)
                    nn.init.uniform_(new_bias_ih_l0, -bound, bound)
                    self.output_layer.bias_hh_l0 = nn.parameter.Parameter(
                        torch.cat(
                            [self.output_layer.bias_hh_l0, new_bias_hh_l0],
                            dim=0,
                        )
                    )
                    self.output_layer.bias_ih_l0 = nn.parameter.Parameter(
                        torch.cat(
                            [self.output_layer.bias_ih_l0, new_bias_ih_l0],
                            dim=0,
                        )
                    )
                self.output_layer.hidden_size += n_classes_to_add
        elif isinstance(self.output_layer, nn.RNN):
            n_classes_to_add = (
                out_features_target - self.output_layer.hidden_size
            )
            if n_classes_to_add > 0:
                assert (
                    not self.output_layer.bidirectional
                ), "Bidirectional RNN not supported. Set bidirectional=False."
                assert (
                    self.output_layer.num_layers >= 1
                ), "Multi-layer RNN not supported. Set num_layers=1."
                mean_input_weights = (
                    torch.mean(self.output_layer.weight_ih_l0, dim=0)
                    .unsqueeze(1)
                    .T
                )
                mean_hidden_weights_dim_0 = torch.mean(
                    self.output_layer.weight_hh_l0, dim=0
                ).unsqueeze(0)
                mean_hidden_weights_dim_1 = torch.mean(
                    self.output_layer.weight_hh_l0, dim=1
                ).unsqueeze(1)
                if n_classes_to_add > 1:
                    mean_input_weights = mean_input_weights.repeat(
                        n_classes_to_add, 1
                    )
                    mean_hidden_weights_dim_0 = (
                        mean_hidden_weights_dim_0.repeat(n_classes_to_add, 1)
                    )
                    mean_hidden_weights_dim_1 = (
                        mean_hidden_weights_dim_1.repeat(1, n_classes_to_add)
                    )
                self.output_layer.weight_ih_l0 = nn.parameter.Parameter(
                    torch.cat(
                        [self.output_layer.weight_ih_l0, mean_input_weights],
                        dim=0,
                    )
                )
                self.output_layer.weight_hh_l0 = nn.parameter.Parameter(
                    torch.cat(
                        [
                            torch.cat(
                                [
                                    self.output_layer.weight_hh_l0,
                                    mean_hidden_weights_dim_0,
                                ],
                                dim=0,
                            ),
                            torch.cat(
                                [
                                    mean_hidden_weights_dim_1,
                                    torch.normal(
                                        0,
                                        0.5,
                                        size=(
                                            n_classes_to_add,
                                            n_classes_to_add,
                                        ),
                                    ),
                                ],
                                dim=0,
                            ),
                        ],
                        dim=1,
                    )
                )
                if self.output_layer.bias:
                    new_bias_hh_l0 = torch.empty(n_classes_to_add)
                    new_bias_ih_l0 = torch.empty(n_classes_to_add)
                    # BUG FIX: initialize the new bias entries (previously
                    # uninitialized memory), mirroring the LSTM/Linear paths.
                    bound = 1 / math.sqrt(self.output_layer.hidden_size)
                    nn.init.uniform_(new_bias_hh_l0, -bound, bound)
                    nn.init.uniform_(new_bias_ih_l0, -bound, bound)
                    self.output_layer.bias_hh_l0 = nn.parameter.Parameter(
                        torch.cat(
                            [self.output_layer.bias_hh_l0, new_bias_hh_l0],
                            dim=0,
                        )
                    )
                    self.output_layer.bias_ih_l0 = nn.parameter.Parameter(
                        torch.cat(
                            [self.output_layer.bias_ih_l0, new_bias_ih_l0],
                            dim=0,
                        )
                    )
                self.output_layer.hidden_size += n_classes_to_add
        # Re-create the optimizer so it tracks the new parameters.
        self.optimizer = self.optimizer_fn(
            self.module.parameters(), lr=self.lr
        )
</br>
<p align="center">
<img height="80px" src="docs/img/logo.svg" alt="river_logo">
</p>
</br>
<p align="center">
<!-- Tests -->
<a href="https://github.com/online-ml/river/actions?query=workflow%3Atests+branch%3Amaster">
<img src="https://github.com/online-ml/river/workflows/tests/badge.svg?branch=master" alt="tests">
</a>
<!-- Code coverage -->
<a href="https://codecov.io/gh/online-ml/river">
<img src="https://codecov.io/gh/online-ml/river/branch/master/graph/badge.svg?token=luK6eFoMa9"/>
</a>
<!-- Documentation -->
<a href="https://riverml.xyz">
<img src="https://img.shields.io/website?label=docs&style=flat-square&url=https%3A%2F%2Friverml.xyz%2F" alt="documentation">
</a>
<!-- Roadmap -->
<a href="https://www.notion.so/d1e86fcdf21e4deda16eedab2b3361fb?v=503f44740b8b44a99a961aa96e9e46e1">
<img src="https://img.shields.io/website?label=roadmap&style=flat-square&url=https://www.notion.so/d1e86fcdf21e4deda16eedab2b3361fb?v=503f44740b8b44a99a961aa96e9e46e1" alt="roadmap">
</a>
<!-- PyPI -->
<a href="https://pypi.org/project/river">
<img src="https://img.shields.io/pypi/v/river.svg?label=release&color=blue&style=flat-square" alt="pypi">
</a>
<!-- PePy -->
<a href="https://pepy.tech/project/river">
<img src="https://img.shields.io/badge/dynamic/json?style=flat-square&maxAge=86400&label=downloads&query=%24.total_downloads&url=https%3A%2F%2Fapi.pepy.tech%2Fapi%2Fprojects%2Friver" alt="pepy">
</a>
<!-- License -->
<a href="https://opensource.org/licenses/BSD-3-Clause">
<img src="https://img.shields.io/badge/License-BSD%203--Clause-blue.svg?style=flat-square" alt="bsd_3_license">
</a>
</p>
</br>
<p align="center">
River is a Python library for <a href="https://www.wikiwand.com/en/Online_machine_learning">online machine learning</a>. It is the result of a merger between <a href="https://github.com/MaxHalford/creme">creme</a> and <a href="https://github.com/scikit-multiflow/scikit-multiflow">scikit-multiflow</a>. River's ambition is to be the go-to library for doing machine learning on streaming data.
</p>
## ⚡️ Quickstart
As a quick example, we'll train a logistic regression to classify the [website phishing dataset](http://archive.ics.uci.edu/ml/datasets/Website+Phishing). Here's a look at the first observation in the dataset.
```python
>>> from pprint import pprint
>>> from river import datasets
>>> dataset = datasets.Phishing()
>>> for x, y in dataset:
... pprint(x)
... print(y)
... break
{'age_of_domain': 1,
'anchor_from_other_domain': 0.0,
'empty_server_form_handler': 0.0,
'https': 0.0,
'ip_in_url': 1,
'is_popular': 0.5,
'long_url': 1.0,
'popup_window': 0.0,
'request_from_other_domain': 0.0}
True
```
Now let's run the model on the dataset in a streaming fashion. We sequentially interleave predictions and model updates. Meanwhile, we update a performance metric to see how well the model is doing.
```python
>>> from river import compose
>>> from river import linear_model
>>> from river import metrics
>>> from river import preprocessing
>>> model = compose.Pipeline(
... preprocessing.StandardScaler(),
... linear_model.LogisticRegression()
... )
>>> metric = metrics.Accuracy()
>>> for x, y in dataset:
... y_pred = model.predict_one(x) # make a prediction
... metric = metric.update(y, y_pred) # update the metric
... model = model.learn_one(x, y) # make the model learn
>>> metric
Accuracy: 89.20%
```
## 🛠 Installation
River is intended to work with **Python 3.6 or above**. Installation can be done with `pip`:
```sh
pip install river
```
⚠️ However, we are currently [waiting](https://github.com/pypa/pypi-support/issues/651) for the name "river" to be released on PyPI.
There are [wheels available](https://pypi.org/project/river/#files) for Linux, MacOS, and Windows, which means that you most probably won't have to build River from source.
You can install the latest development version from GitHub as so:
```sh
pip install git+https://github.com/online-ml/river --upgrade
```
Or, through SSH:
```sh
pip install git+ssh://git@github.com/online-ml/river.git --upgrade
```
## 🧠 Philosophy
Machine learning is often done in a batch setting, whereby a model is fitted to a dataset in one go. This results in a static model which has to be retrained in order to learn from new data. In many cases, this is neither elegant nor efficient, and usually incurs [a fair amount of technical debt](https://research.google/pubs/pub43146/). Indeed, if you're using a batch model, then you need to think about maintaining a training set, monitoring real-time performance, model retraining, etc.
With River, we encourage a different approach, which is to continuously learn from a stream of data. This means that the model processes one observation at a time, and can therefore be updated on the fly. This allows it to learn from massive datasets that don't fit in main memory. Online machine learning also integrates nicely in cases where new data is constantly arriving. It shines in many use cases, such as time series forecasting, spam filtering, recommender systems, CTR prediction, and IoT applications. If you're bored with retraining models and want to instead build dynamic models, then online machine learning (and therefore River!) might be what you're looking for.
Here are some benefits of using River (and online machine learning in general):
- **Incremental**: models can update themselves in real-time.
- **Adaptive**: models can adapt to [concept drift](https://www.wikiwand.com/en/Concept_drift).
- **Production-ready**: working with data streams makes it simple to replicate production scenarios during model development.
- **Efficient**: models don't have to be retrained and require little compute power, which [lowers their carbon footprint](https://arxiv.org/abs/1907.10597)
- **Fast**: when the goal is to learn and predict with a single instance at a time, then River is an order of magnitude faster than PyTorch, Tensorflow, and scikit-learn.
## 🔥 Features
- Linear models with a wide array of optimizers
- Nearest neighbors, decision trees, naïve Bayes
- [Progressive model validation](https://hunch.net/~jl/projects/prediction_bounds/progressive_validation/coltfinal.pdf)
- Model pipelines as a first-class citizen
- Anomaly detection
- Recommender systems
- Time series forecasting
- Imbalanced learning
- Clustering
- Feature extraction and selection
- Online statistics and metrics
- Built-in datasets
- And [much more](https://riverml.xyz/latest/api/overview/)
## 🔗 Useful links
- [Documentation](https://riverml.xyz)
- [Benchmarks](https://github.com/online-ml/river/tree/master/benchmarks)
- [Issue tracker](https://github.com/online-ml/river/issues)
- [Package releases](https://pypi.org/project/river/#history)
## 👁️ Media
- PyData Amsterdam 2019 presentation ([slides](https://maxhalford.github.io/slides/river-pydata/), [video](https://www.youtube.com/watch?v=P3M6dt7bY9U&list=PLGVZCDnMOq0q7_6SdrC2wRtdkojGBTAht&index=11))
- [Toulouse Data Science Meetup presentation](https://maxhalford.github.io/slides/river-tds/)
- [Machine learning for streaming data with River](https://towardsdatascience.com/machine-learning-for-streaming-data-with-river-dacf5fb469df)
- [Hong Kong Data Science Meetup presentation](https://maxhalford.github.io/slides/hkml2020.pdf)
## 👍 Contributing
Feel free to contribute in any way you like, we're always open to new ideas and approaches.
There are three ways for users to get involved:
- [Issue tracker](https://github.com/online-ml/river/issues): this place is meant to report bugs, request for minor features, or small improvements. Issues should be short-lived and solved as fast as possible.
- [Discussions](https://github.com/online-ml/river/discussions): you can ask for new features, submit your questions and get help, propose new ideas, or even show the community what you are achieving with River! If you have a new technique or want to port a new functionality to River, this is the place to discuss.
- [Roadmap](https://www.notion.so/d1e86fcdf21e4deda16eedab2b3361fb?v=503f44740b8b44a99a961aa96e9e46e1): you can check what we are doing, what are the next planned milestones for River, and look for cool ideas that still need someone to make them become a reality!
Please check out the [contribution guidelines](https://github.com/online-ml/river/blob/master/CONTRIBUTING.md) if you want to bring modifications to the code base. You can view the list of people who have contributed [here](https://github.com/online-ml/river/graphs/contributors).
## ❤️ They've used us
These are companies that we know have been using River, be it in production or for prototyping.
<p align="center">
<img width="70%" src="https://docs.google.com/drawings/d/e/2PACX-1vQbCUQkTU74dBf411r4nDl4udmqOEbLqzRtokUC-N7JDJUA7BGTfnMGmiMNqbcSuOaWAmazp1rFGwDC/pub?w=1194&h=567" alt="companies">
</p>
Feel welcome to get in touch if you want us to add your company logo!
## 🤝 Affiliations
**Sponsors**
<p align="center">
<img width="55%" src="https://docs.google.com/drawings/d/e/2PACX-1vSagEhWAjDsb0c24En_fhWAf9DJZbyh5YjU7lK0sNowD2m9uv9TuFm-U77k6ObqTyN2mP05Avf6TCJc/pub?w=2073&h=1127" alt="sponsors">
</p>
**Collaborating institutions and groups**
<p align="center">
<img width="55%" src="https://docs.google.com/drawings/d/e/2PACX-1vQB0C8YgnkCt_3C3cp-Csaw8NLZUwishdbJFB3iSbBPUD0AxEVS9AlF-Rs5PJq8UVRzRtFwZIOucuXj/pub?w=1442&h=489" alt="collaborations">
</p>
## 💬 Citation
If `river` has been useful for your research and you would like to cite it in a scientific publication, please refer to this [paper](https://arxiv.org/abs/2012.04740):
```bibtex
@misc{2020river,
title={River: machine learning for streaming data in Python},
author={Jacob Montiel and Max Halford and Saulo Martiello Mastelini
and Geoffrey Bolmier and Raphael Sourty and Robin Vaysse
and Adil Zouitine and Heitor Murilo Gomes and Jesse Read
and Talel Abdessalem and Albert Bifet},
year={2020},
eprint={2012.04740},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
```
## 📝 License
River is free and open-source software licensed under the [3-clause BSD license](https://github.com/online-ml/river/blob/master/LICENSE).
| /river-0.7.0.tar.gz/river-0.7.0/README.md | 0.601477 | 0.93276 | README.md | pypi |
# rivers2stratigraphy
[](https://travis-ci.org/sededu/rivers2stratigraphy)
[](https://ci.appveyor.com/project/amoodie/rivers2stratigraphy/branch/master)
[](https://badge.fury.io/gh/sededu%2Frivers2stratigraphy)
[](https://anaconda.org/sededu/rivers2stratigraphy)
[](https://badge.fury.io/py/rivers2stratigraphy)
[](https://anaconda.org/sededu/rivers2stratigraphy)
Explore how a river becomes stratigraphy
<img src="https://github.com/sededu/rivers2stratigraphy/blob/master/private/rivers2stratigraphy_demo.gif" alt="demo_gif">
This readme file provides an overview of the installation and setup process, as well as a brief description of the module worksheets available.
This repository is also linked into the [SedEdu suite of education modules](https://github.com/sededu/sededu), and can be accessed there as well.
## About the model
Stratigraphic model based on LAB models, i.e., a geometric channel body is deposited in a "matrix" of floodplain mud.
The channel is always fixed to the basin surface and subsidence is only control on vertical stratigraphy.
Horizontal stratigraphy is set by 1) lateral migration (drawn from a pdf) and dampened for realism, and 2) avulsion that is set to a fixed value.
## Installing and running the module
This module depends on Python 3, `tkinter`, and the Python packages `numpy`, `scipy`, `matplotlib`, and `shapely`.
### Installing Python 3
If you are new to Python, it is recommended that you install Anaconda, which is an open source distribution of Python which includes many basic scientific libraries, some of which are used in the module.
Anaconda can be downloaded at https://www.anaconda.com/download/ for Windows, macOS, and Linux.
If you do not have storage space on your machine for Anaconda or wish to install a smaller version of Python for another reason, see below on options for Miniconda or vanilla Python.
1. Visit the website for Anaconda https://www.anaconda.com/download/ and select the installer for your operating system.
__Be sure to select the Python 3.x installation.__
2. Start the installer.
3. If prompted, select to "install just for me", unless you know what you are doing.
4. When prompted to add Anaconda to the path during installation, select _yes_ if you __know__ you do not have any other Python installed on your computer; otherwise select _no_.
See below for detailed instructions on installing `rivers2stratigraphy` for your operating system.
### Installing the module
If you installed Anaconda Python or Miniconda, you can follow the instructions below for your operating system.
Otherwise see the instructions for PyPi installation below.
__Please__ [open an issue](https://github.com/sededu/rivers2stratigraphy/issues) if you encounter any troubles installing or any error messages along the way!
Please include 1) operating system, 2) installation method, and 3) copy-paste the error.
#### Windows users
1. Open your "start menu" and search for the "Anaconda prompt"; start this application.
2. Install with the module type the following command and hit "enter":
```
conda install -c sededu rivers2stratigraphy
```
If asked to proceed, type `Y` and press "enter" to continue installation.
3. This process may take a few minutes as the necessary source code is downloaded.
If the installation succeeds, proceed below to the "Run the module" section.
__Note on permissions:__ you may need to run as administrator on Windows.
#### Mac OSX and Linux users
__Linux users:__ you will need to also install `tkinter` before trying to install the module below package through `conda` or `pip3`.
On Ubuntu this is done with `sudo apt install python3-tk`.
<!-- Windows and Mac distributions should come with `python3-tk` installed. -->
1. Install the module by opening a terminal and typing the following command.
```
conda install -c sededu rivers2stratigraphy
```
If asked to proceed, type `Y` and press enter to continue installation.
2. This process may take a few minutes as the necessary source code is downloaded.
If the installation succeeds, proceed below to the "Run the module" section.
__Note on permissions:__ you may need to use `sudo` on OSX and Linux.
#### Advanced user installations
To install with `pip` from Pypi use (not recommended for entry-level users):
```
pip3 install pyqt rivers2stratigraphy
```
See below instructions for downloading the source code if you wish to be able to modify the source code for development or for exploration.
### Run the module
1. Open a Python shell by typing `python` (or `python3`) at the terminal (OSX and Linux users) or at the Conda or Command Prompt (Windows users).
2. Run the module from the Python shell with:
```
import rivers2stratigraphy
```
Instructions will indicate to use the following command to then run the module:
```
rivers2stratigraphy.run()
```
Alternatively, you can do this in one line from the standard terminal with:
```
python -c "import rivers2stratigraphy; rivers2stratigraphy.run()"
```
Alternatively, run the module with provided script (this is the hook used for launching from SedEdu):
```
python3 <path-to-installation>run_rivers2stratigraphy.py
```
Please [open an issue](https://github.com/sededu/rivers2stratigraphy/issues) if you encounter any additional error messages!
Please include 1) operating system, 2) installation method, and 3) copy-paste the error.
#### Smaller Python installation options
Note that if you do not want to install the complete Anaconda Python distribution you can install [Miniconda](https://conda.io/miniconda.html) (a smaller version of Anaconda), or you can install Python alone and use a package manager called pip to do the installation.
You can get [Python and pip together here](https://www.python.org/downloads/).
## Development
This module is under ongoing development to improve stability and features and optimize performance.
The module also requires occasional maintenance due to dependency updates.
If you are interested in contributing to the code-base please see below for instructions.
If you are interested in contributing to the accompanying worksheets/activities (which would be greatly appreciated!) please see [Writing Activities for SedEdu](https://github.com/sededu/sededu/blob/develop/docs/writing_activities.md)
#### Download the source code
You can download this entire repository as a `.zip` by clicking the "Clone or download button on this page", or by [clicking here](https://github.com/sededu/rivers2stratigraphy/archive/master.zip) to get a `.zip` folder.
Unzip the folder in your preferred location.
If you have installed `git` and are comfortable working with it, you can simply clone the repository to your preferred location.
```
git clone https://github.com/sededu/rivers2stratigraphy.git
```
Open a pull request when you want a review or some comments!
| /rivers2stratigraphy-0.3.17.tar.gz/rivers2stratigraphy-0.3.17/README.md | 0.454714 | 0.967533 | README.md | pypi |
import collections
import collections.abc
def recursive_update(source, overrides, overwrite_nones=False):
    """Recursively merge ``overrides`` into ``source``.

    Modifies ``source`` in place and also returns it.

    :param source: Nested mapping to update.
    :param overrides: Mapping whose values take precedence over ``source``.
    :param overwrite_nones: When True, ``None`` values in ``overrides``
        overwrite existing values in ``source``; otherwise they are skipped.
    :return: The updated ``source`` mapping.
    """
    for key, value in overrides.items():
        # Skip None overrides unless overwrite_nones explicitly allows them.
        if (value is not None and not overwrite_nones) or (overwrite_nones is True):
            if isinstance(value, collections.abc.Mapping):
                # Merge nested mappings instead of replacing them wholesale.
                # Propagate overwrite_nones so nested levels behave the same
                # as the top level (it was previously dropped on recursion).
                # Note: collections.Mapping was removed in Python 3.10;
                # collections.abc.Mapping is the supported spelling.
                source[key] = recursive_update(
                    source.get(key, {}), value, overwrite_nones=overwrite_nones
                )
            else:
                source[key] = value
    return source
def merge_lists(list1, list2):
    """Return the union of two lists while supporting unhashable items.

    ``set`` is deliberately avoided so dicts (and other unhashable values)
    survive: plain items from ``list2`` are added only when they do not
    already appear in ``list1``, and every dict found in either list is
    folded into a single merged dict appended at the end of the result.
    """
    merged = []
    dicts = []
    # Partition list1 first (kept unconditionally), then the novel items
    # of list2, preserving the original ordering.
    for item in list1:
        (dicts if isinstance(item, dict) else merged).append(item)
    for item in list2:
        if item in list1:
            continue
        (dicts if isinstance(item, dict) else merged).append(item)
    combined = {}
    for dct in dicts:
        combined = recursively_merge_dictionaries(combined, dct)
    merged.append(combined)
    return merged
def recursively_merge_dictionaries(updated_item, overwriting_item,
                                   union_lists=False):
    """Deep-merge ``overwriting_item`` into ``updated_item``.

    A plain ``dict.update`` would replace nested dicts wholesale, hence the
    recursion. On key collisions ``overwriting_item`` wins. ``updated_item``
    is modified in place and returned.

    :param union_lists: When True, list values present in both mappings are
        unioned via :func:`merge_lists` instead of replaced.
    """
    res = updated_item
    for key, val in overwriting_item.items():
        existing = updated_item.get(key)
        if union_lists and isinstance(val, list) and isinstance(existing, list):
            res[key] = merge_lists(val, existing)
        elif not isinstance(val, dict) or key not in updated_item:
            res[key] = val
        else:
            # Both values are dicts: merge recursively, propagating
            # union_lists so nested lists behave consistently (previously
            # the flag was silently dropped on recursion).
            res[key] = recursively_merge_dictionaries(
                existing, val, union_lists=union_lists)
    return res
import base64
import calendar
import collections
import collections.abc
import datetime
import decimal
import uuid

import bson
from bson import ObjectId
import simplejson as json
def _datetime_to_millis(dtm):
"""Convert datetime to milliseconds since epoch UTC."""
if dtm.utcoffset() is not None:
dtm = dtm - dtm.utcoffset()
return int(calendar.timegm(dtm.timetuple()) * 1000 +
dtm.microsecond / 1000)
def object_hook(dct):
    """JSON ``object_hook`` decoding MongoDB extended-JSON constructs.

    Handles ``$oid``, ``$date``, ``$binary``, ``$uuid`` and ``$undefined``
    keys; any other dict is returned unchanged.
    """
    if "$oid" in dct:
        return bson.ObjectId(str(dct["$oid"]))
    if "$date" in dct:
        dtm = dct["$date"]
        # mongoexport 2.6 and newer emits ISO-8601 strings.
        if isinstance(dtm, str):
            # Split the timestamp from its (optional) UTC offset suffix.
            if dtm[-1] == 'Z':
                dt = dtm[:-1]
                offset = 'Z'
            elif dtm[-3] == ':':
                # (+|-)HH:MM
                dt = dtm[:-6]
                offset = dtm[-6:]
            elif dtm[-5] in ('+', '-'):
                # (+|-)HHMM
                dt = dtm[:-5]
                offset = dtm[-5:]
            elif dtm[-3] in ('+', '-'):
                # (+|-)HH
                dt = dtm[:-3]
                offset = dtm[-3:]
            else:
                dt = dtm
                offset = ''
            aware = datetime.datetime.strptime(
                dt, "%Y-%m-%dT%H:%M:%S.%f")
            if offset and offset != 'Z':
                # Convert the textual offset to seconds and shift to UTC.
                if len(offset) == 6:
                    hours, minutes = offset[1:].split(':')
                    secs = (int(hours) * 3600 + int(minutes) * 60)
                elif len(offset) == 5:
                    secs = (int(offset[1:3]) * 3600 + int(offset[3:]) * 60)
                elif len(offset) == 3:
                    secs = int(offset[1:3]) * 3600
                if offset[0] == "-":
                    secs *= -1
                aware = aware - datetime.timedelta(seconds=secs)
            # Return a naive datetime normalised to UTC.
            return aware.replace(tzinfo=None)
        # mongoexport 2.6 and newer, time before the epoch (SERVER-15275)
        # (collections.abc.Mapping: the bare collections alias was removed
        # in Python 3.10).
        elif isinstance(dtm, collections.abc.Mapping):
            millis = int(dtm["$numberLong"])
        # mongoexport before 2.6 emits raw milliseconds.
        else:
            millis = int(dtm)
        return millis
    if "$binary" in dct:
        if isinstance(dct["$type"], int):
            dct["$type"] = "%02x" % dct["$type"]
        subtype = int(dct["$type"], 16)
        if subtype >= 0xffffff80:  # Handle mongoexport values
            subtype = int(dct["$type"][6:], 16)
        data = base64.b64decode(dct["$binary"].encode())
        # Special handling for the UUID binary subtypes (3 = legacy,
        # 4 = standard). The previous code compared the int subtype
        # against the uuid.UUID class itself, which could never match.
        if subtype in (3, 4):
            return uuid.UUID(bytes=data)
        return bytes(data)
    if "$uuid" in dct:
        return uuid.UUID(dct["$uuid"])
    if "$undefined" in dct:
        return None
    return dct
def default(obj):
    """JSON ``default`` hook encoding BSON/Python types as extended JSON.

    We preserve key order when rendering ObjectId, datetime, bytes, UUID
    and Decimal values. Unsupported types fall through to an implicit
    ``None``.
    """
    if isinstance(obj, ObjectId):
        return {"$oid": str(obj)}
    if isinstance(obj, datetime.datetime):
        millis = _datetime_to_millis(obj)
        return {"$date": millis}
    if isinstance(obj, bytes):
        # Fixed: this previously returned a *set* literal
        # ({'$binary', ...}) instead of a dict. Also emit the generic
        # binary subtype ("00") so object_hook can round-trip the value.
        return {"$binary": base64.b64encode(obj).decode(), "$type": "00"}
    if isinstance(obj, uuid.UUID):
        return {"$uuid": obj.hex}
    if isinstance(obj, decimal.Decimal):
        return {"$numberDecimal": str(obj)}
def loads(s, *args, **kwargs):
    """Deserialize JSON text ``s``, decoding MongoDB extended-JSON
    constructs (``$oid``, ``$date``, ``$binary``, ...) via
    :func:`object_hook`. Extra arguments are forwarded to ``json.loads``.
    """
    return json.loads(s, object_hook=object_hook, *args, **kwargs)
def dumps(obj, *args, **kwargs):
    """Serialize ``obj`` to JSON, encoding ObjectId/datetime/bytes/UUID/
    Decimal values via :func:`default`. Extra arguments are forwarded to
    ``json.dumps``.
    """
    return json.dumps(obj, default=default, *args, **kwargs)
def convert_oid(s):
    """Best-effort coercion of ``s`` to a BSON ObjectId.

    Returns ``s`` unchanged when the ObjectId constructor rejects it
    (e.g. not a valid 24-character hex string).
    """
    try:
        return ObjectId(s)
    except Exception:
        # Invalid input: fall back to the original value.
        return s
import yaml
import click
import pathlib
def get_context():
    """Return the current click context object, or ``{}`` when there is
    no active CLI invocation (e.g. called from tests or plain scripts).
    """
    try:
        return click.get_current_context(silent=True).obj or {}
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # still propagate; any other failure means "no context available".
        return {}
def import_model(loader, node):
    """yaml constructor: load the model yaml referenced by a ``!model`` tag.

    The tag's scalar value is a path relative to the project's models
    directory (``MODELS_DIR`` from the click context, ``./models`` default).
    """
    ctx = get_context()
    base_dir = ctx.get('MODELS_DIR') or pathlib.Path('./models')  # Path class
    relative_path = loader.construct_scalar(node)
    with open(base_dir / relative_path, 'r') as model_file:
        return yaml.load(model_file, Loader=yaml.SafeLoader)
def import_sql(loader, node):
    """yaml constructor: read the SQL file referenced by a ``!sql`` tag.

    The tag's scalar value is a path relative to the project's sqls
    directory (``SQLS_DIR`` from the click context, ``./sqls`` default).
    """
    ctx = get_context()
    base_dir = ctx.get('SQLS_DIR') or pathlib.Path('./sqls')  # Path class
    relative_path = loader.construct_scalar(node)
    with open(base_dir / relative_path, 'r') as sql_file:
        return sql_file.read().strip()
def import_maps(loader, node):
    """yaml constructor: load a field-mapping yaml referenced by ``!map``.

    Returns the ``fields`` list of the mapping file, falling back to its
    ``mapping`` list when ``fields`` is absent or empty.
    """
    ctx = get_context()
    base_dir = ctx.get('MAPS_DIR') or pathlib.Path('./maps')
    relative_path = loader.construct_scalar(node)
    with open(base_dir / relative_path, 'r') as map_file:
        mapping = yaml.load(map_file, Loader=yaml.SafeLoader)
        return mapping.get('fields', []) or mapping.get('mapping', [])
def import_yaql(loader, node):
    """yaml constructor stub for ``!ref`` yaql expressions.

    Currently only extracts the constructed sequence; evaluating the
    expression against the document is still TODO (leftover debug
    ``print`` removed).
    """
    param = loader.construct_sequence(node.value[0], deep=True)
    # do something with the param here
    return param
def get_loader():
    """Register the custom river tag constructors on yaml.SafeLoader and
    return it. Both ``!tag`` and ``$tag`` spellings are accepted.
    """
    constructors = {
        '!model': import_model,
        '$model': import_model,
        '!sql': import_sql,
        '$sql': import_sql,
        '!map': import_maps,
        '$map': import_maps,
        # '!ref'/'$ref' (import_yaql) intentionally not registered yet.
    }
    for tag, constructor in constructors.items():
        yaml.SafeLoader.add_constructor(tag, constructor)
    return yaml.SafeLoader
import uuid
import bson
import click
import simplejson as json
from rivery_cli.globals import global_keys, global_settings
from rivery_cli.rivery_session import RiverySession
from rivery_cli.utils import logicode_utils
class RiverConverter(object):
    """Converts a river yaml entity definition into a Rivery API payload.

    Acts as a dispatcher base class: per-type converters are implemented
    by subclasses named ``<Type>Converter`` (e.g. ``LogicConverter``) and
    looked up dynamically by :meth:`make_type_cls` based on the ``type``
    key of the yaml definition.
    """
    def __init__(self, content):
        """
        :param content: The parsed yaml entity, including the
            ``definition`` block plus ``entity_name``/``cross_id`` keys.
        """
        self.content = content
        self.definition = self.content.get(global_keys.DEFINITION)
        # Skeleton of the API payload; subclasses fill in the task config.
        self.river_full_definition = {
            global_keys.RIVER_DEF: {
                global_keys.SHARED_PARAMS: {
                },
                global_keys.RIVER_NAME: self.river_name,
                global_keys.RIVER_TYPE: self.river_type,
                global_keys.RIVER_DESCRIPTION: self.description,
                global_keys.IS_SCHEDULED: False,
                global_keys.SOURCE_TYPE: self.river_type
            },
            global_keys.TASKS_DEF: [{
                global_keys.TASK_CONFIG: {},
                global_keys.TASK_TYPE_ID: self.river_type
            }],
            global_keys.CROSS_ID: "",
        }
        # Notification event names a river definition may subscribe to.
        self.notification_events = [
            "on_error", "on_warning"
        ]
    def get_sub_converter(self):
        """Instantiate the type-specific converter for this definition."""
        converter_class = self.make_type_cls(type_=self.river_type)
        return converter_class(**self.definition)
    @classmethod
    def make_type_cls(cls, type_: str):
        """Return the subclass handling ``type_``.

        Subclasses are matched by the ``<Type>Converter`` naming
        convention. Returns ``None`` when no subclass matches —
        NOTE(review): callers appear to assume a match; confirm unknown
        types are rejected upstream.
        """
        for _cls_ in cls.__subclasses__():
            if _cls_.__name__ == f'{type_.title()}Converter':
                return _cls_
    @property
    def river_type(self):
        """River type string from the yaml definition (e.g. ``logic``)."""
        type_ = self.definition.get('type')
        return type_
    @property
    def river_name(self):
        """Human-readable river name from the definition."""
        return self.definition.get('name')
    @property
    def entity_id(self):
        """The yaml entity name, used as the entity identifier."""
        return self.content.get('entity_name')
    @property
    def description(self):
        """River description, defaulting to an empty string."""
        return self.definition.get('description') or ''
    @property
    def properties(self):
        """Type-specific ``properties`` mapping of the definition."""
        return self.definition.get('properties') or {}
    @property
    def cross_id(self):
        """Cross-environment river id from the entity content."""
        return self.content.get('cross_id')
    @property
    def id(self):
        """Alias of :attr:`cross_id`."""
        return self.content.get('cross_id')
class LogicConverter(RiverConverter):
    """Converts a Logic river yaml definition into a Rivery API payload."""
    validation_schema_path = '../schemas/river/logic.yaml'
    # Node kinds accepted in the yaml ("container" groups child steps).
    valid_steps = {'container', 'step'}
    # Supported low-level step types.
    step_types = {"container", "action", "river", "sql"}
    task_type_id = "logic"
    datasource_id = "logic"
    # Keys whose non-empty values must be cast to bson.ObjectId for the API.
    step_bson_converter = ['river_id', 'action_id', 'gConnection',"connection_id", "fzConnection"]
    def __init__(self, **kwargs):
        """Initialize the base converter and the logic variables registry."""
        super(LogicConverter, self).__init__(**kwargs)
        # Logic variables; populated from the definition during convert().
        self.vars = {}
def bson_converter(self, dct):
""" Convert specific keys to objectId"""
newdct = {}
for k, v in dct.items():
if k in self.step_bson_converter and v:
newdct[k] = bson.ObjectId(v)
else:
newdct[k] = v
return newdct
def valid_step(self, step_type):
""" Check validation on step"""
if step_type not in self.step_types:
raise ValueError(f'Step {step_type} is not compatible in logic rivers')
    def steps_converter(self, steps: list, code_dir: str) -> [list, list]:
        """Convert yaml step definitions into API logic step nodes.

        Containers are converted recursively; python code steps have their
        source files collected for upload.

        :param steps: A list of yaml definitions of steps, validated by
            the schema at ``self.validation_schema_path``.
        :param code_dir: The code directory configured in the project.yaml.
        :return: Tuple of (API step nodes, list of ``{file_name: path}``
            dicts of python files to upload).
        """
        all_steps = []
        files_to_upload = []
        for step in steps:
            # Init the current step
            current_step = {}
            # Get the type of every step, and check if it's valid or not.
            type_ = step.pop('type', 'step') or 'step'
            assert type_ in self.valid_steps, \
                f'Invalid step type: {type_}. Valid types: {",".join(self.valid_steps)}'
            if type_ == 'container':
                # Pop the steps definitions from the container, in order to use it in the container class
                container_steps = step.pop('steps', [])
                # This is a container. Make the LogicContainer class initiation,
                # and use the steps converter
                assert container_steps, 'Container must include at least one step'
                current_step[global_keys.CONTAINER_RUNNING] = step.pop(global_keys.CONTAINER_RUNNING, 'run_once')
                current_step.update(step)
                # Accept both snake_case and camelCase aliases from the yaml.
                current_step[global_keys.STEP_NAME] = step.get('step_name') or step.get('name')
                current_step[global_keys.IS_ENABLED] = step.get('is_enabled') or step.get('isEnabled')
                current_step[global_keys.IS_PARALLEL] = step.get('is_parallel') or \
                                                        step.get('isParallel') or step.get('parallel')
                # Recursively convert the container's child steps.
                current_step[global_keys.NODES], inner_files_to_upload = self.steps_converter(
                    steps=container_steps,
                    code_dir=code_dir
                )
                files_to_upload += inner_files_to_upload
                all_steps.append(current_step)
            elif type_ == 'step':
                # This is "low level" step. Means, it is not container in any kind.
                content = {}
                primary_type = step.pop('block_primary_type', 'sql')
                block_db_type = step.pop('block_db_type', primary_type)
                content[global_keys.BLOCK_PRIMARY_TYPE] = primary_type
                content[global_keys.BLOCK_TYPE] = block_db_type
                content[global_keys.BLOCK_DB_TYPE] = block_db_type
                code_type = step.pop(global_keys.CODE_TYPE, False)
                if code_type:
                    content[global_keys.CODE_TYPE] = code_type
                # For each step with python code, collect the referenced file for upload.
                if content.get(global_keys.CODE_TYPE) == global_keys.PYTHON_CODE_TYPE:
                    click.echo("A Logic River with a Python step is configured. Will upload the file.")
                    python_file_name = step.get(global_keys.FILE_NAME)
                    if not python_file_name:
                        raise Exception("Please add a python_file_name in your river.yaml configuration.")
                    full_file_path = logicode_utils.verify_and_get_file_path_to_upload(python_file_name, code_dir)
                    files_to_upload.append({python_file_name: full_file_path})
                # Make the step is enabled mandatory, and use the default of True if not exists
                current_step[global_keys.IS_ENABLED] = step.pop('is_enabled', True) or True
                current_step[global_keys.STEP_NAME] = step.pop('step_name', 'Step {}'.format(uuid.uuid4().hex[:4]))
                current_step[global_keys.CONTAINER_RUNNING] = 'run_once'
                if step.get('connection_id') or step.get('gConnection'):
                    # NOTE(review): step.pop('connection_id') has no default, so
                    # this raises KeyError when only 'gConnection' is present —
                    # probably needs step.pop('connection_id', None). Confirm.
                    content['gConnection'] = step.pop('connection_id') or step.pop('gConnection')
                if step.get(global_keys.TARGET_TYPE) == global_keys.VARIABLE and step.get(global_keys.VARIABLE):
                    if not step.get(global_keys.VARIABLE) in self.vars:
                        # The target variable doesn't exist under the vars list of the logic.
                        # Raise an error about that.
                        raise KeyError(f"Step target type is variable, "
                                       f"but the target variable doesn't exist in the logic definition. "
                                       f"Please set the variable under `variables` key in the logic entity definition."
                                       )
                content.update(step)
                current_step[global_keys.CONTNET] = content
                current_step[global_keys.NODES] = []
                all_steps.append(current_step)
        return all_steps, files_to_upload
    def convert(self, code_dir: str) -> [dict, list]:
        """Build the full API river definition for this logic river.

        :param code_dir: The project's code directory; python step files
            referenced by the steps are collected from it for upload.
        :return: Tuple of (full river definition dict, list of
            ``{file_name: full_path}`` dicts of files to upload).
        """
        # Make the global definitions under the river def
        self.river_full_definition[global_keys.CROSS_ID] = self.cross_id
        if self.definition.get(global_keys.SCHEDULING, {}).get('isEnabled'):
            self.river_full_definition[global_keys.RIVER_DEF][global_keys.IS_SCHEDULED] = True
        self.river_full_definition[global_keys.RIVER_DEF][
            global_keys.SHARED_PARAMS][global_keys.NOTIFICATIONS] = self.definition.get(global_keys.NOTIFICATIONS, {})
        # Make the basic river task definition on config.
        # Logic has only 1 task under the task definition.
        self.river_full_definition[global_keys.TASKS_DEF] = [
            {
                global_keys.TASK_TYPE_ID: self.task_type_id,
                global_keys.TASK_CONFIG: {},
                global_keys.SCHEDULING: self.definition.get(
                    global_keys.SCHEDULING) or {"isEnabled": False},
                global_keys.RIVER_ID: self.cross_id
            }
        ]
        # Run over the properties and then make some validations + conversion to the API river definition.
        # Check if there's a "steps" key under the properties.
        # TODO: move to parameters validator or another validator class.
        assert self.properties.get(global_keys.STEPS, []), 'Every logic river must have at least one step.'
        # Populate the variables key
        self.vars = self.properties.get('variables', {})
        # Convert the steps to river definitions
        steps, files_to_upload = self.steps_converter(self.properties.get('steps', []), code_dir)
        # Make the full definition of the logic under the tasks definitions [0]
        self.river_full_definition[global_keys.TASKS_DEF][0][
            global_keys.TASK_CONFIG].update(
            {"logic_steps": steps,
             "datasource_id": self.datasource_id,
             "fz_batched": False,
             "variables": self.vars}
        )
        # Round-trip through JSON so bson_converter casts id fields to ObjectId.
        self.river_full_definition = json.loads(json.dumps(self.river_full_definition), object_hook=self.bson_converter)
        return self.river_full_definition, files_to_upload
@staticmethod
def content_loader(content: dict) -> dict:
""" ObjectHook like to convert the content into more "reliable" content """
new_content = {}
primary_type = content.get('block_primary_type')
new_content['block_primary_type'] = primary_type
new_content['block_type'] = content.get('block_type')
if primary_type == 'river':
new_content['block_primary_type'] = primary_type
new_content['river_id'] = str(content.get('river_id'))
else:
new_content.update(content)
if new_content.get('connection_id') or new_content.get('gConnection'):
new_content['connection_id'] = str(content.pop('gConnection', content.get('connection_id')))
new_content.pop('gConnection', None)
return new_content
    @classmethod
    def step_importer(cls, steps: list, code_dir: str) -> [list, list]:
        """Convert API step definitions back into yaml-style step dicts.

        Inverse of :meth:`steps_converter`: walks the API ``steps`` tree
        (containers recursively) and returns the yaml representation plus
        the python code files that must be downloaded.

        :param steps: Step nodes as returned by the Rivery API.
        :param code_dir: Local directory where python step files are saved.
        :return: Tuple of (yaml step dicts, files to download).
        """
        # Make the steps list
        all_steps = []
        files_to_download = []
        for step in steps:
            current_step = {
                # A node with content is a plain step; otherwise a container.
                "type": "step" if step.get('content', []) else "container",
                "isEnabled": step.pop("isEnabled", True),
                "step_name": step.pop("step_name", "Logic Step")
            }
            if current_step.get('type') == "step":
                # Update the step definition as it exists in the content
                current_step.update(cls.content_loader(step.pop("content", {})))
                if step.get('condition', {}):
                    current_step['condition'] = step.get('condition')
                # In order to "purge" any "Type" key comes from the river
                current_step['type'] = 'step'
                # For each step with a python code, we want to download it to a local path configure in project.yaml
                if current_step.get(global_keys.CODE_TYPE) == global_keys.PYTHON_CODE_TYPE:
                    file_to_download = logicode_utils.get_file_to_download(
                        file_id=current_step.get('file_cross_id'),
                        code_dir=code_dir,
                        file_name=current_step.get('file_name')
                    )
                    files_to_download.append(file_to_download)
            else:
                # Update the CONTAINER definition
                current_step["isParallel"] = step.pop('isParallel', False) or False
                current_step["container_running"] = step.pop("container_running", "run_once")
                current_step["loop_over_value"] = step.pop("loop_over_value", "")
                current_step["loop_over_variable_name"] = step.pop("loop_over_variable_name", [])
                # Recurse into the container's child nodes.
                current_step["steps"], inner_files_to_download = cls.step_importer(
                    steps=step.pop('nodes', []),
                    code_dir=code_dir
                )
                files_to_download += inner_files_to_download
            all_steps.append(current_step)
        return all_steps, files_to_download
@classmethod
def _import(cls, def_: dict, code_dir: str) -> dict:
    """Import a river API definition into a yaml-ready definition dict.

    Args:
        def_: The full river definition as returned by the API.
        code_dir: Local directory to which referenced python code files
            should be downloaded.

    Returns:
        The yaml-ready structure, including a top-level
        ``files_to_download`` list of python code files to fetch.
    """
    # Set the basic dictionary structure
    final_response = {
        global_keys.BASE: {
            global_keys.ENTITY_NAME: f"river-{str(def_.get(global_keys.CROSS_ID))}",
            global_keys.VERSION: global_settings.__version__,
            global_keys.ENTITY_TYPE: "river",
            global_keys.CROSS_ID: str(def_.get(global_keys.CROSS_ID)),
            global_keys.DEFINITION: {}
        }
    }
    definition_ = {
        global_keys.PROPERTIES: {},
        global_keys.SCHEDULING: {},
        global_keys.NOTIFICATIONS: {}
    }
    # Get the river definitions from the def_
    river_definition = def_.get(global_keys.RIVER_DEF, {})
    # Populate IDs, name/description and globals from the river
    definition_.update({
        "name": river_definition.get(global_keys.RIVER_NAME),
        "description": river_definition.get(global_keys.RIVER_DESCRIPTION) or 'Imported by Rivery CLI',
        global_keys.ENTITY_TYPE: river_definition.get('river_type'),
        global_keys.NOTIFICATIONS: river_definition.get(
            global_keys.SHARED_PARAMS, {}).get(global_keys.NOTIFICATIONS, {})
    })
    # Run over the task definitions and flatten them into the yaml layout.
    # NOTE: logic rivers carry a single task, so overwriting "steps" and
    # "variables" on each iteration keeps the last (only) task.
    tasks_def = def_.get(global_keys.TASKS_DEF, [])
    files_to_download = []
    for task in tasks_def:
        task_config = task.get(global_keys.TASK_CONFIG, {})
        # Convert the logic steps of this task into the yaml structure.
        definition_[global_keys.PROPERTIES]["steps"], task_files_to_download = cls.step_importer(
            steps=task_config.get(global_keys.LOGIC_STEPS, []), code_dir=code_dir)
        files_to_download += task_files_to_download
        # Update the variables for the logic
        definition_[global_keys.PROPERTIES]["variables"] = task_config.get('variables', {})
        # Only import scheduling when it is actually enabled on the task.
        scheduling = task.get(global_keys.SCHEDULING, {})
        if scheduling.get('isEnabled'):
            definition_[global_keys.SCHEDULING] = {
                "cronExp": scheduling.get("cronExp", ""),
                "isEnabled": scheduling.get('isEnabled', False),
                "startDate": scheduling.get('startDate', None),
                "endDate": scheduling.get('endDate', None)
            }
    final_response[global_keys.BASE][global_keys.DEFINITION] = definition_
    final_response['files_to_download'] = files_to_download
    return final_response
from __future__ import unicode_literals
import json
import redis
from rivescript.sessions import SessionManager
__author__ = 'Noah Petherbridge'
__copyright__ = 'Copyright 2017, Noah Petherbridge'
__license__ = 'MIT'
__status__ = 'Beta'
__version__ = '0.1.0'
class RedisSessionManager(SessionManager):
    """A Redis powered session manager for RiveScript."""

    def __init__(self, prefix="rivescript/", *args, **kwargs):
        """Initialize the Redis session driver.

        Apart from the ``prefix`` parameter, all other options are passed
        directly to the underlying Redis constructor, ``redis.StrictRedis()``.
        See the documentation of redis-py for more information. Commonly used
        arguments are listed below for convenience.

        Args:
            prefix (string): the key to prefix all the Redis keys with. The
                default is ``rivescript/``, so that for a username of ``alice``
                the key would be ``rivescript/alice``.
            host (string): Hostname of the Redis server.
            port (int): Port number of the Redis server.
            db (int): Database number in Redis.
        """
        self.client = redis.StrictRedis(*args, **kwargs)
        self.prefix = prefix
        self.frozen = "frozen:" + prefix

    def _key(self, username, frozen=False):
        """Translate a username into a key for Redis."""
        if frozen:
            return self.frozen + username
        return self.prefix + username

    def _get_user(self, username):
        """Custom helper method to retrieve a user's data from Redis.

        Returns the decoded JSON dict, or ``None`` if the user is unknown.
        """
        data = self.client.get(self._key(username))
        if data is None:
            return None
        return json.loads(data.decode())

    # The below functions implement the RiveScript SessionManager.

    def set(self, username, new_vars):
        """Merge ``new_vars`` into the user's session and persist it."""
        data = self._get_user(username)
        if data is None:
            data = self.default_session()
        data.update(new_vars)
        self.client.set(self._key(username), json.dumps(data))

    def get(self, username, key):
        """Return one session variable, "undefined" if unset, or None
        if the user doesn't exist at all."""
        data = self._get_user(username)
        if data is None:
            return None
        return data.get(key, "undefined")

    def get_any(self, username):
        """Return the user's whole session dict, or None."""
        return self._get_user(username)

    def get_all(self):
        """Return a dict mapping every known username to their session."""
        users = self.client.keys(self.prefix + "*")
        result = dict()
        for user in users:
            # Redis returns keys as bytes; strip the prefix to recover the
            # username. (Bug fix: the original referenced the `users` list
            # instead of the iterated `user` key.)
            username = user.decode().replace(self.prefix, "")
            result[username] = self._get_user(username)
        return result

    def reset(self, username):
        """Delete a user's session entirely."""
        self.client.delete(self._key(username))

    def reset_all(self):
        """Delete all users' sessions."""
        users = self.client.keys(self.prefix + "*")
        for user in users:
            # Bug fix: was `self.c.delete(user)` -- no such attribute.
            self.client.delete(user)

    def freeze(self, username):
        """Snapshot the user's current session under the frozen key."""
        data = self._get_user(username)
        if data is not None:
            self.client.set(self._key(username, True), json.dumps(data))

    def thaw(self, username, action="thaw"):
        """Restore (or discard) a frozen session snapshot.

        Args:
            action (str): "thaw" restores the snapshot and deletes it,
                "discard" deletes the snapshot without restoring, and
                "keep" restores it but leaves the snapshot in place.
        """
        # Bug fix: the original used nonexistent `self.key` / `self.c`
        # attributes instead of `self._key` / `self.client`.
        data = self.client.get(self._key(username, True))
        if data is not None:
            data = json.loads(data.decode())
            if action == "thaw":
                self.reset(username)
                self.set(username, data)
                self.client.delete(self._key(username, True))
            elif action == "discard":
                self.client.delete(self._key(username, True))
            elif action == "keep":
                self.reset(username)
                self.set(username, data)
            else:
                raise ValueError("unsupported thaw action")
# RiveScript-Python
[![Build Status][1]][2] [![Read the docs][3]][4] [![PyPI][5]][6]
## Introduction
This is a RiveScript interpreter for the Python programming language. RiveScript
is a scripting language for chatterbots, making it easy to write
trigger/response pairs for building up a bot's intelligence.
This library is compatible with both Python 2 and Python 3.
## Documentation
Module documentation is available at <http://rivescript.readthedocs.org/>
Also check out the [**RiveScript Community Wiki**](https://github.com/aichaos/rivescript/wiki)
for common design patterns and tips & tricks for RiveScript.
## Installation
This module is available on [PyPI](https://pypi.python.org/) and can be
installed via pip:
`pip install rivescript`
To install manually, download or clone the git repository and run
`python setup.py install`
## Examples
There are examples available in the
[eg/](https://github.com/aichaos/rivescript-python/tree/master/eg) directory of
this project on GitHub that show how to interface with a RiveScript bot in a
variety of ways--such as through the Twilio SMS API--and other code snippets and
useful tricks.
## Usage
The `rivescript` module can be executed as a stand-alone Python script, or
included in other Python code. When executed directly, it launches an
interactive chat session:
python rivescript ./eg/brain
In case running RiveScript as a script is inconvenient (for example, when it's
installed as a system module) you can use the `shell.py` script as an alias:
python shell.py eg/brain
When used as a library, the synopsis is as follows:
```python
from rivescript import RiveScript
bot = RiveScript()
bot.load_directory("./eg/brain")
bot.sort_replies()
while True:
msg = raw_input('You> ')
if msg == '/quit':
quit()
reply = bot.reply("localuser", msg)
print 'Bot>', reply
```
The scripts `example.py` and `example3.py` provide simple examples for using
RiveScript as a library for Python 2 and 3, respectively.
## UTF-8 Support
RiveScript supports Unicode but it is not enabled by default. Enable it by
passing a `True` value for the `utf8` option in the constructor, or by using the
`--utf8` argument to the standalone interactive mode.
In UTF-8 mode, most characters in a user's message are left intact, except for
certain metacharacters like backslashes and common punctuation characters like
`/[.,!?;:]/`.
If you want to override the punctuation regexp, you can provide a new one by
assigning the `unicode_punctuation` attribute of the bot object after
initialization. Example:
```python
import re
bot = RiveScript(utf8=True)
bot.unicode_punctuation = re.compile(r'[.,!?;:]')
```
Regardless of whether UTF-8 mode is on, all input messages given to the bot
are converted (if needed) to Python's `unicode` data type. So, while it's
good practice to make sure you're providing Unicode strings to the bot, the
library will have you covered if you forget.
## JSON Mode
The `rivescript` package, when run stand-alone, supports "JSON Mode", where
you communicate with the bot using JSON. This is useful for third-party
programs that want to use RiveScript but don't have an interpreter in their
native language.
Just run it like: `python rivescript --json /path/to/brain`
Print a JSON encoded data structure into the standard input. The format should
look like this:
{
"username": "localuser",
"message": "Hello bot!",
"vars": {
"name": "Aiden"
}
}
After sending this, you can send an `EOF` signal and the bot will respond with
a JSON response and then exit. Or, you can keep the session open, by sending
the string `__END__` on a line by itself after your input. The bot will do the
same when it responds, so you can reuse the same pipe for multiple
interactions.
The bot's response will be formatted like so:
{
"status": "ok",
"reply": "Hello, human!",
"vars": {
"name": "Aiden"
}
}
The `status` will be `ok` on success, or `error` if there was an error. The
`reply` is the bot's response (or an error message on error).
## Contributors
* [Noah Petherbridge](https://github.com/kirsle)
* [Arash Saidi](https://github.com/arashsa)
* [Danilo Bargen](https://github.com/dbrgn)
* [FujiMakoto](https://github.com/FujiMakoto)
* [Hung Tu Dinh](https://github.com/Dinh-Hung-Tu)
* [Julien Syx](https://github.com/Seraf)
* [Pablo](https://github.com/flogiston)
* [Peixuan (Shawn) Ding](https://github.com/dinever)
## License
```
The MIT License (MIT)
Copyright (c) 2020 Noah Petherbridge
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
```
SEE ALSO
--------
The official RiveScript website, http://www.rivescript.com/
[1]: https://travis-ci.org/aichaos/rivescript-python.svg?branch=master
[2]: https://travis-ci.org/aichaos/rivescript-python
[3]: https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat
[4]: http://rivescript.rtfd.io/
[5]: https://img.shields.io/pypi/v/rivescript.svg
[6]: https://pypi.python.org/pypi/rivescript/
| /rivescript-1.15.0.tar.gz/rivescript-1.15.0/README.md | 0.509764 | 0.888904 | README.md | pypi |
# RiveScript Deparse
This example purely consists of additional documentation and examples of the
`deparse()` method of RiveScript.
## Relevant Methods
* `rs.deparse()`
This method exports the current in-memory representation of the RiveScript
brain as a JSON-serializable data structure. See [Schema](#schema) for the
format of this data structure.
* `rs.write(fh[, deparsed])`
This method converts a data structure like the one from `deparse()` into plain
text RiveScript source code and writes it to the file-like object ``fh``.
By default this will use the current in-memory representation of the
RiveScript brain. For example, if you loaded a directory of RiveScript files
and then called `write()` you would get a large text blob that contains
all the source code of all the files from that directory. Note, however, that
the formatting of the original reply data might be lost because the output
from `write()` is working backwards from the in-memory representation, so
for example, comments in the original source code aren't preserved and places
where `^Continue` was used will instead result in one single line of code
in the output.
If you pass in a data structure formatted the same way as the one `deparse()`
returns, you can write that code instead. This way you could
programmatically generate RiveScript data (for example, from a custom user
interface for authoring bots) and convert it into valid RiveScript source code
using this method.
## Schema
The data structure returned by `deparse()` looks like this, annotated:
```yaml
begin:
global: map of key/value pairs for `! global` global variable definitions
var: map of key/value pairs for `! var` bot variable definitions
sub: map of key/value pairs for `! sub` substitution definitions
person: map of key/value pairs for `! person` substitution definitions
array: map of `! array` names to arrays of their values
triggers: array of trigger data (see below)
topics: map of topic names -> array of trigger data under that topic
$name: []
```
The trigger data is stored in arrays underneath `begin.triggers` (for those in
the `> begin` block) and `topics.$NAME` for triggers under a particular topic,
with the default topic being named "random".
Each trigger is an object with the following schema:
```yaml
trigger: the plain text trigger
reply: array of the plain text `-Reply` commands, or `[]`
condition: array of the plain text `*Condition` commands, or `[]`
redirect: the text of the `@Redirect` command, or `null`
previous: the text of the `%Previous` command, or `null`
```
## Examples
Here are some example code snippets that show what the deparsed data structure
looks like.
Python Code (`example.py`)
```python
from rivescript import RiveScript
import json
bot = RiveScript()
bot.load_file("example.rive")
dep = bot.deparse()
print(json.dumps(dep, indent=2))
```
RiveScript Code (`example.rive`)
```rivescript
! version = 1.0
! var name = Aiden
! var age = 5
! sub what's = what is
! array colors = red blue green yellow cyan magenta black white
> begin
+ request
- {ok}
< begin
+ hello bot
- Hello human.
+ hi robot
@ hello bot
+ my name is *
- <set name=<formal>>Nice to meet you, <get name>.
- <set name=<formal>>Hello, <get name>.
+ what is my name
* <get name> != undefined => Your name is <get name>.
- You didn't tell me your name.
> topic game-global
+ help
- How to play...
< topic
> topic game-room-1 inherits game-global
+ look
- You're in a room labeled "1".
< topic
> object reverse javascript
var msg = args.join(" ");
return msg.split("").reverse().join("");
< object
+ say * in reverse
- <call>reverse <star></call>
```
JSON output:
```javascript
{
"begin": {
"global": {},
"var": {
"name": "Aiden",
"age": "5"
},
"sub": {
"what's": "what is"
},
"person": {},
"array": {
"colors": [
"red",
"blue",
"green",
"yellow",
"cyan",
"magenta",
"black",
"white"
]
},
"triggers": [
{
"trigger": "request",
"reply": [
"{ok}"
],
"condition": [],
"redirect": null,
"previous": null
}
]
},
"topics": {
"random": {
"triggers": [
{
"trigger": "hello bot",
"reply": [
"Hello human."
],
"condition": [],
"redirect": null,
"previous": null
},
{
"trigger": "hi robot",
"reply": [],
"condition": [],
"redirect": "hello bot",
"previous": null
},
{
"trigger": "my name is *",
"reply": [
"<set name=<formal>>Nice to meet you, <get name>.",
"<set name=<formal>>Hello, <get name>."
],
"condition": [],
"redirect": null,
"previous": null
},
{
"trigger": "what is my name",
"reply": [
"You didn't tell me your name."
],
"condition": [
"<get name> != undefined => Your name is <get name>."
],
"redirect": null,
"previous": null
},
{
"trigger": "say * in reverse",
"reply": [
"<call>reverse <star></call>"
],
"condition": [],
"redirect": null,
"previous": null
}
],
"includes": {},
"inherits": {}
},
"game-global": {
"triggers": [
{
"trigger": "help",
"reply": [
"How to play..."
],
"condition": [],
"redirect": null,
"previous": null
}
],
"includes": {},
"inherits": {}
},
"game-room-1": {
"triggers": [
{
"trigger": "look",
"reply": [
"You're in a room labeled \"1\"."
],
"condition": [],
"redirect": null,
"previous": null
}
],
"includes": {},
"inherits": {
"game-global": 1
}
}
}
}
```
| /rivescript-1.15.0.tar.gz/rivescript-1.15.0/eg/deparse/README.md | 0.717903 | 0.920718 | README.md | pypi |
# rivet
A user-friendly Python-to-S3 interface. Adds quality of life and convenience features around `boto3`, including the handling of reading and writing to files in proper formats. While there is nothing that you can do with `rivet` that you can't do with `boto3`, `rivet`'s primary focus is ease-of-use. By handling lower-level operations such as client establishment and default argument specification behind the scenes, the cost of entry to interacting with cloud storage from within Python is lowered.
It also enforces good practice in S3 naming conventions.
## Usage
`rivet` acts as an abstraction around the S3 functionality of Amazon's `boto3` package.
Although `boto3` is very powerful, the expansive functionality it boasts can be overwhelming
and often results in users sifting through a lot of documentation to find the subset of
functionality that they need. In order to make use of this package, you will need to have
the environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` configured
for the buckets you wish to interact with.
### General
1. Because S3 allows for almost anything to be used as an S3 key, it can be very easy to
lose track of what exactly you have saved in the cloud. A very important example of this is
filetype - without a file extension at the end of the S3 key, it is entirely possible to
lose track of what format a file is saved as. `rivet` enforces file extensions in the objects
it reads and writes.
* Currently supported formats are: CSV, JSON, Avro, Feather, Parquet, Pickle
* Accessible in a Python session via `rivet.supported_formats`
2. A default S3 bucket can be set up as an environment variable, removing the requirement
to provide it to each function call. The name of this environment variable is `RV_DEFAULT_S3_BUCKET`.
### Reading
Reading in `rivet` only requires two things: a key, and a bucket.
```
import rivet as rv
df = rv.read('test_path/test_key.csv', 'test_bucket')
```
The file will be downloaded from S3 to a temporary file on your machine, and
based on the file extension at the end of the S3 key, the proper file reading
function will be used to read the object into the Python session.
Because it cannot be expected that all teams will always utilize good practice though,
the `read_badpractice` function allows for reading of files that do not have a file
extension (or do not follow enforced key-writing practices). In addition to a key
and bucket, this function requires that a storage format is provided.
```
import rivet as rv
obj = rv.read_badpractice('test_path/bad_key', 'test_bucket', filetype='pkl')
```
Both the `read` and `read_badpractice` functions accept additional arguments
for the underlying file reading functions. So, if a user is familiar with
those functions, they can customize how files are read.
```
import rivet as rv
df = rv.read('test_path/test_key.csv', 'test_bucket', delimiter='|')
```
### Writing
Writing is handled almost identically to reading, with the additional
parameter of the object to be uploaded. `write` returns the full path to
the object written to S3, including bucket name, without the `s3://` prefix.
```
import pandas as pd
import rivet as rv
df = pd.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]})
rv.write(df, 'test_path/test_key.csv', 'test_bucket')
```
Similar to the read functionality, `write` determines which underlying write
function to use based on the file extension in the S3 key provided. It can
accept additional arguments to be passed to those functions, exactly like
in the reading functions. However, unlike the reading functions, there is
no 'bad practice' writing functionality. The `rivet` developers understand that
its users can't control the practices of other teams, but as soon as writing
begins, the package will ensure that best practice is being followed.
### Other operations
1. Listing<br>
`rivet` can list the files that are present at a given location in S3, with
two different options being available for how to do so: `include_prefix` and `recursive`.
We will be using the following example S3 bucket structure:
```
test_bucket
|---- test_key_0.csv
|---- folder0/
|---- test_key_1.pq,
|---- folder1/
|---- test_key_2.pkl,
|---- subfolder0/
|---- test_key_3.pkl,
|---- folder2/
|---- test_key_4.csv
```
- `rv.list` would behave as follows with default behavior:
```
import rivet as rv
rv.list(path='', bucket='test_bucket')
Output: ['test_key_0.csv', 'folder0/', 'folder1/', 'folder2/']
rv.list(path='folder1/', bucket='test_bucket')
Output: ['test_key_2.pkl', 'subfolder0/']
```
- `include_prefix` option will result in the full S3 key up to the current folder
to be included in the returned list of keys.
```
import rivet as rv
rv.list_objects(path='folder1/', bucket='test_bucket', include_prefix=True)
Output: ['folder1/test_key_2.pkl', 'folder1/subfolder0/']
```
- The `recursive` option will result in objects stored in nested folders to be returned as well.
```
import rivet as rv
rv.list(path='folder1', bucket='test_bucket', recursive=True)
Output: ['test_key_2.pkl', 'subfolder0/test_key_3.pkl']
```
- `include_prefix` and `recursive` can be used simultaneously.
- Regular expression matching on keys can be performed with the `matches` parameter.
- You can account for your key prefix:
1. In the `path` argument (highly encouraged for the above reasons): `rv.list_objects(path='folder0/')`
2. Hard-coded as part of the regular expression in your `matches` argument: `rv.list_objects(matches='folder0/.*')`
3. or by accounting for it in the matching logic of your regular expression: `rv.list_objects(matches='f.*der0/.*')`
- When you are using both `path` and `matches` parameters, however, there is one situation you need to be cautious of:
1. Hard-coding the path in `path` and using `matches` to match on anything that comes _after_ the path works great: `rv.list_objects(path='folder0/', matches='other_.*.csv')`
2. Hard-coding the path in `path` and including the hard-coded path in `matches` works fine, but is discouraged for a number of reasons: `rv.list_objects(path='folder0/', matches='folder0/other_.*.csv')`
3. What **will not** work is hard-coding the path in `path` and dynamically matching it in `matches`: `rv.list_objects(path='folder0/', matches='f.*der0/other_.*.csv')`
- This is because including the path in the regular expression interferes with the logic of the function. When you provide the hard-coded path both in `path` and in the beginning of `matches`, it can be detected and removed from the regular expression, but there is no definitive way to do this when you are matching on it.
- So, in general, try to keep `path` and `matches` entirely separate if at all possible.
2. Existence checks<br>
As an extension of listing operations, `rivet` can check if an object exists at
a specific S3 key. Note that for existence to be `True`, there must be an
_exact_ match with the key provided
Using the following bucket structure:
```
test_bucket
|---- test_key_0.csv
```
```
import rivet as rv
rv.exists('test_key_0.csv', bucket='test_bucket')
Output: True
rv.exists('test_key_1.csv', bucket='test_bucket')
Output: False
rv.exists('test_key_.csv', bucket='test_bucket')
Output: False
```
3. Copying<br>
It is possible to copy a file from one location in S3 to another using `rivet`.
This function is not configurable - it only takes a source and destination key and bucket.
```
import rivet as rv
rv.copy(source_path='test_path/df.csv',
dest_path='test_path_destination/df.csv',
source_bucket='test_bucket',
dest_bucket='test_bucket_destination')
```
### Session-Level Configuration
`rivet` outputs certain messages to the screen to help interactive users
maintain awareness of what is being performed behind-the-scenes. If this
is not desirable (as may be the case for notebooks, pipelines, usage of
`rivet` within other packages, etc.), all non-logging output can be
disabled with `rv.set_option('verbose', False)`.
| /rivet-1.6.0.tar.gz/rivet-1.6.0/README.md | 0.613584 | 0.943138 | README.md | pypi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.