code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
__all__ = ["FxBinaryDefinition"]
from ._abstracted_class import BinaryDefinition
from ._settlement_type import SettlementType
class FxBinaryDefinition(BinaryDefinition):
    """Definition of an FX binary option (one-touch / no-touch / digital).

    Values are not stored as plain attributes: each property below reads and
    writes the parameter dictionary managed by the BinaryDefinition base
    class through _get_parameter/_set_parameter and their enum variants.
    """
    def __init__(
        self,
        binary_type=None,
        payout_amount=None,
        payout_ccy=None,
        settlement_type=None,
        trigger=None,
    ):
        super().__init__()
        # Assigning through the properties routes every value into the
        # underlying parameter dictionary (and validates enum values).
        self.binary_type = binary_type
        self.payout_amount = payout_amount
        self.payout_ccy = payout_ccy
        self.settlement_type = settlement_type
        self.trigger = trigger
    @property
    def binary_type(self):
        """
        Type of the binary option.
        Possible values:
        - None,
        - OneTouchImmediate
        - OneTouchDeferred
        - NoTouch
        - Digital
        :return: enum FxBinaryType
        """
        # local import, presumably to avoid a circular import -- TODO confirm
        from ._fx_binary_type import FxBinaryType
        return self._get_enum_parameter(FxBinaryType, "binaryType")
    @binary_type.setter
    def binary_type(self, value):
        from ._fx_binary_type import FxBinaryType
        self._set_enum_parameter(FxBinaryType, "binaryType", value)
    @property
    def payout_amount(self):
        """
        Amount paid out when the binary option pays.
        :return: double
        """
        return self._get_parameter("payoutAmount")
    @payout_amount.setter
    def payout_amount(self, value):
        self._set_parameter("payoutAmount", value)
    @property
    def payout_ccy(self):
        """
        Currency of the payout amount.
        :return: string
        """
        return self._get_parameter("payoutCcy")
    @payout_ccy.setter
    def payout_ccy(self, value):
        self._set_parameter("payoutCcy", value)
    @property
    def settlement_type(self):
        """
        Settlement type of the binary option.
        :return: enum SettlementType
        """
        return self._get_enum_parameter(SettlementType, "settlementType")
    @settlement_type.setter
    def settlement_type(self, value):
        self._set_enum_parameter(SettlementType, "settlementType", value)
    @property
    def trigger(self):
        """
        Trigger (barrier) level of the binary option.
        :return: double
        """
        return self._get_parameter("trigger")
    @trigger.setter
    def trigger(self, value):
        self._set_parameter("trigger", value)
__all__ = ["DoubleBinaryDefinition"]
from refinitiv.dataplatform.content.ipa.instrument._definition import ObjectDefinition
class DoubleBinaryDefinition(ObjectDefinition):
    """Definition of an FX double binary option (e.g. double-no-touch).

    Each property below is a typed view over the parameter dictionary
    managed by ObjectDefinition (_get_parameter/_set_parameter and their
    enum variants); nothing is stored as a plain attribute.
    """
    def __init__(
        self,
        double_binary_type=None,
        payout_amount=None,
        payout_ccy=None,
        settlement_type=None,
        trigger_down=None,
        trigger_up=None,
    ):
        super().__init__()
        # Property assignment routes all values into the parameter dictionary.
        self.double_binary_type = double_binary_type
        self.payout_amount = payout_amount
        self.payout_ccy = payout_ccy
        self.settlement_type = settlement_type
        self.trigger_down = trigger_down
        self.trigger_up = trigger_up
    @property
    def double_binary_type(self):
        """
        Possible values:
        - None,
        - DoubleNoTouch
        :return: fx.DoubleBinaryType
        """
        # local import, presumably to avoid a circular import -- TODO confirm
        from refinitiv.dataplatform.content.ipa.contracts.option._double_binary_type import (
            DoubleBinaryType,
        )
        # NOTE(review): stored under the backend key "binaryType" (not
        # "doubleBinaryType") -- looks intentional, confirm against the API.
        return self._get_enum_parameter(DoubleBinaryType, "binaryType")
    @double_binary_type.setter
    def double_binary_type(self, value):
        from refinitiv.dataplatform.content.ipa.contracts.option._double_binary_type import (
            DoubleBinaryType,
        )
        self._set_enum_parameter(DoubleBinaryType, "binaryType", value)
    @property
    def payout_amount(self):
        """
        Amount paid out when the option pays.
        :return: double
        """
        return self._get_parameter("payoutAmount")
    @payout_amount.setter
    def payout_amount(self, value):
        self._set_parameter("payoutAmount", value)
    @property
    def payout_ccy(self):
        """
        Currency of the payout amount.
        :return: string
        """
        return self._get_parameter("payoutCcy")
    @payout_ccy.setter
    def payout_ccy(self, value):
        self._set_parameter("payoutCcy", value)
    @property
    def settlement_type(self):
        """
        Settlement Type of the BinaryOption
        Possible values :
        - Undefined
        - Cash
        - Asset
        :return: enum fx.SettlementType
        """
        from ._settlement_type import SettlementType
        return self._get_enum_parameter(SettlementType, "settlementType")
    @settlement_type.setter
    def settlement_type(self, value):
        from ._settlement_type import SettlementType
        self._set_enum_parameter(SettlementType, "settlementType", value)
    @property
    def trigger_down(self):
        """
        Barrier Down for the Double Barrier option
        :return: double
        """
        return self._get_parameter("triggerDown")
    @trigger_down.setter
    def trigger_down(self, value):
        self._set_parameter("triggerDown", value)
    @property
    def trigger_up(self):
        """
        Barrier Up for the Double Barrier option
        :return: double
        """
        return self._get_parameter("triggerUp")
    @trigger_up.setter
    def trigger_up(self, value):
        self._set_parameter("triggerUp", value)
__all__ = ["AverageInfo"]
from refinitiv.dataplatform.content.ipa.instrument._definition import ObjectDefinition
from ...enum_types.average_type import AverageType
from ...enum_types.fixing_frequency import FixingFrequency
class AverageInfo(ObjectDefinition):
    """Averaging (fixing) description attached to an option definition.

    Each property reads/writes the parameter dictionary managed by
    ObjectDefinition; values are not stored as plain attributes.
    """
    def __init__(
        self,
        average_type=None,
        fixing_frequency=None,
        average_so_far=None,
        fixing_ric_source=None,
        fixing_start_date=None,
        include_holidays=None,
        include_week_ends=None,
    ):
        super().__init__()
        # Property assignment routes all values into the parameter dictionary.
        self.average_type = average_type
        self.fixing_frequency = fixing_frequency
        self.average_so_far = average_so_far
        self.fixing_ric_source = fixing_ric_source
        self.fixing_start_date = fixing_start_date
        self.include_holidays = include_holidays
        self.include_week_ends = include_week_ends
    @property
    def average_type(self):
        """
        The type of average used to compute. Possible values:
        - ArithmeticRate
        - ArithmeticStrike
        - GeometricRate
        - GeometricStrike
        :return: enum AverageType
        """
        return self._get_enum_parameter(AverageType, "averageType")
    @average_type.setter
    def average_type(self, value):
        self._set_enum_parameter(AverageType, "averageType", value)
    @property
    def fixing_frequency(self):
        """
        The fixing's frequency. Possible values:
        - Daily
        - Weekly
        - BiWeekly
        - Monthly
        - Quaterly
        - SemiAnnual
        - Annual
        :return: enum FixingFrequency
        """
        # NOTE(review): "Quaterly" spelling presumably mirrors the
        # FixingFrequency enum member expected by the backend -- do not
        # "fix" the spelling without checking that enum.
        return self._get_enum_parameter(FixingFrequency, "fixingFrequency")
    @fixing_frequency.setter
    def fixing_frequency(self, value):
        self._set_enum_parameter(FixingFrequency, "fixingFrequency", value)
    @property
    def average_so_far(self):
        """
        The value of the AverageType
        :return: float
        """
        return self._get_parameter("averageSoFar")
    @average_so_far.setter
    def average_so_far(self, value):
        self._set_parameter("averageSoFar", value)
    @property
    def fixing_ric_source(self):
        """
        The fixing's RIC source.
        Default value: the first available source RIC of the Fx Cross Code
        :return: str
        """
        return self._get_parameter("fixingRicSource")
    @fixing_ric_source.setter
    def fixing_ric_source(self, value):
        self._set_parameter("fixingRicSource", value)
    @property
    def fixing_start_date(self):
        """
        The beginning date of the fixing period.
        :return: str
        """
        return self._get_parameter("fixingStartDate")
    @fixing_start_date.setter
    def fixing_start_date(self, value):
        self._set_parameter("fixingStartDate", value)
    @property
    def include_holidays(self):
        """
        Include the holidays in the list of fixings
        :return: bool
        """
        return self._get_parameter("includeHolidays")
    @include_holidays.setter
    def include_holidays(self, value):
        self._set_parameter("includeHolidays", value)
    @property
    def include_week_ends(self):
        """
        Include the week-ends in the list of fixings
        :return: bool
        """
        return self._get_parameter("includeWeekEnds")
    @include_week_ends.setter
    def include_week_ends(self, value):
        self._set_parameter("includeWeekEnds", value)
__all__ = ["StreamingPriceCallback"]
class StreamingPriceCallback(object):
    """Container holding the user callbacks of a StreamingPrice subscription.

    Every slot defaults to None and accepts any callable:

    on_refresh
        Invoked when the stream opens or the record is refreshed with a new
        full image.
    on_update
        Invoked on each update; receives a utf-8 string.
    on_error
        Invoked on failure; receives an Exception.
    on_status
        Invoked when the subscription status changes; receives the status.
    on_open_complete
        Invoked once the stream has received all expected data.

    Raises
    ------
    Exception
        If request fails or if Refinitiv Services return an error
    """

    def __init__(self):
        # Every callback starts unset; assign through the properties below.
        self._on_refresh_cb = None
        self._on_update_cb = None
        self._on_error_cb = None
        self._on_status_cb = None
        self._on_open_complete_cb = None

    @property
    def on_refresh(self):
        """Callable run on open / full-image refresh (or None)."""
        return self._on_refresh_cb

    @on_refresh.setter
    def on_refresh(self, callback):
        self._on_refresh_cb = callback

    @property
    def on_update(self):
        """Callable run on each update message (or None)."""
        return self._on_update_cb

    @on_update.setter
    def on_update(self, callback):
        self._on_update_cb = callback

    @property
    def on_error(self):
        """Callable run when an error occurs (or None)."""
        return self._on_error_cb

    @on_error.setter
    def on_error(self, callback):
        self._on_error_cb = callback

    @property
    def on_status(self):
        """Callable run on subscription status changes (or None)."""
        return self._on_status_cb

    @on_status.setter
    def on_status(self, callback):
        self._on_status_cb = callback

    @property
    def on_open_complete(self):
        """Callable run when all expected data has arrived (or None)."""
        return self._on_open_complete_cb

    @on_open_complete.setter
    def on_open_complete(self, callback):
        self._on_open_complete_cb = callback
__all__ = ["ChainRecords"]
from threading import Lock
from refinitiv.dataplatform.delivery.stream import OMMItemStream, StreamState
from refinitiv.dataplatform.core.log_reporter import LogReporter
class ChainRecords(LogReporter):
    """Bookkeeping for the item streams that make up one streaming chain.

    A complete chain (e.g. ".DJI") can be built from several chain records
    ("0#.DJI", "1#.DJI", "2#.DJI"). Per chain record name this class tracks
    the decoded record, its OMM item stream, its last status and an asyncio
    future resolved on the first refresh. Each mapping is guarded by its own
    threading.Lock because stream callbacks arrive on other threads.
    """
    def __init__(self, streaming_chain, session):
        super().__init__(logger=session)
        self._stream_chain = streaming_chain
        self._session = session
        # mapping chain record name to chain record
        self.records_by_name = {}
        # mapping chain record name to each chain record item stream
        self.statuses_by_name = {}
        # dictionary mapping between chain record name and future object
        self.refreshing_by_name = {}
        # mapping dict of each streaming chains to construct this given chain
        # note that completed chain record may construct from multiple chain records
        # ie. complete chain record named ".DJI" contains with three chain records including
        # "0#.DJI", "1#.DJI", "2#.DJI"
        self.streams_by_name = {}
        # one lock per mapping; websocket callbacks and the user thread may
        # touch these dicts concurrently
        self.lock = {
            "streams": Lock(),
            "refreshing": Lock(),
            "records": Lock(),
            "statuses": Lock(),
        }
    def add(self, name):
        """
        Construct new item streaming for given chain record name
        and store it as a mapping from chain record name
        """
        with self.lock["streams"]:
            assert name not in self.streams_by_name
        # local import, presumably to avoid a circular import -- TODO confirm
        from .streaming_chain import _STREAMING_DOMAIN, _STREAMING_FIELDS
        # construct and run the item stream; all stream events are forwarded
        # to the owning StreamingChain's handlers
        stream = OMMItemStream(
            session=self._session,
            name=name,
            domain=_STREAMING_DOMAIN,
            service=self._stream_chain._service,
            fields=_STREAMING_FIELDS,
            on_refresh=self._stream_chain._on_refresh_handler,
            on_status=self._stream_chain._on_status_handler,
            on_update=self._stream_chain._on_update_handler,
            on_complete=self._stream_chain._on_complete_handler,
            on_error=self._stream_chain._on_error_handler,
        )
        # store the mapping between chain record name to stream
        # NOTE(review): the "streams" lock is released and re-acquired, so a
        # concurrent add() of the same name could slip between the assert and
        # this insert -- confirm callers serialize add() per name.
        with self.lock["streams"]:
            self.streams_by_name[name] = stream
        with self.lock["refreshing"]:
            # future resolved by refreshed() on the first refresh message
            self.refreshing_by_name[
                name
            ] = self._session._loop.create_future()  # asyncio.Future()
        # done, return this chain record item stream
        return stream
    def has(self, name):
        """ Check given chain record has chain record object or not ? """
        with self.lock["records"]:
            return name in self.records_by_name
    def has_stream(self, name):
        """ Check given chain record has item stream or not ? """
        with self.lock["streams"]:
            return name in self.streams_by_name
    def not_has_stream(self, name):
        # convenience negation of has_stream()
        return not self.has_stream(name)
    def get_display_name(self, name):
        """Return the record's display name, or "" when unset."""
        # get the first chain display name and return
        with self.lock["records"]:
            # check for chain record name is valid or not?
            # (raises KeyError when the record was never added)
            chain_record = self.records_by_name[name]
            return chain_record.displayName or ""
    async def open_streams(self, with_updates):
        """Open every registered chain record stream."""
        # loop over all initial set of stream for this chains and open it
        # NOTE(review): awaits while holding the "streams" threading.Lock; a
        # callback thread blocking on this lock could stall progress -- confirm.
        with self.lock["streams"]:
            for name in self.streams_by_name.keys():
                # open each chain record stream
                await self.open_stream(name, with_updates)
    async def open_stream(self, name, with_updates):
        """Open the item stream of a single chain record."""
        self.info(f"Opening stream of chain record = {name}.")
        stream = self.streams_by_name[name]
        await stream.open_async(with_updates=with_updates)
    def is_status_closed(self, name):
        """True when the last known status of the record's stream is Closed."""
        # warning THIS IS A HACK. THE STREAMING CHAIN NEED TO REFACTOR FOLLOWING THE NEW DESIGN OF STREAMING WEBSOCKET
        if name not in self.statuses_by_name:
            return False
        status = self.statuses_by_name[name].get("status", None)
        return status == StreamState.Closed if status is not None else False
    async def wait_refresh(self, name):
        """Await the first refresh of the given chain record."""
        await self.refreshing_by_name[name]
    def get_record(self, name):
        """Return the decoded chain record, or None when absent."""
        with self.lock["records"]:
            item = self.records_by_name.get(name, None)
            return item
    def get_stream(self, name):
        """Return the item stream (KeyError when never added)."""
        return self.streams_by_name[name]
    def close_streams(self):
        """ close chain record item streams """
        # loop over all initial set of stream for this chains and close it
        with self.lock["streams"]:
            for name, stream in self.streams_by_name.items():
                # close each chain record stream
                self.info(f"Closing stream of chain record = {name}.")
                stream.close()
    def add_record(self, name, record):
        """Register the decoded record for a chain record name (once)."""
        # store in the mapping between streaming name to chain record
        with self.lock["records"]:
            assert name not in self.records_by_name
            self.records_by_name[name] = record
    def refreshed(self, name):
        """Resolve the record's refresh future (idempotent)."""
        # change future flag on this chain record stream
        with self.lock["refreshing"]:
            refreshing = self.refreshing_by_name[name]
            # warning :: PREVENT AND ERROR WHEN IT HAS MULTIPLE REFRESH MESSAGE FROM SERVER
            # PLEASE RECHECK THE PROTOCOL ON SERVER SIDE
            if not refreshing.done():
                # it's possible that it's receiving a refresh message multiple time from server
                self._session._loop.call_soon_threadsafe(refreshing.set_result, True)
    def set_status(self, name, status):
        """Remember the latest status of a chain record stream."""
        # store the status of chain record streaming
        with self.lock["statuses"]:
            self.statuses_by_name[name] = status
__all__ = ["get_data"]
from ._fundamental_class import Fundamental
def get_data(
    universe,
    fields,
    parameters=None,
    field_name=None,
):
    """
    Retrieve fundamental/reference data as a pandas.DataFrame with fields in
    columns and one row per instrument.

    Parameters
    ----------
    universe: string or list
        Single instrument or list of instruments to request.
    fields: string, dictionary or list of strings and/or dictionaries.
        Fields to request, for example:
        - 'TR.PriceClose'
        - {'TR.GrossProfit': {'params': {'Scale': 6, 'Curn': 'EUR'}}}
        - ['TR.PriceClose', 'TR.PriceOpen']
        - [{'TR.PriceClose': {'sort_dir': 'asc', 'sort_priority': 1}}]
        Entries can also be built with the legacy TR_Field helper.
        Tip: the Data Item Browser (or TR Eikon - MS Office formulas) lists
        the available field names and parameters.
    parameters: string or dictionary, optional
        Single global parameter key=value or dictionary of global parameters.
        Default: None
    field_name: boolean, optional
        If True, column headers carry field names; otherwise the full
        display names are used. Default: None
    Returns
    -------
    pandas.DataFrame or None
        The resulting frame, or None when the request failed (the failure
        status is then recorded on ContentFactory).
    Raises
    ----------
    Exception
        If http request fails or if server returns an error.
    ValueError
        If a parameter type or value is wrong.
    Examples
    --------
    >>> import refinitiv.dataplatform as rdp
    >>> data = rdp.get_data(["IBM", "GOOG.O", "MSFT.O"], ["TR.PriceClose", "TR.Volume", "TR.PriceLow"])
    >>> data = rdp.get_data("IBM", ['TR.Employees', {'TR.GrossProfit':{'params':{'Scale': 6, 'Curn': 'EUR'},'sort_dir':'asc'}}])
    """
    response = Fundamental.get_data(
        universe=universe,
        fields=fields,
        parameters=parameters,
        field_name=field_name,
    )
    # local import keeps the factory out of module load time
    from refinitiv.dataplatform.factory.content_factory import ContentFactory
    succeeded = response.is_success and response.data and response.data.df is not None
    if succeeded:
        frame = response.data.df
    else:
        # expose the failure to callers inspecting the last error status
        ContentFactory._last_error_status = response.status
        frame = None
    ContentFactory._last_result = response
    return frame
from enum import Enum, unique
__all__ = ["CountryCode", "COUNTRY_CODE_VALUES"]
@unique
class CountryCode(Enum):
    """ISO 3166-1 alpha-3 country names mapped to backend symbology codes.

    Values are the opaque "G:xx" identifiers used by the symbology service.
    Use convert_to_str() to turn either a member or an alpha-3 name
    (case-insensitive string) into its backend code.
    """
    AFG = "G:7R"
    ALB = "G:7G"
    DZA = "G:7S"
    ASM = "G:39"
    AND = "G:32"
    AGO = "G:82"
    AIA = "G:1H"
    ATA = "G:AY"
    ATG = "G:31"
    ARG = "G:60"
    ARM = "G:7I"
    ABW = "G:AD"
    AUS = "G:2H"
    AUT = "G:1F"
    AZE = "G:4R"
    BHS = "G:5E"
    BHR = "G:5Q"
    BGD = "G:9B"
    BRB = "G:8P"
    BLR = "G:8B"
    BEL = "G:9Y"
    BLZ = "G:6C"
    BEN = "G:48"
    BMU = "G:75"
    BTN = "G:8F"
    BOL = "G:3J"
    BIH = "G:2D"
    BWA = "G:61"
    BVT = "G:6F"
    BRA = "G:26"
    IOT = "G:9R"
    BRN = "G:80"
    BGR = "G:1W"
    BFA = "G:5A"
    BDI = "G:68"
    KHM = "G:9G"
    CMR = "G:4I"
    CAN = "G:8W"
    CPV = "G:AF"
    CYM = "G:4J"
    CAF = "G:5G"
    TCD = "G:A6"
    CHL = "G:4M"
    CHN = "G:B1"
    CXR = "G:5R"
    CCK = "G:67"
    COL = "G:2S"
    COM = "G:99"
    COD = "G:8A"
    COG = "G:5K"
    COK = "G:1L"
    CRI = "G:5H"
    HRV = "G:5X"
    CUB = "G:7X"
    CYP = "G:8T"
    CZE = "G:2E"
    DNK = "G:19"
    DJI = "G:6Z"
    DMA = "G:8U"
    DOM = "G:76"
    ECU = "G:8Q"
    EGY = "G:3G"
    SLV = "G:AB"
    GNQ = "G:5L"
    ERI = "G:6K"
    EST = "G:9D"
    SWZ = "G:7H"
    ETH = "G:6L"
    FLK = "G:3L"
    FRO = "G:24"
    FJI = "G:3Z"
    FIN = "G:90"
    FRA = "G:5M"
    GUF = "G:3B"
    PYF = "G:54"
    ATF = "G:9V"
    GAB = "G:69"
    GMB = "G:77"
    GEO = "G:9F"
    DEU = "G:3D"
    GHA = "G:5N"
    GIB = "G:79"
    GRC = "G:6A"
    GRL = "G:2R"
    GRD = "G:9A"
    GLP = "G:4Q"
    GUM = "G:2Y"
    GTM = "G:96"
    GGY = "G:34"
    GIN = "G:9L"
    GNB = "G:9Z"
    GUY = "G:44"
    HTI = "G:22"
    HMD = "G:4W"
    HND = "G:AG"
    HKG = "G:3H"
    HUN = "G:46"
    ISL = "G:6I"
    IND = "G:5B"
    IDN = "G:25"
    IRN = "G:56"
    IRQ = "G:8G"
    IRL = "G:6X"
    IMN = "G:35"
    ISR = "G:3S"
    ITA = "G:5J"
    JAM = "G:1G"
    JPN = "G:41"
    JEY = "G:33"
    JOR = "G:1Z"
    KAZ = "G:85"
    KEN = "G:70"
    KIR = "G:7P"
    PRK = "G:AE"
    KOR = "G:83"
    KWT = "G:7Q"
    KGZ = "G:8R"
    LAO = "G:8L"
    LVA = "G:4H"
    LBN = "G:64"
    LSO = "G:2M"
    LBR = "G:3U"
    LBY = "G:6W"
    LIE = "G:A9"
    LTU = "G:8I"
    LUX = "G:7M"
    MAC = "G:3I"
    MKD = "G:AI"
    MDG = "G:7Z"
    MWI = "G:6G"
    MYS = "G:8S"
    MDV = "G:6H"
    MLI = "G:3V"
    MLT = "G:4G"
    MHL = "G:36"
    MTQ = "G:8C"
    MRT = "G:2X"
    MUS = "G:9N"
    MEX = "G:2V"
    FSM = "G:9E"
    MDA = "G:6P"
    MCO = "G:88"
    MNG = "G:66"
    MNE = "G:3E"
    MSR = "G:1X"
    MAR = "G:8X"
    MOZ = "G:2B"
    MMR = "G:72"
    NAM = "G:6Q"
    NRU = "G:8J"
    NPL = "G:2J"
    NLD = "G:7K"
    NCL = "G:2L"
    NZL = "G:49"
    NIC = "G:AC"
    NER = "G:2U"
    NGA = "G:6B"
    NIU = "G:62"
    NFK = "G:7Y"
    NOR = "G:3N"
    OMN = "G:7B"
    PAK = "G:2P"
    PLW = "G:2N"
    PSE = "G:59"
    PAN = "G:4U"
    PNG = "G:2G"
    PRY = "G:89"
    PER = "G:3T"
    PHL = "G:7L"
    PCN = "G:15"
    POL = "G:5Y"
    PRT = "G:A3"
    PRI = "G:5U"
    QAT = "G:51"
    REU = "G:6N"
    ROU = "G:2Z"
    RUS = "G:38"
    RWA = "G:AA"
    SHN = "G:9S"
    KNA = "G:40"
    LCA = "G:3A"
    SPM = "G:4E"
    VCT = "G:3F"
    WSM = "G:2F"
    SMR = "G:78"
    STP = "G:5F"
    SAU = "G:92"
    SEN = "G:6E"
    SRB = "G:7F"
    SYC = "G:5C"
    SLE = "G:A5"
    SGP = "G:7D"
    SVK = "G:1C"
    SVN = "G:74"
    SLB = "G:1Y"
    SOM = "G:5D"
    ZAF = "G:2I"
    SGS = "G:1N"
    SSD = "G:C2"
    ESP = "G:55"
    LKA = "G:1J"
    SDN = "G:C1"
    SUR = "G:86"
    SJM = "G:1M"
    SWE = "G:6V"
    CHE = "G:30"
    SYR = "G:4P"
    TWN = "G:7U"
    TJK = "G:4N"
    TZA = "G:2T"
    THA = "G:3R"
    TGO = "G:91"
    TKL = "G:5P"
    TON = "G:8K"
    TTO = "G:9T"
    TUN = "G:2W"
    TUR = "G:8Z"
    TKM = "G:42"
    TCA = "G:9I"
    TUV = "G:2C"
    UGA = "G:47"
    UKR = "G:71"
    ARE = "G:A4"
    GBR = "G:7J"
    UMI = "G:9W"
    USA = "G:6J"
    URY = "G:4Y"
    UZB = "G:8M"
    VAT = "G:8Y"
    VUT = "G:9M"
    VEN = "G:2K"
    VNM = "G:5Z"
    WLF = "G:4L"
    ESH = "G:4F"
    YEM = "G:28"
    ZMB = "G:73"
    ZWE = "G:52"
    @staticmethod
    def convert_to_str(some):
        """Return the backend code for a CountryCode member or an alpha-3
        name (case-insensitive string, e.g. "usa").

        Raises AttributeError for anything else. Note that a raw backend
        value such as "G:6J" is also rejected, because normalize() only
        matches member names, not values.
        """
        result = None
        if isinstance(some, str):
            result = CountryCode.normalize(some)
        elif isinstance(some, CountryCode):
            result = some.value
        if result:
            return result
        else:
            raise AttributeError(f"Country code value must be in {COUNTRY_CODE_VALUES}")
    @staticmethod
    def normalize(some):
        """Return the backend code for an alpha-3 name in any case,
        or "" when the name is unknown."""
        some_lower = some.lower()
        symbol_type = COUNTRY_CODE_LOWER.get(some_lower)
        result = ""
        if symbol_type:
            result = symbol_type.value
        return result
# Tuple of every backend symbology code, in declaration order.
COUNTRY_CODE_VALUES = tuple(member.value for member in CountryCode)
# Case-insensitive lookup table: lower-cased alpha-3 name -> member.
COUNTRY_CODE_LOWER = {
    member_name.lower(): member
    for member_name, member in CountryCode.__members__.items()
}
import numpy as np
import pandas as pd
from refit_loader.utilities import convert_object2timestamps
def __generate_activity_report(df, target_appliance, threshold):
    """
    Compute the durations (events) during which the appliance was active (on).

    Parameters
    ----------
    df : pandas.core.frame.DataFrame
        readings indexed by timestamps; must contain a column named
        `target_appliance` holding power readings (watts). NOTE: when the
        index is not datetime-like it is converted in place, mutating the
        caller's frame.
    target_appliance : string
        name of the column holding the target appliance's readings
    threshold : float
        power level (watts) above which the appliance is considered active

    returns: pandas.core.frame.DataFrame
        dataframe with the columns
        'Activity_Start' : pandas.Timestamp -- start of the event
        'Activity_End'   : pandas.Timestamp -- end of the event
        'Duration'       : float            -- event length in minutes
        (None is returned when an exception occurs; the error is printed.)
    """
    try:
        duration_start = []
        duration_end = []
        duration_size = []
        # BUGFIX: the original guard was `isinstance(df.index, object)`,
        # which is always True for any Python object. Only convert when the
        # index is not already datetime-like.
        if not isinstance(df.index, pd.DatetimeIndex):
            df.index = convert_object2timestamps(df.index)
        df_tmp = df[[target_appliance]].copy()
        mask = df[target_appliance] > threshold
        df_tmp['mask'] = mask
        # Cumulative count of inactive samples: constant within each active
        # run, so it serves as a per-event identifier for the groupby below.
        df_tmp['cum_sum'] = (~mask).cumsum()
        df_tmp = df_tmp[df_tmp['mask'] == True]
        df_tmp = df_tmp.groupby(['cum_sum', str(df.index.name)]).first()
        for event_id in df_tmp.index.unique(level='cum_sum'):
            d = df_tmp.loc[(event_id)].reset_index()
            # first/last timestamp of the run bound the activity event
            duration_start.append(d.iloc[0][str(df.index.name)])
            duration_end.append(d.iloc[-1][str(df.index.name)])
            duration_size.append(duration_end[-1] - duration_start[-1])
        # convert the timedeltas to minutes
        durations = (pd.Series(duration_size)) / np.timedelta64(1, 'm')
        return pd.DataFrame({'Activity_Start': duration_start, 'Activity_End': duration_end, 'Duration': durations})
    except Exception as e:
        print("Exception raised in generate_activity_report() method = ", e)
def get_activities(data, target_appliance=None, threshold=None):
    """
    Compute activity (on) durations for one house or for many.

    Calls __generate_activity_report() either once (DataFrame input) or once
    per house (dict input).

    Parameters
    ----------
    data : dict or pandas.core.frame.DataFrame
        dict:
            maps house number (int) -> that house's readings DataFrame
        pandas.core.frame.DataFrame:
            a single house's readings
        every DataFrame has timestamps as index and power columns (watts),
        e.g. {'aggregate': ..., 'kettle': ...}
    target_appliance : string, optional
        name of the target column; when None it is inferred, but only for
        two-column frames (aggregate plus one appliance)
    threshold : float, optional
        power level above which the appliance counts as active; default 0.0
    returns: dict or pandas.core.frame.DataFrame
        matches the input shape; each result frame has the columns
        'Activity_Start', 'Activity_End' and 'Duration' (minutes).
        (None is returned when an exception occurs; the error is printed.)
    """
    try:
        if threshold is None:
            threshold = 0.0
        if isinstance(data, dict):
            house_activities = {}
            for key, df in data.items():
                if target_appliance is None:
                    if len(df.columns)==2:
                        # NOTE(review): once inferred from the first house,
                        # target_appliance is reused for every later house --
                        # confirm this is the intended behaviour.
                        target_appliance = df.columns[-1]
                    else:
                        raise Exception(f"Please specify target appliance {df.columns}")
                print(f"Estimating active durations of House {key}: {target_appliance}")
                house_activities.update({key: __generate_activity_report(df, target_appliance, threshold)})
            return house_activities
        elif isinstance(data, pd.core.frame.DataFrame):
            if target_appliance is None:
                if len(data.columns)==2:
                    target_appliance = data.columns[-1]
                else:
                    raise Exception(f"Please specify target_appliance \n {data.columns}")
            print(f"Estimating active durations of: {target_appliance}")
            return __generate_activity_report(data, target_appliance, threshold)
        else:
            print(f"Provided data should be of type <dict> or <pandas.core.frame.DataFrame> and not {type(data)}.")
    except Exception as e:
        print("Exception raised in get_activities() method = ", e)
def check_house_availability(arg_name, arg_value, collection):
    """
    Check whether a house number exists in the loaded dataset.

    Parameters
    ----------
    arg_name : string
        name of the parameter being validated (used in error messages)
    arg_value : int
        house number to look up
    collection : iterable
        house numbers available in the loaded dataset

    returns: boolean
        True when arg_value is an acceptable value present in collection,
        otherwise False (an explanatory message is printed)
    """
    try:
        # Reject invalid types up front so that e.g. a string that happens
        # to appear in the collection is still refused, matching the
        # "String not accepted" contract below.
        if arg_value is None:
            print(f"NoneTypeError: Argument value provided is 'None'")
            return False
        elif isinstance(arg_value, str):
            print(f"TypeError: String not accepted. Expected value of datatype <class 'int'>")
            return False
        elif arg_value in collection:
            return True
        else:
            print(f"{arg_name} = {arg_value} does not exist in the provided dataset.")
            return False
    except Exception as e:
        print("Error occured in check_availability method due to ", e)
        # explicit False instead of the original implicit None
        return False
def check_correct_datatype(arg_name, arg_value, target_datatype):
    """
    Check that a parameter value has the expected datatype.

    Parameters
    ----------
    arg_name : string
        name of the parameter being validated (used in error messages)
    arg_value : (any)
        value of the target parameter
    target_datatype : (any)
        expected datatype of the target parameter

    returns: boolean
        True when arg_value is an instance of target_datatype, otherwise
        False (an explanatory message is printed)
    """
    try:
        if isinstance(arg_value, target_datatype):
            return True
        elif arg_value is None:  # identity comparison, not ==
            print(f"NoneTypeError: Argument '{arg_name}' cannot be 'None' and it accepts datatype {target_datatype})")
            return False
        else:
            print(f"TypeError: Argument '{arg_name}' accepts datatype {target_datatype}")
            return False
    except Exception as e:
        print("Error occured in check_correct_datatype method due to ", e)
        # explicit False instead of the original implicit None
        return False
def check_list_validations(arg_name, arg_value, member_datatype):
    """
    Check that a parameter is a non-empty list.

    Parameters
    ----------
    arg_name : string
        name of the parameter being validated (used in error messages)
    arg_value : (any)
        value of the target parameter (expected to be a list)
    member_datatype : (any)
        expected datatype of the list members (only used in messages)

    returns: boolean
        True when arg_value is a non-empty list, otherwise False
        (an explanatory message is printed)
    """
    try:
        # BUGFIX: the original fell through with an implicit None when the
        # datatype check failed; return an explicit False instead.
        if not check_correct_datatype(arg_name, arg_value, list):
            return False
        if arg_value:  # truthiness instead of len(...) != 0
            return True
        print(f"Error: Empty list. Please specify some values using the argument '{arg_name}' <class 'list'>: ({member_datatype})")
        return False
    except Exception as e:
        # BUGFIX: message previously named check_correct_datatype (copy-paste)
        print("Error occured in check_list_validations method due to ", e)
        return False
from typing import *
from os import path
from .util import flatten
from .log import Logging
from .config import *
# Module-level logger: Logging(LOGLEVEL) is instantiated and then called to
# obtain the actual logger object used throughout this module.
log = Logging(LOGLEVEL)()
# Closed vocabularies accepted by the interaction commands below
# (values are spliced verbatim into the IOTCM command strings).
backends: List[str] = ["GHC", "GHCNoMain", "LaTeX", "QuickLaTeX"]
rewriteModes: List[str] = [
    "AsIs",
    "Instantiated",
    "HeadNormal",
    "Simplified",
    "Normalised",
]
computeModes: List[str] = ["DefaultCompute", "IgnoreAbstract", "UseShowInstance"]
removeOrKeep: List[str] = ["Remove", "Keep"]
useForce: List[str] = ["WithForce", "WithoutForce"]
class Position:
    """A point in a source file: absolute character offset plus line/column.

    Calling the instance renders it as the token list
    ``['(Pn ()', '<pos>', '<line>', '<col>', ')']``.
    """
    def __init__(self: Any, srcFile: str, position: int, line: int, column: int):
        # the source file must exist on disk
        assert path.exists(srcFile)
        self.srcFile = srcFile
        self.position = position
        self.line = line
        self.column = column
    def __call__(self) -> List[str]:
        coords = [str(part) for part in (self.position, self.line, self.column)]
        return ['(Pn ()', *coords, ')']
class Interval:
    """A start/end pair of Positions, rendered as a bracketed token list."""
    def __init__(self: Any, start: "Position", end: "Position"):
        self.start = start
        self.end = end
    def __call__(self: Any):
        # concatenate the rendered endpoints inside "[Interval ... ]"
        rendered = self.start() + self.end()
        return ['[Interval '] + rendered + [']']
class Range:
    """A possibly-empty list of Intervals, rendered for the interaction
    protocol ('noRange' when empty)."""
    def __init__(self: Any, intervals: Optional[List["Interval"]] = None):
        # BUGFIX: the original used a mutable default argument
        # (intervals=[]), so every default-constructed Range shared -- and
        # could corrupt -- the same list object. Use None as the sentinel.
        self.intervals = [] if intervals is None else intervals
    def __call__(self: Any):
        # Empty ranges short-circuit; this also avoids needlessly rendering
        # (and flattening) an empty interval list, with identical output.
        if not self.intervals:
            return 'noRange'
        # all intervals are assumed to live in the first interval's file
        p = self.intervals[0].start.srcFile
        intervals = flatten([i() for i in self.intervals])
        return '(intervalsToRange (Just (mkAbsolute "{p}")) {intervals})'.format(p=p, intervals=' '.join(intervals))
def rangeBuilder(f: str, p1: int, l1: int, c1: int, p2: int, l2: int, c2: int):
    """Build a single-interval Range spanning (p1, l1, c1)..(p2, l2, c2) in
    source file f.

    :raises AssertionError: when f does not exist on disk.
    """
    assert path.exists(f)
    # Pass the coordinates through as ints, matching Position's annotations.
    # The original wrapped each in str() here, which was redundant because
    # Position.__call__ stringifies every field when rendering.
    start = Position(f, p1, l1, c1)
    end = Position(f, p2, l2, c2)
    return Range([Interval(start, end)])
class Commands:
def __init__(self:Any, srcFile:str):
assert path.exists(srcFile)
self.history:List[str] = []
self.srcFile = srcFile
def __get__(self:Any, command:str):
assert hasattr(self, command)
method = getattr(self, command)
return method
def wrap(self:Any, command:str):
return 'IOTCM "{srcFile}" NonInteractive Indirect ({command})'\
.format(srcFile=self.srcFile, command=command)
def wrap_global(self:Any, command:str):
return 'IOTCM "{srcFile}" None Indirect ({command})'\
.format(srcFile=self.srcFile, command=command)
def compile(self:Any, backend:str, cmds:List[str]) -> str:
assert backend in backends, \
backend + ' should be on of ' + ', '.join(backends)
command = 'Cmd_compile {backend} "{src}" [{commands}]'.format(
backend=backend,
src=self.srcFile,
commands=','.join([ "\"" + c + "\"" for c in cmds ])
)
self.history.append(command)
return self.wrap(command)
def load(self:Any, cmds:List[str]) -> str:
command = 'Cmd_load "{src}" [{commands}]'.format(
src=self.srcFile,
commands=','.join([ "\"" + c + "\"" for c in cmds ])
)
self.history.append(command)
return self.wrap(command)
def constraints(self:Any) -> str:
command = 'Cmd_constraints'
self.history.append(command)
return self.wrap(command)
def metas(self:Any) -> str:
command = 'Cmd_metas'
self.history.append(command)
return self.wrap(command)
def show_module_contents_toplevel(self:Any, rewrite:str='Simplified', expr:str='') -> str:
assert rewrite in rewriteModes, \
rewrite + ' should be on of ' + ', '.join(rewriteModes)
command = 'Cmd_show_module_contents_toplevel {rewrite} "{expr}"'.format(
rewrite=rewrite,
expr=expr
)
self.history.append(command)
return self.wrap_global(command)
def search_about_toplevel(self:Any, rewrite:str='Simplified', expr:str='') -> str:
assert rewrite in rewriteModes, \
rewrite + ' should be on of ' + ', '.join(rewriteModes)
command = 'Cmd_search_about_toplevel {rewrite} "{expr}"'.format(
rewrite=rewrite,
expr=expr
)
self.history.append(command)
return self.wrap(command)
def solveAll(self:Any, rewrite:str='Simplified') -> str:
assert rewrite in rewriteModes, \
rewrite + ' should be on of ' + ', '.join(rewriteModes)
command = 'Cmd_solveAll {rewrite}'.format(rewrite=rewrite)
self.history.append(command)
return self.wrap(command)
def solveOne(self:Any, rewrite:str='Simplified', interactionId:int=0, where:Range=Range(), expr:str='') -> str:
assert rewrite in rewriteModes, \
rewrite + ' should be on of ' + ', '.join(rewriteModes)
command = 'Cmd_solveOne {rewrite} {interactionId} {where} "{expr}"'.format(
rewrite=rewrite,
interactionId=str(interactionId),
where=where(),
expr=expr
)
self.history.append(command)
return self.wrap(command)
def autoAll(self:Any) -> str:
command = 'Cmd_autoAll'
self.history.append(command)
return self.wrap(command)
def autoOne(self:Any, interactionId:int=0, where:Range=Range(), expr:str='') -> str:
command = 'Cmd_autoOne {interactionId} {where} "{expr}"'.format(
interactionId=str(interactionId),
where=where(),
expr=expr
)
self.history.append(command)
return self.wrap(command)
def auto(self:Any, interactionId:int=0, where:Range=Range(), expr:str='') -> str:
command = 'Cmd_auto {interactionId} {where} "{expr}"'.format(
interactionId=str(interactionId),
where=where(),
expr=expr
)
self.history.append(command)
return self.wrap(command)
def infer_toplevel(self:Any, rewrite:str='Simplified', expr:str='') -> str:
assert rewrite in rewriteModes, \
rewrite + ' should be on of ' + ', '.join(rewriteModes)
command = 'Cmd_infer_toplevel {rewrite} "{expr}"'.format(
rewrite=rewrite,
expr=expr
)
self.history.append(command)
return self.wrap(command)
def compute_toplevel(self:Any, computeMode:str='DefaultCompute', expr:str='') -> str:
assert computeMode in computeModes, \
computeMode + ' should be on of ' + ', '.join(computeModes)
command = 'Cmd_compute_toplevel {computeMode} "{expr}"'.format(
computeMode=computeMode,
expr=expr
)
self.history.append(command)
return self.wrap_global(command)
def load_highlighting_info(self:Any) -> str:
command = 'Cmd_load_highlighting_info "{src}"'.format(src=self.srcFile)
self.history.append(command)
return self.wrap(command)
def tokenHighlighting(self:Any, remove:str) -> str:
assert remove in removeOrKeep, \
remove + ' should be on of ' + ', '.join(removeOrKeep)
command = 'Cmd_tokenHighlighting "{src}" {remove}'.format(
src=self.srcFile,
remove=remove
)
self.history.append(command)
return self.wrap(command)
def highlight(self:Any, interactionId:int=0, where:Range=Range()) -> str:
command = 'Cmd_highlight {interactionId} {where} "{src}"'.format(
interactionId=str(interactionId),
where=where(),
src=self.srcFile
)
self.history.append(command)
return self.wrap(command)
def give(self:Any, force:str, interactionId:int=0, where:Range=Range(), expr:str='') -> str:
assert force in useForce, \
force + ' should be on of ' + ', '.join(useForce)
expr = expr if expr != '' else self.srcFile
command = 'Cmd_give {force} {interactionId} {where} "{src}"'.format(
force=force,
interactionId=str(interactionId),
where=where(),
src=expr
)
self.history.append(command)
return self.wrap(command)
def refine(self:Any, interactionId:int=0, where:Range=Range(), expr:str='') -> str:
expr = expr if expr != '' else self.srcFile
command = 'Cmd_refine {interactionId} {where} "{src}"'.format(
interactionId=str(interactionId),
where=where(),
src=expr
)
self.history.append(command)
return self.wrap(command)
def intro(self:Any, whether:bool, interactionId:int=0, where:Range=Range(), expr:str='') -> str:
expr = expr if expr != '' else self.srcFile
command = 'Cmd_intro {whether} {interactionId} {where} "{src}"'.format(
whether='True' if whether else 'False',
interactionId=str(interactionId),
where=where(),
src=expr
)
self.history.append(command)
return self.wrap(command)
def refine_or_intro(self:Any, whether:bool, interactionId:int=0, where:Range=Range(), expr:str='') -> str:
expr = expr if expr != '' else self.srcFile
command = 'Cmd_refine_or_intro {whether} {interactionId} {where} "{src}"'.format(
whether='True' if whether else 'False',
interactionId=str(interactionId),
where=where(),
src=expr
)
self.history.append(command)
return self.wrap(command)
def context(self:Any, rewrite:str='Simplified', interactionId:int=0, where:Range=Range(), expr:str='') -> str:
assert rewrite in rewriteModes, \
rewrite + ' should be on of ' + ', '.join(rewriteModes)
expr = expr if expr != '' else self.srcFile
command = 'Cmd_context {rewrite} {interactionId} {where} "{src}"'.format(
rewrite=rewrite,
interactionId=interactionId,
where=where(),
src=expr
)
self.history.append(command)
return self.wrap(command)
def helper_function(self:Any, rewrite:str='Simplified', interactionId:int=0, where:Range=Range(), expr:str='') -> str:
assert rewrite in rewriteModes, \
rewrite + ' should be on of ' + ', '.join(rewriteModes)
expr = expr if expr != '' else self.srcFile
command = 'Cmd_helper_function {rewrite} {interactionId} {where} "{src}"'.format(
rewrite=rewrite,
interactionId=interactionId,
where=where(),
src=expr
)
self.history.append(command)
return self.wrap(command)
def infer(self:Any, rewrite:str='Simplified', interactionId:int=0, where:Range=Range(), expr:str='') -> str:
assert rewrite in rewriteModes, \
rewrite + ' should be on of ' + ', '.join(rewriteModes)
expr = expr if expr != '' else self.srcFile
command = 'Cmd_infer {rewrite} {interactionId} {where} "{src}"'.format(
rewrite=rewrite,
interactionId=interactionId,
where=where(),
src=expr
)
self.history.append(command)
return self.wrap(command)
def goal_type(self:Any, rewrite:str='Simplified', interactionId:int=0, where:Range=Range(), expr:str='') -> str:
assert rewrite in rewriteModes, \
rewrite + ' should be on of ' + ', '.join(rewriteModes)
expr = expr if expr != '' else self.srcFile
command = 'Cmd_goal_type {rewrite} {interactionId} {where} "{src}"'.format(
rewrite=rewrite,
interactionId=interactionId,
where=where(),
src=expr
)
self.history.append(command)
return self.wrap(command)
def elaborate_give(self:Any, rewrite:str='Simplified', interactionId:int=0, where:Range=Range(), expr:str='') -> str:
assert rewrite in rewriteModes, \
rewrite + ' should be on of ' + ', '.join(rewriteModes)
expr = expr if expr != '' else self.srcFile
command = 'Cmd_elaborate_give {rewrite} {interactionId} {where} "{src}"'.format(
rewrite=rewrite,
interactionId=interactionId,
where=where(),
src=expr
)
self.history.append(command)
return self.wrap(command)
def goal_type_context(self:Any, rewrite:str='Simplified', interactionId:int=0, where:Range=Range(), expr:str='') -> str:
assert rewrite in rewriteModes, \
rewrite + ' should be on of ' + ', '.join(rewriteModes)
expr = expr if expr != '' else self.srcFile
command = 'Cmd_goal_type_context {rewrite} {interactionId} {where} "{src}"'.format(
rewrite=rewrite,
interactionId=interactionId,
where=where(),
src=expr
)
self.history.append(command)
return self.wrap(command)
def goal_type_context_infer(self:Any, rewrite:str='Simplified', interactionId:int=0, where:Range=Range(), expr:str='') -> str:
assert rewrite in rewriteModes, \
rewrite + ' should be on of ' + ', '.join(rewriteModes)
expr = expr if expr != '' else self.srcFile
command = 'Cmd_goal_type_context_infer {rewrite} {interactionId} {where} "{src}"'.format(
rewrite=rewrite,
interactionId=interactionId,
where=where(),
src=expr
)
self.history.append(command)
return self.wrap(command)
def goal_type_context_check(self:Any, rewrite:str='Simplified', interactionId:int=0, where:Range=Range(), expr:str='') -> str:
assert rewrite in rewriteModes, \
rewrite + ' should be on of ' + ', '.join(rewriteModes)
expr = expr if expr != '' else self.srcFile
command = 'Cmd_goal_type_context_check {rewrite} {interactionId} {where} "{src}"'.format(
rewrite=rewrite,
interactionId=interactionId,
where=where(),
src=expr
)
self.history.append(command)
return self.wrap(command)
def show_module_contents(self:Any, rewrite:str='Simplified', interactionId:int=0, where:Range=Range(), expr:str='') -> str:
assert rewrite in rewriteModes, \
rewrite + ' should be on of ' + ', '.join(rewriteModes)
expr = expr if expr != '' else self.srcFile
command = 'Cmd_show_module_contents {rewrite} {interactionId} {where} "{src}"'.format(
rewrite=rewrite,
interactionId=interactionId,
where=where(),
src=expr
)
self.history.append(command)
return self.wrap(command)
def make_case(self:Any, interactionId:int=0, where:Range=Range(), expr:str='') -> str:
expr = expr if expr != '' else self.srcFile
command = 'Cmd_make_case {interactionId} {where} "{src}"'.format(
interactionId=interactionId,
where=where(),
src=expr
)
self.history.append(command)
return self.wrap(command)
def why_in_scope(self:Any, interactionId:int=0, where:Range=Range(), expr:str='') -> str:
expr = expr if expr != '' else self.srcFile
command = 'Cmd_why_in_scope {interactionId} {where} "{src}"'.format(
interactionId=interactionId,
where=where(),
src=expr
)
self.history.append(command)
return self.wrap(command)
def compute(self:Any, computeMode:str='DefaultCompute', interactionId:int=0, where:Range=Range(), expr:str='') -> str:
assert computeMode in computeModes, \
computeMode + ' should be on of ' + ', '.join(computeModes)
expr = expr if expr != '' else self.srcFile
command = 'Cmd_compute {computeMode} {interactionId} {where} "{src}"'.format(
computeMode=computeMode,
interactionId=interactionId,
where=where(),
src=expr
)
self.history.append(command)
return self.wrap(command)
def why_in_scope_toplevel(self:Any, expr:str='') -> str:
command = 'Cmd_why_in_scope_toplevel "{expr}"'.format(expr=expr)
self.history.append(command)
return self.wrap(command)
def show_version(self:Any) -> str:
command = 'Cmd_show_version'
self.history.append(command)
return self.wrap(command)
def abort(self:Any) -> str:
command = 'Cmd_abort'
self.history.append(command)
return self.wrap(command) | /refl-0.1-py3-none-any.whl/src/commands.py | 0.756088 | 0.327561 | commands.py | pypi |
import numpy as np
import random
import torch
from reflective_listening import Parametric
from transformers import PegasusForConditionalGeneration, PegasusTokenizer
random.seed(10)
def concat_start(text):
"""Concatenate standard reflective listening phrases"""
starts = ["It sounds like ", "I understand, seems like ", "I get a sense that ", "It seems like ", "I see, so "]
return random.choice(starts) + text
def flip_pov(text):
"""Flip the P.O.V from the speaker to the listener (I <-> you)"""
subject_flip = {
"I": "you",
"my": "your",
"My": "Your",
"I'm": "you're",
"am": "are",
"we": "you",
"We": "You",
"myself": "yourself",
"Myself": "Yourself",
"I'd": "you'd",
}
text = text.split()
for idx, word in enumerate(text):
if word in subject_flip:
text[idx] = subject_flip[word]
text = ' '.join(text)
lowercase = lambda s: s[:1].lower() + s[1:] if s else ''
return lowercase(text)
class ReflectiveListening:
"""
A class to generate reflective listening statements via paraphrase generation
For example:
Statement: "My teeth can be sensitive at times due to TMJ issues."
Reflective listening response: "I understand, so your teeth are sensitive due to temporomandibular disorders."
"""
torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
def __init__(self):
self.model_name = 'tuner007/pegasus_paraphrase'
self.pegasus_tokenizer = PegasusTokenizer.from_pretrained(self.model_name)
self.pegasus_model = PegasusForConditionalGeneration.from_pretrained(self.model_name) \
.to(ReflectiveListening.torch_device)
def get_paraphrase(self, input_text):
"""
Obtains paraphrase of a text using the PEGASUS model https://huggingface.co/tuner007/pegasus_paraphrase
20 candidate paraphrases are generated using beam search, and scored against the Parametric score. The highest
scoring paraphrase is returned.
:param input_text: Original text to be paraphrased
:return: Paraphrase with the highest Parametric score
"""
batch = self.pegasus_tokenizer([input_text], truncation=True, padding='longest', max_length=60,
return_tensors="pt").to(ReflectiveListening.torch_device)
paraphrased = self.pegasus_model.generate(
**batch,
max_length=60,
num_beams=20,
num_return_sequences=20,
temperature=1.5,
early_stopping=True
)
paraphrases = self.pegasus_tokenizer.batch_decode(paraphrased, skip_special_tokens=True)
parametric = Parametric()
scores = [parametric.aggregate_score(input_text, para)['overall'] for para in paraphrases]
return paraphrases[np.argmax(scores)]
def get_response(self, input_text):
"""
Obtains the final response by paraphrasing the input text, then flipping the P.O.V, and concatenating standard
reflective listening phrases at the start e.g. "I understand, "
"""
paraphrase = self.get_paraphrase(input_text)
flipped = flip_pov(paraphrase)
response = concat_start(flipped)
return response | /reflective_listening-0.1.3-py3-none-any.whl/reflective_listening/reflective_listening.py | 0.799011 | 0.283273 | reflective_listening.py | pypi |
import language_tool_python
import nltk
from nltk.tokenize import word_tokenize
from scipy import spatial
from sentence_transformers import SentenceTransformer
def num_of_tokens(text):
"""Counts the number of words in a text"""
splits = nltk.word_tokenize(text)
words = [word for word in splits if word.isalpha()]
return len(words)
def containment_measure(original, paraphrase):
"""Counts the containment measure between 2 texts based on trigram similarity"""
original = original.replace("\n", " ")
paraphrase = paraphrase.replace("\n", " ")
# Tokenize words
tokens_o = word_tokenize(original)
tokens_p = word_tokenize(paraphrase)
# Lowercase
tokens_o = [token.lower() for token in tokens_o]
tokens_p = [token.lower() for token in tokens_p]
# Trigram Similarity measures
trigrams_o = []
for i in range(len(tokens_o) - 2):
t = (tokens_o[i], tokens_o[i + 1], tokens_o[i + 2])
trigrams_o.append(t)
s = 0
trigrams_p = []
for i in range(len(tokens_p) - 2):
t = (tokens_p[i], tokens_p[i + 1], tokens_p[i + 2])
trigrams_p.append(t)
if t in trigrams_o:
s += 1
# To avoid division by zero when the text has fewer than 3 words
num_trigrams_p = len(trigrams_p)
if num_trigrams_p is 0:
num_trigrams_p = 1
# Containment measure
C = s / num_trigrams_p
return C
class Parametric:
"""
A class to represent the ParaMetric: an evaluation metric for the quality of a paraphrase.
The metric is a weighted average of 3 components, each corresponding to a notion of what makes a good paraphrase:
- 'similarity' score: cosine-similarity of Sentence-BERT embeddings of the original and the paraphrase
- 'grammar' score: 1 - ratio of number of grammar errors detected by Language Tool, over the number of tokens
- 'structure' score: 1 - containment measure of the original and paraphrase based on trigrams
The default weights are 0.8, 0.1 and 0.1 for each component respectively, but it can be modified.
"""
def __init__(self):
self.lang_tool = language_tool_python.LanguageTool('en-US')
self.bert_model = SentenceTransformer('bert-base-nli-mean-tokens')
def grammar_score(self, text):
"""Measures the grammaticality of a text (higher means more grammatical)"""
num_of_errors = len(self.lang_tool.check(text))
num_tokens = num_of_tokens(text)
if num_tokens == 0:
return 1
return 1 - (num_of_errors / num_tokens)
def similarity_score(self, original, paraphrase):
"""Measures the semantic relatedness of 2 texts (higher means more similar meaning)"""
vector_original = self.bert_model.encode(original)
vector_paraphrase = self.bert_model.encode(paraphrase)
return 1 - spatial.distance.cosine(vector_original, vector_paraphrase)
def structure_score(self, original, paraphrase):
"""Measures the difference in structure between 2 texts (higher means more differences)"""
return 1 - containment_measure(original, paraphrase)
def aggregate_score(self, original, paraphrase, embed_wt=0.8, grammar_wt=0.1, structure_wt=0.1):
"""Weighted average of grammar, similarity and structure score"""
# Empty string should have a score of 0
if len(paraphrase) is 0:
return 0, 0, 0, 0
similarity = self.similarity_score(original, paraphrase)
grammar = self.grammar_score(paraphrase)
structure = self.structure_score(original, paraphrase)
weighted_avg = embed_wt * similarity + grammar_wt * grammar + structure_wt * structure
return {
'similarity': similarity,
'grammar': grammar,
'structure': structure,
'overall': weighted_avg,
} | /reflective_listening-0.1.3-py3-none-any.whl/reflective_listening/parametric.py | 0.710528 | 0.564699 | parametric.py | pypi |
<!--
SPDX-FileCopyrightText: 2022 Gregory Clunies <greg@reflekt-ci.com>
SPDX-License-Identifier: Apache-2.0
-->
# Reflekt



> ***Product analytics is a team sport***
Reflekt helps Data, Engineering, and Product teams work together to define, manage, and model events for product analytics. Reflekt integrates with [schema registries](#interacting-with-schema-registries), cloud [data warehouses]((#supported-data-warehouses)), and [dbt](#dbt-artifacts).
- Define event schemas (aka data contracts) as `code` using [jsonschema](https://json-schema.org/). Schemas are version controlled, and stored in a GitHub repo.
- Configure naming and metadata conventions for events and properties. Lint schemas to test for compliance.
- Open pull requests (PRs) to propose schema changes, get input, and request reviews.
- Easily build a CI suite to [lint](#linting-schemas) schemas, [push](#push-schemas-to-a-registry) them to a schema registry, and [build corresponding dbt artifacts](#building-private-dbt-packages).
https://user-images.githubusercontent.com/28986302/217134526-df83ec90-86f3-491e-9588-b7cd56956db1.mp4
## Table of Contents
- [Getting Started](#getting-started)<br>
- [Installation](#installation)<br>
- [Reflekt `--help`](#reflekt-help)<br>
- [Creating a project](#creating-a-project)<br>
- [Project configuration](#project-configuration)<br>
- [Using Reflekt](#using-reflekt)<br>
- [Defining schemas](#defining-schemas)<br>
- [Identifying and selecting schemas](#identifying-and-selecting-schemas)<br>
- [Schema versions](#schema-versions)<br>
- [Linting schemas](#linting-schemas)<br>
- [Interacting with schema registries](#interacting-with-schema-registries)<br>
- [Building dbt artifacts](#dbt-artifacts)<br>
- [Supported data warehouses](#supported-data-warehouses)<br>
## Getting Started
### Installation
Reflekt is available on [PyPI](https://pypi.org/project/reflekt/). Install with `pip` (or package manager of choice), preferably in a virtual environment:
```bash
❯ source /path/to/venv/bin/activate # Activate virtual environment
❯ pip install reflekt # Install Reflekt
❯ reflekt --version # Confirm installation
Reflekt CLI Version: 0.3.1
```
### Reflekt `--help`
The `--help` flag provides an overview of available `reflekt` commands.
```bash
❯ reflekt --help # Show general --help details
Usage: reflekt [OPTIONS] COMMAND [ARGS]...
Reflekt CLI.
╭─ Options ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
│ --version │
│ --install-completion [bash|zsh|fish|powershell|pwsh] Install completion for the specified shell. [default: None] │
│ --show-completion [bash|zsh|fish|powershell|pwsh] Show completion for the specified shell, to copy it or customize the installation. [default: None] │
│ --help Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
╭─ Commands ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
│ build Build data artifacts based on schemas. │
│ debug Check Reflekt project configuration. │
│ init Initialize a Reflekt project. │
│ lint Lint schema(s) to test for naming and metadata conventions. │
│ pull Pull schema(s) from a schema registry. │
│ push Push schema(s) to a schema registry. │
╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
```
Each command also has a `--help` flag providing command details (arguments, options, syntax, etc.).
```bash
❯ reflekt lint --help # Show --help details for `reflekt lint`
Usage: reflekt lint [OPTIONS]
Lint schema(s) to test for naming and metadata conventions.
╭─ Options ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
│ * --select -s TEXT Schema(s) to lint. [default: None] [required] │
│ --help Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
```
### Creating a project
Create a new directory, initialize a new Git repo, and run `reflekt init` to create a new Reflekt project.
```bash
❯ mkdir ~/Repos/my-reflekt-project # Create a new directory for the project
❯ cd ~/Repos/my-reflekt-project # Navigate to the project directory
❯ git init # Initialize a new Git repo
❯ reflekt init # Initialize a new Reflekt project in the current directory
# Follow the prompts to configure the project
```
This will create a new Reflekt project with the following structure:
```bash
my-reflekt-project
├── .logs/ # Reflekt command logs
├── .reflekt_cache/ # Local cache used by Reflekt
├── artifacts/ # Where Reflekt builds data artifacts (i.e., dbt packages)
├── schemas/ # Where event schemas are defined and stored
├── .gitignore
├── README.md
└── reflekt_project.yml # Project configuration
```
### Project configuration
Reflekt uses 3 files to configure a project: `reflekt_project.yml`, `reflekt_profiles.yml`, and `schemas/.reflekt/meta/1-0.json`. Under the hood, Reflekt validates these configuration files before running, raising errors if an invalid configuration is detected. Examples of each file with configuration details are found below.
#### **reflekt_project.yml**
Contains general project settings as well as configuration for schema conventions, schema registry details (if needed), and data artifact generation. Click to expand the example below with details on each setting.
<details>
<summary><code>example_reflekt_project.yml</code>(CLICK TO EXPAND)</summary>
<br>
```yaml
# Example reflekt_project.yml
# GENERAL CONFIG ----------------------------------------------------------------------
version: 1.0
name: reflekt_demo # Project name
vendor: com.company_name # Default vendor for schemas in reflekt project
default_profile: dev_reflekt # Default profile to use from reflekt_profiles.yml
profiles_path: ~/.reflekt/reflekt_profiles.yml # Path to reflekt_profiles.yml
# SCHEMAS CONFIG ----------------------------------------------------------------------
schemas: # Define schema conventions
conventions:
event:
casing: title # title | snake | camel | pascal | any
numbers: false # Allow numbers in event names
reserved: [] # Reserved event names
property:
casing: snake # title | snake | camel | pascal | any
numbers: false # Allow numbers in property names
reserved: [] # Reserved property names
data_types: [ # Allowed data types
string, integer, number, boolean, object, array, any, 'null'
]
# REGISTRY CONFIG ---------------------------------------------------------------------
registry: # Additional config for schema registry if needed
avo: # Avo specific config
branches: # Provide ID for Avo branches for `reflekt pull` to work
staging: AbC12dEfG # Safe to version control (See Avo docs to find branch ID: https://bit.ly/avo-docs-branch-id)
main: main # 'main' always refers to the main branch
# ARTIFACTS CONFIG -----------------------------------------------------------------------
artifacts: # Configure how data artifacts are built
dbt: # dbt package config
sources:
prefix: __src_ # Source files will start with this prefix
models:
prefix: stg_ # Model files will start with this prefix
docs:
prefix: _stg_ # Docs files will start with this prefix
in_folder: false # Docs files in separate folder?
tests: # dbt tests to add based on column name (can be empty dict {})
id: [unique, not_null]
```
</details>
<br>
#### **reflekt_profiles.yml**
Contains connection details for schema registries (used to validate event data) and data sources (i.e., data warehouse with raw event data). Click to expand the example below with details on each setting.
<details>
<summary><code>example_reflekt_profiles.yml</code>(CLICK TO EXPAND)</summary>
<br>
```yaml
# Example reflekt_profiles.yml
version: 1.0
dev_reflekt: # Profile name (multiple profiles can be defined)
# Define connections to schema registries (multiple allowed)
registry:
- type: segment
api_token: segment_api_token # https://docs.segmentapis.com/tag/Getting-Started#section/Get-an-API-token
- type: avo
workspace_id: avo_workspace_id # https://www.avo.app/docs/public-api/export-tracking-plan#endpoint
service_account_name: avo_service_account_name # https://www.avo.app/docs/public-api/authentication#creating-service-accounts
service_account_secret: avo_service_account_secret
# Connections to data sources (data warehouses) where event data is stored.
# Sources are uniquely identified by their ID and are used in the `--source` arg when running `reflekt build`.
source:
- id: snowflake # For simplicity, we use the same ID as the source type.
type: snowflake # Snowflake DWH. Credentials follow.
account: abc12345
database: raw
warehouse: transforming
role: transformer
user: reflekt_user
password: reflekt_user_password
- id: redshift # For simplicity, we use the same ID as the source type.
type: redshift # Redshift DWH. Credentials follow.
host: example-redshift-cluster-1.abc123.us-west-1.redshift.amazonaws.com
database: analytics
port: 5439
user: reflekt_user
password: reflekt_user_password
```
</details>
<br>
#### **schemas/.reflekt/meta/1-0.json**
A meta-schema used to validate all event schemas in the project. Under the hood, Reflekt uses this meta-schema along with the naming conventions defined in the `reflekt_project.yml` file to validate all event schemas.
To define ***required metadata*** for all event schemas in your project, you can update the `metadata` object in `schemas/.reflekt/meta/1-0.json`. See the example below showing how to require both **code owner** and **product owner** metadata.
<details>
<summary><code>schemas/.reflekt/meta/1-0.json</code>(CLICK TO EXPAND)</summary>
<br>
```json
{
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": ".reflekt/meta/1-0.json",
"description": "Meta-schema for all Reflekt events",
"self": {
"vendor": "reflekt",
"name": "meta",
"format": "jsonschema",
"version": "1-0"
},
"type": "object",
"allOf": [
{
"$ref": "http://json-schema.org/draft-07/schema#"
},
{
"properties": {
"self": {
"type": "object",
"properties": {
"vendor": {
"type": "string",
"description": "The company, application, team, or system that authored the schema (e.g., com.company, com.company.android, com.company.marketing)"
},
"name": {
"type": "string",
"description": "The schema name. Describes what the schema is meant to capture (e.g., pageViewed, clickedLink)"
},
"format": {
"type": "string",
"description": "The format of the schema",
"const": "jsonschema"
},
"version": {
"type": "string",
"description": "The schema version, in MODEL-ADDITION format (e.g., 1-0, 1-1, 2-3, etc.)",
"pattern": "^[1-9][0-9]*-(0|[1-9][0-9]*)$"
},
"metadata": { // EXAMPLE: Defining required metadata (code_owner, product_owner)
"type": "object",
"description": "Required metadata for all event schemas",
"properties": {
"code_owner": {"type": "string"},
"product_owner": {"type": "string"}
},
"required": ["code_owner", "product_owner"],
"additionalProperties": false
},
},
"required": ["vendor", "name", "format", "version"],
"additionalProperties": false
},
"properties": {},
"tests": {},
},
"required": ["self", "metadata", "properties"]
}
]
}
```
</details>
<br>
<br>
## Using Reflekt
### Defining schemas
Event schemas are defined using [jsonschema](https://json-schema.org/). Each schema is defined as a separate JSON file, stored in the `schemas/` directory of a Reflekt project. An example `ProductClicked` event schema is shown below.
<details>
<summary><code>my-reflekt-project/schemas/segment/ecommerce/ProductClicked/1-0.json</code>(CLICK TO EXPAND)</summary>
<br>
```json
{
"$id": "segment/ecommerce/ProductClicked/1-0.json", // Unique ID for the schema
"$schema": "http://json-schema.org/draft-07/schema#", // JSON Schema version
"self": {
"vendor": "com.company_name", // Company, application, team, or system that authored the schema
"name": "ProductClicked", // Name of the event
"format": "jsonschema", // Format of the schema
"version": "1-0", // Version of the schema
"metadata": { // Metadata for the event
"code_owner": "engineering/ecommerce-squad",
"product_owner": "product_manager_name@company_name.com",
}
},
"type": "object",
"properties": { // Properties of the event
"product_id": {
"type": "string",
"description": "Database id of the product being viewed"
},
"sku": {
"type": "string",
"description": "Sku of the product being viewed"
},
"category": {
"type": "string",
"description": "Category of the product being viewed"
},
"name": {
"type": "string",
"description": "Name of the product being viewed"
},
"brand": {
"type": "string",
"description": "Brand of the product being viewed"
},
"variant": {
"type": "string",
"description": "Variant of the product being viewed"
},
"price": {
"type": "number",
"description": "Price of the product ($) being viewed"
},
"quantity": {
"type": "integer",
"description": "Quantity of the product being viewed"
},
"coupon": {
"type": "string",
"description": "Coupon code associated with a product (for example, MAY_DEALS_3)"
},
"position": {
"type": "integer",
"description": "Position in the product list (ex. 3)"
},
"url": {
"type": "string",
"description": "URL of the product being viewed"
},
"image_url": {
"type": "string",
"description": "URL of the product image being viewed"
},
},
"required": [ // Required properties
"product_id",
"sku",
"category",
"name",
"brand",
"price",
"quantity"
],
"additionalProperties": false, // No additional properties allowed
}
```
</details>
<br>
### Identifying and selecting schemas
Schemas are uniquely identified by their `$id`, which is determine by their path relative to the `schemas/` directory. For example:
| Path to schema | Schema `$id` |
|---------------------------------------------------------------------|------------------------------------------|
| `my-reflekt-project/schemas/segment/ecommerce/CartViewed/1-0.json` | `segment/ecommerce/CartViewed/1-0.json` |
| `my-reflekt-project/schemas/segment/ecommerce/LinkClicked/2-1.json` | `segment/ecommerce/LinkClicked/2-1.json` |
These `$id`s are used to `--select` schemas when running Reflekt commands. For example:
```bash
❯ reflekt lint --select segment/ecommerce/CartViewed/1-0.json # Lint version 1-0 of the CartViewed schema
❯ reflekt lint --select "segment/ecommerce/Link Clicked/2-1.json" # $ids with spaces must be surrounded by quotes
❯ reflekt lint --select segment/ecommerce # Lint all schemas in the segment/ecommerce directory
```
### Schema versions
As data collection requirements change, event schemas must be updated to *reflekt* the new schema. Reflekt supports schema evolution by defining a `version` for each schema, starting at `1-0` and following a `MAJOR-MINOR` version spec. The definition of `MAJOR` and `MINOR` is as follows:
- **MAJOR** - Breaking schema changes incompatible with previous data. Examples:
- Add/remove/rename a required property
- Change a property from *optional to required*
- Change a property's type
- **MINOR** - Non-breaking schema changes compatible with previous data. Examples:
- Add/remove/rename an optional property
- Change a property from *required to optional*
When defining a new schema version, **create a new file** with the incremented version and updated schema definition.
### Interacting with schema registries
Schema registries are used to store and serve schemas. Once a schema is in a registry, it can be used to validate event data against the schema to ensure event data quality. Reflekt supports interacting with schema registries to push (publish) and pull (retrieve) schemas. Currently, the following registries are supported:
| **Registry** | **`--push` support** | **`--pull` support** | **Schema `--select` syntax** | **Schema `version` support** |
|-----------------------|:--------:|:--------:|----------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| **Segment Protocols** | ✅ | ✅ | `--select segment/tracking_plan_name` | Only supports `MAJOR-0` versions. |
| **Avo** | ❌ | ✅ | `--select avo/branch_name` | Schema changes managed in Avo [branches](https://www.avo.app/docs/workspace/branches) - `"version": "1-0"` (always).<br> Avo customers pull schemas with `reflekt pull` and build dbt artifacts with `reflekt build`. |
#### Pull schemas from a registry
Pulling schemas from a registry is as easy as ...
```bash
❯ reflekt pull --select segment/ecommerce
[19:28:32] INFO Running with reflekt=0.3.1
[19:28:32] INFO Searching Segment for schemas
[19:28:33] INFO Found 9 schemas to pull:
[19:28:33] INFO 1 of 9 Writing to /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Identify/1-0.json
[19:28:34] INFO 2 of 9 Writing to /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Group/1-0.json
[19:28:34] INFO 3 of 9 Writing to /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Cart Viewed/1-0.json
[19:28:34] INFO 4 of 9 Writing to /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Checkout Started/1-0.json
[19:28:34] INFO 5 of 9 Writing to /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Checkout Step Completed/1-0.json
[19:28:34] INFO 6 of 9 Writing to /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Checkout Step Viewed/1-0.json
[19:28:34] INFO 7 of 9 Writing to /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Order Completed/1-0.json
[19:28:34] INFO 8 of 9 Writing to /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Page Viewed/1-0.json
[19:28:34] INFO 9 of 9 Writing to /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Product Added/1-0.json
[19:28:34] INFO Completed successfully
```
The `reflekt pull` command builds the corresponding JSON files in the `schemas/` directory. For the example above, the resulting directory structure would be:
```bash
schemas
├── .reflekt
└── segment
└── ecommerce
├── Cart Viewed
│ └── 1-0.json
├── Checkout Started
│ └── 1-0.json
├── Checkout Step Completed
│ └── 1-0.json
├── Checkout Step Viewed
│ └── 1-0.json
├── Group
│ └── 1-0.json
├── Identify
│ └── 1-0.json
├── Order Completed
│ └── 1-0.json
├── Page Viewed
│ └── 1-0.json
└── Product Added
└── 1-0.json
```
#### Push schemas to a registry
Publishing schemas to a registry follows the same pattern ...
```bash
❯ reflekt push --select segment/ecommerce
[19:29:06] INFO Running with reflekt=0.3.1
[19:29:07] INFO Searching for JSON schemas in: /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce
[19:29:07] INFO Found 9 schemas to push
[19:29:07] INFO 1 of 9 Pushing /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Identify/1-0.json
[19:29:07] INFO 2 of 9 Pushing /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Cart Viewed/1-0.json
[19:29:07] INFO 3 of 9 Pushing /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Checkout Step Viewed/1-0.json
[19:29:07] INFO 4 of 9 Pushing /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Group/1-0.json
[19:29:07] INFO 5 of 9 Pushing /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Order Completed/1-0.json
[19:29:07] INFO 6 of 9 Pushing /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Checkout Step Completed/1-0.json
[19:29:07] INFO 7 of 9 Pushing /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Checkout Started/1-0.json
[19:29:07] INFO 8 of 9 Pushing /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Page Viewed/1-0.json
[19:29:07] INFO 9 of 9 Pushing /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Product Added/1-0.json
[19:29:08] INFO Completed successfully
```
<br>
### Linting schemas
Schemas can be linted to test if they follow the naming and metadata conventions configured for a Reflekt project.
```bash
❯ reflekt lint --select segment/ecommerce
[18:57:45] INFO Running with reflekt=0.3.1
[18:57:46] INFO Searching for JSON schemas in: /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce
[18:57:46] INFO Found 9 schema(s) to lint
[18:57:46] INFO 1 of 9 Linting /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Identify/1-0.json
[18:57:47] INFO 2 of 9 Linting /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Cart Viewed/1-0.json
[18:57:48] ERROR Property 'cartId' in /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Cart Viewed/1-0.json does not match naming convention 'casing: snake' in
/Users/myuser/Repos/my-reflekt-project/reflekt_project.yml.
[18:57:48] INFO 3 of 9 Linting /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Checkout Step Viewed/1-0.json
[18:57:50] INFO 4 of 9 Linting /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Group/1-0.json
[18:57:50] INFO 5 of 9 Linting /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Order Completed/1-0.json
[18:57:54] INFO 6 of 9 Linting /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Checkout Step Completed/1-0.json
[18:57:55] INFO 7 of 9 Linting /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Checkout Started/1-0.json
[18:57:58] INFO 8 of 9 Linting /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Page Viewed/1-0.json
[18:58:01] INFO 9 of 9 Linting /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Product Added/1-0.json
[18:58:04] ERROR Linting failed with 1 error(s):
[18:58:04] ERROR Property 'cartId' in /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Cart Viewed/1-0.json does not match naming convention 'casing: snake' in
/Users/myuser/Repos/my-reflekt-project/reflekt_project.yml.
```
Running `reflekt lint` in a CI/CD pipeline is a great way to ensure schema consistency and quality before pushing schemas to a schema registry.
<br>
## dbt artifacts
[dbt](https://www.getdbt.com/) is a popular data tool to transform and model data. When modeling data in dbt, it is [best practice](https://docs.getdbt.com/guides/best-practices/how-we-structure/1-guide-overview) to:
- Define sources pointing to the raw data.
- Define staging models, 1-to-1 for each source, that [rename, recast, or usefully reconsider](https://discourse.getdbt.com/t/how-we-used-to-structure-our-dbt-projects/355#data-transformation-101-1) columns into a consistent format. Materialized as views.
- Document staging models with descriptions for the model and its fields, including relevant tests (e.g., unique and not_null IDs) as required.
While we recommend following this practice, it can be ***a lot of work to maintain*** for product analytics, where:
- There are many events (often 100+) and properties.
- Events and properties are added or updated regularly as the product and data requirements evolve.
- The Product and Engineer teams are bigger than the Data team, making it hard to keep up with the changes.
**Reflekt can help by building dbt artifacts for you with a single CLI command.** Think of this as dbt's [codegen](https://github.com/dbt-labs/dbt-codegen) package on steroids :muscle: :pill:.
### Building private dbt packages
To build a private dbt package with sources, staging models, and docs that perfectly *reflekts* the schemas in a Reflekt project and the raw data in the warehouse, you can run a command like the example below.
Where:
- `--select segment/ecommerce` selects all the schemas in the `schemas/segment/ecommerce/` directory.
- `--source snowflake.raw.ecomm_demo` specifies to connect to a data source with ID `snowflake`, database `raw`, and schema `ecomm_demo` as defined in `reflekt_profiles.yml`.
- `--sdk segment` specifies the event data was collected via the Segment SDK. Reflekt knows how Segment loads data into data warehouses and writes SQL models accordingly.
If an event schema has multiple versions, Reflekt builds a staging model for both versions, allowing the Data team to easily consolidate schema changes as needed.
```bash
❯ reflekt build dbt --select segment/ecommerce --source snowflake.raw.ecomm_demo --sdk segment
[09:45:23] INFO Running with reflekt=0.3.1
[09:45:24] INFO Searching for JSON schemas in: /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce
[09:45:24] INFO Found 10 schemas to build
[09:45:24] INFO Building dbt package:
name: reflekt_demo
dir: /Users/myuser/Repos/my-reflekt-project/artifacts/dbt/reflekt_demo
--select: segment/ecommerce
--sdk_arg: segment
--source: snowflake.raw.ecomm_demo
[09:45:24] INFO Building dbt source 'ecomm_demo'
[09:45:24] INFO Building dbt artifacts for schema: /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Identify/1-0.json
[09:45:26] INFO Building dbt table 'identifies' in source 'ecomm_demo'
[09:45:26] INFO Building staging model 'stg_ecomm_demo__identifies.sql'
[09:45:26] INFO Building dbt documentation '_stg_ecomm_demo__identifies.yml'
[09:45:26] INFO Building dbt artifacts for schema: Segment 'users' table
[09:45:26] INFO Building dbt table 'users' in source 'ecomm_demo'
[09:45:26] INFO Building staging model 'stg_ecomm_demo__users.sql'
[09:45:26] INFO Building dbt documentation '_stg_ecomm_demo__users.yml'
[09:45:26] INFO Building dbt artifacts for schema: /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Cart Viewed/2-0.json
[09:45:26] INFO Building dbt table 'cart_viewed' in source 'ecomm_demo'
[09:45:26] INFO Building staging model 'stg_ecomm_demo__cart_viewed__v2_0.sql'
[09:45:26] INFO Building dbt documentation '_stg_ecomm_demo__cart_viewed__v2_0.yml'
[09:45:26] INFO Building dbt artifacts for schema: /Users/myuser/Repos/my-reflekt-project/schemas/segment/ecommerce/Cart Viewed/1-0.json
[09:45:27] INFO Building staging model 'stg_ecomm_demo__cart_viewed.sql'
[09:45:27] INFO Building dbt documentation '_stg_ecomm_demo__cart_viewed.yml'
...
... # Full output omitted for brevity
...
[09:45:29] INFO Building dbt artifacts for schema: Segment 'tracks' table
[09:45:29] INFO Building dbt table 'tracks' in source 'ecomm_demo'
[09:45:29] INFO Building staging model 'stg_ecomm_demo__tracks.sql'
[09:45:29] INFO Building dbt documentation '_stg_ecomm_demo__tracks.yml'
[09:45:29] INFO Copying dbt package from temporary path /Users/myuser/Repos/my-reflekt-project/.reflekt_cache/artifacts/dbt/reflekt_demo to
/Users/myuser/Repos/my-reflekt-project/artifacts/dbt/reflekt_demo
[09:45:29] INFO Successfully built dbt package
```
### Using private dbt packages
To use a Reflekt dbt package in a downstream dbt project, add it to the dbt project's `packages.yml`.
#### dbt-core
```yaml
# packages.yml
packages:
- git: "https://github.com/<your_user_or_org>/<your_repo>" # Reflekt project Github repo URL
subdirectory: "dbt-packages/<reflekt_dbt_package_name>"
revision: v0.1.0__reflekt_demo # Branch, tag, or commit (40-character hash). For latest, use 'main' branch.
```
#### dbt-cloud
```yaml
# packages.yml
packages:
- git: ""https://{{env_var('DBT_ENV_SECRET_GITHUB_PAT')}}@github.com/<your_user_or_org>/<your_repo>.git"" # Reflekt project Github repo URL with GitHub PAT
subdirectory: "dbt-packages/<reflekt_dbt_package_name>"
revision: v0.1.0__reflekt_demo # Branch, tag, or commit (40-character hash). For latest, use 'main' branch.
```
To use with dbt-cloud, you will need to create a [Github personal access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) (e.g., `DBT_ENV_SECRET_GITHUB_PAT`) and [configure it as an environment variable](https://docs.getdbt.com/docs/dbt-cloud/using-dbt-cloud/cloud-environment-variables) in your dbt-cloud account.
For local dbt development, set the environment variable on your local machine.
```bash
# Add the following line to your .zshrc, .bash_profile, etc.
export DBT_ENV_SECRET_GITHUB_PAT=YOUR_TOKEN
```
### Supported data warehouses
Reflekt currently supports the following data warehouses:
- Snowflake
- Redshift
- :construction: BigQuery support coming soon! :construction:
| /reflekt-0.3.25.tar.gz/reflekt-0.3.25/README.md | 0.716814 | 0.795936 | README.md | pypi |
# Variables that every reflex rule receives automatically from the engine.
# They are filtered out when listing a rule's user-configurable variables
# (see Rule.configurables below).
DEFAULT_VARIABLES = [
    "sns_topic_arn",
    "reflex_kms_key_id",
    "cloudwatch_event_rule_id",
    "cloudwatch_event_rule_arn",
    "package_location",
]
class Rule:
    """Represents a single reflex rule as seen by the CLI."""

    def __init__(self, rule_name, rule_dict):
        """Initialize the rule from its name and raw configuration.

        Args:
            rule_name (str): Name of the rule.
            rule_dict (dict): Raw rule configuration (version, description,
                variables, and optionally a github_org override).
        """
        self.name = rule_name
        self.raw_rule = rule_dict
        self.version = rule_dict.get("version")
        self.description = rule_dict.get("description")
        self.variables = rule_dict.get("variables")

    @property
    def configurables(self):
        """Return the rule variables that a user can configure.

        Variables the reflex engine supplies automatically
        (DEFAULT_VARIABLES) are filtered out.
        """
        return [
            variable
            for variable in self.variables
            if variable not in DEFAULT_VARIABLES
        ]

    @property
    def numeric_version(self):
        """Return the semantic version as a major/minor/patch dict.

        The values are the string components of the version with any
        leading "v" removed (e.g. "v1.2.3" -> {"major": "1", ...}).
        """
        version_parts = self.version.replace("v", "").split(".")
        return {
            "major": version_parts[0],
            "minor": version_parts[1],
            "patch": version_parts[2],
        }

    @property
    def github_org(self):
        """Returns the GitHub organization that owns this Rule.

        Returns:
            str: The configured organization, or "reflexivesecurity" when
            the rule config does not override it.
        """
        return self.raw_rule.get("github_org", "reflexivesecurity")

    @property
    def repository_name(self):
        """Returns the repository name for this Rule.

        Official rules (no github_org override) follow the
        ``reflex-aws-<name>`` naming convention; custom rules use the
        rule name as-is.

        Returns:
            str: The repository name
        """
        if "github_org" in self.raw_rule:
            return self.name
        return f"reflex-aws-{self.name}"

    @property
    def remote_url(self):
        """Returns the remote repository URL.

        Returns:
            str: The HTTPS URL of the rule's GitHub repository
        """
        return f"https://github.com/{self.github_org}/{self.repository_name}"

    @property
    def is_custom(self):
        """Returns a boolean representing if this is a custom rule.

        A rule is custom when it is owned by any organization other than
        reflexivesecurity.

        Returns:
            bool: Whether or not the rule is custom
        """
        return self.github_org != "reflexivesecurity"
import logging
import os
from jinja2 import Environment, PackageLoader, select_autoescape
from reflex_cli.create_template_utils import (
ensure_output_directory_exists,
write_template_file,
)
# Module-level logger, named after this module.
LOGGER = logging.getLogger(__name__)
# Fallback GitHub organization for official reflex rules.
# NOTE(review): not referenced in this module's visible code — confirm it is
# used elsewhere before removing.
DEFAULT_GITHUB_ORG = "reflexivesecurity"
class RepoFoundationSkeleton:
    """Render the scaffolding files for a new reflex rule repository."""

    def __init__(self, output_directory, configuration):
        """Set up the Jinja environment and the rule metadata.

        Args:
            output_directory (str): Directory where rendered files are written.
            configuration (dict): Rule settings (github_org, rule_name,
                class_name, mode).
        """
        self.output_directory = output_directory
        self.github_org_name = configuration.get("github_org")
        self.rule_name = configuration.get("rule_name")
        self.class_name = configuration.get("class_name")
        self.mode = configuration.get("mode")
        self.template_env = Environment(
            loader=PackageLoader("reflex_cli", "templates/rule_templates"),
            autoescape=select_autoescape(["tf"]),
        )

    def create_templates(self):  # pragma: no cover
        """Render every scaffolding file for the new rule repository."""
        ensure_output_directory_exists(self.output_directory)
        self.create_release_workflow_templates()
        self.create_test_workflow_template()
        self.create_renovate_template()
        self.create_source_template()
        self.create_requirements_template()
        self.create_gitignore_template()
        self.create_pylintrc_template()
        self.create_license_template()
        self.create_readme_template()

    def create_template(
        self, template_file, template_options, output_path
    ):  # pragma: no cover
        """Render one Jinja template and write it under the output directory.

        Args:
            template_file (str): Template path relative to the template loader.
            template_options (dict): Variables passed to the template.
            output_path (str): Destination path relative to output_directory.
        """
        rendered = self.template_env.get_template(template_file).render(
            template_options
        )
        write_template_file(
            os.path.join(self.output_directory, output_path), rendered
        )

    def create_release_workflow_templates(self):  # pragma: no cover
        """Render the GitHub release workflow and semantic-release config."""
        for release_file in (
            ".github/workflows/release.yaml",
            ".github/.releaserc.json",
        ):
            self.create_template(release_file + ".jinja2", {}, release_file)

    def create_test_workflow_template(self):  # pragma: no cover
        """Render the GitHub Actions test workflow."""
        self.create_template(
            ".github/workflows/test.yaml.jinja2",
            {},
            ".github/workflows/test.yaml",
        )

    def create_renovate_template(self):  # pragma: no cover
        """Render the Renovate dependency-update config."""
        self.create_template(
            ".github/renovate.json.jinja2", {}, ".github/renovate.json"
        )

    def create_source_template(self):  # pragma: no cover
        """Render the rule's Python source file from its class name and mode."""
        source_options = {
            "rule_class_name": self.class_name,
            "mode": self.mode,
        }
        source_path = "source/{}.py".format(self.rule_name.replace("-", "_"))
        self.create_template("source/rule.py.jinja2", source_options, source_path)

    def create_requirements_template(self):  # pragma: no cover
        """Render requirements.txt for the rule's source."""
        require_file = "source/requirements.txt"
        self.create_template(require_file, {}, require_file)

    def create_gitignore_template(self):  # pragma: no cover
        """Render the repository .gitignore."""
        self.create_template(".gitignore", {}, ".gitignore")

    def create_pylintrc_template(self):  # pragma: no cover
        """Render the repository .pylintrc."""
        self.create_template(".pylintrc", {}, ".pylintrc")

    def create_license_template(self):  # pragma: no cover
        """Render the repository LICENSE file."""
        self.create_template("LICENSE", {}, "LICENSE")

    def create_readme_template(self):  # pragma: no cover
        """Render README.md with the org, rule name, and mode filled in."""
        readme_options = {
            "github_org_name": self.github_org_name,
            "rule_name": self.rule_name,
            "mode": self.mode,
        }
        self.create_template("README.md", readme_options, "README.md")
import json
import logging
import os
import re
import traceback
import boto3
from reflex_core.notifiers import Notifier, SNSNotifier
from reflex_core import utilities
# Log verbosity for all rules, overridable via the LOG_LEVEL env var.
LOG_LEVEL = os.environ.get("LOG_LEVEL", "INFO").upper()


class AWSRuleInterface:
    """Generic interface class for AWS compliance rules.

    Attributes:
        event (dict): The AWS CloudWatch event that the rule is responding to.
        account (str): The AWS account number that the event occurred in.
        region (str): The AWS region that the event occurred in.
        service (str): The name of the AWS service that triggered the event.
        client (boto3.client): A boto3 client for the service that triggered the event.
        pre_compliance_check_functions (list): A list of callables (usually functions)
            to be run before the resource compliance check occurs.
        post_compliance_check_functions (list): A list of callables (usually functions)
            to be run after the resource compliance check occurs.
        pre_remediation_functions (list): A list of callables (usually functions)
            to be run before remediation action occurs.
        post_remediation_functions (list): A list of callables (usually functions)
            to be run after remediation action occurs.
        notifiers (list): A list of Notifiers that will send notifications.
    """

    LOGGER = logging.getLogger()
    LOGGER.setLevel(LOG_LEVEL)

    def __init__(self, event):
        """Initialize the rule object.

        Args:
            event (dict): An AWS CloudWatch event.
        """
        self.LOGGER.info("Incoming event: %s", event)
        self.event = event
        self.account = event["account"]
        self.region = event["region"]
        self.service = self.parse_service_name(event["source"])
        self.client = self.get_boto3_client()
        self.extract_event_data(event)

        self.pre_compliance_check_functions = []
        self.post_compliance_check_functions = []
        self.pre_remediation_functions = []
        self.post_remediation_functions = []
        self.notifiers = [SNSNotifier]

    def get_boto3_client(self):
        """Instantiate and return a boto3 client.

        Returns:
            boto3.client: A boto3 client for the service that triggered the event.

        The boto3 client will be for the specific account and region that triggered
        the event. If no service can be parsed from the event (usually as a result
        of the event being custom), or the parsed service name is invalid, this
        will return None.
        """
        role_arn = self.get_role_arn()
        role_session_name = self.get_role_session_name()

        return utilities.get_boto3_client(
            self.service, role_arn, role_session_name, self.region
        )

    def get_role_arn(self):
        """Get and return the ARN of the role we will assume.

        Returns:
            str: The ARN of the IAM role we will assume for our boto3 client.
        """
        return utilities.get_assume_role_arn(self.account)

    def get_role_session_name(self):
        """Get and return the AWS role session name.

        Returns:
            str: The rule's class name with a "Session" suffix.
        """
        return f"{self.__class__.__name__}Session"

    def parse_service_name(self, event_source):
        """Parse the AWS service name from event["source"].

        Args:
            event_source (str): The event source from the triggering CWE.

        Returns:
            str: The name of the AWS service that produced the event.
            Returns None if the event contains a Non-AWS event source.
        """
        if not event_source.startswith("aws."):
            self.LOGGER.info(
                "Non-AWS event source present. This is a custom CloudWatch Event."
            )
            return None

        service_name = event_source.replace("aws.", "", 1)
        return service_name

    def extract_event_data(self, event):
        """Extracts required data from the event.

        Args:
            event (dict): The event that triggered the rule.

        Returns:
            None

        Raises:
            NotImplementedError: Raised if the rule has not implemented logic
                for extracting required data.
        """
        raise NotImplementedError("extract_event_data not implemented")

    def run_compliance_rule(self):
        """Runs all steps of the compliance rule.

        Checks for SystemExit to allow for use of sys.exit() to end rule
        execution without the Lambda failing, incrementing failure counter,
        and retrying.

        Returns:
            None
        """
        try:
            self.pre_compliance_check()
            self.LOGGER.debug("Checking if resource is compliant")
            resource_compliant = self.resource_compliant()
            self.post_compliance_check()

            if not resource_compliant:
                self.LOGGER.debug("Resource is not compliant")
                if self.should_remediate():
                    self.pre_remediation()
                    self.LOGGER.debug("Remediating resource")
                    self.remediate()
                    self.LOGGER.debug("Remediation complete")
                    self.post_remediation()
                self.notify()
                return

            self.LOGGER.debug("Resource is compliant")
        except SystemExit as exception:
            # Exit code None or 0 means a deliberate, clean early exit.
            if exception.code is None or exception.code == 0:
                return
            raise

    def resource_compliant(self):
        """Determine if the resource complies with rule requirements.

        Returns:
            bool: True if the resource is compliant, False otherwise.

        Raises:
            NotImplementedError: Raised if the rule has not implemented logic
                for determining if the resource is compliant.
        """
        raise NotImplementedError("resource_compliant not implemented")

    def remediate(self):
        """Fixes the configuration of the non-compliant resource.

        Returns:
            None

        Raises:
            NotImplementedError: Raised if the rule has not implemented logic
                for fixing the resource configuration.
        """
        raise NotImplementedError("remediate not implemented")

    def _run_hook_functions(self, hook_functions, phase):
        """Run a list of hook callables, isolating any failures.

        Hooks are best-effort: an exception raised by one hook is printed
        and the remaining hooks still run. Functions execute in the order
        they occur in the list.

        Args:
            hook_functions (list): Callables to execute, in order.
            phase (str): Human-readable hook phase name, used in log messages.

        Returns:
            None
        """
        self.LOGGER.debug("Running %s functions", phase)
        for hook_function in hook_functions:
            self.LOGGER.debug(
                "Running %s function %s", phase, hook_function.__name__
            )
            try:
                hook_function()
            except Exception:  # pylint: disable=broad-except
                traceback.print_exc()

    def pre_compliance_check(self):
        """Runs all pre-compliance check functions.

        Executes all functions registered in pre_compliance_check_functions,
        in order, immediately before resource_compliant is called.

        Returns:
            None
        """
        self._run_hook_functions(
            self.pre_compliance_check_functions, "pre-compliance check"
        )

    def post_compliance_check(self):
        """Runs all post-compliance check functions.

        Executes all functions registered in post_compliance_check_functions,
        in order, immediately after resource_compliant is called.

        Returns:
            None
        """
        # Fixed: previously logged these as "post-remediation" functions.
        self._run_hook_functions(
            self.post_compliance_check_functions, "post-compliance check"
        )

    def pre_remediation(self):
        """Runs all pre-remediation functions.

        Executes all functions registered in pre_remediation_functions,
        in order, immediately before remediate is called.

        Returns:
            None
        """
        self._run_hook_functions(self.pre_remediation_functions, "pre-remediation")

    def post_remediation(self):
        """Runs all post-remediation functions.

        Executes all functions registered in post_remediation_functions,
        in order, immediately after remediate is called.

        Returns:
            None
        """
        self._run_hook_functions(self.post_remediation_functions, "post-remediation")

    def _get_remediation_message(self):
        """Generates the message that will be sent in notifications.

        This function retrieves the rule specific notification message
        (from get_remediation_message) and appends generic information,
        such as the time at which the triggering event occurred and
        the raw event data.

        Returns:
            str: The message that will be sent in notifications.
        """
        rule_message = self.get_remediation_message()
        message = (
            f"{rule_message}\n\n"
            f"Event time: {self.event['time']}\n"
            f"Raw event: {json.dumps(self.event, indent=2)}"
        )
        return message

    def get_remediation_message(self):
        """Provides a rule specific message to be sent in notifications.

        Returns:
            str: The rule specific message to be sent in notifications.

        Raises:
            NotImplementedError: Raised if the rule has not implemented logic
                for creating a notification message.
        """
        raise NotImplementedError("get_remediation_message not implemented")

    def get_remediation_message_subject(self):
        """Provides the subject to use when sending notifications.

        Note: Subjects must be ASCII text that begin with a letter, number, or
        punctuation mark; must not include line breaks or control characters;
        and must be less than 100 characters long in order to be compatible
        with SNS. See https://docs.aws.amazon.com/sns/latest/api/API_Publish.html

        Returns:
            str: The subject to use in notifications.
        """
        subject = self.__class__.__name__
        # Split the CamelCase class name into words, keeping acronym runs
        # (e.g. "AWSRuleInterface" -> ["AWS", "Rule", "Interface"]).
        subject_split = re.findall(
            r"[0-9A-Z](?:[0-9a-z]+|[0-9A-Z]*(?=[0-9A-Z]|$))", subject
        )
        fixed_subject = " ".join(subject_split)
        return f"The Reflex Rule {fixed_subject} was triggered."

    def notify(self):
        """Send notification messages with all Notifiers.

        Notifier failures are logged but do not interrupt the rule.

        Returns:
            None
        """
        for notifier in self.notifiers:
            try:
                notifier().notify(
                    subject=self.get_remediation_message_subject(),
                    message=self._get_remediation_message(),
                )
            except Exception as exp:  # pylint: disable=broad-except
                self.LOGGER.error(
                    "An error occurred while trying to send a notification: %s", exp
                )

    def should_remediate(self):
        """Determines if remediation action should be taken.

        Returns:
            bool: True if remediation mode is active, False otherwise.
        """
        mode = os.environ.get("MODE", "detect").lower()
        return mode == "remediate"
from __future__ import annotations
from typing import Any
import reflex as rx
from reflex.components.tags import Tag
from reflex.vars import Var
class DebounceInput(rx.Component):
    """Reflex wrapper for the `react-debounce-input` React component.

    Wraps a child input component (e.g. rx.input or rx.text_area) so that
    its on_change handler only fires after the user pauses typing.
    """

    # npm package the component is imported from.
    library = "react-debounce-input"
    # Name of the React component to import from the package.
    tag = "DebounceInput"

    # Minimum input length before on_change notifications start firing.
    min_length: Var[int] = 0
    # Milliseconds to wait after the last keystroke before notifying.
    debounce_timeout: Var[int] = 100
    # Notify immediately when Enter is pressed.
    force_notify_by_enter: Var[bool] = True
    # Notify immediately when the input loses focus.
    force_notify_on_blur: Var[bool] = True

    def _render(self) -> Tag:
        """Carry first child props directly on this tag.

        Since react-debounce-input wants to create and manage the underlying
        input component itself, we carry all props, events, and styles from
        the child, and then neuter the child's render method so it produces no output.

        Returns:
            Tag: This component's tag, carrying the child's props.

        Raises:
            RuntimeError: If no child component was provided.
        """
        if not self.children:
            raise RuntimeError(
                "Provide a child for DebounceInput, such as rx.input() or rx.text_area()",
            )
        child = self.children[0]
        tag = super()._render()
        # Copy the child's events, props, style, id, and class onto this tag;
        # `element` tells react-debounce-input which React element to create.
        tag.add_props(
            **child.event_triggers,
            **props_not_none(child),
            sx=child.style,
            id=child.id,
            class_name=child.class_name,
            element=Var.create("{%s}" % child.tag, is_local=False, is_string=False),
        )
        # do NOT render the child, DebounceInput will create it
        object.__setattr__(child, "render", lambda: "")
        return tag
# Convenience constructor alias, following reflex's usual component idiom.
debounce_input = DebounceInput.create
def props_not_none(c: rx.Component) -> dict[str, Any]:
    """Return a dict of all props on ``c`` whose current value is not None."""
    props = {}
    for prop_name in c.get_props():
        value = getattr(c, prop_name, None)
        if value is not None:
            props[prop_name] = value
    return props
def ensure_frontend_package():
    """Ensure `react-debounce-input` is listed in the app's frontend packages."""
    from reflex.config import get_config

    config = get_config()
    # Package entries may carry a version suffix ("pkg@1.2.3"); compare the
    # bare package name only.
    already_listed = any(
        package.partition("@")[0].strip() == "react-debounce-input"
        for package in config.frontend_packages
    )
    if not already_listed:
        config.frontend_packages.append("react-debounce-input")


# Run at import time so every user of this module gets the npm package.
ensure_frontend_package()
import reflex as rx
# Auto-generated Reflex wrappers for the Bootstrap ("bs") icon set of the
# react-icons npm package. Each class maps one React icon component:
# `library` is the import path within react-icons and `tag` is the exported
# component name. No props beyond the rx.Component defaults are defined.
class BsFill0CircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFill0CircleFill"
class BsFill0SquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFill0SquareFill"
class BsFill1CircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFill1CircleFill"
class BsFill1SquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFill1SquareFill"
class BsFill2CircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFill2CircleFill"
class BsFill2SquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFill2SquareFill"
class BsFill3CircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFill3CircleFill"
class BsFill3SquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFill3SquareFill"
class BsFill4CircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFill4CircleFill"
class BsFill4SquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFill4SquareFill"
class BsFill5CircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFill5CircleFill"
class BsFill5SquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFill5SquareFill"
class BsFill6CircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFill6CircleFill"
class BsFill6SquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFill6SquareFill"
class BsFill7CircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFill7CircleFill"
class BsFill7SquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFill7SquareFill"
class BsFill8CircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFill8CircleFill"
class BsFill8SquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFill8SquareFill"
class BsFill9CircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFill9CircleFill"
class BsFill9SquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFill9SquareFill"
class BsFillAirplaneEnginesFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillAirplaneEnginesFill"
class BsFillAirplaneFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillAirplaneFill"
class BsFillAlarmFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillAlarmFill"
class BsFillArchiveFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillArchiveFill"
class BsFillArrowDownCircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillArrowDownCircleFill"
class BsFillArrowDownLeftCircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillArrowDownLeftCircleFill"
class BsFillArrowDownLeftSquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillArrowDownLeftSquareFill"
class BsFillArrowDownRightCircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillArrowDownRightCircleFill"
class BsFillArrowDownRightSquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillArrowDownRightSquareFill"
class BsFillArrowDownSquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillArrowDownSquareFill"
class BsFillArrowLeftCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillArrowLeftCircleFill"
class BsFillArrowLeftSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillArrowLeftSquareFill"
class BsFillArrowRightCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillArrowRightCircleFill"
class BsFillArrowRightSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillArrowRightSquareFill"
class BsFillArrowThroughHeartFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillArrowThroughHeartFill"
class BsFillArrowUpCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillArrowUpCircleFill"
class BsFillArrowUpLeftCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillArrowUpLeftCircleFill"
class BsFillArrowUpLeftSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillArrowUpLeftSquareFill"
class BsFillArrowUpRightCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillArrowUpRightCircleFill"
class BsFillArrowUpRightSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillArrowUpRightSquareFill"
class BsFillArrowUpSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillArrowUpSquareFill"
class BsFillAspectRatioFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillAspectRatioFill"
class BsFillAwardFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillAwardFill"
class BsFillBackspaceFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBackspaceFill"
class BsFillBackspaceReverseFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBackspaceReverseFill"
class BsFillBadge3DFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBadge3DFill"
class BsFillBadge4KFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBadge4KFill"
class BsFillBadge8KFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBadge8KFill"
class BsFillBadgeAdFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBadgeAdFill"
class BsFillBadgeArFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBadgeArFill"
class BsFillBadgeCcFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBadgeCcFill"
class BsFillBadgeHdFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBadgeHdFill"
class BsFillBadgeSdFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBadgeSdFill"
class BsFillBadgeTmFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBadgeTmFill"
class BsFillBadgeVoFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBadgeVoFill"
class BsFillBadgeVrFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBadgeVrFill"
class BsFillBadgeWcFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBadgeWcFill"
class BsFillBagCheckFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBagCheckFill"
class BsFillBagDashFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBagDashFill"
class BsFillBagFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBagFill"
class BsFillBagHeartFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBagHeartFill"
class BsFillBagPlusFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBagPlusFill"
class BsFillBagXFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBagXFill"
class BsFillBalloonFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBalloonFill"
class BsFillBalloonHeartFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBalloonHeartFill"
class BsFillBandaidFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBandaidFill"
class BsFillBarChartFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBarChartFill"
class BsFillBarChartLineFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBarChartLineFill"
class BsFillBasketFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBasketFill"
class BsFillBasket2Fill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBasket2Fill"
class BsFillBasket3Fill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBasket3Fill"
class BsFillBellFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBellFill"
class BsFillBellSlashFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBellSlashFill"
class BsFillBinocularsFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBinocularsFill"
class BsFillBookFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBookFill"
class BsFillBookmarkCheckFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBookmarkCheckFill"
class BsFillBookmarkDashFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBookmarkDashFill"
class BsFillBookmarkFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBookmarkFill"
class BsFillBookmarkHeartFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBookmarkHeartFill"
class BsFillBookmarkPlusFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBookmarkPlusFill"
class BsFillBookmarkStarFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBookmarkStarFill"
class BsFillBookmarkXFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBookmarkXFill"
class BsFillBookmarksFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBookmarksFill"
class BsFillBoomboxFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBoomboxFill"
class BsFillBootstrapFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBootstrapFill"
class BsFillBoxFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBoxFill"
class BsFillBoxSeamFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBoxSeamFill"
class BsFillBox2Fill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBox2Fill"
class BsFillBox2HeartFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBox2HeartFill"
class BsFillBriefcaseFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBriefcaseFill"
class BsFillBrightnessAltHighFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBrightnessAltHighFill"
class BsFillBrightnessAltLowFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBrightnessAltLowFill"
class BsFillBrightnessHighFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBrightnessHighFill"
class BsFillBrightnessLowFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBrightnessLowFill"
class BsFillBrushFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBrushFill"
class BsFillBucketFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBucketFill"
class BsFillBugFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBugFill"
class BsFillBuildingFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBuildingFill"
class BsFillBuildingsFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBuildingsFill"
class BsFillBusFrontFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillBusFrontFill"
# ---------------------------------------------------------------------------
# Auto-generated Reflex wrappers for react-icons/bs "fill" icons, C-D.
# Each class exposes one React icon to Reflex via `library` (npm import
# path) and `tag` (exported React component name). Regenerate, don't edit.
# ---------------------------------------------------------------------------
class BsFillCCircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCCircleFill"
class BsFillCSquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCSquareFill"
class BsFillCalculatorFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalculatorFill"
class BsFillCalendarCheckFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendarCheckFill"
class BsFillCalendarDateFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendarDateFill"
class BsFillCalendarDayFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendarDayFill"
class BsFillCalendarEventFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendarEventFill"
class BsFillCalendarFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendarFill"
class BsFillCalendarHeartFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendarHeartFill"
class BsFillCalendarMinusFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendarMinusFill"
class BsFillCalendarMonthFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendarMonthFill"
class BsFillCalendarPlusFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendarPlusFill"
class BsFillCalendarRangeFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendarRangeFill"
class BsFillCalendarWeekFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendarWeekFill"
class BsFillCalendarXFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendarXFill"
class BsFillCalendar2CheckFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendar2CheckFill"
class BsFillCalendar2DateFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendar2DateFill"
class BsFillCalendar2DayFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendar2DayFill"
class BsFillCalendar2EventFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendar2EventFill"
class BsFillCalendar2Fill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendar2Fill"
class BsFillCalendar2HeartFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendar2HeartFill"
class BsFillCalendar2MinusFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendar2MinusFill"
class BsFillCalendar2MonthFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendar2MonthFill"
class BsFillCalendar2PlusFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendar2PlusFill"
class BsFillCalendar2RangeFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendar2RangeFill"
class BsFillCalendar2WeekFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendar2WeekFill"
class BsFillCalendar2XFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendar2XFill"
class BsFillCalendar3EventFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendar3EventFill"
class BsFillCalendar3Fill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendar3Fill"
class BsFillCalendar3RangeFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendar3RangeFill"
class BsFillCalendar3WeekFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCalendar3WeekFill"
class BsFillCameraFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCameraFill"
class BsFillCameraReelsFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCameraReelsFill"
class BsFillCameraVideoFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCameraVideoFill"
class BsFillCameraVideoOffFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCameraVideoOffFill"
class BsFillCapslockFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCapslockFill"
class BsFillCarFrontFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCarFrontFill"
class BsFillCaretDownFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCaretDownFill"
class BsFillCaretDownSquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCaretDownSquareFill"
class BsFillCaretLeftFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCaretLeftFill"
class BsFillCaretLeftSquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCaretLeftSquareFill"
class BsFillCaretRightFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCaretRightFill"
class BsFillCaretRightSquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCaretRightSquareFill"
class BsFillCaretUpFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCaretUpFill"
class BsFillCaretUpSquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCaretUpSquareFill"
class BsFillCartCheckFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCartCheckFill"
class BsFillCartDashFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCartDashFill"
class BsFillCartFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCartFill"
class BsFillCartPlusFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCartPlusFill"
class BsFillCartXFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCartXFill"
class BsFillCassetteFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCassetteFill"
class BsFillCcCircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCcCircleFill"
class BsFillCcSquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCcSquareFill"
class BsFillChatDotsFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillChatDotsFill"
class BsFillChatFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillChatFill"
class BsFillChatHeartFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillChatHeartFill"
class BsFillChatLeftDotsFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillChatLeftDotsFill"
class BsFillChatLeftFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillChatLeftFill"
class BsFillChatLeftHeartFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillChatLeftHeartFill"
class BsFillChatLeftQuoteFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillChatLeftQuoteFill"
class BsFillChatLeftTextFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillChatLeftTextFill"
class BsFillChatQuoteFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillChatQuoteFill"
class BsFillChatRightDotsFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillChatRightDotsFill"
class BsFillChatRightFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillChatRightFill"
class BsFillChatRightHeartFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillChatRightHeartFill"
class BsFillChatRightQuoteFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillChatRightQuoteFill"
class BsFillChatRightTextFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillChatRightTextFill"
class BsFillChatSquareDotsFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillChatSquareDotsFill"
class BsFillChatSquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillChatSquareFill"
class BsFillChatSquareHeartFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillChatSquareHeartFill"
class BsFillChatSquareQuoteFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillChatSquareQuoteFill"
class BsFillChatSquareTextFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillChatSquareTextFill"
class BsFillChatTextFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillChatTextFill"
class BsFillCheckCircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCheckCircleFill"
class BsFillCheckSquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCheckSquareFill"
class BsFillCircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCircleFill"
class BsFillClipboardCheckFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillClipboardCheckFill"
class BsFillClipboardDataFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillClipboardDataFill"
class BsFillClipboardFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillClipboardFill"
class BsFillClipboardHeartFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillClipboardHeartFill"
class BsFillClipboardMinusFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillClipboardMinusFill"
class BsFillClipboardPlusFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillClipboardPlusFill"
class BsFillClipboardXFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillClipboardXFill"
class BsFillClipboard2CheckFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillClipboard2CheckFill"
class BsFillClipboard2DataFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillClipboard2DataFill"
class BsFillClipboard2Fill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillClipboard2Fill"
class BsFillClipboard2HeartFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillClipboard2HeartFill"
class BsFillClipboard2MinusFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillClipboard2MinusFill"
class BsFillClipboard2PlusFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillClipboard2PlusFill"
class BsFillClipboard2PulseFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillClipboard2PulseFill"
class BsFillClipboard2XFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillClipboard2XFill"
class BsFillClockFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillClockFill"
class BsFillCloudArrowDownFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudArrowDownFill"
class BsFillCloudArrowUpFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudArrowUpFill"
class BsFillCloudCheckFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudCheckFill"
class BsFillCloudDownloadFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudDownloadFill"
class BsFillCloudDrizzleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudDrizzleFill"
class BsFillCloudFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudFill"
class BsFillCloudFogFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudFogFill"
class BsFillCloudFog2Fill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudFog2Fill"
class BsFillCloudHailFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudHailFill"
class BsFillCloudHazeFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudHazeFill"
class BsFillCloudHaze2Fill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudHaze2Fill"
class BsFillCloudLightningFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudLightningFill"
class BsFillCloudLightningRainFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudLightningRainFill"
class BsFillCloudMinusFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudMinusFill"
class BsFillCloudMoonFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudMoonFill"
class BsFillCloudPlusFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudPlusFill"
class BsFillCloudRainFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudRainFill"
class BsFillCloudRainHeavyFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudRainHeavyFill"
class BsFillCloudSlashFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudSlashFill"
class BsFillCloudSleetFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudSleetFill"
class BsFillCloudSnowFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudSnowFill"
class BsFillCloudSunFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudSunFill"
class BsFillCloudUploadFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudUploadFill"
class BsFillCloudsFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudsFill"
class BsFillCloudyFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCloudyFill"
class BsFillCollectionFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCollectionFill"
class BsFillCollectionPlayFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCollectionPlayFill"
class BsFillCompassFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCompassFill"
class BsFillCpuFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCpuFill"
class BsFillCreditCard2BackFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCreditCard2BackFill"
class BsFillCreditCard2FrontFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCreditCard2FrontFill"
class BsFillCreditCardFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCreditCardFill"
class BsFillCupFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCupFill"
class BsFillCupHotFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCupHotFill"
class BsFillCursorFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillCursorFill"
# Icons D*
class BsFillDashCircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillDashCircleFill"
class BsFillDashSquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillDashSquareFill"
class BsFillDatabaseFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillDatabaseFill"
class BsFillDeviceHddFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillDeviceHddFill"
class BsFillDeviceSsdFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillDeviceSsdFill"
class BsFillDiagram2Fill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillDiagram2Fill"
class BsFillDiagram3Fill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillDiagram3Fill"
class BsFillDiamondFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillDiamondFill"
class BsFillDice1Fill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillDice1Fill"
class BsFillDice2Fill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillDice2Fill"
class BsFillDice3Fill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillDice3Fill"
class BsFillDice4Fill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillDice4Fill"
class BsFillDice5Fill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillDice5Fill"
class BsFillDice6Fill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillDice6Fill"
class BsFillDiscFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillDiscFill"
class BsFillDisplayFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillDisplayFill"
class BsFillDisplayportFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillDisplayportFill"
class BsFillDoorClosedFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillDoorClosedFill"
class BsFillDoorOpenFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillDoorOpenFill"
class BsFillDpadFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillDpadFill"
class BsFillDropletFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillDropletFill"
# ---------------------------------------------------------------------------
# Auto-generated Reflex wrappers for react-icons/bs "fill" icons, E-F.
# Each class exposes one React icon to Reflex via `library` (npm import
# path) and `tag` (exported React component name). Regenerate, don't edit.
# ---------------------------------------------------------------------------
class BsFillEarFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEarFill"
class BsFillEaselFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEaselFill"
class BsFillEasel2Fill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEasel2Fill"
class BsFillEasel3Fill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEasel3Fill"
class BsFillEggFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEggFill"
class BsFillEjectFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEjectFill"
class BsFillEmojiAngryFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEmojiAngryFill"
class BsFillEmojiDizzyFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEmojiDizzyFill"
class BsFillEmojiExpressionlessFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEmojiExpressionlessFill"
class BsFillEmojiFrownFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEmojiFrownFill"
class BsFillEmojiHeartEyesFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEmojiHeartEyesFill"
class BsFillEmojiKissFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEmojiKissFill"
class BsFillEmojiLaughingFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEmojiLaughingFill"
class BsFillEmojiNeutralFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEmojiNeutralFill"
class BsFillEmojiSmileFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEmojiSmileFill"
class BsFillEmojiSmileUpsideDownFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEmojiSmileUpsideDownFill"
class BsFillEmojiSunglassesFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEmojiSunglassesFill"
class BsFillEmojiWinkFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEmojiWinkFill"
class BsFillEnvelopeAtFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEnvelopeAtFill"
class BsFillEnvelopeCheckFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEnvelopeCheckFill"
class BsFillEnvelopeDashFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEnvelopeDashFill"
class BsFillEnvelopeExclamationFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEnvelopeExclamationFill"
class BsFillEnvelopeFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEnvelopeFill"
class BsFillEnvelopeHeartFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEnvelopeHeartFill"
class BsFillEnvelopeOpenFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEnvelopeOpenFill"
class BsFillEnvelopeOpenHeartFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEnvelopeOpenHeartFill"
class BsFillEnvelopePaperFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEnvelopePaperFill"
class BsFillEnvelopePaperHeartFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEnvelopePaperHeartFill"
class BsFillEnvelopePlusFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEnvelopePlusFill"
class BsFillEnvelopeSlashFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEnvelopeSlashFill"
class BsFillEnvelopeXFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEnvelopeXFill"
class BsFillEraserFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEraserFill"
class BsFillEvFrontFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEvFrontFill"
class BsFillEvStationFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEvStationFill"
class BsFillExclamationCircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillExclamationCircleFill"
class BsFillExclamationDiamondFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillExclamationDiamondFill"
class BsFillExclamationOctagonFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillExclamationOctagonFill"
class BsFillExclamationSquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillExclamationSquareFill"
class BsFillExclamationTriangleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillExclamationTriangleFill"
class BsFillExplicitFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillExplicitFill"
class BsFillEyeFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEyeFill"
class BsFillEyeSlashFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillEyeSlashFill"
# Icons F*
class BsFillFastForwardBtnFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFastForwardBtnFill"
class BsFillFastForwardCircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFastForwardCircleFill"
class BsFillFastForwardFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFastForwardFill"
class BsFillFileArrowDownFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileArrowDownFill"
class BsFillFileArrowUpFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileArrowUpFill"
class BsFillFileBarGraphFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileBarGraphFill"
class BsFillFileBinaryFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileBinaryFill"
class BsFillFileBreakFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileBreakFill"
class BsFillFileCheckFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileCheckFill"
class BsFillFileCodeFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileCodeFill"
class BsFillFileDiffFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileDiffFill"
class BsFillFileEarmarkArrowDownFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkArrowDownFill"
class BsFillFileEarmarkArrowUpFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkArrowUpFill"
class BsFillFileEarmarkBarGraphFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkBarGraphFill"
class BsFillFileEarmarkBinaryFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkBinaryFill"
class BsFillFileEarmarkBreakFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkBreakFill"
class BsFillFileEarmarkCheckFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkCheckFill"
class BsFillFileEarmarkCodeFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkCodeFill"
class BsFillFileEarmarkDiffFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkDiffFill"
class BsFillFileEarmarkEaselFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkEaselFill"
class BsFillFileEarmarkExcelFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkExcelFill"
class BsFillFileEarmarkFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkFill"
class BsFillFileEarmarkFontFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkFontFill"
class BsFillFileEarmarkImageFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkImageFill"
class BsFillFileEarmarkLockFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkLockFill"
class BsFillFileEarmarkLock2Fill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkLock2Fill"
class BsFillFileEarmarkMedicalFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkMedicalFill"
class BsFillFileEarmarkMinusFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkMinusFill"
class BsFillFileEarmarkMusicFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkMusicFill"
class BsFillFileEarmarkPdfFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkPdfFill"
class BsFillFileEarmarkPersonFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkPersonFill"
class BsFillFileEarmarkPlayFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkPlayFill"
class BsFillFileEarmarkPlusFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkPlusFill"
class BsFillFileEarmarkPostFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkPostFill"
class BsFillFileEarmarkPptFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkPptFill"
class BsFillFileEarmarkRichtextFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkRichtextFill"
class BsFillFileEarmarkRuledFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkRuledFill"
class BsFillFileEarmarkSlidesFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkSlidesFill"
class BsFillFileEarmarkSpreadsheetFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkSpreadsheetFill"
class BsFillFileEarmarkTextFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkTextFill"
class BsFillFileEarmarkWordFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkWordFill"
class BsFillFileEarmarkXFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkXFill"
class BsFillFileEarmarkZipFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEarmarkZipFill"
class BsFillFileEaselFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileEaselFill"
class BsFillFileExcelFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileExcelFill"
class BsFillFileFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileFill"
class BsFillFileFontFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileFontFill"
class BsFillFileImageFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileImageFill"
class BsFillFileLockFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileLockFill"
class BsFillFileLock2Fill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileLock2Fill"
class BsFillFileMedicalFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileMedicalFill"
class BsFillFileMinusFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileMinusFill"
class BsFillFileMusicFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileMusicFill"
class BsFillFilePdfFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFilePdfFill"
class BsFillFilePersonFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFilePersonFill"
class BsFillFilePlayFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFilePlayFill"
class BsFillFilePlusFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFilePlusFill"
class BsFillFilePostFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFilePostFill"
class BsFillFilePptFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFilePptFill"
class BsFillFileRichtextFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileRichtextFill"
class BsFillFileRuledFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileRuledFill"
class BsFillFileSlidesFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsFillFileSlidesFill"
class BsFillFileSpreadsheetFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillFileSpreadsheetFill"
class BsFillFileTextFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillFileTextFill"
class BsFillFileWordFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillFileWordFill"
class BsFillFileXFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillFileXFill"
class BsFillFileZipFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillFileZipFill"
class BsFillFilterCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillFilterCircleFill"
class BsFillFilterSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillFilterSquareFill"
class BsFillFlagFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillFlagFill"
class BsFillFolderFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillFolderFill"
class BsFillFolderSymlinkFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillFolderSymlinkFill"
class BsFillForwardFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillForwardFill"
class BsFillFuelPumpDieselFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillFuelPumpDieselFill"
class BsFillFuelPumpFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillFuelPumpFill"
class BsFillFunnelFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillFunnelFill"
class BsFillGearFill(rx.Component):
    """Wrapper for the ``BsFillGearFill`` Bootstrap icon from react-icons."""

    # JS export name within the library module.
    tag = "BsFillGearFill"
    # npm module the icon is imported from.
    library = "react-icons/bs"
class BsFillGeoAltFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillGeoAltFill"
class BsFillGeoFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillGeoFill"
class BsFillGiftFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillGiftFill"
class BsFillGrid1X2Fill(rx.Component):
library = "react-icons/bs"
tag = "BsFillGrid1X2Fill"
class BsFillGrid3X2GapFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillGrid3X2GapFill"
class BsFillGrid3X3GapFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillGrid3X3GapFill"
class BsFillGridFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillGridFill"
class BsFillHCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHCircleFill"
class BsFillHSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHSquareFill"
class BsFillHandIndexFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHandIndexFill"
class BsFillHandIndexThumbFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHandIndexThumbFill"
class BsFillHandThumbsDownFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHandThumbsDownFill"
class BsFillHandThumbsUpFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHandThumbsUpFill"
class BsFillHandbagFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHandbagFill"
class BsFillHddFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHddFill"
class BsFillHddNetworkFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHddNetworkFill"
class BsFillHddRackFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHddRackFill"
class BsFillHddStackFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHddStackFill"
class BsFillHdmiFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHdmiFill"
class BsFillHeartFill(rx.Component):
    """Wrapper for the ``BsFillHeartFill`` Bootstrap icon from react-icons."""

    # JS export name within the library module.
    tag = "BsFillHeartFill"
    # npm module the icon is imported from.
    library = "react-icons/bs"
class BsFillHeartPulseFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHeartPulseFill"
class BsFillHeartbreakFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHeartbreakFill"
class BsFillHeptagonFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHeptagonFill"
class BsFillHexagonFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHexagonFill"
class BsFillHospitalFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHospitalFill"
class BsFillHouseAddFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHouseAddFill"
class BsFillHouseCheckFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHouseCheckFill"
class BsFillHouseDashFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHouseDashFill"
class BsFillHouseDoorFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHouseDoorFill"
class BsFillHouseDownFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHouseDownFill"
class BsFillHouseExclamationFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHouseExclamationFill"
class BsFillHouseFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHouseFill"
class BsFillHouseGearFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHouseGearFill"
class BsFillHouseHeartFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHouseHeartFill"
class BsFillHouseLockFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHouseLockFill"
class BsFillHouseSlashFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHouseSlashFill"
class BsFillHouseUpFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHouseUpFill"
class BsFillHouseXFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHouseXFill"
class BsFillHousesFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillHousesFill"
class BsFillImageFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillImageFill"
class BsFillInboxFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillInboxFill"
class BsFillInboxesFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillInboxesFill"
class BsFillInfoCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillInfoCircleFill"
class BsFillInfoSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillInfoSquareFill"
class BsFillJournalBookmarkFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillJournalBookmarkFill"
class BsFillKanbanFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillKanbanFill"
class BsFillKeyFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillKeyFill"
class BsFillKeyboardFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillKeyboardFill"
class BsFillLampFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillLampFill"
class BsFillLaptopFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillLaptopFill"
class BsFillLayersFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillLayersFill"
class BsFillLightbulbFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillLightbulbFill"
class BsFillLightbulbOffFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillLightbulbOffFill"
class BsFillLightningChargeFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillLightningChargeFill"
class BsFillLightningFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillLightningFill"
class BsFillLockFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillLockFill"
class BsFillLungsFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillLungsFill"
class BsFillMagnetFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillMagnetFill"
class BsFillMapFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillMapFill"
class BsFillMarkdownFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillMarkdownFill"
class BsFillMegaphoneFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillMegaphoneFill"
class BsFillMenuAppFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillMenuAppFill"
class BsFillMenuButtonFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillMenuButtonFill"
class BsFillMenuButtonWideFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillMenuButtonWideFill"
class BsFillMicFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillMicFill"
class BsFillMicMuteFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillMicMuteFill"
class BsFillModemFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillModemFill"
class BsFillMoonFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillMoonFill"
class BsFillMoonStarsFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillMoonStarsFill"
class BsFillMortarboardFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillMortarboardFill"
class BsFillMotherboardFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillMotherboardFill"
class BsFillMouseFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillMouseFill"
class BsFillMouse2Fill(rx.Component):
library = "react-icons/bs"
tag = "BsFillMouse2Fill"
class BsFillMouse3Fill(rx.Component):
library = "react-icons/bs"
tag = "BsFillMouse3Fill"
class BsFillMusicPlayerFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillMusicPlayerFill"
class BsFillNodeMinusFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillNodeMinusFill"
class BsFillNodePlusFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillNodePlusFill"
class BsFillNutFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillNutFill"
class BsFillOctagonFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillOctagonFill"
class BsFillOpticalAudioFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillOpticalAudioFill"
class BsFillPCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPCircleFill"
class BsFillPSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPSquareFill"
class BsFillPaletteFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPaletteFill"
class BsFillPassFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPassFill"
class BsFillPatchCheckFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPatchCheckFill"
class BsFillPatchExclamationFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPatchExclamationFill"
class BsFillPatchMinusFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPatchMinusFill"
class BsFillPatchPlusFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPatchPlusFill"
class BsFillPatchQuestionFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPatchQuestionFill"
class BsFillPauseBtnFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPauseBtnFill"
class BsFillPauseCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPauseCircleFill"
class BsFillPauseFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPauseFill"
class BsFillPeaceFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPeaceFill"
class BsFillPenFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPenFill"
class BsFillPencilFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPencilFill"
class BsFillPentagonFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPentagonFill"
class BsFillPeopleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPeopleFill"
class BsFillPersonBadgeFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPersonBadgeFill"
class BsFillPersonCheckFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPersonCheckFill"
class BsFillPersonDashFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPersonDashFill"
class BsFillPersonFill(rx.Component):
    """Wrapper for the ``BsFillPersonFill`` Bootstrap icon from react-icons."""

    # JS export name within the library module.
    tag = "BsFillPersonFill"
    # npm module the icon is imported from.
    library = "react-icons/bs"
class BsFillPersonLinesFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPersonLinesFill"
class BsFillPersonPlusFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPersonPlusFill"
class BsFillPersonVcardFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPersonVcardFill"
class BsFillPersonXFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPersonXFill"
class BsFillPhoneFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPhoneFill"
class BsFillPhoneLandscapeFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPhoneLandscapeFill"
class BsFillPhoneVibrateFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPhoneVibrateFill"
class BsFillPieChartFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPieChartFill"
class BsFillPiggyBankFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPiggyBankFill"
class BsFillPinAngleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPinAngleFill"
class BsFillPinFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPinFill"
class BsFillPinMapFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPinMapFill"
class BsFillPipFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPipFill"
class BsFillPlayBtnFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPlayBtnFill"
class BsFillPlayCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPlayCircleFill"
class BsFillPlayFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPlayFill"
class BsFillPlugFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPlugFill"
class BsFillPlusCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPlusCircleFill"
class BsFillPlusSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPlusSquareFill"
class BsFillPostageFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPostageFill"
class BsFillPostageHeartFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPostageHeartFill"
class BsFillPostcardFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPostcardFill"
class BsFillPostcardHeartFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPostcardHeartFill"
class BsFillPrinterFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPrinterFill"
class BsFillProjectorFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillProjectorFill"
class BsFillPuzzleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillPuzzleFill"
class BsFillQuestionCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillQuestionCircleFill"
class BsFillQuestionDiamondFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillQuestionDiamondFill"
class BsFillQuestionOctagonFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillQuestionOctagonFill"
class BsFillQuestionSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillQuestionSquareFill"
class BsFillRCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillRCircleFill"
class BsFillRSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillRSquareFill"
class BsFillRecordBtnFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillRecordBtnFill"
class BsFillRecordCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillRecordCircleFill"
class BsFillRecordFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillRecordFill"
class BsFillRecord2Fill(rx.Component):
library = "react-icons/bs"
tag = "BsFillRecord2Fill"
class BsFillReplyAllFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillReplyAllFill"
class BsFillReplyFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillReplyFill"
class BsFillRewindBtnFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillRewindBtnFill"
class BsFillRewindCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillRewindCircleFill"
class BsFillRewindFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillRewindFill"
class BsFillRocketFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillRocketFill"
class BsFillRocketTakeoffFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillRocketTakeoffFill"
class BsFillRouterFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillRouterFill"
class BsFillRssFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillRssFill"
class BsFillSafeFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSafeFill"
class BsFillSafe2Fill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSafe2Fill"
class BsFillSaveFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSaveFill"
class BsFillSave2Fill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSave2Fill"
class BsFillSdCardFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSdCardFill"
class BsFillSearchHeartFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSearchHeartFill"
class BsFillSendCheckFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSendCheckFill"
class BsFillSendDashFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSendDashFill"
class BsFillSendExclamationFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSendExclamationFill"
class BsFillSendFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSendFill"
class BsFillSendPlusFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSendPlusFill"
class BsFillSendSlashFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSendSlashFill"
class BsFillSendXFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSendXFill"
class BsFillShareFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillShareFill"
class BsFillShieldFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillShieldFill"
class BsFillShieldLockFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillShieldLockFill"
class BsFillShieldSlashFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillShieldSlashFill"
class BsFillShiftFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillShiftFill"
class BsFillSignDeadEndFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignDeadEndFill"
class BsFillSignDoNotEnterFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignDoNotEnterFill"
class BsFillSignIntersectionFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignIntersectionFill"
class BsFillSignIntersectionSideFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignIntersectionSideFill"
class BsFillSignIntersectionTFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignIntersectionTFill"
class BsFillSignIntersectionYFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignIntersectionYFill"
class BsFillSignMergeLeftFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignMergeLeftFill"
class BsFillSignMergeRightFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignMergeRightFill"
class BsFillSignNoLeftTurnFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignNoLeftTurnFill"
class BsFillSignNoParkingFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignNoParkingFill"
class BsFillSignNoRightTurnFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignNoRightTurnFill"
class BsFillSignRailroadFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignRailroadFill"
class BsFillSignStopFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignStopFill"
class BsFillSignStopLightsFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignStopLightsFill"
class BsFillSignTurnLeftFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignTurnLeftFill"
class BsFillSignTurnRightFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignTurnRightFill"
class BsFillSignTurnSlightLeftFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignTurnSlightLeftFill"
class BsFillSignTurnSlightRightFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignTurnSlightRightFill"
class BsFillSignYieldFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignYieldFill"
class BsFillSignpost2Fill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignpost2Fill"
class BsFillSignpostFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignpostFill"
class BsFillSignpostSplitFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSignpostSplitFill"
class BsFillSimFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSimFill"
class BsFillSkipBackwardBtnFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSkipBackwardBtnFill"
class BsFillSkipBackwardCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSkipBackwardCircleFill"
class BsFillSkipBackwardFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSkipBackwardFill"
class BsFillSkipEndBtnFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSkipEndBtnFill"
class BsFillSkipEndCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSkipEndCircleFill"
class BsFillSkipEndFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSkipEndFill"
class BsFillSkipForwardBtnFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSkipForwardBtnFill"
class BsFillSkipForwardCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSkipForwardCircleFill"
class BsFillSkipForwardFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSkipForwardFill"
class BsFillSkipStartBtnFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSkipStartBtnFill"
class BsFillSkipStartCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSkipStartCircleFill"
class BsFillSkipStartFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSkipStartFill"
class BsFillSlashCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSlashCircleFill"
class BsFillSlashSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSlashSquareFill"
class BsFillSpeakerFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSpeakerFill"
class BsFillSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSquareFill"
class BsFillStarFill(rx.Component):
    """Wrapper for the ``BsFillStarFill`` Bootstrap icon from react-icons."""

    # JS export name within the library module.
    tag = "BsFillStarFill"
    # npm module the icon is imported from.
    library = "react-icons/bs"
class BsFillStickiesFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillStickiesFill"
class BsFillStickyFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillStickyFill"
class BsFillStopBtnFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillStopBtnFill"
class BsFillStopCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillStopCircleFill"
class BsFillStopFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillStopFill"
class BsFillStoplightsFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillStoplightsFill"
class BsFillStopwatchFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillStopwatchFill"
class BsFillSuitClubFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSuitClubFill"
class BsFillSuitDiamondFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSuitDiamondFill"
class BsFillSuitHeartFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSuitHeartFill"
class BsFillSuitSpadeFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSuitSpadeFill"
class BsFillSunFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSunFill"
class BsFillSunriseFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSunriseFill"
class BsFillSunsetFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillSunsetFill"
class BsFillTabletFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTabletFill"
class BsFillTabletLandscapeFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTabletLandscapeFill"
class BsFillTagFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTagFill"
class BsFillTagsFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTagsFill"
class BsFillTaxiFrontFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTaxiFrontFill"
class BsFillTelephoneFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTelephoneFill"
class BsFillTelephoneForwardFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTelephoneForwardFill"
class BsFillTelephoneInboundFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTelephoneInboundFill"
class BsFillTelephoneMinusFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTelephoneMinusFill"
class BsFillTelephoneOutboundFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTelephoneOutboundFill"
class BsFillTelephonePlusFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTelephonePlusFill"
class BsFillTelephoneXFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTelephoneXFill"
class BsFillTerminalFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTerminalFill"
class BsFillThunderboltFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillThunderboltFill"
class BsFillTicketDetailedFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTicketDetailedFill"
class BsFillTicketFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTicketFill"
class BsFillTicketPerforatedFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTicketPerforatedFill"
class BsFillTrainFreightFrontFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTrainFreightFrontFill"
class BsFillTrainFrontFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTrainFrontFill"
class BsFillTrainLightrailFrontFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTrainLightrailFrontFill"
class BsFillTrashFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTrashFill"
class BsFillTrash2Fill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTrash2Fill"
class BsFillTrash3Fill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTrash3Fill"
class BsFillTreeFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTreeFill"
class BsFillTriangleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTriangleFill"
class BsFillTrophyFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTrophyFill"
class BsFillTruckFrontFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTruckFrontFill"
class BsFillTvFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillTvFill"
class BsFillUmbrellaFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillUmbrellaFill"
class BsFillUnlockFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillUnlockFill"
class BsFillUsbCFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillUsbCFill"
class BsFillUsbDriveFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillUsbDriveFill"
class BsFillUsbFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillUsbFill"
class BsFillUsbMicroFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillUsbMicroFill"
class BsFillUsbMiniFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillUsbMiniFill"
class BsFillUsbPlugFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillUsbPlugFill"
class BsFillVinylFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillVinylFill"
class BsFillVolumeDownFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillVolumeDownFill"
class BsFillVolumeMuteFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillVolumeMuteFill"
class BsFillVolumeOffFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillVolumeOffFill"
class BsFillVolumeUpFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillVolumeUpFill"
class BsFillWalletFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillWalletFill"
class BsFillWebcamFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillWebcamFill"
class BsFillWrenchAdjustableCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillWrenchAdjustableCircleFill"
class BsFillXCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillXCircleFill"
class BsFillXDiamondFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillXDiamondFill"
class BsFillXOctagonFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillXOctagonFill"
class BsFillXSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsFillXSquareFill"
class BsReverseBackspaceReverse(rx.Component):
    """Wrapper for the ``BsReverseBackspaceReverse`` icon from react-icons.

    NOTE(review): this tag name looks like a generator artifact — the
    react-icons/bs package exports ``BsBackspaceReverse``, not
    ``BsReverseBackspaceReverse``. Left unchanged here; verify against the
    installed react-icons version before relying on this component.
    """

    # JS export name within the library module (possibly wrong — see note above).
    tag = "BsReverseBackspaceReverse"
    # npm module the icon is imported from.
    library = "react-icons/bs"
class BsReverseLayoutSidebarInsetReverse(rx.Component):
library = "react-icons/bs"
tag = "BsReverseLayoutSidebarInsetReverse"
class BsReverseLayoutSidebarReverse(rx.Component):
library = "react-icons/bs"
tag = "BsReverseLayoutSidebarReverse"
class BsReverseLayoutTextSidebarReverse(rx.Component):
library = "react-icons/bs"
tag = "BsReverseLayoutTextSidebarReverse"
class BsReverseLayoutTextWindowReverse(rx.Component):
library = "react-icons/bs"
tag = "BsReverseLayoutTextWindowReverse"
class BsReverseListColumnsReverse(rx.Component):
library = "react-icons/bs"
tag = "BsReverseListColumnsReverse"
class Bs0CircleFill(rx.Component):
library = "react-icons/bs"
tag = "Bs0CircleFill"
class Bs0Circle(rx.Component):
library = "react-icons/bs"
tag = "Bs0Circle"
class Bs0SquareFill(rx.Component):
library = "react-icons/bs"
tag = "Bs0SquareFill"
class Bs0Square(rx.Component):
library = "react-icons/bs"
tag = "Bs0Square"
class Bs1CircleFill(rx.Component):
library = "react-icons/bs"
tag = "Bs1CircleFill"
class Bs1Circle(rx.Component):
library = "react-icons/bs"
tag = "Bs1Circle"
class Bs1SquareFill(rx.Component):
library = "react-icons/bs"
tag = "Bs1SquareFill"
class Bs1Square(rx.Component):
library = "react-icons/bs"
tag = "Bs1Square"
class Bs123(rx.Component):
    """Wrapper for the ``Bs123`` Bootstrap icon from react-icons."""

    # JS export name within the library module.
    tag = "Bs123"
    # npm module the icon is imported from.
    library = "react-icons/bs"
class Bs2CircleFill(rx.Component):
library = "react-icons/bs"
tag = "Bs2CircleFill"
class Bs2Circle(rx.Component):
library = "react-icons/bs"
tag = "Bs2Circle"
class Bs2SquareFill(rx.Component):
library = "react-icons/bs"
tag = "Bs2SquareFill"
class Bs2Square(rx.Component):
library = "react-icons/bs"
tag = "Bs2Square"
class Bs3CircleFill(rx.Component):
library = "react-icons/bs"
tag = "Bs3CircleFill"
class Bs3Circle(rx.Component):
library = "react-icons/bs"
tag = "Bs3Circle"
class Bs3SquareFill(rx.Component):
library = "react-icons/bs"
tag = "Bs3SquareFill"
class Bs3Square(rx.Component):
library = "react-icons/bs"
tag = "Bs3Square"
class Bs4CircleFill(rx.Component):
library = "react-icons/bs"
tag = "Bs4CircleFill"
class Bs4Circle(rx.Component):
library = "react-icons/bs"
tag = "Bs4Circle"
class Bs4SquareFill(rx.Component):
library = "react-icons/bs"
tag = "Bs4SquareFill"
class Bs4Square(rx.Component):
library = "react-icons/bs"
tag = "Bs4Square"
class Bs5CircleFill(rx.Component):
library = "react-icons/bs"
tag = "Bs5CircleFill"
class Bs5Circle(rx.Component):
library = "react-icons/bs"
tag = "Bs5Circle"
class Bs5SquareFill(rx.Component):
library = "react-icons/bs"
tag = "Bs5SquareFill"
class Bs5Square(rx.Component):
library = "react-icons/bs"
tag = "Bs5Square"
class Bs6CircleFill(rx.Component):
library = "react-icons/bs"
tag = "Bs6CircleFill"
class Bs6Circle(rx.Component):
library = "react-icons/bs"
tag = "Bs6Circle"
class Bs6SquareFill(rx.Component):
library = "react-icons/bs"
tag = "Bs6SquareFill"
class Bs6Square(rx.Component):
library = "react-icons/bs"
tag = "Bs6Square"
class Bs7CircleFill(rx.Component):
library = "react-icons/bs"
tag = "Bs7CircleFill"
class Bs7Circle(rx.Component):
library = "react-icons/bs"
tag = "Bs7Circle"
class Bs7SquareFill(rx.Component):
library = "react-icons/bs"
tag = "Bs7SquareFill"
class Bs7Square(rx.Component):
library = "react-icons/bs"
tag = "Bs7Square"
class Bs8CircleFill(rx.Component):
library = "react-icons/bs"
tag = "Bs8CircleFill"
class Bs8Circle(rx.Component):
library = "react-icons/bs"
tag = "Bs8Circle"
class Bs8SquareFill(rx.Component):
library = "react-icons/bs"
tag = "Bs8SquareFill"
class Bs8Square(rx.Component):
library = "react-icons/bs"
tag = "Bs8Square"
class Bs9CircleFill(rx.Component):
library = "react-icons/bs"
tag = "Bs9CircleFill"
class Bs9Circle(rx.Component):
library = "react-icons/bs"
tag = "Bs9Circle"
class Bs9SquareFill(rx.Component):
library = "react-icons/bs"
tag = "Bs9SquareFill"
class Bs9Square(rx.Component):
library = "react-icons/bs"
tag = "Bs9Square"
class BsActivity(rx.Component):
library = "react-icons/bs"
tag = "BsActivity"
class BsAirplaneEnginesFill(rx.Component):
library = "react-icons/bs"
tag = "BsAirplaneEnginesFill"
class BsAirplaneEngines(rx.Component):
library = "react-icons/bs"
tag = "BsAirplaneEngines"
class BsAirplaneFill(rx.Component):
library = "react-icons/bs"
tag = "BsAirplaneFill"
class BsAirplane(rx.Component):
library = "react-icons/bs"
tag = "BsAirplane"
class BsAlarmFill(rx.Component):
library = "react-icons/bs"
tag = "BsAlarmFill"
class BsAlarm(rx.Component):
library = "react-icons/bs"
tag = "BsAlarm"
class BsAlexa(rx.Component):
library = "react-icons/bs"
tag = "BsAlexa"
class BsAlignBottom(rx.Component):
library = "react-icons/bs"
tag = "BsAlignBottom"
class BsAlignCenter(rx.Component):
library = "react-icons/bs"
tag = "BsAlignCenter"
class BsAlignEnd(rx.Component):
library = "react-icons/bs"
tag = "BsAlignEnd"
class BsAlignMiddle(rx.Component):
library = "react-icons/bs"
tag = "BsAlignMiddle"
class BsAlignStart(rx.Component):
library = "react-icons/bs"
tag = "BsAlignStart"
class BsAlignTop(rx.Component):
library = "react-icons/bs"
tag = "BsAlignTop"
class BsAlipay(rx.Component):
library = "react-icons/bs"
tag = "BsAlipay"
class BsAlt(rx.Component):
library = "react-icons/bs"
tag = "BsAlt"
class BsAmd(rx.Component):
library = "react-icons/bs"
tag = "BsAmd"
class BsAndroid(rx.Component):
library = "react-icons/bs"
tag = "BsAndroid"
class BsAndroid2(rx.Component):
library = "react-icons/bs"
tag = "BsAndroid2"
class BsAppIndicator(rx.Component):
library = "react-icons/bs"
tag = "BsAppIndicator"
class BsApp(rx.Component):
library = "react-icons/bs"
tag = "BsApp"
class BsApple(rx.Component):
library = "react-icons/bs"
tag = "BsApple"
class BsArchiveFill(rx.Component):
library = "react-icons/bs"
tag = "BsArchiveFill"
class BsArchive(rx.Component):
library = "react-icons/bs"
tag = "BsArchive"
class BsArrow90DegDown(rx.Component):
library = "react-icons/bs"
tag = "BsArrow90DegDown"
class BsArrow90DegLeft(rx.Component):
library = "react-icons/bs"
tag = "BsArrow90DegLeft"
class BsArrow90DegRight(rx.Component):
library = "react-icons/bs"
tag = "BsArrow90DegRight"
class BsArrow90DegUp(rx.Component):
library = "react-icons/bs"
tag = "BsArrow90DegUp"
class BsArrowBarDown(rx.Component):
library = "react-icons/bs"
tag = "BsArrowBarDown"
class BsArrowBarLeft(rx.Component):
library = "react-icons/bs"
tag = "BsArrowBarLeft"
class BsArrowBarRight(rx.Component):
library = "react-icons/bs"
tag = "BsArrowBarRight"
class BsArrowBarUp(rx.Component):
library = "react-icons/bs"
tag = "BsArrowBarUp"
class BsArrowClockwise(rx.Component):
library = "react-icons/bs"
tag = "BsArrowClockwise"
class BsArrowCounterclockwise(rx.Component):
library = "react-icons/bs"
tag = "BsArrowCounterclockwise"
class BsArrowDownCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsArrowDownCircleFill"
class BsArrowDownCircle(rx.Component):
library = "react-icons/bs"
tag = "BsArrowDownCircle"
class BsArrowDownLeftCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsArrowDownLeftCircleFill"
class BsArrowDownLeftCircle(rx.Component):
library = "react-icons/bs"
tag = "BsArrowDownLeftCircle"
class BsArrowDownLeftSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsArrowDownLeftSquareFill"
class BsArrowDownLeftSquare(rx.Component):
library = "react-icons/bs"
tag = "BsArrowDownLeftSquare"
class BsArrowDownLeft(rx.Component):
library = "react-icons/bs"
tag = "BsArrowDownLeft"
class BsArrowDownRightCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsArrowDownRightCircleFill"
class BsArrowDownRightCircle(rx.Component):
library = "react-icons/bs"
tag = "BsArrowDownRightCircle"
class BsArrowDownRightSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsArrowDownRightSquareFill"
class BsArrowDownRightSquare(rx.Component):
library = "react-icons/bs"
tag = "BsArrowDownRightSquare"
class BsArrowDownRight(rx.Component):
library = "react-icons/bs"
tag = "BsArrowDownRight"
class BsArrowDownShort(rx.Component):
library = "react-icons/bs"
tag = "BsArrowDownShort"
class BsArrowDownSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsArrowDownSquareFill"
class BsArrowDownSquare(rx.Component):
library = "react-icons/bs"
tag = "BsArrowDownSquare"
class BsArrowDownUp(rx.Component):
library = "react-icons/bs"
tag = "BsArrowDownUp"
class BsArrowDown(rx.Component):
library = "react-icons/bs"
tag = "BsArrowDown"
class BsArrowLeftCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsArrowLeftCircleFill"
class BsArrowLeftCircle(rx.Component):
library = "react-icons/bs"
tag = "BsArrowLeftCircle"
class BsArrowLeftRight(rx.Component):
library = "react-icons/bs"
tag = "BsArrowLeftRight"
class BsArrowLeftShort(rx.Component):
library = "react-icons/bs"
tag = "BsArrowLeftShort"
class BsArrowLeftSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsArrowLeftSquareFill"
class BsArrowLeftSquare(rx.Component):
library = "react-icons/bs"
tag = "BsArrowLeftSquare"
class BsArrowLeft(rx.Component):
library = "react-icons/bs"
tag = "BsArrowLeft"
class BsArrowRepeat(rx.Component):
library = "react-icons/bs"
tag = "BsArrowRepeat"
class BsArrowReturnLeft(rx.Component):
library = "react-icons/bs"
tag = "BsArrowReturnLeft"
class BsArrowReturnRight(rx.Component):
library = "react-icons/bs"
tag = "BsArrowReturnRight"
class BsArrowRightCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsArrowRightCircleFill"
class BsArrowRightCircle(rx.Component):
library = "react-icons/bs"
tag = "BsArrowRightCircle"
class BsArrowRightShort(rx.Component):
library = "react-icons/bs"
tag = "BsArrowRightShort"
class BsArrowRightSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsArrowRightSquareFill"
class BsArrowRightSquare(rx.Component):
library = "react-icons/bs"
tag = "BsArrowRightSquare"
class BsArrowRight(rx.Component):
library = "react-icons/bs"
tag = "BsArrowRight"
class BsArrowThroughHeartFill(rx.Component):
library = "react-icons/bs"
tag = "BsArrowThroughHeartFill"
class BsArrowThroughHeart(rx.Component):
library = "react-icons/bs"
tag = "BsArrowThroughHeart"
class BsArrowUpCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsArrowUpCircleFill"
class BsArrowUpCircle(rx.Component):
library = "react-icons/bs"
tag = "BsArrowUpCircle"
class BsArrowUpLeftCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsArrowUpLeftCircleFill"
class BsArrowUpLeftCircle(rx.Component):
library = "react-icons/bs"
tag = "BsArrowUpLeftCircle"
class BsArrowUpLeftSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsArrowUpLeftSquareFill"
class BsArrowUpLeftSquare(rx.Component):
library = "react-icons/bs"
tag = "BsArrowUpLeftSquare"
class BsArrowUpLeft(rx.Component):
library = "react-icons/bs"
tag = "BsArrowUpLeft"
class BsArrowUpRightCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsArrowUpRightCircleFill"
class BsArrowUpRightCircle(rx.Component):
library = "react-icons/bs"
tag = "BsArrowUpRightCircle"
class BsArrowUpRightSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsArrowUpRightSquareFill"
class BsArrowUpRightSquare(rx.Component):
library = "react-icons/bs"
tag = "BsArrowUpRightSquare"
class BsArrowUpRight(rx.Component):
library = "react-icons/bs"
tag = "BsArrowUpRight"
class BsArrowUpShort(rx.Component):
library = "react-icons/bs"
tag = "BsArrowUpShort"
class BsArrowUpSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsArrowUpSquareFill"
class BsArrowUpSquare(rx.Component):
library = "react-icons/bs"
tag = "BsArrowUpSquare"
class BsArrowUp(rx.Component):
library = "react-icons/bs"
tag = "BsArrowUp"
class BsArrowsAngleContract(rx.Component):
library = "react-icons/bs"
tag = "BsArrowsAngleContract"
class BsArrowsAngleExpand(rx.Component):
library = "react-icons/bs"
tag = "BsArrowsAngleExpand"
class BsArrowsCollapse(rx.Component):
library = "react-icons/bs"
tag = "BsArrowsCollapse"
class BsArrowsExpand(rx.Component):
library = "react-icons/bs"
tag = "BsArrowsExpand"
class BsArrowsFullscreen(rx.Component):
library = "react-icons/bs"
tag = "BsArrowsFullscreen"
class BsArrowsMove(rx.Component):
library = "react-icons/bs"
tag = "BsArrowsMove"
class BsAspectRatioFill(rx.Component):
library = "react-icons/bs"
tag = "BsAspectRatioFill"
class BsAspectRatio(rx.Component):
library = "react-icons/bs"
tag = "BsAspectRatio"
class BsAsterisk(rx.Component):
library = "react-icons/bs"
tag = "BsAsterisk"
class BsAt(rx.Component):
library = "react-icons/bs"
tag = "BsAt"
class BsAwardFill(rx.Component):
library = "react-icons/bs"
tag = "BsAwardFill"
class BsAward(rx.Component):
library = "react-icons/bs"
tag = "BsAward"
class BsBack(rx.Component):
library = "react-icons/bs"
tag = "BsBack"
class BsBackspaceFill(rx.Component):
library = "react-icons/bs"
tag = "BsBackspaceFill"
class BsBackspaceReverseFill(rx.Component):
library = "react-icons/bs"
tag = "BsBackspaceReverseFill"
class BsBackspaceReverse(rx.Component):
library = "react-icons/bs"
tag = "BsBackspaceReverse"
class BsBackspace(rx.Component):
library = "react-icons/bs"
tag = "BsBackspace"
class BsBadge3DFill(rx.Component):
library = "react-icons/bs"
tag = "BsBadge3DFill"
class BsBadge3D(rx.Component):
library = "react-icons/bs"
tag = "BsBadge3D"
class BsBadge4KFill(rx.Component):
library = "react-icons/bs"
tag = "BsBadge4KFill"
class BsBadge4K(rx.Component):
library = "react-icons/bs"
tag = "BsBadge4K"
class BsBadge8KFill(rx.Component):
library = "react-icons/bs"
tag = "BsBadge8KFill"
class BsBadge8K(rx.Component):
library = "react-icons/bs"
tag = "BsBadge8K"
class BsBadgeAdFill(rx.Component):
library = "react-icons/bs"
tag = "BsBadgeAdFill"
class BsBadgeAd(rx.Component):
library = "react-icons/bs"
tag = "BsBadgeAd"
class BsBadgeArFill(rx.Component):
library = "react-icons/bs"
tag = "BsBadgeArFill"
class BsBadgeAr(rx.Component):
library = "react-icons/bs"
tag = "BsBadgeAr"
class BsBadgeCcFill(rx.Component):
library = "react-icons/bs"
tag = "BsBadgeCcFill"
class BsBadgeCc(rx.Component):
library = "react-icons/bs"
tag = "BsBadgeCc"
class BsBadgeHdFill(rx.Component):
library = "react-icons/bs"
tag = "BsBadgeHdFill"
class BsBadgeHd(rx.Component):
library = "react-icons/bs"
tag = "BsBadgeHd"
class BsBadgeSdFill(rx.Component):
library = "react-icons/bs"
tag = "BsBadgeSdFill"
class BsBadgeSd(rx.Component):
library = "react-icons/bs"
tag = "BsBadgeSd"
class BsBadgeTmFill(rx.Component):
library = "react-icons/bs"
tag = "BsBadgeTmFill"
class BsBadgeTm(rx.Component):
library = "react-icons/bs"
tag = "BsBadgeTm"
class BsBadgeVoFill(rx.Component):
library = "react-icons/bs"
tag = "BsBadgeVoFill"
class BsBadgeVo(rx.Component):
library = "react-icons/bs"
tag = "BsBadgeVo"
class BsBadgeVrFill(rx.Component):
library = "react-icons/bs"
tag = "BsBadgeVrFill"
class BsBadgeVr(rx.Component):
library = "react-icons/bs"
tag = "BsBadgeVr"
class BsBadgeWcFill(rx.Component):
library = "react-icons/bs"
tag = "BsBadgeWcFill"
class BsBadgeWc(rx.Component):
library = "react-icons/bs"
tag = "BsBadgeWc"
class BsBagCheckFill(rx.Component):
library = "react-icons/bs"
tag = "BsBagCheckFill"
class BsBagCheck(rx.Component):
library = "react-icons/bs"
tag = "BsBagCheck"
class BsBagDashFill(rx.Component):
library = "react-icons/bs"
tag = "BsBagDashFill"
class BsBagDash(rx.Component):
library = "react-icons/bs"
tag = "BsBagDash"
class BsBagFill(rx.Component):
library = "react-icons/bs"
tag = "BsBagFill"
class BsBagHeartFill(rx.Component):
library = "react-icons/bs"
tag = "BsBagHeartFill"
class BsBagHeart(rx.Component):
library = "react-icons/bs"
tag = "BsBagHeart"
class BsBagPlusFill(rx.Component):
library = "react-icons/bs"
tag = "BsBagPlusFill"
class BsBagPlus(rx.Component):
library = "react-icons/bs"
tag = "BsBagPlus"
class BsBagXFill(rx.Component):
library = "react-icons/bs"
tag = "BsBagXFill"
class BsBagX(rx.Component):
library = "react-icons/bs"
tag = "BsBagX"
class BsBag(rx.Component):
library = "react-icons/bs"
tag = "BsBag"
class BsBalloonFill(rx.Component):
library = "react-icons/bs"
tag = "BsBalloonFill"
class BsBalloonHeartFill(rx.Component):
library = "react-icons/bs"
tag = "BsBalloonHeartFill"
class BsBalloonHeart(rx.Component):
library = "react-icons/bs"
tag = "BsBalloonHeart"
class BsBalloon(rx.Component):
library = "react-icons/bs"
tag = "BsBalloon"
class BsBandaidFill(rx.Component):
library = "react-icons/bs"
tag = "BsBandaidFill"
class BsBandaid(rx.Component):
library = "react-icons/bs"
tag = "BsBandaid"
class BsBank(rx.Component):
library = "react-icons/bs"
tag = "BsBank"
class BsBank2(rx.Component):
library = "react-icons/bs"
tag = "BsBank2"
class BsBarChartFill(rx.Component):
library = "react-icons/bs"
tag = "BsBarChartFill"
class BsBarChartLineFill(rx.Component):
library = "react-icons/bs"
tag = "BsBarChartLineFill"
class BsBarChartLine(rx.Component):
library = "react-icons/bs"
tag = "BsBarChartLine"
class BsBarChartSteps(rx.Component):
library = "react-icons/bs"
tag = "BsBarChartSteps"
class BsBarChart(rx.Component):
library = "react-icons/bs"
tag = "BsBarChart"
class BsBasketFill(rx.Component):
library = "react-icons/bs"
tag = "BsBasketFill"
class BsBasket(rx.Component):
library = "react-icons/bs"
tag = "BsBasket"
class BsBasket2Fill(rx.Component):
library = "react-icons/bs"
tag = "BsBasket2Fill"
class BsBasket2(rx.Component):
library = "react-icons/bs"
tag = "BsBasket2"
class BsBasket3Fill(rx.Component):
library = "react-icons/bs"
tag = "BsBasket3Fill"
class BsBasket3(rx.Component):
library = "react-icons/bs"
tag = "BsBasket3"
class BsBatteryCharging(rx.Component):
library = "react-icons/bs"
tag = "BsBatteryCharging"
class BsBatteryFull(rx.Component):
library = "react-icons/bs"
tag = "BsBatteryFull"
class BsBatteryHalf(rx.Component):
library = "react-icons/bs"
tag = "BsBatteryHalf"
class BsBattery(rx.Component):
library = "react-icons/bs"
tag = "BsBattery"
class BsBehance(rx.Component):
library = "react-icons/bs"
tag = "BsBehance"
class BsBellFill(rx.Component):
library = "react-icons/bs"
tag = "BsBellFill"
class BsBellSlashFill(rx.Component):
library = "react-icons/bs"
tag = "BsBellSlashFill"
class BsBellSlash(rx.Component):
library = "react-icons/bs"
tag = "BsBellSlash"
class BsBell(rx.Component):
library = "react-icons/bs"
tag = "BsBell"
class BsBezier(rx.Component):
library = "react-icons/bs"
tag = "BsBezier"
class BsBezier2(rx.Component):
library = "react-icons/bs"
tag = "BsBezier2"
class BsBicycle(rx.Component):
library = "react-icons/bs"
tag = "BsBicycle"
class BsBinocularsFill(rx.Component):
library = "react-icons/bs"
tag = "BsBinocularsFill"
class BsBinoculars(rx.Component):
library = "react-icons/bs"
tag = "BsBinoculars"
class BsBlockquoteLeft(rx.Component):
library = "react-icons/bs"
tag = "BsBlockquoteLeft"
class BsBlockquoteRight(rx.Component):
library = "react-icons/bs"
tag = "BsBlockquoteRight"
class BsBluetooth(rx.Component):
library = "react-icons/bs"
tag = "BsBluetooth"
class BsBodyText(rx.Component):
library = "react-icons/bs"
tag = "BsBodyText"
class BsBookFill(rx.Component):
library = "react-icons/bs"
tag = "BsBookFill"
class BsBookHalf(rx.Component):
library = "react-icons/bs"
tag = "BsBookHalf"
class BsBook(rx.Component):
library = "react-icons/bs"
tag = "BsBook"
class BsBookmarkCheckFill(rx.Component):
library = "react-icons/bs"
tag = "BsBookmarkCheckFill"
class BsBookmarkCheck(rx.Component):
library = "react-icons/bs"
tag = "BsBookmarkCheck"
class BsBookmarkDashFill(rx.Component):
library = "react-icons/bs"
tag = "BsBookmarkDashFill"
class BsBookmarkDash(rx.Component):
library = "react-icons/bs"
tag = "BsBookmarkDash"
class BsBookmarkFill(rx.Component):
library = "react-icons/bs"
tag = "BsBookmarkFill"
class BsBookmarkHeartFill(rx.Component):
library = "react-icons/bs"
tag = "BsBookmarkHeartFill"
class BsBookmarkHeart(rx.Component):
library = "react-icons/bs"
tag = "BsBookmarkHeart"
class BsBookmarkPlusFill(rx.Component):
library = "react-icons/bs"
tag = "BsBookmarkPlusFill"
class BsBookmarkPlus(rx.Component):
library = "react-icons/bs"
tag = "BsBookmarkPlus"
class BsBookmarkStarFill(rx.Component):
library = "react-icons/bs"
tag = "BsBookmarkStarFill"
class BsBookmarkStar(rx.Component):
library = "react-icons/bs"
tag = "BsBookmarkStar"
class BsBookmarkXFill(rx.Component):
library = "react-icons/bs"
tag = "BsBookmarkXFill"
class BsBookmarkX(rx.Component):
library = "react-icons/bs"
tag = "BsBookmarkX"
class BsBookmark(rx.Component):
library = "react-icons/bs"
tag = "BsBookmark"
class BsBookmarksFill(rx.Component):
library = "react-icons/bs"
tag = "BsBookmarksFill"
class BsBookmarks(rx.Component):
library = "react-icons/bs"
tag = "BsBookmarks"
class BsBookshelf(rx.Component):
library = "react-icons/bs"
tag = "BsBookshelf"
class BsBoomboxFill(rx.Component):
library = "react-icons/bs"
tag = "BsBoomboxFill"
class BsBoombox(rx.Component):
library = "react-icons/bs"
tag = "BsBoombox"
class BsBootstrapFill(rx.Component):
library = "react-icons/bs"
tag = "BsBootstrapFill"
class BsBootstrapReboot(rx.Component):
library = "react-icons/bs"
tag = "BsBootstrapReboot"
class BsBootstrap(rx.Component):
library = "react-icons/bs"
tag = "BsBootstrap"
class BsBorderAll(rx.Component):
library = "react-icons/bs"
tag = "BsBorderAll"
class BsBorderBottom(rx.Component):
library = "react-icons/bs"
tag = "BsBorderBottom"
class BsBorderCenter(rx.Component):
library = "react-icons/bs"
tag = "BsBorderCenter"
class BsBorderInner(rx.Component):
library = "react-icons/bs"
tag = "BsBorderInner"
class BsBorderLeft(rx.Component):
library = "react-icons/bs"
tag = "BsBorderLeft"
class BsBorderMiddle(rx.Component):
library = "react-icons/bs"
tag = "BsBorderMiddle"
class BsBorderOuter(rx.Component):
library = "react-icons/bs"
tag = "BsBorderOuter"
class BsBorderRight(rx.Component):
library = "react-icons/bs"
tag = "BsBorderRight"
class BsBorderStyle(rx.Component):
library = "react-icons/bs"
tag = "BsBorderStyle"
class BsBorderTop(rx.Component):
library = "react-icons/bs"
tag = "BsBorderTop"
class BsBorderWidth(rx.Component):
library = "react-icons/bs"
tag = "BsBorderWidth"
class BsBorder(rx.Component):
library = "react-icons/bs"
tag = "BsBorder"
class BsBoundingBoxCircles(rx.Component):
library = "react-icons/bs"
tag = "BsBoundingBoxCircles"
class BsBoundingBox(rx.Component):
library = "react-icons/bs"
tag = "BsBoundingBox"
class BsBoxArrowDownLeft(rx.Component):
library = "react-icons/bs"
tag = "BsBoxArrowDownLeft"
class BsBoxArrowDownRight(rx.Component):
library = "react-icons/bs"
tag = "BsBoxArrowDownRight"
class BsBoxArrowDown(rx.Component):
library = "react-icons/bs"
tag = "BsBoxArrowDown"
class BsBoxArrowInDownLeft(rx.Component):
library = "react-icons/bs"
tag = "BsBoxArrowInDownLeft"
class BsBoxArrowInDownRight(rx.Component):
library = "react-icons/bs"
tag = "BsBoxArrowInDownRight"
class BsBoxArrowInDown(rx.Component):
library = "react-icons/bs"
tag = "BsBoxArrowInDown"
class BsBoxArrowInLeft(rx.Component):
library = "react-icons/bs"
tag = "BsBoxArrowInLeft"
class BsBoxArrowInRight(rx.Component):
library = "react-icons/bs"
tag = "BsBoxArrowInRight"
class BsBoxArrowInUpLeft(rx.Component):
library = "react-icons/bs"
tag = "BsBoxArrowInUpLeft"
class BsBoxArrowInUpRight(rx.Component):
library = "react-icons/bs"
tag = "BsBoxArrowInUpRight"
class BsBoxArrowInUp(rx.Component):
library = "react-icons/bs"
tag = "BsBoxArrowInUp"
class BsBoxArrowLeft(rx.Component):
library = "react-icons/bs"
tag = "BsBoxArrowLeft"
class BsBoxArrowRight(rx.Component):
library = "react-icons/bs"
tag = "BsBoxArrowRight"
class BsBoxArrowUpLeft(rx.Component):
library = "react-icons/bs"
tag = "BsBoxArrowUpLeft"
class BsBoxArrowUpRight(rx.Component):
library = "react-icons/bs"
tag = "BsBoxArrowUpRight"
class BsBoxArrowUp(rx.Component):
library = "react-icons/bs"
tag = "BsBoxArrowUp"
class BsBoxFill(rx.Component):
library = "react-icons/bs"
tag = "BsBoxFill"
class BsBoxSeamFill(rx.Component):
library = "react-icons/bs"
tag = "BsBoxSeamFill"
class BsBoxSeam(rx.Component):
library = "react-icons/bs"
tag = "BsBoxSeam"
class BsBox(rx.Component):
library = "react-icons/bs"
tag = "BsBox"
class BsBox2Fill(rx.Component):
library = "react-icons/bs"
tag = "BsBox2Fill"
class BsBox2HeartFill(rx.Component):
library = "react-icons/bs"
tag = "BsBox2HeartFill"
class BsBox2Heart(rx.Component):
library = "react-icons/bs"
tag = "BsBox2Heart"
class BsBox2(rx.Component):
library = "react-icons/bs"
tag = "BsBox2"
class BsBoxes(rx.Component):
library = "react-icons/bs"
tag = "BsBoxes"
class BsBracesAsterisk(rx.Component):
library = "react-icons/bs"
tag = "BsBracesAsterisk"
class BsBraces(rx.Component):
library = "react-icons/bs"
tag = "BsBraces"
class BsBricks(rx.Component):
library = "react-icons/bs"
tag = "BsBricks"
class BsBriefcaseFill(rx.Component):
library = "react-icons/bs"
tag = "BsBriefcaseFill"
class BsBriefcase(rx.Component):
library = "react-icons/bs"
tag = "BsBriefcase"
class BsBrightnessAltHighFill(rx.Component):
library = "react-icons/bs"
tag = "BsBrightnessAltHighFill"
class BsBrightnessAltHigh(rx.Component):
library = "react-icons/bs"
tag = "BsBrightnessAltHigh"
class BsBrightnessAltLowFill(rx.Component):
library = "react-icons/bs"
tag = "BsBrightnessAltLowFill"
class BsBrightnessAltLow(rx.Component):
library = "react-icons/bs"
tag = "BsBrightnessAltLow"
class BsBrightnessHighFill(rx.Component):
library = "react-icons/bs"
tag = "BsBrightnessHighFill"
class BsBrightnessHigh(rx.Component):
library = "react-icons/bs"
tag = "BsBrightnessHigh"
class BsBrightnessLowFill(rx.Component):
library = "react-icons/bs"
tag = "BsBrightnessLowFill"
class BsBrightnessLow(rx.Component):
library = "react-icons/bs"
tag = "BsBrightnessLow"
class BsBroadcastPin(rx.Component):
library = "react-icons/bs"
tag = "BsBroadcastPin"
class BsBroadcast(rx.Component):
library = "react-icons/bs"
tag = "BsBroadcast"
class BsBrowserChrome(rx.Component):
library = "react-icons/bs"
tag = "BsBrowserChrome"
class BsBrowserEdge(rx.Component):
library = "react-icons/bs"
tag = "BsBrowserEdge"
class BsBrowserFirefox(rx.Component):
library = "react-icons/bs"
tag = "BsBrowserFirefox"
class BsBrowserSafari(rx.Component):
library = "react-icons/bs"
tag = "BsBrowserSafari"
class BsBrushFill(rx.Component):
library = "react-icons/bs"
tag = "BsBrushFill"
class BsBrush(rx.Component):
library = "react-icons/bs"
tag = "BsBrush"
class BsBucketFill(rx.Component):
library = "react-icons/bs"
tag = "BsBucketFill"
class BsBucket(rx.Component):
library = "react-icons/bs"
tag = "BsBucket"
class BsBugFill(rx.Component):
library = "react-icons/bs"
tag = "BsBugFill"
class BsBug(rx.Component):
library = "react-icons/bs"
tag = "BsBug"
class BsBuildingAdd(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingAdd"
class BsBuildingCheck(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingCheck"
class BsBuildingDash(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingDash"
class BsBuildingDown(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingDown"
class BsBuildingExclamation(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingExclamation"
class BsBuildingFillAdd(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingFillAdd"
class BsBuildingFillCheck(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingFillCheck"
class BsBuildingFillDash(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingFillDash"
class BsBuildingFillDown(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingFillDown"
class BsBuildingFillExclamation(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingFillExclamation"
class BsBuildingFillGear(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingFillGear"
class BsBuildingFillLock(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingFillLock"
class BsBuildingFillSlash(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingFillSlash"
class BsBuildingFillUp(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingFillUp"
class BsBuildingFillX(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingFillX"
class BsBuildingFill(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingFill"
class BsBuildingGear(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingGear"
class BsBuildingLock(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingLock"
class BsBuildingSlash(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingSlash"
class BsBuildingUp(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingUp"
class BsBuildingX(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingX"
class BsBuilding(rx.Component):
library = "react-icons/bs"
tag = "BsBuilding"
class BsBuildingsFill(rx.Component):
library = "react-icons/bs"
tag = "BsBuildingsFill"
class BsBuildings(rx.Component):
library = "react-icons/bs"
tag = "BsBuildings"
class BsBullseye(rx.Component):
library = "react-icons/bs"
tag = "BsBullseye"
class BsBusFrontFill(rx.Component):
library = "react-icons/bs"
tag = "BsBusFrontFill"
class BsBusFront(rx.Component):
library = "react-icons/bs"
tag = "BsBusFront"
class BsCCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsCCircleFill"
class BsCCircle(rx.Component):
library = "react-icons/bs"
tag = "BsCCircle"
class BsCSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsCSquareFill"
class BsCSquare(rx.Component):
library = "react-icons/bs"
tag = "BsCSquare"
class BsCalculatorFill(rx.Component):
library = "react-icons/bs"
tag = "BsCalculatorFill"
class BsCalculator(rx.Component):
library = "react-icons/bs"
tag = "BsCalculator"
class BsCalendarCheckFill(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarCheckFill"
class BsCalendarCheck(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarCheck"
class BsCalendarDateFill(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarDateFill"
class BsCalendarDate(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarDate"
class BsCalendarDayFill(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarDayFill"
class BsCalendarDay(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarDay"
class BsCalendarEventFill(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarEventFill"
class BsCalendarEvent(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarEvent"
class BsCalendarFill(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarFill"
class BsCalendarHeartFill(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarHeartFill"
class BsCalendarHeart(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarHeart"
class BsCalendarMinusFill(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarMinusFill"
class BsCalendarMinus(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarMinus"
class BsCalendarMonthFill(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarMonthFill"
class BsCalendarMonth(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarMonth"
class BsCalendarPlusFill(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarPlusFill"
class BsCalendarPlus(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarPlus"
class BsCalendarRangeFill(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarRangeFill"
class BsCalendarRange(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarRange"
class BsCalendarWeekFill(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarWeekFill"
class BsCalendarWeek(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarWeek"
class BsCalendarXFill(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarXFill"
class BsCalendarX(rx.Component):
library = "react-icons/bs"
tag = "BsCalendarX"
class BsCalendar(rx.Component):
library = "react-icons/bs"
tag = "BsCalendar"
class BsCalendar2CheckFill(rx.Component):
library = "react-icons/bs"
tag = "BsCalendar2CheckFill"
class BsCalendar2Check(rx.Component):
library = "react-icons/bs"
tag = "BsCalendar2Check"
class BsCalendar2DateFill(rx.Component):
library = "react-icons/bs"
tag = "BsCalendar2DateFill"
class BsCalendar2Date(rx.Component):
library = "react-icons/bs"
tag = "BsCalendar2Date"
class BsCalendar2DayFill(rx.Component):
library = "react-icons/bs"
tag = "BsCalendar2DayFill"
class BsCalendar2Day(rx.Component):
library = "react-icons/bs"
tag = "BsCalendar2Day"
class BsCalendar2EventFill(rx.Component):
library = "react-icons/bs"
tag = "BsCalendar2EventFill"
class BsCalendar2Event(rx.Component):
library = "react-icons/bs"
tag = "BsCalendar2Event"
class BsCalendar2Fill(rx.Component):
library = "react-icons/bs"
tag = "BsCalendar2Fill"
class BsCalendar2HeartFill(rx.Component):
library = "react-icons/bs"
tag = "BsCalendar2HeartFill"
class BsCalendar2Heart(rx.Component):
library = "react-icons/bs"
tag = "BsCalendar2Heart"
class BsCalendar2MinusFill(rx.Component):
library = "react-icons/bs"
tag = "BsCalendar2MinusFill"
class BsCalendar2Minus(rx.Component):
library = "react-icons/bs"
tag = "BsCalendar2Minus"
class BsCalendar2MonthFill(rx.Component):
library = "react-icons/bs"
tag = "BsCalendar2MonthFill"
class BsCalendar2Month(rx.Component):
library = "react-icons/bs"
tag = "BsCalendar2Month"
class BsCalendar2PlusFill(rx.Component):
library = "react-icons/bs"
tag = "BsCalendar2PlusFill"
class BsCalendar2Plus(rx.Component):
library = "react-icons/bs"
tag = "BsCalendar2Plus"
# ---------------------------------------------------------------------------
# Bootstrap ("Bs") icon components, generated programmatically.
#
# Every icon in this section is an identical three-line ``rx.Component``
# subclass: ``library`` is always ``"react-icons/bs"`` and ``tag`` always
# equals the class name.  Instead of repeating that boilerplate 418 times,
# the classes are created in a loop from a single tuple of tags.
#
# The public interface is unchanged: each name listed in ``_BS_ICON_TAGS``
# is still a module-level class deriving from ``rx.Component`` with the
# same ``library``/``tag`` class attributes, in the same declaration order.
# ---------------------------------------------------------------------------

# All icons in this section come from the same react-icons sub-package.
_BS_ICON_LIBRARY = "react-icons/bs"

# Icon tags in their original declaration order; each doubles as the
# module-level class name it produces.
_BS_ICON_TAGS = (
    "BsCalendar2RangeFill", "BsCalendar2Range", "BsCalendar2WeekFill", "BsCalendar2Week",
    "BsCalendar2XFill", "BsCalendar2X", "BsCalendar2", "BsCalendar3EventFill",
    "BsCalendar3Event", "BsCalendar3Fill", "BsCalendar3RangeFill", "BsCalendar3Range",
    "BsCalendar3WeekFill", "BsCalendar3Week", "BsCalendar3", "BsCalendar4Event",
    "BsCalendar4Range", "BsCalendar4Week", "BsCalendar4", "BsCameraFill",
    "BsCameraReelsFill", "BsCameraReels", "BsCameraVideoFill", "BsCameraVideoOffFill",
    "BsCameraVideoOff", "BsCameraVideo", "BsCamera", "BsCamera2",
    "BsCapslockFill", "BsCapslock", "BsCapsulePill", "BsCapsule",
    "BsCarFrontFill", "BsCarFront", "BsCardChecklist", "BsCardHeading",
    "BsCardImage", "BsCardList", "BsCardText", "BsCaretDownFill",
    "BsCaretDownSquareFill", "BsCaretDownSquare", "BsCaretDown", "BsCaretLeftFill",
    "BsCaretLeftSquareFill", "BsCaretLeftSquare", "BsCaretLeft", "BsCaretRightFill",
    "BsCaretRightSquareFill", "BsCaretRightSquare", "BsCaretRight", "BsCaretUpFill",
    "BsCaretUpSquareFill", "BsCaretUpSquare", "BsCaretUp", "BsCartCheckFill",
    "BsCartCheck", "BsCartDashFill", "BsCartDash", "BsCartFill",
    "BsCartPlusFill", "BsCartPlus", "BsCartXFill", "BsCartX",
    "BsCart", "BsCart2", "BsCart3", "BsCart4",
    "BsCashCoin", "BsCashStack", "BsCash", "BsCassetteFill",
    "BsCassette", "BsCast", "BsCcCircleFill", "BsCcCircle",
    "BsCcSquareFill", "BsCcSquare", "BsChatDotsFill", "BsChatDots",
    "BsChatFill", "BsChatHeartFill", "BsChatHeart", "BsChatLeftDotsFill",
    "BsChatLeftDots", "BsChatLeftFill", "BsChatLeftHeartFill", "BsChatLeftHeart",
    "BsChatLeftQuoteFill", "BsChatLeftQuote", "BsChatLeftTextFill", "BsChatLeftText",
    "BsChatLeft", "BsChatQuoteFill", "BsChatQuote", "BsChatRightDotsFill",
    "BsChatRightDots", "BsChatRightFill", "BsChatRightHeartFill", "BsChatRightHeart",
    "BsChatRightQuoteFill", "BsChatRightQuote", "BsChatRightTextFill", "BsChatRightText",
    "BsChatRight", "BsChatSquareDotsFill", "BsChatSquareDots", "BsChatSquareFill",
    "BsChatSquareHeartFill", "BsChatSquareHeart", "BsChatSquareQuoteFill", "BsChatSquareQuote",
    "BsChatSquareTextFill", "BsChatSquareText", "BsChatSquare", "BsChatTextFill",
    "BsChatText", "BsChat", "BsCheckAll", "BsCheckCircleFill",
    "BsCheckCircle", "BsCheckLg", "BsCheckSquareFill", "BsCheckSquare",
    "BsCheck", "BsCheck2All", "BsCheck2Circle", "BsCheck2Square",
    "BsCheck2", "BsChevronBarContract", "BsChevronBarDown", "BsChevronBarExpand",
    "BsChevronBarLeft", "BsChevronBarRight", "BsChevronBarUp", "BsChevronCompactDown",
    "BsChevronCompactLeft", "BsChevronCompactRight", "BsChevronCompactUp", "BsChevronContract",
    "BsChevronDoubleDown", "BsChevronDoubleLeft", "BsChevronDoubleRight", "BsChevronDoubleUp",
    "BsChevronDown", "BsChevronExpand", "BsChevronLeft", "BsChevronRight",
    "BsChevronUp", "BsCircleFill", "BsCircleHalf", "BsCircleSquare",
    "BsCircle", "BsClipboardCheckFill", "BsClipboardCheck", "BsClipboardDataFill",
    "BsClipboardData", "BsClipboardFill", "BsClipboardHeartFill", "BsClipboardHeart",
    "BsClipboardMinusFill", "BsClipboardMinus", "BsClipboardPlusFill", "BsClipboardPlus",
    "BsClipboardPulse", "BsClipboardXFill", "BsClipboardX", "BsClipboard",
    "BsClipboard2CheckFill", "BsClipboard2Check", "BsClipboard2DataFill", "BsClipboard2Data",
    "BsClipboard2Fill", "BsClipboard2HeartFill", "BsClipboard2Heart", "BsClipboard2MinusFill",
    "BsClipboard2Minus", "BsClipboard2PlusFill", "BsClipboard2Plus", "BsClipboard2PulseFill",
    "BsClipboard2Pulse", "BsClipboard2XFill", "BsClipboard2X", "BsClipboard2",
    "BsClockFill", "BsClockHistory", "BsClock", "BsCloudArrowDownFill",
    "BsCloudArrowDown", "BsCloudArrowUpFill", "BsCloudArrowUp", "BsCloudCheckFill",
    "BsCloudCheck", "BsCloudDownloadFill", "BsCloudDownload", "BsCloudDrizzleFill",
    "BsCloudDrizzle", "BsCloudFill", "BsCloudFogFill", "BsCloudFog",
    "BsCloudFog2Fill", "BsCloudFog2", "BsCloudHailFill", "BsCloudHail",
    "BsCloudHazeFill", "BsCloudHaze", "BsCloudHaze2Fill", "BsCloudHaze2",
    "BsCloudLightningFill", "BsCloudLightningRainFill", "BsCloudLightningRain", "BsCloudLightning",
    "BsCloudMinusFill", "BsCloudMinus", "BsCloudMoonFill", "BsCloudMoon",
    "BsCloudPlusFill", "BsCloudPlus", "BsCloudRainFill", "BsCloudRainHeavyFill",
    "BsCloudRainHeavy", "BsCloudRain", "BsCloudSlashFill", "BsCloudSlash",
    "BsCloudSleetFill", "BsCloudSleet", "BsCloudSnowFill", "BsCloudSnow",
    "BsCloudSunFill", "BsCloudSun", "BsCloudUploadFill", "BsCloudUpload",
    "BsCloud", "BsCloudsFill", "BsClouds", "BsCloudyFill",
    "BsCloudy", "BsCodeSlash", "BsCodeSquare", "BsCode",
    "BsCoin", "BsCollectionFill", "BsCollectionPlayFill", "BsCollectionPlay",
    "BsCollection", "BsColumnsGap", "BsColumns", "BsCommand",
    "BsCompassFill", "BsCompass", "BsConeStriped", "BsCone",
    "BsController", "BsCpuFill", "BsCpu", "BsCreditCard2BackFill",
    "BsCreditCard2Back", "BsCreditCard2FrontFill", "BsCreditCard2Front", "BsCreditCardFill",
    "BsCreditCard", "BsCrop", "BsCupFill", "BsCupHotFill",
    "BsCupHot", "BsCupStraw", "BsCup", "BsCurrencyBitcoin",
    "BsCurrencyDollar", "BsCurrencyEuro", "BsCurrencyExchange", "BsCurrencyPound",
    "BsCurrencyRupee", "BsCurrencyYen", "BsCursorFill", "BsCursorText",
    "BsCursor", "BsDashCircleDotted", "BsDashCircleFill", "BsDashCircle",
    "BsDashLg", "BsDashSquareDotted", "BsDashSquareFill", "BsDashSquare",
    "BsDash", "BsDatabaseAdd", "BsDatabaseCheck", "BsDatabaseDash",
    "BsDatabaseDown", "BsDatabaseExclamation", "BsDatabaseFillAdd", "BsDatabaseFillCheck",
    "BsDatabaseFillDash", "BsDatabaseFillDown", "BsDatabaseFillExclamation", "BsDatabaseFillGear",
    "BsDatabaseFillLock", "BsDatabaseFillSlash", "BsDatabaseFillUp", "BsDatabaseFillX",
    "BsDatabaseFill", "BsDatabaseGear", "BsDatabaseLock", "BsDatabaseSlash",
    "BsDatabaseUp", "BsDatabaseX", "BsDatabase", "BsDeviceHddFill",
    "BsDeviceHdd", "BsDeviceSsdFill", "BsDeviceSsd", "BsDiagram2Fill",
    "BsDiagram2", "BsDiagram3Fill", "BsDiagram3", "BsDiamondFill",
    "BsDiamondHalf", "BsDiamond", "BsDice1Fill", "BsDice1",
    "BsDice2Fill", "BsDice2", "BsDice3Fill", "BsDice3",
    "BsDice4Fill", "BsDice4", "BsDice5Fill", "BsDice5",
    "BsDice6Fill", "BsDice6", "BsDiscFill", "BsDisc",
    "BsDiscord", "BsDisplayFill", "BsDisplay", "BsDisplayportFill",
    "BsDisplayport", "BsDistributeHorizontal", "BsDistributeVertical", "BsDoorClosedFill",
    "BsDoorClosed", "BsDoorOpenFill", "BsDoorOpen", "BsDot",
    "BsDownload", "BsDpadFill", "BsDpad", "BsDribbble",
    "BsDropbox", "BsDropletFill", "BsDropletHalf", "BsDroplet",
    "BsEarFill", "BsEar", "BsEarbuds", "BsEaselFill",
    "BsEasel", "BsEasel2Fill", "BsEasel2", "BsEasel3Fill",
    "BsEasel3", "BsEggFill", "BsEggFried", "BsEgg",
    "BsEjectFill", "BsEject", "BsEmojiAngryFill", "BsEmojiAngry",
    "BsEmojiDizzyFill", "BsEmojiDizzy", "BsEmojiExpressionlessFill", "BsEmojiExpressionless",
    "BsEmojiFrownFill", "BsEmojiFrown", "BsEmojiHeartEyesFill", "BsEmojiHeartEyes",
    "BsEmojiKissFill", "BsEmojiKiss", "BsEmojiLaughingFill", "BsEmojiLaughing",
    "BsEmojiNeutralFill", "BsEmojiNeutral", "BsEmojiSmileFill", "BsEmojiSmileUpsideDownFill",
    "BsEmojiSmileUpsideDown", "BsEmojiSmile", "BsEmojiSunglassesFill", "BsEmojiSunglasses",
    "BsEmojiWinkFill", "BsEmojiWink", "BsEnvelopeAtFill", "BsEnvelopeAt",
    "BsEnvelopeCheckFill", "BsEnvelopeCheck", "BsEnvelopeDashFill", "BsEnvelopeDash",
    "BsEnvelopeExclamationFill", "BsEnvelopeExclamation", "BsEnvelopeFill", "BsEnvelopeHeartFill",
    "BsEnvelopeHeart", "BsEnvelopeOpenFill", "BsEnvelopeOpenHeartFill", "BsEnvelopeOpenHeart",
    "BsEnvelopeOpen", "BsEnvelopePaperFill", "BsEnvelopePaperHeartFill", "BsEnvelopePaperHeart",
    "BsEnvelopePaper", "BsEnvelopePlusFill", "BsEnvelopePlus", "BsEnvelopeSlashFill",
    "BsEnvelopeSlash", "BsEnvelopeXFill", "BsEnvelopeX", "BsEnvelope",
    "BsEraserFill", "BsEraser",
)

# ``type(rx.Component)`` resolves the actual metaclass of the base, so the
# dynamically created classes pass through exactly the same class-creation
# machinery (fields, validation, etc.) as a literal ``class`` statement would.
_bs_icon_metaclass = type(rx.Component)
for _bs_icon_tag in _BS_ICON_TAGS:
    globals()[_bs_icon_tag] = _bs_icon_metaclass(
        _bs_icon_tag,
        (rx.Component,),
        {"library": _BS_ICON_LIBRARY, "tag": _bs_icon_tag},
    )
# Keep the module namespace clean of loop scaffolding.
del _bs_icon_metaclass, _bs_icon_tag
class BsEscape(rx.Component):
library = "react-icons/bs"
tag = "BsEscape"
class BsEthernet(rx.Component):
library = "react-icons/bs"
tag = "BsEthernet"
class BsEvFrontFill(rx.Component):
library = "react-icons/bs"
tag = "BsEvFrontFill"
class BsEvFront(rx.Component):
library = "react-icons/bs"
tag = "BsEvFront"
class BsEvStationFill(rx.Component):
library = "react-icons/bs"
tag = "BsEvStationFill"
class BsEvStation(rx.Component):
library = "react-icons/bs"
tag = "BsEvStation"
class BsExclamationCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsExclamationCircleFill"
class BsExclamationCircle(rx.Component):
library = "react-icons/bs"
tag = "BsExclamationCircle"
class BsExclamationDiamondFill(rx.Component):
library = "react-icons/bs"
tag = "BsExclamationDiamondFill"
class BsExclamationDiamond(rx.Component):
library = "react-icons/bs"
tag = "BsExclamationDiamond"
class BsExclamationLg(rx.Component):
library = "react-icons/bs"
tag = "BsExclamationLg"
class BsExclamationOctagonFill(rx.Component):
library = "react-icons/bs"
tag = "BsExclamationOctagonFill"
class BsExclamationOctagon(rx.Component):
library = "react-icons/bs"
tag = "BsExclamationOctagon"
class BsExclamationSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsExclamationSquareFill"
class BsExclamationSquare(rx.Component):
library = "react-icons/bs"
tag = "BsExclamationSquare"
class BsExclamationTriangleFill(rx.Component):
library = "react-icons/bs"
tag = "BsExclamationTriangleFill"
class BsExclamationTriangle(rx.Component):
library = "react-icons/bs"
tag = "BsExclamationTriangle"
class BsExclamation(rx.Component):
library = "react-icons/bs"
tag = "BsExclamation"
class BsExclude(rx.Component):
library = "react-icons/bs"
tag = "BsExclude"
class BsExplicitFill(rx.Component):
library = "react-icons/bs"
tag = "BsExplicitFill"
class BsExplicit(rx.Component):
library = "react-icons/bs"
tag = "BsExplicit"
class BsEyeFill(rx.Component):
library = "react-icons/bs"
tag = "BsEyeFill"
class BsEyeSlashFill(rx.Component):
library = "react-icons/bs"
tag = "BsEyeSlashFill"
class BsEyeSlash(rx.Component):
library = "react-icons/bs"
tag = "BsEyeSlash"
class BsEye(rx.Component):
library = "react-icons/bs"
tag = "BsEye"
class BsEyedropper(rx.Component):
library = "react-icons/bs"
tag = "BsEyedropper"
class BsEyeglasses(rx.Component):
library = "react-icons/bs"
tag = "BsEyeglasses"
class BsFacebook(rx.Component):
library = "react-icons/bs"
tag = "BsFacebook"
class BsFan(rx.Component):
library = "react-icons/bs"
tag = "BsFan"
class BsFastForwardBtnFill(rx.Component):
library = "react-icons/bs"
tag = "BsFastForwardBtnFill"
class BsFastForwardBtn(rx.Component):
library = "react-icons/bs"
tag = "BsFastForwardBtn"
class BsFastForwardCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFastForwardCircleFill"
class BsFastForwardCircle(rx.Component):
library = "react-icons/bs"
tag = "BsFastForwardCircle"
class BsFastForwardFill(rx.Component):
library = "react-icons/bs"
tag = "BsFastForwardFill"
class BsFastForward(rx.Component):
library = "react-icons/bs"
tag = "BsFastForward"
class BsFileArrowDownFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileArrowDownFill"
class BsFileArrowDown(rx.Component):
library = "react-icons/bs"
tag = "BsFileArrowDown"
class BsFileArrowUpFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileArrowUpFill"
class BsFileArrowUp(rx.Component):
library = "react-icons/bs"
tag = "BsFileArrowUp"
class BsFileBarGraphFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileBarGraphFill"
class BsFileBarGraph(rx.Component):
library = "react-icons/bs"
tag = "BsFileBarGraph"
class BsFileBinaryFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileBinaryFill"
class BsFileBinary(rx.Component):
library = "react-icons/bs"
tag = "BsFileBinary"
class BsFileBreakFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileBreakFill"
class BsFileBreak(rx.Component):
library = "react-icons/bs"
tag = "BsFileBreak"
class BsFileCheckFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileCheckFill"
class BsFileCheck(rx.Component):
library = "react-icons/bs"
tag = "BsFileCheck"
class BsFileCodeFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileCodeFill"
class BsFileCode(rx.Component):
library = "react-icons/bs"
tag = "BsFileCode"
class BsFileDiffFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileDiffFill"
class BsFileDiff(rx.Component):
library = "react-icons/bs"
tag = "BsFileDiff"
class BsFileEarmarkArrowDownFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkArrowDownFill"
class BsFileEarmarkArrowDown(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkArrowDown"
class BsFileEarmarkArrowUpFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkArrowUpFill"
class BsFileEarmarkArrowUp(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkArrowUp"
class BsFileEarmarkBarGraphFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkBarGraphFill"
class BsFileEarmarkBarGraph(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkBarGraph"
class BsFileEarmarkBinaryFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkBinaryFill"
class BsFileEarmarkBinary(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkBinary"
class BsFileEarmarkBreakFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkBreakFill"
class BsFileEarmarkBreak(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkBreak"
class BsFileEarmarkCheckFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkCheckFill"
class BsFileEarmarkCheck(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkCheck"
class BsFileEarmarkCodeFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkCodeFill"
class BsFileEarmarkCode(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkCode"
class BsFileEarmarkDiffFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkDiffFill"
class BsFileEarmarkDiff(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkDiff"
class BsFileEarmarkEaselFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkEaselFill"
class BsFileEarmarkEasel(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkEasel"
class BsFileEarmarkExcelFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkExcelFill"
class BsFileEarmarkExcel(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkExcel"
class BsFileEarmarkFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkFill"
class BsFileEarmarkFontFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkFontFill"
class BsFileEarmarkFont(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkFont"
class BsFileEarmarkImageFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkImageFill"
class BsFileEarmarkImage(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkImage"
class BsFileEarmarkLockFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkLockFill"
class BsFileEarmarkLock(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkLock"
class BsFileEarmarkLock2Fill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkLock2Fill"
class BsFileEarmarkLock2(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkLock2"
class BsFileEarmarkMedicalFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkMedicalFill"
class BsFileEarmarkMedical(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkMedical"
class BsFileEarmarkMinusFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkMinusFill"
class BsFileEarmarkMinus(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkMinus"
class BsFileEarmarkMusicFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkMusicFill"
class BsFileEarmarkMusic(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkMusic"
class BsFileEarmarkPdfFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkPdfFill"
class BsFileEarmarkPdf(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkPdf"
class BsFileEarmarkPersonFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkPersonFill"
class BsFileEarmarkPerson(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkPerson"
class BsFileEarmarkPlayFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkPlayFill"
class BsFileEarmarkPlay(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkPlay"
class BsFileEarmarkPlusFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkPlusFill"
class BsFileEarmarkPlus(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkPlus"
class BsFileEarmarkPostFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkPostFill"
class BsFileEarmarkPost(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkPost"
class BsFileEarmarkPptFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkPptFill"
class BsFileEarmarkPpt(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkPpt"
class BsFileEarmarkRichtextFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkRichtextFill"
class BsFileEarmarkRichtext(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkRichtext"
class BsFileEarmarkRuledFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkRuledFill"
class BsFileEarmarkRuled(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkRuled"
class BsFileEarmarkSlidesFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkSlidesFill"
class BsFileEarmarkSlides(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkSlides"
class BsFileEarmarkSpreadsheetFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkSpreadsheetFill"
class BsFileEarmarkSpreadsheet(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkSpreadsheet"
class BsFileEarmarkTextFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkTextFill"
class BsFileEarmarkText(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkText"
class BsFileEarmarkWordFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkWordFill"
class BsFileEarmarkWord(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkWord"
class BsFileEarmarkXFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkXFill"
class BsFileEarmarkX(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkX"
class BsFileEarmarkZipFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkZipFill"
class BsFileEarmarkZip(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmarkZip"
class BsFileEarmark(rx.Component):
library = "react-icons/bs"
tag = "BsFileEarmark"
class BsFileEaselFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileEaselFill"
class BsFileEasel(rx.Component):
library = "react-icons/bs"
tag = "BsFileEasel"
class BsFileExcelFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileExcelFill"
class BsFileExcel(rx.Component):
library = "react-icons/bs"
tag = "BsFileExcel"
class BsFileFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileFill"
class BsFileFontFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileFontFill"
class BsFileFont(rx.Component):
library = "react-icons/bs"
tag = "BsFileFont"
class BsFileImageFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileImageFill"
class BsFileImage(rx.Component):
library = "react-icons/bs"
tag = "BsFileImage"
class BsFileLockFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileLockFill"
class BsFileLock(rx.Component):
library = "react-icons/bs"
tag = "BsFileLock"
class BsFileLock2Fill(rx.Component):
library = "react-icons/bs"
tag = "BsFileLock2Fill"
class BsFileLock2(rx.Component):
library = "react-icons/bs"
tag = "BsFileLock2"
class BsFileMedicalFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileMedicalFill"
class BsFileMedical(rx.Component):
library = "react-icons/bs"
tag = "BsFileMedical"
class BsFileMinusFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileMinusFill"
class BsFileMinus(rx.Component):
library = "react-icons/bs"
tag = "BsFileMinus"
class BsFileMusicFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileMusicFill"
class BsFileMusic(rx.Component):
library = "react-icons/bs"
tag = "BsFileMusic"
class BsFilePdfFill(rx.Component):
library = "react-icons/bs"
tag = "BsFilePdfFill"
class BsFilePdf(rx.Component):
library = "react-icons/bs"
tag = "BsFilePdf"
class BsFilePersonFill(rx.Component):
library = "react-icons/bs"
tag = "BsFilePersonFill"
class BsFilePerson(rx.Component):
library = "react-icons/bs"
tag = "BsFilePerson"
class BsFilePlayFill(rx.Component):
library = "react-icons/bs"
tag = "BsFilePlayFill"
class BsFilePlay(rx.Component):
library = "react-icons/bs"
tag = "BsFilePlay"
class BsFilePlusFill(rx.Component):
library = "react-icons/bs"
tag = "BsFilePlusFill"
class BsFilePlus(rx.Component):
library = "react-icons/bs"
tag = "BsFilePlus"
class BsFilePostFill(rx.Component):
library = "react-icons/bs"
tag = "BsFilePostFill"
class BsFilePost(rx.Component):
library = "react-icons/bs"
tag = "BsFilePost"
class BsFilePptFill(rx.Component):
library = "react-icons/bs"
tag = "BsFilePptFill"
class BsFilePpt(rx.Component):
library = "react-icons/bs"
tag = "BsFilePpt"
class BsFileRichtextFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileRichtextFill"
class BsFileRichtext(rx.Component):
library = "react-icons/bs"
tag = "BsFileRichtext"
class BsFileRuledFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileRuledFill"
class BsFileRuled(rx.Component):
library = "react-icons/bs"
tag = "BsFileRuled"
class BsFileSlidesFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileSlidesFill"
class BsFileSlides(rx.Component):
library = "react-icons/bs"
tag = "BsFileSlides"
class BsFileSpreadsheetFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileSpreadsheetFill"
class BsFileSpreadsheet(rx.Component):
library = "react-icons/bs"
tag = "BsFileSpreadsheet"
class BsFileTextFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileTextFill"
class BsFileText(rx.Component):
library = "react-icons/bs"
tag = "BsFileText"
class BsFileWordFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileWordFill"
class BsFileWord(rx.Component):
library = "react-icons/bs"
tag = "BsFileWord"
class BsFileXFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileXFill"
class BsFileX(rx.Component):
library = "react-icons/bs"
tag = "BsFileX"
class BsFileZipFill(rx.Component):
library = "react-icons/bs"
tag = "BsFileZipFill"
class BsFileZip(rx.Component):
library = "react-icons/bs"
tag = "BsFileZip"
class BsFile(rx.Component):
library = "react-icons/bs"
tag = "BsFile"
class BsFilesAlt(rx.Component):
library = "react-icons/bs"
tag = "BsFilesAlt"
class BsFiles(rx.Component):
library = "react-icons/bs"
tag = "BsFiles"
class BsFiletypeAac(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeAac"
class BsFiletypeAi(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeAi"
class BsFiletypeBmp(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeBmp"
class BsFiletypeCs(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeCs"
class BsFiletypeCss(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeCss"
class BsFiletypeCsv(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeCsv"
class BsFiletypeDoc(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeDoc"
class BsFiletypeDocx(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeDocx"
class BsFiletypeExe(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeExe"
class BsFiletypeGif(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeGif"
class BsFiletypeHeic(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeHeic"
class BsFiletypeHtml(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeHtml"
class BsFiletypeJava(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeJava"
class BsFiletypeJpg(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeJpg"
class BsFiletypeJs(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeJs"
class BsFiletypeJson(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeJson"
class BsFiletypeJsx(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeJsx"
class BsFiletypeKey(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeKey"
class BsFiletypeM4P(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeM4P"
class BsFiletypeMd(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeMd"
class BsFiletypeMdx(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeMdx"
class BsFiletypeMov(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeMov"
class BsFiletypeMp3(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeMp3"
class BsFiletypeMp4(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeMp4"
class BsFiletypeOtf(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeOtf"
class BsFiletypePdf(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypePdf"
class BsFiletypePhp(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypePhp"
class BsFiletypePng(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypePng"
class BsFiletypePpt(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypePpt"
class BsFiletypePptx(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypePptx"
class BsFiletypePsd(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypePsd"
class BsFiletypePy(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypePy"
class BsFiletypeRaw(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeRaw"
class BsFiletypeRb(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeRb"
class BsFiletypeSass(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeSass"
class BsFiletypeScss(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeScss"
class BsFiletypeSh(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeSh"
class BsFiletypeSql(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeSql"
class BsFiletypeSvg(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeSvg"
class BsFiletypeTiff(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeTiff"
class BsFiletypeTsx(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeTsx"
class BsFiletypeTtf(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeTtf"
class BsFiletypeTxt(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeTxt"
class BsFiletypeWav(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeWav"
class BsFiletypeWoff(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeWoff"
class BsFiletypeXls(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeXls"
class BsFiletypeXlsx(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeXlsx"
class BsFiletypeXml(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeXml"
class BsFiletypeYml(rx.Component):
library = "react-icons/bs"
tag = "BsFiletypeYml"
class BsFilm(rx.Component):
library = "react-icons/bs"
tag = "BsFilm"
class BsFilterCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsFilterCircleFill"
class BsFilterCircle(rx.Component):
library = "react-icons/bs"
tag = "BsFilterCircle"
class BsFilterLeft(rx.Component):
library = "react-icons/bs"
tag = "BsFilterLeft"
class BsFilterRight(rx.Component):
library = "react-icons/bs"
tag = "BsFilterRight"
class BsFilterSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsFilterSquareFill"
class BsFilterSquare(rx.Component):
library = "react-icons/bs"
tag = "BsFilterSquare"
class BsFilter(rx.Component):
library = "react-icons/bs"
tag = "BsFilter"
class BsFingerprint(rx.Component):
library = "react-icons/bs"
tag = "BsFingerprint"
class BsFire(rx.Component):
library = "react-icons/bs"
tag = "BsFire"
class BsFlagFill(rx.Component):
library = "react-icons/bs"
tag = "BsFlagFill"
class BsFlag(rx.Component):
library = "react-icons/bs"
tag = "BsFlag"
class BsFlower1(rx.Component):
library = "react-icons/bs"
tag = "BsFlower1"
class BsFlower2(rx.Component):
library = "react-icons/bs"
tag = "BsFlower2"
class BsFlower3(rx.Component):
library = "react-icons/bs"
tag = "BsFlower3"
class BsFolderCheck(rx.Component):
library = "react-icons/bs"
tag = "BsFolderCheck"
class BsFolderFill(rx.Component):
library = "react-icons/bs"
tag = "BsFolderFill"
class BsFolderMinus(rx.Component):
library = "react-icons/bs"
tag = "BsFolderMinus"
class BsFolderPlus(rx.Component):
library = "react-icons/bs"
tag = "BsFolderPlus"
class BsFolderSymlinkFill(rx.Component):
library = "react-icons/bs"
tag = "BsFolderSymlinkFill"
class BsFolderSymlink(rx.Component):
library = "react-icons/bs"
tag = "BsFolderSymlink"
class BsFolderX(rx.Component):
library = "react-icons/bs"
tag = "BsFolderX"
class BsFolder(rx.Component):
library = "react-icons/bs"
tag = "BsFolder"
class BsFolder2Open(rx.Component):
library = "react-icons/bs"
tag = "BsFolder2Open"
class BsFolder2(rx.Component):
library = "react-icons/bs"
tag = "BsFolder2"
class BsFonts(rx.Component):
library = "react-icons/bs"
tag = "BsFonts"
class BsForwardFill(rx.Component):
library = "react-icons/bs"
tag = "BsForwardFill"
class BsForward(rx.Component):
library = "react-icons/bs"
tag = "BsForward"
class BsFront(rx.Component):
library = "react-icons/bs"
tag = "BsFront"
class BsFuelPumpDieselFill(rx.Component):
library = "react-icons/bs"
tag = "BsFuelPumpDieselFill"
class BsFuelPumpDiesel(rx.Component):
library = "react-icons/bs"
tag = "BsFuelPumpDiesel"
class BsFuelPumpFill(rx.Component):
library = "react-icons/bs"
tag = "BsFuelPumpFill"
class BsFuelPump(rx.Component):
library = "react-icons/bs"
tag = "BsFuelPump"
class BsFullscreenExit(rx.Component):
library = "react-icons/bs"
tag = "BsFullscreenExit"
class BsFullscreen(rx.Component):
library = "react-icons/bs"
tag = "BsFullscreen"
class BsFunnelFill(rx.Component):
library = "react-icons/bs"
tag = "BsFunnelFill"
class BsFunnel(rx.Component):
library = "react-icons/bs"
tag = "BsFunnel"
class BsGearFill(rx.Component):
library = "react-icons/bs"
tag = "BsGearFill"
class BsGearWideConnected(rx.Component):
library = "react-icons/bs"
tag = "BsGearWideConnected"
class BsGearWide(rx.Component):
library = "react-icons/bs"
tag = "BsGearWide"
class BsGear(rx.Component):
library = "react-icons/bs"
tag = "BsGear"
class BsGem(rx.Component):
library = "react-icons/bs"
tag = "BsGem"
class BsGenderAmbiguous(rx.Component):
library = "react-icons/bs"
tag = "BsGenderAmbiguous"
class BsGenderFemale(rx.Component):
library = "react-icons/bs"
tag = "BsGenderFemale"
class BsGenderMale(rx.Component):
library = "react-icons/bs"
tag = "BsGenderMale"
class BsGenderTrans(rx.Component):
library = "react-icons/bs"
tag = "BsGenderTrans"
class BsGeoAltFill(rx.Component):
library = "react-icons/bs"
tag = "BsGeoAltFill"
class BsGeoAlt(rx.Component):
library = "react-icons/bs"
tag = "BsGeoAlt"
class BsGeoFill(rx.Component):
library = "react-icons/bs"
tag = "BsGeoFill"
class BsGeo(rx.Component):
library = "react-icons/bs"
tag = "BsGeo"
class BsGiftFill(rx.Component):
library = "react-icons/bs"
tag = "BsGiftFill"
class BsGift(rx.Component):
library = "react-icons/bs"
tag = "BsGift"
class BsGit(rx.Component):
library = "react-icons/bs"
tag = "BsGit"
class BsGithub(rx.Component):
library = "react-icons/bs"
tag = "BsGithub"
class BsGlobeAmericas(rx.Component):
library = "react-icons/bs"
tag = "BsGlobeAmericas"
class BsGlobeAsiaAustralia(rx.Component):
library = "react-icons/bs"
tag = "BsGlobeAsiaAustralia"
class BsGlobeCentralSouthAsia(rx.Component):
library = "react-icons/bs"
tag = "BsGlobeCentralSouthAsia"
class BsGlobeEuropeAfrica(rx.Component):
library = "react-icons/bs"
tag = "BsGlobeEuropeAfrica"
class BsGlobe(rx.Component):
library = "react-icons/bs"
tag = "BsGlobe"
class BsGlobe2(rx.Component):
library = "react-icons/bs"
tag = "BsGlobe2"
class BsGooglePlay(rx.Component):
library = "react-icons/bs"
tag = "BsGooglePlay"
class BsGoogle(rx.Component):
library = "react-icons/bs"
tag = "BsGoogle"
class BsGpuCard(rx.Component):
library = "react-icons/bs"
tag = "BsGpuCard"
class BsGraphDownArrow(rx.Component):
library = "react-icons/bs"
tag = "BsGraphDownArrow"
class BsGraphDown(rx.Component):
library = "react-icons/bs"
tag = "BsGraphDown"
class BsGraphUpArrow(rx.Component):
library = "react-icons/bs"
tag = "BsGraphUpArrow"
class BsGraphUp(rx.Component):
library = "react-icons/bs"
tag = "BsGraphUp"
class BsGrid1X2Fill(rx.Component):
library = "react-icons/bs"
tag = "BsGrid1X2Fill"
class BsGrid1X2(rx.Component):
library = "react-icons/bs"
tag = "BsGrid1X2"
class BsGrid3X2GapFill(rx.Component):
library = "react-icons/bs"
tag = "BsGrid3X2GapFill"
class BsGrid3X2Gap(rx.Component):
library = "react-icons/bs"
tag = "BsGrid3X2Gap"
class BsGrid3X2(rx.Component):
library = "react-icons/bs"
tag = "BsGrid3X2"
class BsGrid3X3GapFill(rx.Component):
library = "react-icons/bs"
tag = "BsGrid3X3GapFill"
class BsGrid3X3Gap(rx.Component):
library = "react-icons/bs"
tag = "BsGrid3X3Gap"
class BsGrid3X3(rx.Component):
library = "react-icons/bs"
tag = "BsGrid3X3"
class BsGridFill(rx.Component):
library = "react-icons/bs"
tag = "BsGridFill"
class BsGrid(rx.Component):
library = "react-icons/bs"
tag = "BsGrid"
class BsGripHorizontal(rx.Component):
library = "react-icons/bs"
tag = "BsGripHorizontal"
class BsGripVertical(rx.Component):
library = "react-icons/bs"
tag = "BsGripVertical"
class BsHCircleFill(rx.Component):
library = "react-icons/bs"
tag = "BsHCircleFill"
class BsHCircle(rx.Component):
library = "react-icons/bs"
tag = "BsHCircle"
class BsHSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsHSquareFill"
class BsHSquare(rx.Component):
library = "react-icons/bs"
tag = "BsHSquare"
class BsHammer(rx.Component):
library = "react-icons/bs"
tag = "BsHammer"
class BsHandIndexFill(rx.Component):
library = "react-icons/bs"
tag = "BsHandIndexFill"
class BsHandIndexThumbFill(rx.Component):
library = "react-icons/bs"
tag = "BsHandIndexThumbFill"
class BsHandIndexThumb(rx.Component):
library = "react-icons/bs"
tag = "BsHandIndexThumb"
class BsHandIndex(rx.Component):
library = "react-icons/bs"
tag = "BsHandIndex"
class BsHandThumbsDownFill(rx.Component):
library = "react-icons/bs"
tag = "BsHandThumbsDownFill"
class BsHandThumbsDown(rx.Component):
library = "react-icons/bs"
tag = "BsHandThumbsDown"
class BsHandThumbsUpFill(rx.Component):
library = "react-icons/bs"
tag = "BsHandThumbsUpFill"
class BsHandThumbsUp(rx.Component):
library = "react-icons/bs"
tag = "BsHandThumbsUp"
class BsHandbagFill(rx.Component):
library = "react-icons/bs"
tag = "BsHandbagFill"
class BsHandbag(rx.Component):
library = "react-icons/bs"
tag = "BsHandbag"
class BsHash(rx.Component):
library = "react-icons/bs"
tag = "BsHash"
class BsHddFill(rx.Component):
library = "react-icons/bs"
tag = "BsHddFill"
class BsHddNetworkFill(rx.Component):
library = "react-icons/bs"
tag = "BsHddNetworkFill"
class BsHddNetwork(rx.Component):
library = "react-icons/bs"
tag = "BsHddNetwork"
class BsHddRackFill(rx.Component):
library = "react-icons/bs"
tag = "BsHddRackFill"
class BsHddRack(rx.Component):
library = "react-icons/bs"
tag = "BsHddRack"
class BsHddStackFill(rx.Component):
library = "react-icons/bs"
tag = "BsHddStackFill"
class BsHddStack(rx.Component):
library = "react-icons/bs"
tag = "BsHddStack"
class BsHdd(rx.Component):
library = "react-icons/bs"
tag = "BsHdd"
class BsHdmiFill(rx.Component):
library = "react-icons/bs"
tag = "BsHdmiFill"
class BsHdmi(rx.Component):
library = "react-icons/bs"
tag = "BsHdmi"
class BsHeadphones(rx.Component):
library = "react-icons/bs"
tag = "BsHeadphones"
class BsHeadsetVr(rx.Component):
library = "react-icons/bs"
tag = "BsHeadsetVr"
class BsHeadset(rx.Component):
library = "react-icons/bs"
tag = "BsHeadset"
class BsHeartArrow(rx.Component):
library = "react-icons/bs"
tag = "BsHeartArrow"
class BsHeartFill(rx.Component):
library = "react-icons/bs"
tag = "BsHeartFill"
class BsHeartHalf(rx.Component):
library = "react-icons/bs"
tag = "BsHeartHalf"
# Bootstrap ("bs") icon wrappers for react-icons.
#
# Every icon component in this section is identical except for its tag: a
# subclass of rx.Component with library == "react-icons/bs" and tag equal to
# the class name.  Rather than repeating ~440 three-line class statements,
# the classes are generated from the tag list below and installed into this
# module's namespace, so existing imports such as
# ``from <module> import BsHeart`` continue to work unchanged.
_BS_ICON_TAGS = (
    "BsHeartPulseFill", "BsHeartPulse", "BsHeart", "BsHeartbreakFill",
    "BsHeartbreak", "BsHearts", "BsHeptagonFill", "BsHeptagonHalf",
    "BsHeptagon", "BsHexagonFill", "BsHexagonHalf", "BsHexagon",
    "BsHospitalFill", "BsHospital", "BsHourglassBottom", "BsHourglassSplit",
    "BsHourglassTop", "BsHourglass", "BsHouseAddFill", "BsHouseAdd",
    "BsHouseCheckFill", "BsHouseCheck", "BsHouseDashFill", "BsHouseDash",
    "BsHouseDoorFill", "BsHouseDoor", "BsHouseDownFill", "BsHouseDown",
    "BsHouseExclamationFill", "BsHouseExclamation", "BsHouseFill",
    "BsHouseGearFill", "BsHouseGear", "BsHouseHeartFill", "BsHouseHeart",
    "BsHouseLockFill", "BsHouseLock", "BsHouseSlashFill", "BsHouseSlash",
    "BsHouseUpFill", "BsHouseUp", "BsHouseXFill", "BsHouseX", "BsHouse",
    "BsHousesFill", "BsHouses", "BsHr", "BsHurricane", "BsHypnotize",
    "BsImageAlt", "BsImageFill", "BsImage", "BsImages", "BsInboxFill",
    "BsInbox", "BsInboxesFill", "BsInboxes", "BsIncognito", "BsIndent",
    "BsInfinity", "BsInfoCircleFill", "BsInfoCircle", "BsInfoLg",
    "BsInfoSquareFill", "BsInfoSquare", "BsInfo", "BsInputCursorText",
    "BsInputCursor", "BsInstagram", "BsIntersect", "BsJournalAlbum",
    "BsJournalArrowDown", "BsJournalArrowUp", "BsJournalBookmarkFill",
    "BsJournalBookmark", "BsJournalCheck", "BsJournalCode",
    "BsJournalMedical", "BsJournalMinus", "BsJournalPlus",
    "BsJournalRichtext", "BsJournalText", "BsJournalX", "BsJournal",
    "BsJournals", "BsJoystick", "BsJustifyLeft", "BsJustifyRight",
    "BsJustify", "BsKanbanFill", "BsKanban", "BsKeyFill", "BsKey",
    "BsKeyboardFill", "BsKeyboard", "BsLadder", "BsLampFill", "BsLamp",
    "BsLaptopFill", "BsLaptop", "BsLayerBackward", "BsLayerForward",
    "BsLayersFill", "BsLayersHalf", "BsLayers",
    "BsLayoutSidebarInsetReverse", "BsLayoutSidebarInset",
    "BsLayoutSidebarReverse", "BsLayoutSidebar", "BsLayoutSplit",
    "BsLayoutTextSidebarReverse", "BsLayoutTextSidebar",
    "BsLayoutTextWindowReverse", "BsLayoutTextWindow",
    "BsLayoutThreeColumns", "BsLayoutWtf", "BsLifePreserver",
    "BsLightbulbFill", "BsLightbulbOffFill", "BsLightbulbOff",
    "BsLightbulb", "BsLightningChargeFill", "BsLightningCharge",
    "BsLightningFill", "BsLightning", "BsLine", "BsLink45Deg", "BsLink",
    "BsLinkedin", "BsListCheck", "BsListColumnsReverse", "BsListColumns",
    "BsListNested", "BsListOl", "BsListStars", "BsListTask", "BsListUl",
    "BsList", "BsLockFill", "BsLock", "BsLungsFill", "BsLungs", "BsMagic",
    "BsMagnetFill", "BsMagnet", "BsMailbox", "BsMailbox2", "BsMapFill",
    "BsMap", "BsMarkdownFill", "BsMarkdown", "BsMask", "BsMastodon",
    "BsMedium", "BsMegaphoneFill", "BsMegaphone", "BsMemory",
    "BsMenuAppFill", "BsMenuApp", "BsMenuButtonFill",
    "BsMenuButtonWideFill", "BsMenuButtonWide", "BsMenuButton",
    "BsMenuDown", "BsMenuUp", "BsMessenger", "BsMeta", "BsMicFill",
    "BsMicMuteFill", "BsMicMute", "BsMic", "BsMicrosoftTeams",
    "BsMicrosoft", "BsMinecartLoaded", "BsMinecart", "BsModemFill",
    "BsModem", "BsMoisture", "BsMoonFill", "BsMoonStarsFill",
    "BsMoonStars", "BsMoon", "BsMortarboardFill", "BsMortarboard",
    "BsMotherboardFill", "BsMotherboard", "BsMouseFill", "BsMouse",
    "BsMouse2Fill", "BsMouse2", "BsMouse3Fill", "BsMouse3",
    "BsMusicNoteBeamed", "BsMusicNoteList", "BsMusicNote",
    "BsMusicPlayerFill", "BsMusicPlayer", "BsNewspaper",
    "BsNintendoSwitch", "BsNodeMinusFill", "BsNodeMinus",
    "BsNodePlusFill", "BsNodePlus", "BsNutFill", "BsNut", "BsNvidia",
    "BsOctagonFill", "BsOctagonHalf", "BsOctagon", "BsOpticalAudioFill",
    "BsOpticalAudio", "BsOption", "BsOutlet", "BsPCircleFill",
    "BsPCircle", "BsPSquareFill", "BsPSquare", "BsPaintBucket",
    "BsPaletteFill", "BsPalette", "BsPalette2", "BsPaperclip",
    "BsParagraph", "BsPassFill", "BsPass", "BsPatchCheckFill",
    "BsPatchCheck", "BsPatchExclamationFill", "BsPatchExclamation",
    "BsPatchMinusFill", "BsPatchMinus", "BsPatchPlusFill", "BsPatchPlus",
    "BsPatchQuestionFill", "BsPatchQuestion", "BsPauseBtnFill",
    "BsPauseBtn", "BsPauseCircleFill", "BsPauseCircle", "BsPauseFill",
    "BsPause", "BsPaypal", "BsPcDisplayHorizontal", "BsPcDisplay",
    "BsPcHorizontal", "BsPc", "BsPciCard", "BsPeaceFill", "BsPeace",
    "BsPenFill", "BsPen", "BsPencilFill", "BsPencilSquare", "BsPencil",
    "BsPentagonFill", "BsPentagonHalf", "BsPentagon", "BsPeopleFill",
    "BsPeople", "BsPercent", "BsPersonAdd", "BsPersonBadgeFill",
    "BsPersonBadge", "BsPersonBoundingBox", "BsPersonCheckFill",
    "BsPersonCheck", "BsPersonCircle", "BsPersonDashFill", "BsPersonDash",
    "BsPersonDown", "BsPersonExclamation", "BsPersonFillAdd",
    "BsPersonFillCheck", "BsPersonFillDash", "BsPersonFillDown",
    "BsPersonFillExclamation", "BsPersonFillGear", "BsPersonFillLock",
    "BsPersonFillSlash", "BsPersonFillUp", "BsPersonFillX",
    "BsPersonFill", "BsPersonGear", "BsPersonHeart", "BsPersonHearts",
    "BsPersonLinesFill", "BsPersonLock", "BsPersonPlusFill",
    "BsPersonPlus", "BsPersonRolodex", "BsPersonSlash", "BsPersonSquare",
    "BsPersonUp", "BsPersonVcardFill", "BsPersonVcard", "BsPersonVideo",
    "BsPersonVideo2", "BsPersonVideo3", "BsPersonWorkspace",
    "BsPersonXFill", "BsPersonX", "BsPerson", "BsPhoneFill",
    "BsPhoneFlip", "BsPhoneLandscapeFill", "BsPhoneLandscape",
    "BsPhoneVibrateFill", "BsPhoneVibrate", "BsPhone", "BsPieChartFill",
    "BsPieChart", "BsPiggyBankFill", "BsPiggyBank", "BsPinAngleFill",
    "BsPinAngle", "BsPinFill", "BsPinMapFill", "BsPinMap", "BsPin",
    "BsPinterest", "BsPipFill", "BsPip", "BsPlayBtnFill", "BsPlayBtn",
    "BsPlayCircleFill", "BsPlayCircle", "BsPlayFill", "BsPlay",
    "BsPlaystation", "BsPlugFill", "BsPlug", "BsPlugin",
    "BsPlusCircleDotted", "BsPlusCircleFill", "BsPlusCircle", "BsPlusLg",
    "BsPlusSlashMinus", "BsPlusSquareDotted", "BsPlusSquareFill",
    "BsPlusSquare", "BsPlus", "BsPostageFill", "BsPostageHeartFill",
    "BsPostageHeart", "BsPostage", "BsPostcardFill",
    "BsPostcardHeartFill", "BsPostcardHeart", "BsPostcard", "BsPower",
    "BsPrescription", "BsPrescription2", "BsPrinterFill", "BsPrinter",
    "BsProjectorFill", "BsProjector", "BsPuzzleFill", "BsPuzzle",
    "BsQrCodeScan", "BsQrCode", "BsQuestionCircleFill",
    "BsQuestionCircle", "BsQuestionDiamondFill", "BsQuestionDiamond",
    "BsQuestionLg", "BsQuestionOctagonFill", "BsQuestionOctagon",
    "BsQuestionSquareFill", "BsQuestionSquare", "BsQuestion", "BsQuora",
    "BsQuote", "BsRCircleFill", "BsRCircle", "BsRSquareFill", "BsRSquare",
    "BsRadioactive", "BsRainbow", "BsReceiptCutoff", "BsReceipt",
    "BsReception0", "BsReception1", "BsReception2", "BsReception3",
    "BsReception4", "BsRecordBtnFill", "BsRecordBtn",
    "BsRecordCircleFill", "BsRecordCircle", "BsRecordFill", "BsRecord",
    "BsRecord2Fill", "BsRecord2", "BsRecycle", "BsReddit", "BsRegex",
    "BsRepeat1", "BsRepeat", "BsReplyAllFill", "BsReplyAll",
    "BsReplyFill", "BsReply", "BsRewindBtnFill", "BsRewindBtn",
    "BsRewindCircleFill", "BsRewindCircle", "BsRewindFill", "BsRewind",
    "BsRobot", "BsRocketFill", "BsRocketTakeoffFill", "BsRocketTakeoff",
    "BsRocket", "BsRouterFill", "BsRouter", "BsRssFill", "BsRss",
    "BsRulers", "BsSafeFill", "BsSafe", "BsSafe2Fill", "BsSafe2",
    "BsSaveFill", "BsSave", "BsSave2Fill", "BsSave2", "BsScissors",
    "BsScooter", "BsScrewdriver", "BsSdCardFill", "BsSdCard",
    "BsSearchHeartFill", "BsSearchHeart", "BsSearch", "BsSegmentedNav",
    "BsSendCheckFill", "BsSendCheck",
)


def _make_bs_icon(icon_tag):
    """Build one react-icons/bs wrapper class named after *icon_tag*.

    The class namespace is exactly what the equivalent hand-written
    ``class <icon_tag>(rx.Component): library = ...; tag = ...`` statement
    would produce.  Calling ``type(rx.Component)`` (rx.Component's own
    metaclass) rather than plain ``type`` keeps any metaclass processing
    that rx.Component relies on identical to the class-statement form.
    """
    return type(rx.Component)(
        icon_tag,
        (rx.Component,),
        {"library": "react-icons/bs", "tag": icon_tag},
    )


# Install every icon class under its original module-level name so both
# ``module.BsHeart`` and ``from module import BsHeart`` behave as before.
for _tag in _BS_ICON_TAGS:
    globals()[_tag] = _make_bs_icon(_tag)
del _tag
class BsSendDashFill(rx.Component):
library = "react-icons/bs"
tag = "BsSendDashFill"
class BsSendDash(rx.Component):
library = "react-icons/bs"
tag = "BsSendDash"
class BsSendExclamationFill(rx.Component):
library = "react-icons/bs"
tag = "BsSendExclamationFill"
class BsSendExclamation(rx.Component):
library = "react-icons/bs"
tag = "BsSendExclamation"
class BsSendFill(rx.Component):
library = "react-icons/bs"
tag = "BsSendFill"
class BsSendPlusFill(rx.Component):
library = "react-icons/bs"
tag = "BsSendPlusFill"
class BsSendPlus(rx.Component):
library = "react-icons/bs"
tag = "BsSendPlus"
class BsSendSlashFill(rx.Component):
library = "react-icons/bs"
tag = "BsSendSlashFill"
class BsSendSlash(rx.Component):
library = "react-icons/bs"
tag = "BsSendSlash"
class BsSendXFill(rx.Component):
library = "react-icons/bs"
tag = "BsSendXFill"
class BsSendX(rx.Component):
library = "react-icons/bs"
tag = "BsSendX"
class BsSend(rx.Component):
library = "react-icons/bs"
tag = "BsSend"
class BsServer(rx.Component):
library = "react-icons/bs"
tag = "BsServer"
class BsShareFill(rx.Component):
library = "react-icons/bs"
tag = "BsShareFill"
class BsShare(rx.Component):
library = "react-icons/bs"
tag = "BsShare"
class BsShieldCheck(rx.Component):
library = "react-icons/bs"
tag = "BsShieldCheck"
class BsShieldExclamation(rx.Component):
library = "react-icons/bs"
tag = "BsShieldExclamation"
class BsShieldFillCheck(rx.Component):
library = "react-icons/bs"
tag = "BsShieldFillCheck"
class BsShieldFillExclamation(rx.Component):
library = "react-icons/bs"
tag = "BsShieldFillExclamation"
class BsShieldFillMinus(rx.Component):
library = "react-icons/bs"
tag = "BsShieldFillMinus"
class BsShieldFillPlus(rx.Component):
library = "react-icons/bs"
tag = "BsShieldFillPlus"
class BsShieldFillX(rx.Component):
library = "react-icons/bs"
tag = "BsShieldFillX"
class BsShieldFill(rx.Component):
library = "react-icons/bs"
tag = "BsShieldFill"
class BsShieldLockFill(rx.Component):
library = "react-icons/bs"
tag = "BsShieldLockFill"
class BsShieldLock(rx.Component):
library = "react-icons/bs"
tag = "BsShieldLock"
class BsShieldMinus(rx.Component):
library = "react-icons/bs"
tag = "BsShieldMinus"
class BsShieldPlus(rx.Component):
library = "react-icons/bs"
tag = "BsShieldPlus"
class BsShieldShaded(rx.Component):
library = "react-icons/bs"
tag = "BsShieldShaded"
class BsShieldSlashFill(rx.Component):
library = "react-icons/bs"
tag = "BsShieldSlashFill"
class BsShieldSlash(rx.Component):
library = "react-icons/bs"
tag = "BsShieldSlash"
class BsShieldX(rx.Component):
library = "react-icons/bs"
tag = "BsShieldX"
class BsShield(rx.Component):
library = "react-icons/bs"
tag = "BsShield"
class BsShiftFill(rx.Component):
library = "react-icons/bs"
tag = "BsShiftFill"
class BsShift(rx.Component):
library = "react-icons/bs"
tag = "BsShift"
class BsShopWindow(rx.Component):
library = "react-icons/bs"
tag = "BsShopWindow"
class BsShop(rx.Component):
library = "react-icons/bs"
tag = "BsShop"
class BsShuffle(rx.Component):
library = "react-icons/bs"
tag = "BsShuffle"
# --- Bootstrap ("react-icons/bs") icon wrappers: BsSignDeadEndFill .. BsSnow3 ---
# Each class pins `library` to "react-icons/bs" and `tag` to the React icon
# component it wraps (same string as the class name). Likely auto-generated —
# NOTE(review): verify the generator before hand-editing.
class BsSignDeadEndFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignDeadEndFill"
class BsSignDeadEnd(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignDeadEnd"
class BsSignDoNotEnterFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignDoNotEnterFill"
class BsSignDoNotEnter(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignDoNotEnter"
class BsSignIntersectionFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignIntersectionFill"
class BsSignIntersectionSideFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignIntersectionSideFill"
class BsSignIntersectionSide(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignIntersectionSide"
class BsSignIntersectionTFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignIntersectionTFill"
class BsSignIntersectionT(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignIntersectionT"
class BsSignIntersectionYFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignIntersectionYFill"
class BsSignIntersectionY(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignIntersectionY"
class BsSignIntersection(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignIntersection"
class BsSignMergeLeftFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignMergeLeftFill"
class BsSignMergeLeft(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignMergeLeft"
class BsSignMergeRightFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignMergeRightFill"
class BsSignMergeRight(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignMergeRight"
class BsSignNoLeftTurnFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignNoLeftTurnFill"
class BsSignNoLeftTurn(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignNoLeftTurn"
class BsSignNoParkingFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignNoParkingFill"
class BsSignNoParking(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignNoParking"
class BsSignNoRightTurnFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignNoRightTurnFill"
class BsSignNoRightTurn(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignNoRightTurn"
class BsSignRailroadFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignRailroadFill"
class BsSignRailroad(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignRailroad"
class BsSignStopFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignStopFill"
class BsSignStopLightsFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignStopLightsFill"
class BsSignStopLights(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignStopLights"
class BsSignStop(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignStop"
class BsSignTurnLeftFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignTurnLeftFill"
class BsSignTurnLeft(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignTurnLeft"
class BsSignTurnRightFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignTurnRightFill"
class BsSignTurnRight(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignTurnRight"
class BsSignTurnSlightLeftFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignTurnSlightLeftFill"
class BsSignTurnSlightLeft(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignTurnSlightLeft"
class BsSignTurnSlightRightFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignTurnSlightRightFill"
class BsSignTurnSlightRight(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignTurnSlightRight"
class BsSignYieldFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignYieldFill"
class BsSignYield(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignYield"
class BsSignal(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignal"
class BsSignpost2Fill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignpost2Fill"
class BsSignpost2(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignpost2"
class BsSignpostFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignpostFill"
class BsSignpostSplitFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignpostSplitFill"
class BsSignpostSplit(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignpostSplit"
class BsSignpost(rx.Component):
    library = "react-icons/bs"
    tag = "BsSignpost"
class BsSimFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSimFill"
class BsSim(rx.Component):
    library = "react-icons/bs"
    tag = "BsSim"
class BsSinaWeibo(rx.Component):
    library = "react-icons/bs"
    tag = "BsSinaWeibo"
class BsSkipBackwardBtnFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipBackwardBtnFill"
class BsSkipBackwardBtn(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipBackwardBtn"
class BsSkipBackwardCircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipBackwardCircleFill"
class BsSkipBackwardCircle(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipBackwardCircle"
class BsSkipBackwardFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipBackwardFill"
class BsSkipBackward(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipBackward"
class BsSkipEndBtnFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipEndBtnFill"
class BsSkipEndBtn(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipEndBtn"
class BsSkipEndCircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipEndCircleFill"
class BsSkipEndCircle(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipEndCircle"
class BsSkipEndFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipEndFill"
class BsSkipEnd(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipEnd"
class BsSkipForwardBtnFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipForwardBtnFill"
class BsSkipForwardBtn(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipForwardBtn"
class BsSkipForwardCircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipForwardCircleFill"
class BsSkipForwardCircle(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipForwardCircle"
class BsSkipForwardFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipForwardFill"
class BsSkipForward(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipForward"
class BsSkipStartBtnFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipStartBtnFill"
class BsSkipStartBtn(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipStartBtn"
class BsSkipStartCircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipStartCircleFill"
class BsSkipStartCircle(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipStartCircle"
class BsSkipStartFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipStartFill"
class BsSkipStart(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkipStart"
class BsSkype(rx.Component):
    library = "react-icons/bs"
    tag = "BsSkype"
class BsSlack(rx.Component):
    library = "react-icons/bs"
    tag = "BsSlack"
class BsSlashCircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSlashCircleFill"
class BsSlashCircle(rx.Component):
    library = "react-icons/bs"
    tag = "BsSlashCircle"
class BsSlashLg(rx.Component):
    library = "react-icons/bs"
    tag = "BsSlashLg"
class BsSlashSquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSlashSquareFill"
class BsSlashSquare(rx.Component):
    library = "react-icons/bs"
    tag = "BsSlashSquare"
class BsSlash(rx.Component):
    library = "react-icons/bs"
    tag = "BsSlash"
class BsSliders(rx.Component):
    library = "react-icons/bs"
    tag = "BsSliders"
class BsSliders2Vertical(rx.Component):
    library = "react-icons/bs"
    tag = "BsSliders2Vertical"
class BsSliders2(rx.Component):
    library = "react-icons/bs"
    tag = "BsSliders2"
class BsSmartwatch(rx.Component):
    library = "react-icons/bs"
    tag = "BsSmartwatch"
class BsSnapchat(rx.Component):
    library = "react-icons/bs"
    tag = "BsSnapchat"
class BsSnow(rx.Component):
    library = "react-icons/bs"
    tag = "BsSnow"
class BsSnow2(rx.Component):
    library = "react-icons/bs"
    tag = "BsSnow2"
class BsSnow3(rx.Component):
    library = "react-icons/bs"
    tag = "BsSnow3"
# --- Bootstrap ("react-icons/bs") icon wrappers: BsSortAlphaDownAlt .. BsTelephone ---
# Each class pins `library` to "react-icons/bs" and `tag` to the React icon
# component it wraps (same string as the class name). Likely auto-generated —
# NOTE(review): verify the generator before hand-editing.
class BsSortAlphaDownAlt(rx.Component):
    library = "react-icons/bs"
    tag = "BsSortAlphaDownAlt"
class BsSortAlphaDown(rx.Component):
    library = "react-icons/bs"
    tag = "BsSortAlphaDown"
class BsSortAlphaUpAlt(rx.Component):
    library = "react-icons/bs"
    tag = "BsSortAlphaUpAlt"
class BsSortAlphaUp(rx.Component):
    library = "react-icons/bs"
    tag = "BsSortAlphaUp"
class BsSortDownAlt(rx.Component):
    library = "react-icons/bs"
    tag = "BsSortDownAlt"
class BsSortDown(rx.Component):
    library = "react-icons/bs"
    tag = "BsSortDown"
class BsSortNumericDownAlt(rx.Component):
    library = "react-icons/bs"
    tag = "BsSortNumericDownAlt"
class BsSortNumericDown(rx.Component):
    library = "react-icons/bs"
    tag = "BsSortNumericDown"
class BsSortNumericUpAlt(rx.Component):
    library = "react-icons/bs"
    tag = "BsSortNumericUpAlt"
class BsSortNumericUp(rx.Component):
    library = "react-icons/bs"
    tag = "BsSortNumericUp"
class BsSortUpAlt(rx.Component):
    library = "react-icons/bs"
    tag = "BsSortUpAlt"
class BsSortUp(rx.Component):
    library = "react-icons/bs"
    tag = "BsSortUp"
class BsSoundwave(rx.Component):
    library = "react-icons/bs"
    tag = "BsSoundwave"
class BsSpeakerFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSpeakerFill"
class BsSpeaker(rx.Component):
    library = "react-icons/bs"
    tag = "BsSpeaker"
class BsSpeedometer(rx.Component):
    library = "react-icons/bs"
    tag = "BsSpeedometer"
class BsSpeedometer2(rx.Component):
    library = "react-icons/bs"
    tag = "BsSpeedometer2"
class BsSpellcheck(rx.Component):
    library = "react-icons/bs"
    tag = "BsSpellcheck"
class BsSpotify(rx.Component):
    library = "react-icons/bs"
    tag = "BsSpotify"
class BsSquareFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSquareFill"
class BsSquareHalf(rx.Component):
    library = "react-icons/bs"
    tag = "BsSquareHalf"
class BsSquare(rx.Component):
    library = "react-icons/bs"
    tag = "BsSquare"
class BsStackOverflow(rx.Component):
    library = "react-icons/bs"
    tag = "BsStackOverflow"
class BsStack(rx.Component):
    library = "react-icons/bs"
    tag = "BsStack"
class BsStarFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsStarFill"
class BsStarHalf(rx.Component):
    library = "react-icons/bs"
    tag = "BsStarHalf"
class BsStar(rx.Component):
    library = "react-icons/bs"
    tag = "BsStar"
class BsStars(rx.Component):
    library = "react-icons/bs"
    tag = "BsStars"
class BsSteam(rx.Component):
    library = "react-icons/bs"
    tag = "BsSteam"
class BsStickiesFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsStickiesFill"
class BsStickies(rx.Component):
    library = "react-icons/bs"
    tag = "BsStickies"
class BsStickyFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsStickyFill"
class BsSticky(rx.Component):
    library = "react-icons/bs"
    tag = "BsSticky"
class BsStopBtnFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsStopBtnFill"
class BsStopBtn(rx.Component):
    library = "react-icons/bs"
    tag = "BsStopBtn"
class BsStopCircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsStopCircleFill"
class BsStopCircle(rx.Component):
    library = "react-icons/bs"
    tag = "BsStopCircle"
class BsStopFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsStopFill"
class BsStop(rx.Component):
    library = "react-icons/bs"
    tag = "BsStop"
class BsStoplightsFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsStoplightsFill"
class BsStoplights(rx.Component):
    library = "react-icons/bs"
    tag = "BsStoplights"
class BsStopwatchFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsStopwatchFill"
class BsStopwatch(rx.Component):
    library = "react-icons/bs"
    tag = "BsStopwatch"
class BsStrava(rx.Component):
    library = "react-icons/bs"
    tag = "BsStrava"
class BsStripe(rx.Component):
    library = "react-icons/bs"
    tag = "BsStripe"
class BsSubscript(rx.Component):
    library = "react-icons/bs"
    tag = "BsSubscript"
class BsSubtract(rx.Component):
    library = "react-icons/bs"
    tag = "BsSubtract"
class BsSuitClubFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSuitClubFill"
class BsSuitClub(rx.Component):
    library = "react-icons/bs"
    tag = "BsSuitClub"
class BsSuitDiamondFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSuitDiamondFill"
class BsSuitDiamond(rx.Component):
    library = "react-icons/bs"
    tag = "BsSuitDiamond"
class BsSuitHeartFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSuitHeartFill"
class BsSuitHeart(rx.Component):
    library = "react-icons/bs"
    tag = "BsSuitHeart"
class BsSuitSpadeFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSuitSpadeFill"
class BsSuitSpade(rx.Component):
    library = "react-icons/bs"
    tag = "BsSuitSpade"
class BsSunFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSunFill"
class BsSun(rx.Component):
    library = "react-icons/bs"
    tag = "BsSun"
class BsSunglasses(rx.Component):
    library = "react-icons/bs"
    tag = "BsSunglasses"
class BsSunriseFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSunriseFill"
class BsSunrise(rx.Component):
    library = "react-icons/bs"
    tag = "BsSunrise"
class BsSunsetFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsSunsetFill"
class BsSunset(rx.Component):
    library = "react-icons/bs"
    tag = "BsSunset"
class BsSuperscript(rx.Component):
    library = "react-icons/bs"
    tag = "BsSuperscript"
class BsSymmetryHorizontal(rx.Component):
    library = "react-icons/bs"
    tag = "BsSymmetryHorizontal"
class BsSymmetryVertical(rx.Component):
    library = "react-icons/bs"
    tag = "BsSymmetryVertical"
class BsTable(rx.Component):
    library = "react-icons/bs"
    tag = "BsTable"
class BsTabletFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTabletFill"
class BsTabletLandscapeFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTabletLandscapeFill"
class BsTabletLandscape(rx.Component):
    library = "react-icons/bs"
    tag = "BsTabletLandscape"
class BsTablet(rx.Component):
    library = "react-icons/bs"
    tag = "BsTablet"
class BsTagFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTagFill"
class BsTag(rx.Component):
    library = "react-icons/bs"
    tag = "BsTag"
class BsTagsFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTagsFill"
class BsTags(rx.Component):
    library = "react-icons/bs"
    tag = "BsTags"
class BsTaxiFrontFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTaxiFrontFill"
class BsTaxiFront(rx.Component):
    library = "react-icons/bs"
    tag = "BsTaxiFront"
class BsTelegram(rx.Component):
    library = "react-icons/bs"
    tag = "BsTelegram"
class BsTelephoneFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTelephoneFill"
class BsTelephoneForwardFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTelephoneForwardFill"
class BsTelephoneForward(rx.Component):
    library = "react-icons/bs"
    tag = "BsTelephoneForward"
class BsTelephoneInboundFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTelephoneInboundFill"
class BsTelephoneInbound(rx.Component):
    library = "react-icons/bs"
    tag = "BsTelephoneInbound"
class BsTelephoneMinusFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTelephoneMinusFill"
class BsTelephoneMinus(rx.Component):
    library = "react-icons/bs"
    tag = "BsTelephoneMinus"
class BsTelephoneOutboundFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTelephoneOutboundFill"
class BsTelephoneOutbound(rx.Component):
    library = "react-icons/bs"
    tag = "BsTelephoneOutbound"
class BsTelephonePlusFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTelephonePlusFill"
class BsTelephonePlus(rx.Component):
    library = "react-icons/bs"
    tag = "BsTelephonePlus"
class BsTelephoneXFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTelephoneXFill"
class BsTelephoneX(rx.Component):
    library = "react-icons/bs"
    tag = "BsTelephoneX"
class BsTelephone(rx.Component):
    library = "react-icons/bs"
    tag = "BsTelephone"
# --- Bootstrap ("react-icons/bs") icon wrappers: BsTencentQq .. BsUnion ---
# Each class pins `library` to "react-icons/bs" and `tag` to the React icon
# component it wraps (same string as the class name). Likely auto-generated —
# NOTE(review): verify the generator before hand-editing.
class BsTencentQq(rx.Component):
    library = "react-icons/bs"
    tag = "BsTencentQq"
class BsTerminalDash(rx.Component):
    library = "react-icons/bs"
    tag = "BsTerminalDash"
class BsTerminalFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTerminalFill"
class BsTerminalPlus(rx.Component):
    library = "react-icons/bs"
    tag = "BsTerminalPlus"
class BsTerminalSplit(rx.Component):
    library = "react-icons/bs"
    tag = "BsTerminalSplit"
class BsTerminalX(rx.Component):
    library = "react-icons/bs"
    tag = "BsTerminalX"
class BsTerminal(rx.Component):
    library = "react-icons/bs"
    tag = "BsTerminal"
class BsTextCenter(rx.Component):
    library = "react-icons/bs"
    tag = "BsTextCenter"
class BsTextIndentLeft(rx.Component):
    library = "react-icons/bs"
    tag = "BsTextIndentLeft"
class BsTextIndentRight(rx.Component):
    library = "react-icons/bs"
    tag = "BsTextIndentRight"
class BsTextLeft(rx.Component):
    library = "react-icons/bs"
    tag = "BsTextLeft"
class BsTextParagraph(rx.Component):
    library = "react-icons/bs"
    tag = "BsTextParagraph"
class BsTextRight(rx.Component):
    library = "react-icons/bs"
    tag = "BsTextRight"
class BsTextWrap(rx.Component):
    library = "react-icons/bs"
    tag = "BsTextWrap"
class BsTextareaResize(rx.Component):
    library = "react-icons/bs"
    tag = "BsTextareaResize"
class BsTextareaT(rx.Component):
    library = "react-icons/bs"
    tag = "BsTextareaT"
class BsTextarea(rx.Component):
    library = "react-icons/bs"
    tag = "BsTextarea"
class BsThermometerHalf(rx.Component):
    library = "react-icons/bs"
    tag = "BsThermometerHalf"
class BsThermometerHigh(rx.Component):
    library = "react-icons/bs"
    tag = "BsThermometerHigh"
class BsThermometerLow(rx.Component):
    library = "react-icons/bs"
    tag = "BsThermometerLow"
class BsThermometerSnow(rx.Component):
    library = "react-icons/bs"
    tag = "BsThermometerSnow"
class BsThermometerSun(rx.Component):
    library = "react-icons/bs"
    tag = "BsThermometerSun"
class BsThermometer(rx.Component):
    library = "react-icons/bs"
    tag = "BsThermometer"
class BsThreeDotsVertical(rx.Component):
    library = "react-icons/bs"
    tag = "BsThreeDotsVertical"
class BsThreeDots(rx.Component):
    library = "react-icons/bs"
    tag = "BsThreeDots"
class BsThunderboltFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsThunderboltFill"
class BsThunderbolt(rx.Component):
    library = "react-icons/bs"
    tag = "BsThunderbolt"
class BsTicketDetailedFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTicketDetailedFill"
class BsTicketDetailed(rx.Component):
    library = "react-icons/bs"
    tag = "BsTicketDetailed"
class BsTicketFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTicketFill"
class BsTicketPerforatedFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTicketPerforatedFill"
class BsTicketPerforated(rx.Component):
    library = "react-icons/bs"
    tag = "BsTicketPerforated"
class BsTicket(rx.Component):
    library = "react-icons/bs"
    tag = "BsTicket"
class BsTiktok(rx.Component):
    library = "react-icons/bs"
    tag = "BsTiktok"
class BsToggleOff(rx.Component):
    library = "react-icons/bs"
    tag = "BsToggleOff"
class BsToggleOn(rx.Component):
    library = "react-icons/bs"
    tag = "BsToggleOn"
class BsToggle2Off(rx.Component):
    library = "react-icons/bs"
    tag = "BsToggle2Off"
class BsToggle2On(rx.Component):
    library = "react-icons/bs"
    tag = "BsToggle2On"
class BsToggles(rx.Component):
    library = "react-icons/bs"
    tag = "BsToggles"
class BsToggles2(rx.Component):
    library = "react-icons/bs"
    tag = "BsToggles2"
class BsTools(rx.Component):
    library = "react-icons/bs"
    tag = "BsTools"
class BsTornado(rx.Component):
    library = "react-icons/bs"
    tag = "BsTornado"
class BsTrainFreightFrontFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTrainFreightFrontFill"
class BsTrainFreightFront(rx.Component):
    library = "react-icons/bs"
    tag = "BsTrainFreightFront"
class BsTrainFrontFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTrainFrontFill"
class BsTrainFront(rx.Component):
    library = "react-icons/bs"
    tag = "BsTrainFront"
class BsTrainLightrailFrontFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTrainLightrailFrontFill"
class BsTrainLightrailFront(rx.Component):
    library = "react-icons/bs"
    tag = "BsTrainLightrailFront"
class BsTranslate(rx.Component):
    library = "react-icons/bs"
    tag = "BsTranslate"
class BsTrashFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTrashFill"
class BsTrash(rx.Component):
    library = "react-icons/bs"
    tag = "BsTrash"
class BsTrash2Fill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTrash2Fill"
class BsTrash2(rx.Component):
    library = "react-icons/bs"
    tag = "BsTrash2"
class BsTrash3Fill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTrash3Fill"
class BsTrash3(rx.Component):
    library = "react-icons/bs"
    tag = "BsTrash3"
class BsTreeFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTreeFill"
class BsTree(rx.Component):
    library = "react-icons/bs"
    tag = "BsTree"
class BsTrello(rx.Component):
    library = "react-icons/bs"
    tag = "BsTrello"
class BsTriangleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTriangleFill"
class BsTriangleHalf(rx.Component):
    library = "react-icons/bs"
    tag = "BsTriangleHalf"
class BsTriangle(rx.Component):
    library = "react-icons/bs"
    tag = "BsTriangle"
class BsTrophyFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTrophyFill"
class BsTrophy(rx.Component):
    library = "react-icons/bs"
    tag = "BsTrophy"
class BsTropicalStorm(rx.Component):
    library = "react-icons/bs"
    tag = "BsTropicalStorm"
class BsTruckFlatbed(rx.Component):
    library = "react-icons/bs"
    tag = "BsTruckFlatbed"
class BsTruckFrontFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTruckFrontFill"
class BsTruckFront(rx.Component):
    library = "react-icons/bs"
    tag = "BsTruckFront"
class BsTruck(rx.Component):
    library = "react-icons/bs"
    tag = "BsTruck"
class BsTsunami(rx.Component):
    library = "react-icons/bs"
    tag = "BsTsunami"
class BsTvFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsTvFill"
class BsTv(rx.Component):
    library = "react-icons/bs"
    tag = "BsTv"
class BsTwitch(rx.Component):
    library = "react-icons/bs"
    tag = "BsTwitch"
class BsTwitter(rx.Component):
    library = "react-icons/bs"
    tag = "BsTwitter"
class BsTypeBold(rx.Component):
    library = "react-icons/bs"
    tag = "BsTypeBold"
class BsTypeH1(rx.Component):
    library = "react-icons/bs"
    tag = "BsTypeH1"
class BsTypeH2(rx.Component):
    library = "react-icons/bs"
    tag = "BsTypeH2"
class BsTypeH3(rx.Component):
    library = "react-icons/bs"
    tag = "BsTypeH3"
class BsTypeItalic(rx.Component):
    library = "react-icons/bs"
    tag = "BsTypeItalic"
class BsTypeStrikethrough(rx.Component):
    library = "react-icons/bs"
    tag = "BsTypeStrikethrough"
class BsTypeUnderline(rx.Component):
    library = "react-icons/bs"
    tag = "BsTypeUnderline"
class BsType(rx.Component):
    library = "react-icons/bs"
    tag = "BsType"
class BsUbuntu(rx.Component):
    library = "react-icons/bs"
    tag = "BsUbuntu"
class BsUiChecksGrid(rx.Component):
    library = "react-icons/bs"
    tag = "BsUiChecksGrid"
class BsUiChecks(rx.Component):
    library = "react-icons/bs"
    tag = "BsUiChecks"
class BsUiRadiosGrid(rx.Component):
    library = "react-icons/bs"
    tag = "BsUiRadiosGrid"
class BsUiRadios(rx.Component):
    library = "react-icons/bs"
    tag = "BsUiRadios"
class BsUmbrellaFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsUmbrellaFill"
class BsUmbrella(rx.Component):
    library = "react-icons/bs"
    tag = "BsUmbrella"
class BsUnindent(rx.Component):
    library = "react-icons/bs"
    tag = "BsUnindent"
class BsUnion(rx.Component):
    library = "react-icons/bs"
    tag = "BsUnion"
# --- Bootstrap ("react-icons/bs") icon wrappers: BsUnity .. BsXDiamond ---
# Each class pins `library` to "react-icons/bs" and `tag` to the React icon
# component it wraps (same string as the class name). Likely auto-generated —
# NOTE(review): verify the generator before hand-editing.
class BsUnity(rx.Component):
    library = "react-icons/bs"
    tag = "BsUnity"
class BsUniversalAccessCircle(rx.Component):
    library = "react-icons/bs"
    tag = "BsUniversalAccessCircle"
class BsUniversalAccess(rx.Component):
    library = "react-icons/bs"
    tag = "BsUniversalAccess"
class BsUnlockFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsUnlockFill"
class BsUnlock(rx.Component):
    library = "react-icons/bs"
    tag = "BsUnlock"
class BsUpcScan(rx.Component):
    library = "react-icons/bs"
    tag = "BsUpcScan"
class BsUpc(rx.Component):
    library = "react-icons/bs"
    tag = "BsUpc"
class BsUpload(rx.Component):
    library = "react-icons/bs"
    tag = "BsUpload"
class BsUsbCFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsUsbCFill"
class BsUsbC(rx.Component):
    library = "react-icons/bs"
    tag = "BsUsbC"
class BsUsbDriveFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsUsbDriveFill"
class BsUsbDrive(rx.Component):
    library = "react-icons/bs"
    tag = "BsUsbDrive"
class BsUsbFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsUsbFill"
class BsUsbMicroFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsUsbMicroFill"
class BsUsbMicro(rx.Component):
    library = "react-icons/bs"
    tag = "BsUsbMicro"
class BsUsbMiniFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsUsbMiniFill"
class BsUsbMini(rx.Component):
    library = "react-icons/bs"
    tag = "BsUsbMini"
class BsUsbPlugFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsUsbPlugFill"
class BsUsbPlug(rx.Component):
    library = "react-icons/bs"
    tag = "BsUsbPlug"
class BsUsbSymbol(rx.Component):
    library = "react-icons/bs"
    tag = "BsUsbSymbol"
class BsUsb(rx.Component):
    library = "react-icons/bs"
    tag = "BsUsb"
class BsValentine(rx.Component):
    library = "react-icons/bs"
    tag = "BsValentine"
class BsValentine2(rx.Component):
    library = "react-icons/bs"
    tag = "BsValentine2"
class BsVectorPen(rx.Component):
    library = "react-icons/bs"
    tag = "BsVectorPen"
class BsViewList(rx.Component):
    library = "react-icons/bs"
    tag = "BsViewList"
class BsViewStacked(rx.Component):
    library = "react-icons/bs"
    tag = "BsViewStacked"
class BsVimeo(rx.Component):
    library = "react-icons/bs"
    tag = "BsVimeo"
class BsVinylFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsVinylFill"
class BsVinyl(rx.Component):
    library = "react-icons/bs"
    tag = "BsVinyl"
class BsVirus(rx.Component):
    library = "react-icons/bs"
    tag = "BsVirus"
class BsVirus2(rx.Component):
    library = "react-icons/bs"
    tag = "BsVirus2"
class BsVoicemail(rx.Component):
    library = "react-icons/bs"
    tag = "BsVoicemail"
class BsVolumeDownFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsVolumeDownFill"
class BsVolumeDown(rx.Component):
    library = "react-icons/bs"
    tag = "BsVolumeDown"
class BsVolumeMuteFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsVolumeMuteFill"
class BsVolumeMute(rx.Component):
    library = "react-icons/bs"
    tag = "BsVolumeMute"
class BsVolumeOffFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsVolumeOffFill"
class BsVolumeOff(rx.Component):
    library = "react-icons/bs"
    tag = "BsVolumeOff"
class BsVolumeUpFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsVolumeUpFill"
class BsVolumeUp(rx.Component):
    library = "react-icons/bs"
    tag = "BsVolumeUp"
class BsVr(rx.Component):
    library = "react-icons/bs"
    tag = "BsVr"
class BsWalletFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsWalletFill"
class BsWallet(rx.Component):
    library = "react-icons/bs"
    tag = "BsWallet"
class BsWallet2(rx.Component):
    library = "react-icons/bs"
    tag = "BsWallet2"
class BsWatch(rx.Component):
    library = "react-icons/bs"
    tag = "BsWatch"
class BsWater(rx.Component):
    library = "react-icons/bs"
    tag = "BsWater"
class BsWebcamFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsWebcamFill"
class BsWebcam(rx.Component):
    library = "react-icons/bs"
    tag = "BsWebcam"
class BsWechat(rx.Component):
    library = "react-icons/bs"
    tag = "BsWechat"
class BsWhatsapp(rx.Component):
    library = "react-icons/bs"
    tag = "BsWhatsapp"
class BsWifi1(rx.Component):
    library = "react-icons/bs"
    tag = "BsWifi1"
class BsWifi2(rx.Component):
    library = "react-icons/bs"
    tag = "BsWifi2"
class BsWifiOff(rx.Component):
    library = "react-icons/bs"
    tag = "BsWifiOff"
class BsWifi(rx.Component):
    library = "react-icons/bs"
    tag = "BsWifi"
class BsWikipedia(rx.Component):
    library = "react-icons/bs"
    tag = "BsWikipedia"
class BsWind(rx.Component):
    library = "react-icons/bs"
    tag = "BsWind"
class BsWindowDash(rx.Component):
    library = "react-icons/bs"
    tag = "BsWindowDash"
class BsWindowDesktop(rx.Component):
    library = "react-icons/bs"
    tag = "BsWindowDesktop"
class BsWindowDock(rx.Component):
    library = "react-icons/bs"
    tag = "BsWindowDock"
class BsWindowFullscreen(rx.Component):
    library = "react-icons/bs"
    tag = "BsWindowFullscreen"
class BsWindowPlus(rx.Component):
    library = "react-icons/bs"
    tag = "BsWindowPlus"
class BsWindowSidebar(rx.Component):
    library = "react-icons/bs"
    tag = "BsWindowSidebar"
class BsWindowSplit(rx.Component):
    library = "react-icons/bs"
    tag = "BsWindowSplit"
class BsWindowStack(rx.Component):
    library = "react-icons/bs"
    tag = "BsWindowStack"
class BsWindowX(rx.Component):
    library = "react-icons/bs"
    tag = "BsWindowX"
class BsWindow(rx.Component):
    library = "react-icons/bs"
    tag = "BsWindow"
class BsWindows(rx.Component):
    library = "react-icons/bs"
    tag = "BsWindows"
class BsWordpress(rx.Component):
    library = "react-icons/bs"
    tag = "BsWordpress"
class BsWrenchAdjustableCircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsWrenchAdjustableCircleFill"
class BsWrenchAdjustableCircle(rx.Component):
    library = "react-icons/bs"
    tag = "BsWrenchAdjustableCircle"
class BsWrenchAdjustable(rx.Component):
    library = "react-icons/bs"
    tag = "BsWrenchAdjustable"
class BsWrench(rx.Component):
    library = "react-icons/bs"
    tag = "BsWrench"
class BsXCircleFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsXCircleFill"
class BsXCircle(rx.Component):
    library = "react-icons/bs"
    tag = "BsXCircle"
class BsXDiamondFill(rx.Component):
    library = "react-icons/bs"
    tag = "BsXDiamondFill"
class BsXDiamond(rx.Component):
    library = "react-icons/bs"
    tag = "BsXDiamond"
class BsXLg(rx.Component):
library = "react-icons/bs"
tag = "BsXLg"
class BsXOctagonFill(rx.Component):
library = "react-icons/bs"
tag = "BsXOctagonFill"
class BsXOctagon(rx.Component):
library = "react-icons/bs"
tag = "BsXOctagon"
class BsXSquareFill(rx.Component):
library = "react-icons/bs"
tag = "BsXSquareFill"
class BsXSquare(rx.Component):
library = "react-icons/bs"
tag = "BsXSquare"
class BsX(rx.Component):
library = "react-icons/bs"
tag = "BsX"
class BsXbox(rx.Component):
library = "react-icons/bs"
tag = "BsXbox"
class BsYelp(rx.Component):
library = "react-icons/bs"
tag = "BsYelp"
class BsYinYang(rx.Component):
library = "react-icons/bs"
tag = "BsYinYang"
class BsYoutube(rx.Component):
library = "react-icons/bs"
tag = "BsYoutube"
class BsZoomIn(rx.Component):
library = "react-icons/bs"
tag = "BsZoomIn"
class BsZoomOut(rx.Component):
library = "react-icons/bs"
tag = "BsZoomOut" | /reflex-icons-1.0.2.tar.gz/reflex-icons-1.0.2/reflex_icons/BootStrap.py | 0.449151 | 0.183978 | BootStrap.py | pypi |
[![][linux-badge]][Releases] [![][macos-badge]][Releases]
[![][windows-badge]][Releases] [![][pypi-badge]][pypi]
[![][docs-badge]][Documentation] [![][lint-badge]][lint]
[![][test-badge]][test] [![][coverage-badge]][coverage]
[![][license-badge]][LICENSE] [![][tag-badge]][tag]
[![][discord-badge]][discord]
__Note: This is a work in progress project. References made in the readme__
__and documentation are currently unfulfilled.__
# RE:Flex Nexus - Universal Dance Pad Utilities
RE:Flex Nexus aims to provide a comprehensive collection of PC software
utilities for dance pads. The interface specification is flexible, allowing
any dance pad to be quickly integrated and begin using available software
features.
In addition, the project aims to provide a comprehensive set of user guides
for open-source dance pad design. The goal is to create a centralised
ecosystem for both software tools and documentation of dance pads. This
will hopefully inspire creativity/freedom, while letting dance gamers share
technology together.
## Installation
- The latest executable can be found on the [Releases] page
- Download the application for your respective operating system
- Download the `nexus-resources.zip` package and extract it to the location
where you would like to store program data
- Open the `reflex-nexus` application
- You will be prompted to set up your program data folder; enter the path of
the folder that you just extracted
## Usage
For details on usage, check out the [Documentation].
## Contributing
Install the latest version of [Python] and [git], use your terminal/command
prompt to navigate to the directory you would like to install this project to,
then run the following commands:
```bash
# Clone and enter repository.
git clone https://github.com/ReflexCreations/Nexus.git
cd Nexus
# Get the build/environment manager.
pip install poetry
# Set up virtual environment and install reflex_nexus as package.
poetry install
```
You're all set up! You can now use [Poetry] to run the application scripts.
```bash
# Build the executable.
poetry run build
# Build the documentation HTML.
poetry run docs
# Lint the project.
poetry run lint
# Test the project for your Python version and operating system.
poetry run test
```
To synchronise your code editor with the virtual environment that Poetry
creates, you can use `poetry env info -p`, copy the path, and supply that path
to your code editor for the Python interpreter location.
## Acknowledgements
The following Python packages are used in this project, and are greatly
appreciated:
#### Runtime dependencies:
- [libusb-package] - Container package for libusb
- [PyQtDarkTheme] - Dark/light theme for Python QT applications
- [PySide6] - Python bindings for QT GUI framework
- [PyUSB] - Python USB access module
- [QtAwesome] - FontAwesome/Elusive Icons for Python QT applications
#### Development dependencies:
- [Poetry] - Dependency, environment and packaging manager
- [PyInstaller] - Cross-platform executable generator
- [pytest] - Project testing
- [pytest-cov] - Coverage integration for pytest
- [Ruff] - Project linting
- [Sphinx] - Documentation generator
- [Sphinx-Rtd-Theme] - Read the Docs Sphinx theme
*RE:Flex Nexus is released under the MIT License, more details in [LICENSE]
file.*
<!--- Site links -->
[coverage]: https://coveralls.io/github/ReflexCreations/Nexus?branch=main
[discord]: https://discord.gg/TCn3emnwZU
[Documentation]: https://reflex-nexus.readthedocs.io/
[Git]: https://git-scm.com/downloads/
[LICENSE]: https://github.com/ReflexCreations/Nexus/blob/master/LICENSE
[lint]: https://github.com/ReflexCreations/Nexus/actions
[Python]: https://python.org/downloads/
[pypi]: https://pypi.org/project/reflex-nexus
[Releases]: https://github.com/ReflexCreations/Nexus/releases/
[tag]: https://github.com/ReflexCreations/Nexus/tags
[test]: https://github.com/ReflexCreations/Nexus/actions
<!--- Runtime dependency links -->
[libusb-package]: https://pypi.org/project/libusb-package/
[PyQtDarkTheme]: https://pypi.org/project/pyqtdarktheme/
[PySide6]: https://pypi.org/project/PySide6/
[PyUSB]: https://pypi.org/project/pyusb/
[QtAwesome]: https://pypi.org/project/QtAwesome/
<!--- Development dependency links -->
[Poetry]: https://pypi.org/project/poetry/
[PyInstaller]: https://pypi.org/project/pyinstaller/
[pytest]: https://pypi.org/project/pytest/
[pytest-cov]: https://pypi.org/project/pytest-cov/
[Ruff]: https://pypi.org/project/ruff/
[Sphinx]: https://pypi.org/project/Sphinx/
[Sphinx-Rtd-Theme]: https://pypi.org/project/sphinx-rtd-theme/
<!--- Badge images -->
[coverage-badge]: https://coveralls.io/repos/github/ReflexCreations/Nexus/badge.svg?branch=main
[discord-badge]: https://img.shields.io/discord/738700768147669088?label=discord
[docs-badge]: https://readthedocs.org/projects/reflex-nexus/badge/?version=latest
[license-badge]: https://img.shields.io/github/license/ReflexCreations/Nexus
[lint-badge]: https://img.shields.io/github/actions/workflow/status/ReflexCreations/Nexus/project-lint.yml?label=linting
[linux-badge]: https://img.shields.io/github/actions/workflow/status/ReflexCreations/Nexus/build-linux.yml?label=linux%20build
[macos-badge]: https://img.shields.io/github/actions/workflow/status/ReflexCreations/Nexus/build-macos.yml?label=macos%20build
[pypi-badge]: https://img.shields.io/pypi/v/reflex-nexus
[tag-badge]: https://img.shields.io/github/v/tag/ReflexCreations/Nexus
[test-badge]: https://img.shields.io/github/actions/workflow/status/ReflexCreations/Nexus/project-test.yml?label=tests
[windows-badge]: https://img.shields.io/github/actions/workflow/status/ReflexCreations/Nexus/build-windows.yml?label=windows%20build
| /reflex_nexus-0.0.22.tar.gz/reflex_nexus-0.0.22/README.md | 0.455683 | 0.792304 | README.md | pypi |
from dataclasses import KW_ONLY, dataclass
from typing import * # type: ignore
# https://github.com/forgedsoftware/measurementcommon/blob/master/systems.json
# Decimal SI prefixes as `(power-of-TEN exponent, name, symbol)` triples,
# ordered from largest to smallest.
SI_PREFIXES = (
    (24, "yotta", "Y"),
    (21, "zetta", "Z"),
    (18, "exa", "E"),
    (15, "peta", "P"),
    (12, "tera", "T"),
    (9, "giga", "G"),
    (6, "mega", "M"),
    (3, "kilo", "k"),
    (2, "hecto", "h"),
    (1, "deca", "da"),
    (-1, "deci", "d"),
    (-2, "centi", "c"),
    (-3, "milli", "m"),
    (-6, "micro", "μ"),
    (-9, "nano", "n"),
    (-12, "pico", "p"),
    (-15, "femto", "f"),
    (-18, "atto", "a"),
    (-21, "zepto", "z"),
    (-24, "yocto", "y"),
)
# Binary (IEC) prefixes as `(power-of-TWO exponent, name, symbol)` triples.
INFORMATION_PREFIXES = (
    (10, "kibi", "Ki"),
    (20, "mebi", "Mi"),
    (30, "gibi", "Gi"),
    (40, "tebi", "Ti"),
    (50, "pebi", "Pi"),
    (60, "exbi", "Ei"),
    (70, "zebi", "Zi"),
    (80, "yobi", "Yi"),
)
# NOTE(review): the exponents in the two tables use different bases (10 for
# SI, 2 for IEC). Any consumer of ALL_PREFIXES must track which table an
# entry came from before interpreting the exponent — confirm callers do.
ALL_PREFIXES = SI_PREFIXES + INFORMATION_PREFIXES
@dataclass(frozen=True)
class Dimension:
names: Sequence[str]
_: KW_ONLY
ampere: int = 0
candela: int = 0
kelvin: int = 0
kilogram: int = 0
metre: int = 0
mole: int = 0
second: int = 0
bytes: int = 0
def __mul__(self, other: "Dimension") -> "Dimension":
return Dimension(
tuple(),
ampere=self.ampere + other.ampere,
candela=self.candela + other.candela,
kelvin=self.kelvin + other.kelvin,
kilogram=self.kilogram + other.kilogram,
metre=self.metre + other.metre,
mole=self.mole + other.mole,
second=self.second + other.second,
bytes=self.bytes + other.bytes,
)
# Base dimensions
ELECTRICAL_CURRENT = Dimension(("electrical current", "current"), ampere=1)
LUMINOSITY = Dimension(("luminosity",), candela=1)
TEMPERATURE = Dimension(("temperature",), kelvin=1)
MASS = Dimension(("mass", "weight", "tonnage"), kilogram=1)
LENGTH = Dimension(("length", "distance"), metre=1)
AMOUNT_OF_SUBSTANCE = Dimension(("amount of substance",), mole=1)
TIME = Dimension(("time", "duration"), second=1)
FILE_SIZE = Dimension(("file size",), bytes=1)

# Common derived dimensions
AREA = Dimension(("area",), metre=2)  # m²
VOLUME = Dimension(("volume",), metre=3)  # m³
SPEED = Dimension(("speed",), metre=1, second=-1)  # m/s
ACCELERATION = Dimension(("acceleration",), metre=1, second=-2)  # m/s²
JERK = Dimension(("jerk",), metre=1, second=-3)  # m/s³
FORCE = Dimension(("force",), kilogram=1, metre=1, second=-2)  # newton
PRESSURE = Dimension(("pressure",), kilogram=1, metre=-1, second=-2)  # pascal
ENERGY = Dimension(("energy",), kilogram=1, metre=2, second=-2)  # joule
POWER = Dimension(("power",), kilogram=1, metre=2, second=-3)  # watt
# Linear momentum is kg·m·s⁻¹. This previously used `metre=2`, which is the
# dimension of angular momentum / action, not momentum.
MOMENTUM = Dimension(("momentum",), kilogram=1, metre=1, second=-1)
FREQUENCY = Dimension(("frequency",), second=-1)  # hertz
@dataclass(frozen=True)
class Unit:
    """A concrete measurement unit for some `Dimension`."""
    dimension: Dimension
    # Factor converting one of this unit into the coherent base unit of its
    # dimension (e.g. 0.3048 for a foot, since 1 ft == 0.3048 m; 1e-3 for a
    # gram, since the mass base unit is the kilogram).
    base_multiple: float
    # `(exponent, name, symbol)` prefix triples this unit may be used with;
    # empty for units that never take prefixes.
    prefixes: Sequence[Tuple[int, str, str]]
# Coherent SI base units (multiple 1).
AMPERE = Unit(ELECTRICAL_CURRENT, 1, SI_PREFIXES)
CANDELA = Unit(LUMINOSITY, 1, SI_PREFIXES)
KELVIN = Unit(TEMPERATURE, 1, SI_PREFIXES)
# The SI base unit of mass is the *kilo*gram, hence the 1e-3 multiple.
GRAM = Unit(MASS, 1e-3, SI_PREFIXES)
METRE = Unit(LENGTH, 1, SI_PREFIXES)
MOLE = Unit(AMOUNT_OF_SUBSTANCE, 1, SI_PREFIXES)
SECOND = Unit(TIME, 1, SI_PREFIXES)
# Information units; the base unit is one byte.
BIT = Unit(FILE_SIZE, 1 / 8, INFORMATION_PREFIXES)
BYTE = Unit(FILE_SIZE, 1, INFORMATION_PREFIXES)
# Mass units, expressed in kilograms.
METRIC_TONNE = Unit(MASS, 1e3, SI_PREFIXES)
US_TON = Unit(MASS, 907.18474, ())
POUND = Unit(MASS, 0.45359237, ())
OUNCE = Unit(MASS, 0.028349523125, ())
STONE = Unit(MASS, 6.35029318, ())
GRAIN = Unit(MASS, 0.00006479891, ())
CARAT = Unit(MASS, 0.0002, ())
# Length units, expressed in metres.
US_FOOT = Unit(LENGTH, 0.3048, ())
US_INCH = Unit(LENGTH, 0.0254, ())
US_MILE = Unit(LENGTH, 1609.344, ())
NAUTICAL_MILE = Unit(LENGTH, 1852, ())
LIGHT_YEAR = Unit(LENGTH, 9.4607304725808e15, ())
PARSEC = Unit(LENGTH, 3.0856775814914e16, ())
ANGSTROM = Unit(LENGTH, 1e-10, ())
ASTRONOMICAL_UNIT = Unit(LENGTH, 1.495978707e11, ())
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Dict, Iterable, Literal, Tuple, Union
from uniserde import JsonDoc
from .color import Color
from .image_source import ImageLike, ImageSource
# Public API of this module.
__all__ = [
    "Fill",
    "FillLike",
    "ImageFill",
    "LinearGradientFill",
    "SolidFill",
]
# Anything accepted where a fill is expected: either a `Fill` instance or a
# plain `Color`, which `Fill._try_from` promotes to a `SolidFill`.
FillLike = Union["Fill", "Color"]
class Fill(ABC):
    """
    Abstract base class for the different ways a shape can be filled.
    """

    @staticmethod
    def _try_from(value: FillLike) -> "Fill":
        """Coerce `value` into a `Fill`, promoting plain colors to solid fills."""
        if isinstance(value, Fill):
            result = value
        elif isinstance(value, Color):
            result = SolidFill(value)
        else:
            raise TypeError(f"Expected Fill or Color, got {type(value)}")

        return result

    @abstractmethod
    def _serialize(self) -> JsonDoc:
        raise NotImplementedError()
@dataclass(frozen=True, eq=True)
class SolidFill(Fill):
    """A fill that paints the entire shape in a single color."""

    color: Color

    def _serialize(self) -> JsonDoc:
        # Keys must match what the frontend expects.
        result: JsonDoc = {"type": "solid"}
        result["color"] = self.color.rgba
        return result
@dataclass(frozen=True, eq=True)
class LinearGradientFill(Fill):
    """
    A fill that blends between several color stops along a straight line.
    """

    # `(color, position)` pairs; stored sorted by position.
    stops: Iterable[Tuple[Color, float]]
    # Direction of the gradient.
    angle_degrees: float = 0.0

    def __init__(
        self,
        *stops: Tuple[Color, float],
        angle_degrees: float = 0.0,
    ):
        # Make sure there's at least one stop. Check the *argument* rather
        # than `self.stops`: this is a frozen dataclass with a hand-written
        # `__init__`, so the attribute has not been assigned yet and the
        # previous `self.stops` lookup raised AttributeError instead of the
        # intended ValueError.
        if not stops:
            raise ValueError("Gradients must have at least 1 stop")

        # Sort the stops by position and store them. Writing through
        # `vars(self)` bypasses the frozen dataclass' write protection.
        vars(self).update(
            stops=tuple(sorted(stops, key=lambda x: x[1])),
            angle_degrees=angle_degrees,
        )

    def _serialize(self) -> JsonDoc:
        return {
            "type": "linearGradient",
            "stops": [(color.rgba, position) for color, position in self.stops],
            "angleDegrees": self.angle_degrees,
        }
class ImageFill(Fill):
    """Fills a shape with an image."""
    def __init__(
        self,
        image: ImageLike,
        *,
        fill_mode: Literal["fit", "stretch", "tile", "zoom"] = "fit",
        keep_aspect_ratio: bool = True,
        fill_entire_shape: bool = False,
    ):
        # NOTE(review): `keep_aspect_ratio` and `fill_entire_shape` are
        # accepted but never stored, serialized, or compared — they currently
        # have no effect. Confirm whether they should feed into `_serialize`.
        self._image = ImageSource(image)
        self._fill_mode = fill_mode
    def _serialize(self) -> JsonDoc:
        # Prefer the hosted asset's URL; fall back to the plain URL source.
        image_url = (
            self._image._asset.url()
            if self._image._asset is not None
            else self._image._url
        )
        return {
            "type": "image",
            "imageUrl": image_url,
            "fillMode": self._fill_mode,
        }
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, ImageFill):
            return NotImplemented
        return self._image == other._image and self._fill_mode == other._fill_mode
    # Keep hash consistent with __eq__ (same fields).
    def __hash__(self) -> int:
        return hash((self._image, self._fill_mode))
import io
import mimetypes
from pathlib import Path
from typing import Optional, Tuple, Union
import aiohttp
from PIL.Image import Image
from . import assets
class ImageSource:
    """
    Normalizes the many ways an image can be specified (URL string, file
    path, PIL image, or another `ImageSource`) into either a plain URL or a
    hosted asset.
    """

    def __init__(
        self,
        image: "ImageLike",
        *,
        media_type: Optional[str] = None,
    ):
        if isinstance(image, ImageSource):
            # Copy constructor: share the other source's URL/asset.
            self._url = image._url
            self._asset = image._asset

        elif isinstance(image, str):
            # A plain URL. NOTE(review): `media_type` is ignored for URLs;
            # the type is taken from the HTTP response when fetching.
            self._url = image
            self._asset = None

        elif isinstance(image, Path):
            # A local file: host it as an asset. Guess the media type from
            # the file name if the caller didn't provide one.
            if media_type is None:
                media_type, _ = mimetypes.guess_type(image)

                if media_type is None:
                    raise ValueError(f"Could not guess MIME type for `{image}`")

            self._url = None
            self._asset = assets.HostedAsset(
                media_type,
                data=image,
            )

        else:
            # Assumed to be a PIL image: re-encode it as PNG and host the
            # bytes. NOTE(review): `media_type` is ignored on this path.
            buffer = io.BytesIO()
            image.save(buffer, format="PNG")

            self._url = None
            self._asset = assets.HostedAsset(
                "image/png",
                data=buffer.getvalue(),
            )

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, ImageSource):
            return NotImplemented

        return self._url == other._url and self._asset == other._asset

    def __hash__(self) -> int:
        return hash((self._url, self._asset))

    async def _try_fetch_as_blob(self) -> Tuple[bytes, str]:
        """
        Try to fetch the image as blob & media type. Raises a `ValueError` if
        fetching fails.
        """
        # URL: fetch over HTTP. (The original message said "favicon", but
        # this class is used for arbitrary images.)
        if self._url is not None:
            async with aiohttp.ClientSession() as session:
                async with session.get(self._url) as response:
                    if response.status != 200:
                        raise ValueError(f"Failed to fetch image from {self._url}.")

                    return await response.read(), response.content_type

        # Straight bytes
        assert self._asset is not None

        if isinstance(self._asset.data, bytes):
            return self._asset.data, self._asset.media_type

        # File. `FileNotFoundError` and `IOError` are both aliases/subclasses
        # of `OSError`, so a single clause covers the original tuple; chain
        # the cause for easier debugging.
        assert isinstance(self._asset.data, Path)

        try:
            return self._asset.data.read_bytes(), self._asset.media_type
        except OSError as err:
            raise ValueError(
                f"Failed to read image from {self._asset.data}"
            ) from err
ImageLike = Union[Path, Image, str, ImageSource] | /reflex_ui-0.2.0.tar.gz/reflex_ui-0.2.0/reflex/image_source.py | 0.860721 | 0.162347 | image_source.py | pypi |
from __future__ import annotations
import copy
import json
import re
from dataclasses import dataclass
from pathlib import Path
from typing import * # type: ignore
from uniserde import Jsonable, JsonDoc
from . import session
from .widgets import widget_metadata
# Public API of this module.
__all__ = [
    "ClientWidget",
    "ValidationError",
    "Validator",
]
@dataclass
class ClientWidget:
    """Mirror of a single widget instance as the client knows it."""

    id: int
    type: str
    state: JsonDoc

    @classmethod
    def from_json(
        cls,
        id: int,
        delta_state: JsonDoc,
        registered_html_widgets: Set[str],
    ) -> "ClientWidget":
        # Deep-copy so the `pop` below never mutates the caller's dict.
        delta_state = copy.deepcopy(delta_state)

        # Extract and validate the widget type.
        try:
            type = delta_state.pop("_type_")
        except KeyError:
            raise ValidationError(f"Widget with id `{id}` is missing `_type_` field")

        if not isinstance(type, str):
            raise ValidationError(f"Widget with id `{id}` has non-string type `{type}`")

        is_builtin = type in widget_metadata.CHILD_ATTRIBUTE_NAMES
        is_registered_html = type in registered_html_widgets
        if not (is_builtin or is_registered_html):
            raise ValidationError(f"Widget with id `{id}` has unknown type `{type}`")

        return cls(
            id=id,
            type=type,
            state=delta_state,
        )

    def _get_child_attribute_names(self) -> Iterable[str]:
        """Names of the state properties that hold child widget ids."""
        try:
            return widget_metadata.CHILD_ATTRIBUTE_NAMES[self.type]
        except KeyError:
            return tuple()  # TODO: How to get the children of HTML widgets?

    @property
    def non_child_containing_properties(
        self,
    ) -> JsonDoc:
        # Everything in the state that is *not* a child reference.
        child_attribute_names = self._get_child_attribute_names()
        return {
            name: value
            for name, value in self.state.items()
            if name not in child_attribute_names
        }

    @property
    def child_containing_properties(
        self,
    ) -> Dict[str, Union[None, int, List[int]]]:
        # Everything in the state that *is* a child reference.
        child_attribute_names = self._get_child_attribute_names()
        return {
            name: value
            for name, value in self.state.items()
            if name in child_attribute_names
        }

    @property
    def referenced_child_ids(self) -> Iterable[int]:
        """Yield the ids of every widget this one references as a child."""
        for value in self.child_containing_properties.values():
            if value is None:
                continue

            if isinstance(value, int):
                yield value
            else:
                assert isinstance(value, list), value
                yield from value
class ValidationError(Exception):
    """Raised when a message or the resulting widget tree is inconsistent."""
    pass
class Validator:
    """
    Maintains a mirror of the widget tree as the client sees it and checks
    every message passed between server and client against it, raising
    `ValidationError` when an inconsistency is found. Optionally dumps
    messages and the client state to JSON files for debugging.
    """

    def __init__(
        self,
        session_: session.Session,
        *,
        dump_directory_path: Optional[Path] = None,
    ):
        self.session = session_

        # Sanity-check the dump directory if one was provided. The attribute
        # itself is assigned unconditionally: `dump_message` and
        # `dump_client_state` test `self.dump_directory_path is None`, which
        # previously raised AttributeError when no path was passed because
        # the assignment only happened inside the `is not None` branch.
        if dump_directory_path is not None:
            assert dump_directory_path.exists(), dump_directory_path
            assert dump_directory_path.is_dir(), dump_directory_path
        self.dump_directory_path = dump_directory_path

        # Root of the client-side widget tree, once known.
        self.root_widget: Optional[ClientWidget] = None
        # All widgets known to the client, indexed by id.
        self.widgets_by_id: Dict[int, ClientWidget] = {}

        # HTML widgets must be registered with the frontend before use. This set
        # contains the ids (`HtmlWidget._unique_id`) of all registered widgets.
        self.registered_html_widgets: Set[str] = set(
            widget_metadata.CHILD_ATTRIBUTE_NAMES.keys()
        )

    def dump_message(
        self,
        msg: Jsonable,
        *,
        incoming: bool,
    ):
        """
        Dump the message to a JSON file.

        If no path is set in the validator, this function does nothing.
        """
        if self.dump_directory_path is None:
            return

        direction = "incoming" if incoming else "outgoing"
        path = self.dump_directory_path / f"message-{direction}.json"

        with open(path, "w", encoding="utf-8") as f:
            json.dump(msg, f, indent=4)

    def dump_client_state(
        self,
        widget: Optional[ClientWidget] = None,
        path: Optional[Path] = None,
    ) -> None:
        """
        Dump the client state to a JSON file.

        If no widget is specified, the root widget is used.

        If no path is used the Validator's `dump_client_state_path` is used. If
        no path is used and no path set in the validator, this function does
        nothing.
        """
        if path is None and self.dump_directory_path is not None:
            path = self.dump_directory_path / "client-state.json"

        if path is None:
            return

        if widget is None:
            assert self.root_widget is not None
            widget = self.root_widget

        # Explicit encoding, consistent with `dump_message`.
        with open(path, "w", encoding="utf-8") as f:
            json.dump(
                self.as_json(widget),
                f,
                indent=4,
                # The keys are intentionally in a legible order. Don't destroy
                # that.
                sort_keys=False,
            )

    def prune_widgets(self) -> None:
        """
        Remove all widgets which are not referenced directly or indirectly by
        the root widget.
        """
        # If there is no root widget, everybody is an orphan
        if self.root_widget is None:
            self.widgets_by_id.clear()
            return

        # Find all widgets which are referenced directly or indirectly by the
        # root widget
        visited_ids: Set[int] = set()

        to_do = [self.root_widget]

        while to_do:
            current = to_do.pop()

            # TODO Use this opportunity to detect cycles?
            if current.id in visited_ids:
                print(
                    f"Warning: Validator found a cycle in the widget tree involving widget with id `{current.id}`"
                )
                continue

            # Mark the current widget as visited
            visited_ids.add(current.id)

            # Chain to its children
            for child_id in current.referenced_child_ids:
                to_do.append(self.widgets_by_id[child_id])

        # Remove all superfluous widgets
        self.widgets_by_id = {
            id: widget for id, widget in self.widgets_by_id.items() if id in visited_ids
        }

    def as_json(self, widget: Optional[ClientWidget] = None) -> JsonDoc:
        """
        Return a JSON-serializable representation of the client state.
        """
        if widget is None:
            assert self.root_widget is not None
            widget = self.root_widget

        result = {
            "_type_": widget.type,
            "_id_": widget.id,
        }

        for name, value in widget.non_child_containing_properties.items():
            result[name] = value

        for name, value in widget.child_containing_properties.items():
            if value is None:
                result[name] = None
                continue

            if isinstance(value, int):
                result[name] = self.as_json(self.widgets_by_id[value])
                continue

            assert isinstance(value, list), value
            result[name] = [self.as_json(self.widgets_by_id[id]) for id in value]

        return result

    def handle_incoming_message(self, msg: Any) -> None:
        """
        Process a message passed from Client -> Server.

        This will update the `Validator`'s internal client state and validate
        the message, raising a `ValidationError` if any issues are detected.
        """
        # Delegate to the appropriate handler, if one exists for this method.
        try:
            method = msg["method"]
        except KeyError:
            return

        handler_name = f"_handle_incoming_{method}"

        try:
            handler = getattr(self, handler_name)
        except AttributeError:
            return

        handler(msg["params"])

    def handle_outgoing_message(self, msg: Any) -> None:
        """
        Process a message passed from Server -> Client.

        This will update the `Validator`'s internal client state and validate
        the message, raising a `ValidationError` if any issues are detected.
        """
        # Delegate to the appropriate handler, if one exists for this method.
        try:
            method = msg["method"]
        except KeyError:
            return

        handler_name = f"_handle_outgoing_{method}"

        try:
            handler = getattr(self, handler_name)
        except AttributeError:
            return

        handler(msg["params"])

    def _handle_outgoing_updateWidgetStates(self, msg: Any) -> None:
        # Dump the message, if requested
        self.dump_message(msg, incoming=False)

        # NOTE(review): `msg["deltaStates"]` keys come straight from the
        # message and may be strings rather than ints — confirm they are
        # normalized to the same key type used by `widgets_by_id` upstream.

        # Update the individual widget states
        for widget_id, delta_state in msg["deltaStates"].items():
            # Get the widget's existing state
            try:
                widget = self.widgets_by_id[widget_id]
            except KeyError:
                widget = ClientWidget.from_json(
                    widget_id,
                    delta_state,
                    self.registered_html_widgets,
                )
                self.widgets_by_id[widget_id] = widget
            else:
                delta_state = delta_state.copy()

                # A widget's `_type_` cannot be modified. This value is also
                # stored separately by `ClientWidget`, so make sure it never
                # makes it into the widget's state.
                try:
                    new_type = delta_state.pop("_type_")
                except KeyError:
                    pass
                else:
                    if new_type != widget.type:
                        raise ValidationError(
                            f"Attempted to modify the `_type_` for widget with id `{widget_id}` from `{widget.type}` to `{new_type}`"
                        ) from None

                # Update the widget's state
                widget.state.update(delta_state)

        # Update the root widget if requested
        if msg["rootWidgetId"] is not None:
            try:
                self.root_widget = self.widgets_by_id[msg["rootWidgetId"]]
            except KeyError:
                raise ValidationError(
                    f"Attempted to set root widget to unknown widget with id `{msg['rootWidgetId']}`"
                ) from None

        # If no root widget is known yet, this message has to contain one
        if self.root_widget is None:
            raise ValidationError(
                "Despite no root widget being known yet, an `UpdateWidgetStates` message was sent without a `root_widget_id`",
            )

        # Make sure no invalid widget references are present
        invalid_references = {}

        for widget in self.widgets_by_id.values():
            for child_id in widget.referenced_child_ids:
                if child_id not in self.widgets_by_id:
                    invalid_references.setdefault(widget.id, []).append(child_id)

        if invalid_references:
            raise ValidationError(
                f"Invalid widget references detected: {invalid_references}"
            )

        # Make sure all widgets in the session have had their session injected
        for widget in self.session._root_widget._iter_direct_and_indirect_children(
            include_self=True
        ):
            if widget._session_ is None:
                raise ValidationError(
                    f"Widget `{widget}` has not had its session injected"
                )

        # Prune the widget tree
        self.prune_widgets()

        # Look for any widgets which were sent in the message, but are not
        # actually used in the widget tree
        ids_sent = set(msg["deltaStates"].keys())
        ids_existing = set(self.widgets_by_id.keys())
        ids_superfluous = sorted(ids_sent - ids_existing)

        if ids_superfluous:
            print(
                f"Validator Warning: Message contained superfluous widget ids: {ids_superfluous}"
            )

        # Dump the client state if requested
        self.dump_client_state()

    def _handle_outgoing_evaluateJavascript(self, msg: Any):
        # Is this message registering a new widget class?
        match = re.search(r"window.widgetClasses\['(.*)'\]", msg["javaScriptSource"])

        if match is None:
            return

        # Remember the widget class as registered
        self.registered_html_widgets.add(match.group(1))
from __future__ import annotations
import enum
from dataclasses import dataclass
from typing import * # type: ignore
import reflex as rx
from .. import fundamental
from . import button, switch, text
# Public API of this module.
__all__ = [
    "AutoFormBuilder",
]
# Generic value type of a single form field.
T = TypeVar("T")
@dataclass(frozen=True)
class FormField(Generic[T]):
    """Describes a single input field of an auto-generated form."""
    # Display name of the field (snake_case; prettified for enum labels).
    name: str
    # Python type of the field's value; decides which input widget is built.
    type: Type[T]
    # Optional per-field validator: returns an error message, or `None` if
    # the value is acceptable.
    check: Optional[Callable[[T], Optional[str]]]
def prettify_name(name: str) -> str:
    """Turn a snake_case identifier into a human-readable title."""
    words = [word.title() for word in name.split("_")]
    return " ".join(words)
class AutoFormBuilder:
    """Builds a simple input form from a sequence of `FormField`s."""

    def __init__(
        self,
        fields: Iterable[FormField],
        check: Optional[Callable[[Dict[str, Any]], Optional[str]]] = None,
        *,
        spacing: float = 0.4,
    ):
        self.fields = tuple(fields)
        # TODO(review): `check` (and each field's own `check`) is stored but
        # never invoked — form validation is not wired up yet.
        self.check = check
        self.spacing = spacing

    def _build_input_field(self, field: FormField) -> rx.Widget:
        """Return an input widget appropriate for the field's type."""
        origin = get_origin(field.type)
        args = get_args(field.type)

        # `bool` -> `Switch`
        if field.type is bool:
            return switch.Switch()

        # `int` -> `NumberInput`
        if field.type is int:
            raise NotImplementedError("TODO: Support `NumberInput`")

        # `float` -> `NumberInput`
        if field.type is float:
            raise NotImplementedError("TODO: Support `NumberInput`")

        # `str` -> `TextInput`
        if field.type is str:
            return fundamental.TextInput()

        # `Literal` or `Enum` -> `Dropdown`
        #
        # Guard the `issubclass` call: `field.type` may be a typing construct
        # such as `Optional[int]` or `List[int]`, which is not a class and
        # would make `issubclass` raise TypeError instead of reaching the
        # friendly error below.
        is_enum = isinstance(field.type, type) and issubclass(field.type, enum.Enum)
        if origin is Literal or is_enum:
            if origin is Literal:
                mapping = {a: a for a in args}
            else:
                mapping = {prettify_name(f.name): f.value for f in field.type}

            return fundamental.Dropdown(mapping)

        # Unsupported type
        raise TypeError(f"{__class__.__name__} does not support type `{field.type}`")

    def build(self) -> rx.Widget:
        """Assemble the form: one labeled row per field plus a submit button."""
        rows: List[rx.Widget] = []

        # One row per field
        for field in self.fields:
            rows.append(
                fundamental.Row(
                    text.Text(field.name),
                    self._build_input_field(field),
                    spacing=self.spacing,
                )
            )

        # Add a submit button
        # TODO(review): the button has no press handler; submitting the form
        # currently does nothing.
        rows.append(button.MajorButton("Submit"))

        # Wrap everything in one container
        return fundamental.Column(
            *rows,
            spacing=self.spacing,
        )
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
from typing_extensions import Self
from uniserde import JsonDoc
from ... import fills
from ...color import Color
@dataclass(frozen=True)
class BoxStyle:
    """Immutable visual style for a box: fill, stroke, corners and shadow."""

    fill: fills.Fill
    stroke_color: Color
    stroke_width: float
    corner_radius: Tuple[float, float, float, float]
    shadow_color: Color
    shadow_radius: float
    shadow_offset_x: float
    shadow_offset_y: float

    def __init__(
        self,
        *,
        fill: fills.FillLike,
        stroke_color: Color = Color.BLACK,
        stroke_width: float = 0.0,
        corner_radius: Union[float, Tuple[float, float, float, float]] = 0.0,
        shadow_color: Color = Color.BLACK,
        shadow_radius: float = 0.0,
        shadow_offset_x: float = 0.0,
        shadow_offset_y: float = 0.0,
    ):
        # Promote plain colors to fills and scalar radii to per-corner tuples.
        normalized_fill = fills.Fill._try_from(fill)

        if isinstance(corner_radius, (int, float)):
            corner_radius = (corner_radius,) * 4

        # The dataclass is frozen, so write attributes through `vars(self)`
        # rather than regular attribute assignment.
        vars(self).update(
            fill=normalized_fill,
            stroke_color=stroke_color,
            stroke_width=stroke_width,
            corner_radius=corner_radius,
            shadow_color=shadow_color,
            shadow_radius=shadow_radius,
            shadow_offset_x=shadow_offset_x,
            shadow_offset_y=shadow_offset_y,
        )

    def replace(
        self,
        *,
        fill: Optional[fills.FillLike] = None,
        stroke_color: Optional[Color] = None,
        stroke_width: Optional[float] = None,
        corner_radius: Optional[Union[float, Tuple[float, float, float, float]]] = None,
        shadow_color: Optional[Color] = None,
        shadow_radius: Optional[float] = None,
        shadow_offset_x: Optional[float] = None,
        shadow_offset_y: Optional[float] = None,
    ) -> Self:
        """Return a copy of this style with the given attributes overridden."""
        if fill is not None:
            fill = fills.Fill._try_from(fill)

        if isinstance(corner_radius, (int, float)):
            corner_radius = (corner_radius,) * 4

        def pick(override, current):
            # Fall back to the current value when no override was given.
            return current if override is None else override

        return BoxStyle(
            fill=pick(fill, self.fill),
            stroke_color=pick(stroke_color, self.stroke_color),
            stroke_width=pick(stroke_width, self.stroke_width),
            corner_radius=pick(corner_radius, self.corner_radius),
            shadow_color=pick(shadow_color, self.shadow_color),
            shadow_radius=pick(shadow_radius, self.shadow_radius),
            shadow_offset_x=pick(shadow_offset_x, self.shadow_offset_x),
            shadow_offset_y=pick(shadow_offset_y, self.shadow_offset_y),
        )

    def _serialize(self) -> JsonDoc:
        """Serialize to the JSON structure expected by the frontend."""
        return {
            "fill": self.fill._serialize(),
            "strokeColor": self.stroke_color.rgba,
            "strokeWidth": self.stroke_width,
            "cornerRadius": self.corner_radius,
            "shadowColor": self.shadow_color.rgba,
            "shadowRadius": self.shadow_radius,
            "shadowOffset": (self.shadow_offset_x, self.shadow_offset_y),
        }
```diff
+ Searching for Pynecone? You are in the right repo. Pynecone has been renamed to Reflex. +
```
<div align="center">
<img src="https://raw.githubusercontent.com/reflex-dev/reflex/main/docs/images/reflex_dark.svg#gh-light-mode-only" alt="Reflex Logo" width="300px">
<img src="https://raw.githubusercontent.com/reflex-dev/reflex/main/docs/images/reflex_light.svg#gh-dark-mode-only" alt="Reflex Logo" width="300px">
<hr>
### **✨ Performant, customizable web apps in pure Python. Deploy in seconds. ✨**
[](https://badge.fury.io/py/reflex)


[](https://reflex.dev/docs/getting-started/introduction)
[](https://discord.gg/T5WSbC2YtQ)
</div>
---
[English](https://github.com/reflex-dev/reflex/blob/main/README.md) | [简体中文](https://github.com/reflex-dev/reflex/blob/main/docs/zh/zh_cn/README.md) | [繁體中文](https://github.com/reflex-dev/reflex/blob/main/docs/zh/zh_tw/README.md)
---
## ⚙️ Installation
Open a terminal and run (Requires Python 3.7+):
```bash
pip install reflex
```
## 🥳 Create your first app
Installing `reflex` also installs the `reflex` command line tool.
Test that the install was successful by creating a new project. (Replace `my_app_name` with your project name):
```bash
mkdir my_app_name
cd my_app_name
reflex init
```
This command initializes a template app in your new directory.
You can run this app in development mode:
```bash
reflex run
```
You should see your app running at http://localhost:3000.
Now you can modify the source code in `my_app_name/my_app_name.py`. Reflex has fast refreshes so you can see your changes instantly when you save your code.
## 🫧 Example App
Let's go over an example: creating an image generation UI around DALL·E. For simplicity, we just call the OpenAI API, but you could replace this with an ML model run locally.
<div align="center">
<img src="https://raw.githubusercontent.com/reflex-dev/reflex/main/docs/images/dalle.gif" alt="A frontend wrapper for DALL·E, shown in the process of generating an image." width="550" />
</div>
Here is the complete code to create this. This is all done in one Python file!
```python
import reflex as rx
import openai
openai.api_key = "YOUR_API_KEY"
class State(rx.State):
"""The app state."""
prompt = ""
image_url = ""
processing = False
complete = False
def get_image(self):
"""Get the image from the prompt."""
if self.prompt == "":
return rx.window_alert("Prompt Empty")
self.processing, self.complete = True, False
yield
response = openai.Image.create(prompt=self.prompt, n=1, size="1024x1024")
self.image_url = response["data"][0]["url"]
self.processing, self.complete = False, True
def index():
return rx.center(
rx.vstack(
rx.heading("DALL·E"),
rx.input(placeholder="Enter a prompt", on_blur=State.set_prompt),
rx.button(
"Generate Image",
on_click=State.get_image,
is_loading=State.processing,
width="100%",
),
rx.cond(
State.complete,
rx.image(
src=State.image_url,
height="25em",
width="25em",
)
),
padding="2em",
shadow="lg",
border_radius="lg",
),
width="100%",
height="100vh",
)
# Add state and page to the app.
app = rx.App()
app.add_page(index, title="reflex:DALL·E")
app.compile()
```
## Let's break this down.
### **Reflex UI**
Let's start with the UI.
```python
def index():
return rx.center(
...
)
```
This `index` function defines the frontend of the app.
We use different components such as `center`, `vstack`, `input`, and `button` to build the frontend. Components can be nested within each other
to create complex layouts. And you can use keyword args to style them with the full power of CSS.
Reflex comes with [60+ built-in components](https://reflex.dev/docs/library) to help you get started. We are actively adding more components, and it's easy to [create your own components](https://reflex.dev/docs/advanced-guide/wrapping-react).
### **State**
Reflex represents your UI as a function of your state.
```python
class State(rx.State):
"""The app state."""
prompt = ""
image_url = ""
processing = False
complete = False
```
The state defines all the variables (called vars) in an app that can change and the functions that change them.
Here the state is comprised of a `prompt` and `image_url`. There are also the booleans `processing` and `complete` to indicate when to show the circular progress and image.
### **Event Handlers**
```python
def get_image(self):
"""Get the image from the prompt."""
if self.prompt == "":
return rx.window_alert("Prompt Empty")
self.processing, self.complete = True, False
yield
response = openai.Image.create(prompt=self.prompt, n=1, size="1024x1024")
self.image_url = response["data"][0]["url"]
self.processing, self.complete = False, True
```
Within the state, we define functions called event handlers that change the state vars. Event handlers are the way that we can modify the state in Reflex. They can be called in response to user actions, such as clicking a button or typing in a text box. These actions are called events.
Our DALL·E app has an event handler, `get_image`, which gets this image from the OpenAI API. Using `yield` in the middle of an event handler will cause the UI to update. Otherwise the UI will update at the end of the event handler.
### **Routing**
Finally, we define our app.
```python
app = rx.App()
```
We add a page from the root of the app to the index component. We also add a title that will show up in the page preview/browser tab.
```python
app.add_page(index, title="DALL-E")
app.compile()
```
You can create a multi-page app by adding more pages.
## 📑 Resources
<div align="center">
📑 [Docs](https://reflex.dev/docs/getting-started/introduction) | 🗞️ [Blog](https://reflex.dev/blog) | 📱 [Component Library](https://reflex.dev/docs/library) | 🖼️ [Gallery](https://reflex.dev/docs/gallery) | 🛸 [Deployment](https://reflex.dev/docs/hosting/deploy)
</div>
## ✅ Status
Reflex launched in December 2022 with the name Pynecone.
As of July 2023, we are in the **Public Beta** stage.
- :white_check_mark: **Public Alpha**: Anyone can install and use Reflex. There may be issues, but we are working to resolve them actively.
- :large_orange_diamond: **Public Beta**: Stable enough for non-enterprise use-cases.
- **Public Hosting Beta**: _Optionally_, deploy and host your apps on Reflex!
- **Public**: Reflex is production ready.
Reflex has new releases and features coming every week! Make sure to :star: star and :eyes: watch this repository to stay up to date.
## Contributing
We welcome contributions of any size! Below are some good ways to get started in the Reflex community.
- **Join Our Discord**: Our [Discord](https://discord.gg/T5WSbC2YtQ) is the best place to get help on your Reflex project and to discuss how you can contribute.
- **GitHub Discussions**: A great way to talk about features you want added or things that are confusing/need clarification.
- **GitHub Issues**: These are an excellent way to report bugs. Additionally, you can try and solve an existing issue and submit a PR.
We are actively looking for contributors, no matter your skill level or experience.
## License
Reflex is open-source and licensed under the [Apache License 2.0](LICENSE).
| /reflex-0.2.5a1.tar.gz/reflex-0.2.5a1/README.md | 0.549399 | 0.929472 | README.md | pypi |
from app.core.repository import RepositoryData
from app.helpers.app_config import Config
import reflex as rx
class NavHelper:
    """Helpers that turn the configured navigation mapping into reflex links.

    All helpers are static and stateless; they read the navigation structure
    from ``Config.__navigation__()`` and build link components styled with
    the module-level ``nav_helper_css`` stylesheet.
    """

    @staticmethod
    def __get_repository__():
        """Build the repository widget (name plus scraped GitHub stats)."""
        data = RepositoryData().build()
        return data

    @staticmethod
    def __get_navigation_titles__() -> list:
        """Return the capitalized top-level section names, or [] if none."""
        navigation = Config.__navigation__()
        if not navigation:
            return []
        # Iterating a dict yields its keys, i.e. the section names.
        return [name.capitalize() for name in navigation]

    @staticmethod
    def __get_navigation_paths__(
        data: dict = Config.__navigation__(),
        parent: str = "/",
        paths=None,
    ) -> list:
        """Collect route paths ("/section/page") from the navigation mapping.

        Fix: the previous signature used a mutable default (``paths=[]``),
        so repeated calls kept appending to the same shared list.  A fresh
        list is now created per call when the caller does not supply one.
        """
        if paths is None:
            paths = []
        for key, value in data.items():
            if isinstance(value, str) and key == "home":
                paths.append("/")
            if isinstance(value, dict):
                for page in value.values():
                    # Strip a trailing ".py" from the configured page name.
                    paths.append(f"{parent}{key}/{page.split('.py')[0]}")
                # NOTE(review): only the first dict-valued section is
                # collected before breaking out -- confirm this is intended.
                break
        return paths

    @staticmethod
    def __get_nav_link__(
        title: str,
        route_to: str,
        size: str,
        color: str,
    ):
        """Return a styled ``rx.link`` wrapping an ``rx.text`` label.

        Fix: the previous implementation used ``nav_helper_css.copy()`` -- a
        *shallow* copy -- and then assigned ``style["text"]["font_size"]``,
        which mutated the shared module-level stylesheet for every later
        link.  A fresh text style is now built per link instead.
        """
        text_style = {
            **nav_helper_css["text"],
            "font_size": size,
            "color": color,
        }
        return rx.link(
            rx.text(title, style=text_style),
            href=route_to,
            style=nav_helper_css["link"],
        )

    @staticmethod
    def __get_left_navigation__(
        ref: str,
        data: dict = Config.__navigation__(),
    ):
        """Return [title, route] pairs for the section whose key matches ``ref``."""
        paths: list = []
        for key, value in data.items():
            if isinstance(value, dict) and key == ref.lower():
                for title, page in value.items():
                    paths.append(
                        [
                            title.capitalize(),
                            f"/{ref.lower()}/{page.split('.py')[0]}",
                        ]
                    )
        return paths

    @staticmethod
    def __set_left_navigation__(inc_paths: list) -> rx.Component:
        """Build the left-hand navigation vstack from [title, route] pairs."""
        nav: rx.Component = rx.vstack(style=nav_helper_css["nav"])
        if inc_paths:
            nav.children = [
                NavHelper.__get_nav_link__(
                    title=title.capitalize(),
                    route_to=route,
                    size=13,
                    color=None,
                )
                for title, route in inc_paths
            ]
        return nav
# Styles consumed by NavHelper.  The "%s" placeholders in "text" are
# defaults that __get_nav_link__ replaces with a concrete font size and
# color for each individual link.
nav_helper_css: dict = {
    # Vertical stack that holds the left-navigation links.
    "nav": {
        "align_items": "start",
    },
    # The rx.link wrapper around each navigation entry.
    "link": {
        "_hover": {"text_decoration": "None"},
        "padding": "0.25rem 0rem",
    },
    # The rx.text label inside each link.
    "text": {
        "font_size": "%s",
        "font_weight": "500",
        "color": "%s",
        "opacity": "0.85",
        "transition": "opacity 350ms ease",
        "_hover": {"opacity": "1"},
    },
}
from app.helpers.app_config import Config
# Width and height (in pixels) of each social-media icon in the footer.
SOCIAL_SIZE = 19
# CSS filter that inverts the icon colors so the black icon assets render white.
SOCIAL_COLOR = r"filter: brightness(0) invert(1)"
# main base css stylesheet for preconfigured web application
# Main stylesheet for the pre-configured web application.  List values hold
# one entry per breakpoint, smallest screen first -- presumably reflex's
# responsive style arrays; confirm against the reflex styling docs.
base_css: dict = {
    # Applied to the application as a whole.
    "app": {
        "font_family": Config.__theme_font__(),
    },
    # Outermost page container.
    "base": {
        "width": "100%",
        "min_height": "100vh",
        "spacing": "0rem",
        "padding": "0",
        "margin": "0",
    },
    # Sticky left-hand column.
    "left": {
        "width": "20%",
        "top": "0",
        "position": "sticky",
        "padding_top": "5rem",
        "align_items": "start",
        "padding_left": ["", "", "", "4rem", "10rem"],
        "transition": "all 550ms ease",
    },
    # Central content column; takes the full width on small screens.
    "middle": {
        "width": ["100%", "100%", "100%", "60%", "60%"],
        "top": "0",
        "position": "block",
        "padding_top": ["2rem", "2rem", "2rem", "5rem", "5rem"],
        "align_items": "start",
        "padding_left": ["2rem", "2rem", "2rem", "2rem", "2rem"],
        "padding_right": ["2rem", "2rem", "2rem", "2rem", "2rem"],
        "padding_bottom": "6rem",
        "transition": "all 550ms ease",
        "min_height": "100vh",
    },
    # Sticky right-hand column; zero width on small screens.
    "right": {
        "width": ["0%", "0%", "0%", "20%", "20%"],
        "top": "0",
        "position": "sticky",
        "padding_top": "5rem",
        "align_items": ["end", "end", "end", "start", "start"],
        "padding_right": ["1rem", "1rem", "1rem", "", ""],
        "transition": "all 550ms ease",
    },
    # Top navigation bar and its child elements.
    "header": {
        "main": {
            "width": "100%",
            "height": "50px",
            "position": "sticky",
            "bg": Config.__theme_primary__(),
            "box_shadow": "0 3px 6px 0 rgba(0, 0, 0, 0.5)",
            "transition": "height 350ms ease",
            "top": "0",
            "z_index": "2",
        },
        "icon": {
            "font_size": "xl",
            "cursor": "pointer",
            "color": "white",
        },
        "navigation": {
            "align_items": "end",
            "transition": "opacity 500ms ease 500ms",
        },
        "link_text": {
            "size": "s",
            "padding_top": "0.3rem",
            "color": "white",
            "font_weight": "semibold",
        },
        "site_name": {
            "font_size": ["100%", "115%", "130%", "135%", "150%"],
            "color": "white",
            "transition": "all 550ms ease",
            "opacity": "1",
            "_hover": {"opacity": "0.85"},
            "padding_right": "3.5rem",
        },
        # Header gutter used on wide screens.
        "max_header": {
            "width": "100%",
            "padding_left": ["", "", "", "4rem", "10rem"],
            "padding_right": ["", "", "", "4rem", "10rem"],
            "transition": "all 550ms ease",
        },
        # Header gutter used on narrow screens.
        "min_header": {
            "width": "100%",
            "padding_left": ["1rem", "1rem", "0.5rem", "", ""],
            "padding_right": ["1rem", "1rem", "0.5rem", "", ""],
            "transition": "all 550ms ease",
        },
    },
    # Page footer and its social-media icons.
    "footer": {
        "style": {
            "width": "100%",
            "height": ["105px", "75px", "65px", "65px", "65px"],
            "position": "sticky",
            "bg": "#15171b",
            "transition": "height 350ms ease",
            "top": "0",
            "overflow": "hidden",
        },
        # Raw <img> HTML snippets; SOCIAL_SIZE sets the dimensions and
        # SOCIAL_COLOR applies the white-on-dark inversion filter.
        "socials": {
            "github": f"<img width='{SOCIAL_SIZE}' height='{SOCIAL_SIZE}' src='https://img.icons8.com/material-outlined/24/github.png' style='{SOCIAL_COLOR}';/>",  # noqa: E501
            "twitter": f"<img width='{SOCIAL_SIZE}' height='{SOCIAL_SIZE}' src='https://img.icons8.com/ios-filled/24/twitter.png' style='{SOCIAL_COLOR}';/>",  # noqa: E501
            "youtube": f"<img width='{SOCIAL_SIZE}' height='{SOCIAL_SIZE}' src='https://img.icons8.com/ios-filled/24/youtube.png' style='{SOCIAL_COLOR}';/>",  # noqa: E501
            "mastodon": f"<img width='{SOCIAL_SIZE}' height='{SOCIAL_SIZE}' src='https://img.icons8.com/windows/24/mastodon.png' style='{SOCIAL_COLOR}';/>",  # noqa: E501
            "discord": f"<img width='{SOCIAL_SIZE}' height='{SOCIAL_SIZE}' src='https://img.icons8.com/ios-filled/24/discord.png' style='{SOCIAL_COLOR}';/>",  # noqa: E501
        },
    },
    # Slide-out drawer styles.
    "drawer": {
        "heading": {
            "width": "100%",
            "height": "100px",
            "align_items": "end",
            "bg": Config.__theme_primary__(),
            "padding_left": "1rem",
            "padding_bottom": "1rem",
            "transition": "all 550ms ease",
            "color": "white",
        },
        "repo": {
            "width": "100%",
            "height": "45px",
            "bg": Config.__theme_primary__(),
            "padding_left": "1rem",
            "transition": "all 550ms ease",
        },
        "router": {
            "align_items": "center",
            "width": "100%",
            "cursor": "pointer",
            "opacity": "0.8",
            "_hover": {"opacity": "1"},
        },
    },
}
} | /reflexify-0.0.8.tar.gz/reflexify-0.0.8/app/styles/_base.py | 0.584508 | 0.263002 | _base.py | pypi |
def _admonition_style(accent: str, light_rgb: str, dark_rgb: str) -> dict:
    """Build the style mapping for one admonition flavour.

    The six admonition styles were previously six near-identical literal
    dicts differing only in three values; this helper removes that
    duplication while producing exactly the same mapping.

    accent    -- hex color used for the border and the icon
    light_rgb -- "R, G, B" triplet for the light-mode background and the
                 light-mode hover shadow
    dark_rgb  -- "R, G, B" triplet for the dark-mode background and the
                 dark-mode hover shadow
    """
    return {
        "body": {
            "transition": "all 550ms ease",
            "_light": {
                "border": f"0.055rem solid {accent}",
                "_hover": {
                    "box_shadow": f"0px 0.75px 4px 4px rgba({light_rgb}, 0.85)",
                },
            },
            "_dark": {
                "border": f"0.055rem solid {accent}",
                "_hover": {
                    "box_shadow": f"0px 0.75px 4px 4px rgba({dark_rgb}, 0.65)",
                },
            },
        },
        "header": {
            "_light": {
                "background": f"rgba({light_rgb}, 1)",
            },
            "_dark": {
                "background": f"rgba({dark_rgb}, 0.65)",
            },
        },
        "icon": {"color": accent},
    }


# One entry per admonition type; each differs only in its accent color and
# its light/dark background color triplets.
admonition_css: dict = {
    "info": _admonition_style("#52b6d1", "231, 248, 251", "41, 63, 72"),
    "warning_two": _admonition_style("#f09737", "253, 244, 231", "65, 58, 55"),
    "close": _admonition_style("#ec5f59", "252, 238, 237", "64, 52, 62"),
    "calendar": _admonition_style("#5688f7", "237, 243, 254", "49, 56, 80"),
    "question": _admonition_style("#84db46", "241, 252, 233", "55, 66, 59"),
    "check": _admonition_style("#5ac561", "233, 248, 238", "46, 62, 64"),
}
import reflex as rx
from bs4 import BeautifulSoup
import httpx
from app.helpers.app_config import Config
# White-tinted icons (price tag, star, code fork) rendered next to the
# scraped repository counters.  Order appears to correspond to the
# span_elements list in RepositoryData.get_repository_data() -- confirm.
icon_list = [
    "<img width='10' height='10' src='https://img.icons8.com/ios-filled/50/price-tag.png' style='filter: brightness(0) invert(1);' />",
    "<img width='10' height='10' src='https://img.icons8.com/ios-filled/50/star--v1.png' style='filter: brightness(0) invert(1);'/>",
    "<img width='10' height='10' src='https://img.icons8.com/ios/50/code-fork.png' style='filter: brightness(0) invert(1);'/>",
]
class RepositoryData:
    """Builds the header widget showing the repository name and GitHub stats.

    When a repository URL is configured, the repository page is scraped once
    at construction time for the release/star/fork counters.
    """

    def __init__(self):
        # Outer horizontal container holding the git icon and the text column.
        self.rx_repo_data = rx.hstack(
            spacing="1.15rem",
            cursor="pointer",
        )
        self.git_repo_name = rx.hstack(
            rx.text(Config.__repo_name__(), color="white", font_weight="semibold"),
        )
        if Config.__repo_url__():
            self.git_icon = rx.html(
                "<img width='24' height='24' src='https://img.icons8.com/ios-filled/50/000000/git.png' style='filter: brightness(0) invert(1);'/>"
            )
            self.git_repo_data = self.get_repository_data()
        else:
            # No repository configured: render empty placeholders.
            self.git_icon = rx.container()
            self.git_repo_data = rx.hstack()
        self.git_data = rx.vstack(
            self.git_repo_name,
            self.git_repo_data,
            spacing="0rem",
            align_items="start",
        )

    def get_repository_data(self):
        """Scrape the repository page and return an hstack of counters.

        The CSS classes below appear to target GitHub's latest-tag, star and
        fork counters (matching ``icon_list`` order) -- confirm if GitHub's
        markup changes.  Counters whose span is missing are simply skipped.
        """
        temp_repo_data = rx.hstack()
        span_elements = [
            "css-truncate css-truncate-target text-bold mr-2",
            "Counter js-social-count",
            "Counter",
        ]
        try:
            with httpx.Client() as client:
                response = client.get(Config.__repo_url__())
                data = response.content
        except httpx.HTTPError:
            # Fix: a network failure previously propagated out of __init__
            # and broke the whole page build; degrade to an empty counter
            # row instead.
            return temp_repo_data
        soup = BeautifulSoup(data, "html.parser")
        for icon, span_class in zip(icon_list, span_elements):
            span_element = soup.find("span", span_class)
            if span_element is not None:
                txt = span_element.text.strip()
                temp_repo_data.children.append(
                    rx.hstack(
                        rx.html(icon),
                        rx.text(txt, color="white", font_size=11),
                        spacing="0.35rem",
                    )
                )
        return temp_repo_data

    def build(self):
        """Assemble the widget and wrap it in a tooltip linking to the repo."""
        self.rx_repo_data.children.append(self.git_icon)
        self.rx_repo_data.children.append(self.git_data)
        return rx.tooltip(
            rx.link(
                self.rx_repo_data,
                href=Config.__repo_url__(),
                _hover={"text_decoration": "None"},
            ),
            label="Go to repository",
        )
[](https://github.com/Impact-I/reFlutter/stargazers)
<p align="center"><img src="https://user-images.githubusercontent.com/87244850/135659542-22bb8496-bf26-4e25-b7c1-ffd8fc0cea10.png" width="75%"/></p>
#
**Read more on the blog:** https://swarm.ptsecurity.com/fork-bomb-for-flutter/
This framework helps with reverse engineering of Flutter apps using a patched version of the Flutter library, which is already compiled and ready for app repacking. This library has a modified snapshot deserialization process that allows you to perform dynamic analysis in a convenient way.
Key features:
- `socket.cc` is patched for traffic monitoring and interception;
- `dart.cc` is modified to print classes, functions and some fields;
- display absolute code offset for functions
- contains minor changes for successful compilation;
- if you would like to implement your own patches, manual Flutter code changes are supported using a specially crafted `Dockerfile`
### Supported engines
- Android: arm64, arm32;
- iOS: arm64;
- Release: Stable, Beta
### Install
```
# Linux, Windows, MacOS
pip3 install reflutter==0.7.7
```
### Usage
```console
impact@f:~$ reflutter main.apk
Please enter your Burp Suite IP: <input_ip>
SnapshotHash: 8ee4ef7a67df9845fba331734198a953
The resulting apk file: ./release.RE.apk
Please sign the apk file
Configure Burp Suite proxy server to listen on *:8083
Proxy Tab -> Options -> Proxy Listeners -> Edit -> Binding Tab
Then enable invisible proxying in Request Handling Tab
Support Invisible Proxying -> true
impact@f:~$ reflutter main.ipa
```
### Traffic interception
You need to specify the IP of your Burp Suite Proxy Server located in the same network where the device with the flutter application is. Next, you should configure the Proxy in `BurpSuite -> Listener Proxy -> Options tab`
- Add port: `8083`
- Bind to address: `All interfaces`
- Request handling: Support invisible proxying = `True`
<p align="center"><img src="https://user-images.githubusercontent.com/87244850/135753172-20489ef9-0759-432f-b2fa-220607e896b8.png" width="84%"/></p>
You don't need to install any certificates. On an Android device, you don't need root access as well. reFlutter also allows to bypass some of the flutter certificate pinning implementations.
### Usage on Android
The resulting apk must be aligned and signed. I use [uber-apk-signer](https://github.com/patrickfav/uber-apk-signer/releases/tag/v1.2.1)
```java -jar uber-apk-signer.jar --allowResign -a release.RE.apk```.
To see which code is loaded through DartVM, you need to run the application on the device. Note that you must manually find the value of `_kDartIsolateSnapshotInstructions` (e.g. 0xB000) using a binary search. reFlutter writes the dump to the root folder of the application and sets `777` permissions to the file and folder. You can pull the file with the adb command
```console
impact@f:~$ adb -d shell "cat /data/data/<PACKAGE_NAME>/dump.dart" > dump.dart
```
<details>
<summary>file contents</summary>
```dart
Library:'package:anyapp/navigation/DeepLinkImpl.dart' Class: Navigation extends Object {
String* DeepUrl = anyapp://evil.com/ ;
Function 'Navigation.': constructor. (dynamic, dynamic, dynamic, dynamic) => NavigationInteractor {
Code Offset: _kDartIsolateSnapshotInstructions + 0x0000000000009270
}
Function 'initDeepLinkHandle':. (dynamic) => Future<void>* {
Code Offset: _kDartIsolateSnapshotInstructions + 0x0000000000412fe8
}
Function '_navigateDeepLink@547106886':. (dynamic, dynamic, {dynamic navigator}) => void {
Code Offset: _kDartIsolateSnapshotInstructions + 0x0000000000002638
}
}
Library:'package:anyapp/auth/navigation/AuthAccount.dart' Class: AuthAccount extends Account {
PlainNotificationToken* _instance = sentinel;
Function 'getAuthToken':. (dynamic, dynamic, dynamic, dynamic) => Future<AccessToken*>* {
Code Offset: _kDartIsolateSnapshotInstructions + 0x00000000003ee548
}
Function 'checkEmail':. (dynamic, dynamic) => Future<bool*>* {
Code Offset: _kDartIsolateSnapshotInstructions + 0x0000000000448a08
}
Function 'validateRestoreCode':. (dynamic, dynamic, dynamic) => Future<bool*>* {
Code Offset: _kDartIsolateSnapshotInstructions + 0x0000000000412c34
}
Function 'sendSmsRestorePassword':. (dynamic, dynamic) => Future<bool*>* {
Code Offset: _kDartIsolateSnapshotInstructions + 0x00000000003efb88
}
}
```
</details>
### Usage on iOS
Use the IPA file created after the execution of `reflutter main.ipa` command. To see which code is loaded through DartVM, you need to run the application on the device. reFlutter will print the dump file path to the Xcode console logs with the reFlutter tag
`Current working dir: /private/var/mobile/Containers/Data/Application/<UUID>/dump.dart`
Next, you will need to pull the file from the device
<p align="center"><img src="https://user-images.githubusercontent.com/87244850/135860648-a13ba3fd-93d2-4eab-bd38-9aa775c3178f.png" width="100%"/></p>
### Frida
The resulting offset from the dump can be used in the frida [script](https://github.com/Impact-I/reFlutter/blob/main/frida.js)
```
frida -U -f <package> -l frida.js --no-pause
```
To get value for `_kDartIsolateSnapshotInstructions` you can use `readelf -Ws libapp.so` Where is the value you need in the `Value` field
### To Do
- [x] Display absolute code offset for functions;
- [ ] Extract more strings and fields;
- [x] Add socket patch;
- [ ] Extend engine support to Debug using Fork and Github Actions;
- [ ] Improve detection of `App.framework` and `libapp.so` inside zip archive
### Build Engine
The engines are built using [reFlutter](https://github.com/Impact-I/reFlutter/blob/main/.github/workflows/main.yml) in [Github Actions](https://github.com/Impact-I/reFlutter/actions) to build the desired version, commits and snapshot hashes are used from this [table](https://github.com/Impact-I/reFlutter/blob/main/enginehash.csv).
The hash of the snapshot is extracted from ```storage.googleapis.com/flutter_infra_release/flutter/<hash>/android-arm64-release/linux-x64.zip```
<details>
<summary>release</summary>
[](https://github.com/Impact-I/reFlutter/actions)
</details>
### Custom Build
If you would like to implement your own patches, manual Flutter code change is supported using specially crafted [Docker](https://hub.docker.com/r/ptswarm/reflutter)
```sudo docker pull ptswarm/reflutter```
```
# Linux, Windows
EXAMPLE BUILD ANDROID ARM64:
sudo docker run -e WAIT=300 -e x64=0 -e arm=0 -e HASH_PATCH=<Snapshot_Hash> -e COMMIT=<Engine_commit> --rm -iv${PWD}:/t ptswarm/reflutter
FLAGS:
-e x64=0 <disables building for x64 architecture, use to reduce building time>
-e arm=0 <disables building for arm architecture, use to reduce building time>
-e WAIT=300 <the amount of time in seconds you need to edit source code>
-e HASH_PATCH=[Snapshot_Hash] <here you need to specify snapshot hash which matches the engine_commit line of enginehash.csv table best. It is used for proper patch search in reFlutter and for successful compilation>
-e COMMIT=[Engine_commit] <here you specify commit for your engine version, take it from enginehash.csv table or from flutter/engine repo>
```
| /reflutter-0.7.7.tar.gz/reflutter-0.7.7/README.md | 0.452294 | 0.866133 | README.md | pypi |
# Reformat-gherkin
[](https://github.com/ducminh-phan/reformat-gherkin/actions/workflows/test.yml)
[](https://coveralls.io/github/ducminh-phan/reformat-gherkin?branch=master)
[](https://codeclimate.com/github/ducminh-phan/reformat-gherkin/maintainability)
[](https://www.codacy.com/app/ducminh-phan/reformat-gherkin)
[](https://opensource.org/licenses/MIT)
[](https://pypi.org/project/reformat-gherkin/)
[](https://github.com/python/black)
## Table of Contents
- [About](#about)
- [Installation](#installation)
- [Usage](#usage)
- [Version control integration](#version-control-integration)
- [Acknowledgements](#acknowledgements)
## About
Reformat-gherkin automatically formats Gherkin files. It ensures a consistent
look regardless of who wrote the file.
It can be used either as a command-line tool, or a
[pre-commit](https://pre-commit.com/) hook.
## Installation
Install reformat-gherkin using [pip](https://pypi.org/project/pip/).
```bash
pip install reformat-gherkin
```
It requires [Python 3.7+](https://www.python.org/downloads/) to run.
## Usage
To get started straight away:
```bash
reformat-gherkin {source_file_or_directory}
```
You should get good results without specifying any options, as reformat-gherkin
uses sensible defaults.
### Command-line options
You can list the available options by running `reformat-gherkin --help`.
```text
Usage: reformat-gherkin [OPTIONS] [SRC]...
Reformat the given SRC files and all .feature files in SRC folders. If -
is passed as a file, reformat stdin and print the result to stdout.
Options:
--check Don't write the files back, just return the
status. Return code 0 means nothing would
change. Return code 1 means some files would
be reformatted. Return code 123 means there
was an internal error.
-a, --alignment [left|right] Specify the alignment of step keywords
(Given, When, Then,...). If specified, all
statements after step keywords are left-
aligned, spaces are inserted before/after
the keywords to right/left align them. By
default, step keywords are left-aligned, and
there is a single space between the step
keyword and the statement.
-n, --newline [LF|CRLF] Specify the line separators when formatting
files inplace. If not specified, line
separators are preserved.
--fast / --safe If --fast given, skip the sanity checks of
file contents. [default: --safe]
--single-line-tags / --multi-line-tags
If --single-line-tags given, output
consecutive tags on one line. If --multi-
line-tags given, output one tag per line.
[default: --single-line-tags]
--tab-width INTEGER Specify the number of spaces per
indentation-level. [default: 2]
--use-tabs Indent lines with tabs instead of spaces.
--config FILE Read configuration from FILE.
--version Show the version and exit.
--help Show this message and exit.
```
Reformat-gherkin is a well-behaved Unix-style command-line tool:
- it does nothing if no sources are passed to it;
- it will read from standard input and write to standard output if - is used as the filename;
- it only outputs messages to users on standard error;
- it exits with code 0 unless an internal error occurred (or --check was used).
### Config file
Reformat-gherkin can read project-specific default values for its command line
options from a `.reformat-gherkin.yaml` file.
By default, `reformat-gherkin` looks for the config file starting from the
common base directory of all files and directories passed on the command line.
If it's not there, it looks in parent directories. It stops looking when it
finds the file, or a .git directory, or a .hg directory, or the root of the file
system, whichever comes first.
Example config file:
```yaml
check: False
alignment: left
tab_width: 4
```
## Version control integration
You can integrate reformat-gherkin into your version control workflow by using
[pre-commit](https://pre-commit.com/). Once you have installed pre-commit, add
this to the `.pre-commit-config.yaml` file in your repository:
```text
repos:
- repo: https://github.com/ducminh-phan/reformat-gherkin
rev: stable
hooks:
- id: reformat-gherkin
```
Then run `pre-commit install` and you're ready to go.
## Acknowledgements
This project is inspired by [black](https://github.com/psf/black). Some
functions are taken from `black`'s source code.
| /reformat-gherkin-3.0.1.tar.gz/reformat-gherkin-3.0.1/README.md | 0.669096 | 0.8989 | README.md | pypi |
from pathlib import Path
from typing import Iterable, Optional
import click
import yaml
CONFIG_FILE = ".reformat-gherkin.yaml"
SYSTEM_ROOT = Path("/").resolve()


def find_project_root(srcs: Iterable[str]) -> Path:
    """Locate the project root for the given sources.

    The root is the first ancestor of the sources' common base directory
    that contains a ``.git`` directory, a ``.hg`` directory, or a
    ``.reformat-gherkin.yaml`` file.  With no sources, or when no marker is
    found anywhere up the tree, the file-system root is returned.
    """
    if not srcs:
        return SYSTEM_ROOT

    common_base = min(Path(src).resolve() for src in srcs)
    if common_base.is_dir():
        # Append a dummy file so that ``parents`` below also yields the
        # common base directory itself.
        common_base /= "dummy-file"

    directory = SYSTEM_ROOT
    for directory in common_base.parents:
        has_vcs_marker = (directory / ".git").is_dir() or (directory / ".hg").is_dir()
        if has_vcs_marker or (directory / CONFIG_FILE).is_file():
            return directory

    return directory  # pragma: no cover
def read_config_file(
    ctx: click.Context,
    _: click.Parameter,
    value: Optional[str],
) -> Optional[str]:
    """Load defaults from ".reformat-gherkin.yaml" into the click context.

    When `value` is not given, the configuration file is searched for
    starting from the project root of the sources on the command line.
    Returns the path of the configuration file that was successfully read,
    or None when there is none.
    """
    if not value:
        candidate = find_project_root(ctx.params.get("src", ())) / CONFIG_FILE
        if not candidate.is_file():
            return None
        value = str(candidate.resolve())

    try:
        with open(value, "r") as f:
            config = yaml.safe_load(f)
    except (yaml.YAMLError, OSError) as e:
        raise click.FileError(
            filename=value,
            hint=f"Error reading configuration file: {e}",
        )

    if not config:
        return None

    if ctx.default_map is None:
        ctx.default_map = {}

    # Normalise option names ("--tab-width" -> "tab_width") before merging.
    normalized = {
        key.replace("--", "").replace("-", "_"): val for key, val in config.items()
    }
    ctx.default_map.update(normalized)  # type: ignore # bad types in .pyi

    return value
import sys
import traceback
from io import TextIOWrapper
from pathlib import Path
from typing import BinaryIO, Iterable, Iterator, Set, Tuple, Union
from .ast_node import GherkinDocument
from .errors import (
BaseError,
EmptySources,
EquivalentError,
InternalError,
NothingChanged,
StableError,
)
from .formatter import LineGenerator
from .options import NewlineMode, Options, WriteBackMode
from .parser import parse
from .report import Report
from .utils import decode_stream, diff, dump_to_file, err, open_stream_or_path
# Issue tracker for reformat-gherkin (usage is outside this excerpt).
REPORT_URL = "https://github.com/ducminh-phan/reformat-gherkin/issues"

# Concrete line separator for each explicit newline option.  Options absent
# from this mapping fall back to the file's detected newline via dict.get()
# in reformat_stream_or_path().
NEWLINE_FROM_OPTION = {
    NewlineMode.CRLF: "\r\n",
    NewlineMode.LF: "\n",
}
def find_sources(src: Iterable[str]) -> Set[Path]:
    """Collect the set of files to reformat from the given paths.

    Directories are searched recursively for ``*.feature`` files; files
    are accepted as-is regardless of extension.  Invalid paths are
    reported and skipped.
    """
    collected: Set[Path] = set()

    for entry in src:
        resolved = Path(entry).resolve()
        if resolved.is_dir():
            collected.update(resolved.rglob("*.feature"))
        elif resolved.is_file():
            # An explicitly named file is taken whatever its extension.
            collected.add(resolved)
        else:  # pragma: no cover
            err(f"Invalid path: {entry}")

    return collected
def reformat(src: Tuple[str], report: Report, *, options: Options):
    """Reformat every given source, recording each outcome in `report`.

    "-" among the sources means "read stdin"; all other entries are files
    or directories resolved through find_sources.  Raises EmptySources
    when there is nothing at all to process.
    """
    use_stdin = "-" in src
    sources = find_sources(item for item in src if item != "-")

    if not sources and not use_stdin:
        raise EmptySources

    if use_stdin:
        report.done("stdin", reformat_stdin(options=options))

    for path in sources:
        try:
            report.done(str(path), reformat_single_file(path, options=options))
        except Exception as e:
            # Record the failure but keep processing the remaining files.
            report.failed(path, str(e))
def reformat_stdin(*, options: Options) -> bool:
    """Reformat stdin, printing the result to stdout in in-place mode."""
    if options.write_back == WriteBackMode.INPLACE:
        output = sys.stdout.buffer
    else:
        output = None
    return reformat_stream_or_path(
        sys.stdin.buffer,
        output,
        force_write=True,
        options=options,
    )
def reformat_single_file(path: Path, *, options: Options) -> bool:
    """Reformat one file on disk, writing back only in in-place mode."""
    if options.write_back == WriteBackMode.INPLACE:
        destination = path
    else:
        destination = None
    return reformat_stream_or_path(path, destination, options=options)
def reformat_stream_or_path(
    in_stream_or_path: Union[BinaryIO, Path],
    out_stream_or_path: Union[None, BinaryIO, Path],
    *,
    force_write: bool = False,
    options: Options,
) -> bool:
    """Reformat one input (binary stream or path), optionally writing the result.

    Returns True when the result differs from the input, either in content or
    in line separators. With `force_write=True` (the stdin case) the output is
    written even when nothing changed, so the document is always emitted.
    """
    with open_stream_or_path(in_stream_or_path, "rb") as in_stream:
        src_contents, encoding, existing_newline = decode_stream(in_stream)
    # Fall back to the file's own newline when no explicit mode was requested.
    newline = NEWLINE_FROM_OPTION.get(options.newline, existing_newline)
    newline_changed = newline != existing_newline
    content_changed = True
    try:
        dst_contents = format_file_contents(src_contents, options=options)
    except NothingChanged:
        content_changed = False
        dst_contents = src_contents
    will_write = force_write or content_changed or newline_changed
    if will_write and out_stream_or_path is not None:
        with open_stream_or_path(out_stream_or_path, "wb") as out_stream:
            # Re-encode using the input's encoding and the chosen newline.
            tiow = TextIOWrapper(out_stream, encoding=encoding, newline=newline)
            tiow.write(dst_contents)
            # Ensures that the underlying stream is not closed when the
            # TextIOWrapper is garbage collected. We don't want to close a
            # stream that was passed to us.
            tiow.detach()
    return content_changed or newline_changed
def format_file_contents(src_contents: str, *, options: Options) -> str:
    """
    Reformat the contents of a file and return new contents. Raise NothingChanged
    if the contents were not changed after reformatting.

    If `options.fast` is False, additionally confirm that the reformatted file is
    valid by calling :func:`assert_equivalent` and :func:`assert_stable` on it.
    """
    if not src_contents.strip():
        raise NothingChanged
    dst_contents = format_str(src_contents, options=options)
    if dst_contents == src_contents:
        raise NothingChanged
    if options.fast:
        return dst_contents
    # Safe mode: verify the output parses to the same AST and is idempotent.
    assert_equivalent(src_contents, dst_contents)
    assert_stable(src_contents, dst_contents, options=options)
    return dst_contents
def format_str(src_contents: str, *, options: Options) -> str:
    """Parse `src_contents` and render it back as reformatted Gherkin text."""
    document = parse(src_contents)
    generator = LineGenerator(
        document,
        options.step_keyword_alignment,
        options.tag_line_mode,
        options.indent,
    )
    return "\n".join(generator.generate())
def assert_equivalent(src: str, dst: str) -> None:
    """
    Raise EquivalentError if `src` and `dst` aren't equivalent.
    """
    def _v(ast: GherkinDocument) -> Iterator[str]:
        """
        Simple visitor generating strings to compare ASTs by content
        """
        for node in ast:
            yield repr(node)
    src_ast = parse(src)
    # If the reformatted output cannot even be parsed, that is an internal bug:
    # dump the traceback plus the offending output to a log file for the report.
    try:
        dst_ast = parse(dst)
    except BaseError as exc:
        log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst)
        raise InternalError(
            f"INTERNAL ERROR: Invalid file contents are produced:\n"
            f"{exc}\n"
            f"Please report a bug on {REPORT_URL}.\n"
            f"This invalid output might be helpful:\n"
            f"{log}\n"
        ) from exc
    # Compare the two documents node by node via their reprs.
    src_ast_str = "\n".join(_v(src_ast))
    dst_ast_str = "\n".join(_v(dst_ast))
    if src_ast_str != dst_ast_str:
        log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst"))
        raise EquivalentError(
            f"INTERNAL ERROR: The new content produced is not equivalent to "
            f"the source.\n"
            f"Please report a bug on {REPORT_URL}.\n"
            f"This diff might be helpful: {log}\n"
        )
def assert_stable(src: str, dst: str, *, options: Options) -> None:
    """
    Raise StableError if `dst` reformats differently the second time.
    """
    second_pass = format_str(dst, options=options)
    if second_pass == dst:
        return
    # Idempotence violated: log both diffs to help debug the formatter.
    log = dump_to_file(
        diff(src, dst, "source", "first pass"),
        diff(dst, second_pass, "first pass", "second pass"),
    )
    raise StableError(
        f"INTERNAL ERROR: Different contents are produced on the second pass "
        f"of the formatter.\n"
        f"Please report a bug on {REPORT_URL}.\n"
        f"This diff might be helpful: {log}\n"
    ) from None
from pathlib import Path
import click
from attr import dataclass
from .utils import err, out
@dataclass
class Report:
    """Provides a reformatting counter. Can be rendered with `str(report)`."""

    check: bool
    change_count: int = 0
    same_count: int = 0
    failure_count: int = 0

    def done(self, path: str, changed: bool) -> None:
        """Increment the counter for successful reformatting. Write out a message."""
        if not changed:
            self.same_count += 1
            return
        verb = "Would reformat" if self.check else "Reformatted"
        out(f"{verb} {path}")
        self.change_count += 1

    def failed(self, path: Path, message: str) -> None:
        """Increment the counter for failed reformatting. Write out a message."""
        err(f"Error: cannot format {path}: {message}")
        self.failure_count += 1

    @property
    def return_code(self) -> int:
        """Return the exit code that the app should use.

        This considers the current state of changed files and failures:
        - if there were any failures, return 123;
        - if any files were changed and --check is being used, return 1;
        - otherwise return 0.
        """
        # According to http://tldp.org/LDP/abs/html/exitcodes.html starting with
        # 126 we have special return codes reserved by the shell.
        if self.failure_count:
            return 123
        if self.check and self.change_count:
            return 1
        return 0

    def __str__(self) -> str:
        """Render a color report of the current state.

        Use `click.unstyle` to remove colors.
        """
        if self.check:
            reformatted = "would be reformatted"
            unchanged = "would be left unchanged"
            failed = "would fail to reformat"
        else:
            reformatted = "reformatted"
            unchanged = "left unchanged"
            failed = "failed to reformat"
        parts = []
        if self.change_count:
            plural = "s" if self.change_count > 1 else ""
            parts.append(
                click.style(f"{self.change_count} file{plural} {reformatted}", bold=True)
            )
        if self.same_count:
            plural = "s" if self.same_count > 1 else ""
            parts.append(f"{self.same_count} file{plural} {unchanged}")
        if self.failure_count:
            plural = "s" if self.failure_count > 1 else ""
            parts.append(
                click.style(f"{self.failure_count} file{plural} {failed}", fg="red")
            )
        return ", ".join(parts) + "."
from typing import Optional, Tuple
import click
from .config import read_config_file
from .core import reformat
from .errors import EmptySources
from .options import (
AlignmentMode,
NewlineMode,
Options,
TagLineMode,
WriteBackMode,
get_indent_from_configuration,
)
from .report import Report
from .utils import out
from .version import __version__
@click.command()
@click.argument(
    "src",
    nargs=-1,
    type=click.Path(
        exists=True,
        file_okay=True,
        dir_okay=True,
        readable=True,
        resolve_path=True,
        allow_dash=True,
    ),
    is_eager=True,
)
@click.option(
    "--check",
    is_flag=True,
    help=(
        "Don't write the files back, just return the status. Return code 0 "
        "means nothing would change. Return code 1 means some files would be "
        "reformatted. Return code 123 means there was an internal error."
    ),
)
@click.option(
    "-a",
    "--alignment",
    type=click.Choice(
        [
            AlignmentMode.LEFT.value,
            AlignmentMode.RIGHT.value,
        ]
    ),
    help=(
        "Specify the alignment of step keywords (Given, When, Then,...). "
        "If specified, all statements after step keywords are left-aligned, "
        "spaces are inserted before/after the keywords to right/left align them. "
        "By default, step keywords are left-aligned, and there is a single "
        "space between the step keyword and the statement."
    ),
)
@click.option(
    "-n",
    "--newline",
    type=click.Choice(
        [
            NewlineMode.LF.value,
            NewlineMode.CRLF.value,
        ]
    ),
    help=(
        "Specify the line separators when formatting files inplace. "
        "If not specified, line separators are preserved."
    ),
)
@click.option(
    "--fast/--safe",
    is_flag=True,
    help="If --fast given, skip the sanity checks of file contents. [default: --safe]",
)
@click.option(
    "--single-line-tags/--multi-line-tags",
    is_flag=True,
    default=True,
    help=(
        "If --single-line-tags given, output consecutive tags on one line. "
        "If --multi-line-tags given, output one tag per line. "
        "[default: --single-line-tags]"
    ),
)
@click.option(
    "--tab-width",
    type=int,
    default=2,
    help="Specify the number of spaces per indentation-level. [default: 2]",
)
@click.option(
    "--use-tabs",
    is_flag=True,
    default=False,
    help="Indent lines with tabs instead of spaces.",
)
@click.option(
    "--config",
    type=click.Path(
        exists=True,
        file_okay=True,
        dir_okay=False,
        readable=True,
        allow_dash=False,
    ),
    is_eager=True,
    callback=read_config_file,
    help="Read configuration from FILE.",
)
@click.version_option(version=__version__)
@click.pass_context
def main(
    ctx: click.Context,
    src: Tuple[str],
    check: bool,
    alignment: Optional[str],
    newline: Optional[str],
    fast: bool,
    single_line_tags: bool,
    tab_width: int,
    use_tabs: bool,
    config: Optional[str],
) -> None:
    """
    Reformat the given SRC files and all .feature files in SRC folders. If -
    is passed as a file, reformat stdin and print the result to stdout.
    """
    if config:
        out(
            f"Using configuration from {config}.",
            bold=False,
            fg="blue",
        )
    # Translate the raw CLI values into the typed option objects the core uses.
    write_back_mode = WriteBackMode.from_configuration(check)
    alignment_mode = AlignmentMode.from_configuration(alignment)
    newline_mode = NewlineMode.from_configuration(newline)
    tag_line_mode = TagLineMode.from_configuration(single_line_tags)
    indent = get_indent_from_configuration(tab_width, use_tabs)
    options = Options(
        write_back=write_back_mode,
        step_keyword_alignment=alignment_mode,
        newline=newline_mode,
        fast=fast,
        tag_line_mode=tag_line_mode,
        indent=indent,
    )
    report = Report(check=check)
    try:
        reformat(src, report, options=options)
    except EmptySources:
        # Nothing to process at all: this is a successful no-op, not an error.
        out("No paths given. Nothing to do 😴")
        ctx.exit(0)
    bang = "💥 💔 💥" if report.return_code else "✨ 🍰 ✨"
    out(f"All done! {bang}")
    # The summary goes to stderr so stdout stays clean (e.g. for the "-" case).
    click.secho(str(report), err=True)
    ctx.exit(report.return_code)
import re
from typing import List
from reformat_money.match import TextMatch
from reformat_money.text_formatter import TextFormatter
class FileFormatter:
    """Locates every ``Money(...)`` call in a file and normalises its amount.

    Rewriting a single match is delegated to :class:`TextFormatter`; this class
    finds the calls, splits their argument lists (ignoring brackets and commas
    inside string literals), and splices the rewritten text back into the file.
    """

    SEARCH_PATTERN = re.compile(r"Money\(")
    REFORMAT_PATTERN = re.compile(r"(?P<class_name>Money)\((?P<contents>.*[\"\']\w{3}[\"\'])\)")
    CLOSING_BRACKET = ")"
    OPENING_BRACKET = "("
    QUOTES = {"'", '"',}
    ESCAPE = "\\"
    COMMA = ","

    def __init__(self, file):
        self._file = file
        # Counters feeding the per-file summary printed after a run.
        self._total_matches = 0
        self._formatted = 0
        self._unable_to_format = 0

    def reformat(self):
        """Rewrite the file in place with all Money() amounts normalised."""
        with open(self._file, 'r') as buf:
            self.set_contents(buf.read())
        self.find_and_reformat_matches()
        with open(self._file, 'w') as buf:
            buf.write(self.get_contents())

    def set_contents(self, contents: str):
        """Load `contents` as the working buffer."""
        self._contents_str = contents
        # Kept as a list of characters so spans can be spliced in place.
        self._contents = list(contents)
        self._file_characters = len(self._contents)

    def get_contents(self) -> str:
        """Return the (possibly rewritten) buffer as a single string."""
        return "".join(self._contents)

    def find_and_reformat_matches(self):
        """Reformat every Money() occurrence, then print a summary if needed."""
        for match in self.find_matches():
            self._reformat_match(match)
        self._maybe_print_summary()

    def find_matches(self) -> List[TextMatch]:
        """Returns list of matches.

        Matches are returned in the reverse order that they appear in file, so
        splicing one match does not shift the offsets of matches still pending.
        """
        matches = []
        for match in self.SEARCH_PATTERN.finditer(self._contents_str):
            self._total_matches += 1
            start = match.end()
            matches.append(self.split_args(start))
        return matches[::-1]

    def split_args(self, start: int) -> TextMatch:
        """Scan from `start` (just past ``Money(``) to the matching close
        bracket, recording where each top-level argument begins."""
        inspect = True  # False while inside a string literal
        opening_quote = "'"
        opening_bracket_count = 1
        closing_bracket_count = 0
        argument_start_positions = []
        stop = None
        for i in range(start, self._file_characters):
            char = self._contents[i]
            prev_char = self._contents[i - 1] if i >= 1 else None

            # Ignore everything that appears within quotes.
            # TODO: handle triple quotes
            if inspect and char in self.QUOTES:
                opening_quote = char
                inspect = False
                continue
            if not inspect and char == opening_quote and prev_char != self.ESCAPE:
                inspect = True
                continue

            if inspect and char == self.OPENING_BRACKET:
                opening_bracket_count += 1
            if inspect and (opening_bracket_count - closing_bracket_count) == 1 and char == self.COMMA:
                # A comma at nesting depth 1 denotes a new argument starting.
                argument_start_positions.append(i)
            if inspect and char == self.CLOSING_BRACKET:
                closing_bracket_count += 1
                if closing_bracket_count == opening_bracket_count:
                    # Stop position should not include the closing bracket.
                    stop = i - 1
                    break
        if stop is None:
            raise RuntimeError(f"Unable to find closing bracket for start position '{start}'")
        if argument_start_positions:
            # Append the last position so that we include the last argument too.
            # As we will be using slicing, we need to increment stop by one.
            argument_start_positions.append(stop + 1)
            match = TextMatch(
                args=self._get_arguments_from_start_positions(start, argument_start_positions),
                start=start,
                stop=stop,
            )
        else:
            match = TextMatch(
                args=["".join(self._contents[start: stop + 1])],
                start=start,
                stop=stop,
            )
        return match

    def _get_arguments_from_start_positions(self, last_pos: int, positions: list) -> List[str]:
        """Slice the buffer into one string per argument."""
        args = []
        for pos in positions:
            args.append("".join(self._contents[last_pos: pos]))
            # +1 skips the separating comma itself.
            last_pos = pos + 1
        return args

    def _reformat_match(self, match: TextMatch):
        """Run one match through TextFormatter and update the counters."""
        original_amount = match.get_amount()
        formatter = TextFormatter(match)
        formatter.reformat()
        self._update(formatter)
        if not formatter.able_to_format:
            self._unable_to_format += 1
        elif original_amount != match.get_amount():
            self._formatted += 1

    def _maybe_print_summary(self):
        # Stay silent for files with no matches or nothing to change.
        if self._total_matches and self._unable_to_format == 0 and self._formatted > 0:
            print(f"{self._file} -- Reformatted: {self._formatted}")
        elif self._total_matches and self._unable_to_format > 0:
            print(f"{self._file} -- Reformatted: {self._formatted}, Unable to reformat: {self._unable_to_format}")

    def _update(self, formatter: TextFormatter):
        """Splice the reformatted text over the original span."""
        start, end = formatter.span()
        new_string = formatter.get_reformatted()
        # One slice assignment instead of del + per-character insert, which was
        # quadratic in the length of the replacement.
        self._contents[start:end + 1] = list(new_string)
import re
from reformat_money.match import TextMatch
class TextFormatter:
    """Formats the amount using simple regex to determine the starting type."""

    # matches ("120.00", "120", '493.23', '392')
    amount_as_string = re.compile(r"^[\"\'](?P<integral>\d+)\.?(?P<decimal>\d*)[\"\']$")
    # matches: (100, 200, 593)
    amount_as_integer = re.compile(r"^(?P<integral>[0-9]+)$")
    # matches: (100., 100.3, 200.32, etc.)
    amount_as_float = re.compile(r"^(?P<integral>\d+)\.(?P<decimal>\d*)$")

    def __init__(self, match: TextMatch):
        self._match = match
        self._func = self._null_operator
        # Set for real by reformat(); initialised here so able_to_format and
        # get_reformatted are safe to call even if reformat() never ran.
        self._inner_match = None

    def match_string_representation(self):
        # Example: Money("1002.39", "GBP")
        found = self.amount_as_string.search(self._match.get_amount())
        if found:
            self._func = self.reformat_string
        return found

    def match_integer_representation(self):
        # Example: Money(200, "GBP")
        found = self.amount_as_integer.search(self._match.get_amount())
        if found:
            self._func = self.reformat_integer
        return found

    def match_float_representation(self):
        # Example: Money(203.23, "GBP")
        found = self.amount_as_float.search(self._match.get_amount())
        if found:
            self._func = self.reformat_float
        return found

    def reformat(self):
        """Detect the amount's representation and select the rewrite function."""
        self._inner_match = (
            self.match_string_representation()
            or self.match_integer_representation()
            or self.match_float_representation()
        )

    def _null_operator(self):
        # No-op used when no representation matched: leave the amount alone.
        pass

    def reformat_string(self):
        integral = self._inner_match.group("integral")
        decimal = self._inner_match.group("decimal")
        self._match.update_amount(self._build_reformatted_amount(integral, decimal))

    def reformat_integer(self):
        integral = self._inner_match.group("integral")
        self._match.update_amount(self._build_reformatted_amount(integral))

    def reformat_float(self):
        # Same shape as reformat_string: both regexes expose integral/decimal.
        integral = self._inner_match.group("integral")
        decimal = self._inner_match.group("decimal")
        self._match.update_amount(self._build_reformatted_amount(integral, decimal))

    def _build_reformatted_amount(self, integral: str, decimal: str = "00"):
        """Ensure that we have at least 2 decimal places for consistency."""
        decimal = decimal.ljust(2, "0")
        return f'"{integral}.{decimal}"'

    def get_reformatted(self):
        """Apply the selected rewrite and return the match's new text."""
        self._func()
        return str(self._match)

    def span(self):
        """Return the (start, stop) character span of the match in the file."""
        return (self._match.start, self._match.stop)

    @property
    def able_to_format(self) -> bool:
        # True once reformat() found a usable representation.
        return bool(self._inner_match)
# Public API of this module (export list; the `# Cell` markers below suggest
# the file is generated from a notebook via nbdev -- edit the notebook instead).
__all__ = ['Adafactor', 'adafactor']
# Cell
import torch
from fastai.basics import *
# Cell
class Adafactor(torch.optim.Optimizer):
    """Adafactor optimizer: stores factored (row/column) second-moment
    estimates for parameters with >= 2 dimensions, and can derive the step
    size from the step count instead of an external learning rate.

    NOTE(review): this looks like a port of the fairseq/Hugging Face Adafactor
    implementation -- confirm provenance before changing the math. Also note
    `math` is not imported in this module directly; it presumably arrives via
    `from fastai.basics import *` -- confirm.

    Arguments:
        params: iterable of parameters (or dicts defining parameter groups).
        lr: external learning rate; must be None when `relative_step=True`.
        eps: pair (eps1, eps2) -- eps1 regularizes the squared gradient,
            eps2 is the lower bound on the parameter scale.
        clip_threshold: the update is rescaled so its RMS stays <= this value.
        decay_rate: exponent of the running-average coefficient,
            beta2t = 1 - step**decay_rate.
        mom: first-moment coefficient; momentum is kept only when not None.
        weight_decay: decay applied directly to the parameter, scaled by lr.
        scale_parameter: scale the step size by max(eps2, RMS(param)).
        relative_step: derive the step size from the step count.
        warmup_init: step-proportional warmup; requires relative_step=True.
    """
    def __init__(
        self,
        params,
        lr=None,
        eps=(1e-30, 1e-3),
        clip_threshold=1.0,
        decay_rate=-0.8,
        mom=None,
        weight_decay=0.0,
        scale_parameter=True,
        relative_step=True,
        warmup_init=False,
    ):
        # The two lr schemes (manual vs relative) are mutually exclusive.
        if lr is not None and relative_step:
            raise ValueError("Cannot combine manual lr and relative_step options")
        if warmup_init and not relative_step:
            raise ValueError("warmup_init requires relative_step=True")
        defaults = dict(
            lr=lr,
            eps=eps,
            clip_threshold=clip_threshold,
            decay_rate=decay_rate,
            mom=mom,
            weight_decay=weight_decay,
            scale_parameter=scale_parameter,
            relative_step=relative_step,
            warmup_init=warmup_init,
        )
        super(Adafactor, self).__init__(params, defaults)
    # Capability flags queried by the surrounding training framework.
    @property
    def supports_memory_efficient_fp16(self):
        return True
    @property
    def supports_flat_params(self):
        return False
    def _get_lr(self, param_group, param_state):
        # Step size: either the externally supplied lr, or (relative_step) a
        # schedule capped by 1/sqrt(step), optionally warmed up linearly.
        rel_step_sz = param_group["lr"]
        if param_group["relative_step"]:
            min_step = (
                1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2
            )
            rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"]))
        param_scale = 1.0
        if param_group["scale_parameter"]:
            # Scale by the parameter's RMS, floored at eps2.
            param_scale = max(param_group["eps"][1], param_state["RMS"])
        return param_scale * rel_step_sz
    def _get_options(self, param_group, param_shape):
        # Factored second moments only make sense for matrices and above.
        factored = len(param_shape) >= 2
        use_first_moment = param_group["mom"] is not None
        return factored, use_first_moment
    def _rms(self, tensor):
        # Root-mean-square of all elements.
        return tensor.norm(2) / (tensor.numel() ** 0.5)
    def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col):
        # Reconstruct the (inverse sqrt of the) full second-moment matrix from
        # its row and column accumulators via an outer product.
        r_factor = (
            (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True))
            .rsqrt_()
            .unsqueeze(-1)
        )
        c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
        return torch.mul(r_factor, c_factor)
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad.data
                # Do the arithmetic in fp32 even for (b)float16 grads/params.
                if grad.dtype in {torch.float16, torch.bfloat16}:
                    grad = grad.float()
                if grad.is_sparse:
                    raise RuntimeError("Adafactor does not support sparse gradients.")
                state = self.state[p]
                grad_shape = grad.shape
                factored, use_first_moment = self._get_options(group, grad_shape)
                # State Initialization
                if len(state) == 0:
                    state["step"] = 0
                    if use_first_moment:
                        # Exponential moving average of gradient values
                        state["exp_avg"] = torch.zeros_like(grad)
                    if factored:
                        # Row/column accumulators instead of a full matrix.
                        state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad)
                        state["exp_avg_sq_col"] = torch.zeros(
                            grad_shape[:-2] + grad_shape[-1:]
                        ).to(grad)
                    else:
                        state["exp_avg_sq"] = torch.zeros_like(grad)
                    state["RMS"] = 0
                else:
                    # Move state to the gradient's device/dtype if needed.
                    if use_first_moment:
                        state["exp_avg"] = state["exp_avg"].to(grad)
                    if factored:
                        state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad)
                        state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad)
                    else:
                        state["exp_avg_sq"] = state["exp_avg_sq"].to(grad)
                p_data_fp32 = p.data
                if p.data.dtype in {torch.float16, torch.bfloat16}:
                    p_data_fp32 = p_data_fp32.float()
                state["step"] += 1
                state["RMS"] = self._rms(p_data_fp32)
                group["lr"] = self._get_lr(group, state)
                # Running-average coefficient; approaches 1 as step grows.
                beta2t = 1.0 - math.pow(state["step"], group["decay_rate"])
                update = (grad ** 2) + group["eps"][0]
                if factored:
                    exp_avg_sq_row = state["exp_avg_sq_row"]
                    exp_avg_sq_col = state["exp_avg_sq_col"]
                    exp_avg_sq_row.mul_(beta2t).add_(
                        update.mean(dim=-1), alpha=1.0 - beta2t
                    )
                    exp_avg_sq_col.mul_(beta2t).add_(
                        update.mean(dim=-2), alpha=1.0 - beta2t
                    )
                    # Approximation of exponential moving average of square of gradient
                    update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
                    update.mul_(grad)
                else:
                    exp_avg_sq = state["exp_avg_sq"]
                    exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t)
                    update = exp_avg_sq.rsqrt().mul_(grad)
                # Rescale so the update's RMS does not exceed clip_threshold.
                update.div_(
                    (self._rms(update) / group["clip_threshold"]).clamp_(min=1.0)
                )
                update.mul_(group["lr"])
                if use_first_moment:
                    exp_avg = state["exp_avg"]
                    exp_avg.mul_(group["mom"]).add_(update, alpha=1 - group["mom"])
                    update = exp_avg
                if group["weight_decay"] != 0:
                    # Weight decay applied directly to the parameter, scaled by lr.
                    p_data_fp32.add_(
                        p_data_fp32, alpha=-group["weight_decay"] * group["lr"]
                    )
                p_data_fp32.add_(-update)
                # Copy back into the low-precision master copy if needed.
                if p.data.dtype in {torch.float16, torch.bfloat16}:
                    p.data.copy_(p_data_fp32)
        return loss
# Cell
@delegates(Adafactor.__init__)
def adafactor(param_groups, **kwargs):
    """Build a fastai-compatible Adafactor by wrapping each parameter group.

    `param_groups` is an iterable of parameter lists (fastai's split format);
    each becomes one torch parameter group carrying the shared kwargs.
    """
    return OptimWrapper(Adafactor([{'params': ps, **kwargs} for ps in param_groups]))
# NOTE(review): this file follows the nbdev autogenerated-index convention
# (_nbdev.py); regenerate it from the notebooks rather than editing by hand.
__all__ = ["index", "modules", "custom_doc_links", "git_url"]

# Maps every exported symbol to the notebook that defines it.
index = {"exists": "00_core.ipynb",
         "default": "00_core.ipynb",
         "expand_dim1": "00_core.ipynb",
         "max_neg_value": "00_core.ipynb",
         "setattr_on": "00_core.ipynb",
         "top_p_filter": "00_core.ipynb",
         "top_k_filter": "00_core.ipynb",
         "cache_method_decorator": "00_core.ipynb",
         "look_one_back": "00_core.ipynb",
         "chunked_sum": "00_core.ipynb",
         "sort_key_val": "00_core.ipynb",
         "batched_index_select": "00_core.ipynb",
         "do_cuda_timing": "00_core.ipynb",
         "model_performance": "00_core.ipynb",
         "total_params": "00_core.ipynb",
         "CombineInputOutputCallback": "00_core.ipynb",
         "RemoveEOSCallback": "00_core.ipynb",
         "LossTargetShiftCallback": "00_core.ipynb",
         "PadBatchCallback": "00_core.ipynb",
         "AddEOSID": "00_core.ipynb",
         "LabelSmoothingCrossEntropy": "00_core.ipynb",
         "LabelSmoothingCrossEntropyFlat": "00_core.ipynb",
         "Learner.distrib_ctx": "00_core.ipynb",
         "Residual": "01_layers.ipynb",
         "PostNorm": "01_layers.ipynb",
         "PreNorm": "01_layers.ipynb",
         "FeedForward": "01_layers.ipynb",
         "get_axial_shape": "01_layers.ipynb",
         "get_axial_dims": "01_layers.ipynb",
         "AbsolutePositionalEmbedding": "01_layers.ipynb",
         "FixedPositionalEmbedding": "01_layers.ipynb",
         "TransformerEmbedding": "01_layers.ipynb",
         "MASK_VAL": "02_attention.ipynb",
         "SELF_ATTN_MASK_VAL": "02_attention.ipynb",
         "AttnInProj": "02_attention.ipynb",
         "AttnInProjV2": "02_attention.ipynb",
         "SharedQKAttnInProj": "02_attention.ipynb",
         "ScaledDotProdAttention": "02_attention.ipynb",
         "Attention": "02_attention.ipynb",
         "MemEfficientAttention": "02_attention.ipynb",
         "ChunkedDotProdAttention": "02_attention.ipynb",
         "ChunkedAttention": "02_attention.ipynb",
         "AdditiveInProj": "02_attention.ipynb",
         "AdditiveAttention": "02_attention.ipynb",
         "LSHAttention": "02_attention.ipynb",
         "LSHSelfAttention": "02_attention.ipynb",
         "ReformerAttention": "02_attention.ipynb",
         "ReformerAttentionV2": "02_attention.ipynb",
         "LMMixin": "03_transformer.ipynb",
         "EncDecMixin": "03_transformer.ipynb",
         "TransformerEncoderBlock": "03_transformer.ipynb",
         "TransformerEncoder": "03_transformer.ipynb",
         "TransformerDecoderBlock": "03_transformer.ipynb",
         "TransformerDecoderBlockV2": "03_transformer.ipynb",
         "TransformerDecoder": "03_transformer.ipynb",
         "TransformerLM": "03_transformer.ipynb",
         "transformer_lm_splits": "03_transformer.ipynb",
         "Transformer": "03_transformer.ipynb",
         "transformer_splits": "03_transformer.ipynb",
         "LowMemEncoderBlock": "03_transformer.ipynb",
         "LowMemEncoder": "03_transformer.ipynb",
         "ChunkedTransformerLM": "03_transformer.ipynb",
         "from_config": "04_reformer.ipynb",
         "MODELS": "04_reformer.ipynb",
         "Chunk": "04_reformer.ipynb",
         "ChunkedFeedForward": "04_reformer.ipynb",
         "Deterministic": "04_reformer.ipynb",
         "ReversibleBlock": "04_reformer.ipynb",
         "IrreversibleBlock": "04_reformer.ipynb",
         "ReversibleSequence": "04_reformer.ipynb",
         "ReversibleEncoder": "04_reformer.ipynb",
         "ReversibleDecoder": "04_reformer.ipynb",
         "ReversibleLM": "04_reformer.ipynb",
         "ReversibleTransformer": "04_reformer.ipynb",
         "LSHEncoderBlock": "04_reformer.ipynb",
         "LSHEncoder": "04_reformer.ipynb",
         "LSHLM": "04_reformer.ipynb",
         "ReformerEncoder": "04_reformer.ipynb",
         "ReformerLM": "04_reformer.ipynb",
         "reformer_lm_splits": "04_reformer.ipynb",
         "ByteTextTokenizer": "05_tokenizers.ipynb",
         "SubwordTextEncoder": "05_tokenizers.ipynb",
         "read_lines": "06_data.ipynb",
         "convert_data_to_seq_length": "06_data.ipynb",
         "read_and_prepare_data": "06_data.ipynb",
         "TwinSequence": "06_data.ipynb",
         "MaskTargCallback": "06_data.ipynb",
         "DeterministicTwinSequence": "06_data.ipynb",
         "MaskedAccuracy": "07_metrics.ipynb",
         "BPC": "07_metrics.ipynb",
         "bpc": "07_metrics.ipynb",
         "Adafactor": "08_optimizers.ipynb",
         "adafactor": "08_optimizers.ipynb",
         "Learner.gather_args": "09_tracking.ipynb",
         "download_enwik8_data": "20_experiment-script.ipynb",
         "download_wmt14_data": "20_experiment-script.ipynb",
         "get_twin_sequence_dataloaders": "20_experiment-script.ipynb",
         "get_enwik8_dataloader": "20_experiment-script.ipynb",
         "get_wmt14_dataloader": "20_experiment-script.ipynb",
         "get_synthetic_learner": "20_experiment-script.ipynb",
         "get_lm_learner": "20_experiment-script.ipynb",
         "get_reformerlm_learner": "20_experiment-script.ipynb",
         "get_seq2seq_learner": "20_experiment-script.ipynb",
         "init_wandb": "20_experiment-script.ipynb",
         "run_exp": "20_experiment-script.ipynb",
         "update_sig": "21_experiment-configs.ipynb",
         "ConfigBase": "21_experiment-configs.ipynb",
         "SyntheticConfig": "21_experiment-configs.ipynb",
         "TransformerLMConfigEnwik8": "21_experiment-configs.ipynb",
         "ReversibleLMConfigEnwik8": "21_experiment-configs.ipynb",
         "NHashesConfig": "21_experiment-configs.ipynb",
         "NLayersConfig": "21_experiment-configs.ipynb",
         "ReversibleTransformerConfigWMT": "21_experiment-configs.ipynb",
         "TransformerConfigWMT": "21_experiment-configs.ipynb"}

# Python modules generated from the notebooks.
modules = ["core.py",
           "layers.py",
           "attention.py",
           "transformer.py",
           "reformer.py",
           "tokenizers.py",
           "data.py",
           "metrics.py",
           "optimizers.py",
           "tracking.py",
           "expscript.py",
           "configs.py"]

# Base URLs used when generating documentation links.
doc_url = "https://arampacha.github.io/reformer_fastai/"

git_url = "https://github.com/arampacha/reformer_fastai/tree/master/"
def custom_doc_links(name):
    """Return a custom documentation URL for `name`, or None when there is none.

    This library defines no custom links, so the hook always returns None
    (standard nbdev stub).
    """
    return None
# Public API of this module (export list; file is notebook-generated -- see
# the `# Cell` markers below).
__all__ = ['update_sig', 'ConfigBase', 'SyntheticConfig', 'TransformerLMConfigEnwik8', 'ReversibleLMConfigEnwik8',
           'NHashesConfig', 'NLayersConfig', 'ReversibleTransformerConfigWMT', 'TransformerConfigWMT']
# Cell
from fastcore.all import *
from fastai.basics import *
from .core import *
from .transformer import *
from .reformer import *
import json
from inspect import signature, Parameter
# Cell
def _dummy():
    """Sentinel callable: the default `_model` on ConfigBase, used to detect
    subclasses that forgot to set a real model class."""
    return None
# Cell
def update_sig(d):
    "Update signature of `f` from dict `d`"
    keyword_params = {
        name: Parameter(name, Parameter.KEYWORD_ONLY, default=value)
        for name, value in d.items()
    }

    def _decorate(f):
        original = signature(f)
        params = dict(original.parameters)
        # Swap the catch-all `kwargs` for explicit keyword-only parameters so
        # help()/tab-completion show the real options with their defaults.
        params.pop('kwargs')
        params.update(keyword_params)
        f.__signature__ = original.replace(parameters=params.values())
        return f

    return _decorate
# Cell
class ConfigBase:
    "Base class for Configs"
    # Subclasses must provide:
    #   _d     -- dict of default hyperparameters (doubles as the config state)
    #   _model -- the model class this config belongs to
    # NOTE(review): `_d` is a *class-level* dict that __init__/__setattr__
    # mutate, so all instances of a given config class share the same state --
    # confirm this is intended before relying on multiple live instances.
    _d:dict = None
    _model = _dummy
    def __init__(self, *, verbose=False, warn=True, **kwargs):
        self.validate()
        # Only keys already present in _d are accepted; unknown keys are
        # skipped (with an optional warning) rather than raising.
        for k,v in kwargs.items():
            if k in self._d:
                self._d[k]=v
                if verbose: print(f'Setting `{k}` = {v}')
            elif warn: print(f'Parameter `{k}` is not accepted by {self._model.__name__}. Skipped')
    def validate(self):
        # Guard against subclasses forgetting to set the required class attrs.
        assert exists(self._d), "_d missing. You might want to provide defaults for config"
        assert self._model is not _dummy, "_model missing. Provide a model class"
    def validate_arg(self, k):
        assert k in self._d.keys(), f"{self._model.__name__} does not accept `{k}` argument"
    def __getattr__(self, k):
        # Only called for names not found normally: expose _d entries as attrs.
        try:
            res = self._d[k]
        except KeyError:
            raise AttributeError(f"{type(self).__name__} does not have attribute `{k}`")
        return res
    def __setattr__(self, k, v):
        # Every assignment is routed into _d, so unknown hyperparameters fail fast.
        self.validate_arg(k)
        self._d[k] = v
    def __getitem__(self, k):
        return self._d[k]
    def __setitem__(self, k, v):
        self.validate_arg(k)
        self._d[k] = v
    def __repr__(self):
        s = f"{self._model.__name__} config \n" + '-'*20
        s += ''.join(f'\n{k:16}{v}' for k,v in self._d.items())
        return s
    def dict(self): return self._d
    def save(self, fn, add_tstmp=False):
        # Serialize to exp_configs/<fn>.json, optionally stamping the filename
        # with the current UTC time (time.gmtime).
        # NOTE(review): `os` and `time` are not imported here directly; they
        # presumably come in via the star imports at the top -- confirm.
        os.makedirs('exp_configs', exist_ok=True)
        if add_tstmp:
            tstmp = time.strftime("_%d_%m_%Y_%H:%M", time.gmtime())
            fn += tstmp
        with open(f'exp_configs/{fn}.json', 'w') as f:
            json.dump(self.dict(), f)
    @classmethod
    def from_file(cls, fn):
        # Inverse of `save`: rebuild a config from its saved JSON dict.
        with open(f'exp_configs/{fn}.json') as f:
            d = json.load(f)
        return cls(**d)
# Cell
class SyntheticConfig(ConfigBase):
    """
    Config for Synthetic Experiment.
    See https://arampacha.github.io/reformer_fastai/experiment.synthetic-task.html for details
    """
    _model = LSHLM
    # Default hyperparameters for LSHLM; any key may be overridden by passing
    # the same-named keyword argument to __init__.
    _d = {
        'vocab_sz':128,
        'd_model':256,
        'n_layers':1,
        'n_heads':4,
        'd_ff':256,
        'attn_dropout':0.0,
        'ff_dropout':0.0,
        'emb_dropout':0.0,
        'tie_weights':True,
        'causal':True,
        'pos_enc':'absolute',
        'max_seq_len':1024,
        'axial_shape':None,
        'axial_emb_dims':None,
        'pad_idx':None,
        'prenorm':False,
        'attn_bias':False,
        'bucket_size':64,
        'use_lsh':True,
        'n_hashes':4,
        'seed':123,
    }
    # Rewrites __init__'s signature so the keys of _d show up as explicit
    # keyword-only parameters with their defaults.
    @update_sig(_d)
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
# Cell
class TransformerLMConfigEnwik8(ConfigBase):
    """
    Config for enwik8 Experiment.
    See https://arampacha.github.io/reformer_fastai/experiment.enwik8-baseline.html for details
    """
    _model = TransformerLM
    # Default hyperparameters for the enwik8 baseline; override via __init__.
    _d = {
        'vocab_sz':256,
        'd_model':1024,
        'n_layers':3,
        'n_heads':8,
        'd_ff':4096,
        'attn_dropout':0.1,
        'ff_dropout':0.1,
        'emb_dropout':0.1,
        'tie_weights':True,
        'causal':True,
        'pos_enc':'axial',
        'max_seq_len':2048,
        'axial_shape':(64,32),
        'axial_emb_dims':None,
        'pad_idx':None,
        'prenorm':False,
        'attn_bias':False,
        'shared_qk':False,
    }
    @update_sig(_d)
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
# Cell
class ReversibleLMConfigEnwik8(ConfigBase):
    """
    Config for enwik8 Experiment.
    See https://arampacha.github.io/reformer_fastai/experiment.enwik8-reversible.html for details
    """
    _model = ReversibleLM
    # Default hyperparameters for the reversible-layers run; override via __init__.
    _d = {
        'vocab_sz':256,
        'd_model':1024,
        'n_layers':3,
        'n_heads':8,
        'd_ff':4096,
        'attn_dropout':0.1,
        'ff_dropout':0.1,
        'emb_dropout':0.1,
        'tie_weights':True,
        'causal':True,
        'pos_enc':'axial',
        'max_seq_len':2048,
        'axial_shape':(64,32),
        'axial_emb_dims':None,
        'pad_idx':None,
        'prenorm':True,
        'attn_bias':False,
        'rev_thres':0,
    }
    @update_sig(_d)
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
# Cell
class NHashesConfig(ConfigBase):
    """
    Config for evaluating performance as function of `n_hashes`.
    See https://arampacha.github.io/reformer_fastai/experiment.enwik8-n_hashes.html for details
    """
    _model = LSHLM
    # Defaults for the n_hashes sweep (vary 'n_hashes' via __init__).
    _d = {
        'vocab_sz':256,
        'd_model':1024,
        'n_layers':3,
        'n_heads':8,
        'd_ff':4096,
        'attn_dropout':0.1,
        'ff_dropout':0.1,
        'emb_dropout':0.1,
        'tie_weights':True,
        'causal':True,
        'pos_enc':'axial',
        'max_seq_len':4096,
        'axial_shape':None,
        'axial_emb_dims':None,
        'pad_idx':None,
        'prenorm':False,
        'attn_bias':False,
        'bucket_size':64,
        'use_lsh':True,
        'n_hashes':2,
        'seed':842,
    }
    @update_sig(_d)
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
# Cell
class NLayersConfig(ConfigBase):
    """
    Config for evaluating performance as function of `n_layers`.
    See https://arampacha.github.io/reformer_fastai/experiment.enwik8-n_layers.html for details
    """
    _model = ReformerLM
    # Defaults for the n_layers sweep (vary 'n_layers' via __init__).
    _d = {
        'vocab_sz':256,
        'd_model':1024,
        'n_layers':3,
        'n_heads':8,
        'd_ff':4096,
        'ff_chunks':64,
        'attn_dropout':0.1,
        'ff_dropout':0.1,
        'emb_dropout':0.1,
        'tie_weights':True,
        'causal':True,
        'pos_enc':'axial',
        'max_seq_len':2**14,
        'axial_shape':None,
        'axial_emb_dims':None,
        'pad_idx':None,
        'prenorm':True,
        'attn_bias':False,
        'bucket_size':64,
        'use_lsh':True,
        'n_hashes':8,
        'rev_thres':0,
        'seed':842,
    }
    @update_sig(_d)
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
# Cell
class ReversibleTransformerConfigWMT(ConfigBase):
    """
    Config for WMT Experiment.
    See https://arampacha.github.io/reformer_fastai/
    """
    _model = ReversibleTransformer
    # Defaults for the WMT14 translation experiment; override via __init__.
    _d = {
        'enc_vocab_sz':33708,
        'dec_vocab_sz':33708,
        'n_enc_layers':6,
        'n_dec_layers':6,
        'n_heads':8,
        'd_model':512,
        'd_ff':2048,
        'attn_dropout':0.1,
        'ff_dropout':0.1,
        'emb_dropout':0.1,
        'tie_weights':True,
        'shared_emb': True,
        'pos_enc':'fixed',
        'max_seq_len':256,
        'axial_shape':(64,32),
        'axial_emb_dims':None,
        'pad_idx':None,
        'prenorm':False,
        'attn_bias':False,
        'comb_attn':False,
    }
    @update_sig(_d)
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
# Cell
class TransformerConfigWMT(ConfigBase):
    """
    Config for the WMT experiment with the baseline Transformer.
    See https://arampacha.github.io/reformer_fastai/
    """
    _model = Transformer
    # Experiment defaults; `update_sig` exposes these keys as keyword
    # arguments of `__init__`.
    _d = dict(
        enc_vocab_sz=33708,
        dec_vocab_sz=33708,
        n_enc_layers=6,
        n_dec_layers=6,
        n_heads=8,
        d_model=512,
        d_ff=2048,
        attn_dropout=0.1,
        ff_dropout=0.1,
        emb_dropout=0.1,
        tie_weights=True,
        shared_emb=True,
        pos_enc='fixed',
        max_seq_len=256,
        axial_shape=(64, 32),
        axial_emb_dims=None,
        pad_idx=None,
        prenorm=False,
        attn_bias=False,
        comb_attn=True,
    )

    @update_sig(_d)
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
__all__ = ['Chunk', 'ChunkedFeedForward', 'Deterministic', 'ReversibleBlock', 'IrreversibleBlock', 'ReversibleSequence',
'ReversibleEncoder', 'ReversibleDecoder', 'ReversibleLM', 'ReversibleTransformer', 'LSHEncoderBlock',
'LSHEncoder', 'LSHLM', 'ReformerEncoder', 'ReformerLM', 'reformer_lm_splits', 'from_config', 'MODELS']
# Cell
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
from functools import wraps
from fastai.basics import *
from .core import *
from .layers import *
from .attention import *
from .transformer import LMMixin, EncDecMixin
# Cell
class Chunk(Module):
    "Split the input into `n_chunks` along `dim`, apply `fn` to each piece, and re-concatenate"
    def __init__(self, n_chunks:int, fn:Module, dim:int=-1):
        store_attr()

    def forward(self, x, **kwargs):
        # Fast path: nothing to split.
        if self.n_chunks == 1:
            return self.fn(x, **kwargs)
        pieces = x.chunk(self.n_chunks, dim=self.dim)
        outs = [self.fn(piece, **kwargs) for piece in pieces]
        return torch.cat(outs, dim=self.dim)
# Cell
class ChunkedFeedForward(Module):
    "Position-wise feed-forward net applied to the input in `n_chunks` pieces along `dim`"
    def __init__(self, d:int, d_ff:int=None, n_chunks:int=1, dropout:float=0., dim:int=-1):
        store_attr('n_chunks,dim')
        inner_dim = default(d_ff, 4*d)
        self.net = nn.Sequential(
            nn.Linear(d, inner_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(inner_dim, d),
            nn.Dropout(dropout),
        )

    def forward(self, x, **kwargs):
        # Fast path: no chunking requested.
        if self.n_chunks == 1:
            return self.net(x)
        outs = [self.net(piece) for piece in x.chunk(self.n_chunks, dim=self.dim)]
        return torch.cat(outs, dim=self.dim)
# Cell
class Deterministic(Module):
    """
    Wrapper module that makes the wrapped `net` replayable across forward passes.

    On a recorded forward pass the CPU (and, when CUDA is active, GPU) RNG states
    are snapshotted; on a later pass with `set_rng=True` they are restored so
    stochastic layers (e.g. dropout) produce exactly the same values. Follows the
    saving/setting-rng example here:
    https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
    """
    def __init__(self, net:Module):
        self.net = net
        self.cpu_state = None    # saved CPU RNG state from the recorded pass
        self.cuda_in_fwd = None  # truthy iff CUDA RNG states were captured
        self.gpu_devices = None  # devices whose RNG states were captured
        self.gpu_states = None   # corresponding per-device CUDA RNG states

    def record_rng(self, *args):
        # Snapshot the RNG state for later replay.
        self.cpu_state = torch.get_rng_state()
        # NOTE: `torch.cuda._initialized` is a private attribute — used here to
        # avoid touching CUDA when it was never initialized; may change across
        # torch versions.
        if torch.cuda._initialized:
            self.cuda_in_fwd = True
            self.gpu_devices, self.gpu_states = get_device_states(*args)

    def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
        if record_rng:
            self.record_rng(*args)
        if not set_rng:
            return self.net(*args, **kwargs)
        # Replay: restore the recorded RNG states inside a forked RNG scope so
        # the global generator state is left untouched afterwards.
        rng_devices = []
        if self.cuda_in_fwd:
            rng_devices = self.gpu_devices
        with torch.random.fork_rng(devices=rng_devices, enabled=True):
            torch.set_rng_state(self.cpu_state)
            if self.cuda_in_fwd:
                set_device_states(self.gpu_devices, self.gpu_states)
            return self.net(*args, **kwargs)
# Cell
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(Module):
    """
    Applies `f` and `g` in a reversible manner, avoiding storing activations for
    backpropagation. With the input split into halves (x1, x2):

        y1 = x1 + f(x2)
        y2 = x2 + g(y1)

    `backward_pass` inverts these residuals to reconstruct (x1, x2) from
    (y1, y2) and accumulates gradients along the way.
    """
    def __init__(self, f:Module, g:Module, depth=None, send_signal=False):
        store_attr('depth, send_signal')
        # Deterministic wrappers let the recomputation in backward_pass replay
        # dropout etc. with the exact RNG state of the forward pass.
        self.f = Deterministic(f)
        self.g = Deterministic(g)

    def forward(self, x, f_args = {}, g_args = {}):
        # Input is the concatenation [x1; x2] along the feature dim (dim 2).
        x1, x2 = torch.chunk(x, 2, dim=2)
        y1, y2 = None, None
        if self.send_signal:
            # Tell the wrapped modules this is the forward (not reverse) pass.
            f_args['_reverse'] = g_args['_reverse'] = False
            f_args['_depth'] = g_args['_depth'] = self.depth
        # no_grad: intermediate activations are deliberately not kept; they are
        # recomputed in backward_pass instead.
        with torch.no_grad():
            y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
            y2 = x2 + self.g(y1, record_rng=self.training, **g_args)
        return torch.cat([y1, y2], dim=2)

    def backward_pass(self, y, dy, f_args = {}, g_args = {}):
        """Reconstruct this block's input from its output `y` and propagate the
        incoming gradient `dy`; returns (x, dx)."""
        y1, y2 = torch.chunk(y, 2, dim=2)
        del y
        dy1, dy2 = torch.chunk(dy, 2, dim=2)
        del dy
        if self.send_signal:
            f_args['_reverse'] = g_args['_reverse'] = True
            f_args['_depth'] = g_args['_depth'] = self.depth
        # Recompute g(y1) with the recorded RNG state and backprop dy2 through it.
        with torch.enable_grad():
            y1.requires_grad = True
            gy1 = self.g(y1, set_rng=True, **g_args)
            torch.autograd.backward(gy1, dy2)
        # Invert the second residual: x2 = y2 - g(y1).
        with torch.no_grad():
            x2 = y2 - gy1
            del y2, gy1
            dx1 = dy1 + y1.grad
            del dy1
            y1.grad = None
        # Recompute f(x2) and backprop dx1 through it.
        with torch.enable_grad():
            x2.requires_grad = True
            fx2 = self.f(x2, set_rng=True, **f_args)
            torch.autograd.backward(fx2, dx1, retain_graph=True)
        # Invert the first residual: x1 = y1 - f(x2).
        with torch.no_grad():
            x1 = y1 - fx2
            del y1, fx2
            dx2 = dy2 + x2.grad
            del dy2
            x2.grad = None
            x = torch.cat([x1, x2.detach()], dim=2)
            dx = torch.cat([dx1, dx2], dim=2)
        return x, dx
# Cell
class IrreversibleBlock(Module):
    "Same computation as ReversibleBlock, but gradients flow through autograd as usual"
    def __init__(self, f, g):
        store_attr()

    def forward(self, x, f_args={}, g_args={}):
        # Split [x1; x2] along the feature dim, apply the two residual halves.
        x1, x2 = torch.chunk(x, 2, dim=2)
        y1 = x1 + self.f(x2, **f_args)
        y2 = x2 + self.g(y1, **g_args)
        return torch.cat([y1, y2], dim=2)
# Cell
class _ReversibleFunction(Function):
    """Custom autograd Function driving a stack of ReversibleBlocks.

    Forward runs the blocks without autograd bookkeeping; backward walks them in
    reverse, letting each block reconstruct its own input via `backward_pass`.
    """
    @staticmethod
    def forward(ctx, x, blocks, kwargs):
        ctx.kwargs = kwargs
        for block in blocks:
            x = block(x, **kwargs)
        # Save only the final (detached) output; intermediates are recomputed
        # during backward.
        ctx.y = x.detach()
        ctx.blocks = blocks
        return x

    @staticmethod
    def backward(ctx, dy):
        y = ctx.y
        kwargs = ctx.kwargs
        # Walk the stack in reverse, reconstructing inputs and gradients.
        for block in ctx.blocks[::-1]:
            y, dy = block.backward_pass(y, dy, **kwargs)
        # Gradient only w.r.t. `x`; `blocks` and `kwargs` are non-tensor args.
        return dy, None, None
# Cell
class ReversibleSequence(Module):
    """
    Stack of ReversibleBlocks built from `blocks`. Falls back to plain-autograd
    IrreversibleBlocks when the sequence length is <= `rev_thres`.
    """
    def __init__(self, blocks, rev_thres = 0, send_signal = False):
        self.rev_thres = rev_thres  # below/at this seq length, use irreversible blocks
        self.blocks = nn.ModuleList([ReversibleBlock(f, g, depth, send_signal)
                                     for depth, (f, g) in enumerate(blocks)])
        self.irrev_blocks = nn.ModuleList([IrreversibleBlock(f=f, g=g) for f, g in blocks])

    def forward(self, x, arg_route = (True, True), **kwargs):
        use_reversible = x.shape[1] > self.rev_thres
        # arg_route decides whether kwargs reach the f (attention) and/or
        # g (feed-forward) halves of each block.
        f_args = kwargs if arg_route[0] else {}
        g_args = kwargs if arg_route[1] else {}
        block_kwargs = {'f_args': f_args, 'g_args': g_args}
        if use_reversible:
            return _ReversibleFunction.apply(x, self.blocks, block_kwargs)
        out = x
        for blk in self.irrev_blocks:
            out = blk(out, **block_kwargs)
        return out
# Cell
class ReversibleEncoder(Module):
    "Stack of reversible attention / feed-forward blocks"
    def __init__(self,
                 d_model:int,
                 n_layers:int=6,
                 n_heads:int=8,
                 max_seq_len:int=512,
                 ff_chunks:int=1,
                 causal:bool=False,
                 attn_dropout:float=0.,
                 post_attn_dropout:float=None,
                 attn_bias:bool=False,
                 ff_dropout:float=0.,
                 d_ff:int=None,
                 prenorm:bool=True,
                 final_norm:Module=None,
                 rev_thres:int=0):
        wrap = PreNorm if prenorm else PostNorm
        blocks = []
        for _ in range(n_layers):
            attn = Attention(d_model, n_heads, causal=causal, dropout=attn_dropout,
                             out_dropout=post_attn_dropout, bias=attn_bias)
            ff = ChunkedFeedForward(d_model, d_ff, n_chunks=ff_chunks,
                                    dropout=ff_dropout, dim=1)
            blocks.append(nn.ModuleList([wrap(d_model, attn), wrap(d_model, ff)]))
        self.norm = final_norm(d_model) if exists(final_norm) else None
        self.layers = ReversibleSequence(nn.ModuleList(blocks), rev_thres=rev_thres, send_signal=False)

    def forward(self, x, **kwargs):
        # Reversible blocks operate on a duplicated [x; x] feature split.
        x = torch.cat([x, x], dim=-1)
        # NOTE(review): arg_route=(False, False) drops all kwargs (including any
        # `mask`) before they reach the attention layers — confirm this is intended.
        x = self.layers(x, arg_route=(False, False), **kwargs)
        # Merge the two halves back by averaging.
        x = torch.stack(x.chunk(2, dim=-1)).mean(dim=0)
        return self.norm(x) if exists(self.norm) else x
# Cell
class ReversibleDecoder(Module):
    "Stack of reversible blocks using causal AdditiveAttention"
    def __init__(self,
                 d_model,
                 n_layers = 6,
                 heads = 8,
                 max_seq_len = 512,
                 d_head = None,
                 bucket_size = 64,
                 n_hashes = 8,
                 ff_chunks = 1,
                 attn_chunks = None, # ??
                 attn_dropout = 0.,
                 post_attn_dropout = None,
                 attn_bias:bool=False,
                 ff_dropout = 0.,
                 d_ff = None,
                 prenorm=True,
                 final_norm:Module=None,
                 rev_thres = 0,
                 ):
        store_attr('d_model,n_layers')
        wrap = PreNorm if prenorm else PostNorm
        blocks = []
        for _ in range(n_layers):
            attn = AdditiveAttention(d_model, heads, causal=True, dropout=attn_dropout,
                                     out_dropout=post_attn_dropout, bias=attn_bias)
            ff = ChunkedFeedForward(d_model, d_ff, n_chunks=ff_chunks,
                                    dropout=ff_dropout, dim=1)
            blocks.append(nn.ModuleList([wrap(d_model, attn), wrap(d_model, ff)]))
        self.norm = final_norm(d_model) if exists(final_norm) else None
        # send_signal is not implemented for now
        self.layers = ReversibleSequence(nn.ModuleList(blocks), rev_thres=rev_thres, send_signal=False)

    def forward(self, x, **kwargs):
        # Reversible blocks operate on a duplicated [x; x] feature split.
        x = torch.cat([x, x], dim=-1)
        # Route kwargs (context, masks) to the attention half only.
        x = self.layers(x, arg_route=(True, False), **kwargs)
        x = torch.stack(x.chunk(2, dim=-1)).mean(dim=0)
        return self.norm(x) if exists(self.norm) else x
# Cell
class ReversibleLM(Module, LMMixin):
    """
    Reversible Transformer language model.

    Parameters:
        * vocab_sz: int - vocabulary size
        * d_model: int - inner dimension of the model
        * n_layers: int (default: 6)
        * n_heads: int (default: 8)
        * d_ff: int - inner dimension of the pointwise FeedForward net; defaults to 4*d_model
        * ff_chunks: int - number of chunks for FeedForward layer computation
        * attn_dropout: float - attention dropout
        * ff_dropout: float - feed-forward dropout
        * emb_dropout: float - embedding dropout
        * tie_weights: bool - if True the token embedding matrix is reused for the output projection
        * causal: bool (default: True) - if True applies causal masking automatically
        * pos_enc: str from {'absolute', 'fixed', 'axial'} - type of positional encoding
        * max_seq_len: int (default: 512)
        * axial_shape: tuple - required if 'axial' positional encoding is used; factors of max_seq_len
        * axial_emb_dims: tuple - [optional] axial embedding components, should sum to d_model
        * pad_idx: int - padding token id, required for autogeneration of padding mask
        * prenorm: bool - whether to use PreNorm or PostNorm
        * attn_bias: bool - if True attention projection layers have bias
        * rev_thres: int - irreversible blocks are used when seq_len is at or below this threshold
    Inputs:
        * x - input ids, shape [bs, sl]
        * mask - optional boolean mask, shape [bs, sl]
    Returns:
        * logits - target token logits, shape [bs, sl, vocab_sz]
    """
    def __init__(self,
                 vocab_sz:int,
                 d_model:int,
                 n_layers:int=6,
                 n_heads:int=8,
                 d_ff:int=None,
                 ff_chunks:int=1,
                 attn_dropout:float=0.1,
                 ff_dropout:float=0.1,
                 emb_dropout:float=0.1,
                 tie_weights:bool=True,
                 causal:bool=True,
                 pos_enc:str='absolute',
                 max_seq_len:int=512,
                 axial_shape=None,
                 axial_emb_dims=None,
                 pad_idx:int=None,
                 prenorm:bool=True,
                 attn_bias:bool=False,
                 rev_thres:int=0):
        store_attr()
        self.emb = TransformerEmbedding(vocab_sz, d_model, max_seq_len, dropout=emb_dropout,
                                        pos_enc=pos_enc, axial_shape=axial_shape,
                                        axial_emb_dims=axial_emb_dims)
        self.encoder = ReversibleEncoder(d_model, n_layers, n_heads, causal=causal, d_ff=d_ff,
                                         attn_dropout=attn_dropout, ff_dropout=ff_dropout,
                                         prenorm=prenorm, attn_bias=attn_bias, ff_chunks=ff_chunks,
                                         final_norm=nn.LayerNorm, rev_thres=rev_thres)
        self.proj = nn.Linear(d_model, vocab_sz)
        # Weight tying: reuse the embedding matrix for the output projection.
        if tie_weights: self.proj.weight = self.emb.emb.weight

    def forward(self, x, mask=None):
        return self.proj(self.encoder(self.emb(x), mask=mask))
# Cell
#TODO test weight tying
# Note on weight tying: it's done like here in fastai AWD_LSTM model
# Lucidrains does it with custom MatrixMultiply module https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reformer_pytorch.py#L106
#TODO: update docstrings
class ReversibleTransformer(Module):
    """
    Reversible Transformer encoder-decoder model
    Parameters:
        * enc_vocab_sz: int - source vocab size
        * dec_vocab_sz: int - target vocab size
        * d_model: int - inner dimension of the model
        * n_layers: int (default: 6) - used for both encoder and decoder unless
            n_enc_layers / n_dec_layers are provided
        * n_enc_layers: int - overrides n_layers for the encoder
        * n_dec_layers: int - overrides n_layers for the decoder
        * n_heads: int (default: 8)
        * d_ff: int - inner dimension of the pointwise FeedForward net, if None defaults to 4*d_model
        * ff_chunks: int - number of chunks for FeedForward layer computation
        * attn_dropout: float - attention dropout
        * ff_dropout: float - feed-forward dropout
        * emb_dropout: float - embedding dropout
        * max_seq_len: int (default: 512)
        * prenorm: bool - whether to use PreNorm or PostNorm
        * attn_bias: bool - whether to allow biases in attention projection layers
        * comb_attn: bool - stored via store_attr but not used below (TODO confirm intent)
        * pad_idx: int - padding token id, if pad_idx is provided, and no mask/context_mask are
            passed to forward method will be used to generate padding masks
        * tie_weights: bool - if True target embedding weights are used for computation output projection
        * shared_emb: bool - if True encoder and decoder will use shared embedding layer
        * pos_enc: str from {'absolute', 'fixed', 'axial'} - type of positional encoding to use
        * axial_shape: tuple - required if 'axial' positional encoding are used, should be factors of
            max_seq_len
        * axial_emb_dims: tuple - [optional] axial embedding components, should sum to d_model
    Inputs:
        * src - source input ids, shape [bs, src_sl]
        * tgt - target input ids, shape [bs, tgt_sl]
        * src_mask - optional boolean source mask, shape [bs, src_sl]
        * tgt_mask - optional boolean target mask, shape [bs, tgt_sl]
    Returns:
        * logits - target token logits, shape [bs, tgt_sl, tgt_vocab_sz]
    """
    def __init__(self,
                 enc_vocab_sz,
                 dec_vocab_sz,
                 d_model,
                 n_layers:int=6,
                 n_enc_layers=None,
                 n_dec_layers=None,
                 n_heads=8,
                 d_ff=None,
                 ff_chunks:int=1,
                 pad_idx=None,
                 tie_weights=True,
                 shared_emb = False,
                 attn_dropout=0.1,
                 ff_dropout=0.1,
                 emb_dropout=0.1,
                 prenorm=True,
                 attn_bias=False,
                 comb_attn=False,
                 pos_enc='absolute',
                 max_seq_len=512,
                 axial_shape=None,
                 axial_emb_dims=None):
        store_attr()
        # Per-side layer counts default to n_layers when not given explicitly.
        n_enc_layers = ifnone(n_enc_layers, n_layers)
        n_dec_layers = ifnone(n_dec_layers, n_layers)
        self.enc_emb = TransformerEmbedding(enc_vocab_sz, d_model, max_seq_len, dropout=emb_dropout, pos_enc=pos_enc,
                                            axial_shape=axial_shape, axial_emb_dims=axial_emb_dims)
        if shared_emb:
            # Shared source/target embedding requires identical vocabularies.
            assert (enc_vocab_sz == dec_vocab_sz), "Encoder and decoder vocab size doesn't match"
            self.dec_emb = self.enc_emb
        else:
            self.dec_emb = TransformerEmbedding(dec_vocab_sz, d_model, max_seq_len, dropout=emb_dropout, pos_enc=pos_enc,
                                                axial_shape=axial_shape, axial_emb_dims=axial_emb_dims)
        self.encoder = ReversibleEncoder(d_model, n_enc_layers, n_heads, d_ff=d_ff, attn_dropout=attn_dropout,
                                         ff_dropout=ff_dropout, prenorm=prenorm, attn_bias=attn_bias,
                                         final_norm=nn.LayerNorm, causal=False, ff_chunks=ff_chunks)
        self.decoder = ReversibleDecoder(d_model, n_dec_layers, n_heads, d_ff=d_ff, attn_dropout=attn_dropout,
                                         ff_dropout=ff_dropout, prenorm=prenorm, attn_bias=attn_bias,
                                         final_norm=nn.LayerNorm, ff_chunks=ff_chunks)
        self.proj = nn.Linear(d_model, dec_vocab_sz)
        # Weight tying: reuse the target embedding matrix for the output projection.
        if tie_weights: self.proj.weight = self.dec_emb.emb.weight

    def forward(self, src, tgt, src_mask=None, tgt_mask=None):
        # Auto-generate padding masks from pad_idx when masks aren't provided.
        src_mask = default(src_mask, self.get_padding_mask(src))
        tgt_mask = default(tgt_mask, self.get_padding_mask(tgt))
        enc = self.encoder(self.enc_emb(src), mask=src_mask)
        out = self.decoder(self.dec_emb(tgt), context=enc, mask=tgt_mask, context_mask=src_mask)
        return self.proj(out)

    def get_padding_mask(self, x):
        """Boolean mask that is True at non-padding positions; None if pad_idx unset."""
        if self.pad_idx is None: return None
        return (x != self.pad_idx)
# Cell
class LSHEncoderBlock(Module):
    "Single encoder block: ReformerAttentionV2 followed by a feed-forward net"
    def __init__(self,
                 d_model:int,
                 n_heads:int = 8,
                 d_ff:int = None,
                 attn_dropout:float = 0.1,
                 ff_dropout:float = 0.1,
                 causal:bool = False,
                 attn_bias:bool = False,
                 prenorm:bool=False,
                 use_lsh:bool=True,
                 n_hashes:int=8,
                 bucket_size:int=64,
                 seed:int=None):
        store_attr('attn_dropout') # mb separate argument attn_post_dropout
        attn = ReformerAttentionV2(d_model, n_heads=n_heads, causal=causal,
                                   dropout=attn_dropout, bias=attn_bias, use_lsh=use_lsh,
                                   n_hashes=n_hashes, bucket_size=bucket_size, seed=seed)
        ff = FeedForward(d_model, d_ff=d_ff, dropout=ff_dropout)
        # Norm placement differs: PreNorm normalizes inside the residual branch,
        # PostNorm normalizes after the residual sum.
        if prenorm:
            self.attn = Residual(PreNorm(d_model, attn))
            self.ff = Residual(PreNorm(d_model, ff))
        else:
            self.attn = PostNorm(d_model, Residual(attn))
            self.ff = PostNorm(d_model, Residual(ff))

    def forward(self, x, mask=None):
        return self.ff(self.attn(x, mask=mask))
# Cell
class LSHEncoder(Module):
    "Stack of `n_layers` LSHEncoderBlocks with an optional final norm"
    def __init__(self,
                 d_model,
                 n_layers=6,
                 n_heads=8,
                 d_ff=None,
                 ff_dropout=0.1,
                 attn_dropout=0.1,
                 attn_bias=False,
                 causal=False,
                 prenorm=False,
                 use_lsh:bool=True,
                 final_norm=None,
                 n_hashes:int=8,
                 bucket_size:int=64,
                 seed:int=None):
        store_attr('d_model')
        def make_block():
            return LSHEncoderBlock(d_model, n_heads, causal=causal,
                                   d_ff=d_ff, attn_dropout=attn_dropout, ff_dropout=ff_dropout,
                                   prenorm=prenorm, attn_bias=attn_bias, use_lsh=use_lsh,
                                   n_hashes=n_hashes, bucket_size=bucket_size, seed=seed)
        self.layers = nn.ModuleList([make_block() for _ in range(n_layers)])
        self.norm = final_norm(d_model) if final_norm is not None else None

    def forward(self, x, mask=None):
        for blk in self.layers:
            x = blk(x, mask=mask)
        return x if self.norm is None else self.norm(x)
# Cell
class LSHLM(Module, LMMixin):
    """
    Transformer for language modelling with LSH attention
    Parameters:
        * vocab_sz: int
        * d_model: int - inner dimension of the model
        * n_layers: int (default: 6)
        * n_heads: int (default: 8)
        * d_ff: int - inner dimension of the pointwise FeedForward net, if None defaults to 4*d_model
        * attn_dropout: float - attention dropout
        * ff_dropout: float - feed-forward dropout
        * emb_dropout: float - embedding dropout
        * causal: bool (default: True) - if True does causal masking automatically
        * max_seq_len: int (default: 512)
        * tie_weights: bool - if True target embedding weights are used for computation output projection
        * prenorm: bool - whether to use PreNorm or PostNorm
        * attn_bias: bool - whether to allow biases in attention projection layers
        * pad_idx: int - padding token id, required for autogeneration of padding mask
        * pos_enc: str from {'absolute', 'fixed', 'axial'} - type of positional encoding to use
        * axial_shape: tuple - required if 'axial' positional encoding are used, should be factors of
            max_seq_len
        * axial_emb_dims: tuple - [optional] axial embedding components, should sum to d_model
        * use_lsh: bool - parameter to switch between LSH and full attention
        * n_hashes: int - number of hashing rounds for LSH
        * bucket_size: int - input sequence length should be divisible by 2*bucket_size
        * seed: int - for LSHAttention module
    Inputs:
        * x - input ids, shape [bs, sl]
        * mask - optional boolean mask, shape [bs, sl]
    Returns:
        * logits - target token logits, shape [bs, sl, vocab_sz]
    """
    def __init__(self,
                 vocab_sz:int,
                 d_model:int,
                 n_layers:int=6,
                 n_heads:int=8,
                 d_ff:int=None,
                 attn_dropout:float=0.1,
                 ff_dropout:float=0.1,
                 emb_dropout:float=0.1,
                 tie_weights:bool=True,
                 causal:bool=True,
                 pos_enc:str='absolute',
                 max_seq_len:int=512,
                 axial_shape:tuple=None,
                 axial_emb_dims:tuple=None,
                 pad_idx:int=None,
                 prenorm:bool=False,
                 attn_bias:bool=False,
                 use_lsh:bool=True,
                 n_hashes:int=8,
                 bucket_size:int=64,
                 seed:int=None):
        store_attr()
        # Private backing fields for the `use_lsh` / `n_hashes` properties below.
        self._use_lsh = use_lsh
        self._n_hashes = n_hashes
        self.emb = TransformerEmbedding(vocab_sz, d_model, max_seq_len, dropout=emb_dropout,
                                        pos_enc=pos_enc, axial_shape=axial_shape,
                                        axial_emb_dims=axial_emb_dims)
        # With PreNorm blocks a final LayerNorm is applied after the stack.
        final_norm = nn.LayerNorm if prenorm else None
        self.encoder = LSHEncoder(d_model, n_layers, n_heads, causal=causal, d_ff=d_ff,
                                  attn_dropout=attn_dropout, ff_dropout=ff_dropout,
                                  prenorm=prenorm, attn_bias=attn_bias, use_lsh=use_lsh,
                                  final_norm=final_norm, n_hashes=n_hashes, bucket_size=bucket_size,
                                  seed=seed)
        self.proj = nn.Linear(d_model, vocab_sz)
        # Weight tying: reuse the embedding matrix for the output projection.
        if tie_weights: self.proj.weight = self.emb.emb.weight

    def forward(self, x, mask=None):
        x = self.emb(x)
        x = self.encoder(x, mask=mask)
        return self.proj(x)

    @property
    def use_lsh(self):
        """Whether LSH attention is active (mirrors the attention submodules)."""
        return self._use_lsh

    @use_lsh.setter
    def use_lsh(self, val):
        self._use_lsh = val
        # Propagate to every submodule exposing `use_lsh`. Iterate children()
        # rather than self.modules() so `self` is skipped — assigning to this
        # object's own `use_lsh` would re-enter this setter.
        for c in self.children():
            for m in c.modules():
                if hasattr(m, 'use_lsh'): m.use_lsh=val

    @property
    def n_hashes(self):
        """Number of LSH hashing rounds (mirrors the attention submodules)."""
        return self._n_hashes

    @n_hashes.setter
    def n_hashes(self, val):
        self._n_hashes = val
        # Same propagation pattern as `use_lsh` above: skip `self` to avoid
        # re-entering this setter.
        for c in self.children():
            for m in c.modules():
                if hasattr(m, 'n_hashes'): m.n_hashes=val
# Cell
class ReformerEncoder(Module):
    "Stack of reversible blocks with ReformerAttentionV2 (LSH or full shared-QK attention)"
    def __init__(self,
                 d_model:int,
                 n_layers:int=6,
                 n_heads:int=8,
                 max_seq_len:int=512,
                 ff_chunks:int=1,
                 causal:bool=False,
                 attn_dropout:float=0.,
                 post_attn_dropout:float=None,
                 attn_bias:bool=False,
                 ff_dropout:float=0.,
                 d_ff:int=None,
                 prenorm:bool=True,
                 final_norm:Module=None,
                 rev_thres:int=0,
                 use_lsh:bool=True,
                 n_hashes:int=8,
                 bucket_size:int=64,
                 seed:int=None):
        wrap = PreNorm if prenorm else PostNorm
        blocks = []
        for _ in range(n_layers):
            attn = ReformerAttentionV2(d_model, n_heads=n_heads, causal=causal, dropout=attn_dropout,
                                       bias=attn_bias, use_lsh=use_lsh, n_hashes=n_hashes,
                                       bucket_size=bucket_size, seed=seed)
            ff = ChunkedFeedForward(d_model, d_ff, n_chunks=ff_chunks, dropout=ff_dropout, dim=1)
            blocks.append(nn.ModuleList([wrap(d_model, attn), wrap(d_model, ff)]))
        self.norm = final_norm(d_model) if exists(final_norm) else None
        # send_signal=True: blocks pass `_depth`/`_reverse` kwargs down to the
        # wrapped modules (presumably consumed by the attention layers — confirm).
        self.layers = ReversibleSequence(nn.ModuleList(blocks), rev_thres=rev_thres, send_signal=True)

    def forward(self, x, **kwargs):
        # Reversible blocks operate on a duplicated [x; x] feature split.
        x = torch.cat([x, x], dim=-1)
        # Route kwargs (e.g. mask) to the attention half only.
        x = self.layers(x, arg_route=(True, False), **kwargs)
        x = torch.stack(x.chunk(2, dim=-1)).mean(dim=0)
        return self.norm(x) if exists(self.norm) else x
# Cell
class ReformerLM(Module, LMMixin):
    """
    Reformer for language modelling. Uses LSH or full sharedQK attention
    Parameters:
        * vocab_sz: int
        * d_model: int - inner dimension of the model
        * n_layers: int (default: 6)
        * n_heads: int (default: 8)
        * d_ff: int - inner dimension of the pointwise FeedForward net, if None defaults to 4*d_model
        * ff_chunks: int - number of chunks for FeedForward layer computation
        * attn_dropout: float - attention dropout
        * ff_dropout: float - feed-forward dropout
        * emb_dropout: float - embedding dropout
        * causal: bool (default: True) - if True does causal masking automatically
        * max_seq_len: int (default: 512)
        * tie_weights: bool - if True target embedding weights are used for computation output projection
        * prenorm: bool - whether to use PreNorm or PostNorm
        * attn_bias: bool - whether to allow biases in attention projection layers
        * pad_idx: int - padding token id, required for autogeneration of padding mask
        * pos_enc: str from {'absolute', 'fixed', 'axial'} - type of positional encoding to use
        * axial_shape: tuple - required if 'axial' positional encoding are used, should be factors of
            max_seq_len
        * axial_emb_dims: tuple - [optional] axial embedding components, should sum to d_model
        * rev_thres: int - if (seq_len < rev_thres) applies irreversible blocks
        * use_lsh: bool - parameter to switch between LSH and full attention
        * n_hashes: int - number of hashing rounds for LSH
        * bucket_size: int - input sequence length should be divisible by 2*bucket_size
        * seed: int - for LSHAttention module
    Inputs:
        * x - input ids, shape [bs, sl]
        * mask - optional boolean mask, shape [bs, sl]
    Returns:
        * logits - target token logits, shape [bs, sl, vocab_sz]
    """
    def __init__(self,
                 vocab_sz:int,
                 d_model:int,
                 n_layers:int=6,
                 n_heads:int=8,
                 d_ff:int=None,
                 ff_chunks:int=1,
                 attn_dropout:float=0.1,
                 ff_dropout:float=0.1,
                 emb_dropout:float=0.1,
                 tie_weights:bool=True,
                 causal:bool=True,
                 pos_enc:str='axial',
                 max_seq_len:int=512,
                 axial_shape:tuple=None,
                 axial_emb_dims:tuple=None,
                 pad_idx:int=None,
                 prenorm:bool=True,
                 attn_bias:bool=False,
                 use_lsh:bool=True,
                 n_hashes:int=8,
                 bucket_size:int=64,
                 rev_thres:int=0,
                 seed:int=None):
        store_attr()
        # Private backing fields for the `use_lsh` / `n_hashes` properties below.
        self._use_lsh = use_lsh
        self._n_hashes = n_hashes
        self.emb = TransformerEmbedding(vocab_sz, d_model, max_seq_len, dropout=emb_dropout,
                                        pos_enc=pos_enc, axial_shape=axial_shape,
                                        axial_emb_dims=axial_emb_dims)
        # With PreNorm blocks a final LayerNorm is applied after the stack.
        final_norm = nn.LayerNorm if prenorm else None
        self.encoder = ReformerEncoder(d_model, n_layers, n_heads, causal=causal, d_ff=d_ff,
                                       attn_dropout=attn_dropout, ff_dropout=ff_dropout,
                                       prenorm=prenorm, attn_bias=attn_bias, use_lsh=use_lsh,
                                       final_norm=final_norm, n_hashes=n_hashes, bucket_size=bucket_size,
                                       ff_chunks=ff_chunks, rev_thres=rev_thres, seed=seed)
        self.proj = nn.Linear(d_model, vocab_sz)
        # Weight tying: reuse the embedding matrix for the output projection.
        if tie_weights: self.proj.weight = self.emb.emb.weight

    def forward(self, x, mask=None):
        x = self.emb(x)
        x = self.encoder(x, mask=mask)
        return self.proj(x)

    @property
    def use_lsh(self):
        """Whether LSH attention is active (mirrors the attention submodules)."""
        return self._use_lsh

    @use_lsh.setter
    def use_lsh(self, val):
        self._use_lsh = val
        # Propagate to every submodule exposing `use_lsh`. Iterate children()
        # rather than self.modules() so `self` is skipped — assigning to this
        # object's own `use_lsh` would re-enter this setter.
        for c in self.children():
            for m in c.modules():
                if hasattr(m, 'use_lsh'): m.use_lsh=val

    @property
    def n_hashes(self):
        """Number of LSH hashing rounds (mirrors the attention submodules)."""
        return self._n_hashes

    @n_hashes.setter
    def n_hashes(self, val):
        self._n_hashes = val
        # Same propagation pattern as `use_lsh` above: skip `self` to avoid
        # re-entering this setter.
        for c in self.children():
            for m in c.modules():
                if hasattr(m, 'n_hashes'): m.n_hashes=val
# Cell
def reformer_lm_splits(model):
    "Splits ReformerLM `model` into groups for differential learning rates."
    # One group per layer block, plus the embedding and projection at the ends.
    groups = L([model.emb, *model.encoder.layers.blocks, model.proj])
    return groups.map(params)
# Cell
MODELS = (LSHLM, ReversibleLM, ReversibleTransformer, ReformerLM)

# fastai's `@patch` with `cls_method=True` attaches `from_config` as a
# classmethod on every class named in the `cls:MODELS` annotation.
@patch(cls_method=True)
def from_config(cls:MODELS, config):
    "Build the model from a config object by expanding `config.dict()` as kwargs."
    return cls(**config.dict())
import torch
import torch.nn as nn
from reformer_lm.gelu import GeLU
class RevNetBlock(nn.Module):
    """Reversible residual block: forward returns (x2, y1) with y1 = x1 + F(x2).

    Args:
        d_in, d_out: dimensions used to build the default bottleneck layers.
        dropout: stored for API compatibility (not used in the default layer stack).
        lol: optional list of layers to use as the bottleneck instead of the default.
             Fixed: previously defaulted to a mutable `[]`, which is shared across
             all calls (classic Python pitfall); now defaults to None.
    """
    def __init__(self, d_in, d_out, dropout=0.1, lol=None):
        super(RevNetBlock, self).__init__()
        self.d_in = d_in
        self.d_out = d_out
        self.dropout = dropout
        if not lol:
            # NOTE(review): the second Linear maps d_in -> d_out although its
            # input has d_out features; this only works when d_in == d_out —
            # confirm intent before changing.
            layers = [
                nn.LayerNorm((d_in, d_out)),
                nn.Linear(d_in, d_out),
                GeLU(),
                nn.Linear(d_in, d_out),
            ]
        else:
            layers = list(lol)
        self.bottleneck_block = nn.Sequential(*layers)

    def forward(self, x):
        # Duplicate the input along dim 1 and split, so x1 == x2 == x.
        x = torch.cat((x, x), dim=1)
        x1, x2 = self.split(x)
        Fx2 = self.bottleneck_block(x2)
        y1 = Fx2 + x1
        return (x2, y1)

    def inverse(self, x):
        # Recover (x1, x2) from (x2, y1): x1 = y1 - F(x2).
        x2, y1 = x[0], x[1]
        Fx2 = -self.bottleneck_block(x2)
        x1 = Fx2 + y1
        return (x1, x2)

    @staticmethod
    def split(x):
        # Split along dim 1 into two contiguous halves.
        n = int(x.size()[1] / 2)
        x1 = x[:, :n].contiguous()
        x2 = x[:, n:].contiguous()
        return (x1, x2)
class RevNetHalfAttnBlock(nn.Module):
    """Reversible residual block (d_out-sized bottleneck): forward returns
    (x2, y1) with y1 = x1 + F(x2).

    Args:
        d_in, d_out: dimensions used to build the default bottleneck layers.
        dropout: stored for API compatibility (not used in the default layer stack).
        lol: optional list of layers to use as the bottleneck instead of the default.
             Fixed: previously defaulted to a mutable `[]`, which is shared across
             all calls (classic Python pitfall); now defaults to None.
    """
    def __init__(self, d_in, d_out, dropout=0.1, lol=None):
        super(RevNetHalfAttnBlock, self).__init__()
        self.d_in = d_in
        self.d_out = d_out
        self.dropout = dropout
        if not lol:
            layers = [
                nn.LayerNorm((d_in, d_out)),
                nn.Linear(d_out, d_out),
                GeLU(),
                nn.Linear(d_out, d_out),
            ]
        else:
            layers = list(lol)
        self.bottleneck_block = nn.Sequential(*layers)

    def forward(self, x):
        # Duplicate the input along dim 1 and split, so x1 == x2 == x.
        x = torch.cat((x, x), dim=1)
        x1, x2 = self.split(x)
        Fx2 = self.bottleneck_block(x2)
        y1 = Fx2 + x1
        return (x2, y1)

    def inverse(self, x):
        # Recover (x1, x2) from (x2, y1): x1 = y1 - F(x2).
        x2, y1 = x[0], x[1]
        Fx2 = -self.bottleneck_block(x2)
        x1 = Fx2 + y1
        return (x1, x2)

    @staticmethod
    def split(x):
        # Split along dim 1 into two contiguous halves.
        n = int(x.size()[1] / 2)
        x1 = x[:, :n].contiguous()
        x2 = x[:, n:].contiguous()
        return (x1, x2)
import torch.nn as nn
from .decoder import DecoderBlock
from .broadcasted_dropout import BroadcastedDropout
from .gelu import GeLU
class ReformerLM(nn.Module):
    """Reformer language model: a stack of DecoderBlocks followed by a
    flatten-and-project feed-forward head over the vocabulary.

    NOTE(review): `max_len`, `n_chunks`, `axial_pos_shape` and
    `d_axial_pos_embs` are stored but never used below — confirm intent.
    """
    def __init__(
        self,
        vocab_size,
        d_in,
        d_out,
        attn_k=64,
        attn_v=64,
        n_layers=6,
        n_heads=1,
        dropout=0.1,
        max_len=2048,
        n_chunks=2,
        n_attention_chunks=2,
        share_qk=True,
        axial_pos_shape=(),
        d_axial_pos_embs=None,
        mode="train",
    ):
        super(ReformerLM, self).__init__()
        self.vocab_size = vocab_size
        self.d_in = d_in
        self.d_out = d_out
        self.attn_k = attn_k
        self.attn_v = attn_v
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.max_len = max_len
        self.n_chunks = n_chunks
        self.n_attention_chunks = n_attention_chunks
        self.share_qk = share_qk
        self.axial_pos_shape = axial_pos_shape
        self.d_axial_pos_embs = d_axial_pos_embs
        self.mode = mode
        self.layers = []
        # First block maps d_in -> d_out; the remaining blocks stay at d_out.
        self.layers.append(
            DecoderBlock(
                d_in=self.d_in,
                d_out=self.d_out,
                attn_k=self.attn_k,
                attn_v=self.attn_v,
                n_heads=self.n_heads,
                n_chunks=self.n_attention_chunks,
                share_qk=self.share_qk,
                attn_type=None,
                dropout=self.dropout,
            )
        )
        for layer in range(self.n_layers - 1):
            self.layers.append(
                DecoderBlock(
                    d_in=self.d_out,
                    d_out=self.d_out,
                    attn_k=self.attn_k,
                    attn_v=self.attn_v,
                    n_heads=self.n_heads,
                    n_chunks=self.n_attention_chunks,
                    share_qk=self.share_qk,
                    attn_type=None,
                    dropout=self.dropout,
                )
            )
        # Head: normalize and project the flattened (1, d_out * d_in) features
        # to the vocabulary.
        # NOTE(review): LogSoftmax(dim=0) normalizes over the first (batch)
        # dimension rather than the vocab dimension — confirm this is intended.
        self.ff_layers = [
            nn.LayerNorm((1, self.d_out * self.d_in)),
            nn.Linear(self.d_out * self.d_in, self.d_out * self.d_in),
            BroadcastedDropout(rate=self.dropout, mode=self.mode),
            GeLU(),
            nn.Linear(self.d_out * self.d_in, self.vocab_size),
            nn.LogSoftmax(dim=0),
        ]
        self.model = nn.Sequential(*self.layers)
        self.ff_model = nn.Sequential(*self.ff_layers)

    def forward(self, x):
        x = self.model(x)
        # Flatten all features into a single (bs, 1, -1) vector for the head.
        x = x.view(x.shape[0], 1, -1)
        x = self.ff_model(x)
        return x
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from reformer_lm.attention import ComputeAttentionHeads, ComputeAttentionOutput
from reformer_lm.chunk import Unchunk
from reformer_lm.broadcasted_dropout import BroadcastedDropout
class DecoderBlock(nn.Module):
    """Decoder block: QKV projection, causal dot-product attention, and an
    output/unchunk/dropout post-processing stage.

    NOTE(review): the projection/norm/output sub-modules are constructed fresh
    inside `pre_attention`/`post_attention` on every forward call, so they get
    new random weights each call and are never registered with the module for
    training — confirm whether this is intended.
    """
    def __init__(
        self,
        d_in,
        d_out,
        attn_k=64,
        attn_v=64,
        n_heads=1,
        n_chunks=2,
        share_qk=True,
        attn_type=None,
        dropout=None,
        ff_activation=None,
        ff_use_sru=None,
        mode="train",
    ):
        super(DecoderBlock, self).__init__()
        self.d_in = d_in
        self.d_out = d_out
        self.attn_k = attn_k
        self.attn_v = attn_v
        self.n_heads = n_heads
        self.n_chunks = n_chunks
        self.attn_type = attn_type
        self.dropout = dropout
        self.share_qk = share_qk
        self.ff_activation = ff_activation
        self.ff_use_sru = ff_use_sru
        self.mode = mode

    def pre_attention(self, x):
        """Project the (chunked) input into (q, k, v)."""
        x1, x2 = torch.chunk(x, self.n_chunks)
        k_layers = [
            ComputeAttentionHeads(self.n_heads, self.attn_k),
            nn.LayerNorm((x.shape[1], x.shape[2])),
        ]
        k_model = nn.Sequential(*k_layers)
        v_layers = [
            ComputeAttentionHeads(self.n_heads, self.attn_v),
            nn.LayerNorm((x.shape[1], x.shape[2])),
        ]
        v_model = nn.Sequential(*v_layers)
        k = k_model(x1)
        v = v_model(x2)
        if not self.share_qk:
            # NOTE(review): q_layers aliases k_layers (same module objects), so
            # q is computed through the same layers as k and q == k even when
            # share_qk is False — confirm whether fresh layers were intended.
            q_layers = k_layers
            q_model = nn.Sequential(*q_layers)
            q = q_model(x1)
            return (q, k, v)
        else:
            return (k, k, v)

    def attention(self, inputs):
        """Apply causal dot-product attention to (q, k, v) or shared-QK (k, v)."""
        assert len(inputs) == 2 or len(inputs) == 3
        if len(inputs) == 2:
            k, v = inputs
            q = k
        else:
            q, k, v = inputs
        mask_size = q.shape[-2]
        # Lower-triangular (causal) mask: position i may attend to j <= i.
        mask = torch.tril(
            torch.ones((1, mask_size, mask_size), dtype=torch.bool), diagonal=0
        )
        attn = self.dotproductattention(q, k, v, mask)
        return attn

    def dotproductattention(self, q, k, v, mask, dropout=0.1):
        """Scaled dot-product attention with masking and elementwise dropout."""
        depth = q.shape[-1]
        dots = torch.matmul(q, k.transpose(-1, -2)) / np.sqrt(depth)
        # NOTE(review): log_softmax over dim=0 is used where softmax over the
        # key dimension is conventional — confirm this is intended.
        dots = F.log_softmax(
            torch.where(mask, dots, torch.full_like(dots, -1e9)), dim=0
        )
        # Inverted dropout via numpy; applied regardless of train/eval mode.
        keep_prob = 1 - dropout
        keep = np.random.binomial(n=1, p=keep_prob, size=dots.shape)
        dots = torch.where(
            torch.tensor(keep, dtype=torch.bool),
            dots / torch.tensor(keep_prob),
            torch.zeros_like(dots),
        )
        attn = torch.matmul(dots, v)
        return attn

    def post_attention(self, x):
        """Project attention heads back out, un-chunk, and apply dropout."""
        cao = ComputeAttentionOutput()
        unchunk = Unchunk(n_sections=self.n_chunks, dim=-2)
        bd = BroadcastedDropout(rate=self.dropout)
        res = cao(x)
        res = unchunk(res)
        res = bd(res)
        return res

    def forward(self, x):
        if not torch.is_tensor(x):
            x = torch.tensor(x, dtype=torch.float32)
        x = self.pre_attention(x)
        x = self.attention(x)
        x = self.post_attention(x)
        # Duplicate along dim 0, mirroring the chunking done in pre_attention.
        return torch.cat((x, x))
import math
import torch
from torch import nn
import torch.nn.functional as F
from reformer_pytorch.reformer_pytorch import Reformer, ReformerLM, LSHSelfAttention
def pad_to_multiple(tensor, seqlen, multiple, dim=-1):
    """Zero-pad `tensor` along `dim` so `seqlen` rounds up to a multiple.

    :param tensor: tensor to pad.
    :param seqlen: current logical sequence length along `dim`.
    :param multiple: target multiple (e.g. twice the LSH bucket size).
    :param dim: dimension to pad, as a negative index.
    :return: the original tensor if already aligned, else a padded copy.
    """
    # Exact integer arithmetic instead of float division + is_integer(),
    # which is both clearer and immune to float precision issues.
    remainder = -seqlen % multiple
    if remainder == 0:
        return tensor
    # F.pad takes (left, right) pairs starting from the LAST dimension, so
    # emit zero pairs until we reach `dim`.
    pad_offset = (0, 0) * (-1 - dim)
    return F.pad(tensor, (*pad_offset, 0, remainder), value=0)
class Autopadder(nn.Module):
    """Wraps LSHSelfAttention / Reformer / ReformerLM and transparently pads
    the input to a multiple of (2 * bucket_size) before forwarding, then
    slices the output back to the original sequence length.
    """
    def __init__(self, net):
        super().__init__()
        assert isinstance(net, (LSHSelfAttention, Reformer, ReformerLM)), 'only modules LSHSelfAttention, Reformer, ReformerLM accepted'
        self.net = net

        reformer = net.reformer if isinstance(net, ReformerLM) else net
        # ReformerLM takes token ids (pad the last dim); the others take
        # embeddings (pad the sequence dim, -2).
        self.pad_dim = -1 if isinstance(net, ReformerLM) else -2

        self.bucket_size = reformer.bucket_size
        self.num_mem_kv = reformer.num_mem_kv
        self.full_attn_thres = reformer.full_attn_thres

    def forward(self, x, **kwargs):
        # (the unpacked `device` is unused below; torch.full uses x.device)
        b, t, m, device = *x.shape[:2], self.num_mem_kv, x.device

        keys = kwargs.get('keys')
        input_mask = kwargs.get('input_mask')
        input_attn_mask = kwargs.get('input_attn_mask')

        # effective length includes memory key/values and any external keys
        k_len = 0 if keys is None else keys.shape[1]
        seqlen = t + m + k_len

        if seqlen > self.full_attn_thres:
            if input_mask is None:
                input_mask = torch.full((b, t), True, device=x.device, dtype=torch.bool)

            x = pad_to_multiple(x, seqlen, self.bucket_size * 2, dim=self.pad_dim)

            if input_mask is not None:
                # padded positions are masked out (False)
                new_mask = F.pad(input_mask, (0, x.shape[1] - input_mask.shape[1]), value=False)
                kwargs.update(input_mask=new_mask)

            if input_attn_mask is not None:
                # grow the square attention mask on both axes
                offset = x.shape[1] - input_attn_mask.shape[1]
                new_mask = F.pad(input_attn_mask, (0, offset, 0, offset), value=False)
                kwargs.update(input_attn_mask=new_mask)

        out = self.net(x, **kwargs)
        # slice away the padding so callers see the original length
        return out[:, 0:t]
from functools import partial
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from reformer_pytorch.reformer_pytorch import ReformerLM
from reformer_pytorch.autopadder import Autopadder
def top_p(logits, thres=0.9):
    """Nucleus (top-p) filtering: keep the smallest set of logits whose
    cumulative probability stays within (1 - thres); set the rest to -inf."""
    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
    cumulative = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

    remove_mask = cumulative > (1 - thres)
    # shift right so the first token crossing the threshold is still kept
    remove_mask[:, 1:] = remove_mask[:, :-1].clone()
    remove_mask[:, 0] = False

    filtered = sorted_logits.masked_fill(remove_mask, float('-inf'))
    # scatter the filtered values back into their original positions
    return filtered.scatter(1, sorted_indices, filtered)
def top_k(logits, thres=0.9):
    """Keep the top (1 - thres) fraction of logits; set the rest to -inf."""
    num_keep = int((1 - thres) * logits.shape[-1])
    values, indices = torch.topk(logits, num_keep)
    filtered = torch.full_like(logits, float('-inf'))
    return filtered.scatter_(1, indices, values)
class TrainingWrapper(nn.Module):
    """Convenience wrapper around ReformerLM adding autoregressive sampling
    (`generate`) and next-token cross-entropy training
    (`forward(..., return_loss=True)`).

    The wrapped net is placed behind an Autopadder so arbitrary sequence
    lengths can be fed in.
    """
    def __init__(self, net, ignore_index = -100, pad_value = 0):
        super().__init__()
        assert isinstance(net, ReformerLM), 'generative trainer wrapper can only accept ReformerLM class'
        self.pad_value = pad_value          # value used to pad variable-length batches
        self.ignore_index = ignore_index    # target index ignored by the loss
        self.net = Autopadder(net)
        self.max_seq_len = net.max_seq_len

    @torch.no_grad()
    def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
        """Sample up to `seq_len` new tokens autoregressively.

        :param start_tokens: prompt token ids, shape (t,) or (b, t).
        :param seq_len: number of tokens to generate.
        :param eos_token: stop early once every sequence emitted this token.
        :param temperature: softmax temperature applied after filtering.
        :param filter_logits_fn: logit filter (top_k or top_p).
        :param filter_thres: threshold passed to the filter function.
        :return: only the newly generated tokens (prompt excluded).
        """
        was_training = self.net.training
        num_dims = len(start_tokens.shape)

        if num_dims == 1:
            # promote a single sequence to a batch of one
            start_tokens = start_tokens[None, :]

        b, t = start_tokens.shape

        self.net.eval()
        out = start_tokens
        input_mask = kwargs.pop('input_mask', None)

        if input_mask is None:
            input_mask = torch.full_like(out, True, dtype=torch.bool, device=out.device)

        for _ in range(seq_len):
            # only the trailing max_seq_len tokens fit in the model window
            x = out[:, -self.max_seq_len:]
            input_mask = input_mask[:, -self.max_seq_len:]

            logits = self.net(x, input_mask=input_mask, **kwargs)[:, -1, :]
            filtered_logits = filter_logits_fn(logits, thres = filter_thres)
            probs = F.softmax(filtered_logits / temperature, dim=-1)
            sample = torch.multinomial(probs, 1)

            out = torch.cat((out, sample), dim=-1)
            input_mask = F.pad(input_mask, (0, 1), value=True)

            if eos_token is not None and (sample == eos_token).all():
                break

        out = out[:, t:]   # drop the prompt, keep only generated tokens

        if num_dims == 1:
            out = out.squeeze(0)

        self.net.train(was_training)   # restore the caller's train/eval mode
        return out

    def forward(self, x, return_loss = False, **kwargs):
        """Run the model; with return_loss=True, compute next-token
        cross-entropy using x shifted by one position.

        `x` may be a tensor or a list of variable-length tensors, which are
        padded with `pad_value`.
        """
        pad = partial(pad_sequence, batch_first = True, padding_value = self.pad_value)

        if not return_loss:
            if not isinstance(x, torch.Tensor):
                x = pad(x)
            return self.net(x, **kwargs)

        if isinstance(x, torch.Tensor):
            xi = x[:, :-1]   # inputs
            xo = x[:, 1:]    # next-token targets
        else:
            xi = pad(list(map(lambda t: t[:-1], x)))
            xo = pad(list(map(lambda t: t[1:], x)))

        out = self.net(xi, **kwargs)
        loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
        return loss
import torch
import torch.nn as nn
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(nn.Module):
    """Wraps a module so its forward pass can be replayed with identical RNG
    state, as required when recomputing activations during reversible
    backpropagation.
    """
    def __init__(self, net):
        super().__init__()
        self.net = net
        self.cpu_state = None      # CPU RNG state captured on the recorded pass
        self.cuda_in_fwd = None    # whether CUDA RNG state was captured
        self.gpu_devices = None
        self.gpu_states = None

    def record_rng(self, *args):
        # Snapshot CPU (and, if CUDA is initialized, per-device GPU) RNG state
        # for the devices holding *args.
        self.cpu_state = torch.get_rng_state()
        # NOTE(review): torch.cuda._initialized is a private attribute --
        # verify it still exists on the targeted torch version.
        if torch.cuda._initialized:
            self.cuda_in_fwd = True
            self.gpu_devices, self.gpu_states = get_device_states(*args)

    def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
        """Run the wrapped net.

        :param record_rng: capture the current RNG state before running.
        :param set_rng: replay the previously recorded RNG state (used on
            the recomputation pass so dropout etc. match the forward pass).
        """
        if record_rng:
            self.record_rng(*args)

        if not set_rng:
            return self.net(*args, **kwargs)

        rng_devices = []
        if self.cuda_in_fwd:
            rng_devices = self.gpu_devices

        # fork_rng isolates the replayed state so the caller's RNG stream
        # is left untouched
        with torch.random.fork_rng(devices=rng_devices, enabled=True):
            torch.set_rng_state(self.cpu_state)
            if self.cuda_in_fwd:
                set_device_states(self.gpu_devices, self.gpu_states)
            return self.net(*args, **kwargs)
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(nn.Module):
    """One reversible layer: y1 = x1 + f(x2), y2 = x2 + g(y1).

    Activations are not retained during forward; `backward_pass`
    reconstructs x1/x2 from y1/y2 and recomputes f/g with the RNG state
    recorded by the Deterministic wrappers.
    """
    def __init__(self, f, g, depth=None, send_signal = False):
        super().__init__()
        self.f = Deterministic(f)
        self.g = Deterministic(g)
        self.depth = depth
        self.send_signal = send_signal  # pass _reverse/_depth flags into f/g

    def forward(self, x, f_args = {}, g_args = {}):
        # split the feature dimension in half: x = [x1 | x2]
        x1, x2 = torch.chunk(x, 2, dim=2)
        y1, y2 = None, None

        if self.send_signal:
            f_args['_reverse'] = g_args['_reverse'] = False
            f_args['_depth'] = g_args['_depth'] = self.depth

        # no_grad: no autograd graph is kept; backward_pass recomputes it
        with torch.no_grad():
            y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
            y2 = x2 + self.g(y1, record_rng=self.training, **g_args)

        return torch.cat([y1, y2], dim=2)

    def backward_pass(self, y, dy, f_args = {}, g_args = {}):
        """Invert the block and backpropagate: given outputs y and their
        gradients dy, return the reconstructed inputs x and gradients dx.

        The statement order below is load-bearing: tensors are freed (del)
        as soon as they are no longer needed to keep peak memory flat.
        """
        y1, y2 = torch.chunk(y, 2, dim=2)
        del y

        dy1, dy2 = torch.chunk(dy, 2, dim=2)
        del dy

        if self.send_signal:
            f_args['_reverse'] = g_args['_reverse'] = True
            f_args['_depth'] = g_args['_depth'] = self.depth

        with torch.enable_grad():
            y1.requires_grad = True
            gy1 = self.g(y1, set_rng=True, **g_args)   # replay g with saved RNG
            torch.autograd.backward(gy1, dy2)

        with torch.no_grad():
            x2 = y2 - gy1          # invert: x2 = y2 - g(y1)
            del y2, gy1

            dx1 = dy1 + y1.grad    # add gradient that flowed through g
            del dy1
            y1.grad = None

        with torch.enable_grad():
            x2.requires_grad = True
            fx2 = self.f(x2, set_rng=True, **f_args)   # replay f with saved RNG
            torch.autograd.backward(fx2, dx1, retain_graph=True)

        with torch.no_grad():
            x1 = y1 - fx2          # invert: x1 = y1 - f(x2)
            del y1, fx2

            dx2 = dy2 + x2.grad
            del dy2
            x2.grad = None

            x = torch.cat([x1, x2.detach()], dim=2)
            dx = torch.cat([dx1, dx2], dim=2)

        return x, dx
class IrreversibleBlock(nn.Module):
    """Non-reversible counterpart of ReversibleBlock: y1 = x1 + f(x2),
    y2 = x2 + g(y1), with activations kept for ordinary backprop."""
    def __init__(self, f, g):
        super().__init__()
        self.f = f
        self.g = g

    def forward(self, x, f_args, g_args):
        half_a, half_b = torch.chunk(x, 2, dim=2)
        out_a = half_a + self.f(half_b, **f_args)
        out_b = half_b + self.g(out_a, **g_args)
        return torch.cat([out_a, out_b], dim=2)
class _ReversibleFunction(Function):
    """Custom autograd Function driving the reversible chain: forward runs
    the blocks without retaining a graph; backward walks them in reverse,
    reconstructing activations block by block."""
    @staticmethod
    def forward(ctx, x, blocks, kwargs):
        ctx.kwargs = kwargs
        for block in blocks:
            x = block(x, **kwargs)
        # stash the final output (detached) and the blocks for backward;
        # intermediate activations are deliberately NOT saved
        ctx.y = x.detach()
        ctx.blocks = blocks
        return x

    @staticmethod
    def backward(ctx, dy):
        y = ctx.y
        kwargs = ctx.kwargs
        for block in ctx.blocks[::-1]:
            # each step reconstructs the block's input and its gradient
            y, dy = block.backward_pass(y, dy, **kwargs)
        # gradient only w.r.t. x; blocks and kwargs are non-tensor args
        return dy, None, None
class ReversibleSequence(nn.Module):
    """Stack of reversible blocks with optional stochastic layer dropout.

    When the sequence length is at or below `reverse_thres`, the cheaper
    irreversible (activation-storing) blocks are used instead of the
    reversible ones.
    """
    def __init__(self, blocks, layer_dropout = 0., reverse_thres = 0, send_signal = False):
        super().__init__()
        self.layer_dropout = layer_dropout
        self.reverse_thres = reverse_thres

        self.blocks = nn.ModuleList([ReversibleBlock(f, g, depth, send_signal) for depth, (f, g) in enumerate(blocks)])
        self.irrev_blocks = nn.ModuleList([IrreversibleBlock(f=f, g=g) for f, g in blocks])

    def forward(self, x, arg_route = (True, False), **kwargs):
        # reversible path only pays off beyond the threshold length
        reverse = x.shape[1] > self.reverse_thres
        candidates = self.blocks if reverse else self.irrev_blocks
        blocks = candidates

        if self.training and self.layer_dropout > 0:
            to_drop = torch.empty(len(candidates)).uniform_(0, 1) < self.layer_dropout
            # Bug fix: draw survivors (and the empty-selection fallback) from
            # the list selected above. The original sampled from self.blocks
            # unconditionally, so enabling layer dropout on the non-reverse
            # path silently substituted reversible blocks for the intended
            # irreversible ones.
            blocks = [block for block, drop in zip(candidates, to_drop) if not drop]
            blocks = candidates[:1] if len(blocks) == 0 else blocks

        # arg_route decides whether kwargs are forwarded to f and/or g
        f_args, g_args = map(lambda route: kwargs if route else {}, arg_route)
        block_kwargs = {'f_args': f_args, 'g_args': g_args}

        if not reverse:
            for block in blocks:
                x = block(x, **block_kwargs)
            return x

        return _ReversibleFunction.apply(x, blocks, block_kwargs)
import re
from torch import nn
from reformer_pytorch.reformer_pytorch import ReformerLM
from reformer_pytorch.generative_tools import TrainingWrapper
ENC_PREFIX = 'enc_'
DEC_PREFIX = 'dec_'
def group_dict_by_key(cond, d):
    """Split dict `d` into a (matching, non_matching) pair of dicts
    according to the key predicate `cond`."""
    matching, non_matching = dict(), dict()
    for key, value in d.items():
        target = matching if cond(key) else non_matching
        target[key] = value
    return matching, non_matching
def string_begins_with(prefix, str):
    """Return True if `str` starts with the literal `prefix`.

    Bug fix: the original interpolated `prefix` into a regex, so regex
    metacharacters in a prefix could mis-match; the prefixes used here
    ('enc_', 'dec_') are literals, and str.startswith expresses that intent
    safely.
    """
    return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
    """Split `d` into (keys with `prefix`, keys without `prefix`)."""
    has_prefix = lambda key: string_begins_with(prefix, key)
    return group_dict_by_key(has_prefix, d)
def group_by_key_prefix_and_remove_prefix(prefix, d):
    """Like group_by_key_prefix, but strips `prefix` from the matching keys.

    :return: (stripped_matching_dict, remaining_dict)
    """
    with_prefix, without_prefix = group_dict_by_key(
        lambda key: string_begins_with(prefix, key), d
    )
    stripped = {key[len(prefix):]: value for key, value in with_prefix.items()}
    return stripped, without_prefix
def extract_enc_dec_kwargs(kwargs):
    """Partition kwargs into encoder kwargs, decoder kwargs and the rest,
    stripping the 'enc_' / 'dec_' prefixes."""
    enc_kwargs, remaining = group_by_key_prefix_and_remove_prefix(ENC_PREFIX, kwargs)
    dec_kwargs, remaining = group_by_key_prefix_and_remove_prefix(DEC_PREFIX, remaining)
    return enc_kwargs, dec_kwargs, remaining
def extract_and_set_enc_dec_kwargs(kwargs):
    """Partition kwargs, defaulting the decoder's context_mask to the
    encoder's input_mask when one was supplied."""
    enc_kwargs, dec_kwargs, remaining = extract_enc_dec_kwargs(kwargs)
    if 'input_mask' in enc_kwargs:
        dec_kwargs.setdefault('context_mask', enc_kwargs['input_mask'])
    return enc_kwargs, dec_kwargs, remaining
class ReformerEncDec(nn.Module):
    """Encoder-decoder built from two ReformerLMs.

    Constructor kwargs prefixed with 'enc_' go to the encoder and 'dec_'
    to the decoder; both share the same model dimension `dim`.
    """
    def __init__(self, dim, ignore_index = 0, pad_value = 0, **kwargs):
        super().__init__()
        enc_kwargs, dec_kwargs, _ = extract_enc_dec_kwargs(kwargs)

        assert 'return_embedding' not in enc_kwargs, 'you cannot manually set the return embeddings flag for the encoder'
        assert 'dim' not in dec_kwargs and 'dim' not in enc_kwargs, 'you must set the dim for both encoder and decoder'

        enc_kwargs['dim'] = dec_kwargs['dim'] = dim
        enc_kwargs['return_embeddings'] = True   # encoder output feeds the decoder as keys
        dec_kwargs['causal'] = True              # decoder is autoregressive

        enc_kwargs.setdefault('bucket_size', 64)
        dec_kwargs.setdefault('bucket_size', enc_kwargs['bucket_size'] * 2)

        enc = ReformerLM(**enc_kwargs)
        dec = ReformerLM(**dec_kwargs)

        self.enc = TrainingWrapper(enc, ignore_index = ignore_index, pad_value = pad_value)
        self.dec = TrainingWrapper(dec, ignore_index = ignore_index, pad_value = pad_value)

    def generate(self, seq_in, seq_out_start, seq_len, **kwargs):
        """Encode seq_in, then autoregressively decode up to seq_len tokens
        starting from seq_out_start."""
        enc_kwargs, dec_kwargs, kwargs = extract_and_set_enc_dec_kwargs(kwargs)
        enc_keys = self.enc(seq_in, **enc_kwargs)
        return self.dec.generate(seq_out_start, seq_len, keys = enc_keys, **{**dec_kwargs, **kwargs})

    def forward(self, seq_in, seq_out, return_loss = False, **kwargs):
        """Teacher-forced pass; with return_loss=True, returns the decoder's
        cross-entropy loss."""
        enc_kwargs, dec_kwargs, kwargs = extract_and_set_enc_dec_kwargs(kwargs)
        enc_keys = self.enc(seq_in, **enc_kwargs)
        return self.dec(seq_out, return_loss = return_loss, keys = enc_keys, **dec_kwargs)
import logging
from typing import Optional
from ..http_device import HttpDeviceInfo
from .device import BaseDevice
from ..enums import Namespace
_LOGGER = logging.getLogger(__name__)
class ToggleXMix(BaseDevice):
    """Mixin handling the Refoss ToggleX namespace: caches per-channel
    on/off state and issues turn-on/off/toggle commands."""
    def __init__(self, device: HttpDeviceInfo):
        self.device = device
        # channel -> bool on/off state; channels never reported are absent
        self._channel_togglex_status = {}
        super().__init__(device)

    def is_on(self, channel=0) -> Optional[bool]:
        """Return the cached state for `channel`, or None if unknown."""
        return self._channel_togglex_status.get(channel, None)

    async def async_handle_update(self, namespace: Namespace, data: dict) -> bool:
        """Refresh channel states from a full SYSTEM_ALL digest.

        :return: True if any channel state was updated.
        """
        updated = False
        if namespace == Namespace.SYSTEM_ALL:
            payload = data.get("payload",{}).get('all', {}).get('digest', {}).get('togglex', [])
            for c in payload:
                channel = c['channel']
                switch_state = c['onoff'] == 1
                self._channel_togglex_status[channel] = switch_state
                updated = True
        await super().async_handle_update(namespace=namespace, data=data)
        return updated

    async def async_update_push_state(self, namespace: Namespace, data: dict,uuid:str) -> bool:
        """Apply a CONTROL_TOGGLEX push notification.

        The 'togglex' payload may be a single dict or a list of per-channel
        dicts; both shapes are handled.

        :return: True if any channel state was updated.
        """
        updated = False
        if namespace == Namespace.CONTROL_TOGGLEX:
            payload = data.get('togglex')
            if payload is None:
                _LOGGER.warning(f"{self.__class__.__name__} could not find 'togglex' attribute in push notification data: {data}")
            elif isinstance(payload, list):
                for c in payload:
                    channel = c['channel']
                    switch_state = c['onoff'] == 1
                    self._channel_togglex_status[channel] = switch_state
                    updated = True
            elif isinstance(payload, dict):
                channel = payload['channel']
                switch_state = payload['onoff'] == 1
                self._channel_togglex_status[channel] = switch_state
                updated = True
        await super().async_update_push_state(namespace=namespace, data=data,uuid=uuid)
        return updated

    async def async_turn_off(self, channel=0) -> None:
        """Send an off command for `channel` and optimistically cache it."""
        payload={'togglex': {"onoff": 0, "channel": channel}}
        await self.device.async_execute_cmd(device_uuid=self.uuid,method="SET",namespace=Namespace.CONTROL_TOGGLEX,payload=payload)
        self._channel_togglex_status[channel] = False

    async def async_turn_on(self, channel=0) -> None:
        """Send an on command for `channel` and optimistically cache it."""
        payload={'togglex': {"onoff": 1, "channel": channel}}
        await self.device.async_execute_cmd(device_uuid=self.uuid,method="SET",namespace=Namespace.CONTROL_TOGGLEX,payload=payload)
        self._channel_togglex_status[channel] = True

    async def async_toggle(self, channel=0) -> None:
        """Flip the channel based on the cached state (off if currently on)."""
        if self.is_on(channel=channel):
            await self.async_turn_off(channel=channel)
        else:
            await self.async_turn_on(channel=channel)
=========
refpapers
=========
.. image:: https://img.shields.io/pypi/v/refpapers.svg
:target: https://pypi.python.org/pypi/refpapers
.. image:: https://readthedocs.org/projects/refpapers/badge/?version=latest
:target: https://refpapers.readthedocs.io/en/latest/?version=latest
:alt: Documentation Status
Lightweight command-line tool to manage bibliography (pdf collection)
* Free software: MIT License
* Documentation: https://refpapers.readthedocs.io.
.. image:: docs/source/figures/search_one_open.png
:alt: Screenshot of search functionality
Installing
------------
.. code-block:: bash
pip install refpapers
Dependencies (not including those automatically installed from PyPI)
* Python 3
* pdftotext (from poppler-utils, Ubuntu: sudo apt install poppler-utils)
Introduction
------------
Motivation
~~~~~~~~~~
* Research involves reading a large number of scientific papers, and being able to later refer back to what you have read.
Each time searching again in online databases or search engines is cumbersome,
and unless remembering the exact title, you are likely to find new papers instead of the one you read previously.
* Keeping a personal database of the papers you read solves this problem.
Such a collection grows rapidly, necessitating a performant local search engine.
File names as source of truth
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Refpapers uses the files themselves as a source of truth.
Metadata, such as authors, title, and publication year are encoded in the filename.
Full text is extracted from the file contents.
* For performance reasons, the data is indexed into a whoosh database.
However, the database is only a cache: All the data is stored directly in the file.
* Using the file as a source of truth is useful in several ways:
* If you send pdf files to other people or to yourself on machines without refpapers installed,
your files will be systematically named with all the information you need.
* You can choose to stop using refpapers, and the work you put into curating your collection will not be wasted.
Refpapers is opinionated
~~~~~~~~~~~~~~~~~~~~~~~~
* The naming scheme is fixed: The basic pattern is :code:`FirstAuthor_SecondAuthor_-_PaperTitle_0000.pdf`.
The main fields are given in a fixed order, and the separator is mandatory.
However, you don't need to write this format yourself: the automatic renaming tool takes care of it for you.
* Bibtex keys are in the form :code:`surname0000word`,
with the surname of the first author, year, and the first word of the title (excluding stopwords).
* If you like, other naming formats that encode the same information could be supported.
All the code for implementing this is in :code:`filesystem.py`. Pull requests are welcome!
Features
--------
* Powerful **full-text search**.
* **Fast**, even with a large collection of papers.
* Use **git-annex** to track newly added papers to speed up indexing (optional).
* Automatically **retrieve metadata** from several APIs: ArXiv, crossref, Google Scholar.
* Userfriendly **autocomplete** when manually entering metadata.
* **Configurable**. Can support any document format, if you provide a tool to extract plain text.
Planned features
~~~~~~~~~~~~~~~~
* BibTeX integration.
* Improved data quality check, e.g. deduplication.
Features that will not be implemented
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* **Built-in synch:** refpapers is designed to work well together with git-annex.
To synch your papers between multiple machines, you should use git-annex.
Command line usage
------------------
The :code:`refpapers` command line interface is divided into several subcommands, each with their own argument signature.
This is the same type of interface that e.g. :code:`git` uses.
The overall structure is :code:`refpapers <subcommand> [OPTIONS] [ARGUMENTS]`, for example :code:`refpapers search gronroos` uses the :code:`search` subcommand, with the query term :code:`gronroos`.
See the `documentation <https://refpapers.readthedocs.io/en/latest/?version=latest>`_ for details.
**A list of subcommands for search:**
* **refpapers search**: Search for papers.
* **refpapers one**: Show details of one paper.
* **refpapers open**: Open one paper in viewer.
**A list of subcommands for managing your data:**
* **refpapers index**: Refresh the search index.
* **refpapers rename**: Propose renaming a single file automatically.
* **refpapers inbox**: Ingest files in inbox.
* auto-rename all the files in the inbox,
* commit the new files into git-annex,
* sync the contents of git-annex,
* index to make the new files searchable.
* **refpapers check**: Check for data issues.
Configuration
-------------
If you run refpapers without a configuration, it will ask for the information necessary to write a minimal config.
However, to use all the features of refpapers, you should edit the configuration file.
A full-featured example configuration file can be found in `example_conf/conf.yml <https://github.com/Waino/refpapers/blob/master/example_conf/conf.yml>`_.
My workflow
-----------
* As I browse, I download pdfs into an "inbox" directory (separate from the main collection).
* In the inbox directory, I run :code:`refpapers inbox --open`.
* This auto-renames all the files in the inbox, commits the new files into git-annex,
syncs the contents of git-annex, and indexes the new files.
* On other machines, I run :code:`git annex sync --content`, and then reindex. Now the files are available on those machines as well.
* Periodically, I run `refpapers check` to check for problems.
Alternatives
------------
* **papers** https://github.com/perrette/papers . Similar renaming functionality and API integrations. BibTeX integration.
* **zotero** https://www.zotero.org/ . A feature-rich GUI tool.
* **mendeley** https://www.mendeley.com/ . A proprietary tool, owned by Elsevier.
Acknowledgements
----------------
Thank you to arXiv for use of its open access interoperability.
Citing
------
If you find refpapers to be useful when writing your thesis or other scientific publications, please consider acknowledging it
.. code-block:: bibtex
@misc{refpapers,
title={Refpapers: Lightweight command-line tool to manage bibliography},
author={Grönroos, Stig-Arne},
year={2022},
note={\url{https://github.com/Waino/refpapers}},
}
| /refpapers-1.1.0.tar.gz/refpapers-1.1.0/README.rst | 0.911275 | 0.694756 | README.rst | pypi |
# Installation:
## without any dependencies:
```commandline
pip install refractio
```
## With all dependencies:
```commandline
pip install refractio[all]
```
## With snowflake:
```commandline
pip install refractio[snowflake]
```
## With s3:
```commandline
pip install refractio[s3]
```
## With azureblob:
```commandline
pip install refractio[azureblob]
```
## With local:
```commandline
pip install refractio[local]
```
## With sftp:
```commandline
pip install refractio[sftp]
```
## With mysql:
```commandline
pip install refractio[mysql]
```
## With hive:
```commandline
pip install refractio[hive]
```
## With sqlserver:
```commandline
pip install refractio[sqlserver]
```
## With postgres:
```commandline
pip install refractio[postgres]
```
#### Source code is available at: https://git.lti-aiq.in/refract-sdk/refract-sdk.git
# Usage:
## To read dataframe with dataset name only -
```python
from refractio import get_dataframe
get_dataframe("dataset_name")
# For reading data from any other RDBMS connection apart from snowflake, hive or mysql please pip install mosaic-connector-python package.
```
## To read dataframe with filename from local storage -
```python
from refractio import get_local_dataframe
get_local_dataframe("local_file_name_with_absolute_path")
```
## To use snowflake related operations -
```python
from refractio import snowflake
# To get snowflake connection object with a default snowflake connection created by the user, if available.
snowflake.get_connection()
# To get snowflake connection object with a specific connection name
snowflake.get_connection(connection_name="snowflake_con_name")
# To read a specific dataset published from a snowflake connection
snowflake.get_dataframe("dataset_name")
# To read a specific dataset published from a snowflake connection with only top few records.
snowflake.get_dataframe("dataset_name", row_count=3)
# To execute a user specific query in snowflake, with the specified connection name.
snowflake.execute_query(query="user_query", database="db_name", schema="schema", connection_name="connection_name")
# To execute a user specific query in snowflake, with the current connection object or with the default connection for the user.
snowflake.execute_query(query="user_query", database="db_name", schema="schema")
# To close snowflake connection, please do close the connection after use!
snowflake.close_connection()
```
## To use mysql related operations -
```python
from refractio import mysql
# To get mysql connection object with a default mysql connection created by the user, if available.
mysql.get_connection()
# To get mysql connection object with a specific connection name
mysql.get_connection(connection_name="mysql_con_name")
# To read a specific dataset published from a mysql connection
mysql.get_dataframe("dataset_name")
# To read a specific dataset published from a mysql connection with only top few records.
mysql.get_dataframe("dataset_name", row_count=3)
# To execute a user specific query in mysql, with the specified connection name.
mysql.execute_query(query="user_query", connection_name="connection_name")
# To execute a user specific query in mysql, with the current connection object or with the default connection for the user.
mysql.execute_query(query="user_query")
# To close mysql connection, please do close the connection after use!
mysql.close_connection()
```
## To use sqlserver related operations -
### Requires sqlserver driver library
```python
# Create a custom template with the following commands added in "Pre Init Script" section,
# sudo curl -o /etc/yum.repos.d/mssql-release.repo https://packages.microsoft.com/config/rhel/9.0/prod.repo
# sudo ACCEPT_EULA=Y yum install -y msodbcsql18
from refractio import sqlserver
# To get sqlserver connection object with a default sqlserver connection created by the user, if available.
sqlserver.get_connection()
# To get sqlserver connection object with a specific connection name
sqlserver.get_connection(connection_name="sqlserver_con_name")
# To read a specific dataset published from a sqlserver connection
sqlserver.get_dataframe("dataset_name")
# To read a specific dataset published from a sqlserver connection with only top few records.
sqlserver.get_dataframe("dataset_name", row_count=3)
# To execute a user specific query in sqlserver, with the specified connection name.
sqlserver.execute_query(query="user_query", database="db_name", connection_name="connection_name")
# To execute a user specific query in sqlserver, with the current connection object or with the default connection for the user.
sqlserver.execute_query(query="user_query", database="db_name")
# To close sqlserver connection, please do close the connection after use!
sqlserver.close_connection()
```
## To use hive related operations -
```python
from refractio import hive
# To get hive connection object with a default hive connection created by the user, if available. User id is required (1001 is default user_id used).
hive.get_connection(user_id=1001)
# To get hive connection object with a specific connection name, User id is required (1001 is default user_id used).
hive.get_connection(connection_name="hive_con_name", user_id=1001)
# To read a specific dataset published from a hive connection. User id is required (1001 is default user_id used).
hive.get_dataframe("dataset_name", user_id="1001")
# To read a specific dataset published from a hive connection with only top few records. User id is required (1001 is default user_id used)
hive.get_dataframe("dataset_name", user_id="1001", row_count=3)
# To execute a user specific query in hive, with the specified connection name. User id is required (1001 is default user_id used).
hive.execute_query(query="user_query", connection_name="connection_name", user_id="1001")
# To execute a user specific query in hive, with the current connection object or with the default connection for the user. User id is required (1001 is default user_id used).
hive.execute_query(query="user_query", user_id="1001")
# To close hive connection, please do close the connection after use!
hive.close_connection()
```
## To use postgres related operations -
```python
from refractio import postgres
# To get postgres connection object with a default postgres connection created by the user, if available.
postgres.get_connection()
# To get postgres connection object with a specific connection name
postgres.get_connection(connection_name="mysql_con_name")
# To read a specific dataset published from a postgres connection
postgres.get_dataframe("dataset_name")
# To read a specific dataset published from a postgres connection with only top few records.
postgres.get_dataframe("dataset_name", row_count=3)
# To execute a user specific query in postgres, with the specified connection name.
postgres.execute_query(query="user_query", connection_name="connection_name")
# To execute a user specific query in postgres, with the current connection object or with the default connection for the user.
postgres.execute_query(query="user_query")
# To close postgres connection, please do close the connection after use!
postgres.close_connection()
```
## To use sftp related operations -
```python
from refractio import sftp
# To get sftp connection object with a default sftp connection created by the user, if available.
sftp.get_connection()
# To get sftp connection object with a specific connection name
sftp.get_connection(connection_name="sftp_con_name")
# To read a specific dataset published from a sftp connection
sftp.get_dataframe("dataset_name")
# To read a specific dataset published from a sftp connection with only top few records.
sftp.get_dataframe("dataset_name", row_count=3)
# Use sftp connection object c to do any operation related to sftp like (get, put, listdir etc)
c = sftp.get_connection()
# To close sftp connection, please do close the connection after use!
sftp.close_connection()
```
## To use amazon S3 related operations -
```python
from refractio import s3
# To get s3 connection object with a default s3 connection created by the user, if available.
s3.get_connection()
# To get s3 connection object with a specific connection name
s3.get_connection(connection_name="s3_con_name")
# To read a specific dataset published from a s3 connection
s3.get_dataframe("dataset_name")
# To read a specific dataset published from a s3 connection with only top few records.
s3.get_dataframe("dataset_name", row_count=3)
# Use s3 connection object c to do any operation related to s3.
c = s3.get_connection()
```
## To use azure blob related operations -
```python
from refractio import azure
# To get azure blob connection object with a default azure connection created by the user, if available.
azure.get_connection()
# To get azure blob connection object with a specific connection name
azure.get_connection(connection_name="azureblob_con_name")
# To read a specific dataset published from a azureblob connection
azure.get_dataframe("dataset_name")
# To read a specific dataset published from a azure connection with only top few records.
azure.get_dataframe("dataset_name", row_count=3)
# Use azure connection object c to do any operation related to azure.
c = azure.get_connection()
```
*Note: Usage documentation will be updated in upcoming releases.*
| /refractio-2.0.5.5.tar.gz/refractio-2.0.5.5/README.md | 0.560854 | 0.763241 | README.md | pypi |
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
__all__ = ['Relation','GroupWrap']
class Relation(pd.DataFrame):
"""Create a Relation from a csv file of data for use with relational operators
This module is designed for educational purposes, specifically teaching and experimenting
with relational algebra. To that end, you can create a relation from a file of data typically
a csv file, although you can also specify a separator, for example a vertical bar may be
better in some cases.
This module is built on top of the pandas system in in many cases is just a thin shell.
- **parameters**::
:param filepath: a string specifying a path to a csv file, OR a Pandas DataFrame to convert to a Relation
:param sep: specify a separator for the data file. default is ``|``
"""
def __init__(self, filepath=None, sep='|'):
    """Build a Relation from a csv file path or an existing DataFrame.

    :param filepath: path to a csv file (str), or a pandas DataFrame to wrap.
    :param sep: field separator used when reading a csv file (default '|').
    :raises TypeError: if filepath is neither a str nor a DataFrame.
    """
    # isinstance (not type ==) also accepts str/DataFrame subclasses
    if isinstance(filepath, str):
        super().__init__(pd.read_csv(filepath, sep=sep))
    elif isinstance(filepath, pd.DataFrame):
        super().__init__(filepath)
    else:
        # Bug fix: the original printed 'help' and left the object
        # uninitialized; fail loudly with a descriptive error instead.
        raise TypeError(
            "Relation requires a csv file path (str) or a pandas DataFrame, "
            "got {}".format(type(filepath).__name__)
        )
def project(self, cols):
    """returns a new Relation with only the specified columns

    :param cols: a list of columns to project
    :return: a Relation with duplicate rows dropped
    :raises ValueError: if cols is not a list, or names a column that is
        not present in the relation

    :Example:

    >>> from reframe import Relation
    >>> country = Relation('country.csv')
    >>> country.project(['region','continent','name'])

    .. note:: Relations have no duplicate rows, so projecting a single
       column creates a relation with all of the distinct values for that
       column.
    """
    # idiom fix: isinstance instead of type(...) != list, which also
    # accepts list subclasses
    if not isinstance(cols, list):
        raise ValueError("You must provide the attributes to project inside square brackets []")
    for name in cols:
        if name not in self.columns:
            raise ValueError("'{}' is not a valid attribute name in relation".format(name))
    return Relation(self[cols].drop_duplicates())
def query(self, q):
"""return a new relation with tuples matching the query condition
:param q: a query string
:return: a Relation
:Example:
>>> from reframe import Relation
>>> country = Relation('country.csv')
>>> country.query('continent == "Antarctica"').project(['code','name'])
code name
232 ATA Antarctica
233 BVT Bouvet Island
235 SGS South Georgia and the South Sandwich Islands
236 HMD Heard Island and McDonald Islands
237 ATF French Southern territories
>>>
"""
return Relation(super().query(q).drop_duplicates())
def sort(self, *args, **kwargs):
"""sort the relation on the given columns
:param cols: A list of columns to sort on
:param ascending: Boolean, ascending=False implies a sort in reverse order
:Example:
>>> from reframe import Relation
>>> country = Relation('country.csv')
>>> country.sort(['indepyear'], ascending=False).query('indepyear < 1200').project(['name','indepyear'])
name indepyear
159 Portugal 1143
29 United Kingdom 1066
180 San Marino 885
164 France 843
170 Sweden 836
200 Denmark 800
81 Japan -660
48 Ethiopia -1000
93 China -1523
"""
return Relation(super().sort_values(*args, **kwargs))
def intersect(self, other):
"""Create a new relation that is the intersection of the two given relations
In order to compute the intersection the relations must be union compatible. That is they must
have exactly the same columns. This may require some projecting and renaming.
:param other: The relation to compute the intersection with.
:return:
:Example:
>>> from reframe import Relation
>>> country = Relation('country.csv')
>>> country.query('continent == "Africa"').project(['name', 'region']).intersect(country.query('region == "Western Africa"').project(['name', 'region']))
name region
0 Benin Western Africa
1 Burkina Faso Western Africa
2 Gambia Western Africa
3 Ghana Western Africa
4 Guinea Western Africa
5 Guinea-Bissau Western Africa
6 Cape Verde Western Africa
7 Liberia Western Africa
8 Mali Western Africa
9 Mauritania Western Africa
10 Niger Western Africa
11 Nigeria Western Africa
12 Côte d'Ivoire Western Africa
13 Saint Helena Western Africa
14 Senegal Western Africa
15 Sierra Leone Western Africa
16 Togo Western Africa
>>>
"""
if sorted(self.columns) != sorted(other.columns):
raise ValueError("Relations must be Union compatible")
else:
return Relation(pd.merge(self,other,how='inner',on=list(self.columns)))
def njoin(self, other):
"""Create a new relation that is the intersection of the two given relations
In order to compute the intersection the relations must be union compatible. That is they must
have exactly the same columns. This may require some projecting and renaming.
:param other: The relation to compute the intersection with.
:return:
:Example:
>>> from reframe import Relation
>>> country = Relation('country.csv')
>>> country.query('continent == "Africa"').project(['name', 'region']).njoin(country.query('region == "Western Africa"').project(['name', 'region']))
name region
0 Benin Western Africa
1 Burkina Faso Western Africa
2 Gambia Western Africa
3 Ghana Western Africa
4 Guinea Western Africa
5 Guinea-Bissau Western Africa
6 Cape Verde Western Africa
7 Liberia Western Africa
8 Mali Western Africa
9 Mauritania Western Africa
10 Niger Western Africa
11 Nigeria Western Africa
12 Côte d'Ivoire Western Africa
13 Saint Helena Western Africa
14 Senegal Western Africa
15 Sierra Leone Western Africa
16 Togo Western Africa
>>>
"""
col_list = [x for x in self.columns if x in other.columns]
if not col_list:
raise ValueError("The two relations must have some columns in common")
return Relation(pd.merge(self,other,how='inner',on=list(col_list)))
def union(self,other):
""" Take two Relations with the same columns and put them together top to bottom
:param other:
:return:
:Example:
>>> from reframe import Relation
>>> country = Relation('country.csv')
>>> country.query('region == "Western Africa"').union(country.query('region == "Polynesia"')).project(['name','region'])
name region
22 Benin Western Africa
33 Burkina Faso Western Africa
54 Gambia Western Africa
56 Ghana Western Africa
63 Guinea Western Africa
64 Guinea-Bissau Western Africa
89 Cape Verde Western Africa
112 Liberia Western Africa
124 Mali Western Africa
129 Mauritania Western Africa
144 Niger Western Africa
145 Nigeria Western Africa
149 Côte d'Ivoire Western Africa
171 Saint Helena Western Africa
183 Senegal Western Africa
185 Sierra Leone Western Africa
202 Togo Western Africa
5 American Samoa Polynesia
37 Cook Islands Polynesia
146 Niue Polynesia
157 Pitcairn Polynesia
166 French Polynesia Polynesia
179 Samoa Polynesia
203 Tokelau Polynesia
204 Tonga Polynesia
212 Tuvalu Polynesia
221 Wallis and Futuna Polynesia
>>>
"""
if sorted(self.columns) != sorted(other.columns):
raise ValueError("Relations must be Union compatible")
else:
return Relation(pd.concat([pd.DataFrame(self),pd.DataFrame(other)]))
def minus(self,other):
"""return a relation containing the rows in self 'but not' in other
:param other:
:return:
:Example:
>>> from reframe import Relation
>>> country = Relation('country.csv')
>>> country.query('continent == "Africa"').minus(country.query('region == "Western Africa"')).project(['name','region','continent'])
name region continent
4 Algeria Northern Africa Africa
7 Angola Central Africa Africa
27 Botswana Southern Africa Africa
34 Burundi Eastern Africa Africa
39 Djibouti Eastern Africa Africa
43 Egypt Northern Africa Africa
45 Eritrea Eastern Africa Africa
47 South Africa Southern Africa Africa
48 Ethiopia Eastern Africa Africa
53 Gabon Central Africa Africa
87 Cameroon Central Africa Africa
91 Kenya Eastern Africa Africa
92 Central African Republic Central Africa Africa
97 Comoros Eastern Africa Africa
98 Congo Central Africa Africa
99 Congo, The Democratic Republic of the Central Africa Africa
110 Lesotho Southern Africa Africa
112 Liberia Western Africa Africa
113 Libyan Arab Jamahiriya Northern Africa Africa
117 Western Sahara Northern Africa Africa
119 Madagascar Eastern Africa Africa
121 Malawi Eastern Africa Africa
126 Morocco Northern Africa Africa
130 Mauritius Eastern Africa Africa
131 Mayotte Eastern Africa Africa
138 Mozambique Eastern Africa Africa
140 Namibia Southern Africa Africa
162 Equatorial Guinea Central Africa Africa
167 Réunion Eastern Africa Africa
169 Rwanda Eastern Africa Africa
171 Saint Helena Western Africa Africa
178 Zambia Eastern Africa Africa
181 Sao Tome and Principe Central Africa Africa
184 Seychelles Eastern Africa Africa
189 Somalia Eastern Africa Africa
191 Sudan Northern Africa Africa
194 Swaziland Southern Africa Africa
199 Tanzania Eastern Africa Africa
206 Chad Central Africa Africa
208 Tunisia Northern Africa Africa
213 Uganda Eastern Africa Africa
230 Zimbabwe Eastern Africa Africa
234 British Indian Ocean Territory Eastern Africa Africa
"""
return Relation(self[~self.isin(other).all(1)])
def rename(self,old,new):
"""Rename old attribute to new
:param old: string, name of old attribute
:param new: string, name to change old to
:return: Relation
:Example:
>>> from reframe import Relation
>>> country = Relation('country.csv')
>>> country.project(['name']).rename('name','countryname').head()
countryname
0 Afghanistan
1 Netherlands
2 Netherlands Antilles
3 Albania
4 Algeria
>>>
"""
return Relation(super().rename(columns={old:new}).drop_duplicates())
def cartesian_product(self,other):
self['__cartkey__'] = 1
other['__cartkey__'] = 1
res = pd.merge(self,other,on='__cartkey__')
self.drop('__cartkey__',axis=1,inplace=True)
other.drop('__cartkey__',axis=1,inplace=True)
res.drop('__cartkey__',axis=1,inplace=True)
return Relation(res.drop_duplicates())
def groupby(self,cols):
""" Collapse a relation containing one row per unique value in the given group by attributes.
The groupby operator is always used in conjunction with an aggregate operator.
* count
* sum
* mean
* median
* min
* max
:param cols: A list of columns to group on
:return: A GroupWrap object for one of the aggregate operators to work on.
:Example:
How many countries are in each continent?
>>> from reframe import Relation
>>> country = Relation('country.csv')
>>> country.groupby(['continent']).count('name')
continent count_name
0 Africa 58
1 Antarctica 5
2 Asia 51
3 Europe 46
5 Oceania 28
6 South America 14
"""
res = super().groupby(cols)
return GroupWrap(res,cols)
def extend(self,newcol,series):
"""Create a new attribute by combining or modifying one or more existing attributes
:param newcol: Name of the new column to create
:param series: An expression involving one or more other attributes
:return:
:Example:
>>> from reframe import Relation
>>> country = Relation('country.csv')
>>> country.extend('gnpdiff',country.gnp - country.gnpold).project(['name','gnpdiff']).head(10)
name gnpdiff
0 Afghanistan NaN
1 Netherlands 10884
2 Netherlands Antilles NaN
3 Albania 705
4 Algeria 3016
5 American Samoa NaN
6 Andorra NaN
7 Angola -1336
8 Anguilla NaN
9 Antigua and Barbuda 28
>>>
"""
self[newcol] = series
return self
class GroupWrap(pd.core.groupby.DataFrameGroupBy):
"""Wrapper for a DataFrameGroupBy object -- invisible to end user
"""
def __init__(self, gbo, cols):
self.gbo = gbo
self.gb_cols = cols
def filteragg(self, res, col):
res = res.reset_index()
cl = []
if type(self.gb_cols) == list:
cl = self.gb_cols
else:
cl.append(self.gb_cols)
cl.append(col)
res = res[cl].drop_duplicates()
return res
def count(self, col):
"""
Count the number of occurrences of a value in the column for a group.
:param col:
:return: A Relation with the groupby column(s) and count for a single column
:Example:
>>> from reframe import Relation
>>> country = Relation('country.csv')
>>> country.groupby(['continent']).count('name')
continent count_name
0 Africa 58
1 Antarctica 5
2 Asia 51
3 Europe 46
5 Oceania 28
6 South America 14
>>>
"""
res = self.gbo.count()
return Relation(self.filteragg(res, col).rename(columns={col:"count_"+col}))
def mean(self, col):
"""
Count the number of occurrences of a value in the column for a group.
:param col:
:return: A Relation with the groupby column(s) and count for a single column
:Example:
>>> from reframe import Relation
>>> country = Relation('country.csv')
>>> country.groupby(['continent']).mean('gnp')
continent mean_gnp
0 Africa 10006.465517
1 Antarctica 0.000000
2 Asia 150105.725490
3 Europe 206497.065217
4 North America 261854.789189
5 Oceania 14991.953571
6 South America 107991.000000
>>>
"""
res = self.gbo.mean()
return Relation(self.filteragg(res, col).rename(columns={col:"mean_"+col}))
def min(self, col):
"""
Count the number of occurrences of a value in the column for a group.
:param col:
:return: A Relation with the groupby column(s) and count for a single column
:Example:
>>> from reframe import Relation
>>> country = Relation('country.csv')
>>> country.groupby(['continent']).min('lifeexpectancy')
continent min_lifeexpectancy
0 Africa 37.2
1 Antarctica NaN
2 Asia 45.9
3 Europe 64.5
4 North America 49.2
5 Oceania 59.8
6 South America 62.9
>>>
"""
res = self.gbo.min()
return Relation(self.filteragg(res, col).rename(columns={col:"min_"+col}))
def max(self, col):
"""
Count the number of occurrences of a value in the column for a group.
:param col:
:return: A Relation with the groupby column(s) and count for a single column
:Example:
>>> from reframe import Relation
>>> country = Relation('country.csv')
>>> country.groupby(['continent']).max('gnp')
continent max_gnp
0 Africa 116729
1 Antarctica 0
2 Asia 3787042
3 Europe 2133367
4 North America 8510700
5 Oceania 351182
6 South America 776739
>>>
"""
res = self.gbo.max()
return Relation(self.filteragg(res, col).rename(columns={col:"max_"+col}))
def sum(self, col):
"""
Count the number of occurrences of a value in the column for a group.
:param col:
:return: A Relation with the groupby column(s) and count for a single column
:Example:
>>> from reframe import Relation
>>> country = Relation('country.csv')
>>> country.groupby(['continent']).sum('surfacearea')
continent sum_surfacearea
0 Africa 30250377.0
1 Antarctica 13132101.0
2 Asia 31881008.0
3 Europe 23049133.9
4 North America 24214469.0
5 Oceania 8564294.0
6 South America 17864922.0
>>>
"""
res = self.gbo.sum()
return Relation(self.filteragg(res, col).rename(columns={col:"sum_"+col}))
def median(self, col):
"""
Count the number of occurrences of a value in the column for a group.
:param col:
:return: A Relation with the groupby column(s) and count for a single column
:Example:
>>> from reframe import Relation
>>> country = Relation('country.csv')
>>> country.groupby(['continent']).median('gnp')
continent median_gnp
0 Africa 2533.5
1 Antarctica 0.0
2 Asia 15706.0
3 Europe 20401.0
4 North America 2223.0
5 Oceania 123.0
6 South America 20300.5
>>>
"""
res = self.gbo.median()
return Relation(self.filteragg(res, col).rename(columns={col:"median_"+col}))
if __name__ == '__main__':
#country = Relation('country.csv')
import doctest
doctest.testmod() | /reframe-1.0.1.tar.gz/reframe-1.0.1/reframe.py | 0.708818 | 0.605158 | reframe.py | pypi |
import json
from fastapi import APIRouter, Depends
from reframe.server.lib.auth.prisma import JWTBearer, decodeJWT
from reframe.server.lib.db_models.tool import Tool
from reframe.server.lib.prisma import prisma
router = APIRouter()
@router.post("/tools", name="Create a tool", description="Create a new tool")
async def create_tool(body: Tool, token=Depends(JWTBearer())):
"""Create tool endpoint"""
decoded = decodeJWT(token)
tool = prisma.tool.create(
{
"name": body.name,
"type": body.type,
"metadata": json.dumps(body.metadata),
"userId": decoded["userId"],
"description": body.description,
},
include={"user": True},
)
return {"success": True, "data": tool}
@router.get("/tools", name="List tools", description="List all tools")
async def read_tools(token=Depends(JWTBearer())):
"""List tools endpoint"""
decoded = decodeJWT(token)
tools = prisma.tool.find_many(
where={"userId": decoded["userId"]},
include={"user": True},
order={"createdAt": "desc"},
)
return {"success": True, "data": tools}
@router.get(
"/tools/{toolId}",
name="Get tool",
description="Get a specific tool",
)
async def read_tool(toolId: str, token=Depends(JWTBearer())):
"""Get tool endpoint"""
tool = prisma.tool.find_unique(where={"id": toolId}, include={"user": True})
return {"success": True, "data": tool}
@router.delete(
"/tools/{toolId}",
name="Delete tool",
description="Delete a specific tool",
)
async def delete_tool(toolId: str, token=Depends(JWTBearer())):
"""Delete tool endpoint"""
prisma.tool.delete(where={"id": toolId})
return {"success": True, "data": None}
@router.patch("/tools/{toolId}", name="Patch tool", description="Patch a specific tool")
async def patch_tool(toolId: str, body: dict, token=Depends(JWTBearer())):
"""Patch tool endpoint"""
body["metadata"] = json.dumps(body["metadata"])
tool = prisma.tool.update(
data=body,
where={"id": toolId},
)
return {"success": True, "data": tool} | /reframer-0.0.1-py3-none-any.whl/reframe/server/api/tools.py | 0.61057 | 0.173498 | tools.py | pypi |
from fastapi import APIRouter, Depends
from starlette.requests import Request
from reframe.server.lib.auth.prisma import JWTBearer
from reframe.server.lib.db_models.agent_tool import AgentTool
from reframe.server.lib.prisma import prisma
router = APIRouter()
def parse_filter_params(request: Request):
query_params = request.query_params
filter_params = {}
for k, v in query_params.items():
if k.startswith("filter[") and k.endswith("]"):
# Removing 'filter[' from start and ']' from end
filter_key = k[7:-1]
filter_params[filter_key] = v
return filter_params
@router.post(
"/agent-tools",
name="Create agent tool",
description="Create a agent tool",
)
async def create_agent_tool(body: AgentTool, token=Depends(JWTBearer())):
"""Create agent tool endpoint"""
agent_tool = prisma.agenttool.create(
{"agentId": body.agentId, "toolId": body.toolId}
)
return {"success": True, "data": agent_tool}
@router.get(
"/agent-tools",
name="List agent tools",
description="List all agent tools",
)
async def read_agent_tools(
filters: dict = Depends(parse_filter_params),
expand: bool = False
):
"""List agent tools endpoint"""
agent_tools = prisma.agenttool.find_many(where=filters, include={"tool": expand})
return {"success": True, "data": agent_tools}
@router.get(
"/agent-tools/{agentToolId}",
name="Get agent tool",
description="Get a specific agent tool",
)
async def read_agent_tool(agentToolId: str, token=Depends(JWTBearer())):
"""Get an agent tool"""
agent_tool = prisma.agenttool.find_unique(where={"id": agentToolId})
return {"success": True, "data": agent_tool}
@router.delete(
"/agent-tools/{agentToolId}",
name="Delete agent tool",
description="Delete a specific agent tool",
)
async def delete_agent_tool(agentToolId: str, token=Depends(JWTBearer())):
"""Delete agent tool endpoint"""
prisma.agenttool.delete(where={"id": agentToolId})
return {"success": True, "data": None} | /reframer-0.0.1-py3-none-any.whl/reframe/server/api/agent_tools.py | 0.595257 | 0.173919 | agent_tools.py | pypi |
from fastapi import APIRouter, Depends, HTTPException, status
from reframe.server.lib.api_key import generate_api_key
from reframe.server.lib.auth.prisma import JWTBearer, decodeJWT
from reframe.server.lib.db_models.api_key import ApiKey
from reframe.server.lib.prisma import prisma
router = APIRouter()
@router.post(
"/api_key", name="Create API token", description="Create a new API token"
)
async def create_api_key(body: ApiKey, token=Depends(JWTBearer())):
"""Create api token endpoint"""
decoded = decodeJWT(token)
token = generate_api_key()
try:
agent = prisma.apitoken.create(
{
"description": body.description,
"token": token,
"userId": decoded["userId"],
},
include={"user": True},
)
return {"success": True, "data": agent}
except Exception as e:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=e,
)
@router.get("/api-tokens", name="List API tokens", description="List all API tokens")
async def read_api_keys(token=Depends(JWTBearer())):
"""List api tokens endpoint"""
decoded = decodeJWT(token)
api_keys = prisma.apitoken.find_many(
where={"userId": decoded["userId"]}, include={"user": True}
)
if api_keys:
return {"success": True, "data": api_keys}
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="No agents found",
)
@router.get(
"/api-tokens/{tokenId}",
name="Get API token",
description="Get a specific API token",
)
async def read_api_key(tokenId: str, token=Depends(JWTBearer())):
"""Get an api token endpoint"""
api_key = prisma.apitoken.find_unique(
where={"id": tokenId}, include={"user": True}
)
if api_key:
return {"success": True, "data": api_key}
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=f"API token with id: {tokenId} not found",
)
@router.delete(
"/api-tokens/{tokenId}",
name="Delete API token",
description="Delete a specific API token",
)
async def delete_api_key(tokenId: str, token=Depends(JWTBearer())):
"""Delete api token endpoint"""
try:
prisma.apitoken.delete(where={"id": tokenId})
return {"success": True, "data": None}
except Exception as e:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=e,
) | /reframer-0.0.1-py3-none-any.whl/reframe/server/api/api_tokens.py | 0.65379 | 0.158077 | api_tokens.py | pypi |
from fastapi import APIRouter, Depends
from starlette.requests import Request
from reframe.server.lib.auth.prisma import JWTBearer
from reframe.server.lib.db_models.agent_document import AgentDocument
from reframe.server.lib.prisma import prisma
router = APIRouter()
def parse_filter_params(request: Request):
query_params = request.query_params
filter_params = {}
for k, v in query_params.items():
if k.startswith("filter[") and k.endswith("]"):
# Removing 'filter[' from start and ']' from end
filter_key = k[7:-1]
filter_params[filter_key] = v
return filter_params
@router.post(
"/agent-documents",
name="Create agent document",
description="Create a agent document",
)
async def create_agent_document(body: AgentDocument, token=Depends(JWTBearer())):
"""Create api token endpoint"""
agent_document = prisma.agentdocument.create(
{"agentId": body.agentId, "documentId": body.documentId}
)
return {"success": True, "data": agent_document}
@router.get(
"/agent-documents",
name="List agent documents",
description="List all agent documents",
)
async def read_agent_documents(
filters: dict = Depends(parse_filter_params),
expand: bool = False,
token=Depends(JWTBearer()),
):
"""List api tokens endpoint"""
agent_documents = prisma.agentdocument.find_many(
where=filters, include={"document": expand}
)
return {"success": True, "data": agent_documents}
@router.get(
"/agent-documents/{agentDocumentId}",
name="Get agent document",
description="Get a specific agent document",
)
async def read_agent_document(agentDocumentId: str, token=Depends(JWTBearer())):
"""Get an agent document"""
agent_document = prisma.agentdocument.find_unique(where={"id": agentDocumentId})
return {"success": True, "data": agent_document}
@router.delete(
"/agent-documents/{agentDocumentId}",
name="Delete agent document",
description="Delete a specific agent document",
)
async def delete_agent_document(agentDocumentId: str, token=Depends(JWTBearer())):
"""Delete agent document endpoint"""
prisma.agentdocument.delete(where={"id": agentDocumentId})
return {"success": True, "data": None} | /reframer-0.0.1-py3-none-any.whl/reframe/server/api/agent_documents.py | 0.604282 | 0.176707 | agent_documents.py | pypi |
import json
from fastapi import APIRouter, Depends
from reframe.server.lib.auth.prisma import JWTBearer, decodeJWT
from reframe.server.lib.db_models.prompt import Prompt
from reframe.server.lib.prisma import prisma
router = APIRouter()
@router.post("/prompts", name="Create a prompt", description="Create a new prompt")
async def create_prompt(body: Prompt):
"""Create prompt endpoint"""
decoded = decodeJWT(token)
prompt = prisma.prompt.create(
{
"name": body.name,
"input_variables": json.dumps(body.input_variables),
"template": body.template,
"userId": decoded["userId"],
},
include={"user": True},
)
return {"success": True, "data": prompt}
@router.get("/prompts", name="List prompts", description="List all prompts")
async def read_prompts(token=Depends(JWTBearer())):
"""List prompts endpoint"""
decoded = decodeJWT(token)
prompts = prisma.prompt.find_many(
where={"userId": decoded["userId"]},
include={"user": True},
order={"createdAt": "desc"},
)
return {"success": True, "data": prompts}
@router.get(
"/prompts/{promptId}",
name="Get prompt",
description="Get a specific prompt",
)
async def read_prompt(promptId: str, token=Depends(JWTBearer())):
"""Get prompt endpoint"""
prompt = prisma.prompt.find_unique(where={"id": promptId}, include={"user": True})
return {"success": True, "data": prompt}
@router.delete(
"/prompts/{promptId}",
name="Delete prompt",
description="Delete a specific prompt",
)
async def delete_prompt(promptId: str, token=Depends(JWTBearer())):
"""Delete prompt endpoint"""
prisma.prompt.delete(where={"id": promptId})
return {"success": True, "data": None}
@router.patch(
"/prompts/{promptId}", name="Patch prompt", description="Patch a specific prompt"
)
async def patch_prompt(promptId: str, body: dict, token=Depends(JWTBearer())):
"""Patch prompt endpoint"""
input_variables = body["input_variables"]
if input_variables or input_variables == []:
body["input_variables"] = json.dumps(input_variables)
prompt = prisma.prompt.update(
data=body,
where={"id": promptId},
)
return {"success": True, "data": prompt} | /reframer-0.0.1-py3-none-any.whl/reframe/server/api/prompts.py | 0.47317 | 0.156556 | prompts.py | pypi |
from enum import Enum
from typing import Any, Dict, List, Optional, Union
try:
from typing import Literal
except ImportError:
# Python 3.7 backport
from typing_extensions import Literal
from pydantic import BaseModel, Field
from pydantic.types import StrictBool, StrictInt, StrictStr
import json
class AbortTransferOperation(BaseModel):
    """Operation wrapper carrying a ``MoveShard`` payload under the ``abort_transfer`` key."""

    abort_transfer: "MoveShard" = Field(..., description="")
class AppBuildTelemetry(BaseModel):
    """Application build information: version string and compiled-in feature flags."""

    version: str = Field(..., description="")
    debug: bool = Field(..., description="")
    web_feature: bool = Field(..., description="")
    service_debug_feature: bool = Field(..., description="")
class Batch(BaseModel):
    """Column-oriented batch of points: ``ids``, ``vectors``, and optional ``payloads`` are parallel lists."""

    ids: List["ExtendedPointId"] = Field(..., description="")
    vectors: List[List[float]] = Field(..., description="")
    payloads: Optional[List["Payload"]] = Field(None, description="")
class ChangeAliasesOperation(BaseModel):
    """Atomic batch of collection-alias changes.

    Alias changes are atomic: no collection modifications can happen between
    the alias operations listed in ``actions``.
    """

    actions: List["AliasOperations"] = Field(
        ...,
        description="Operation for performing changes of collection aliases. Alias changes are atomic, meaning that no collection modifications can happen between alias operations.",
    )
class ClusterConfigTelemetry(BaseModel):
    """Cluster configuration section of the telemetry report."""

    enabled: bool = Field(..., description="")
    grpc_timeout_ms: int = Field(..., description="")
    p2p: "P2pConfigTelemetry" = Field(..., description="")
    consensus: "ConsensusConfigTelemetry" = Field(..., description="")
class ClusterStatusOneOf(BaseModel):
    """Cluster-status variant: clustering is disabled."""

    status: Literal[
        "disabled",
    ] = Field(..., description="")
class ClusterStatusOneOf1(BaseModel):
    """Cluster-status variant: clustering is enabled (includes peer and raft details)."""

    status: Literal[
        "enabled",
    ] = Field(..., description="Description of enabled cluster")
    peer_id: int = Field(..., description="ID of this peer")
    peers: Dict[str, "PeerInfo"] = Field(..., description="Peers composition of the cluster with main information")
    raft_info: "RaftInfo" = Field(..., description="Description of enabled cluster")
    consensus_thread_status: "ConsensusThreadStatus" = Field(..., description="Description of enabled cluster")
class CollectionClusterInfo(BaseModel):
    """Current clustering distribution for the collection, as seen from this peer."""

    peer_id: int = Field(..., description="ID of this peer")
    shard_count: int = Field(..., description="Total number of shards")
    local_shards: List["LocalShardInfo"] = Field(..., description="Local shards")
    remote_shards: List["RemoteShardInfo"] = Field(..., description="Remote shards")
    shard_transfers: List["ShardTransferInfo"] = Field(..., description="Shard transfers")
class CollectionConfig(BaseModel):
    """Aggregate configuration of a collection: core params, HNSW, optimizers, and WAL."""

    params: "CollectionParams" = Field(..., description="")
    hnsw_config: "HnswConfig" = Field(..., description="")
    optimizer_config: "OptimizersConfig" = Field(..., description="")
    wal_config: "WalConfig" = Field(..., description="")
class CollectionDescription(BaseModel):
    """Minimal collection descriptor: just its name."""

    name: str = Field(..., description="")
class CollectionInfo(BaseModel):
    """Current statistics and configuration of the collection."""

    status: "CollectionStatus" = Field(..., description="Current statistics and configuration of the collection")
    optimizer_status: "OptimizersStatus" = Field(
        ..., description="Current statistics and configuration of the collection"
    )
    vectors_count: int = Field(..., description="Number of vectors in collection")
    indexed_vectors_count: int = Field(..., description="Number of indexed vectors in the collection")
    points_count: int = Field(..., description="Number of points in collection")
    segments_count: int = Field(..., description="Number of segments in collection")
    disk_data_size: int = Field(..., description="Disk space, used by collection")
    ram_data_size: int = Field(..., description="RAM used by collection")
    config: "CollectionConfig" = Field(..., description="Current statistics and configuration of the collection")
    payload_schema: Dict[str, "PayloadIndexInfo"] = Field(..., description="Types of stored payload")
class CollectionParams(BaseModel):
    """Core collection parameters: vector size, distance metric, sharding, payload storage."""

    vector_size: int = Field(..., description="Size of a vectors used")
    distance: "Distance" = Field(..., description="")
    shard_number: Optional[int] = Field(1, description="Number of shards the collection has")
    on_disk_payload: Optional[bool] = Field(
        False,
        description="If true - point's payload will not be stored in memory. It will be read from the disk every time it is requested. This setting saves RAM by (slightly) increasing the response time. Note: those payload values that are involved in filtering and are indexed - remain in RAM.",
    )
class CollectionStatus(str, Enum):
    """Traffic-light status of a collection.

    NOTE(review): exact semantics of each color (e.g. green=healthy,
    yellow=optimizing, red=error) are not shown here — confirm upstream.
    """

    GREEN = "green"
    YELLOW = "yellow"
    RED = "red"
class CollectionTelemetry(BaseModel):
    """Telemetry snapshot for one collection: config, init time, and per-shard data."""

    id: str = Field(..., description="")
    config: "CollectionConfig" = Field(..., description="")
    init_time: "Duration" = Field(..., description="")
    shards: List["ShardTelemetry"] = Field(..., description="")
class CollectionsResponse(BaseModel):
    """Response listing the descriptions of all existing collections."""

    collections: List["CollectionDescription"] = Field(..., description="")
class ConfigsTelemetry(BaseModel):
    """Service and cluster configuration sections of the telemetry report."""

    service_config: "ServiceConfigTelemetry" = Field(..., description="")
    cluster_config: "ClusterConfigTelemetry" = Field(..., description="")
class ConsensusConfigTelemetry(BaseModel):
    """Consensus tuning parameters as reported via telemetry."""

    max_message_queue_size: int = Field(..., description="")
    tick_period_ms: int = Field(..., description="")
    bootstrap_timeout_sec: int = Field(..., description="")
class ConsensusThreadStatusOneOf(BaseModel):
    """Consensus-thread status variant: thread is working."""

    consensus_thread_status: Literal[
        "working",
    ] = Field(..., description="")
class ConsensusThreadStatusOneOf1(BaseModel):
    """Consensus-thread status variant: thread has stopped."""

    consensus_thread_status: Literal[
        "stopped",
    ] = Field(..., description="")
class ConsensusThreadStatusOneOf2(BaseModel):
    """Consensus-thread status variant: thread stopped with an error (message in ``err``)."""

    consensus_thread_status: Literal[
        "stopped_with_err",
    ] = Field(..., description="")
    err: str = Field(..., description="")
class CountRequest(BaseModel):
    """Count request: counts the points which satisfy ``filter``.

    If ``filter`` is not provided, the count of all points in the collection
    is returned.
    """

    filter: Optional["Filter"] = Field(None, description="Look only for points which satisfies this conditions")
    exact: Optional[bool] = Field(
        True,
        description="If true, count exact number of points. If false, count approximate number of points faster. Approximate count might be unreliable during the indexing process. Default: true",
    )
class CountResult(BaseModel):
    """Result of a count request."""

    count: int = Field(..., description="Number of points which satisfy the conditions")
class TableExistsResult(BaseModel):
    """Result of a table-existence check.

    The original field descriptions were copy-pasted from ``CountResult``
    ("Number of points which satisfy the conditions") and did not match
    these fields; they are corrected here.
    """

    # Raw response payload accompanying the check.
    data: dict = Field(..., description="Raw response data returned by the existence check")
    # Whether the queried table exists.
    exists: bool = Field(..., description="True if the table exists, false otherwise")
class CreateAlias(BaseModel):
    """Create an alternative name (alias) for a collection.

    The collection remains available under both names for search, retrieve, etc.
    """

    collection_name: str = Field(
        ...,
        description="Create alternative name for a collection. Collection will be available under both names for search, retrieve,",
    )
    alias_name: str = Field(
        ...,
        description="Create alternative name for a collection. Collection will be available under both names for search, retrieve,",
    )
class CreateAliasOperation(BaseModel):
    """Operation wrapper carrying a ``CreateAlias`` payload under the ``create_alias`` key."""

    create_alias: "CreateAlias" = Field(..., description="")
class CreateDataset(BaseModel):
    """Request schema for creating a new dataset, with optional index parameters.

    Unset optional configs fall back to the service configuration file.
    """

    vector_size: int = Field(
        ..., description="Operation for creating new collection and (optionally) specify index params"
    )
    distance: "Distance" = Field(
        ..., description="Operation for creating new collection and (optionally) specify index params"
    )
    shard_number: Optional[int] = Field(
        None,
        description="Number of shards in collection. Default is 1 for standalone, otherwise equal to the number of nodes Minimum is 1",
    )
    on_disk_payload: Optional[bool] = Field(
        None,
        description="If true - point's payload will not be stored in memory. It will be read from the disk every time it is requested. This setting saves RAM by (slightly) increasing the response time. Note: those payload values that are involved in filtering and are indexed - remain in RAM.",
    )
    hnsw_config: Optional["HnswConfigDiff"] = Field(
        None, description="Custom params for HNSW index. If none - values from service configuration file are used."
    )
    wal_config: Optional["WalConfigDiff"] = Field(
        None, description="Custom params for WAL. If none - values from service configuration file are used."
    )
    optimizers_config: Optional["OptimizersConfigDiff"] = Field(
        None, description="Custom params for Optimizers. If none - values from service configuration file are used."
    )
class CreateTable(BaseModel):
    """
    Operation for creating a new named table (collection), optionally specifying
    index parameters.
    """

    # Name of the table to be created.
    table_name: str = Field(
        ..., description="Name of the table to be created"
    )
    # Dimensionality of the vectors stored in the table.
    vector_size: int = Field(
        ..., description="Operation for creating new collection and (optionally) specify index params"
    )
    # Distance function used to compare vectors (see Distance enum).
    distance: "Distance" = Field(
        ..., description="Operation for creating new collection and (optionally) specify index params"
    )
    shard_number: Optional[int] = Field(
        None,
        description="Number of shards in collection. Default is 1 for standalone, otherwise equal to the number of nodes Minimum is 1",
    )
    on_disk_payload: Optional[bool] = Field(
        None,
        description="If true - point's payload will not be stored in memory. It will be read from the disk every time it is requested. This setting saves RAM by (slightly) increasing the response time. Note: those payload values that are involved in filtering and are indexed - remain in RAM.",
    )
    # The *Diff config objects carry only the overrides; None means
    # "use the service configuration file defaults".
    hnsw_config: Optional["HnswConfigDiff"] = Field(
        None, description="Custom params for HNSW index. If none - values from service configuration file are used."
    )
    wal_config: Optional["WalConfigDiff"] = Field(
        None, description="Custom params for WAL. If none - values from service configuration file are used."
    )
    optimizers_config: Optional["OptimizersConfigDiff"] = Field(
        None, description="Custom params for Optimizers. If none - values from service configuration file are used."
    )


class ExistsTable(BaseModel):
    """
    Request to check whether a table with the given name exists.
    """

    table_name: str = Field(
        ..., description="Name of the table to be created"
    )
class CreateFieldIndex(BaseModel):
    """Request to build a payload index on a single field of a collection."""

    # Payload field to index.
    field_name: str = Field(..., description="")
    # Index type; if omitted the service infers it from the stored values.
    field_type: Optional["PayloadSchemaType"] = Field(None, description="")


class DeleteAlias(BaseModel):
    """
    Delete alias if exists
    """

    # Name of the alias to remove.
    alias_name: str = Field(..., description="Delete alias if exists")


class DeleteAliasOperation(BaseModel):
    """
    Delete alias if exists
    """

    # Tagged wrapper selecting the "delete_alias" variant of an alias operation.
    delete_alias: "DeleteAlias" = Field(..., description="Delete alias if exists")


class DeletePayload(BaseModel):
    """Remove the given payload keys from every point in `points`."""

    # Payload keys to delete.
    keys: List[str] = Field(..., description="")
    points: List["ExtendedPointId"] = Field(..., description="Deletes values from each point in this list")


class Distance(str, Enum):
    """Vector distance functions supported by the service."""

    COSINE = "Cosine"
    EUCLID = "Euclid"
    DOT = "Dot"


class Duration(BaseModel):
    """A time span split into whole seconds and the nanosecond remainder."""

    secs: int = Field(..., description="")
    nanos: int = Field(..., description="")
class ErrorResponse(BaseModel):
    """Envelope returned by the API when a request fails."""

    time: Optional[float] = Field(None, description="Time spent to process this request")
    status: Optional["ErrorResponseStatus"] = Field(None, description="")
    result: Optional[Any] = Field(None, description="")


class ErrorResponseStatus(BaseModel):
    """Status part of an error response, carrying the error text."""

    error: Optional[str] = Field(None, description="Description of the occurred error.")


class FieldCondition(BaseModel):
    """
    All possible payload filtering conditions
    """

    key: str = Field(..., description="Payload key")
    # Exactly one of the condition kinds below is expected to be set;
    # NOTE(review): this model does not enforce that — confirm server behavior.
    match: Optional["Match"] = Field(None, description="Check if point has field with a given value")
    range: Optional["Range"] = Field(None, description="Check if points value lies in a given range")
    geo_bounding_box: Optional["GeoBoundingBox"] = Field(
        None, description="Check if points geo location lies in a given area"
    )
    geo_radius: Optional["GeoRadius"] = Field(None, description="Check if geo point is within a given radius")
    values_count: Optional["ValuesCount"] = Field(None, description="Check number of values of the field")


class Filter(BaseModel):
    """Boolean combination of conditions (should / must / must_not clauses)."""

    should: Optional[List["Condition"]] = Field(None, description="At least one of those conditions should match")
    must: Optional[List["Condition"]] = Field(None, description="All conditions must match")
    must_not: Optional[List["Condition"]] = Field(None, description="All conditions must NOT match")


class FilterSelector(BaseModel):
    """Selects points by a filter instead of an explicit id list."""

    filter: "Filter" = Field(..., description="")
class GeoBoundingBox(BaseModel):
    """
    Geo filter request. Matches coordinates inside the rectangle described by
    the coordinates of its top-left and bottom-right corners.
    """

    top_left: "GeoPoint" = Field(
        ...,
        description="Geo filter request Matches coordinates inside the rectangle, described by coordinates of lop-left and bottom-right edges",
    )
    bottom_right: "GeoPoint" = Field(
        ...,
        description="Geo filter request Matches coordinates inside the rectangle, described by coordinates of lop-left and bottom-right edges",
    )


class GeoPoint(BaseModel):
    """
    Geo point payload schema
    """

    # Longitude in degrees.
    lon: float = Field(..., description="Geo point payload schema")
    # Latitude in degrees.
    lat: float = Field(..., description="Geo point payload schema")


class GeoRadius(BaseModel):
    """
    Geo filter request Matches coordinates inside the circle of `radius` and center with coordinates `center`
    """

    center: "GeoPoint" = Field(
        ...,
        description="Geo filter request Matches coordinates inside the circle of `radius` and center with coordinates `center`",
    )
    radius: float = Field(..., description="Radius of the area in meters")


class HasIdCondition(BaseModel):
    """
    ID-based filtering condition
    """

    has_id: List["ExtendedPointId"] = Field(..., description="ID-based filtering condition")
class HnswConfig(BaseModel):
    """
    Config of HNSW index
    """

    m: int = Field(
        ...,
        description="Number of edges per node in the index graph. Larger the value - more accurate the search, more space required.",
    )
    ef_construct: int = Field(
        ...,
        description="Number of neighbours to consider during the index building. Larger the value - more accurate the search, more time required to build index.",
    )
    full_scan_threshold: int = Field(
        ...,
        description="Minimal size (in KiloBytes) of vectors for additional payload-based indexing. If payload chunk is smaller than `full_scan_threshold_kb` additional indexing won't be used - in this case full-scan search should be preferred by query planner and additional indexing is not required. Note: 1Kb = 1 vector of size 256",
    )
    max_indexing_threads: Optional[int] = Field(
        0, description="Number of parallel threads used for background index building. If 0 - auto selection."
    )


class HnswConfigDiff(BaseModel):
    """Partial HNSW config: only set fields override service defaults."""

    m: Optional[int] = Field(
        None,
        description="Number of edges per node in the index graph. Larger the value - more accurate the search, more space required.",
    )
    ef_construct: Optional[int] = Field(
        None,
        description="Number of neighbours to consider during the index building. Larger the value - more accurate the search, more time required to build index.",
    )
    full_scan_threshold: Optional[int] = Field(
        None,
        description="Minimal size (in KiloBytes) of vectors for additional payload-based indexing. If payload chunk is smaller than `full_scan_threshold_kb` additional indexing won't be used - in this case full-scan search should be preferred by query planner and additional indexing is not required. Note: 1Kb = 1 vector of size 256",
    )
class IndexesOneOf(BaseModel):
    """
    Do not use any index, scan whole vector collection during search. Guarantee 100% precision, but may be time consuming on large collections.
    """

    # Discriminator for the `Indexes` union: plain (no index) variant.
    type: Literal["plain",] = Field(
        ...,
        description="Do not use any index, scan whole vector collection during search. Guarantee 100% precision, but may be time consuming on large collections.",
    )
    options: Any = Field(
        ...,
        description="Do not use any index, scan whole vector collection during search. Guarantee 100% precision, but may be time consuming on large collections.",
    )


class IndexesOneOf1(BaseModel):
    """
    Use filterable HNSW index for approximate search. Is very fast even on a very huge collections, but require additional space to store index and additional time to build it.
    """

    # Discriminator for the `Indexes` union: HNSW variant.
    type: Literal["hnsw",] = Field(
        ...,
        description="Use filterable HNSW index for approximate search. Is very fast even on a very huge collections, but require additional space to store index and additional time to build it.",
    )
    options: "HnswConfig" = Field(
        ...,
        description="Use filterable HNSW index for approximate search. Is very fast even on a very huge collections, but require additional space to store index and additional time to build it.",
    )
# Generated per-endpoint response envelopes: each wraps `result` with the
# request `time` and an "ok" status. They differ only in the type of `result`.
class InlineResponse200(BaseModel):
    """Response envelope: result is a list of TelemetryData."""

    time: Optional[float] = Field(None, description="Time spent to process this request")
    # NOTE(review): annotated as Literal["ok"] but defaulted to None via
    # Field(None) — pydantic v1 does not validate defaults, so this passes;
    # confirm Optional[...] was not intended (same pattern in all envelopes).
    status: Literal[
        "ok",
    ] = Field(None, description="")
    result: Optional[List["TelemetryData"]] = Field(None, description="")


class InlineResponse2001(BaseModel):
    """Response envelope: result is a ClusterStatus."""

    time: Optional[float] = Field(None, description="Time spent to process this request")
    status: Literal[
        "ok",
    ] = Field(None, description="")
    result: Optional["ClusterStatus"] = Field(None, description="")


class InlineResponse20010(BaseModel):
    """Response envelope: result is a list of Records."""

    time: Optional[float] = Field(None, description="Time spent to process this request")
    status: Literal[
        "ok",
    ] = Field(None, description="")
    result: Optional[List["Record"]] = Field(None, description="")


class InlineResponse20011(BaseModel):
    """Response envelope: result is a ScrollResult."""

    time: Optional[float] = Field(None, description="Time spent to process this request")
    status: Literal[
        "ok",
    ] = Field(None, description="")
    result: Optional["ScrollResult"] = Field(None, description="")


class InlineResponse20012(BaseModel):
    """Response envelope: result is a list of ScoredPoints."""

    time: Optional[float] = Field(None, description="Time spent to process this request")
    status: Literal[
        "ok",
    ] = Field(None, description="")
    result: Optional[List["ScoredPoint"]] = Field(None, description="")


class InlineResponse20013(BaseModel):
    """Response envelope: result is a CountResult."""

    time: Optional[float] = Field(None, description="Time spent to process this request")
    status: Literal[
        "ok",
    ] = Field(None, description="")
    result: Optional["CountResult"] = Field(None, description="")


class InlineResponse2002(BaseModel):
    """Response envelope: result is a bare boolean."""

    time: Optional[float] = Field(None, description="Time spent to process this request")
    status: Literal[
        "ok",
    ] = Field(None, description="")
    result: Optional[bool] = Field(None, description="")


class InlineResponse2003(BaseModel):
    """Response envelope: result is a CollectionsResponse."""

    time: Optional[float] = Field(None, description="Time spent to process this request")
    status: Literal[
        "ok",
    ] = Field(None, description="")
    result: Optional["CollectionsResponse"] = Field(None, description="")


class InlineResponse2004(BaseModel):
    """Response envelope: result is a CollectionInfo."""

    time: Optional[float] = Field(None, description="Time spent to process this request")
    status: Literal[
        "ok",
    ] = Field(None, description="")
    result: Optional["CollectionInfo"] = Field(None, description="")


class InlineResponse2005(BaseModel):
    """Response envelope: result is an UpdateResult."""

    time: Optional[float] = Field(None, description="Time spent to process this request")
    status: Literal[
        "ok",
    ] = Field(None, description="")
    result: Optional["UpdateResult"] = Field(None, description="")


class InlineResponse2006(BaseModel):
    """Response envelope: result is a CollectionClusterInfo."""

    time: Optional[float] = Field(None, description="Time spent to process this request")
    status: Literal[
        "ok",
    ] = Field(None, description="")
    result: Optional["CollectionClusterInfo"] = Field(None, description="")


class InlineResponse2007(BaseModel):
    """Response envelope: result is a list of SnapshotDescriptions."""

    time: Optional[float] = Field(None, description="Time spent to process this request")
    status: Literal[
        "ok",
    ] = Field(None, description="")
    result: Optional[List["SnapshotDescription"]] = Field(None, description="")


class InlineResponse2008(BaseModel):
    """Response envelope: result is a SnapshotDescription."""

    time: Optional[float] = Field(None, description="Time spent to process this request")
    status: Literal[
        "ok",
    ] = Field(None, description="")
    result: Optional["SnapshotDescription"] = Field(None, description="")


class InlineResponse2009(BaseModel):
    """Response envelope: result is a single Record."""

    time: Optional[float] = Field(None, description="Time spent to process this request")
    status: Literal[
        "ok",
    ] = Field(None, description="")
    result: Optional["Record"] = Field(None, description="")
class IsEmptyCondition(BaseModel):
    """
    Select points with empty payload for a specified field
    """

    is_empty: "PayloadField" = Field(..., description="Select points with empty payload for a specified field")


class InlineResponse2010(BaseModel):
    """Response envelope variant carrying a free-form `data` dict and a message."""

    time: Optional[float] = Field(None, description="Time spent to process this request")
    # NOTE(review): same Literal["ok"] / Field(None) default pattern as the
    # other generated envelopes — confirm Optional was not intended.
    status: Literal[
        "ok",
    ] = Field(None, description="")
    data: Optional[dict] = Field(None, description="")
    message: Optional[str] = Field(None, description="")
class LocalShardInfo(BaseModel):
    """Information about a shard hosted on this node."""

    shard_id: int = Field(..., description="Local shard id")
    points_count: int = Field(..., description="Number of points in the shard")


class MatchInteger(BaseModel):
    """
    Match filter request (deprecated)
    """

    integer: int = Field(..., description="Integer value to match")


class MatchKeyword(BaseModel):
    """
    Match by keyword (deprecated)
    """

    keyword: str = Field(..., description="Keyword value to match")


class MatchValue(BaseModel):
    """Exact-value match condition; replaces the deprecated MatchInteger/MatchKeyword."""

    value: "ValueVariants" = Field(..., description="")


class MoveShard(BaseModel):
    """Request to move a shard from one peer to another."""

    shard_id: int = Field(..., description="")
    to_peer_id: int = Field(..., description="")
    from_peer_id: int = Field(..., description="")


class MoveShardOperation(BaseModel):
    # Tagged wrapper selecting the "move_shard" cluster operation.
    move_shard: "MoveShard" = Field(..., description="")


# Variants of the `OptimizerTelemetry` union, one per optimizer kind.
class OptimizerTelemetryOneOf(BaseModel):
    indexing: "OptimizerTelemetryOneOfIndexing" = Field(..., description="")


class OptimizerTelemetryOneOf1(BaseModel):
    merge: "OptimizerTelemetryOneOfIndexing" = Field(..., description="")


class OptimizerTelemetryOneOf2(BaseModel):
    vacuum: "OptimizerTelemetryOneOfIndexing" = Field(..., description="")


class OptimizerTelemetryOneOfIndexing(BaseModel):
    # Statistics of optimizer runs (shared by all three variants above).
    optimizations: "TelemetryOperationStatistics" = Field(..., description="")
class OptimizersConfig(BaseModel):
    """Full (all fields required) configuration of the segment optimizers."""

    deleted_threshold: float = Field(
        ...,
        description="The minimal fraction of deleted vectors in a segment, required to perform segment optimization",
    )
    vacuum_min_vector_number: int = Field(
        ..., description="The minimal number of vectors in a segment, required to perform segment optimization"
    )
    default_segment_number: int = Field(
        ...,
        description="Target amount of segments optimizer will try to keep. Real amount of segments may vary depending on multiple parameters: - Amount of stored points - Current write RPS It is recommended to select default number of segments as a factor of the number of search threads, so that each segment would be handled evenly by one of the threads If `default_segment_number = 0`, will be automatically selected by the number of available CPUs",
    )
    max_segment_size: Optional[int] = Field(
        None,
        description="Do not create segments larger this size (in KiloBytes). Large segments might require disproportionately long indexation times, therefore it makes sense to limit the size of segments. If indexation speed have more priority for your - make this parameter lower. If search speed is more important - make this parameter higher. Note: 1Kb = 1 vector of size 256 If not set, will be automatically selected considering the number of available CPUs.",
    )
    memmap_threshold: Optional[int] = Field(
        None,
        description="Maximum size (in KiloBytes) of vectors to store in-memory per segment. Segments larger than this threshold will be stored as read-only memmaped file. To enable memmap storage, lower the threshold Note: 1Kb = 1 vector of size 256 If not set, mmap will not be used.",
    )
    indexing_threshold: int = Field(
        ...,
        description="Maximum size (in KiloBytes) of vectors allowed for plain index. Default value based on https://github.com/google-research/google-research/blob/master/scann/docs/algorithms.md Note: 1Kb = 1 vector of size 256",
    )
    flush_interval_sec: int = Field(..., description="Minimum interval between forced flushes.")
    max_optimization_threads: int = Field(..., description="Maximum available threads for optimization workers")
class OptimizersConfigDiff(BaseModel):
    """Partial optimizer config: only set fields override the current values."""

    deleted_threshold: Optional[float] = Field(
        None,
        description="The minimal fraction of deleted vectors in a segment, required to perform segment optimization",
    )
    vacuum_min_vector_number: Optional[int] = Field(
        None, description="The minimal number of vectors in a segment, required to perform segment optimization"
    )
    default_segment_number: Optional[int] = Field(
        None,
        description="Target amount of segments optimizer will try to keep. Real amount of segments may vary depending on multiple parameters: - Amount of stored points - Current write RPS It is recommended to select default number of segments as a factor of the number of search threads, so that each segment would be handled evenly by one of the threads If `default_segment_number = 0`, will be automatically selected by the number of available CPUs",
    )
    max_segment_size: Optional[int] = Field(
        None,
        description="Do not create segments larger this size (in KiloBytes). Large segments might require disproportionately long indexation times, therefore it makes sense to limit the size of segments. If indexation speed have more priority for your - make this parameter lower. If search speed is more important - make this parameter higher. Note: 1Kb = 1 vector of size 256",
    )
    memmap_threshold: Optional[int] = Field(
        None,
        description="Maximum size (in KiloBytes) of vectors to store in-memory per segment. Segments larger than this threshold will be stored as read-only memmaped file. To enable memmap storage, lower the threshold Note: 1Kb = 1 vector of size 256",
    )
    indexing_threshold: Optional[int] = Field(
        None,
        description="Maximum size (in KiloBytes) of vectors allowed for plain index. Default value based on https://github.com/google-research/google-research/blob/master/scann/docs/algorithms.md Note: 1Kb = 1 vector of size 256",
    )
    flush_interval_sec: Optional[int] = Field(None, description="Minimum interval between forced flushes.")
    max_optimization_threads: Optional[int] = Field(
        None, description="Maximum available threads for optimization workers"
    )
class OptimizersStatusOneOf(str, Enum):
    """Healthy variant of the optimizers status union."""

    OK = "ok"


class OptimizersStatusOneOf1(BaseModel):
    """
    Something wrong happened with optimizers
    """

    error: str = Field(..., description="Something wrong happened with optimizers")


class P2pConfigTelemetry(BaseModel):
    """Peer-to-peer networking configuration reported in telemetry."""

    connection_pool_size: int = Field(..., description="")


# Point payload is an arbitrary JSON object; modeled as a plain dict.
Payload = dict
class PayloadField(BaseModel):
    """
    Payload field
    """

    key: str = Field(..., description="Payload field name")


class PayloadIndexInfo(BaseModel):
    """
    Payload field type & index information
    """

    data_type: "PayloadSchemaType" = Field(..., description="Payload field type & index information")


class PayloadIndexTelemetry(BaseModel):
    """Telemetry counters for a single payload field index."""

    points_values_count: int = Field(..., description="")
    points_count: int = Field(..., description="")
    histogram_bucket_size: Optional[int] = Field(None, description="")


class PayloadSchemaType(str, Enum):
    """Data types a payload field index can be built over."""

    KEYWORD = "keyword"
    INTEGER = "integer"
    FLOAT = "float"
    GEO = "geo"


class PayloadSelectorExclude(BaseModel):
    """Return all payload keys except the listed ones."""

    exclude: List[str] = Field(..., description="Exclude this fields from returning payload")


class PayloadSelectorInclude(BaseModel):
    """Return only the listed payload keys."""

    include: List[str] = Field(..., description="Only include this payload keys")
class PayloadStorageTypeOneOf(BaseModel):
    """
    Store payload in memory and use persistence storage only if vectors are changed
    """

    type: Literal[
        "in_memory",
    ] = Field(..., description="Store payload in memory and use persistence storage only if vectors are changed")


class PayloadStorageTypeOneOf1(BaseModel):
    """
    Store payload on disk only, read each time it is requested
    """

    type: Literal[
        "on_disk",
    ] = Field(..., description="Store payload on disk only, read each time it is requested")


class PeerInfo(BaseModel):
    """
    Information of a peer in the cluster
    """

    uri: str = Field(..., description="Information of a peer in the cluster")


class PointIdsList(BaseModel):
    """Selects points by an explicit list of ids."""

    points: List["ExtendedPointId"] = Field(..., description="")


class PointRequest(BaseModel):
    """Retrieve request for a set of points by id."""

    ids: List["ExtendedPointId"] = Field(..., description="Look for points with ids")
    with_payload: Optional["WithPayloadInterface"] = Field(
        None, description="Select which payload to return with the response. Default: All"
    )
    with_vector: Optional[bool] = Field(False, description="Whether to return the point vector with the result?")


class PointStruct(BaseModel):
    """A point to upsert: id, vector and optional payload."""

    id: "ExtendedPointId" = Field(..., description="")
    vector: List[float] = Field(..., description="Vector")
    payload: Optional["Payload"] = Field(None, description="Payload values (optional)")


class PointsBatch(BaseModel):
    # Column-oriented form of a points upsert.
    batch: "Batch" = Field(..., description="")


class PointsList(BaseModel):
    # Row-oriented form of a points upsert.
    points: List["PointStruct"] = Field(..., description="")
class RaftInfo(BaseModel):
    """
    Summary information about the current raft state
    """

    term: int = Field(
        ...,
        description="Raft divides time into terms of arbitrary length, each beginning with an election. If a candidate wins the election, it remains the leader for the rest of the term. The term number increases monotonically. Each server stores the current term number which is also exchanged in every communication.",
    )
    commit: int = Field(
        ..., description="The index of the latest committed (finalized) operation that this peer is aware of."
    )
    pending_operations: int = Field(
        ..., description="Number of consensus operations pending to be applied on this peer"
    )
    leader: Optional[int] = Field(None, description="Leader of the current term")
    role: Optional["StateRole"] = Field(None, description="Role of this peer in the current term")
    is_voter: bool = Field(..., description="Is this peer a voter or a learner")


class Range(BaseModel):
    """
    Range filter request
    """

    # All bounds are optional; any combination may be supplied.
    lt: Optional[float] = Field(None, description="point.key < range.lt")
    gt: Optional[float] = Field(None, description="point.key > range.gt")
    gte: Optional[float] = Field(None, description="point.key >= range.gte")
    lte: Optional[float] = Field(None, description="point.key <= range.lte")
class RecommendRequest(BaseModel):
    """
    Recommendation request. Provides positive and negative examples of the vectors, which are already stored in the collection. Service should look for the points which are closer to positive examples and at the same time further to negative examples. The concrete way of how to compare negative and positive distances is up to implementation in `segment` crate.
    """

    positive: List["ExtendedPointId"] = Field(..., description="Look for vectors closest to those")
    negative: List["ExtendedPointId"] = Field(..., description="Try to avoid vectors like this")
    filter: Optional["Filter"] = Field(None, description="Look only for points which satisfies this conditions")
    params: Optional["SearchParams"] = Field(None, description="Additional search params")
    limit: int = Field(..., description="Max number of result to return")
    offset: Optional[int] = Field(
        0,
        description="Offset of the first result to return. May be used to paginate results. Note: large offset values may cause performance issues.",
    )
    with_payload: Optional["WithPayloadInterface"] = Field(
        None, description="Select which payload to return with the response. Default: None"
    )
    with_vector: Optional[bool] = Field(False, description="Whether to return the point vector with the result?")
    score_threshold: Optional[float] = Field(
        None,
        description="Define a minimal score threshold for the result. If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned.",
    )


class Record(BaseModel):
    """
    Point data
    """

    id: "ExtendedPointId" = Field(..., description="Point data")
    payload: Optional["Payload"] = Field(None, description="Payload - values assigned to the point")
    vector: Optional[List[float]] = Field(None, description="Vector of the point")
class RemoteShardInfo(BaseModel):
    """Information about a shard hosted on another peer."""

    shard_id: int = Field(..., description="Remote shard id")
    peer_id: int = Field(..., description="Remote peer id")


class RenameAlias(BaseModel):
    """
    Change alias to a new one
    """

    old_alias_name: str = Field(..., description="Change alias to a new one")
    new_alias_name: str = Field(..., description="Change alias to a new one")


class RenameAliasOperation(BaseModel):
    """
    Change alias to a new one
    """

    # Tagged wrapper selecting the "rename_alias" variant of an alias operation.
    rename_alias: "RenameAlias" = Field(..., description="Change alias to a new one")


class RunningEnvironmentTelemetry(BaseModel):
    """Host environment details reported in telemetry (OS, hardware, docker)."""

    distribution: Optional[str] = Field(None, description="")
    distribution_version: Optional[str] = Field(None, description="")
    is_docker: bool = Field(..., description="")
    cores: Optional[int] = Field(None, description="")
    ram_size: Optional[int] = Field(None, description="")
    disk_size: Optional[int] = Field(None, description="")
    cpu_flags: str = Field(..., description="")
class ScoredPoint(BaseModel):
    """
    Search result
    """

    id: "ExtendedPointId" = Field(..., description="Search result")
    version: int = Field(..., description="Point version")
    score: float = Field(..., description="Points vector distance to the query vector")
    payload: Optional["Payload"] = Field(None, description="Payload - values assigned to the point")
    vector: Optional[List[float]] = Field(None, description="Vector of the point")


class ScrollRequest(BaseModel):
    """
    Scroll request - paginate over all points which matches given condition
    """

    offset: Optional["ExtendedPointId"] = Field(None, description="Start ID to read points from.")
    limit: Optional[int] = Field(None, description="Page size. Default: 10")
    filter: Optional["Filter"] = Field(
        None, description="Look only for points which satisfies this conditions. If not provided - all points."
    )
    with_payload: Optional["WithPayloadInterface"] = Field(
        None, description="Select which payload to return with the response. Default: All"
    )
    with_vector: Optional[bool] = Field(False, description="Whether to return the point vector with the result?")


class ScrollResult(BaseModel):
    """
    Result of the points read request
    """

    points: List["Record"] = Field(..., description="List of retrieved points")
    # None means there are no further pages.
    next_page_offset: Optional["ExtendedPointId"] = Field(
        None, description="Offset which should be used to retrieve a next page result"
    )


class SearchParams(BaseModel):
    """
    Additional parameters of the search
    """

    hnsw_ef: Optional[int] = Field(
        None,
        description="Params relevant to HNSW index /// Size of the beam in a beam-search. Larger the value - more accurate the result, more time required for search.",
    )
class SearchRequest(BaseModel):
    """
    Search request. Holds all conditions and parameters for the search of most similar points by vector similarity given the filtering restrictions.
    """

    vector: List[float] = Field(..., description="Look for vectors closest to this")
    filter: Optional["Filter"] = Field(None, description="Look only for points which satisfies this conditions")
    params: Optional["SearchParams"] = Field(None, description="Additional search params")
    limit: int = Field(..., description="Max number of result to return")
    offset: Optional[int] = Field(
        0,
        description="Offset of the first result to return. May be used to paginate results. Note: large offset values may cause performance issues.",
    )
    with_payload: Optional["WithPayloadInterface"] = Field(
        None, description="Select which payload to return with the response. Default: None"
    )
    with_vector: Optional[bool] = Field(False, description="Whether to return the point vector with the result?")
    score_threshold: Optional[float] = Field(
        None,
        description="Define a minimal score threshold for the result. If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned.",
    )
class SegmentConfig(BaseModel):
    """Configuration a segment was built with."""

    vector_size: int = Field(..., description="Size of a vectors used")
    distance: "Distance" = Field(..., description="")
    index: "Indexes" = Field(..., description="")
    storage_type: "StorageType" = Field(..., description="")
    payload_storage_type: Optional["PayloadStorageType"] = Field(None, description="")


class SegmentInfo(BaseModel):
    """
    Aggregated information about segment
    """

    segment_type: "SegmentType" = Field(..., description="Aggregated information about segment")
    num_vectors: int = Field(..., description="Aggregated information about segment")
    num_points: int = Field(..., description="Aggregated information about segment")
    num_deleted_vectors: int = Field(..., description="Aggregated information about segment")
    ram_usage_bytes: int = Field(..., description="Aggregated information about segment")
    disk_usage_bytes: int = Field(..., description="Aggregated information about segment")
    is_appendable: bool = Field(..., description="Aggregated information about segment")
    index_schema: Dict[str, "PayloadIndexInfo"] = Field(..., description="Aggregated information about segment")


class SegmentTelemetry(BaseModel):
    """Telemetry for a single segment: info, config and index statistics."""

    info: "SegmentInfo" = Field(..., description="")
    config: "SegmentConfig" = Field(..., description="")
    vector_index: "VectorIndexTelemetry" = Field(..., description="")
    payload_field_indices: List["PayloadIndexTelemetry"] = Field(..., description="")


class SegmentType(str, Enum):
    """Kind of segment: plain (unindexed), indexed, or special."""

    PLAIN = "plain"
    INDEXED = "indexed"
    SPECIAL = "special"


class ServiceConfigTelemetry(BaseModel):
    """Service-level configuration values reported in telemetry."""

    grpc_enable: bool = Field(..., description="")
    max_request_size_mb: int = Field(..., description="")
    max_workers: Optional[int] = Field(None, description="")
    enable_cors: bool = Field(..., description="")
class SetPayload(BaseModel):
    """Assign the given payload to each point in `points`."""

    payload: "Payload" = Field(..., description="")
    points: List["ExtendedPointId"] = Field(..., description="Assigns payload to each point in this list")


# Variants of the `ShardTelemetry` union, one per shard kind.
class ShardTelemetryOneOf(BaseModel):
    remote: "ShardTelemetryOneOfRemote" = Field(..., description="")


class ShardTelemetryOneOf1(BaseModel):
    local: "ShardTelemetryOneOf1Local" = Field(..., description="")


class ShardTelemetryOneOf1Local(BaseModel):
    # Per-segment and per-optimizer telemetry for a locally hosted shard.
    segments: List["SegmentTelemetry"] = Field(..., description="")
    optimizers: List["OptimizerTelemetry"] = Field(..., description="")


class ShardTelemetryOneOf2(BaseModel):
    proxy: Any = Field(..., description="")


class ShardTelemetryOneOf3(BaseModel):
    forward_proxy: Any = Field(..., description="")


class ShardTelemetryOneOfRemote(BaseModel):
    """Search/update statistics for a shard hosted on a remote peer."""

    shard_id: int = Field(..., description="")
    searches: "TelemetryOperationStatistics" = Field(..., description="")
    updates: "TelemetryOperationStatistics" = Field(..., description="")
class ShardTransferInfo(BaseModel):
    """
    Information about an ongoing transfer of a shard between two peers.
    """

    shard_id: int = Field(..., description="")
    # FIX: the generated `_from` field was silently ignored — pydantic treats
    # leading-underscore names as private attributes, not model fields, so the
    # source peer id was dropped from validation and serialization. `from` is
    # a Python keyword, so expose it as `from_` with a field alias mapping to
    # the wire name "from".
    from_: int = Field(..., description="", alias="from")
    to: int = Field(..., description="")
class SnapshotDescription(BaseModel):
    """Metadata of a stored snapshot: name, creation time, size in bytes."""

    name: str = Field(..., description="")
    creation_time: str = Field(..., description="")
    size: int = Field(..., description="")


class StateRole(str, Enum):
    """Role of a peer in the raft consensus."""

    FOLLOWER = "Follower"
    CANDIDATE = "Candidate"
    LEADER = "Leader"
    PRECANDIDATE = "PreCandidate"


class StorageTypeOneOf(BaseModel):
    """
    Store vectors in memory and use persistence storage only if vectors are changed
    """

    type: Literal[
        "in_memory",
    ] = Field(..., description="Store vectors in memory and use persistence storage only if vectors are changed")


class StorageTypeOneOf1(BaseModel):
    """
    Use memmap to store vectors, a little slower than `InMemory`, but requires little RAM
    """

    type: Literal[
        "mmap",
    ] = Field(..., description="Use memmap to store vectors, a little slower than `InMemory`, but requires little RAM")
class TelemetryData(BaseModel):
    """Top-level telemetry report aggregating app, system and cluster data."""

    id: str = Field(..., description="")
    app: "AppBuildTelemetry" = Field(..., description="")
    system: "RunningEnvironmentTelemetry" = Field(..., description="")
    configs: "ConfigsTelemetry" = Field(..., description="")
    collections: List["CollectionTelemetry"] = Field(..., description="")
    web: "WebApiTelemetry" = Field(..., description="")
    grpc_calls_statistics: "TelemetryOperationStatistics" = Field(..., description="")
    cluster_status: "ClusterStatus" = Field(..., description="")


class TelemetryOperationStatistics(BaseModel):
    """Success/failure counters and average success latency for an operation."""

    ok_count: int = Field(..., description="")
    fail_count: int = Field(..., description="")
    ok_avg_time: "Duration" = Field(..., description="")


class UpdateCollection(BaseModel):
    """
    Operation for updating parameters of the existing collection
    """

    optimizers_config: Optional["OptimizersConfigDiff"] = Field(
        None,
        description="Custom params for Optimizers. If none - values from service configuration file are used. This operation is blocking, it will only proceed ones all current optimizations are complete",
    )


class UpdateResult(BaseModel):
    """Acknowledgement of a write operation."""

    operation_id: int = Field(..., description="Sequential number of the operation")
    status: "UpdateStatus" = Field(..., description="")


class UpdateStatus(str, Enum):
    """Whether a write was merely acknowledged or fully applied."""

    ACKNOWLEDGED = "acknowledged"
    COMPLETED = "completed"
class ValuesCount(BaseModel):
"""
Values count filter request
"""
lt: Optional[int] = Field(None, description="point.key.length() < values_count.lt")
gt: Optional[int] = Field(None, description="point.key.length() > values_count.gt")
gte: Optional[int] = Field(None, description="point.key.length() >= values_count.gte")
lte: Optional[int] = Field(None, description="point.key.length() <= values_count.lte")
class VectorIndexTelemetry(BaseModel):
small_cardinality_searches: "TelemetryOperationStatistics" = Field(..., description="")
large_cardinality_searches: "TelemetryOperationStatistics" = Field(..., description="")
positive_check_cardinality_searches: "TelemetryOperationStatistics" = Field(..., description="")
negative_check_cardinality_searches: "TelemetryOperationStatistics" = Field(..., description="")
class WalConfig(BaseModel):
wal_capacity_mb: int = Field(..., description="Size of a single WAL segment in MB")
wal_segments_ahead: int = Field(..., description="Number of WAL segments to create ahead of actually used ones")
class WalConfigDiff(BaseModel):
wal_capacity_mb: Optional[int] = Field(None, description="Size of a single WAL segment in MB")
wal_segments_ahead: Optional[int] = Field(
None, description="Number of WAL segments to create ahead of actually used ones"
)
class WebApiTelemetry(BaseModel):
responses: Dict[str, int] = Field(..., description="")
AliasOperations = Union[
CreateAliasOperation,
DeleteAliasOperation,
RenameAliasOperation,
]
ClusterOperations = Union[
MoveShardOperation,
AbortTransferOperation,
]
ClusterStatus = Union[
ClusterStatusOneOf,
ClusterStatusOneOf1,
]
Condition = Union[
FieldCondition,
IsEmptyCondition,
HasIdCondition,
Filter,
]
ConsensusThreadStatus = Union[
ConsensusThreadStatusOneOf,
ConsensusThreadStatusOneOf1,
ConsensusThreadStatusOneOf2,
]
ExtendedPointId = Union[
StrictInt,
StrictStr,
]
Indexes = Union[
IndexesOneOf,
IndexesOneOf1,
]
Match = Union[
MatchValue,
MatchKeyword,
MatchInteger,
]
OptimizerTelemetry = Union[
OptimizerTelemetryOneOf,
OptimizerTelemetryOneOf1,
OptimizerTelemetryOneOf2,
]
OptimizersStatus = Union[
OptimizersStatusOneOf,
OptimizersStatusOneOf1,
]
PayloadSelector = Union[
PayloadSelectorInclude,
PayloadSelectorExclude,
]
PayloadStorageType = Union[
PayloadStorageTypeOneOf,
PayloadStorageTypeOneOf1,
]
PointInsertOperations = Union[
PointsBatch,
PointsList,
]
PointsSelector = Union[
PointIdsList,
FilterSelector,
]
ShardTelemetry = Union[
ShardTelemetryOneOf,
ShardTelemetryOneOf1,
ShardTelemetryOneOf2,
ShardTelemetryOneOf3,
]
StorageType = Union[
StorageTypeOneOf,
StorageTypeOneOf1,
]
ValueVariants = Union[
StrictBool,
StrictInt,
StrictStr,
]
WithPayloadInterface = Union[
PayloadSelector,
List[StrictStr],
StrictBool,
] | /reframer-0.0.1-py3-none-any.whl/reframe/client/http/models/models.py | 0.89353 | 0.241098 | models.py | pypi |
import itertools
import math
from abc import ABC
from itertools import islice, count
from typing import Optional, Iterable, Union, List
import numpy as np
from reframe.conversions.common_types import Record
from reframe.client.http.models import ExtendedPointId
from reframe.parallel_processor import Worker
def iter_batch(iterable, size) -> Iterable:
"""
>>> list(iter_batch([1,2,3,4,5], 3))
[[1, 2, 3], [4, 5]]
"""
source_iter = iter(iterable)
while source_iter:
b = list(islice(source_iter, size))
if len(b) == 0:
break
yield b
class BaseUploader(Worker, ABC):
@classmethod
def iterate_records_batches(cls,
records: Iterable[Record],
batch_size: int
) -> Iterable:
record_batches = iter_batch(records, batch_size)
for record_batch in record_batches:
ids_batch = [record.id for record in record_batch]
vectors_batch = [record.vector for record in record_batch]
payload_batch = [record.payload for record in record_batch]
yield ids_batch, vectors_batch, payload_batch
@classmethod
def iterate_batches(cls,
vectors: Union[np.ndarray, Iterable[List[float]]],
payload: Optional[Iterable[dict]],
ids: Optional[Iterable[ExtendedPointId]],
batch_size: int,
) -> Iterable:
if ids is None:
ids = itertools.count()
ids_batches = iter_batch(ids, batch_size)
if payload is None:
payload_batches = (None for _ in count())
else:
payload_batches = iter_batch(payload, batch_size)
if isinstance(vectors, np.ndarray):
num_vectors = vectors.shape[0]
num_batches = int(math.ceil(num_vectors / batch_size))
vector_batches = (vectors[i * batch_size:(i + 1) * batch_size].tolist() for i in range(num_batches))
else:
vector_batches = iter_batch(vectors, batch_size)
yield from zip(ids_batches, vector_batches, payload_batches) | /reframer-0.0.1-py3-none-any.whl/reframe/client/uploader/uploader.py | 0.84916 | 0.238539 | uploader.py | pypi |
import warnings
from dataclasses import dataclass
from datetime import datetime
from typing import Dict, List, Optional
import betterproto
from betterproto.grpc.grpclib_server import ServiceBase
import grpclib
class Distance(betterproto.Enum):
    """Vector distance metric used to score similarity between vectors."""
    UnknownDistance = 0
    Cosine = 1
    Euclid = 2
    Dot = 3
class CollectionStatus(betterproto.Enum):
    """Current health status reported for a collection."""
    UnknownCollectionStatus = 0
    Green = 1
    Yellow = 2
    Red = 3
class PayloadSchemaType(betterproto.Enum):
    """Data type recorded in a collection's payload schema for a field."""
    UnknownType = 0
    Keyword = 1
    Integer = 2
    Float = 3
    Geo = 4
class NullValue(betterproto.Enum):
    """
    `NullValue` is a singleton enumeration to represent the null value for the
    `Value` type union. The JSON representation for `NullValue` is JSON
    `null`.
    """

    # NOTE(review): matches google.protobuf.NullValue — presumably generated
    # from the well-known struct.proto types; confirm against the .proto file.
    # Null value.
    NULL_VALUE = 0
class FieldType(betterproto.Enum):
    """Field data type requested when creating a payload field index."""
    FieldTypeKeyword = 0
    FieldTypeInteger = 1
    FieldTypeFloat = 2
    FieldTypeGeo = 3
class UpdateStatus(betterproto.Enum):
    """Progress state of a point-update operation."""
    UnknownUpdateStatus = 0
    Acknowledged = 1
    Completed = 2
@dataclass(eq=False, repr=False)
class GetCollectionInfoRequest(betterproto.Message):
    """Request body for the `Collections/Get` RPC."""
    collection_name: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class ListCollectionsRequest(betterproto.Message):
    """Empty request body for the `Collections/List` RPC."""
    pass
@dataclass(eq=False, repr=False)
class CollectionDescription(betterproto.Message):
    """A single entry (collection name) in a collections listing."""
    name: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class GetCollectionInfoResponse(betterproto.Message):
    """Response of `Collections/Get`: collection info plus elapsed time."""
    result: "CollectionInfo" = betterproto.message_field(1)
    time: float = betterproto.double_field(2)
@dataclass(eq=False, repr=False)
class ListCollectionsResponse(betterproto.Message):
    """Response of `Collections/List`: descriptions plus elapsed time."""
    collections: List["CollectionDescription"] = betterproto.message_field(1)
    time: float = betterproto.double_field(2)
@dataclass(eq=False, repr=False)
class OptimizerStatus(betterproto.Message):
    """Optimizer health flag with an error message when not ok."""
    ok: bool = betterproto.bool_field(1)
    error: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class HnswConfigDiff(betterproto.Message):
    """Partial (diff) update of HNSW index parameters; unset fields keep
    their current values."""
    # Number of edges per node in the index graph. Larger the value - more
    # accurate the search, more space required.
    m: Optional[int] = betterproto.uint64_field(1, optional=True, group="_m")
    # Number of neighbours to consider during the index building. Larger the
    # value - more accurate the search, more time required to build index.
    ef_construct: Optional[int] = betterproto.uint64_field(
        2, optional=True, group="_ef_construct"
    )
    # Minimal size (in KiloBytes) of vectors for additional payload-based
    # indexing.If payload chunk is smaller than `full_scan_threshold` additional
    # indexing won't be used -in this case full-scan search should be preferred
    # by query planner and additional indexing is not required.Note: 1Kb = 1
    # vector of size 256
    full_scan_threshold: Optional[int] = betterproto.uint64_field(
        3, optional=True, group="_full_scan_threshold"
    )
    # Number of parallel threads used for background index building. If 0 - auto
    # selection.
    max_indexing_threads: Optional[int] = betterproto.uint64_field(
        4, optional=True, group="_max_indexing_threads"
    )
@dataclass(eq=False, repr=False)
class WalConfigDiff(betterproto.Message):
    """Partial (diff) update of write-ahead-log settings."""
    # Size of a single WAL segment in MB.
    wal_capacity_mb: Optional[int] = betterproto.uint64_field(
        1, optional=True, group="_wal_capacity_mb"
    )
    # Number of WAL segments to create ahead of the actually used ones.
    wal_segments_ahead: Optional[int] = betterproto.uint64_field(
        2, optional=True, group="_wal_segments_ahead"
    )
@dataclass(eq=False, repr=False)
class OptimizersConfigDiff(betterproto.Message):
    """Partial (diff) update of segment-optimizer parameters; unset fields
    keep their current values."""
    # The minimal fraction of deleted vectors in a segment, required to perform
    # segment optimization
    deleted_threshold: Optional[float] = betterproto.double_field(
        1, optional=True, group="_deleted_threshold"
    )
    # The minimal number of vectors in a segment, required to perform segment
    # optimization
    vacuum_min_vector_number: Optional[int] = betterproto.uint64_field(
        2, optional=True, group="_vacuum_min_vector_number"
    )
    # Target amount of segments optimizer will try to keep.Real amount of
    # segments may vary depending on multiple parameters:- Amount of stored
    # points.- Current write RPS.It is recommended to select default number of
    # segments as a factor of the number of search threads,so that each segment
    # would be handled evenly by one of the threads.
    default_segment_number: Optional[int] = betterproto.uint64_field(
        3, optional=True, group="_default_segment_number"
    )
    # Do not create segments larger this size (in KiloBytes).Large segments might
    # require disproportionately long indexation times,therefore it makes sense
    # to limit the size of segments.If indexation speed have more priority for
    # your - make this parameter lower.If search speed is more important - make
    # this parameter higher.Note: 1Kb = 1 vector of size 256
    max_segment_size: Optional[int] = betterproto.uint64_field(
        4, optional=True, group="_max_segment_size"
    )
    # Maximum size (in KiloBytes) of vectors to store in-memory per
    # segment.Segments larger than this threshold will be stored as read-only
    # memmaped file.To enable memmap storage, lower the thresholdNote: 1Kb = 1
    # vector of size 256
    memmap_threshold: Optional[int] = betterproto.uint64_field(
        5, optional=True, group="_memmap_threshold"
    )
    # Maximum size (in KiloBytes) of vectors allowed for plain index.Default
    # value based on https://github.com/google-research/google-
    # research/blob/master/scann/docs/algorithms.mdNote: 1Kb = 1 vector of size
    # 256
    indexing_threshold: Optional[int] = betterproto.uint64_field(
        6, optional=True, group="_indexing_threshold"
    )
    # Interval between forced flushes.
    flush_interval_sec: Optional[int] = betterproto.uint64_field(
        7, optional=True, group="_flush_interval_sec"
    )
    # Max number of threads, which can be used for optimization. If 0 - `NUM_CPU
    # - 1` will be used
    max_optimization_threads: Optional[int] = betterproto.uint64_field(
        8, optional=True, group="_max_optimization_threads"
    )
@dataclass(eq=False, repr=False)
class CreateCollection(betterproto.Message):
    """Request body for `Collections/Create`; optional sub-configs fall back
    to server defaults when unset."""
    collection_name: str = betterproto.string_field(1)
    vector_size: int = betterproto.uint64_field(2)
    distance: "Distance" = betterproto.enum_field(3)
    hnsw_config: Optional["HnswConfigDiff"] = betterproto.message_field(
        4, optional=True, group="_hnsw_config"
    )
    wal_config: Optional["WalConfigDiff"] = betterproto.message_field(
        5, optional=True, group="_wal_config"
    )
    optimizers_config: Optional["OptimizersConfigDiff"] = betterproto.message_field(
        6, optional=True, group="_optimizers_config"
    )
    shard_number: Optional[int] = betterproto.uint32_field(
        7, optional=True, group="_shard_number"
    )
    on_disk_payload: Optional[bool] = betterproto.bool_field(
        8, optional=True, group="_on_disk_payload"
    )
    timeout: Optional[int] = betterproto.uint64_field(
        9, optional=True, group="_timeout"
    )
@dataclass(eq=False, repr=False)
class UpdateCollection(betterproto.Message):
    """Request body for `Collections/Update` (optimizer config change)."""
    collection_name: str = betterproto.string_field(1)
    optimizers_config: Optional["OptimizersConfigDiff"] = betterproto.message_field(
        2, optional=True, group="_optimizers_config"
    )
    timeout: Optional[int] = betterproto.uint64_field(
        3, optional=True, group="_timeout"
    )
@dataclass(eq=False, repr=False)
class DeleteCollection(betterproto.Message):
    """Request body for `Collections/Delete`."""
    collection_name: str = betterproto.string_field(1)
    timeout: Optional[int] = betterproto.uint64_field(
        2, optional=True, group="_timeout"
    )
@dataclass(eq=False, repr=False)
class CollectionOperationResponse(betterproto.Message):
    """Generic collection-operation result: success flag plus elapsed time."""
    result: bool = betterproto.bool_field(1)
    time: float = betterproto.double_field(2)
@dataclass(eq=False, repr=False)
class CollectionParams(betterproto.Message):
    """Core immutable parameters of a collection."""
    vector_size: int = betterproto.uint64_field(1)
    distance: "Distance" = betterproto.enum_field(2)
    shard_number: int = betterproto.uint32_field(3)
    on_disk_payload: bool = betterproto.bool_field(4)
@dataclass(eq=False, repr=False)
class CollectionConfig(betterproto.Message):
    """Aggregate of all configuration sections of a collection."""
    params: "CollectionParams" = betterproto.message_field(1)
    hnsw_config: "HnswConfigDiff" = betterproto.message_field(2)
    optimizer_config: "OptimizersConfigDiff" = betterproto.message_field(3)
    wal_config: "WalConfigDiff" = betterproto.message_field(4)
@dataclass(eq=False, repr=False)
class PayloadSchemaInfo(betterproto.Message):
    """Schema entry describing the indexed data type of one payload field."""
    data_type: "PayloadSchemaType" = betterproto.enum_field(1)
@dataclass(eq=False, repr=False)
class CollectionInfo(betterproto.Message):
    """Full status/configuration snapshot of a collection as returned by
    `Collections/Get`."""
    status: "CollectionStatus" = betterproto.enum_field(1)
    optimizer_status: "OptimizerStatus" = betterproto.message_field(2)
    vectors_count: int = betterproto.uint64_field(3)
    segments_count: int = betterproto.uint64_field(4)
    # Deprecated (see __post_init__); kept for wire compatibility.
    disk_data_size: int = betterproto.uint64_field(5)
    # Deprecated (see __post_init__); kept for wire compatibility.
    ram_data_size: int = betterproto.uint64_field(6)
    config: "CollectionConfig" = betterproto.message_field(7)
    payload_schema: Dict[str, "PayloadSchemaInfo"] = betterproto.map_field(
        8, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE
    )
    points_count: int = betterproto.uint64_field(9)
    indexed_vectors_count: Optional[int] = betterproto.uint64_field(
        10, optional=True, group="_indexed_vectors_count"
    )
    def __post_init__(self) -> None:
        """Emit DeprecationWarnings when the legacy size fields are non-zero."""
        super().__post_init__()
        if self.disk_data_size:
            warnings.warn(
                "CollectionInfo.disk_data_size is deprecated", DeprecationWarning
            )
        if self.ram_data_size:
            warnings.warn(
                "CollectionInfo.ram_data_size is deprecated", DeprecationWarning
            )
@dataclass(eq=False, repr=False)
class ChangeAliases(betterproto.Message):
    """Request body for `Collections/UpdateAliases`: a batch of alias actions."""
    actions: List["AliasOperations"] = betterproto.message_field(1)
    timeout: Optional[int] = betterproto.uint64_field(
        2, optional=True, group="_timeout"
    )
@dataclass(eq=False, repr=False)
class AliasOperations(betterproto.Message):
    """Oneof wrapper: exactly one of create/rename/delete alias actions."""
    create_alias: "CreateAlias" = betterproto.message_field(1, group="action")
    rename_alias: "RenameAlias" = betterproto.message_field(2, group="action")
    delete_alias: "DeleteAlias" = betterproto.message_field(3, group="action")
@dataclass(eq=False, repr=False)
class CreateAlias(betterproto.Message):
    """Create an alias pointing at a collection."""
    collection_name: str = betterproto.string_field(1)
    alias_name: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class RenameAlias(betterproto.Message):
    """Rename an existing alias."""
    old_alias_name: str = betterproto.string_field(1)
    new_alias_name: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class DeleteAlias(betterproto.Message):
    """Remove an existing alias."""
    alias_name: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class Struct(betterproto.Message):
    """
    `Struct` represents a structured data value, consisting of fields which map
    to dynamically typed values. In some languages, `Struct` might be supported
    by a native representation. For example, in scripting languages like JS a
    struct is represented as an object. The details of that representation are
    described together with the proto support for the language. The JSON
    representation for `Struct` is JSON object.
    """

    # NOTE(review): matches google.protobuf.Struct from the well-known types.
    # Unordered map of dynamically typed values.
    fields: Dict[str, "Value"] = betterproto.map_field(
        1, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE
    )
@dataclass(eq=False, repr=False)
class Value(betterproto.Message):
    """
    `Value` represents a dynamically typed value which can be either null, a
    number, a string, a boolean, a recursive struct value, or a list of values.
    A producer of value is expected to set one of that variants, absence of any
    variant indicates an error. The JSON representation for `Value` is JSON
    value.
    """

    # All variants share the oneof group "kind": at most one may be set.
    # Represents a null value.
    null_value: "NullValue" = betterproto.enum_field(1, group="kind")
    # Represents a double value.
    double_value: float = betterproto.double_field(2, group="kind")
    # Represents an integer value
    integer_value: int = betterproto.int64_field(3, group="kind")
    # Represents a string value.
    string_value: str = betterproto.string_field(4, group="kind")
    # Represents a boolean value.
    bool_value: bool = betterproto.bool_field(5, group="kind")
    # Represents a structured value.
    struct_value: "Struct" = betterproto.message_field(6, group="kind")
    # Represents a repeated `Value`.
    list_value: "ListValue" = betterproto.message_field(7, group="kind")
@dataclass(eq=False, repr=False)
class ListValue(betterproto.Message):
    """
    `ListValue` is a wrapper around a repeated field of values. The JSON
    representation for `ListValue` is JSON array.
    """

    # Repeated field of dynamically typed values.
    values: List["Value"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class PointId(betterproto.Message):
    """Point identifier: either an unsigned integer or a UUID string (oneof)."""
    num: int = betterproto.uint64_field(1, group="point_id_options")
    uuid: str = betterproto.string_field(2, group="point_id_options")
@dataclass(eq=False, repr=False)
class UpsertPoints(betterproto.Message):
    """Request body for `Points/Upsert`: insert or update a batch of points."""
    collection_name: str = betterproto.string_field(1)
    wait: Optional[bool] = betterproto.bool_field(2, optional=True, group="_wait")
    points: List["PointStruct"] = betterproto.message_field(3)
@dataclass(eq=False, repr=False)
class DeletePoints(betterproto.Message):
    """Request body for `Points/Delete`: remove points matched by a selector."""
    collection_name: str = betterproto.string_field(1)
    wait: Optional[bool] = betterproto.bool_field(2, optional=True, group="_wait")
    points: "PointsSelector" = betterproto.message_field(3)
@dataclass(eq=False, repr=False)
class GetPoints(betterproto.Message):
    """Request body for `Points/Get`: fetch specific points by id."""
    collection_name: str = betterproto.string_field(1)
    ids: List["PointId"] = betterproto.message_field(2)
    with_vector: Optional[bool] = betterproto.bool_field(
        3, optional=True, group="_with_vector"
    )
    with_payload: "WithPayloadSelector" = betterproto.message_field(4)
@dataclass(eq=False, repr=False)
class SetPayloadPoints(betterproto.Message):
    """Request body for `Points/SetPayload`: attach payload values to points."""
    collection_name: str = betterproto.string_field(1)
    wait: Optional[bool] = betterproto.bool_field(2, optional=True, group="_wait")
    payload: Dict[str, "Value"] = betterproto.map_field(
        3, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE
    )
    points: List["PointId"] = betterproto.message_field(4)
@dataclass(eq=False, repr=False)
class DeletePayloadPoints(betterproto.Message):
    """Request body for `Points/DeletePayload`: drop payload keys from points."""
    collection_name: str = betterproto.string_field(1)
    wait: Optional[bool] = betterproto.bool_field(2, optional=True, group="_wait")
    keys: List[str] = betterproto.string_field(3)
    points: List["PointId"] = betterproto.message_field(4)
@dataclass(eq=False, repr=False)
class ClearPayloadPoints(betterproto.Message):
    """Request body for clearing the whole payload of selected points."""
    collection_name: str = betterproto.string_field(1)
    wait: Optional[bool] = betterproto.bool_field(2, optional=True, group="_wait")
    points: "PointsSelector" = betterproto.message_field(3)
@dataclass(eq=False, repr=False)
class CreateFieldIndexCollection(betterproto.Message):
    """Request body for creating a payload-field index in a collection."""
    collection_name: str = betterproto.string_field(1)
    wait: Optional[bool] = betterproto.bool_field(2, optional=True, group="_wait")
    field_name: str = betterproto.string_field(3)
    field_type: Optional["FieldType"] = betterproto.enum_field(
        4, optional=True, group="_field_type"
    )
@dataclass(eq=False, repr=False)
class DeleteFieldIndexCollection(betterproto.Message):
    """Request body for dropping a payload-field index from a collection."""
    collection_name: str = betterproto.string_field(1)
    wait: Optional[bool] = betterproto.bool_field(2, optional=True, group="_wait")
    field_name: str = betterproto.string_field(3)
@dataclass(eq=False, repr=False)
class PayloadIncludeSelector(betterproto.Message):
    """Whitelist of payload fields to return."""
    fields: List[str] = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class PayloadExcludeSelector(betterproto.Message):
    """Blacklist of payload fields to omit from results."""
    fields: List[str] = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class WithPayloadSelector(betterproto.Message):
    """Oneof: return all payload (bool), or an include/exclude field list."""
    enable: bool = betterproto.bool_field(1, group="selector_options")
    include: "PayloadIncludeSelector" = betterproto.message_field(
        2, group="selector_options"
    )
    exclude: "PayloadExcludeSelector" = betterproto.message_field(
        3, group="selector_options"
    )
@dataclass(eq=False, repr=False)
class SearchParams(betterproto.Message):
    """Optional low-level search parameters."""
    # Params relevant to HNSW index. Size of the beam in a beam-search.Larger the
    # value - more accurate the result, more time required for search.
    hnsw_ef: Optional[int] = betterproto.uint64_field(
        1, optional=True, group="_hnsw_ef"
    )
@dataclass(eq=False, repr=False)
class SearchPoints(betterproto.Message):
    """Request body for a nearest-neighbour search by query vector."""
    collection_name: str = betterproto.string_field(1)
    vector: List[float] = betterproto.float_field(2)
    filter: "Filter" = betterproto.message_field(3)
    limit: int = betterproto.uint64_field(4)
    with_vector: Optional[bool] = betterproto.bool_field(
        5, optional=True, group="_with_vector"
    )
    with_payload: "WithPayloadSelector" = betterproto.message_field(6)
    params: "SearchParams" = betterproto.message_field(7)
    score_threshold: Optional[float] = betterproto.float_field(
        8, optional=True, group="_score_threshold"
    )
    offset: Optional[int] = betterproto.uint64_field(9, optional=True, group="_offset")
@dataclass(eq=False, repr=False)
class ScrollPoints(betterproto.Message):
    """Request body for paginated iteration over points matching a filter."""
    collection_name: str = betterproto.string_field(1)
    filter: "Filter" = betterproto.message_field(2)
    # Pagination cursor: start after this point id.
    offset: Optional["PointId"] = betterproto.message_field(
        3, optional=True, group="_offset"
    )
    limit: Optional[int] = betterproto.uint32_field(4, optional=True, group="_limit")
    with_vector: Optional[bool] = betterproto.bool_field(
        5, optional=True, group="_with_vector"
    )
    with_payload: "WithPayloadSelector" = betterproto.message_field(6)
@dataclass(eq=False, repr=False)
class RecommendPoints(betterproto.Message):
    """Request body for recommendation search using positive/negative
    example point ids."""
    collection_name: str = betterproto.string_field(1)
    positive: List["PointId"] = betterproto.message_field(2)
    negative: List["PointId"] = betterproto.message_field(3)
    filter: "Filter" = betterproto.message_field(4)
    limit: int = betterproto.uint64_field(5)
    with_vector: Optional[bool] = betterproto.bool_field(
        6, optional=True, group="_with_vector"
    )
    with_payload: "WithPayloadSelector" = betterproto.message_field(7)
    params: "SearchParams" = betterproto.message_field(8)
    score_threshold: Optional[float] = betterproto.float_field(
        9, optional=True, group="_score_threshold"
    )
    offset: Optional[int] = betterproto.uint64_field(10, optional=True, group="_offset")
@dataclass(eq=False, repr=False)
class CountPoints(betterproto.Message):
    """Request body for counting points matching a filter."""
    collection_name: str = betterproto.string_field(1)
    filter: "Filter" = betterproto.message_field(2)
    exact: Optional[bool] = betterproto.bool_field(3, optional=True, group="_exact")
@dataclass(eq=False, repr=False)
class PointsOperationResponse(betterproto.Message):
    """Generic point-operation result plus elapsed time."""
    result: "UpdateResult" = betterproto.message_field(1)
    time: float = betterproto.double_field(2)
@dataclass(eq=False, repr=False)
class UpdateResult(betterproto.Message):
    """Sequence number and status of an update operation."""
    operation_id: int = betterproto.uint64_field(1)
    status: "UpdateStatus" = betterproto.enum_field(2)
@dataclass(eq=False, repr=False)
class ScoredPoint(betterproto.Message):
    """A search/recommend hit: point data plus its similarity score."""
    id: "PointId" = betterproto.message_field(1)
    payload: Dict[str, "Value"] = betterproto.map_field(
        2, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE
    )
    score: float = betterproto.float_field(3)
    vector: List[float] = betterproto.float_field(4)
    version: int = betterproto.uint64_field(5)
@dataclass(eq=False, repr=False)
class SearchResponse(betterproto.Message):
    """Search results plus elapsed time."""
    result: List["ScoredPoint"] = betterproto.message_field(1)
    time: float = betterproto.double_field(2)
@dataclass(eq=False, repr=False)
class CountResponse(betterproto.Message):
    """Count result plus elapsed time."""
    result: "CountResult" = betterproto.message_field(1)
    time: float = betterproto.double_field(2)
@dataclass(eq=False, repr=False)
class ScrollResponse(betterproto.Message):
    """One page of scroll results; `next_page_offset` unset means last page."""
    next_page_offset: Optional["PointId"] = betterproto.message_field(
        1, optional=True, group="_next_page_offset"
    )
    result: List["RetrievedPoint"] = betterproto.message_field(2)
    time: float = betterproto.double_field(3)
@dataclass(eq=False, repr=False)
class CountResult(betterproto.Message):
    """Number of points matched by a count request."""
    count: int = betterproto.uint64_field(1)
@dataclass(eq=False, repr=False)
class RetrievedPoint(betterproto.Message):
    """A point returned by get/scroll (no similarity score)."""
    id: "PointId" = betterproto.message_field(1)
    payload: Dict[str, "Value"] = betterproto.map_field(
        2, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE
    )
    vector: List[float] = betterproto.float_field(3)
@dataclass(eq=False, repr=False)
class GetResponse(betterproto.Message):
    """Points fetched by id plus elapsed time."""
    result: List["RetrievedPoint"] = betterproto.message_field(1)
    time: float = betterproto.double_field(2)
@dataclass(eq=False, repr=False)
class RecommendResponse(betterproto.Message):
    """Recommendation results plus elapsed time."""
    result: List["ScoredPoint"] = betterproto.message_field(1)
    time: float = betterproto.double_field(2)
@dataclass(eq=False, repr=False)
class Filter(betterproto.Message):
    """Boolean combination of conditions (should/must/must_not clauses)."""
    should: List["Condition"] = betterproto.message_field(1)
    must: List["Condition"] = betterproto.message_field(2)
    must_not: List["Condition"] = betterproto.message_field(3)
@dataclass(eq=False, repr=False)
class Condition(betterproto.Message):
    """Oneof: a field condition, is-empty check, id check, or nested filter."""
    field: "FieldCondition" = betterproto.message_field(1, group="condition_one_of")
    is_empty: "IsEmptyCondition" = betterproto.message_field(
        2, group="condition_one_of"
    )
    has_id: "HasIdCondition" = betterproto.message_field(3, group="condition_one_of")
    filter: "Filter" = betterproto.message_field(4, group="condition_one_of")
@dataclass(eq=False, repr=False)
class IsEmptyCondition(betterproto.Message):
    """Condition on a payload key having no value."""
    key: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class HasIdCondition(betterproto.Message):
    """Condition matching points whose id is in the given list."""
    has_id: List["PointId"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class FieldCondition(betterproto.Message):
    """Condition on one payload field: match, range, geo, or values-count."""
    key: str = betterproto.string_field(1)
    match: "Match" = betterproto.message_field(2)
    range: "Range" = betterproto.message_field(3)
    geo_bounding_box: "GeoBoundingBox" = betterproto.message_field(4)
    geo_radius: "GeoRadius" = betterproto.message_field(5)
    values_count: "ValuesCount" = betterproto.message_field(6)
@dataclass(eq=False, repr=False)
class Match(betterproto.Message):
    """Exact-match value (oneof): keyword string, integer, or boolean."""
    keyword: str = betterproto.string_field(1, group="match_value")
    integer: int = betterproto.int64_field(2, group="match_value")
    boolean: bool = betterproto.bool_field(3, group="match_value")
@dataclass(eq=False, repr=False)
class Range(betterproto.Message):
    """Numeric range bounds; each bound is optional."""
    lt: Optional[float] = betterproto.double_field(1, optional=True, group="_lt")
    gt: Optional[float] = betterproto.double_field(2, optional=True, group="_gt")
    gte: Optional[float] = betterproto.double_field(3, optional=True, group="_gte")
    lte: Optional[float] = betterproto.double_field(4, optional=True, group="_lte")
@dataclass(eq=False, repr=False)
class GeoBoundingBox(betterproto.Message):
    """Geographic rectangle defined by its top-left and bottom-right corners."""
    top_left: "GeoPoint" = betterproto.message_field(1)
    bottom_right: "GeoPoint" = betterproto.message_field(2)
@dataclass(eq=False, repr=False)
class GeoRadius(betterproto.Message):
    """Geographic circle: center point and radius.

    NOTE(review): radius units are not stated here — presumably meters;
    confirm against the service docs.
    """
    center: "GeoPoint" = betterproto.message_field(1)
    radius: float = betterproto.float_field(2)
@dataclass(eq=False, repr=False)
class ValuesCount(betterproto.Message):
    """Bounds on the number of values stored under a payload key."""
    lt: Optional[int] = betterproto.uint64_field(1, optional=True, group="_lt")
    gt: Optional[int] = betterproto.uint64_field(2, optional=True, group="_gt")
    gte: Optional[int] = betterproto.uint64_field(3, optional=True, group="_gte")
    lte: Optional[int] = betterproto.uint64_field(4, optional=True, group="_lte")
@dataclass(eq=False, repr=False)
class PointsSelector(betterproto.Message):
    """Oneof: select points by explicit id list or by filter."""
    points: "PointsIdsList" = betterproto.message_field(
        1, group="points_selector_one_of"
    )
    filter: "Filter" = betterproto.message_field(2, group="points_selector_one_of")
@dataclass(eq=False, repr=False)
class PointsIdsList(betterproto.Message):
    """Explicit list of point ids."""
    ids: List["PointId"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class PointStruct(betterproto.Message):
    """A point to upsert: id, vector, and payload map."""
    id: "PointId" = betterproto.message_field(1)
    vector: List[float] = betterproto.float_field(2)
    payload: Dict[str, "Value"] = betterproto.map_field(
        3, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE
    )
@dataclass(eq=False, repr=False)
class GeoPoint(betterproto.Message):
    """Geographic coordinate; note field order is (lon, lat)."""
    lon: float = betterproto.double_field(1)
    lat: float = betterproto.double_field(2)
@dataclass(eq=False, repr=False)
class CreateFullSnapshotRequest(betterproto.Message):
    """Empty request body for creating a full-storage snapshot."""
    pass
@dataclass(eq=False, repr=False)
class ListFullSnapshotsRequest(betterproto.Message):
    """Empty request body for listing full-storage snapshots."""
    pass
@dataclass(eq=False, repr=False)
class CreateSnapshotRequest(betterproto.Message):
    """Request body for snapshotting one collection."""
    collection_name: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class ListSnapshotsRequest(betterproto.Message):
    """Request body for listing snapshots of one collection."""
    collection_name: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class SnapshotDescription(betterproto.Message):
    """Metadata of a stored snapshot: name, creation time, and byte size."""
    name: str = betterproto.string_field(1)
    creation_time: datetime = betterproto.message_field(2)
    size: int = betterproto.int64_field(3)
@dataclass(eq=False, repr=False)
class CreateSnapshotResponse(betterproto.Message):
    """Description of the newly created snapshot plus elapsed time."""
    snapshot_description: "SnapshotDescription" = betterproto.message_field(1)
    time: float = betterproto.double_field(2)
@dataclass(eq=False, repr=False)
class ListSnapshotsResponse(betterproto.Message):
    """Existing snapshot descriptions plus elapsed time."""
    snapshot_descriptions: List["SnapshotDescription"] = betterproto.message_field(1)
    time: float = betterproto.double_field(2)
@dataclass(eq=False, repr=False)
class HealthCheckRequest(betterproto.Message):
    """Empty request body for the service health check."""
    pass
@dataclass(eq=False, repr=False)
class HealthCheckReply(betterproto.Message):
    """Health-check response: service title and version string."""
    title: str = betterproto.string_field(1)
    version: str = betterproto.string_field(2)
class CollectionsStub(betterproto.ServiceStub):
    """Async client stub for the `reframe.Collections` gRPC service.

    Each method builds the corresponding request message and performs a
    unary-unary call. Optional message sub-configs are only assigned when
    provided, since assigning them marks oneof/optional presence on the wire.
    """

    async def get(self, *, collection_name: str = "") -> "GetCollectionInfoResponse":
        """Fetch detailed info about a single collection."""
        request = GetCollectionInfoRequest()
        request.collection_name = collection_name
        return await self._unary_unary(
            "/reframe.Collections/Get", request, GetCollectionInfoResponse
        )

    async def list(self) -> "ListCollectionsResponse":
        """List all existing collections."""
        request = ListCollectionsRequest()
        return await self._unary_unary(
            "/reframe.Collections/List", request, ListCollectionsResponse
        )

    async def create(
        self,
        *,
        collection_name: str = "",
        vector_size: int = 0,
        distance: "Distance" = 0,
        hnsw_config: Optional["HnswConfigDiff"] = None,
        wal_config: Optional["WalConfigDiff"] = None,
        optimizers_config: Optional["OptimizersConfigDiff"] = None,
        shard_number: Optional[int] = None,
        on_disk_payload: Optional[bool] = None,
        timeout: Optional[int] = None
    ) -> "CollectionOperationResponse":
        """Create a new collection; unset optional configs use server defaults."""
        request = CreateCollection()
        request.collection_name = collection_name
        request.vector_size = vector_size
        request.distance = distance
        if hnsw_config is not None:
            request.hnsw_config = hnsw_config
        if wal_config is not None:
            request.wal_config = wal_config
        if optimizers_config is not None:
            request.optimizers_config = optimizers_config
        request.shard_number = shard_number
        request.on_disk_payload = on_disk_payload
        request.timeout = timeout
        return await self._unary_unary(
            "/reframe.Collections/Create", request, CollectionOperationResponse
        )

    async def update(
        self,
        *,
        collection_name: str = "",
        optimizers_config: Optional["OptimizersConfigDiff"] = None,
        timeout: Optional[int] = None
    ) -> "CollectionOperationResponse":
        """Update optimizer parameters of an existing collection."""
        request = UpdateCollection()
        request.collection_name = collection_name
        if optimizers_config is not None:
            request.optimizers_config = optimizers_config
        request.timeout = timeout
        return await self._unary_unary(
            "/reframe.Collections/Update", request, CollectionOperationResponse
        )

    async def delete(
        self, *, collection_name: str = "", timeout: Optional[int] = None
    ) -> "CollectionOperationResponse":
        """Drop a collection."""
        request = DeleteCollection()
        request.collection_name = collection_name
        request.timeout = timeout
        return await self._unary_unary(
            "/reframe.Collections/Delete", request, CollectionOperationResponse
        )

    async def update_aliases(
        self,
        *,
        actions: Optional[List["AliasOperations"]] = None,
        timeout: Optional[int] = None
    ) -> "CollectionOperationResponse":
        """Apply a batch of alias create/rename/delete actions."""
        request = ChangeAliases()
        # `actions or []` coalesces None to an empty list, so the generated
        # `if actions is not None` guard was dead code and has been removed.
        request.actions = actions or []
        request.timeout = timeout
        return await self._unary_unary(
            "/reframe.Collections/UpdateAliases", request, CollectionOperationResponse
        )
class PointsStub(betterproto.ServiceStub):
    """Generated client stub for the reframe.Points gRPC service.

    Each coroutine builds the corresponding request message from keyword
    arguments and performs a unary-unary call via the betterproto channel.
    Optional message-typed fields are assigned only when not None; list
    fields are normalised to [] first.  Do not edit by hand beyond
    documentation: this class mirrors the .proto definition.
    """

    async def upsert(
        self,
        *,
        collection_name: str = "",
        wait: Optional[bool] = None,
        points: Optional[List["PointStruct"]] = None
    ) -> "PointsOperationResponse":
        """Insert or update points in a collection."""
        points = points or []
        request = UpsertPoints()
        request.collection_name = collection_name
        request.wait = wait
        if points is not None:
            request.points = points
        return await self._unary_unary(
            "/reframe.Points/Upsert", request, PointsOperationResponse
        )

    async def delete(
        self,
        *,
        collection_name: str = "",
        wait: Optional[bool] = None,
        points: "PointsSelector" = None
    ) -> "PointsOperationResponse":
        """Delete the points matched by the selector."""
        request = DeletePoints()
        request.collection_name = collection_name
        request.wait = wait
        if points is not None:
            request.points = points
        return await self._unary_unary(
            "/reframe.Points/Delete", request, PointsOperationResponse
        )

    async def get(
        self,
        *,
        collection_name: str = "",
        ids: Optional[List["PointId"]] = None,
        with_vector: Optional[bool] = None,
        with_payload: "WithPayloadSelector" = None
    ) -> "GetResponse":
        """Retrieve points by id, optionally with vectors and payloads."""
        ids = ids or []
        request = GetPoints()
        request.collection_name = collection_name
        if ids is not None:
            request.ids = ids
        request.with_vector = with_vector
        if with_payload is not None:
            request.with_payload = with_payload
        return await self._unary_unary("/reframe.Points/Get", request, GetResponse)

    async def set_payload(
        self,
        *,
        collection_name: str = "",
        wait: Optional[bool] = None,
        payload: Dict[str, "Value"] = None,
        points: Optional[List["PointId"]] = None
    ) -> "PointsOperationResponse":
        """Attach/overwrite payload keys on the given points."""
        points = points or []
        request = SetPayloadPoints()
        request.collection_name = collection_name
        request.wait = wait
        request.payload = payload
        if points is not None:
            request.points = points
        return await self._unary_unary(
            "/reframe.Points/SetPayload", request, PointsOperationResponse
        )

    async def delete_payload(
        self,
        *,
        collection_name: str = "",
        wait: Optional[bool] = None,
        keys: Optional[List[str]] = None,
        points: Optional[List["PointId"]] = None
    ) -> "PointsOperationResponse":
        """Remove the named payload keys from the given points."""
        keys = keys or []
        points = points or []
        request = DeletePayloadPoints()
        request.collection_name = collection_name
        request.wait = wait
        request.keys = keys
        if points is not None:
            request.points = points
        return await self._unary_unary(
            "/reframe.Points/DeletePayload", request, PointsOperationResponse
        )

    async def clear_payload(
        self,
        *,
        collection_name: str = "",
        wait: Optional[bool] = None,
        points: "PointsSelector" = None
    ) -> "PointsOperationResponse":
        """Remove the entire payload from the points matched by the selector."""
        request = ClearPayloadPoints()
        request.collection_name = collection_name
        request.wait = wait
        if points is not None:
            request.points = points
        return await self._unary_unary(
            "/reframe.Points/ClearPayload", request, PointsOperationResponse
        )

    async def create_field_index(
        self,
        *,
        collection_name: str = "",
        wait: Optional[bool] = None,
        field_name: str = "",
        field_type: Optional["FieldType"] = None
    ) -> "PointsOperationResponse":
        """Create a payload-field index to speed up filtered queries."""
        request = CreateFieldIndexCollection()
        request.collection_name = collection_name
        request.wait = wait
        request.field_name = field_name
        request.field_type = field_type
        return await self._unary_unary(
            "/reframe.Points/CreateFieldIndex", request, PointsOperationResponse
        )

    async def delete_field_index(
        self,
        *,
        collection_name: str = "",
        wait: Optional[bool] = None,
        field_name: str = ""
    ) -> "PointsOperationResponse":
        """Drop the payload-field index for the given field."""
        request = DeleteFieldIndexCollection()
        request.collection_name = collection_name
        request.wait = wait
        request.field_name = field_name
        return await self._unary_unary(
            "/reframe.Points/DeleteFieldIndex", request, PointsOperationResponse
        )

    async def search(
        self,
        *,
        collection_name: str = "",
        vector: Optional[List[float]] = None,
        filter: "Filter" = None,
        limit: int = 0,
        with_vector: Optional[bool] = None,
        with_payload: "WithPayloadSelector" = None,
        params: "SearchParams" = None,
        score_threshold: Optional[float] = None,
        offset: Optional[int] = None
    ) -> "SearchResponse":
        """Nearest-neighbour search for a query vector with optional filtering."""
        vector = vector or []
        request = SearchPoints()
        request.collection_name = collection_name
        request.vector = vector
        if filter is not None:
            request.filter = filter
        request.limit = limit
        request.with_vector = with_vector
        if with_payload is not None:
            request.with_payload = with_payload
        if params is not None:
            request.params = params
        request.score_threshold = score_threshold
        request.offset = offset
        return await self._unary_unary("/reframe.Points/Search", request, SearchResponse)

    async def scroll(
        self,
        *,
        collection_name: str = "",
        filter: "Filter" = None,
        offset: Optional["PointId"] = None,
        limit: Optional[int] = None,
        with_vector: Optional[bool] = None,
        with_payload: "WithPayloadSelector" = None
    ) -> "ScrollResponse":
        """Page through points matching a filter, starting after `offset`."""
        request = ScrollPoints()
        request.collection_name = collection_name
        if filter is not None:
            request.filter = filter
        if offset is not None:
            request.offset = offset
        request.limit = limit
        request.with_vector = with_vector
        if with_payload is not None:
            request.with_payload = with_payload
        return await self._unary_unary("/reframe.Points/Scroll", request, ScrollResponse)

    async def recommend(
        self,
        *,
        collection_name: str = "",
        positive: Optional[List["PointId"]] = None,
        negative: Optional[List["PointId"]] = None,
        filter: "Filter" = None,
        limit: int = 0,
        with_vector: Optional[bool] = None,
        with_payload: "WithPayloadSelector" = None,
        params: "SearchParams" = None,
        score_threshold: Optional[float] = None,
        offset: Optional[int] = None
    ) -> "RecommendResponse":
        """Recommend points similar to `positive` examples and unlike `negative` ones."""
        positive = positive or []
        negative = negative or []
        request = RecommendPoints()
        request.collection_name = collection_name
        if positive is not None:
            request.positive = positive
        if negative is not None:
            request.negative = negative
        if filter is not None:
            request.filter = filter
        request.limit = limit
        request.with_vector = with_vector
        if with_payload is not None:
            request.with_payload = with_payload
        if params is not None:
            request.params = params
        request.score_threshold = score_threshold
        request.offset = offset
        return await self._unary_unary(
            "/reframe.Points/Recommend", request, RecommendResponse
        )

    async def count(
        self,
        *,
        collection_name: str = "",
        filter: "Filter" = None,
        exact: Optional[bool] = None
    ) -> "CountResponse":
        """Count points matching a filter (exact or approximate)."""
        request = CountPoints()
        request.collection_name = collection_name
        if filter is not None:
            request.filter = filter
        request.exact = exact
        return await self._unary_unary("/reframe.Points/Count", request, CountResponse)
class SnapshotsStub(betterproto.ServiceStub):
    """Generated client stub for the reframe.Snapshots gRPC service.

    Per-collection snapshot create/list plus full-storage variants.
    """

    async def create(self, *, collection_name: str = "") -> "CreateSnapshotResponse":
        """Create a snapshot of a single collection."""
        request = CreateSnapshotRequest()
        request.collection_name = collection_name
        return await self._unary_unary(
            "/reframe.Snapshots/Create", request, CreateSnapshotResponse
        )

    async def list(self, *, collection_name: str = "") -> "ListSnapshotsResponse":
        """List existing snapshots of a single collection."""
        request = ListSnapshotsRequest()
        request.collection_name = collection_name
        return await self._unary_unary(
            "/reframe.Snapshots/List", request, ListSnapshotsResponse
        )

    async def create_full(self) -> "CreateSnapshotResponse":
        """Create a snapshot of the whole storage."""
        request = CreateFullSnapshotRequest()
        return await self._unary_unary(
            "/reframe.Snapshots/CreateFull", request, CreateSnapshotResponse
        )

    async def list_full(self) -> "ListSnapshotsResponse":
        """List snapshots of the whole storage."""
        request = ListFullSnapshotsRequest()
        return await self._unary_unary(
            "/reframe.Snapshots/ListFull", request, ListSnapshotsResponse
        )
class NNextStub(betterproto.ServiceStub):
    """Generated client stub for the reframe.NNext service (liveness check)."""

    async def health_check(self) -> "HealthCheckReply":
        """Ping the server; returns a HealthCheckReply if it is up."""
        request = HealthCheckRequest()
        return await self._unary_unary(
            "/reframe.NNext/HealthCheck", request, HealthCheckReply
        )
class CollectionsBase(ServiceBase):
    """Generated server base for the reframe.Collections service.

    Subclass and override the public coroutines (get/list/create/update/
    delete/update_aliases); each defaults to raising UNIMPLEMENTED.  The
    private __rpc_* adapters unpack the incoming message into kwargs, await
    the handler, and send the response.  __mapping__ registers the adapters
    with grpclib by RPC path.
    """

    async def get(self, collection_name: str) -> "GetCollectionInfoResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def list(self) -> "ListCollectionsResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def create(
        self,
        collection_name: str,
        vector_size: int,
        distance: "Distance",
        hnsw_config: Optional["HnswConfigDiff"],
        wal_config: Optional["WalConfigDiff"],
        optimizers_config: Optional["OptimizersConfigDiff"],
        shard_number: Optional[int],
        on_disk_payload: Optional[bool],
        timeout: Optional[int],
    ) -> "CollectionOperationResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def update(
        self,
        collection_name: str,
        optimizers_config: Optional["OptimizersConfigDiff"],
        timeout: Optional[int],
    ) -> "CollectionOperationResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def delete(
        self, collection_name: str, timeout: Optional[int]
    ) -> "CollectionOperationResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def update_aliases(
        self, actions: Optional[List["AliasOperations"]], timeout: Optional[int]
    ) -> "CollectionOperationResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    # -- grpclib adapters: message -> kwargs -> handler -> response ----------

    async def __rpc_get(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {
            "collection_name": request.collection_name,
        }
        response = await self.get(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_list(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {}
        response = await self.list(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_create(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {
            "collection_name": request.collection_name,
            "vector_size": request.vector_size,
            "distance": request.distance,
            "hnsw_config": request.hnsw_config,
            "wal_config": request.wal_config,
            "optimizers_config": request.optimizers_config,
            "shard_number": request.shard_number,
            "on_disk_payload": request.on_disk_payload,
            "timeout": request.timeout,
        }
        response = await self.create(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_update(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {
            "collection_name": request.collection_name,
            "optimizers_config": request.optimizers_config,
            "timeout": request.timeout,
        }
        response = await self.update(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_delete(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {
            "collection_name": request.collection_name,
            "timeout": request.timeout,
        }
        response = await self.delete(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_update_aliases(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {
            "actions": request.actions,
            "timeout": request.timeout,
        }
        response = await self.update_aliases(**request_kwargs)
        await stream.send_message(response)

    def __mapping__(self) -> Dict[str, grpclib.const.Handler]:
        # Route table consumed by grpclib's server.
        return {
            "/reframe.Collections/Get": grpclib.const.Handler(
                self.__rpc_get,
                grpclib.const.Cardinality.UNARY_UNARY,
                GetCollectionInfoRequest,
                GetCollectionInfoResponse,
            ),
            "/reframe.Collections/List": grpclib.const.Handler(
                self.__rpc_list,
                grpclib.const.Cardinality.UNARY_UNARY,
                ListCollectionsRequest,
                ListCollectionsResponse,
            ),
            "/reframe.Collections/Create": grpclib.const.Handler(
                self.__rpc_create,
                grpclib.const.Cardinality.UNARY_UNARY,
                CreateCollection,
                CollectionOperationResponse,
            ),
            "/reframe.Collections/Update": grpclib.const.Handler(
                self.__rpc_update,
                grpclib.const.Cardinality.UNARY_UNARY,
                UpdateCollection,
                CollectionOperationResponse,
            ),
            "/reframe.Collections/Delete": grpclib.const.Handler(
                self.__rpc_delete,
                grpclib.const.Cardinality.UNARY_UNARY,
                DeleteCollection,
                CollectionOperationResponse,
            ),
            "/reframe.Collections/UpdateAliases": grpclib.const.Handler(
                self.__rpc_update_aliases,
                grpclib.const.Cardinality.UNARY_UNARY,
                ChangeAliases,
                CollectionOperationResponse,
            ),
        }
class PointsBase(ServiceBase):
    """Generated server base for the reframe.Points service.

    Subclass and override the public coroutines; each defaults to raising
    UNIMPLEMENTED.  The private __rpc_* adapters unpack the incoming message
    into kwargs, await the handler, and send the response.  __mapping__
    registers the adapters with grpclib by RPC path.
    """

    async def upsert(
        self,
        collection_name: str,
        wait: Optional[bool],
        points: Optional[List["PointStruct"]],
    ) -> "PointsOperationResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def delete(
        self, collection_name: str, wait: Optional[bool], points: "PointsSelector"
    ) -> "PointsOperationResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def get(
        self,
        collection_name: str,
        ids: Optional[List["PointId"]],
        with_vector: Optional[bool],
        with_payload: "WithPayloadSelector",
    ) -> "GetResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def set_payload(
        self,
        collection_name: str,
        wait: Optional[bool],
        payload: Dict[str, "Value"],
        points: Optional[List["PointId"]],
    ) -> "PointsOperationResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def delete_payload(
        self,
        collection_name: str,
        wait: Optional[bool],
        keys: Optional[List[str]],
        points: Optional[List["PointId"]],
    ) -> "PointsOperationResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def clear_payload(
        self, collection_name: str, wait: Optional[bool], points: "PointsSelector"
    ) -> "PointsOperationResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def create_field_index(
        self,
        collection_name: str,
        wait: Optional[bool],
        field_name: str,
        field_type: Optional["FieldType"],
    ) -> "PointsOperationResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def delete_field_index(
        self, collection_name: str, wait: Optional[bool], field_name: str
    ) -> "PointsOperationResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def search(
        self,
        collection_name: str,
        vector: Optional[List[float]],
        filter: "Filter",
        limit: int,
        with_vector: Optional[bool],
        with_payload: "WithPayloadSelector",
        params: "SearchParams",
        score_threshold: Optional[float],
        offset: Optional[int],
    ) -> "SearchResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def scroll(
        self,
        collection_name: str,
        filter: "Filter",
        offset: Optional["PointId"],
        limit: Optional[int],
        with_vector: Optional[bool],
        with_payload: "WithPayloadSelector",
    ) -> "ScrollResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def recommend(
        self,
        collection_name: str,
        positive: Optional[List["PointId"]],
        negative: Optional[List["PointId"]],
        filter: "Filter",
        limit: int,
        with_vector: Optional[bool],
        with_payload: "WithPayloadSelector",
        params: "SearchParams",
        score_threshold: Optional[float],
        offset: Optional[int],
    ) -> "RecommendResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def count(
        self, collection_name: str, filter: "Filter", exact: Optional[bool]
    ) -> "CountResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    # -- grpclib adapters: message -> kwargs -> handler -> response ----------

    async def __rpc_upsert(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {
            "collection_name": request.collection_name,
            "wait": request.wait,
            "points": request.points,
        }
        response = await self.upsert(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_delete(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {
            "collection_name": request.collection_name,
            "wait": request.wait,
            "points": request.points,
        }
        response = await self.delete(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_get(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {
            "collection_name": request.collection_name,
            "ids": request.ids,
            "with_vector": request.with_vector,
            "with_payload": request.with_payload,
        }
        response = await self.get(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_set_payload(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {
            "collection_name": request.collection_name,
            "wait": request.wait,
            "payload": request.payload,
            "points": request.points,
        }
        response = await self.set_payload(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_delete_payload(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {
            "collection_name": request.collection_name,
            "wait": request.wait,
            "keys": request.keys,
            "points": request.points,
        }
        response = await self.delete_payload(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_clear_payload(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {
            "collection_name": request.collection_name,
            "wait": request.wait,
            "points": request.points,
        }
        response = await self.clear_payload(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_create_field_index(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {
            "collection_name": request.collection_name,
            "wait": request.wait,
            "field_name": request.field_name,
            "field_type": request.field_type,
        }
        response = await self.create_field_index(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_delete_field_index(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {
            "collection_name": request.collection_name,
            "wait": request.wait,
            "field_name": request.field_name,
        }
        response = await self.delete_field_index(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_search(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {
            "collection_name": request.collection_name,
            "vector": request.vector,
            "filter": request.filter,
            "limit": request.limit,
            "with_vector": request.with_vector,
            "with_payload": request.with_payload,
            "params": request.params,
            "score_threshold": request.score_threshold,
            "offset": request.offset,
        }
        response = await self.search(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_scroll(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {
            "collection_name": request.collection_name,
            "filter": request.filter,
            "offset": request.offset,
            "limit": request.limit,
            "with_vector": request.with_vector,
            "with_payload": request.with_payload,
        }
        response = await self.scroll(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_recommend(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {
            "collection_name": request.collection_name,
            "positive": request.positive,
            "negative": request.negative,
            "filter": request.filter,
            "limit": request.limit,
            "with_vector": request.with_vector,
            "with_payload": request.with_payload,
            "params": request.params,
            "score_threshold": request.score_threshold,
            "offset": request.offset,
        }
        response = await self.recommend(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_count(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {
            "collection_name": request.collection_name,
            "filter": request.filter,
            "exact": request.exact,
        }
        response = await self.count(**request_kwargs)
        await stream.send_message(response)

    def __mapping__(self) -> Dict[str, grpclib.const.Handler]:
        # Route table consumed by grpclib's server.
        return {
            "/reframe.Points/Upsert": grpclib.const.Handler(
                self.__rpc_upsert,
                grpclib.const.Cardinality.UNARY_UNARY,
                UpsertPoints,
                PointsOperationResponse,
            ),
            "/reframe.Points/Delete": grpclib.const.Handler(
                self.__rpc_delete,
                grpclib.const.Cardinality.UNARY_UNARY,
                DeletePoints,
                PointsOperationResponse,
            ),
            "/reframe.Points/Get": grpclib.const.Handler(
                self.__rpc_get,
                grpclib.const.Cardinality.UNARY_UNARY,
                GetPoints,
                GetResponse,
            ),
            "/reframe.Points/SetPayload": grpclib.const.Handler(
                self.__rpc_set_payload,
                grpclib.const.Cardinality.UNARY_UNARY,
                SetPayloadPoints,
                PointsOperationResponse,
            ),
            "/reframe.Points/DeletePayload": grpclib.const.Handler(
                self.__rpc_delete_payload,
                grpclib.const.Cardinality.UNARY_UNARY,
                DeletePayloadPoints,
                PointsOperationResponse,
            ),
            "/reframe.Points/ClearPayload": grpclib.const.Handler(
                self.__rpc_clear_payload,
                grpclib.const.Cardinality.UNARY_UNARY,
                ClearPayloadPoints,
                PointsOperationResponse,
            ),
            "/reframe.Points/CreateFieldIndex": grpclib.const.Handler(
                self.__rpc_create_field_index,
                grpclib.const.Cardinality.UNARY_UNARY,
                CreateFieldIndexCollection,
                PointsOperationResponse,
            ),
            "/reframe.Points/DeleteFieldIndex": grpclib.const.Handler(
                self.__rpc_delete_field_index,
                grpclib.const.Cardinality.UNARY_UNARY,
                DeleteFieldIndexCollection,
                PointsOperationResponse,
            ),
            "/reframe.Points/Search": grpclib.const.Handler(
                self.__rpc_search,
                grpclib.const.Cardinality.UNARY_UNARY,
                SearchPoints,
                SearchResponse,
            ),
            "/reframe.Points/Scroll": grpclib.const.Handler(
                self.__rpc_scroll,
                grpclib.const.Cardinality.UNARY_UNARY,
                ScrollPoints,
                ScrollResponse,
            ),
            "/reframe.Points/Recommend": grpclib.const.Handler(
                self.__rpc_recommend,
                grpclib.const.Cardinality.UNARY_UNARY,
                RecommendPoints,
                RecommendResponse,
            ),
            "/reframe.Points/Count": grpclib.const.Handler(
                self.__rpc_count,
                grpclib.const.Cardinality.UNARY_UNARY,
                CountPoints,
                CountResponse,
            ),
        }
class SnapshotsBase(ServiceBase):
    """Generated server base for the reframe.Snapshots service.

    Subclass and override the public coroutines; each defaults to raising
    UNIMPLEMENTED.  __mapping__ registers the __rpc_* adapters with grpclib.
    """

    async def create(self, collection_name: str) -> "CreateSnapshotResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def list(self, collection_name: str) -> "ListSnapshotsResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def create_full(self) -> "CreateSnapshotResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def list_full(self) -> "ListSnapshotsResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    # -- grpclib adapters -----------------------------------------------------

    async def __rpc_create(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {
            "collection_name": request.collection_name,
        }
        response = await self.create(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_list(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {
            "collection_name": request.collection_name,
        }
        response = await self.list(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_create_full(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {}
        response = await self.create_full(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_list_full(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {}
        response = await self.list_full(**request_kwargs)
        await stream.send_message(response)

    def __mapping__(self) -> Dict[str, grpclib.const.Handler]:
        return {
            "/reframe.Snapshots/Create": grpclib.const.Handler(
                self.__rpc_create,
                grpclib.const.Cardinality.UNARY_UNARY,
                CreateSnapshotRequest,
                CreateSnapshotResponse,
            ),
            "/reframe.Snapshots/List": grpclib.const.Handler(
                self.__rpc_list,
                grpclib.const.Cardinality.UNARY_UNARY,
                ListSnapshotsRequest,
                ListSnapshotsResponse,
            ),
            "/reframe.Snapshots/CreateFull": grpclib.const.Handler(
                self.__rpc_create_full,
                grpclib.const.Cardinality.UNARY_UNARY,
                CreateFullSnapshotRequest,
                CreateSnapshotResponse,
            ),
            "/reframe.Snapshots/ListFull": grpclib.const.Handler(
                self.__rpc_list_full,
                grpclib.const.Cardinality.UNARY_UNARY,
                ListFullSnapshotsRequest,
                ListSnapshotsResponse,
            ),
        }
class NNextBase(ServiceBase):
    """Generated server base for the reframe.NNext service (liveness check)."""

    async def health_check(self) -> "HealthCheckReply":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def __rpc_health_check(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()
        request_kwargs = {}
        response = await self.health_check(**request_kwargs)
        await stream.send_message(response)

    def __mapping__(self) -> Dict[str, grpclib.const.Handler]:
        return {
            "/reframe.NNext/HealthCheck": grpclib.const.Handler(
                self.__rpc_health_check,
                grpclib.const.Cardinality.UNARY_UNARY,
                HealthCheckRequest,
                HealthCheckReply,
            ),
        }
import collections
import imp
import inspect
import threading
import time
import traceback
class SafeRefreshMixin(object):
    """ Provides a `.refresh()` method to reload a class

    Adding the `SafeRefreshMixin` to a class allows you to "refresh" the class
    using the `.refresh()` method. This method reloads the python file the class
    came from and replaces all methods and *class* variables (*not* instance
    variables!) with the versions from the new file.

    The refresh is "safe" because it tries very hard to keep the program running.
    On each refresh, a snapshot of the class is saved to form a history. If an
    error is encountered while performing the refresh, the state is reverted.
    In general, you can wrap calls to methods of your refreshed class in a try
    block that catches all errors and call `.revert()` on failure.
    Additionally, `DEFAULTS` and `AUTO_NONE` provide options for handling
    missing attributes (preventing `AttributeError`s).

    Usage
    -----
    Configure behavior with these class variables:
    - `STATICS` : List of attribute names (strings) that are never refreshed.
    - `DEFAULTS`: Dictionary of attribute name -> default. If an `AttributeError`
                  is caught and the name is in `DEFAULTS`, the attribute is
                  populated from the dictionary; callable values act as
                  factories (so mutable defaults are not shared).
    - `AUTO_NONE`: If `True`, missing attributes not in `DEFAULTS` are set to
                  `None` instead of raising `AttributeError`.

    The `.pre_refresh()` and `.post_refresh()` hooks can be overridden.
    Instance methods: `.init_defaults()`, `.refresh()`, `.revert()`, `.purge()`.

    Limitations
    -----------
    - `.refresh()` assumes all methods are bound (take a `self` parameter);
      static/class methods should be listed in `STATICS`.
    - Designed around a singleton-style single instance per class.
    - The `__main__` module cannot be reloaded, so the class must live in an
      imported module.
    """

    DEFAULTS = {
        "_refresh_history": lambda: [],  # stack of pre-refresh snapshots
        "_refresh_rev": lambda: 0,       # number of successful refreshes
    }
    STATICS = []  # Attribute names that refresh() must never replace
    AUTO_NONE = False  # Auto-populate unknown missing attributes with None

    def __getattr__(self, name):
        # Called only when normal attribute lookup fails.  Lazily create the
        # attribute from DEFAULTS (callables are treated as factories).
        if name not in self.DEFAULTS and not self.AUTO_NONE:
            raise AttributeError(name)
        value = self.DEFAULTS.get(name)
        # FIX: was `if value and value.__call__:`, which raised AttributeError
        # for any truthy *non-callable* default instead of storing it.
        if callable(value):
            value = value()
        self.__setattr__(name, value)
        return value

    def init_defaults(self):
        """ Eagerly create every attribute listed in DEFAULTS.

        Uses normal attribute lookup, so values that are already set are left
        untouched (the original called `__getattr__` directly, which would
        silently reset existing state to its default).
        """
        for key in self.DEFAULTS:
            getattr(self, key)

    def pre_refresh(self):
        # Pre-refresh hook; override as needed.
        pass

    def post_refresh(self):
        # Post-refresh hook; override as needed.
        pass

    def refresh(self, NewClass=None):
        """ Attempt to refresh the class.

        The class's module is reloaded, and all of the methods and *class* (not
        instance) variables are replaced with the new version.  A snapshot of
        the replaced values is kept to allow reverting on error (see
        `.revert()`).  Raises `Exception` if the module cannot be reloaded and
        `Warning` if the swap or the post-refresh hook fails (after reverting).
        """
        try:
            # Pre-refresh hook.
            self.pre_refresh()
        except Exception:
            # A failing pre_refresh hook is bad, but aborting here would make a
            # broken class impossible to fix by refreshing, so press on.
            traceback.print_exc()
            print("WARNING: Pre-refresh hook failed for module {}. Continuing to refresh...".format(NewClass))
        if NewClass is None:
            # Try to reload the module & pick up the new class object.
            # NOTE(review): the `imp` module is deprecated and removed in
            # Python 3.12; importlib.reload is the modern replacement --
            # confirm the target runtime before migrating.
            try:
                cls = self.__class__
                module = inspect.getmodule(cls)
                new_module = imp.reload(module)  # `module` is also updated in place
                NewClass = new_module.__dict__[cls.__name__]
            except Exception:
                traceback.print_exc()
                raise Exception("Unable to reload module. Did not refresh.")
        # Swap out class methods & variables, remembering replaced values so
        # revert() can roll back.
        history = {}
        try:
            for key, item in NewClass.__dict__.items():
                if key not in NewClass.STATICS:
                    # Methods need to be re-bound to this instance first.
                    # XXX: assumes all methods are bound (take `self`); static
                    # and class methods will not refresh properly -- list them
                    # in STATICS instead.
                    if callable(item):
                        value = item.__get__(self, NewClass)  # re-bind
                    else:
                        value = item
                    if key in self.__dict__:  # hasattr(...) won't work here
                        history[key] = self.__getattribute__(key)
                    self.__setattr__(key, value)
        except Exception:
            traceback.print_exc()
            self.revert(history=history)  # rollback
            raise Warning("Unable to refresh module: {}. Reverted.".format(NewClass))
        try:
            self.post_refresh()
        except Exception:
            # A failing post_refresh hook is recoverable: roll back.
            traceback.print_exc()
            self.revert(history=history)  # rollback
            raise Warning("Post-refresh hook failed for module {}. Reverted.".format(NewClass))
        # Success!
        self._refresh_history.append(history)
        self._refresh_rev += 1
        return self

    def revert(self, history=None):
        """ Revert to a previous snapshot of the class.

        Usually called when an error is encountered.  Returns True if a
        snapshot was applied, False if there was nothing to revert to.
        """
        if not history:
            try:
                history = self._refresh_history.pop()
            except IndexError:
                return False  # Nothing to revert to
        for key, value in history.items():
            self.__setattr__(key, value)
        return True  # Reverted!

    def purge(self):
        """ Remove all of the pre-refresh snapshots from the history."""
        try:
            del self._refresh_history
        except AttributeError:
            # FIX: the original caught NameError, which `del self.attr` never
            # raises -- a missing attribute raises AttributeError.
            pass
        self._refresh_history = []
class SafeRefreshableLoop(threading.Thread, SafeRefreshMixin):
    """ Run a function in a loop while making the parent class refreshable.

    The function `.step()` is called repeatedly while the loop is running.
    Start the loop in one of two ways:
    - `.start()`: Run the loop in a thread.
    - `.run()`  : (the target of the thread) Run the loop "inline".
    The loop can be paused with `.stop()` and unpaused with `.restart()`.

    If you subclass, make sure you call threading.Thread.__init__.

    As with SafeRefreshMixin, behavior is configured via `STATICS`,
    `DEFAULTS` and `AUTO_NONE`, and the instance exposes `.refresh()`,
    `.revert()` and `.purge()`, plus the `.pre_refresh()` / `.post_refresh()`
    hooks.  On an exception inside `.step()` the loop automatically reverts
    to the previous class version and keeps going; if there is nothing to
    revert to, it pauses itself.
    """

    # NOTE(review): this class attribute shadows threading.Thread's `daemon`
    # property so threads default to daemonic; confirm against the target
    # Python's threading internals before changing.
    daemon = True

    def stop(self):
        """ Pauses the refresh loop until `restart` is called. """
        self.stopped = True

    def restart(self):
        """ Restarts the refresh loop after `stop` was called."""
        self.stopped = False

    def step(self):
        """ Override this method. This is called repeatedly in a loop."""
        raise NotImplementedError

    def run(self):
        """ Thread target: call .step() until interrupted or stopped. """
        self.stopped = False
        while True:
            if self.stopped:
                # Parked: poll cheaply until restart() is called.
                time.sleep(0.01)
            else:
                # Tolerate errors in step()
                try:
                    self.step()
                except KeyboardInterrupt:
                    print("Received KeyboardInterrupt. Stopping loop.")
                    self.stopped = True
                    break
                except Exception:
                    traceback.print_exc()
                    if self.revert():
                        print("Error running loop. Reverting to previous version. Trying again...")
                    else:
                        print("Error running loop. No previous version to revert to. Stopping.")
                        self.stopped = True
import struct
import win32api
class Icon:
    """
    Icon class. Represents an Ico file.

    Parses the ICONDIR header, the directory entries and the raw image blobs
    out of a .ico file.  Each entry is re-packed with its file offset replaced
    by a 1-based resource id, which is the layout needed for an RT_GROUP_ICON
    resource inside a PE executable.
    """

    # Parsing constants (struct format strings)
    HEADER_FORMAT = "hhh"         # reserved, resource type, entry count
    ENTRY_FORMAT = "bbbbhhii"     # width, height, colors, reserved, planes, bitcount, size, file offset
    ENTRY_FORMAT_ID = "bbbbhhih"  # same, but the offset is replaced by a 16-bit resource id
    HEADER_SIZE = struct.calcsize(HEADER_FORMAT)
    ENTRY_SIZE = struct.calcsize(ENTRY_FORMAT)

    def __init__(self, filename):
        """
        Create a Icon object from the path to a .ico file.
        """
        # Icon sections (FIX: header initialised as bytes, matching what is read)
        self._header = b""
        self._entries = []
        self._images = []
        with open(filename, 'rb') as fd:
            self._header = fd.read(self.HEADER_SIZE)
            # Get the tuple of the header and get how many entries we have
            count = self.header()[2]
            # Collect the raw directory entries
            for i in range(count):
                self._entries.append(fd.read(self.ENTRY_SIZE))
            # Now collect the image data for each entry
            for i, bentry in enumerate(self._entries):
                entry = struct.unpack(self.ENTRY_FORMAT, bentry)
                # entry[7] is the file offset of the image, entry[6] its size
                fd.seek(entry[7], 0)
                self._images.append(fd.read(entry[6]))
                # Replace the file offset with a 1-based resource id and
                # re-pack the entry in resource-directory layout.
                entry = entry[:-1] + (i + 1,)
                self._entries[i] = struct.pack(self.ENTRY_FORMAT_ID, *entry)

    def header(self):
        """
        Return a tuple with the values in the header of the Icon.
        Header is made of three values:
        - a reserved value
        - the type id
        - entries count
        """
        return struct.unpack(self.HEADER_FORMAT, self._header)

    def entries(self):
        """
        Return an array with the tuples of the icons entries. An icon entry
        is a special header that describes an image. A single .ico file can
        contain multiple entries.
        Each entry contains:
        - width
        - height
        - color count
        - reserved value
        - planes
        - bit count
        - size of image
        - id
        """
        return [struct.unpack(self.ENTRY_FORMAT_ID, e) for e in self._entries]

    def images(self):
        """
        Return an array with the bytes for each of the images in the icon.
        """
        # FIX: was `return _images` (a NameError at call time).
        return self._images
def set_icon(exe_filename, ico_filename):
    """
    Set the icon on a windows executable.

    Writes the icon group header (resource type 14, RT_GROUP_ICON) and each
    icon image (resource type 3, RT_ICON) from *ico_filename* into
    *exe_filename* via the Win32 resource-update API.
    """
    # Icon file
    icon = Icon(ico_filename)
    # Begin update of executable
    hdst = win32api.BeginUpdateResource(exe_filename, 0)
    # Icon group data: file header followed by all (id-rewritten) entries.
    # BUG FIX: was `reduce(str.__add__, icon._entries)`, which raised
    # TypeError for an icon with zero entries and breaks on py3 where the
    # entries are bytes; join handles both cases.
    data = icon._header + b"".join(icon._entries)
    win32api.UpdateResource(hdst, 14, 1, data)
    # Update images
    for i, image in enumerate(icon._images):
        win32api.UpdateResource(hdst, 3, i + 1, image)
    # Done
    win32api.EndUpdateResource(hdst, 0)
import logging
import os
import re
from collections import defaultdict
from subprocess import Popen, PIPE
from typing import List, Tuple, Union, Optional, Any
import pandas as pd
from refseq_masher.const import REGEX_FASTA, REGEX_FASTQ
from .const import REGEX_FASTQ, REGEX_FASTA
# Nucleotide complement lookup table: maps each base (including IUPAC
# ambiguity codes and x/X), lower- and upper-case, to its complement
# (e.g. a->t, c->g, r->y).
NT_SUB = {x: y for x, y in zip('acgtrymkswhbvdnxACGTRYMKSWHBVDNX', 'tgcayrkmswdvbhnxTGCAYRKMSWDVBHNX')}
def run_command(cmdlist: List[str], stdin: Optional[Any] = None, stderr: Optional[Any] = PIPE) -> Tuple[int, str, str]:
    """Run a command, wait for it to finish and capture its output.

    Note: the return annotation was previously the invalid tuple literal
    ``(int, str, str)``; it is now a proper ``Tuple[int, str, str]``.

    Args:
        cmdlist: command and its arguments, e.g. ``['mash', 'dist', ...]``
        stdin: optional stdin handle/stream passed to ``subprocess.Popen``
        stderr: where to send stderr (captured via ``PIPE`` by default)

    Returns:
        Tuple of (exit code, decoded stdout, decoded stderr).
    """
    p = Popen(cmdlist,
              stdout=PIPE,
              stderr=stderr,
              stdin=stdin)
    stdout, stderr = p.communicate()
    exit_code = p.returncode
    # Popen may hand back bytes; normalize to str for callers.
    if isinstance(stdout, bytes):
        stdout = stdout.decode()
    if isinstance(stderr, bytes):
        stderr = stderr.decode()
    return exit_code, stdout, stderr
def exc_exists(exc_name: str) -> bool:
    """Return True if *exc_name* resolves to an executable on the $PATH.

    Args:
        exc_name (str): Executable name or path (e.g. "blastn")

    Returns:
        bool: True when ``which`` finds the executable, False otherwise
    """
    which_cmd = ['which', exc_name]
    retcode, out, err = run_command(which_cmd)
    if retcode != 0:
        logging.warning('which exited with non-zero code {} with command "{}"'.format(retcode, ' '.join(which_cmd)))
        logging.warning(err)
        return False
    return True
def sample_name_from_fasta_path(fasta_path: str) -> str:
    """Derive a genome/sample name from a FASTA file path.

    The directory part, a trailing ``.gz`` and the FASTA-style extension
    are all stripped, e.g. ``/path/to/genome_1.fasta`` yields ``genome_1``.

    Args:
        fasta_path (str): fasta file path

    Returns:
        str: genome name
    """
    name = re.sub(r'\.gz$', '', os.path.basename(fasta_path))
    return re.sub(r'\.(fa|fas|fasta|fna|\w{1,})(\.gz)?$', '', name)
def sample_name_from_fastq_paths(fastqs: List[str]) -> str:
    """Return the sample name for a set of FASTQ file paths.

    The FASTQs are grouped by common base filename (see ``group_fastqs``)
    and the first group's sample name is returned (None for no groups).

    Args:
        fastqs: FASTQ paths

    Returns:
        (str): sample name
    """
    for _paths, name in group_fastqs(fastqs):
        return name
def group_fastqs(fastqs: List[str]) -> List[Tuple[List[str], str]]:
    """Group FASTQ paths that share a common base filename.

    For example ``reads_1.fastq`` and ``reads_2.fastq`` share the common
    name ``reads`` (the FASTQ extension and the ``_<digit>`` read-number
    marker are stripped) and end up in the same group.

    Args:
        fastqs: FASTQ file paths

    Returns:
        list of ([FASTQ paths], common base filename) groups
    """
    by_sample = defaultdict(list)
    for path in fastqs:
        name = os.path.basename(path)
        common = re.sub(r'_\d', '', REGEX_FASTQ.sub(r'\1', name))
        by_sample[common].append(path)
    return [(paths, sample) for sample, paths in by_sample.items()]
def collect_fasta_from_dir(input_directory: str) -> List[Tuple[str, str]]:
    """Find FASTA files directly inside *input_directory*.

    Args:
        input_directory: directory to scan (non-recursively)

    Returns:
        list of (absolute FASTA path, derived sample name) tuples
    """
    found = []
    for entry in os.listdir(input_directory):
        abspath = os.path.abspath(os.path.join(input_directory, entry))
        if os.path.isfile(abspath) and REGEX_FASTA.match(entry):
            found.append((abspath, sample_name_from_fasta_path(abspath)))
    return found
def collect_fastq_from_dir(input_directory: str) -> List[Tuple[List[str], str]]:
    """Find FASTQ files directly inside *input_directory* and group them.

    Type annotations added for consistency with ``collect_fasta_from_dir``.

    Args:
        input_directory: directory to scan (non-recursively)

    Returns:
        list of ([FASTQ paths], sample name) groups; empty list if none found
    """
    fastqs = []
    for x in os.listdir(input_directory):
        full_file_path = os.path.abspath(os.path.join(input_directory, x))
        if os.path.isfile(full_file_path) and REGEX_FASTQ.match(x):
            fastqs.append(full_file_path)
    if len(fastqs) > 0:
        logging.info('Found %s FASTQ files in %s',
                     len(fastqs),
                     input_directory)
        reads_from_dir = group_fastqs(fastqs)
        logging.info('Collected %s read sets from %s FASTQ files in %s',
                     len(reads_from_dir),
                     len(fastqs),
                     input_directory)
        return reads_from_dir
    return []
def collect_inputs(inputs: List[str]) -> Tuple[List[Tuple[str, str]], List[Tuple[List[str], str]]]:
    """Collect all input files for analysis.

    Sample names are derived from the base filename with no extensions.
    Sequencing reads are paired when they share a common filename without
    the read-number suffix (e.g. ``_1``/``_2``).  FASTA/FASTQ paths are
    also collected from any input directory provided.

    Args:
        inputs: paths to FASTA/FASTQ files or directories containing them

    Returns:
        List of (contig filename, sample name)
        List of ([reads filepaths], sample name)
    """
    contigs = []
    reads = []
    fastas = [p for p in inputs if REGEX_FASTA.match(p)]
    fastqs = [p for p in inputs if REGEX_FASTQ.match(p)]
    dirs = [p for p in inputs if os.path.isdir(p)]
    # Direct FASTA inputs (skip, but report, any that do not exist).
    for fasta_path in fastas:
        fasta_path = os.path.abspath(fasta_path)
        if not os.path.exists(fasta_path):
            logging.error('Input fasta "%s" does not exist!', fasta_path)
            continue
        contigs.append((fasta_path, sample_name_from_fasta_path(fasta_path)))
    # Direct FASTQ inputs, paired up by common base filename.
    if fastqs:
        grouped_fastqs = group_fastqs(fastqs)
        logging.info('Grouped %s fastqs into %s groups',
                     len(fastqs),
                     len(grouped_fastqs))
        reads += grouped_fastqs
    # Inputs harvested from directories.
    for d in dirs:
        fasta_from_dir = collect_fasta_from_dir(d)
        if fasta_from_dir:
            logging.info('Collected %s FASTA from dir "%s"', len(fasta_from_dir), d)
            contigs += fasta_from_dir
        fastq_from_dir = collect_fastq_from_dir(d)
        if fastq_from_dir:
            logging.info('Collected %s FASTQ from dir "%s"', len(fastq_from_dir), d)
            reads += fastq_from_dir
    logging.info('Collected %s FASTA inputs and %s read sets', len(contigs), len(reads))
    return contigs, reads
#: Log line format used by :func:`init_console_logger`
LOG_FORMAT = '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'


def init_console_logger(logging_verbosity=3):
    """Configure root console logging and return the level that was set.

    Args:
        logging_verbosity: 0=ERROR, 1=WARN, 2=INFO, 3=DEBUG.  Out-of-range
            values are clamped into that range.  (Previously a negative
            value silently indexed from the end of the level list, so e.g.
            -1 meant DEBUG.)

    Returns:
        the ``logging`` level constant that was applied
    """
    logging_levels = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG]
    # Clamp to the valid index range instead of only capping the top end.
    logging_verbosity = min(max(logging_verbosity, 0), len(logging_levels) - 1)
    lvl = logging_levels[logging_verbosity]
    logging.basicConfig(format=LOG_FORMAT, level=lvl)
    return lvl
def order_output_columns(dfout: pd.DataFrame, cols: List[str]) -> pd.DataFrame:
    """Reorder *dfout* so the columns listed in *cols* come first.

    Columns from *cols* that exist in the frame are placed first, in the
    order given; all remaining columns follow in their original frame
    order.  (Previously the remainder was built with ``list(set(...))``,
    making the trailing column order nondeterministic.)

    Args:
        dfout: results table to reorder
        cols: preferred leading column names (missing ones are ignored)

    Returns:
        the DataFrame with reordered columns
    """
    set_columns = set(dfout.columns)
    present_columns = [x for x in cols if x in set_columns]
    present_set = set(present_columns)
    rest_columns = [x for x in dfout.columns if x not in present_set]
    return dfout[present_columns + rest_columns]
import logging
import os
from typing import Optional, List
import pandas as pd
from .sketch import sketch_fasta, sketch_fastqs
from .parser import mash_dist_output_to_dataframe
from ..utils import run_command
from ..const import MASH_REFSEQ_MSH
def mash_dist_refseq(sketch_path: str, mash_bin: str = "mash") -> str:
    """Run ``mash dist`` of a sketch/genome file against the RefSeq sketch DB.

    Args:
        sketch_path (str): Mash sketch file path or genome fasta file path
        mash_bin (str): Mash binary path

    Returns:
        (str): Mash STDOUT string

    Raises:
        Exception: if ``mash dist`` exits with a non-zero code
    """
    assert os.path.exists(sketch_path)
    cmd = [mash_bin, 'dist', MASH_REFSEQ_MSH, sketch_path]
    retcode, out, err = run_command(cmd)
    if retcode != 0:
        raise Exception(
            'Could not run Mash dist. EXITCODE="{}" STDERR="{}" STDOUT="{}"'.format(retcode, err, out))
    return out
def fasta_vs_refseq(fasta_path: str,
                    mash_bin: str = "mash",
                    sample_name: Optional[str] = None,
                    tmp_dir: str = "/tmp",
                    k: int = 16,
                    s: int = 400) -> pd.DataFrame:
    """Mash-distance one FASTA against every RefSeq genome sketch.

    A temporary Mash sketch of *fasta_path* is created, queried against the
    RefSeq sketch database, and removed again afterwards (even on error).

    Args:
        fasta_path: FASTA file path
        mash_bin: Mash binary path
        sample_name: Sample name
        tmp_dir: Temporary working directory
        k: Mash kmer size
        s: Mash number of min-hashes

    Returns:
        (pd.DataFrame): Mash genomic distance results ordered by ascending distance
    """
    sketch_path = None
    try:
        sketch_path = sketch_fasta(fasta_path,
                                   mash_bin=mash_bin,
                                   tmp_dir=tmp_dir,
                                   sample_name=sample_name,
                                   k=k,
                                   s=s)
        dist_output = mash_dist_refseq(sketch_path, mash_bin=mash_bin)
        logging.info('Ran Mash dist successfully (output length=%s). Parsing Mash dist output', len(dist_output))
        df_mash = mash_dist_output_to_dataframe(dist_output)
        df_mash['sample'] = sample_name
        logging.info('Parsed Mash dist output into Pandas DataFrame with %s rows', df_mash.shape[0])
        logging.debug('df_mash: %s', df_mash.head(5))
        return df_mash
    finally:
        # Always clean up the temporary sketch, even when Mash fails.
        if sketch_path and os.path.exists(sketch_path):
            logging.info('Deleting temporary sketch file "%s"', sketch_path)
            os.remove(sketch_path)
            logging.info('Sketch file "%s" deleted!', sketch_path)
logging.info('Sketch file "%s" deleted!', sketch_path)
def fastq_vs_refseq(fastqs: List[str],
                    mash_bin: str = 'mash',
                    sample_name: Optional[str] = None,
                    tmp_dir: str = '/tmp',
                    k: int = 16,
                    s: int = 400,
                    m: int = 8) -> pd.DataFrame:
    """Compute Mash distances between input reads against all RefSeq genomes.

    The ``sample_name`` annotation was corrected to ``Optional[str]`` for
    consistency with ``fasta_vs_refseq`` (it defaults to ``None``).

    Args:
        fastqs: FASTQ paths
        mash_bin: Mash binary path
        sample_name: Sample name
        tmp_dir: Temporary working directory
        k: Mash kmer size
        s: Mash number of min-hashes
        m: Mash number of times a k-mer needs to be observed in order to be considered for Mash sketch DB

    Returns:
        (pd.DataFrame): Mash genomic distance results ordered by ascending distance
    """
    assert len(fastqs) > 0, "Must supply one or more FASTQ paths"
    sketch_path = None
    try:
        sketch_path = sketch_fastqs(fastqs,
                                    mash_bin=mash_bin,
                                    tmp_dir=tmp_dir,
                                    sample_name=sample_name,
                                    k=k,
                                    s=s,
                                    m=m)
        logging.info('Mash sketch database created for "%s" at "%s"', fastqs, sketch_path)
        logging.info('Querying Mash sketches "%s" against RefSeq sketch database', sketch_path)
        mashout = mash_dist_refseq(sketch_path, mash_bin=mash_bin)
        logging.info('Queried "%s" against RefSeq sketch database. Parsing into Pandas DataFrame', sketch_path)
        df_mash = mash_dist_output_to_dataframe(mashout)
        df_mash['sample'] = sample_name
        logging.info('Parsed Mash distance results into DataFrame with %s entries', df_mash.shape[0])
        logging.debug('df_mash %s', df_mash.head(5))
        return df_mash
    finally:
        # Always clean up the temporary sketch, even when Mash fails.
        if sketch_path and os.path.exists(sketch_path):
            logging.info('Deleting temporary sketch file "%s"', sketch_path)
            os.remove(sketch_path)
            logging.info('Sketch file "%s" deleted!', sketch_path)
import logging
from io import StringIO
from typing import Optional
import pandas as pd
#: Sometimes Mash dist outputs 4 columns other times it outputs 5 columns
#: (the 5-column layout has an extra query_id column; see
#: mash_dist_output_to_dataframe, which normalizes both to this layout).
MASH_DIST_4_COLUMNS = """
match_id
distance
pvalue
matching
""".strip().split('\n')
#: Mash dist 5-column layout (4-column layout plus query_id).
MASH_DIST_5_COLUMNS = """
match_id
query_id
distance
pvalue
matching
""".strip().split('\n')
#: Mash screen output columns
MASH_SCREEN_COLUMNS = """
identity
shared_hashes
median_multiplicity
pvalue
match_id
match_comment
""".strip().split('\n')
def _no_periods(s: str) -> Optional[str]:
return s if s != '.' else None
def parse_refseq_info(match_id: str) -> dict:
"""Parse a RefSeq Mash match_id
For example from the following `match_id`:
./rcn/refseq-NZ-1147754-PRJNA224116-.-GCF_000313715.1-.-Salmonella_enterica_subsp._enterica_serovar_Enteritidis_str._LA5.fna
If you split on '-' and ignoring the first two elements, you can extract, in order, the NCBI:
- Taxonomy UID = 1147754
- BioProject accession = PRJNA224116
- BioSample accession = None
- Genome accession = GCF_000313715.1
- plasmid name = None
- FNA filename (Salmonella_enterica_subsp._enterica_serovar_Enteritidis_str._LA5.fna)
If "Salmonella" is found in the FNA filename, then serovar and subspecies will be parsed if present.
For the example above, the subspecies would be "enterica" and the serovar would be "Enteritidis".
Values with periods ('.') will be treated as None (null).
Args:
match_id (str): Mash RefSeq match_id with taxid, bioproject, full strain name, etc delimited by '-'
Returns:
(dict): parsed NCBI accession and other info
"""
logging.debug('Parsing RefSeq info from "%s"', match_id)
sp = match_id.split('-')
_, prefix, taxid_str, bioproject, biosample, assembly_acc, plasmid, fullname = sp
taxid = int(taxid_str)
fullname = fullname.replace('.fna', '')
serovar = None
subsp = None
if 'Salmonella' in fullname:
if '_serovar_' in fullname:
serovar = fullname.split('_serovar_')[-1].split('_str.')[0]
if '_subsp._' in fullname:
subsp = fullname.split('_subsp._')[-1].split('_')[0]
return dict(match_id=match_id,
taxid=taxid,
biosample=_no_periods(biosample),
bioproject=_no_periods(bioproject),
assembly_accession=_no_periods(assembly_acc),
plasmid=_no_periods(plasmid),
serovar=serovar,
subspecies=subsp)
def mash_dist_output_to_dataframe(mash_out: str) -> pd.DataFrame:
    """Mash dist stdout to Pandas DataFrame

    The 4- and 5-column Mash dist layouts are both normalized to the
    4-column layout, each match_id is expanded into its NCBI accession
    fields via parse_refseq_info, and the two are merged on match_id.

    Args:
        mash_out (str): Mash dist stdout

    Returns:
        (pd.DataFrame): Mash dist table ordered by ascending distance
    """
    # NOTE(review): read_table defaults to header='infer', so the first line
    # of mash_out is consumed as the header row before columns are renamed
    # below.  If Mash dist output has no header line, the first result row
    # is silently dropped — confirm against real output (header=None may be
    # intended).
    df = pd.read_table(StringIO(mash_out))
    ncols = df.shape[1]
    if ncols == 5:
        df.columns = MASH_DIST_5_COLUMNS
        # Drop the extra query_id column so both layouts look alike.
        df = df[MASH_DIST_4_COLUMNS]
    if ncols == 4:
        df.columns = MASH_DIST_4_COLUMNS
    df.sort_values(by='distance', ascending=True, inplace=True)
    match_ids = df.match_id
    # Expand each match_id into NCBI accession fields and join back on it.
    dfmatch = pd.DataFrame([parse_refseq_info(match_id=match_id) for match_id in match_ids])
    return pd.merge(dfmatch, df, on='match_id')
def mash_screen_output_to_dataframe(mash_out: str) -> pd.DataFrame:
    """Mash screen stdout to Pandas DataFrame

    Args:
        mash_out: Mash screen stdout

    Returns:
        (pd.DataFrame): Mash screen output table ordered by `identity` and `median_multiplicity` columns in descending
            order, or None if the Mash output is missing
    """
    dfmerge = None
    if len(mash_out) > 0:
        # NOTE(review): read_table defaults to header='infer', so the first
        # line of mash_out is consumed as the header row before columns are
        # renamed — confirm Mash screen output really has a header line.
        df = pd.read_table(StringIO(mash_out))
        ncols = df.shape[1]
        # Older Mash versions may omit trailing columns; rename only as
        # many as are present.
        df.columns = MASH_SCREEN_COLUMNS[:ncols]
        df.sort_values(by=['identity', 'median_multiplicity'], ascending=[False, False], inplace=True)
        match_ids = df.match_id
        # Expand each match_id into NCBI accession fields and join back.
        refseq_matches = [parse_refseq_info(match_id=match_id) for match_id in match_ids]
        dfmatch = pd.DataFrame(refseq_matches)
        dfmerge = pd.merge(dfmatch, df, on='match_id')
    return dfmerge
Base TCUP environment for refstack.org
==========================================
https://blueprints.launchpad.net/refstack/+spec/standalone-tcup-driver
TCUP (Tempest in a Container, Upload from Probe) is a self-contained, universal Tempest environment that can be widely used by the community with minimal effort AND minimal support effort by the Refstack team.
Problem description
===================
For DefCore and the core definition, we need to collect lots and lots of test runs against deployed OpenStack clouds. Many of these clouds are behind firewalls and not accessible by 3rd parties. So we need to make it super easy to make running Tempest and result uploads as accessible as possible.
Community access is the goal of TCUP. While the original and primary intent of Tempest was to test OpenStack code, having a large body of tests creates unique opportunities for us. DefCore uses the tests as a way to define core capabilities.
Installing and configuring Tempest presents a challenge for many in the community. TCUP's job is to reduce that complexity to the smallest possible set.
Who are "Users" below? The user in this context is the TCUP user, not user inferred from the OpenStack API credentials.
Requirements:
* It should not matter which Linux distro they are using
* Users should not have to figure out which Refstack and Tempest code to check out (beyond the single tcup.py file)
* Users should not have to deal with packages or pips (beyond Docker and the minimal tcup requirements)
* Users should not have to determine where to upload their results (but could override)
* Users' identities must be hidden unless they agree/ask to have them published. There is a risk that their OpenStack credentials may be revealed in log messages - this should be addressed.
* When the test is complete, the test system dissolves
Anti-Requirements:
* Users should not need to checkout or clone any code
* Users should not have to edit configuration files
.. image:: https://wiki.openstack.org/w/images/f/f4/Tcup_flow.png
:width: 700px
:alt: TCUP workflow
Proposed change
===============
TCUP should be designed in as simple a way as possible.
Running TCUP should only require Docker (.9+), a single tcup.py file with minimal dependencies, working OpenStack cloud credentials and an Internet connection for install and results upload. The cloud being tested does _not_ have to be public. TCUP will work as long as the user and the TCUP install has network access to the cloud being tested.
Environment variables from the host (OS_*) will be passed into the container. The container should not start unless critical OS_ variables are in place. The specific OS_ items are limited:
* OS_AUTH_URL
* OS_TENANT_NAME
* OS_USERNAME
* OS_PASSWORD
There should be a `--debug` mode to allow for user testing and debug. The debug flag should NOT start tests automatically and should map the user pwd into /dev in the TCUP container.
There must be both a way to use local code (refstack) to run TCUP and also a simple file download approach. These methods should be functionally identical.
While there may be broad uses of TCUP for test automation, it is not desirable to overload them at the expense of manual usability. TCUP should be kept very simple for users in this pass.
By default, TCUP will upload results to the Refstack site (this is a requirement above); however, we anticipate other use cases. For users who do not want to upload their results, they can change their API target parameters. This will allow users to instead upload their results to an internal Refstack site or simply save the results to their local drive.
Alternatives
------------
* THESE ARE INCLUDED FOR COMPLETENESS, NOT IMPLEMENTATION *
It would be possible to create a single-use VM for this testing. That would require much more download time, build effort and maintenance.
An additional method is to package execute_test on its own allowing it to install on fedora and ubuntu. It already has tempest havana stable as a dependency. It can be installed and the rc file can be sourced and it can be kicked off. No container would be needed and you can log into any cloud instance on any cloud provider that has network reach to the cloud you want to test. Start an ephemeral vm and log into it and run two commands.
Yet another approach is to assume tempest havana is already installed. Users can invokes execute_test directly without using docker or any container. This omits the "minimal setup" TCUP approach.
It would be possible to setup a cloud-based process to run Tempest (this is a Refstack use case); however, this would not reach private clouds. It also does not give the user control of the data.
Data model impact
-----------------
None.
REST API impact
---------------
None; however, TCUP will rely on a stable upload REST API.
Security impact
---------------
User passwords are passed into the container and should be redacted from log entries or error messages.
We should prompt the user (from the tcup.py) code to enter a password if none is provided in the environment.
Passwords must not be stored by TCUP!
Notifications impact
--------------------
None
Other end user impact
---------------------
TCUP is designed as a stand-alone capability. It should not have interactions with other parts of the system except via the API as noted above.
Performance Impact
------------------
None.
Other deployer impact
---------------------
The community version of TCUP does NOT have to be coupled to other test running models.
It is _not_ desirable to complicate TCUP to serve other uses.
Developer impact
----------------
None. TCUP should use the standard API.
Implementation
==============
Assignee(s)
-----------
Primary assignee:
robhirschfeld
Other contributors:
praveen (test)
alexhirschfeld (dev & test)
dlenwell (review)
rockyg (documentation) * these documents are ripe with raw material for docs :)
Work Items
----------
* build TCUP docker container (via Dockerfile)
* build tcup.py to build and launch docker
* document run process
* update configuration generator to use environment variables
* integrate execute_test scripts into TCUP
* integrate default upload target into TCUP
Dependencies
============
* execute_test scripts must support environment variables
* upload API must function correctly
Testing
=======
Manual environment testing by Refstack and community.
Documentation Impact
====================
TCUP needs detailed community facing documentation and video tours.
References
==========
* http://docker.io
| /refstack-1.3.0.tar.gz/refstack-1.3.0/specs/prior/approved/refstack-org-tcup-base.rst | 0.774626 | 0.662196 | refstack-org-tcup-base.rst | pypi |
Intuitive visualization for test comparison
==========================================
Blueprint: https://blueprints.launchpad.net/refstack/+spec/results-visualization
Storyboard: https://storyboard.openstack.org/#!/story/111
Result comparison is the very essence of refstack. This spec lays out the
basic design objectives, comparison matrix and wire-frames for the initial
visualization between cloud test runs. The display of this information
must make it simple for users to map their cloud interoperability to other
clouds.
Note: Positive results only for refstack public site.
We will still handle negative/skip use cases.
Problem description
-------------------
Refstack collects substantial amounts of detailed raw data in the form
of passed test results. Individually, these tests provide little insight
about cloud interoperability; consequently, restack must provide a way to
group results (likely capabilities) and contract with other posted test
results.
Specific Items that Need Visualization
Comparison of results against:
* Core Test List ("am I core?")
* Universe of Tests ("close to 100%?")
* Other's runs ("do I interoperate?")
* Previous runs ("did I improve?")
To make it easier to determine, results should follow the capabilities
groups rather than individual tests. Users should be able to drill down
into a capability to look at the test detail.
Note about Capabilities versus Tests: In DefCore, capabilities are
tracked as the definition of "core." Each capability has a defined
set of tests. For a core capability to be considered as "passed,"
all of the tests in that capability must pass. Since we do not
track "failed" as a state, a non-passing test simply makes the whole
capability not passing.
General Visualization: Tristate
----------------------------
The general approach is to focus on _deltas_ from other sets rather
than showing the actual results. This means that visualizations
will be more about what's different or changed. The preferred tool
will be the "tristate" graph: http://omnipotent.net/jquery.sparkline/#s-about.
For consistency, users should expect that:
* +1 = good, match
* 0 = ok, no match (run is advantaged over reference/core)
* -1 = bad, missing functionality
.. image:: https://wiki.openstack.org/w/images/1/19/Refstack_mock_tristate.png
:width: 700px
:alt: Tristate Graph Sample
There are two consistent but slightly different ways that we will use tri-state:
1) comparing to core tests with a goal of showing compliance
* +1 = passed a core test or capability
* 0 = passed a non-core test or capability
* -1 = did not pass a core test or capability (this is the same as "not-reported")
2) compare to other tests with a goal of showing interoperability
* +1 = passed in both samples
* 0 = passed in subject but not in reference (subject is advantaged)
* -1 = not passed in subject but did in reference (subject is disadvantaged)
An example rendering would lock like this:
.. image:: https://wiki.openstack.org/w/images/5/5e/Refstack_mock_comparison.png
:width: 700px
:alt: Comparison Mock Up
Important Design Note: All tristate graphs must use the same ordered capability/test list
to ensure that results are easily to compare visually. The purpose of the tristate is
to help quickly find outliers not perform detailed comparison. Drill downs will be used
to resolve specific differences.
Detailed Visualization: Drill Down
----------------------------
We will expand the capabilities level tristate in the detailed visualization but
still retain the tristate meanings with specific tests. In the drill down, the
user will see the original tristate graph above a table with the capabilities
list (order preserved) by rows. In each row, the following columns:
* the name of the capability
* a tristate will visualize the individual test results using the same +1/0/-1 semantics
* a simple list of the -1 tests
Usability Note: The name of the test/capability should be included as a hover.
Alternatives
----------------------------
There are several other approaches to visualize this information including shaded table
cells and spider charts. These would be acceptable alternatives; however, the tristate
chart is compact, very simple to use and highly intuitive for comparing result sets.
It would be possible to use tristate shapes (circle, open circle, square) to reflect the same
data on tables.
Data model impact
Likely none; however, depending on the complexity of the queries,
it may be necessary to create intermediate tables to summarize
capabilities from test results per run to improve performance.
If new models are needed, this spec should be updated with the design.
At this time, we assume that the collection does not require an
intermediate model.
Specification for the method
These are read-only reports and should use GETs.
The URL path should match the other UI paths with then following pattern:
Compare against previous results:
HTML response: GET /[refstack base]/compare/[release]/[cloud id]
Compare against other clouds:
HTML response: GET /[refstack base]/compare/[release]/[cloud id]?to=[other 1]|[other 2]
JSON response same as HTML but with .json
Security impact
None. These are open reports.
Notifications impact
None.
Other end user impact
None.
Developer impact
None.
Assignee(s)
TBD
Work Items
* Spec & Mock
* CSS & HTML Frame
* Data Collection
* Connect Data into UI Page
Dependencies
Sparklines JS libraries: http://omnipotent.net/jquery.sparkline/#s-about
Documentation Impact
Need to document screen and drill down expectation.
References
http://wiki.openstack.org/wiki/Governance/DefCoreCommittee | /refstack-1.3.0.tar.gz/refstack-1.3.0/specs/prior/approved/refstack-org-result-visualization.rst | 0.852997 | 0.842345 | refstack-org-result-visualization.rst | pypi |
=============================================
Subunit Data Management API
=============================================
Launchpad blueprint:
* https://blueprints.launchpad.net/refstack/+spec/subunit-data-api
This specification describes an expansion of the refstack api which, when
complete, will allow for the upload and management of subunit data files
to a RefStack server.
Problem description
===================
The current RefStack API allows for the upload, management, and verification
of test results by server administrators. These capabilities, though
sufficient for the current scope of RefStack, will need an API expansion in
order to allow for similar data management of subunit data results. This
expansion will enable those organizations looking to achieve a greater degree
of interoperability to securely share the details of test runs with the
Foundation so as to get assistance with getting their OpenStack instance to
successfully meet interop standards.
Proposed change
===============
**Add new API functionality to the RefStack v1 API**
* Upload new subunit data- nonadmin capability
* Link new subunit data to a corresponding existing test result-
nonadmin capability
* Delete subunit data- admin/owner capability
* Show subunit data for a given test result- admin/owner capability
Note that, amongst the additions to the table that stores test results,
there is no added field intended for the storage of a subunit result id.
This is because, as per the spec defining the changes needed to upload and
utilize subunit data, the current plan is to link the two entries via the
metadata table.
Alternatives
------------
* If subunit2sql takes too long to perform the aforementioned operations,
using asynchronous processing and upload may prove to be a better option.
For now though, it appears as though synchronous operations will be possible
* Possibly require subunit data to be converted into json before being passed
in for upload
Data model impact
------------------
This API will interface with subunit2sql, which will add several tables into
the RefStack database. Though these have been laid out already in the general
subunit data import spec, for the sake of thoroughness, here they
are again:::
--------------------------------------
| tests |
--------------------------------------
| id | String(256) |
| test_id | String(256) |
| run_count | Integer |
| failure | Integer |
| run_time | Float |
--------------------------------------
----------------------------------------
| runs |
----------------------------------------
| id | BigInteger |
| skips | Integer |
| fails | Integer |
| passes | Integer |
| run_time | Float |
| artifacts | Text |
| run_at | DateTime |
----------------------------------------
---------------------------------------------------
| test_runs |
---------------------------------------------------
| id | BigInteger |
| test_id | BigInteger |
| run_id | BigInteger |
| status | String(256) |
| start_time | DateTime |
| start_time_microseconds | Integer |
| stop_time | DateTime |
| stop_time_microseconds | Integer |
| test | Test |
| run | Run |
---------------------------------------------------
-------------------------------------------
| run_metadata |
-------------------------------------------
| id | BigInteger |
| key | String(255) |
| value | String(255) |
| run_id | BigInteger |
| run | Run |
-------------------------------------------
-------------------------------------------
| test_run_metadata |
-------------------------------------------
| id | BigInteger |
| key | String(255) |
| value | String(255) |
| test_run_id | BigInteger |
| test_run | TestRun |
-------------------------------------------
-------------------------------------------
| test_metadata |
-------------------------------------------
| id | BigInteger |
| key | String(255) |
| value | String(255) |
| test_id | BigInteger |
| test | Test |
-------------------------------------------
-------------------------------------------
| attachments |
-------------------------------------------
| id | BigInteger |
| test_run_id | BigInteger |
| label | String(255) |
| attachment | LargeBinary |
| test_run | TestRun |
-------------------------------------------
REST API impact
---------------
The current plan, as briefly outlined above, is to make the following
additions to the current API:
**Upload subunit data**
* Description:
This capability will be used to upload the subunit data of a test result
that is not already in the database. It will do so in a few steps. First,
    it will take the subunit file, open it, and convert it to v2 stream format
(refstack-client outputs a subunit v1 file). Then, it will check to make
sure the data is not already stored in the database, and if there is no
record matching the data stored in the passed-in file, the api should then
use subunit2sql to insert the subunit data into the appropriate fields, as
well as use the parsed data to insert a new entry into the
refstack "runs" table using the existing refstack api utilities. This may
seem a bit complicated for an upload function, but the goal in doing this
all in one fell swoop is to ensure that no subunit data is ever uploaded
that is not connected to some test result. Uploading subunit data will not
require admin privileges.
* Method Type: POST
* URI: v1/subunit/
* Normal Response Codes:
* Created (201)
* Error Response Codes:
* Bad Request (400)
* Not found (404)
* Request parameters: N/A
* JSON schema definition for the body data:
.. parsed-literal::
{
{
'subunit_data': <subunit data file>
}
}
* JSON schema definition for the response data:
.. parsed-literal::
{
'subunit-uuid': 'subunit2sql-defined run id',
'result-id': 'result id'
}
**Link subunit data to a corresponding existing test result**
* Description:
This will allow for the linking of a new, unadded set of subunit data
to a test result already existing in the database. It will do
so by converting the contents of the given file to a subunit v2 stream,
then using the stream to generate a corresponding test result,
and then comparing that to the passed in test result. If the
generated result and the stored result correspond to one another,
it should insert the subunit data into the database and link the two
entries via a key value pair in RefStack's meta table. The two keys I
plan to use are the subunit data's uuid and the test result's id.
Because the validity of the link is easily verifiable, this action will
not be one that requires admin privileges.
* Method Type: PUT
* URI: v1/subunit
* Normal Response Codes:
* OK (200)
* Error Response Codes:
* Bad Request (400)
* Unauthorized (401)
* Not Found (404)
* Request parameters:
+---------------+-------+--------------+-----------------------------------+
| Parameter | Style | Type | Description |
+===============+=======+==============+===================================+
| result_id | URI | csapi:UUID | test result ID to link to |
+---------------+-------+--------------+-----------------------------------+
* JSON schema definition for the body data:
.. parsed-literal::
{
'subunit data': <subunit data file>
}
* JSON schema definition for the response data:
.. parsed-literal::
{
'uuid': 'subunit2sql-defined run id',
'id': 'refstack test result id'
}
**Delete subunit data entry**
* Description
This utility will be used to delete subunit data from the RefStack
database. Foundation and vendor admins, along with entry owners will
be able to delete subunit data entry.
* Method type: DELETE
* URI: v1/subunit/{id}
* Normal Response Codes:
* No content (204)
* Error Response Codes:
* Bad Request (400)
* Unauthorized (401)
* Forbidden (403)
* Not found (404)
* Request parameters:
+---------------+-------+--------------+-----------------------------------+
| Parameter | Style | Type | Description |
+===============+=======+==============+===================================+
| id | URI | csapi:UUID | ID to be removed. |
+---------------+-------+--------------+-----------------------------------+
* JSON schema definition for the body data: N/A
* JSON schema definition for the response data: N/A
**Show subunit data**
* Description
This utility will be used to list the subunit data that has been
uploaded into the RefStack database. This action will be available
to vendor and Foundation admins only. A specific subunit data entry
can be selected and viewed using the result_id parameter. It will do
so in two steps. First, it will take the given test result id, and
reference refstack's meta table to find the corresponding subunit
uuid. Then, it will use that uuid to GET the subunit data from the
v1/subunit/{uuid} endpoint.
* Method type: GET
* URI: v1/subunit/{uuid}
* Normal Response Codes:
* OK (200)
* Error Response Codes:
* Bad Request (400)
* Unauthorized (401)
* Forbidden (403)
* Request parameters:
+---------------+-------+--------------+-----------------------------------+
| Parameter | Style | Type | Description |
+===============+=======+==============+===================================+
| id | URI | csapi:UUID | test result id to search for. |
+---------------+-------+--------------+-----------------------------------+
* JSON schema definition for the body data: N/A
* JSON schema definition for the response data:
.. parsed-literal::
{
'subunit-data:': {
'run_at': 2017-08-16 18:34:58.367221Z
'uuid': '4d7950cb-586e-407e-9acf-5b169825af98',
'skips': 0,
'fails': 1,
'passes': 1,
'run_time': 2060.7
'artifacts': 'http://example-logs.log',
}
'tests': [
{
'id': '1
'test_id': 'tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_icmp_type_code'
'run_count': 1
'success': 1
'failure': 1
'run_time': 5.60538
},
{
'test_id': ' tempest.api.compute.keypairs.test_keypairs_negative.KeyPairsNegativeTestJSON.test_create_keypair_with_empty_public_key',
'run_count': 1,
'success': 0,
'failure': 1,
'run_time': 0.10919,
},
]
'test_runs': [
{
'test_id': 1,
'run_id': 1,
'status': 'success',
'start_time': 2017-08-16 07:21:56,
'stop_time': 2017-08-16 07:22:02,
'start_time_microsecond': 929341,
'stop_time_microsecond': 534721,
},
{
'test_id': 2,
'run_id': 2,
'status': 'fail',
'start_time': 2017-08-16 07:13:34,
'stop_time': 2017-08-16 07:13:35,
'start_time_microsecond': 693353,
'stop_time_microsecond': 726471,
},
]
'attachments': [
{
'test_run_id': 1,
'label': '<some label>'
'attachment': '<some link>'
}
]
}
**Delete Test Result**
* Description
This modification to the v1/results/ endpoint's delete function will
ensure that, when a test result is deleted, the corresponding subunit
data is too. This is necessary largely because, in our data model,
subunit data should always be linked to an associated test result.
* Method type: DELETE
* URI: v1/result/{id}
* Normal Response Codes:
* No content (204)
* Error Response Codes:
* Bad Request (400)
* Unauthorized (401)
* Forbidden (403)
* Not found (404)
* Request parameters:
+---------------+-------+--------------+-----------------------------------+
| Parameter | Style | Type | Description |
+===============+=======+==============+===================================+
| id | URI | csapi:UUID | ID to be removed. |
+---------------+-------+--------------+-----------------------------------+
* JSON schema definition for the body data: N/A
* JSON schema definition for the response data: N/A
Security impact
---------------
There has been some concern over the sharing of subunit data via the RefStack
API, and though it is largely based on misinformation, this is part of
why so few of the API additions are nonadmin. For more details about this
discussion, please refer to the generalized spec for the upload and usage of
subunit tests.
Notifications impact
--------------------
None.
Other end user impact
---------------------
None.
Performance impact
------------------
None.
Other deployer impact
---------------------
None.
Implementation
==============
Assignee(s)
-----------
Primary assignee:
Megan Guiney
Other contributors:
TBD
Work Items
----------
* Discuss, amend, and merge this spec
* Run subunit2sql performance tests
* add field to "test" table
* add subunit api functionality
* add subunit-adjacent test result api functionality
Dependencies
============
* subunit2sql and its dependencies will need to be installed
during refstack server setup. As a result, puppet-refstack may
need some adjustments.
Testing
=======
* Add unit tests to verify the proper functionality of the new API
additions.
Documentation Impact
====================
* Add documentation to detail the usage and functionality of the
new API additions.
References
==========
[1] https://github.com/openstack/refstack/blob/master/specs/pike/approved
/upload-subunit-tests.rst
| /refstack-1.3.0.tar.gz/refstack-1.3.0/specs/queens/approved/subunit-data-api.rst | 0.801198 | 0.770681 | subunit-data-api.rst | pypi |
<img alt="Refuel logo" src="https://raw.githubusercontent.com/refuel-ai/autolabel/main/docs/assets/Autolabel_blk_w_background.png">
<h4 align="center">
<a href="https://discord.gg/fweVnRx6CU">Discord</a> |
<a href="https://twitter.com/RefuelAI">Twitter</a> |
<a href="https://www.refuel.ai/">Website</a> |
<a href="https://www.refuel.ai/blog-posts/llm-labeling-technical-report">Benchmark</a>
</h4>
<div align="center" style="width:800px">
[](https://github.com/refuel-ai/autolabel/actions/workflows/black.yaml/badge.svg)   [](https://discord.gg/fweVnRx6CU) [](https://colab.research.google.com/drive/1t-9vNLkyoyySAG_0w3eR98biBOXlMO-E?usp=sharing)
</div>
## ⚡ Quick Install
`pip install refuel-autolabel`
## 📖 Documentation
[https://docs.refuel.ai/](https://docs.refuel.ai/)
## 🏷 What is Autolabel
Access to [large, clean and diverse](https://twitter.com/karpathy/status/1528443124577513472?lang=en) labeled datasets is a critical component for any machine learning effort to be successful. State-of-the-art LLMs like GPT-4 are able to [automatically label data](https://arxiv.org/abs/2303.15056) with [high accuracy](https://arxiv.org/abs/2303.16854), and at a fraction of the cost and time compared to manual labeling.
Autolabel is a Python library to label, clean and enrich text datasets with any Large Language Models (LLM) of your choice.
## 🚀 Getting started
Autolabel provides a simple 3-step process for labeling data:
1. Specify the labeling guidelines and LLM model to use in a JSON config.
2. Dry-run to make sure the final prompt looks good.
3. Kick off a labeling run for your dataset!
Let's imagine we are building an ML model to analyze the sentiment of movie reviews. We have a dataset of movie reviews that we'd like to get labeled first. For this case, here's what the example dataset and configs will look like:
```python
{
"task_name": "MovieSentimentReview",
"task_type": "classification",
"model": {
"provider": "openai",
"name": "gpt-3.5-turbo"
},
"dataset": {
"label_column": "label",
"delimiter": ","
},
"prompt": {
"task_guidelines": "You are an expert at analyzing the sentiment of movie reviews. Your job is to classify the provided movie review into one of the following labels: {labels}",
"labels": [
"positive",
"negative",
"neutral"
],
"few_shot_examples": [
{
"example": "I got a fairly uninspired stupid film about how human industry is bad for nature.",
"label": "negative"
},
{
"example": "I loved this movie. I found it very heart warming to see Adam West, Burt Ward, Frank Gorshin, and Julie Newmar together again.",
"label": "positive"
},
{
"example": "This movie will be played next week at the Chinese theater.",
"label": "neutral"
}
],
"example_template": "Input: {example}\nOutput: {label}"
}
}
```
Initialize the labeling agent and pass it the config:
```python
from autolabel import LabelingAgent, AutolabelDataset
agent = LabelingAgent(config='config.json')
```
Preview an example prompt that will be sent to the LLM:
```python
ds = AutolabelDataset('dataset.csv', config = config)
agent.plan(ds)
```
This prints:
```
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100/100 0:00:00 0:00:00
┌──────────────────────────┬─────────┐
│ Total Estimated Cost │ $0.538 │
│ Number of Examples │ 200 │
│ Average cost per example │ 0.00269 │
└──────────────────────────┴─────────┘
─────────────────────────────────────────
Prompt Example:
You are an expert at analyzing the sentiment of movie reviews. Your job is to classify the provided movie review into one of the following labels: [positive, negative, neutral]
Some examples with their output answers are provided below:
Example: I got a fairly uninspired stupid film about how human industry is bad for nature.
Output:
negative
Example: I loved this movie. I found it very heart warming to see Adam West, Burt Ward, Frank Gorshin, and Julie Newmar together again.
Output:
positive
Example: This movie will be played next week at the Chinese theater.
Output:
neutral
Now I want you to label the following example:
Input: A rare exception to the rule that great literature makes disappointing films.
Output:
─────────────────────────────────────────────────────────────────────────────────────────
```
Finally, we can run the labeling on a subset or entirety of the dataset:
```python
ds = agent.run(ds)
```
The output dataframe contains the label column:
```python
ds.df.head()
text ... MovieSentimentReview_llm_label
0 I was very excited about seeing this film, ant... ... negative
1 Serum is about a crazy doctor that finds a ser... ... negative
4 I loved this movie. I knew it would be chocked... ... positive
...
```
## Features
1. Label data for [NLP tasks](https://docs.refuel.ai/guide/tasks/classification_task/) such as classification, question-answering and named entity-recognition, entity matching and more.
2. Use commercial or open source [LLMs](https://docs.refuel.ai/guide/llms/llms/) from providers such as OpenAI, Anthropic, HuggingFace, Google and more.
3. Support for research-proven LLM techniques to boost label quality, such as few-shot learning and chain-of-thought prompting.
4. [Confidence estimation](https://docs.refuel.ai/guide/accuracy/confidence/) and explanations out of the box for every single output label
5. [Caching and state management](https://docs.refuel.ai/guide/reliability/state-management/) to minimize costs and experimentation time
## Access to Refuel hosted LLMs
Refuel provides access to hosted open source LLMs for labeling, and for estimating confidence. This is helpful, because you can calibrate a confidence threshold for your labeling task, and then route less confident labels to humans, while you still get the benefits of auto-labeling for the confident examples.
In order to use Refuel hosted LLMs, you can [request access here](https://refuel-ai.typeform.com/llm-access).
## Benchmark
Check out our [technical report](https://www.refuel.ai/blog-posts/llm-labeling-technical-report) to learn more about the performance of various LLMs, and human annotators, on label quality, turnaround time and cost.
## 🛠️ Roadmap
Check out our [public roadmap](https://github.com/orgs/refuel-ai/projects/15) to learn more about ongoing and planned improvements to the Autolabel library.
We are always looking for suggestions and contributions from the community. Join the discussion on [Discord](https://discord.gg/fweVnRx6CU) or open a [Github issue](https://github.com/refuel-ai/autolabel/issues) to report bugs and request features.
## 🙌 Contributing
Autolabel is a rapidly developing project. We welcome contributions in all forms - bug reports, pull requests and ideas for improving the library.
1. Join the conversation on [Discord](https://discord.gg/fweVnRx6CU)
2. Open an [issue](https://github.com/refuel-ai/autolabel/issues) on Github for bugs and request features.
3. Grab an open issue, and submit a [pull request](https://github.com/refuel-ai/autolabel/blob/main/CONTRIBUTING.md).
| /refuel-autolabel-0.0.15.tar.gz/refuel-autolabel-0.0.15/README.md | 0.534612 | 0.97151 | README.md | pypi |
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import pandas as pd
from langchain.schema import Generation
from pydantic import BaseModel
from autolabel.configs import AutolabelConfig
from autolabel.utils import calculate_md5
class ModelProvider(str, Enum):
    """Enum containing all LLM providers currently supported by autolabeler"""
    # str-valued members so they compare/serialize directly against config strings.
    OPENAI = "openai"
    ANTHROPIC = "anthropic"
    HUGGINGFACE_PIPELINE = "huggingface_pipeline"
    REFUEL = "refuel"
    GOOGLE = "google"
    COHERE = "cohere"
    # Escape hatch for a user-supplied model implementation.
    CUSTOM = "custom"
class TaskType(str, Enum):
    """Enum containing all the types of tasks that autolabeler currently supports"""
    # str-valued members so task types round-trip through JSON configs unchanged.
    CLASSIFICATION = "classification"
    NAMED_ENTITY_RECOGNITION = "named_entity_recognition"
    QUESTION_ANSWERING = "question_answering"
    ENTITY_MATCHING = "entity_matching"
    MULTILABEL_CLASSIFICATION = "multilabel_classification"
class FewShotAlgorithm(str, Enum):
    """Enum of supported algorithms for choosing which examples to provide the LLM in its instruction prompt"""
    FIXED = "fixed"
    SEMANTIC_SIMILARITY = "semantic_similarity"
    MAX_MARGINAL_RELEVANCE = "max_marginal_relevance"
    # Label-diversity strategies: select examples spread across labels,
    # either randomly or by similarity (see the example-selector implementations).
    LABEL_DIVERSITY_RANDOM = "label_diversity_random"
    LABEL_DIVERSITY_SIMILARITY = "label_diversity_similarity"
class TaskStatus(str, Enum):
    """Enum of task run lifecycle states (only ACTIVE is currently defined)"""
    ACTIVE = "active"
class MetricType(str, Enum):
    """Enum of supported performance metrics. Some metrics are always available (task agnostic), while others are only supported by certain types of tasks"""
    # Task agnostic
    SUPPORT = "support"
    COMPLETION_RATE = "completion_rate"
    # Classification metrics
    ACCURACY = "accuracy"
    CONFUSION_MATRIX = "confusion_matrix"
    LABEL_DISTRIBUTION = "label_distribution"
    F1 = "f1"
    F1_MICRO = "f1_micro"
    F1_MACRO = "f1_macro"
    F1_WEIGHTED = "f1_weighted"
    TEXT_PARTIAL_MATCH = "text_partial_match"
    # Confidence metrics (require a model that emits confidence scores)
    AUROC = "auroc"
    THRESHOLD = "threshold"
    # Aggregate Metrics
    CLASSIFICATION_REPORT = "classification_report"
class F1Type(str, Enum):
    """Enum of F1 computation variants (multi-label sets vs. raw text)"""
    MULTI_LABEL = "multi_label"
    TEXT = "text"
class MetricResult(BaseModel):
    """Contains performance metrics gathered from autolabeler runs"""
    name: str  # metric name (commonly one of the MetricType values)
    value: Any  # metric payload; type varies by metric (float, dict, report, ...)
    show_running: Optional[bool] = True  # presumably controls display while a run is in progress — confirm with consumers
class ErrorType(str, Enum):
    """Enum of supported error types"""
    LLM_PROVIDER_ERROR = "llm_provider_error"
    PARSING_ERROR = "parsing_error"
    OUTPUT_GUIDELINES_NOT_FOLLOWED_ERROR = "output_guidelines_not_followed_error"
    EMPTY_RESPONSE_ERROR = "empty_response_error"
class LabelingError(BaseModel):
    """Contains information about an error that occurred during the labeling process"""
    error_type: ErrorType  # category of the failure (provider, parsing, ...)
    error_message: str  # human-readable description of what went wrong
class LLMAnnotation(BaseModel):
    """Contains label information of a given data point, including the generated label, the prompt given to the LLM, and the LLMs response. Optionally includes a confidence_score if supported by the model"""
    successfully_labeled: bool  # True when a usable label was produced
    label: Any  # the parsed label value
    curr_sample: Optional[bytes] = ""  # NOTE(review): annotated as bytes but defaults to str "" — confirm intended type
    confidence_score: Optional[float] = None  # only populated by models that support confidence
    generation_info: Optional[Dict[str, Any]] = None  # provider metadata for the generation, if any
    raw_response: Optional[str] = ""  # unparsed text returned by the LLM
    explanation: Optional[str] = ""
    prompt: Optional[str] = ""  # full prompt sent to the LLM
    error: Optional[LabelingError] = None  # populated when labeling failed
class Dataset(BaseModel):
    """Contains Dataset parameters, including input file path, indexes for state management (e.g. job batching and retries), and a unique ID"""

    id: str  # unique ID; create_id derives one from the dataset + config
    input_file: str
    start_index: int
    end_index: int

    class Config:
        orm_mode = True

    @classmethod
    def create_id(
        cls,
        dataset: Union[str, pd.DataFrame],
        config: AutolabelConfig,
        start_index: int,
        max_items: int,
    ) -> str:
        """
        Generates a unique ID for the given Dataset configuration

        Args:
            dataset: either 1) input file name or 2) pandas Dataframe
            config: AutolabelConfig object containing project settings
            start_index: index to begin labeling job at (used for job batching, retries, state management)
            max_items: number of data points to label, beginning at start_index

        Returns:
            filehash: a unique ID generated from an MD5 hash of the function's parameters
        """
        if isinstance(dataset, str):
            # Open inside a context manager so the handle is always closed
            # (the previous implementation leaked the open file object).
            with open(dataset, "rb") as dataset_file:
                filehash = calculate_md5(
                    [dataset_file, config._dataset_config, start_index, max_items]
                )
        else:
            filehash = calculate_md5(
                [dataset.to_csv(), config._dataset_config, start_index, max_items]
            )
        return filehash
class Task(BaseModel):
    """Represents a labeling task: its type, the model used, and its configuration"""

    id: str  # unique ID; create_id derives one from the config
    task_type: TaskType
    model_name: str
    config: str  # task configuration, stored as a string

    class Config:
        orm_mode = True

    @classmethod
    def create_id(cls, config: AutolabelConfig) -> str:
        """Generate a deterministic unique ID by MD5-hashing the task config."""
        # First parameter renamed self -> cls: this is a classmethod.
        return calculate_md5(config.config)
class TaskRun(BaseModel):
    """State of a single labeling run of a task over a dataset"""
    id: Optional[str] = None
    created_at: datetime
    task_id: str  # references Task.id
    dataset_id: str  # references Dataset.id
    current_index: int  # index of the next example to label (used for resuming)
    output_file: str
    status: TaskStatus
    error: Optional[str] = None  # populated when the run failed
    metrics: Optional[Dict[str, Any]] = None
    class Config:
        orm_mode = True
class Annotation(BaseModel):
    """A stored annotation: the LLM labeling output for the example at `index`"""
    id: Optional[str] = None
    index: int  # position of the example within the dataset
    llm_annotation: Optional[LLMAnnotation] = None
    class Config:
        orm_mode = True
class GenerationCacheEntry(BaseModel):
    """Cache entry for LLM generations"""
    model_name: str
    prompt: str
    model_params: str  # serialized model parameters
    generations: Optional[List[Generation]] = None  # cached generations; None until populated
    creation_time_ms: Optional[int] = -1  # creation timestamp in ms; -1 means unset
    ttl_ms: Optional[int] = -1  # time-to-live in ms; -1 presumably means no expiry — confirm with cache implementation
    class Config:
        orm_mode = True
class RefuelLLMResult(BaseModel):
    # NOTE: the bare strings below follow the attribute-docstring convention
    # (read by doc tools such as Sphinx); only the first is a real class docstring.
    """List of generated outputs. This is a List[List[]] because
    each input could have multiple candidate generations."""
    generations: List[List[Generation]]
    """Errors encountered while running the labeling job"""
    errors: List[Optional[LabelingError]]
    """Costs incurred during the labeling job"""
    # Mutable [] default is safe here: pydantic copies field defaults per instance.
    costs: Optional[List[float]] = []
import asyncio
import hashlib
import os
import json
import logging
from string import Formatter
import re
import string
from typing import Any, Dict, Iterable, List, Optional, Sequence, Union
import shutil
import regex
import wget
from rich.console import Console, Group
from rich.live import Live
from rich.progress import (
BarColumn,
MofNCompleteColumn,
Progress,
ProgressColumn,
ProgressType,
TextColumn,
TimeElapsedColumn,
TimeRemainingColumn,
)
from rich.table import Table
logger = logging.getLogger(__name__)
# Example datasets that get_data() can download from the public
# autolabel-benchmarking S3 bucket.
EXAMPLE_DATASETS = [
    "banking",
    "civil_comments",
    "ledgar",
    "walmart_amazon",
    "company",
    "squad_v2",
    "sciq",
    "conll2003",
    "movie_reviews",
    "twitter_emotion_detection",
]
# Datasets with no "seed" partition; only their "test" partition is downloaded.
NO_SEED_DATASET = [
    "movie_reviews",
]
# URL template for a dataset partition ("seed" or "test") in CSV form.
DATASET_URL = "https://autolabel-benchmarking.s3.us-west-2.amazonaws.com/{dataset}/{partition}.csv"
def extract_valid_json_substring(string: str) -> Optional[str]:
    """Return the first substring of `string` that parses as a JSON object.

    Uses the third-party `regex` module's recursive pattern support to find
    brace-balanced candidates, then validates each with `json.loads`.

    Args:
        string: arbitrary text that may contain an embedded JSON object

    Returns:
        The first valid JSON object substring, or None if none is found.
    """
    # Recursive pattern matching a balanced {...} group (requires `regex`, not `re`).
    pattern = r"{(?:[^{}]|(?R))*}"
    # Try every brace-balanced candidate, not just the first one: the first
    # balanced group may still fail json.loads (e.g. single-quoted keys),
    # while a later substring is valid JSON.
    for match in regex.finditer(pattern, string):
        candidate = match.group(0)
        try:
            json.loads(candidate)
            return candidate
        except ValueError:
            pass
    return None
def calculate_md5(input_data: Any) -> str:
    """Compute a hex MD5 digest for a dict, file-like object, list, or any other value.

    Dicts are serialized deterministically (sorted keys) before hashing; file-like
    objects are streamed in chunks; lists hash the digest of each element
    recursively; anything else hashes its str() representation.

    Args:
        input_data: the value to hash

    Returns:
        The hex MD5 digest of `input_data`.
    """
    if isinstance(input_data, dict):
        # Convert dictionary to a JSON-formatted string (sorted keys for determinism).
        input_str = json.dumps(input_data, sort_keys=True, skipkeys=True).encode(
            "utf-8"
        )
    elif hasattr(input_data, "read"):
        # Stream the file-like object in chunks to bound memory use.
        md5_hash = hashlib.md5()
        while True:
            chunk = input_data.read(4096)
            # Stop on any falsy chunk ("" or b""): the previous sentinel b""
            # looped forever on text-mode files, whose read() returns "" at EOF.
            if not chunk:
                break
            # Text-mode files yield str chunks; encode them before hashing.
            if isinstance(chunk, str):
                chunk = chunk.encode("utf-8")
            md5_hash.update(chunk)
        return md5_hash.hexdigest()
    elif isinstance(input_data, list):
        # Hash each element's digest so nested structures hash recursively.
        md5_hash = hashlib.md5()
        for item in input_data:
            md5_hash.update(calculate_md5(item).encode("utf-8"))
        return md5_hash.hexdigest()
    else:
        # Convert other input to byte string
        input_str = str(input_data).encode("utf-8")
    # Calculate MD5 hash of byte string
    return hashlib.md5(input_str).hexdigest()
def get_format_variables(fmt_string: str) -> List:
    """Return the names of all replacement fields in a str.format-style string."""
    field_names = []
    # Formatter.parse yields (literal_text, field_name, format_spec, conversion);
    # field_name is None for trailing literal segments.
    for _, field_name, _, _ in Formatter().parse(fmt_string):
        if field_name is not None:
            field_names.append(field_name)
    return field_names
def _autolabel_progress(
    description: str = None,
    console: Optional[Console] = None,
    transient: bool = False,
    disable: bool = False,
) -> Progress:
    """Create a progress bar for autolabel."""
    columns: List[ProgressColumn] = []
    # A description column is only included when a description was supplied.
    if description:
        columns.append(TextColumn("[progress.description]{task.description}"))
    columns.append(BarColumn())
    columns.append(MofNCompleteColumn())
    columns.append(TimeElapsedColumn())
    columns.append(TimeRemainingColumn())
    return Progress(
        *columns,
        console=console,
        transient=transient,
        disable=disable,
    )
def track(
    sequence: Union[Sequence[ProgressType], Iterable[ProgressType]],
    description: str = None,
    total: Optional[int] = None,
    advance: int = 1,
    transient: bool = False,
    console: Optional[Console] = None,
    disable: bool = False,
) -> Iterable[ProgressType]:
    """Track progress by iterating over a sequence.
    Args:
        sequence (Iterable[ProgressType]): A sequence (must support "len") you wish to iterate over.
        description (str, optional): Description of task show next to progress bar. Defaults to `None`.
        total (int, optional): Total number of steps. Default is len(sequence).
        advance (int, optional): Number of steps to advance progress by. Defaults to 1. Total / advance must less than or equal to len(sequence) for progress to reach finished state.
        transient (bool, optional): Clear the progress on exit. Defaults to False.
        console (Console, optional): Console to write to. Default creates internal Console instance.
        disable (bool, optional): Disable display of progress.
    Returns:
        Iterable[ProgressType]: An iterable of the values in the sequence.
    """
    progress = _autolabel_progress(
        description=description,
        transient=transient,
        console=console,
        disable=disable,
    )
    if total is None:
        # len() requires `sequence` to be sized; pass `total` explicitly when
        # iterating a plain (unsized) iterable.
        total = len(sequence)
    with progress:
        progress_task = progress.add_task(description, total=total)
        for value in sequence:
            # Yield first so the bar advances only after the caller has
            # processed the item.
            yield value
            # Clamp the advance so the bar never overshoots `total`.
            progress.advance(
                progress_task,
                advance=min(advance, total - progress.tasks[progress_task].completed),
            )
            progress.refresh()
async def gather_async_tasks_with_progress(
    tasks: Iterable,
    description: str = None,
    total: Optional[int] = None,
    advance: int = 1,
    transient: bool = False,
    console: Optional[Console] = None,
    disable: bool = False,
) -> Iterable:
    """Gather async tasks with progress bar
    Args:
        tasks (Iterable): A sequence of async tasks you wish to gather.
        description (str, optional): Description of task show next to progress bar. Defaults to `None`.
        total (int, optional): Total number of steps. Default is len(sequence).
        advance (int, optional): Number of steps to advance progress by. Defaults to 1. Total / advance must less than or equal to len(sequence) for progress to reach finished state.
        transient (bool, optional): Clear the progress on exit. Defaults to False.
        console (Console, optional): Console to write to. Default creates internal Console instance.
        disable (bool, optional): Disable display of progress.
    Returns:
        Iterable: Returns an iterable of the results of the async tasks.
    """
    progress = _autolabel_progress(
        description=description,
        transient=transient,
        console=console,
        disable=disable,
    )
    if total is None:
        # len() requires `tasks` to be sized; pass `total` explicitly otherwise.
        total = len(tasks)
    async def _task_with_tracker(task, progress, progress_task):
        # Await the wrapped task, then bump the shared progress bar so the
        # display advances as each task finishes (in completion order).
        res = await task
        progress.advance(
            progress_task,
            advance=min(advance, total - progress.tasks[progress_task].completed),
        )
        progress.refresh()
        return res
    with progress:
        progress_task = progress.add_task(description, total=total)
        # Wrap every task so any completion updates the single shared bar;
        # gather still returns results in the original submission order.
        tasks = [_task_with_tracker(task, progress, progress_task) for task in tasks]
        return await asyncio.gather(*tasks)
def track_with_stats(
    sequence: Union[Sequence[ProgressType], Iterable[ProgressType]],
    stats: Dict[str, str],
    description: str = None,
    total: Optional[float] = None,
    advance: int = 1,
    transient: bool = False,
    console: Optional[Console] = None,
    disable: bool = False,
) -> Iterable[ProgressType]:
    """Track progress and displays stats by iterating over a sequence.
    Args:
        sequence (Iterable[ProgressType]): A sequence (must support "len") you wish to iterate over.
        stats (Dict[str, str]): A dictionary of stats to display. Mutations made by the caller between iterations are picked up on the next refresh.
        description (str, optional): Description of task show next to progress bar. Defaults to `None`.
        total (float, optional): Total number of steps. Default is len(sequence).
        advance (int, optional): Number of steps to advance progress by. Defaults to 1. Total / advance must less than or equal to len(sequence) for progress to reach finished state.
        transient (bool, optional): Clear the progress on exit. Defaults to False.
        console (Console, optional): Console to write to. Default creates internal Console instance.
        disable (bool, optional): Disable display of progress.
    Returns:
        Iterable[ProgressType]: An iterable of the values in the sequence.
    """
    progress = _autolabel_progress(
        description=description,
        transient=transient,
        console=console,
        disable=disable,
    )
    # A second Progress renders the stats line beneath the main bar.
    stats_progress = Progress(
        TextColumn("{task.fields[stats]}"),
        console=console,
    )
    # Group both displays into one Live region so they update together.
    group = Group(progress, stats_progress)
    live = Live(group, console=console)
    if total is None:
        total = len(sequence)
    with live:
        progress_task = progress.add_task(description=description, total=total)
        stats_task = stats_progress.add_task(
            "Stats", stats=", ".join(f"{k}={v}" for k, v in stats.items())
        )
        for value in sequence:
            # Yield first so stats mutated by the caller are shown on refresh.
            yield value
            progress.advance(
                progress_task,
                advance=min(advance, total - progress.tasks[progress_task].completed),
            )
            stats_progress.update(
                stats_task, stats=", ".join(f"{k}={v}" for k, v in stats.items())
            )
            live.refresh()
def maybe_round(value: Any) -> Any:
    """Round `value` to 4 decimal places if it supports rounding; otherwise return it unchanged."""
    return round(value, 4) if hasattr(value, "__round__") else value
def print_table(
    data: Dict,
    show_header: bool = True,
    console: Optional[Console] = None,
    default_style: str = "bold",
    styles: Optional[Dict] = None,
) -> None:
    """Print a table of data.

    Args:
        data (Dict[str, List]): A dictionary of data to print.
        show_header (bool, optional): Show the header row. Defaults to True.
        console (Console, optional): Console to write to. Default creates internal Console instance.
        default_style (str, optional): Default style to apply to the table. Defaults to "bold".
        styles (Dict, optional): Mapping of column name to rich style; columns
            not present fall back to `default_style`.
    """
    # Avoid a mutable default argument; None means "no per-column styles".
    if styles is None:
        styles = {}
    # Normalize every cell to a string, wrapping scalar values in a
    # single-element list so all columns can be zipped row-wise below.
    data = {
        str(key): [str(maybe_round(v)) for v in value]
        if isinstance(value, List)
        else [str(maybe_round(value))]
        for key, value in data.items()
    }
    table = Table(show_header=show_header)
    for key in data:
        table.add_column(key, style=styles.get(key, default_style))
    # enumerate() was dropped: the row index was never used.
    for row in zip(*data.values()):
        table.add_row(*row)
    console = console or Console()
    console.print(table)
def get_data(dataset_name: str, force: bool = False):
    """Download Datasets

    Args:
        dataset_name (str): dataset name
        force (bool, optional): if set to True, downloads and overwrites the local test and seed files;
            if False, downloads only if the files are not present locally
    """

    def download_bar(current, total, width=80):
        """Custom progress bar for downloading data."""
        width = shutil.get_terminal_size()[0] // 2
        # Multiply before dividing: the previous `current // total * 100`
        # floored to 0 for every partial chunk, so the bar jumped straight
        # from 0% to 100% at the end of the download.
        percent = current * 100 // total
        filled = current * width // total
        print(
            f"{percent}% [{'.' * filled}] [{current}/{total}] bytes",
            end="\r",
        )

    def download(url: str) -> None:
        """Download the file at `url` into the current directory (honoring `force`)."""
        file_name = os.path.basename(url)
        if force and os.path.exists(file_name):
            print(f"File {file_name} exists. Removing")
            os.remove(file_name)
        if not os.path.exists(file_name):
            print(f"Downloading example dataset from {url} to {file_name}...")
            wget.download(url, bar=download_bar)

    if dataset_name not in EXAMPLE_DATASETS:
        logger.error(
            f"{dataset_name} not in list of available datasets: {str(EXAMPLE_DATASETS)}. Exiting..."
        )
        return

    seed_url = DATASET_URL.format(dataset=dataset_name, partition="seed")
    test_url = DATASET_URL.format(dataset=dataset_name, partition="test")
    try:
        # Some datasets ship without a seed partition; skip it for those.
        if dataset_name not in NO_SEED_DATASET:
            download(seed_url)
        download(test_url)
    except Exception as e:
        logger.error(f"Error downloading dataset: {e}")
def normalize_text(s: str) -> str:
    """Normalize text: lowercase, strip punctuation, drop articles, collapse whitespace."""
    lowered = s.lower()
    # Delete every punctuation character in a single translate pass.
    without_punct = lowered.translate(str.maketrans("", "", string.punctuation))
    # Replace standalone articles with a space (cleaned up by the final join).
    without_articles = re.sub(r"\b(a|an|the)\b", " ", without_punct, flags=re.UNICODE)
    return " ".join(without_articles.split())
def in_notebook():
    """
    Check if we are in a notebook. Taken from https://stackoverflow.com/a/39662359/9263185
    """
    try:
        from IPython import get_ipython

        # Outside IPython, get_ipython() returns None and .config raises
        # AttributeError, which we treat as "not in a notebook".
        return "IPKernelApp" in get_ipython().config
    except (ImportError, AttributeError):
        return False
from typing import Dict, List, Union, Callable
import pandas as pd
from autolabel.configs import AutolabelConfig
from autolabel.dataset.validation import TaskDataValidation
from autolabel.schema import MetricResult, LLMAnnotation
from tabulate import tabulate
import logging
from autolabel.utils import print_table
from rich.console import Console
import json
import pickle
from autolabel.tasks import TaskFactory
logger = logging.getLogger(__name__)
METRIC_TABLE_STYLE = "cyan bold"
class AutolabelDataset:
    """The dataset for handling all operations on the dataset."""
    # Rows parsed into a list of {column: value} records.
    inputs: List[Dict]
    # Underlying pandas dataframe, sliced to the requested window.
    df: pd.DataFrame
    # Ground-truth labels from the configured label column, or None if absent.
    gt_labels: List
    # Project configuration driving parsing (delimiter, label column, ...).
    config: AutolabelConfig
    # NOTE(review): this is a plain class (not a pydantic BaseModel), so this
    # pydantic-style inner Config likely has no effect — confirm.
    class Config:
        arbitrary_types_allowed = True
def __init__(
self,
dataset: Union[pd.DataFrame, str],
config: Union[AutolabelConfig, str, Dict],
max_items: int = None,
start_index: int = 0,
validate: bool = False,
) -> None:
"""
Initializes the dataset.
Args:
dataset: The dataset to be used for labeling. Could be a path to a csv/jsonl file or a pandas dataframe.
config: The config to be used for labeling. Could be a path to a json file or a dictionary.
max_items: The maximum number of items to be parsed into the dataset object.
start_index: The index to start parsing the dataset from.
validate: Whether to validate the dataset or not.
"""
if not (isinstance(config, AutolabelConfig)):
self.config = AutolabelConfig(config)
else:
self.config = config
if isinstance(dataset, str):
if dataset.endswith(".csv"):
delimiter = self.config.delimiter()
df = pd.read_csv(dataset, sep=delimiter, dtype="str")
elif dataset.endswith(".jsonl"):
df = pd.read_json(dataset, lines=True, dtype="str")
elif isinstance(dataset, pd.DataFrame):
df = dataset.copy()
df = df[start_index:]
if max_items and max_items > 0:
max_items = min(max_items, len(df))
df = df[:max_items]
inputs = df.to_dict(orient="records")
label_column = self.config.label_column()
gt_labels = (
None
if not label_column or not len(inputs) or label_column not in inputs[0]
else df[label_column].tolist()
)
self.df = df
self.inputs = inputs
self.gt_labels = gt_labels
if validate:
self._validate()
def __repr__(self):
"""
Returns the representation of the dataset. We currently represent the dataset as a pandas dataframe.
"""
if self.df is not None:
return self.df.__repr__()
def __str__(self):
if self.df is not None:
return self.df.__str__()
def get_slice(self, max_items: int = None, start_index: int = 0):
df = self.df[start_index:]
if max_items and max_items > 0:
max_items = min(max_items, len(df))
df = df[:max_items]
return AutolabelDataset(df, self.config)
def process_labels(
self, llm_labels: List[LLMAnnotation], metrics: List[MetricResult] = None
):
# Add the LLM labels to the dataframe
self.df[self.generate_label_name("label")] = [x.label for x in llm_labels]
# Add the LLM errors to the dataframe
self.df[self.generate_label_name("error")] = [x.error for x in llm_labels]
# Add labeled success column to the dataframe
self.df[self.generate_label_name("successfully_labeled")] = [
x.successfully_labeled for x in llm_labels
]
# Add the LLM annotations to the dataframe
self.df[self.generate_label_name("annotation")] = llm_labels
# Add row level LLM metrics to the dataframe
if metrics is not None:
for metric in metrics:
if (
isinstance(metric.value, list)
and len(metric.value) == self.df.shape[0]
):
self.df[self.generate_label_name(metric.name)] = metric.value
# Add the LLM confidence scores to the dataframe if confidence is set in config
if self.config.confidence():
self.df[self.generate_label_name("confidence")] = [
x.confidence_score for x in llm_labels
]
# Add the LLM explanations to the dataframe if chain of thought is set in config
if self.config.chain_of_thought():
self.df[self.generate_label_name("explanation")] = [
l.explanation for l in llm_labels
]
def save(self, output_file_name: str):
"""
Saves the dataset to a file based on the file extension.
Args:
output_file_name: The name of the file to save the dataset to. Based on the extension we can save to a csv or jsonl file.
"""
if output_file_name.endswith(".csv"):
self.df.to_csv(
str(output_file_name),
sep=self.config.delimiter(),
header=True,
index=False,
)
elif output_file_name.endswith(".jsonl"):
self.df.to_json(
str(output_file_name),
orient="records",
lines=True,
force_ascii=False,
)
else:
raise ValueError(f"Unsupported output file format: {output_file_name}")
def filter(
self, label: str = None, ground_truth: str = None, filter_func: Callable = None
):
"""
Filter the dataset based on the label, ground truth or a custom filter function.
In case multiple filters are applied, the filters are applied in the following order:
label -> ground_truth -> filter_func
Args:
label: The llm label to filter on.
ground_truth: The ground truth label to filter on.
filter_func: A custom filter function to filter on.
"""
filtered_df = self.df
if label:
filtered_df = filtered_df[
filtered_df[self.generate_label_name("label")] == label
]
if ground_truth:
filtered_df = filtered_df[
filtered_df[self.config.label_column()] == ground_truth
]
if filter_func:
filtered_df = filtered_df.apply(filter_func, axis=1)
return AutolabelDataset(
filtered_df,
self.config,
)
def non_completed(self):
"""
Filter the dataset to only include non completed items. This means the labels
where the llm was not able to generate a label or there was some error while
generating the label.
"""
filtered_df = self.df[self.df[self.generate_label_name("error")].notnull()]
return AutolabelDataset(filtered_df, self.config)
def completed(self):
"""
Filter the dataset to only include completed items. This means the labels
where the llm was able to generate a label successfully.
"""
filtered_df = self.df[self.df[self.generate_label_name("error")].isnull()]
return AutolabelDataset(filtered_df, self.config)
def incorrect(self, label: str = None, ground_truth: str = None):
"""
Filter the dataset to only include incorrect items. This means the labels
where the llm label was incorrect.
Args:
label: The llm label to filter on.
ground_truth: The ground truth label to filter on.
"""
gt_label_column = self.config.label_column()
if gt_label_column is None:
raise ValueError(
"Cannot compute mistakes without ground truth label column"
)
filtered_df = self.df[
self.df[self.generate_label_name("label")] != self.df[gt_label_column]
]
if label:
filtered_df = filtered_df[
filtered_df[self.generate_label_name("label")] == label
]
if ground_truth:
filtered_df = filtered_df[filtered_df[gt_label_column] == ground_truth]
return AutolabelDataset(filtered_df, self.config)
def correct(self):
"""
Filter the dataset to only include correct items. This means the labels
where the llm label was correct.
"""
gt_label_column = self.config.label_column()
if gt_label_column is None:
raise ValueError("Cannot compute correct without ground truth label column")
filtered_df = self.df[
self.df[self.generate_label_name("label")] == self.df[gt_label_column]
]
return AutolabelDataset(filtered_df, self.config)
def filter_by_confidence(self, threshold: float = 0.5):
"""
Filter the dataset to only include items with confidence scores greater than the threshold.
Args:
threshold: The threshold to filter on. This means that only items with confidence scores greater than the threshold will be included.
"""
if not self.config.confidence():
raise ValueError(
"Cannot compute correct and confident without confidence scores"
)
filtered_df = self.df[
self.df[self.generate_label_name("confidence")] >= threshold
]
return AutolabelDataset(filtered_df, self.config)
def eval(self):
"""
Evaluate the dataset based on the task. We run the metrics that were
specified by the task being run.
"""
gt_label_column = self.config.label_column()
if gt_label_column is None:
raise ValueError("Cannot compute eval without ground truth label column")
gt_labels = self.df[gt_label_column]
llm_labels = self.df[self.generate_label_name("annotation")].tolist()
task = TaskFactory.from_config(self.config)
metrics = task.eval(llm_labels, gt_labels)
table = {}
for metric in metrics:
if not isinstance(metric.value, list):
table[metric.name] = metric.value
print_table(table, console=Console(), default_style=METRIC_TABLE_STYLE)
return metrics
def columns(self):
"""
Returns the columns in the dataframe.
"""
return self.df.columns.tolist()
def _validate(self):
"""
Validate the dataset by looking at all rows and making sure
that they follow the schema.
"""
data_validation = TaskDataValidation(config=self.config)
# Validate columns
data_validation.validate_dataset_columns(dataset_columns=self.columns())
# Validate datatype and data format
self.__malformed_records = data_validation.validate(data=self.inputs)
table = tabulate(
self.__malformed_records[0 : self.MAX_ERROR_DISPLAYED],
headers="keys",
tablefmt="fancy_grid",
numalign="center",
stralign="left",
)
if len(self.__malformed_records) > 0:
logger.warning(
f"Data Validation failed for {len(self.__malformed_records)} records: \n Stats: \n {table}"
)
raise DataValidationFailed(
f"Validation failed for {len(self.__malformed_records)} rows."
)
def generate_label_name(self, col_name: str):
return f"{self.config.task_name()}_{col_name}"
class DataValidationFailed(Exception):
    """Raised when one or more dataset rows fail schema/format validation."""

    def __init__(self, message):
        self.message = message
        super().__init__(message)
import json
import re
from functools import cached_property
from json.decoder import JSONDecodeError
from typing import Dict, List, Optional, Union
from autolabel.configs import AutolabelConfig
from pydantic import BaseModel, ValidationError, create_model, root_validator
from pydantic.types import StrictStr
# Regex pattern to extract expected columns from config.example_template(),
# i.e. every `{placeholder}` token in the template.
EXPECTED_COLUMN_PATTERN = r"\{([^}]*)\}"
class NERTaskValidate(BaseModel):
    """Validate NER Task

    The label column for NER must be a JSON object (serialized dict) mapping
    entity labels to extracted values.
    """

    label_column: Optional[str]
    labels_set: Optional[set]  # A NER Task should have a unique set of labels in config

    def validate(self, value: str):
        """Validate a single NER ground-truth value.

        A NER label can only be a dictionary. Raises a ValueError (or
        JSONDecodeError, itself a ValueError subclass) so pydantic reports
        the row as a validation failure.
        """
        # TODO: This can be made better
        if value.startswith("{") and value.endswith("}"):
            try:
                seed_labels = json.loads(value)
                unmatched_label = set(seed_labels.keys()) - self.labels_set
                if len(unmatched_label) != 0:
                    raise ValueError(
                        f"labels: '{unmatched_label}' not in prompt/labels provided in config "
                    )
            except JSONDecodeError:
                # Re-raise so the malformed-JSON row is reported upstream.
                raise
        else:
            # Previously a bare `raise` here (with no active exception)
            # produced RuntimeError("No active exception to re-raise").
            # Raise an explicit ValueError so it surfaces as a validation error.
            raise ValueError(
                f"value: '{value}' is not a JSON dictionary of labels as expected"
            )
class ClassificationTaskValidate(BaseModel):
    """Validate Classification Task

    The label column can either be a single label string or a stringified
    list of labels.
    """

    label_column: Optional[str]
    labels_set: Optional[
        set
    ]  # A classification Task should have a unique set of labels in config

    def validate(self, value: str):
        """Validate classification

        A classification label(ground_truth) could either be a list or string.
        Raises ValueError/SyntaxError on malformed or unknown labels.
        """
        # TODO: This can be made better
        if value.startswith("[") and value.endswith("]"):
            try:
                # ast.literal_eval only evaluates Python literals; eval()
                # would execute arbitrary expressions from the (untrusted)
                # dataset. Non-literal content now raises ValueError instead
                # of executing.
                import ast

                seed_labels = ast.literal_eval(value)
                if not isinstance(seed_labels, list):
                    # Previously a bare `raise` with no active exception,
                    # which produced an unhelpful RuntimeError.
                    raise ValueError(
                        f"value: '{value}' is not a list of labels as expected"
                    )
                unmatched_label = set(seed_labels) - self.labels_set
                if len(unmatched_label) != 0:
                    raise ValueError(
                        f"labels: '{unmatched_label}' not in prompt/labels provided in config "
                    )
            except (SyntaxError, ValueError):
                raise
        else:
            if value not in self.labels_set:
                raise ValueError(
                    f"labels: '{value}' not in prompt/labels provided in config "
                )
class EMTaskValidate(BaseModel):
    """Validate Entity Matching Task

    As of now we assume that the input label_column is a string
    """

    label_column: Optional[str]
    labels_set: Optional[
        set
    ]  # An EntityMatching Task should have a unique set of labels in config

    def validate(self, value: str):
        """Check that the ground-truth label is one of the configured labels."""
        if value in self.labels_set:
            return
        raise ValueError(
            f"labels: '{value}' not in prompt/labels provided in config "
        )
class QATaskValidate(BaseModel):
    """Validate Question Answering Task

    As of now we assume that the input label_column is a string
    """

    label_column: Optional[str]
    labels_set: Optional[
        set
    ]  # A QA task may or may not have a unique set of label list

    def validate(self, value: str):
        """No-op: QA answers are free-form text, so any string is accepted."""
        return None
class MLCTaskValidate(BaseModel):
    """Validate Multilabel Classification Task

    As of now we assume that the input label_column is a string.
    The label column can be a delimited string or a stringified list.
    """

    label_column: Optional[str]
    labels_set: Optional[
        set
    ]  # A Multilabel Classification Task should have a unique set of labels in config

    def validate(self, value: str):
        """Validate a multilabel ground-truth value.

        Raises ValueError/SyntaxError for malformed lists or unknown labels.
        """
        if value.startswith("[") and value.endswith("]"):
            try:
                # ast.literal_eval only evaluates Python literals; eval()
                # would execute arbitrary expressions from the (untrusted)
                # dataset.
                import ast

                seed_labels = ast.literal_eval(value)
                if not isinstance(seed_labels, list):
                    raise ValueError(
                        f"value: '{value}' is not a list of labels as expected"
                    )
                unmatched_label = set(seed_labels) - self.labels_set
                if len(unmatched_label) != 0:
                    raise ValueError(
                        f"labels: '{unmatched_label}' not in prompt/labels provided in config "
                    )
            except (SyntaxError, ValueError):
                raise
        else:
            # TODO: split by delimiter specified in config and validate each label
            pass
# Union of all per-task validator models; DataValidationTasks selects the
# concrete validator for a given task_type at runtime.
TaskTypeValidate = Union[
    NERTaskValidate,
    ClassificationTaskValidate,
    EMTaskValidate,
    QATaskValidate,
    MLCTaskValidate,
]
class DataValidationTasks(BaseModel):
    """Registry mapping each supported task_type string to its validator class.

    TaskDataValidation looks validators up via DataValidationTasks.__dict__[task_type],
    so each attribute name must match a TaskType value exactly.
    """

    classification: TaskTypeValidate = ClassificationTaskValidate
    named_entity_recognition: TaskTypeValidate = NERTaskValidate
    entity_matching: TaskTypeValidate = EMTaskValidate
    question_answering: TaskTypeValidate = QATaskValidate
    multilabel_classification: TaskTypeValidate = MLCTaskValidate
class TaskDataValidation:
    """Validates dataset rows against the schema implied by the config's
    example template and task type."""

    def __init__(self, config: AutolabelConfig):
        """Task Validation

        Args:
            config: AutolabelConfig = User passed parsed configuration
        """
        # the type of task, classification, named_entity_recognition, etc.., "config/task_type"
        task_type: str = config.task_type()
        # the label column as specified in config, "config/dataset/label_column"
        label_column: str = config.label_column()
        # list of valid labels provided in config "config/prompt/labels"
        labels_list: Optional[List] = config.labels_list()
        # example template from config "config/prompt/example_template"
        self.example_template: str = config.example_template()
        # the explanation column as specified in config, "config/dataset/explanation_column"
        self.explanation_column: str = config.explanation_column()
        # the label column as specified in config, "config/dataset/label_column"
        self.label_column = config.label_column()

        # Pydantic schema: every expected column must be a strict string.
        self.__schema = {col: (StrictStr, ...) for col in self.expected_columns}

        # Guard against a missing labels list (e.g. question answering tasks
        # without a configured label set); set(None) would raise TypeError.
        self.__validation_task = DataValidationTasks.__dict__[task_type](
            label_column=label_column,
            labels_set=set(labels_list) if labels_list else set(),
        )
        self.__data_validation = self.data_validation_and_schema_check(
            self.__validation_task
        )

    @cached_property
    def expected_columns(self) -> List:
        """Fetch expected columns

        Parses every `{placeholder}` in the example template, excluding the
        explanation and label columns (which are validated separately).
        """
        column_name_lists = []
        for text in self.example_template.split("\n"):
            matches = re.findall(EXPECTED_COLUMN_PATTERN, text)
            column_name_lists += matches
        if self.explanation_column and self.explanation_column in column_name_lists:
            column_name_lists.remove(self.explanation_column)
        if self.label_column and self.label_column in column_name_lists:
            column_name_lists.remove(self.label_column)
        return column_name_lists

    @property
    def schema(self) -> Dict:
        """Fetch the pydantic field schema for the expected columns."""
        return self.__schema

    @property
    def validation_task(
        self,
    ) -> TaskTypeValidate:
        """Fetch validation task"""
        return self.__validation_task

    def data_validation_and_schema_check(self, validation_task: BaseModel):
        """Validate data format and datatype

        Args:
            validation_task (TaskTypeValidate): validation task

        Raises:
            e: Validation error if the inputs are not string
            e: Validation error if validation_task fails

        Returns:
            DataValidation: Pydantic Model for validation
        """
        Model = create_model("Model", **self.__schema)

        class DataValidation(BaseModel):
            """Data Validation"""

            # We define validate as a classmethod such that a dynamic `data` can be passed
            # iteratively to the validate method using `DataValidation.validate`
            @classmethod
            def validate(cls, data):
                """Valdiate data types"""
                model = Model(**data)
                try:
                    # We perform the normal pydantic validation here
                    # This checks both the Schema and also calls check_fields
                    cls(**model.dict())
                except ValidationError as e:
                    raise e

            @root_validator(pre=True, allow_reuse=True)
            def check_fields(cls, values):
                """Validate data format (delegates to the per-task validator)."""
                try:
                    if (
                        validation_task.label_column
                        and validation_task.label_column in values
                    ):
                        label_column_value = values[validation_task.label_column]
                        validation_task.validate(label_column_value)
                except ValidationError as e:
                    raise e

        return DataValidation

    def validate(self, data: List[dict]) -> List[Dict]:
        """Validate rows, returning one error record per failed field.

        Each error record carries the row number, field location, message,
        and pydantic error type.
        """
        error_messages = []
        for index, item in enumerate(data):
            try:
                self.__data_validation.validate(item)
            except ValidationError as e:
                for err in e.errors():
                    field = ".".join(err["loc"])
                    error_messages += [
                        {
                            "row_num": index,
                            "loc": field,
                            "msg": err["msg"],
                            "type": err["type"],
                        }
                    ]
        return error_messages

    def validate_dataset_columns(self, dataset_columns: List):
        """Validate columns

        Validate if the columns mentioned in example_template dataset are correct
        and are contained within the columns of the dataset(seed.csv)
        """
        missing_columns = set(self.expected_columns) - set(dataset_columns)
        assert (
            len(missing_columns) == 0
        ), f"columns={missing_columns} missing in seed.csv file"
from typing import Optional, Dict, List
import csv
import json
from rich import print
from rich.prompt import Prompt, IntPrompt, FloatPrompt, Confirm
from simple_term_menu import TerminalMenu
import pandas as pd
from autolabel import AutolabelDataset
from autolabel.configs import AutolabelConfig as ALC
from autolabel.configs.schema import schema
from autolabel.schema import TaskType, FewShotAlgorithm, ModelProvider
from autolabel.tasks import TASK_TYPE_TO_IMPLEMENTATION
from autolabel.cli.template import (
TEMPLATE_CONFIG,
TEMPLATE_TASK_NAME,
TEMPLATE_LABEL_SEPARATOR,
)
def _get_sub_config(key: str, **kwargs) -> Dict:
    """Get a sub-configuration dictionary for the specified key.

    Collects keyword arguments named "{key}_{property}" for every property
    declared in the schema under `key`, normalizes escaped newlines in string
    values, and overlays the result onto the key's template configuration.

    Args:
        key (str): The key for which the sub-configuration is to be generated.
        **kwargs: Keyword arguments containing configuration values with keys in the format "{key}_{property}".

    Returns:
        Dict: A dictionary containing the sub-configuration for the specified key.
    """
    overrides = {}
    prefix = f"{key}_"
    for prop in schema["properties"][key]["properties"]:
        value = kwargs.get(prefix + prop)
        if value is None:
            continue
        if isinstance(value, str):
            # CLI users type "\n" literally; convert it to a real newline.
            value = value.replace("\\n", "\n")
        overrides[prop] = value
    return {**TEMPLATE_CONFIG[key], **overrides}
def _get_labels_from_seed(df: pd.DataFrame, config: Dict) -> List[str]:
    """Get the list of unique labels from the given DataFrame based on the task type specified in the configuration.

    Args:
        df (pd.DataFrame): The DataFrame containing the dataset.
        config (Dict): Configuration settings for the labeling task.

    Returns:
        List[str]: A list of unique labels extracted from the DataFrame based
        on the task type (implicitly None for unsupported task types).
    """
    task_type = config[ALC.TASK_TYPE_KEY]
    dataset_config = config[ALC.DATASET_CONFIG_KEY]
    label_column = dataset_config[ALC.LABEL_COLUMN_KEY]

    if task_type in (
        TaskType.CLASSIFICATION.value,
        TaskType.ENTITY_MATCHING.value,
    ):
        return df[label_column].unique().tolist()
    if task_type == TaskType.NAMED_ENTITY_RECOGNITION.value:
        # NER labels are stored as JSON dicts; the set of labels is the set
        # of keys observed across all rows.
        parsed = df[label_column].apply(json.loads)
        return list(pd.json_normalize(parsed).columns)
    if task_type == TaskType.MULTILABEL_CLASSIFICATION.value:
        separator = dataset_config[ALC.LABEL_SEPARATOR_KEY]
        return df[label_column].str.split(separator).explode().unique().tolist()
def init(
    seed: Optional[str] = None,
    task_name: Optional[str] = None,
    task_type: Optional[str] = None,
    guess_labels: bool = False,
    **kwargs,
):
    """Initialize and create a configuration for the Autolabel task.

    This function takes various arguments to set up the configuration for the Autolabel task,
    including the task name, task type, dataset configuration, model configuration, prompt configuration,
    and more. If guess_labels is True and a seed file is provided, it attempts to infer labels from the seed data.

    Args:
        seed (Optional[str]): Path to the seed file containing the dataset (default is None).
        task_name (Optional[str]): Name of the task (default is None, which uses a template name).
        task_type (Optional[str]): Type of the task (default is None, which uses a template type).
        guess_labels (bool): Whether to attempt inferring labels from the seed data (default is False).
        **kwargs: Additional keyword arguments for configuring the dataset, model, and prompt.

    Returns:
        None: The function writes the generated configuration to a JSON file.
    """
    if not task_name:
        task_name = TEMPLATE_CONFIG[ALC.TASK_NAME_KEY]
    try:
        # Validate task_type against the TaskType enum; fall back to the
        # template default on any invalid/None value.
        TaskType(task_type)
    except ValueError:
        task_type = TEMPLATE_CONFIG[ALC.TASK_TYPE_KEY]
    config = {ALC.TASK_NAME_KEY: task_name, ALC.TASK_TYPE_KEY: task_type}

    if (
        task_type == TaskType.MULTILABEL_CLASSIFICATION.value
        # .get() instead of [] so direct callers that omit the key do not
        # crash with a KeyError.
        and kwargs.get("dataset_label_separator") is None
    ):
        kwargs["dataset_label_separator"] = TEMPLATE_LABEL_SEPARATOR
    config[ALC.DATASET_CONFIG_KEY] = _get_sub_config("dataset", **kwargs)

    config[ALC.MODEL_CONFIG_KEY] = _get_sub_config("model", **kwargs)

    if guess_labels and seed:
        try:
            # Only sample the first 100 rows to keep label inference fast.
            df = pd.read_csv(
                seed,
                delimiter=config[ALC.DATASET_CONFIG_KEY][ALC.DELIMITER_KEY],
                nrows=100,
            )
            labels = _get_labels_from_seed(df, config)
            if labels:
                kwargs["prompt_labels"] = labels
        except Exception:
            # Best-effort: a malformed seed file should not abort config creation.
            print("[red]Failed to infer labels[/red]")
    # TODO: add automatic example template generation
    config[ALC.PROMPT_CONFIG_KEY] = _get_sub_config("prompt", **kwargs)

    print(config)

    config_name = (
        config[ALC.TASK_NAME_KEY]
        if config[ALC.TASK_NAME_KEY] != TEMPLATE_TASK_NAME
        else "template"
    )
    print(f"Writing config to {config_name}_config.json")
    with open(f"{config_name}_config.json", "w") as config_file:
        json.dump(config, config_file, indent=4)
def _create_dataset_config_wizard(
    task_type: TaskType, seed: Optional[str] = None
) -> Dict:
    """Create a dataset configuration based on user input and task type.

    This function interacts with the user through prompts to set up the dataset configuration
    for the Autolabel task. The user provides details like delimiter, label column, explanation column,
    and label separator (for multi-label classification) to create the dataset configuration dictionary.

    Args:
        task_type (TaskType): Type of the task, such as classification or multi-label classification.
        seed (Optional[str]): Path to the seed file containing the dataset (default is None).

    Returns:
        Dict: A dictionary containing the dataset configuration for the Autolabel task.
    """
    print("[bold]Dataset Configuration[/bold]")
    dataset_config = {}
    detected_delimiter = ","
    if seed:
        if seed.endswith(".csv"):
            try:
                # Sniff the delimiter from the first KB of the seed CSV;
                # silently fall back to a comma on any sniffing failure.
                with open(seed, "r") as f:
                    dialect = csv.Sniffer().sniff(f.read(1024))
                    f.seek(0)
                    detected_delimiter = dialect.delimiter
            except Exception:
                pass
    delimiter = Prompt.ask(
        "Enter the delimiter",
        default=detected_delimiter,
    )
    dataset_config[ALC.DELIMITER_KEY] = delimiter
    if seed:
        # With a seed file, offer the actual header columns via a menu
        # (nrows=0 reads only the header row).
        df = pd.read_csv(seed, delimiter=delimiter, nrows=0)
        column_names = df.columns.tolist()
        label_column = column_names[
            TerminalMenu(column_names, title="Choose a label column").show()
        ]
        dataset_config[ALC.LABEL_COLUMN_KEY] = label_column

        # First menu entry (None) means "no explanation column".
        options = [None] + column_names
        explanation_column = options[
            TerminalMenu(options, title="Choose an explanation column").show()
        ]
        if explanation_column:
            dataset_config[ALC.EXPLANATION_COLUMN_KEY] = explanation_column
    else:
        # No seed file: fall back to free-text prompts; the label column
        # is mandatory, the explanation column is optional.
        label_column = Prompt.ask("Enter the label column name")
        while not label_column:
            print("[red]The label column name cannot be blank[/red]")
            label_column = Prompt.ask("Enter the label column name")
        dataset_config[ALC.LABEL_COLUMN_KEY] = label_column

        explanation_column = Prompt.ask(
            "Enter the explanation column name (optional)", default=None
        )
        if explanation_column:
            dataset_config[ALC.EXPLANATION_COLUMN_KEY] = explanation_column

    if task_type == TaskType.MULTILABEL_CLASSIFICATION:
        # Multilabel tasks additionally need the separator used to join labels.
        label_separator = Prompt.ask(
            "Enter the label separator",
            default=";",
        )
        dataset_config[ALC.LABEL_SEPARATOR_KEY] = label_separator

    return dataset_config
def _create_model_config_wizard() -> Dict:
    """Create a model configuration through interactive prompts.

    This function guides the user through interactive prompts to set up the model configuration
    for the Autolabel task. The user provides details such as the model provider, model name,
    model parameters, and whether the model should compute confidence or use logit bias.

    Returns:
        Dict: A dictionary containing the model configuration for the Autolabel task.
    """
    print("[bold]Model Configuration[/bold]")
    model_config = {}
    options = [p.value for p in ModelProvider]
    model_config[ALC.PROVIDER_KEY] = options[
        TerminalMenu(options, title="Enter the model provider").show()
    ]
    model_config[ALC.MODEL_NAME_KEY] = Prompt.ask("Enter the model name")

    # Collect arbitrary key/value model parameters until the user enters a
    # blank name; values are coerced in order: bool -> int -> float -> str.
    model_params = {}
    model_param = Prompt.ask(
        "Enter a model parameter name (or leave blank for none)",
        default=None,
    )
    while model_param:
        model_param_value = Prompt.ask(
            f"Enter the value for {model_param}",
        )
        if model_param_value.lower() in ["true", "false"]:
            model_params[model_param] = model_param_value.lower() == "true"
        elif model_param_value.isdigit():
            model_params[model_param] = int(model_param_value)
        else:
            try:
                model_params[model_param] = float(model_param_value)
            except ValueError:
                # Not numeric: keep the raw string.
                model_params[model_param] = model_param_value
        model_param = Prompt.ask(
            "Enter a model parameter name (or leave blank to finish)",
            default=None,
        )
    if model_params:
        model_config[ALC.MODEL_PARAMS_KEY] = model_params

    model_config[ALC.COMPUTE_CONFIDENCE_KEY] = Confirm.ask(
        "Should the model compute confidence?", default=False
    )

    # NOTE(review): PROVIDER_KEY holds the provider's *string* value, while the
    # list contains ModelProvider enum members; this membership test only
    # matches if ModelProvider is a str-based enum — confirm.
    if model_config[ALC.PROVIDER_KEY] in [
        ModelProvider.HUGGINGFACE_PIPELINE,
        ModelProvider.OPENAI,
    ]:
        model_config[ALC.LOGIT_BIAS_KEY] = FloatPrompt.ask(
            "What is the strength of logit bias?", default=0.0
        )

    return model_config
def _create_prompt_config_wizard(config: Dict, seed: Optional[str] = None) -> Dict:
    """Create a prompt configuration through interactive prompts.

    This function guides the user through interactive prompts to set up the prompt configuration
    for the Autolabel task based on the provided dataset and task type configuration.
    The user provides details such as task guidelines, valid labels, example template, and few-shot examples.

    Args:
        config (Dict): Configuration settings for the Autolabel task.
        seed (Optional[str]): Path to the seed file containing the dataset (default is None).

    Returns:
        Dict: A dictionary containing the prompt configuration for the Autolabel task.
    """
    print("[bold]Prompt Configuration[/bold]")
    prompt_config = {}
    if seed:
        # The config is still partial at this point, so skip validation when
        # loading the seed dataset.
        unvalidated_config = ALC(config, validate=False)
        dataset = AutolabelDataset(seed, unvalidated_config, validate=False)
    prompt_config[ALC.TASK_GUIDELINE_KEY] = Prompt.ask(
        "Enter the task guidelines",
        default=TASK_TYPE_TO_IMPLEMENTATION[
            TaskType(config[ALC.TASK_TYPE_KEY])
        ].DEFAULT_TASK_GUIDELINES,
    ).replace("\\n", "\n")

    # `dataset`/`unvalidated_config` exist only when seed is set; they are
    # only evaluated in the true branch of the conditional expression below.
    seed_labels = (
        dataset.df[unvalidated_config.label_column()].unique().tolist() if seed else []
    )
    if seed_labels and Confirm.ask(
        f"Detected {len(seed_labels)} unique labels in seed dataset. Use these labels?"
    ):
        prompt_config[ALC.VALID_LABELS_KEY] = seed_labels
    else:
        # Manual label entry, one label at a time until a blank entry.
        labels = []
        label = Prompt.ask("Enter a valid label (or leave blank for none)")
        while label:
            labels.append(label)
            label = Prompt.ask("Enter a valid label (or leave blank to finish)")
        if labels:
            prompt_config[ALC.VALID_LABELS_KEY] = labels

    prompt_config[ALC.EXAMPLE_TEMPLATE_KEY] = Prompt.ask(
        "Enter the example template",
    ).replace("\\n", "\n")
    # Extract `{placeholder}` names from the template. NOTE(review): this
    # splits on spaces, so it assumes each placeholder is a whitespace-
    # delimited token — confirm against multi-token templates.
    example_template_variables = [
        v.split("}")[0].split("{")[1]
        for v in prompt_config[ALC.EXAMPLE_TEMPLATE_KEY].split(" ")
        if "{" in v and "}" in v
    ]
    # The template must reference the label column so the model's output can
    # be slotted in; re-prompt until it does.
    while (
        config[ALC.DATASET_CONFIG_KEY][ALC.LABEL_COLUMN_KEY]
        not in example_template_variables
    ):
        print(
            "[red]The label column name must be included in the example template[/red]"
        )
        prompt_config[ALC.EXAMPLE_TEMPLATE_KEY] = Prompt.ask(
            "Enter the example template",
        ).replace("\\n", "\n")
        example_template_variables = [
            v.split("}")[0].split("{")[1]
            for v in prompt_config[ALC.EXAMPLE_TEMPLATE_KEY].split(" ")
            if "{" in v and "}" in v
        ]

    if seed and Confirm.ask(f"Use {seed} as few shot example dataset?"):
        prompt_config[ALC.FEW_SHOT_EXAMPLE_SET_KEY] = seed
    else:
        # Manual few-shot entry: the user fills in each template variable, or
        # (when a seed exists) types a row number to copy that row wholesale.
        few_shot_example_set = []
        example = Prompt.ask(
            f"Enter the value for {example_template_variables[0]} {'or row number ' if seed else ''}(or leave blank for none)"
        )
        while example:
            example_dict = {}
            if seed and example.isdigit():
                example_dict = dataset.df.iloc[int(example)].to_dict()
                print(example_dict)
            else:
                example_dict[example_template_variables[0]] = example
                for variable in example_template_variables[1:]:
                    example_dict[variable] = Prompt.ask(
                        f"Enter the value for {variable}"
                    )
            few_shot_example_set.append(example_dict)
            example = Prompt.ask(
                f"Enter the value for {example_template_variables[0]} {'or row number ' if seed else ''}(or leave blank to finish)"
            )
        if few_shot_example_set:
            prompt_config[ALC.FEW_SHOT_EXAMPLE_SET_KEY] = few_shot_example_set

    # Selection algorithm / count only make sense when examples were provided.
    if ALC.FEW_SHOT_EXAMPLE_SET_KEY in prompt_config:
        options = [a.value for a in FewShotAlgorithm]
        prompt_config[ALC.FEW_SHOT_SELECTION_ALGORITHM_KEY] = options[
            TerminalMenu(options, title="Enter the few shot selection algorithm").show()
        ]
        prompt_config[ALC.FEW_SHOT_NUM_KEY] = IntPrompt.ask(
            "Enter the number of few shot examples to use",
            default=min(len(prompt_config[ALC.FEW_SHOT_EXAMPLE_SET_KEY]), 5),
        )

    output_guideline = Prompt.ask(
        "Enter the output guideline (optional)",
        default=None,
    )
    if output_guideline:
        prompt_config[ALC.OUTPUT_GUIDELINE_KEY] = output_guideline

    output_format = Prompt.ask(
        "Enter the output format (optional)",
        default=None,
    )
    if output_format:
        prompt_config[ALC.OUTPUT_FORMAT_KEY] = output_format

    prompt_config[ALC.CHAIN_OF_THOUGHT_KEY] = Confirm.ask(
        "Should the prompt use a chain of thought?", default=False
    )

    return prompt_config
def create_config_wizard(
    seed: Optional[str] = None,
    **kwargs,
) -> None:
    """Create a configuration wizard for the Autolabel task.

    This function guides the user through interactive prompts to set up the complete configuration
    for the Autolabel task, including task name, task type, dataset configuration, model configuration,
    and prompt configuration. It validates the configuration and writes it to a JSON file.

    Args:
        seed (Optional[str]): Path to the seed file containing the dataset (default is None).
        **kwargs: Additional keyword arguments for configuring the dataset, model, and prompt.

    Returns:
        None: The function writes the generated configuration to a JSON file.
    """
    config = {}
    task_name = Prompt.ask("Enter the task name")
    config[ALC.TASK_NAME_KEY] = task_name
    options = [t.value for t in TaskType]
    config[ALC.TASK_TYPE_KEY] = options[
        TerminalMenu(options, title="Choose a task type").show()
    ]
    config[ALC.DATASET_CONFIG_KEY] = _create_dataset_config_wizard(
        config[ALC.TASK_TYPE_KEY], seed
    )
    config[ALC.MODEL_CONFIG_KEY] = _create_model_config_wizard()
    config[ALC.PROMPT_CONFIG_KEY] = _create_prompt_config_wizard(config, seed)
    print(config)
    try:
        # Full schema validation of the assembled config.
        ALC(config)
    except Exception as e:
        print(f"error validating config: {e}")
        if Confirm.ask("Would you like to fix the config?"):
            # Restart the whole wizard from scratch (recursive retry);
            # nothing is written if the user declines.
            create_config_wizard(seed)
        return

    print(f"Writing config to {config[ALC.TASK_NAME_KEY]}_config.json")
    with open(f"{config[ALC.TASK_NAME_KEY]}_config.json", "w") as config_file:
        json.dump(config, config_file, indent=4)
from typing import Optional
from typing_extensions import Annotated
import logging
import typer
from autolabel import LabelingAgent
from autolabel.schema import TaskType, ModelProvider, FewShotAlgorithm
from autolabel.few_shot import PROVIDER_TO_MODEL
from autolabel.dataset import AutolabelDataset
from autolabel.cli.config import init, create_config_wizard
# Root Typer application for the autolabel CLI. Rich markup enables colored
# help text; invoking with no arguments prints the help instead of erroring.
app = typer.Typer(
    rich_markup_mode="rich",
    no_args_is_help=True,
    help="[bold]Autolabel CLI[/bold] 🏷️",
)
@app.command(name="config")
def config_command(
seed: Annotated[
Optional[str],
typer.Argument(
help="Optional seed dataset to help auto-fill the config. Recommended for a more accurate config"
),
] = None,
):
"""Create a new [bold]config[/bold] file using a wizard 🪄"""
create_config_wizard(seed)
@app.command(
    name="init",
)
def init_command(
    seed: Annotated[
        Optional[str],
        typer.Argument(
            help="Optional seed dataset to help auto-fill the config. Recommended for a more accurate config"
        ),
    ] = None,
    # --- Task options ---
    task_name: Annotated[
        str,
        typer.Option(
            help="Name of the task to create a config for",
            show_default=False,
        ),
    ] = None,
    task_type: Annotated[
        str,
        typer.Option(
            help=f"Type of task to create. Options: [magenta]{', '.join([t for t in TaskType])}[/magenta]",
            show_default=False,
        ),
    ] = None,
    # --- Dataset configuration options ---
    dataset_label_column: Annotated[
        str,
        typer.Option(
            "--label-column",
            help="Name of the column containing the labels",
            show_default=False,
            rich_help_panel="Dataset Configuration",
        ),
    ] = None,
    dataset_label_separator: Annotated[
        str,
        typer.Option(
            "--label-separator",
            help="Separator to use when separating multiple labels for multilabel classification",
            show_default=False,
            rich_help_panel="Dataset Configuration",
        ),
    ] = None,
    dataset_explanation_column: Annotated[
        str,
        typer.Option(
            "--explanation-column",
            help="Name of the column containing the explanations",
            show_default=False,
            rich_help_panel="Dataset Configuration",
        ),
    ] = None,
    dataset_text_column: Annotated[
        str,
        typer.Option(
            "--text-column",
            help="Name of the column containing the text to label",
            show_default=False,
            rich_help_panel="Dataset Configuration",
        ),
    ] = None,
    dataset_delimiter: Annotated[
        str,
        typer.Option(
            "--delimiter",
            help="Delimiter to use when parsing the dataset",
            show_default=False,
            rich_help_panel="Dataset Configuration",
        ),
    ] = None,  # None means it will be guessed from seed.csv or default to a comma
    # --- Model configuration options ---
    model_provider: Annotated[
        str,
        typer.Option(
            "--provider",
            help=f"Provider of the model to use. Options: [magenta]{', '.join([p for p in ModelProvider])}[/magenta]",
            rich_help_panel="Model Configuration",
        ),
    ] = "openai",
    model_name: Annotated[
        str,
        typer.Option(
            "--model",
            help="Name of the model to use",
            rich_help_panel="Model Configuration",
        ),
    ] = "gpt-3.5-turbo",
    model_compute_confidence: Annotated[
        bool,
        typer.Option(
            "--compute-confidence",
            help="Whether to compute confidence scores for each label",
            show_default=False,
            rich_help_panel="Model Configuration",
        ),
    ] = None,
    model_logit_bias: Annotated[
        bool,
        typer.Option(
            "--logit-bias",
            help="Whether to use logit biasing to constrain the model to certain tokens",
            show_default=False,
            rich_help_panel="Model Configuration",
        ),
    ] = None,
    # --- Embedding configuration options (used for few-shot example selection) ---
    embedding_provider: Annotated[
        str,
        typer.Option(
            "--embedding-provider",
            help=f"Provider of the embedding model to use. Options: [magenta]{', '.join([p for p in PROVIDER_TO_MODEL])}[/magenta]",
            show_default=False,
            rich_help_panel="Embedding Configuration",
        ),
    ] = None,
    embedding_model_name: Annotated[
        str,
        typer.Option(
            "--embedding-model",
            help="Name of the embedding model to use",
            show_default=False,
            rich_help_panel="Embedding Configuration",
        ),
    ] = None,
    # --- Prompt configuration options ---
    guess_labels: Annotated[
        bool,
        typer.Option(
            "--guess-labels",
            help="Whether to guess the labels from the seed dataset. If set, --task-type, --delimiter, and --label-column (and --label-separator for mulitlabel classification) must be defined",
            rich_help_panel="Prompt Configuration",
        ),
    ] = False,
    prompt_task_guidelines: Annotated[
        str,
        typer.Option(
            "--task-guidelines",
            help="Guidelines for the task. [code]{labels}[/code] will be replaced with a newline-separated list of labels",
            show_default=False,
            rich_help_panel="Prompt Configuration",
        ),
    ] = None,
    prompt_few_shot_examples: Annotated[
        str,
        typer.Option(
            "--few-shot-examples",
            help="Seed dataset to use for few-shot prompting",
            show_default=False,
            rich_help_panel="Prompt Configuration",
        ),
    ] = None,
    prompt_few_shot_selection: Annotated[
        str,
        typer.Option(
            "--few-shot-selection",
            help=f"What algorithm to use to select examples from the seed dataset. Options: [magenta]{', '.join([a for a in FewShotAlgorithm])}[/magenta]",
            show_default=False,
            rich_help_panel="Prompt Configuration",
        ),
    ] = None,
    prompt_few_shot_num: Annotated[
        int,
        typer.Option(
            "--few-shot-num",
            help="Number of examples to select from the seed dataset",
            show_default=False,
            rich_help_panel="Prompt Configuration",
        ),
    ] = None,
    prompt_example_template: Annotated[
        str,
        typer.Option(
            "--example-template",
            help="Template to use for each example. [code]{column_name}[/code] will be replaced with the corresponding column value for each example",
            show_default=False,
            rich_help_panel="Prompt Configuration",
        ),
    ] = None,
    prompt_output_guidelines: Annotated[
        str,
        typer.Option(
            "--output-guidelines",
            help="Guidelines for the output",
            show_default=False,
            rich_help_panel="Prompt Configuration",
        ),
    ] = None,
    prompt_output_format: Annotated[
        str,
        typer.Option(
            "--output-format",
            help="Format to use for the output",
            show_default=False,
            rich_help_panel="Prompt Configuration",
        ),
    ] = None,
    prompt_chain_of_thought: Annotated[
        bool,
        typer.Option(
            "--chain-of-thought",
            help="Whether to use chain of thought",
            show_default=False,
            rich_help_panel="Prompt Configuration",
        ),
    ] = None,
):
    """Generate a new template [bold]config[/bold] file 📄"""
    # Forward every CLI option to the config generator. Options left as None
    # are filled with defaults or inferred from the seed dataset by `init`.
    init(
        seed,
        task_name,
        task_type,
        dataset_label_column=dataset_label_column,
        dataset_label_separator=dataset_label_separator,
        dataset_explanation_column=dataset_explanation_column,
        dataset_text_column=dataset_text_column,
        dataset_delimiter=dataset_delimiter,
        model_provider=model_provider,
        model_name=model_name,
        model_compute_confidence=model_compute_confidence,
        model_logit_bias=model_logit_bias,
        embedding_provider=embedding_provider,
        embedding_model_name=embedding_model_name,
        guess_labels=guess_labels,
        prompt_task_guidelines=prompt_task_guidelines,
        prompt_few_shot_examples=prompt_few_shot_examples,
        prompt_few_shot_selection=prompt_few_shot_selection,
        prompt_few_shot_num=prompt_few_shot_num,
        prompt_example_template=prompt_example_template,
        prompt_output_guidelines=prompt_output_guidelines,
        prompt_output_format=prompt_output_format,
        prompt_chain_of_thought=prompt_chain_of_thought,
    )
def setup_logging(
    verbose_debug: bool = False,
    verbose_info: bool = False,
    quiet_warning: bool = False,
    quiet_error: bool = False,
):
    """Set the log level for the ``autolabel`` and ``langchain`` loggers.

    The flags are checked from most to least verbose and the first truthy one
    wins; with no flags set, the level defaults to WARNING.
    """
    # Ordered from most verbose to quietest; first matching flag decides.
    flag_to_level = [
        (verbose_debug, logging.DEBUG),
        (verbose_info, logging.INFO),
        (quiet_warning, logging.ERROR),
        (quiet_error, logging.CRITICAL),
    ]
    log_level = next(
        (level for flag, level in flag_to_level if flag), logging.WARNING
    )
    for logger_name in ("autolabel", "langchain"):
        logging.getLogger(logger_name).setLevel(log_level)
@app.command()
def plan(
    dataset: Annotated[
        str, typer.Argument(help="Path to dataset to label", show_default=False)
    ],
    config: Annotated[
        str, typer.Argument(help="Path to config file", show_default=False)
    ],
    max_items: Annotated[int, typer.Option(help="Max number of items to label")] = None,
    start_index: Annotated[int, typer.Option(help="Index to start at")] = 0,
    cache: Annotated[bool, typer.Option(help="Cache results")] = True,
    # Verbosity flags: checked in priority order by setup_logging.
    verbose_debug: Annotated[
        bool, typer.Option("--debug", "-vv", help="Verbose (debug log level)")
    ] = False,
    verbose_info: Annotated[
        bool, typer.Option("--info", "-v", help="Verbose (info log level)")
    ] = False,
    quiet_warning: Annotated[
        bool, typer.Option("--error", "-q", help="Quiet (error log level)")
    ] = False,
    quiet_error: Annotated[
        bool, typer.Option("--critical", "-qq", help="Quiet (critical log level)")
    ] = False,
):
    """[bold]Plan[/bold] 🔍 a labeling session in accordance with the provided dataset and config file"""
    setup_logging(verbose_debug, verbose_info, quiet_warning, quiet_error)
    # Build the agent from the config file path, then reuse its parsed config
    # to wrap the dataset so both agree on columns/parsing settings.
    agent = LabelingAgent(config=config, cache=cache)
    config = agent.config
    dataset = AutolabelDataset(dataset, config)
    agent.plan(dataset, max_items=max_items, start_index=start_index)
@app.command()
def run(
    dataset: Annotated[
        str, typer.Argument(help="Path to dataset to label", show_default=False)
    ],
    config: Annotated[
        str, typer.Argument(help="Path to config file", show_default=False)
    ],
    max_items: Annotated[int, typer.Option(help="Max number of items to label")] = None,
    start_index: Annotated[int, typer.Option(help="Index to start at")] = 0,
    cache: Annotated[bool, typer.Option(help="Cache results")] = True,
    # Verbosity flags: checked in priority order by setup_logging.
    verbose_debug: Annotated[
        bool, typer.Option("--debug", "-vv", help="Verbose (debug log level)")
    ] = False,
    verbose_info: Annotated[
        bool, typer.Option("--info", "-v", help="Verbose (info log level)")
    ] = False,
    quiet_warning: Annotated[
        bool, typer.Option("--error", "-q", help="Quiet (error log level)")
    ] = False,
    quiet_error: Annotated[
        bool, typer.Option("--critical", "-qq", help="Quiet (critical log level)")
    ] = False,
):
    """[bold]Run[/bold] ▶️ a labeling session in accordance with the provided dataset and config file"""
    setup_logging(verbose_debug, verbose_info, quiet_warning, quiet_error)
    # Build the agent from the config file path, then reuse its parsed config
    # to wrap the dataset so both agree on columns/parsing settings.
    agent = LabelingAgent(config=config, cache=cache)
    config = agent.config
    dataset = AutolabelDataset(dataset, config)
    agent.run(dataset, max_items=max_items, start_index=start_index)
# Script entry point: dispatch to the Typer app when executed directly.
if __name__ == "__main__":
    app()
from autolabel.cache import BaseCache
from autolabel.transforms import BaseTransform
from langchain.utilities import SerpAPIWrapper
from typing import Dict, Any
import logging
import pandas as pd
from autolabel.transforms.schema import (
TransformError,
TransformErrorType,
TransformType,
)
logger = logging.getLogger(__name__)
class SerpApi(BaseTransform):
    """Transform that runs a row's query through SerpApi and stores the result.

    For each row, the value of ``query_column`` is sent to the SerpApi search
    endpoint and the raw search result is written to the dataset column mapped
    by ``result_column`` in ``output_columns``.
    """

    COLUMN_NAMES = [
        "result_column",
    ]

    # Default SerpApi request parameters: Google search, US locale, English.
    DEFAULT_ARGS = {
        "engine": "google",
        "google_domain": "google.com",
        "gl": "us",
        "hl": "en",
    }

    def __init__(
        self,
        cache: BaseCache,
        output_columns: Dict[str, Any],
        query_column: str,
        serp_api_key: str,
        serp_args: dict = None,
    ) -> None:
        """
        Args:
            cache: Cache used to memoize transform outputs.
            output_columns: Maps expected output keys (see COLUMN_NAMES) to the
                dataset column names they should be written to.
            query_column: Name of the input column holding the search query.
            serp_api_key: API key used to authenticate with SerpApi.
            serp_args: Optional request parameters forwarded to SerpApi;
                defaults to a copy of DEFAULT_ARGS. (A None default replaces the
                previous mutable class-dict default, which was shared across
                instances.)
        """
        super().__init__(cache, output_columns)
        self.query_column = query_column
        self.serp_api_key = serp_api_key
        # Copy so per-instance mutation can never leak into DEFAULT_ARGS or
        # into another instance.
        self.serp_args = (
            dict(serp_args) if serp_args is not None else dict(self.DEFAULT_ARGS)
        )
        self.serp_api_wrapper = SerpAPIWrapper(
            search_engine=None, params=self.serp_args, serpapi_api_key=self.serp_api_key
        )

    @staticmethod
    def name() -> str:
        # Static for consistency with BaseTransform's abstract declaration.
        return TransformType.SERP_API

    async def _get_result(self, query):
        """
        Makes a request to Serp API with the query
        and returns the search results.

        Raises:
            TransformError: SERP_API_ERROR if the request fails for any reason.
        """
        try:
            search_result = await self.serp_api_wrapper.arun(query=query)
        except Exception as e:
            raise TransformError(
                TransformErrorType.SERP_API_ERROR,
                f"Error while making request to Serp API: {e}",
            )
        return search_result

    async def _apply(self, row: Dict[str, Any]) -> Dict[str, Any]:
        """Run the row's query through SerpApi and return the output columns.

        Raises:
            TransformError: INVALID_INPUT if the query is missing/NaN, or
                SERP_API_ERROR if the API call fails.
        """
        query = row[self.query_column]
        search_result = self.NULL_TRANSFORM_TOKEN
        if pd.isna(query) or query == self.NULL_TRANSFORM_TOKEN:
            raise TransformError(
                TransformErrorType.INVALID_INPUT,
                f"Empty query in row {row}",
            )
        else:
            search_result = await self._get_result(query)
        transformed_row = {self.output_columns["result_column"]: search_result}

        return self._return_output_row(transformed_row)

    def params(self):
        """Parameters that uniquely identify this transform (used as cache key)."""
        return {
            "query_column": self.query_column,
            "output_columns": self.output_columns,
            "serp_api_key": self.serp_api_key,
            "serp_args": self.serp_args,
        }
from abc import ABC, abstractmethod
from autolabel.cache import BaseCache
from autolabel.transforms.schema import TransformCacheEntry, TransformError
from typing import Dict, Any
import logging
logger = logging.getLogger(__name__)
class BaseTransform(ABC):
    """Base class for all transforms."""

    # Time-to-live for cached transform outputs.
    TTL_MS = 60 * 60 * 24 * 7 * 1000  # 1 week

    # Sentinel written to output columns when a transform produces no value
    # (e.g. because it errored); also used to recognize untransformable input.
    NULL_TRANSFORM_TOKEN = "NO_TRANSFORM"

    def __init__(self, cache: BaseCache, output_columns: Dict[str, Any]) -> None:
        """
        Initialize a transform.

        Args:
            cache: A cache object to use for caching the results of this transform.
            output_columns: A dictionary of output columns. The keys are the names of the output columns as expected by the transform. The values are the column names they should be mapped to in the dataset.
        """
        super().__init__()
        self._output_columns = output_columns
        self.cache = cache

    @staticmethod
    @abstractmethod
    def name() -> str:
        """
        Returns the name of the transform.
        """
        pass

    @property
    def output_columns(self) -> Dict[str, Any]:
        """
        Returns a dictionary of output columns. The keys are the names of the output columns
        as expected by the transform. The values are the column names they should be mapped to in
        the dataset.

        Keys listed in the subclass's COLUMN_NAMES but absent from the
        configured mapping are filled with None.
        """
        return {k: self._output_columns.get(k, None) for k in self.COLUMN_NAMES}

    @property
    def transform_error_column(self) -> str:
        """
        Returns the name of the column that stores the error if transformation fails.
        """
        return f"{self.name()}_error"

    @abstractmethod
    async def _apply(self, row: Dict[str, Any]) -> Dict[str, Any]:
        """
        Applies the transform to the given row.

        Args:
            row: A dictionary representing a row in the dataset. The keys are the column names and the values are the column values.
        Returns:
            A dictionary representing the transformed row. The keys are the column names and the values are the column values.
        """
        pass

    @abstractmethod
    def params(self) -> Dict[str, Any]:
        """
        Returns a dictionary of parameters that can be used to uniquely identify this transform.

        Returns:
            A dictionary of parameters that can be used to uniquely identify this transform.
        """
        return {}

    async def apply(self, row: Dict[str, Any]) -> Dict[str, Any]:
        """Apply the transform to ``row``, consulting and updating the cache.

        On failure, every mapped output column is filled with
        NULL_TRANSFORM_TOKEN and the error message is recorded under
        ``transform_error_column`` instead of raising.
        """
        if self.cache is not None:
            # Cache identity is (transform name, params, input row).
            cache_entry = TransformCacheEntry(
                transform_name=self.name(),
                transform_params=self.params(),
                input=row,
                ttl_ms=self.TTL_MS,
            )
            output = self.cache.lookup(cache_entry)

            if output is not None:
                # Cache hit
                return output

        try:
            output = await self._apply(row)
        except Exception as e:
            logger.error(f"Error applying transform {self.name()}. Exception: {str(e)}")
            output = {
                k: self.NULL_TRANSFORM_TOKEN
                for k in self.output_columns.values()
                if k is not None
            }
            output[self.transform_error_column] = str(e)
            # Errors are returned but NOT cached, so a later retry can succeed.
            return output

        if self.cache is not None:
            cache_entry.output = output
            self.cache.update(cache_entry)

        return output

    def _return_output_row(self, row: Dict[str, Any]) -> Dict[str, Any]:
        """
        Returns the output row with the correct column names.

        Args:
            row: The output row.

        Returns:
            The output row with the correct column names.
        """
        # remove null key
        # (an unmapped output column — see the output_columns property — shows
        # up here under the key None and must not be written to the dataset)
        row.pop(None, None)
        return row
from typing import List, Dict, Any
from autolabel.transforms.schema import TransformType
from autolabel.transforms import BaseTransform
from autolabel.cache import BaseCache
class PDFTransform(BaseTransform):
    """This class is used to extract text from PDFs. The output columns dictionary for this class should include the keys 'content_column' and 'metadata_column'"""

    COLUMN_NAMES = [
        "content_column",
        "metadata_column",
    ]

    def __init__(
        self,
        cache: BaseCache,
        output_columns: Dict[str, Any],
        file_path_column: str,
        ocr_enabled: bool = False,
        page_format: str = "Page {page_num}: {page_content}",
        page_sep: str = "\n\n",
        lang: str = None,
    ) -> None:
        """
        Args:
            cache: Cache used to memoize transform outputs.
            output_columns: Maps expected output keys (see COLUMN_NAMES) to the
                dataset column names they should be written to.
            file_path_column: Input column containing the path to the PDF file.
            ocr_enabled: If True, rasterize pages and OCR them with tesseract;
                otherwise extract embedded text with pdfplumber.
            page_format: Template applied to each page; ``{page_num}`` and
                ``{page_content}`` are substituted.
            page_sep: Separator placed between formatted pages.
            lang: Optional tesseract language code (OCR mode only).
        """
        super().__init__(cache, output_columns)
        self.file_path_column = file_path_column
        self.ocr_enabled = ocr_enabled
        self.page_format = page_format
        self.page_sep = page_sep
        self.lang = lang

        if self.ocr_enabled:
            try:
                from pdf2image import convert_from_path
                import pytesseract

                self.convert_from_path = convert_from_path
                self.pytesseract = pytesseract
                # Fails fast if the tesseract binary is missing.
                self.pytesseract.get_tesseract_version()
            except ImportError:
                raise ImportError(
                    "pdf2image and pytesseract are required to use the pdf transform with ocr. Please install pdf2image and pytesseract with the following command: pip install pdf2image pytesseract"
                )
            except EnvironmentError:
                raise EnvironmentError(
                    "The tesseract engine is required to use the pdf transform with ocr. Please see https://tesseract-ocr.github.io/tessdoc/Installation.html for installation instructions."
                )
        else:
            try:
                from langchain.document_loaders import PDFPlumberLoader

                self.PDFPlumberLoader = PDFPlumberLoader
            except ImportError:
                raise ImportError(
                    "pdfplumber is required to use the pdf transform. Please install pdfplumber with the following command: pip install pdfplumber"
                )

    @staticmethod
    def name() -> str:
        return TransformType.PDF

    def get_page_texts(self, row: Dict[str, Any]) -> List[str]:
        """This function gets the text from each page of a PDF file.
        If OCR is enabled, it uses the pdf2image library to convert the PDF into images and then uses
        pytesseract to convert the images into text. Otherwise, it uses pdfplumber to extract the text.

        Args:
            row (Dict[str, Any]): The row of data to be transformed.

        Returns:
            List[str]: A list of strings containing the text from each page of the PDF.
        """
        if self.ocr_enabled:
            pages = self.convert_from_path(row[self.file_path_column])
            return [
                self.pytesseract.image_to_string(page, lang=self.lang) for page in pages
            ]
        else:
            loader = self.PDFPlumberLoader(row[self.file_path_column])
            return [page.page_content for page in loader.load()]

    async def _apply(self, row: Dict[str, Any]) -> Dict[str, Any]:
        """This function transforms a PDF file into a string of text.
        The text is formatted according to the page_format and
        page_sep parameters and returned as a string.

        Args:
            row (Dict[str, Any]): The row of data to be transformed.

        Returns:
            Dict[str, Any]: The dict of output columns.
        """
        texts = []
        for idx, text in enumerate(self.get_page_texts(row)):
            texts.append(self.page_format.format(page_num=idx + 1, page_content=text))
        output = self.page_sep.join(texts)
        transformed_row = {
            self.output_columns["content_column"]: output,
            self.output_columns["metadata_column"]: {"num_pages": len(texts)},
        }
        return self._return_output_row(transformed_row)

    def params(self):
        """Parameters that uniquely identify this transform (used as cache key)."""
        return {
            "file_path_column": self.file_path_column,
            "ocr_enabled": self.ocr_enabled,
            # Key renamed from the mismatched "page_header" so params() mirrors
            # the constructor argument, consistent with the other transforms.
            "page_format": self.page_format,
            "page_sep": self.page_sep,
            "output_columns": self.output_columns,
            "lang": self.lang,
        }
from typing import Dict, Any
from autolabel.transforms.schema import TransformType
from autolabel.transforms import BaseTransform
from autolabel.cache import BaseCache
class ImageTransform(BaseTransform):
    """This class is used to extract text from images using OCR. The output columns dictionary for this class should include the keys 'content_column' and 'metadata_column'

    This transform supports the following image formats: PNG, JPEG, TIFF, JPEG 2000, GIF, WebP, BMP, and PNM
    """

    COLUMN_NAMES = [
        "content_column",
        "metadata_column",
    ]

    def __init__(
        self,
        cache: BaseCache,
        output_columns: Dict[str, Any],
        file_path_column: str,
        lang: str = None,
    ) -> None:
        """
        Args:
            cache: Cache used to memoize transform outputs.
            output_columns: Maps expected output keys (see COLUMN_NAMES) to the
                dataset column names they should be written to.
            file_path_column: Input column containing the path to the image.
            lang: Optional tesseract language code.
        """
        super().__init__(cache, output_columns)
        self.file_path_column = file_path_column
        self.lang = lang

        try:
            from PIL import Image
            import pytesseract

            self.Image = Image
            self.pytesseract = pytesseract
            # Fails fast if the tesseract binary is missing.
            self.pytesseract.get_tesseract_version()
        except ImportError:
            raise ImportError(
                "pillow and pytesseract are required to use the image transform with ocr. Please install pillow and pytesseract with the following command: pip install pillow pytesseract"
            )
        except EnvironmentError:
            raise EnvironmentError(
                "The tesseract engine is required to use the image transform with ocr. Please see https://tesseract-ocr.github.io/tessdoc/Installation.html for installation instructions."
            )

    @staticmethod
    def name() -> str:
        return TransformType.IMAGE

    def get_image_metadata(self, file_path: str):
        """Return basic properties of the image (format, mode, size, EXIF).

        Errors are captured and returned as an ``{"error": ...}`` dict instead
        of raised, so a bad file still yields a metadata column.
        """
        try:
            # Use a context manager so the underlying file handle is closed
            # (the previous implementation leaked it).
            with self.Image.open(file_path) as image:
                metadata = {
                    "format": image.format,
                    "mode": image.mode,
                    "size": image.size,
                    "width": image.width,
                    "height": image.height,
                    "exif": image._getexif(),  # Exif metadata
                }
            return metadata
        except Exception as e:
            return {"error": str(e)}

    async def _apply(self, row: Dict[str, Any]) -> Dict[str, Any]:
        """This function transforms an image into text using OCR.

        Args:
            row (Dict[str, Any]): The row of data to be transformed.

        Returns:
            Dict[str, Any]: The dict of output columns.
        """
        content = self.pytesseract.image_to_string(
            row[self.file_path_column], lang=self.lang
        )
        metadata = self.get_image_metadata(row[self.file_path_column])
        transformed_row = {
            self.output_columns["content_column"]: content,
            self.output_columns["metadata_column"]: metadata,
        }
        # Route through the base helper (as every other transform does) so
        # unmapped output columns (key None) are dropped from the result.
        return self._return_output_row(transformed_row)

    def params(self) -> Dict[str, Any]:
        """Parameters that uniquely identify this transform (used as cache key)."""
        return {
            "output_columns": self.output_columns,
            "file_path_column": self.file_path_column,
            "lang": self.lang,
        }
from autolabel.transforms.schema import (
TransformType,
TransformError,
TransformErrorType,
)
from autolabel.transforms import BaseTransform
from typing import Dict, Any
import asyncio
import logging
import pandas as pd
import ssl
from autolabel.cache import BaseCache
logger = logging.getLogger(__name__)
# HTTP client tuning for WebpageTransform.
MAX_RETRIES = 3  # max attempts per URL before giving up
MAX_KEEPALIVE_CONNECTIONS = 20  # httpx connection-pool keepalive limit
MAX_CONNECTIONS = 100  # httpx connection-pool total limit
BACKOFF = 2  # exponential backoff base (seconds) between retries
HEADERS = {}  # default request headers (User-Agent filled in per instance)
HTML_PARSER = "html.parser"  # BeautifulSoup parser backend
class WebpageTransform(BaseTransform):
    """Fetches a URL from the dataset and extracts its text, raw bytes,
    parsed BeautifulSoup object, and page metadata into output columns."""

    COLUMN_NAMES = [
        "content_column",
        "content_in_bytes_column",
        "soup_column",
        "metadata_column",
    ]

    def __init__(
        self,
        cache: BaseCache,
        output_columns: Dict[str, Any],
        url_column: str,
        timeout: int = 5,
        headers: Dict[str, str] = None,
    ) -> None:
        """
        Args:
            cache: Cache used to memoize transform outputs.
            output_columns: Maps expected output keys (see COLUMN_NAMES) to the
                dataset column names they should be written to.
            url_column: Input column containing the URL to fetch.
            timeout: Request timeout in seconds.
            headers: Optional request headers. A random User-Agent is added if
                none is supplied.
        """
        super().__init__(cache, output_columns)
        self.url_column = url_column
        # Copy the caller's headers (or the module defaults). The previous
        # implementation stored and mutated the shared module-level HEADERS
        # dict directly, leaking the first instance's User-Agent into every
        # later instance.
        self.headers = dict(headers) if headers is not None else dict(HEADERS)
        self.max_retries = MAX_RETRIES
        try:
            from bs4 import BeautifulSoup
            import httpx

            if not self.headers.get("User-Agent"):
                from fake_useragent import UserAgent

                self.headers["User-Agent"] = UserAgent().random

            self.httpx = httpx
            self.timeout_time = timeout
            self.timeout = httpx.Timeout(timeout)
            limits = httpx.Limits(
                max_keepalive_connections=MAX_KEEPALIVE_CONNECTIONS,
                max_connections=MAX_CONNECTIONS,
                keepalive_expiry=timeout,
            )
            self.client = httpx.AsyncClient(
                timeout=self.timeout, limits=limits, follow_redirects=True
            )
            # Fallback client used when SSL certificate verification fails.
            self.client_with_no_verify = httpx.AsyncClient(
                timeout=self.timeout, limits=limits, follow_redirects=True, verify=False
            )
            self.beautiful_soup = BeautifulSoup
        except ImportError:
            raise ImportError(
                "BeautifulSoup, httpx and fake_useragent are required to use the webpage transform. Please install them with the following command: pip install beautifulsoup4 httpx fake_useragent"
            )

    @staticmethod
    def name() -> str:
        # Static for consistency with BaseTransform's abstract declaration.
        return TransformType.WEBPAGE_TRANSFORM

    def _load_metadata(self, url, soup) -> Dict[str, Any]:
        """Collect the page title and all <meta> name/property pairs."""
        metadata = {"url": url}
        if soup.find("title"):
            metadata["title"] = soup.find("title").get_text()
        for meta in soup.find_all("meta"):
            if meta.get("name") and meta.get("content"):
                metadata[meta.get("name")] = meta.get("content")
            elif meta.get("property") and meta.get("content"):
                metadata[meta.get("property")] = meta.get("content")
        return metadata

    async def _load_url(
        self, url: str, verify=True, headers=None, retry_count=0
    ) -> Dict[str, Any]:
        """Fetch ``url`` and return its text, bytes, soup, and metadata.

        On SSL verification failure, retries with verification disabled and
        exponential backoff, up to ``self.max_retries`` attempts.

        Raises:
            TransformError: MAX_RETRIES_REACHED or TRANSFORM_TIMEOUT.
        """
        if headers is None:
            # Use the per-instance headers (including the generated
            # User-Agent) instead of the module-level default dict.
            headers = self.headers
        if retry_count >= self.max_retries:
            logger.warning(f"Max retries reached for URL: {url}")
            raise TransformError(
                TransformErrorType.MAX_RETRIES_REACHED, "Max retries reached"
            )
        try:
            client = self.client
            if not verify:
                client = self.client_with_no_verify
            response = await client.get(url, headers=headers)

            # TODO: Add support for other parsers
            content_bytes = response.content
            soup = self.beautiful_soup(content_bytes, HTML_PARSER)
            return {
                "content": soup.get_text(),
                "content_bytes": content_bytes,
                "soup": soup,
                "metadata": self._load_metadata(url, soup),
            }
        except self.httpx.ConnectTimeout as e:
            logger.error(f"Timeout when fetching content from URL: {url}")
            raise TransformError(
                TransformErrorType.TRANSFORM_TIMEOUT,
                "Timeout when fetching content from URL",
            )
        except ssl.SSLCertVerificationError as e:
            logger.warning(
                f"SSL verification error when fetching content from URL: {url}, retrying with verify=False"
            )
            await asyncio.sleep(BACKOFF**retry_count)
            return await self._load_url(
                url, verify=False, headers=headers, retry_count=retry_count + 1
            )
        except Exception as e:
            logger.error(f"Error fetching content from URL: {url}. Exception: {e}")
            raise e

    async def _apply(self, row: Dict[str, Any]) -> Dict[str, Any]:
        """Fetch the row's URL and fill in the configured output columns.

        Raises:
            TransformError: INVALID_INPUT if the URL is missing/NaN, or any
                error propagated from ``_load_url``.
        """
        url = row[self.url_column]
        url_response_data = {}
        if pd.isna(url) or url == self.NULL_TRANSFORM_TOKEN:
            raise TransformError(
                TransformErrorType.INVALID_INPUT,
                f"Empty url in row {row}",
            )
        else:
            url_response_data = await self._load_url(url)

        transformed_row = {
            self.output_columns["content_column"]: url_response_data.get("content"),
            self.output_columns["content_in_bytes_column"]: url_response_data.get(
                "content_bytes"
            ),
            self.output_columns["soup_column"]: url_response_data.get("soup"),
            self.output_columns["metadata_column"]: url_response_data.get("metadata"),
        }

        return self._return_output_row(transformed_row)

    def params(self):
        """Parameters that uniquely identify this transform (used as cache key)."""
        return {
            "url_column": self.url_column,
            "output_columns": self.output_columns,
            "timeout": self.timeout_time,
        }
import logging
from typing import List, Optional, Dict
from autolabel.models import BaseModel
from autolabel.configs import AutolabelConfig
from autolabel.cache import BaseCache
from autolabel.schema import RefuelLLMResult
logger = logging.getLogger(__name__)
class HFPipelineLLM(BaseModel):
    """LLM wrapper that runs a Hugging Face model locally through a
    transformers ``pipeline`` wrapped by langchain's HuggingFacePipeline."""

    DEFAULT_MODEL = "google/flan-t5-xxl"

    # "quantize" selects the GPU loading precision: 8 -> int8, 16 -> fp16,
    # anything else -> model default. It is stripped before being passed to
    # the pipeline.
    DEFAULT_PARAMS = {"temperature": 0.0, "quantize": 8}

    def __init__(self, config: AutolabelConfig, cache: BaseCache = None) -> None:
        super().__init__(config, cache)

        from langchain.llms import HuggingFacePipeline

        try:
            from transformers import (
                AutoConfig,
                AutoModelForSeq2SeqLM,
                AutoModelForCausalLM,
                AutoTokenizer,
                pipeline,
            )
            from transformers.models.auto.modeling_auto import (
                MODEL_FOR_CAUSAL_LM_MAPPING,
                MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
            )
        except ImportError:
            raise ValueError(
                "Could not import transformers python package. "
                "Please it install it with `pip install transformers`."
            )
        try:
            import torch
        except ImportError:
            raise ValueError(
                "Could not import torch package. "
                "Please it install it with `pip install torch`."
            )
        # populate model name
        self.model_name = config.model_name() or self.DEFAULT_MODEL

        # populate model params
        model_params = config.model_params()
        self.model_params = {**self.DEFAULT_PARAMS, **model_params}
        if config.logit_bias() != 0:
            # Config-supplied params take precedence over the generated bias.
            self.model_params = {
                **self._generate_sequence_bias(),
                **self.model_params,
            }

        # initialize HF pipeline
        tokenizer = AutoTokenizer.from_pretrained(
            self.model_name, use_fast=False, add_prefix_space=True
        )
        # Normalize to a string so that both `quantize: 8` and `quantize: "8"`
        # (and 16/"16") from the config are honored. The previous code compared
        # `== 8` (int) but `== "16"` (str), so an integer 16 was silently
        # ignored and fell through to the default-precision branch.
        quantize_bits = str(self.model_params["quantize"])
        model_config = AutoConfig.from_pretrained(self.model_name)
        if isinstance(model_config, tuple(MODEL_FOR_CAUSAL_LM_MAPPING)):
            AutoModel = AutoModelForCausalLM
        elif isinstance(model_config, tuple(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)):
            AutoModel = AutoModelForSeq2SeqLM
        else:
            raise ValueError(
                "model_name is neither a causal LM nor a seq2seq LM. Please check the model_name."
            )

        if not torch.cuda.is_available():
            # Quantized loading below requires CUDA; load on CPU with defaults.
            model = AutoModel.from_pretrained(self.model_name)
        elif quantize_bits == "8":
            model = AutoModel.from_pretrained(
                self.model_name, load_in_8bit=True, device_map="auto"
            )
        elif quantize_bits == "16":
            model = AutoModel.from_pretrained(
                self.model_name, torch_dtype=torch.float16, device_map="auto"
            )
        else:
            model = AutoModel.from_pretrained(self.model_name, device_map="auto")

        model_kwargs = dict(self.model_params)  # make a copy of the model params
        model_kwargs.pop("quantize", None)  # remove quantize from the model params
        pipe = pipeline(
            "text2text-generation",
            model=model,
            tokenizer=tokenizer,
            **model_kwargs,
        )

        # initialize LLM
        self.llm = HuggingFacePipeline(pipeline=pipe, model_kwargs=model_kwargs)

    def _generate_sequence_bias(self) -> Dict:
        """Generates sequence bias dict to add to the config for the labels specified

        Returns:
            Dict: sequence bias, max new tokens, and num beams
        """
        if len(self.config.labels_list()) == 0:
            logger.warning(
                "No labels specified in the config. Skipping logit bias generation."
            )
            return {}
        try:
            from transformers import AutoTokenizer
        except ImportError:
            raise ValueError(
                "Could not import transformers python package. "
                "Please it install it with `pip install transformers`."
            )
        tokenizer = AutoTokenizer.from_pretrained(
            self.model_name, use_fast=False, add_prefix_space=True
        )
        # Bias the EOS token and every token of every label toward selection.
        sequence_bias = {tuple([tokenizer.eos_token_id]): self.config.logit_bias()}
        max_new_tokens = 0
        for label in self.config.labels_list():
            tokens = tuple(tokenizer([label], add_special_tokens=False).input_ids[0])
            for token in tokens:
                sequence_bias[tuple([token])] = self.config.logit_bias()
            # Cap generation length at the longest label.
            max_new_tokens = max(max_new_tokens, len(tokens))

        return {
            "sequence_bias": sequence_bias,
            "max_new_tokens": max_new_tokens,
        }

    def _label(self, prompts: List[str]) -> RefuelLLMResult:
        """Generate labels for all prompts in one batch, falling back to
        labeling them one at a time if the batched call fails."""
        try:
            result = self.llm.generate(prompts)
            return RefuelLLMResult(
                generations=result.generations, errors=[None] * len(result.generations)
            )
        except Exception as e:
            return self._label_individually(prompts)

    def get_cost(self, prompt: str, label: Optional[str] = "") -> float:
        """Local inference has no per-token billing; always returns 0."""
        # Model inference for this model is being run locally
        # Revisit this in the future when we support HF inference endpoints
        return 0.0

    def returns_token_probs(self) -> bool:
        """This wrapper does not expose per-token log probabilities."""
        return False
from typing import List, Optional
from autolabel.configs import AutolabelConfig
from autolabel.models import BaseModel
from autolabel.cache import BaseCache
from autolabel.schema import RefuelLLMResult
from langchain.schema import HumanMessage
class AnthropicLLM(BaseModel):
    """LLM wrapper around Anthropic's Claude chat models."""

    DEFAULT_MODEL = "claude-instant-v1"

    # Default generation parameters; config-provided values override these.
    DEFAULT_PARAMS = {
        "max_tokens_to_sample": 1000,
        "temperature": 0.0,
    }

    # Pricing per token, by model.
    # Reference: https://cdn2.assets-servd.host/anthropic-website/production/images/apr-pricing-tokens.pdf
    COST_PER_PROMPT_TOKEN = {
        # $11.02 per million tokens
        "claude-v1": (11.02 / 1000000),
        "claude-instant-v1": (1.63 / 1000000),
    }
    COST_PER_COMPLETION_TOKEN = {
        # $32.68 per million tokens
        "claude-v1": (32.68 / 1000000),
        "claude-instant-v1": (5.51 / 1000000),
    }

    def __init__(self, config: AutolabelConfig, cache: BaseCache = None) -> None:
        super().__init__(config, cache)

        try:
            from langchain.chat_models import ChatAnthropic
            from anthropic import tokenizer
        except ImportError:
            raise ImportError(
                "anthropic is required to use the anthropic LLM. Please install it with the following command: pip install 'refuel-autolabel[anthropic]'"
            )

        # Resolve the model name, then merge config params over the defaults.
        self.model_name = config.model_name() or self.DEFAULT_MODEL
        self.model_params = {**self.DEFAULT_PARAMS, **config.model_params()}
        self.llm = ChatAnthropic(model=self.model_name, **self.model_params)
        self.tokenizer = tokenizer

    def _label(self, prompts: List[str]) -> RefuelLLMResult:
        """Label all prompts in one batched generate call, falling back to
        labeling them one at a time if the batched call fails."""
        # Wrap each prompt as a single-message chat conversation.
        prompts = [[HumanMessage(content=prompt)] for prompt in prompts]
        try:
            response = self.llm.generate(prompts)
        except Exception as e:
            # Batched call failed; retry each conversation individually so one
            # bad prompt does not sink the whole batch.
            return self._label_individually(prompts)
        return RefuelLLMResult(
            generations=response.generations,
            errors=[None] * len(response.generations),
        )

    def get_cost(self, prompt: str, label: Optional[str] = "") -> float:
        """Estimate the dollar cost of labeling ``prompt``.

        With no label provided, assumes the model may emit up to
        ``max_tokens_to_sample`` completion tokens (an upper bound).
        """
        prompt_tokens = self.tokenizer.count_tokens(prompt)
        completion_tokens = (
            self.tokenizer.count_tokens(label)
            if label
            else self.model_params["max_tokens_to_sample"]
        )
        return (
            prompt_tokens * self.COST_PER_PROMPT_TOKEN[self.model_name]
            + completion_tokens * self.COST_PER_COMPLETION_TOKEN[self.model_name]
        )

    def returns_token_probs(self) -> bool:
        """This wrapper does not expose per-token log probabilities."""
        return False
from typing import List, Optional
import os
from autolabel.models import BaseModel
from autolabel.configs import AutolabelConfig
from autolabel.cache import BaseCache
from autolabel.schema import RefuelLLMResult
class CohereLLM(BaseModel):
# Default parameters for OpenAILLM
DEFAULT_MODEL = "command"
DEFAULT_MODEL_PARAMS = {
"max_tokens": 512,
"temperature": 0.0,
}
# Reference: https://cohere.com/pricing
COST_PER_TOKEN = 15 / 1_000_000
def __init__(self, config: AutolabelConfig, cache: BaseCache = None) -> None:
super().__init__(config, cache)
try:
import cohere
from langchain.llms import Cohere
except ImportError:
raise ImportError(
"cohere is required to use the cohere LLM. Please install it with the following command: pip install 'refuel-autolabel[cohere]'"
)
# populate model name
self.model_name = config.model_name() or self.DEFAULT_MODEL
# populate model params and initialize the LLM
model_params = config.model_params()
self.model_params = {
**self.DEFAULT_MODEL_PARAMS,
**model_params,
}
self.llm = Cohere(model=self.model_name, **self.model_params)
self.co = cohere.Client(api_key=os.environ["COHERE_API_KEY"])
def _label(self, prompts: List[str]) -> RefuelLLMResult:
try:
result = self.llm.generate(prompts)
return RefuelLLMResult(
generations=result.generations, errors=[None] * len(result.generations)
)
except Exception as e:
return self._label_individually(prompts)
def get_cost(self, prompt: str, label: Optional[str] = "") -> float:
num_prompt_toks = len(self.co.tokenize(prompt).tokens)
if label:
num_label_toks = len(self.co.tokenize(label).tokens)
else:
num_label_toks = self.model_params["max_tokens"]
return self.COST_PER_TOKEN * (num_prompt_toks + num_label_toks)
def returns_token_probs(self) -> bool:
return False | /refuel-autolabel-0.0.15.tar.gz/refuel-autolabel-0.0.15/src/autolabel/models/cohere.py | 0.846133 | 0.206794 | cohere.py | pypi |
from collections import defaultdict
from typing import List, Dict, Tuple
import json
from langchain.prompts.prompt import PromptTemplate
from sklearn.metrics import accuracy_score
from autolabel.configs import AutolabelConfig
from autolabel.schema import LLMAnnotation, MetricResult
from autolabel.tasks import BaseTask
from autolabel.utils import get_format_variables
from autolabel.tasks.utils import filter_unlabeled_examples
from autolabel.metrics import (
AccuracyMetric,
AUROCMetric,
SupportMetric,
CompletionRateMetric,
ClassificationReportMetric,
BaseMetric,
)
class EntityMatchingTask(BaseTask):
    """Labeling task: decide whether two given entities are duplicates.

    The model must answer with exactly one of the configured label choices.
    Prompts are assembled from the config's example template plus the
    guideline/format string constants below.
    """

    DEFAULT_OUTPUT_GUIDELINES = (
        'You will return the answer with one element: "the correct option"\n'
    )
    DEFAULT_TASK_GUIDELINES = "Your job is to tell if the two given entities are duplicates or not. You will return the answer from one of the choices. Choices:\n{labels}\n"

    GENERATE_EXPLANATION_PROMPT = "You are an expert at providing a well reasoned explanation for the output of a given task. \n\nBEGIN TASK DESCRIPTION\n{task_guidelines}\nEND TASK DESCRIPTION\nYou will be given an input example and the corresponding output. Your job is to provide an explanation for why the output is correct for the task above.\nThink step by step and generate an explanation. The last line of the explanation should be - So, the answer is <label>.\n{labeled_example}\nExplanation: "

    GENERATE_DATASET_TEMPLATE = "{guidelines}\n\nThe inputs must be diverse, covering a wide range of scenarios. You will not generate duplicate inputs. These inputs should be organized in rows in csv format with the columns {columns}.\n\n{label_descriptions}\n\n{format_guidelines}\n\n{output_guidelines}\n\n```csv"
    DEFAULT_DATASET_GENERATION_GUIDELINES = "You are an expert at generating plausible inputs for a given task.\n\nBEGIN TASK DESCRIPTION\n{task_guidelines}\nEND TASK DESCRIPTION"
    LABEL_DESCRIPTIONS_PROMPT = "Each input should fall into one of these {num_labels} categories. These are the only categories that the inputs can belong to."
    GENERATE_DATASET_FORMAT_GUIDELINES = "Your response should be in csv format with the following columns: {columns}.\n\nHere is a template you can follow for your output:\n```csv\n{columns}\n{example_rows}\n```\n\nMake sure to replace the placeholder variables with your own values."
    GENERATE_DATASET_OUTPUT_GUIDELINES = 'Now I want you to generate {num_rows} excerpts that follow the guidelines and all belong to the "{label}" category. They should not belong to any of the other categories.'

    def __init__(self, config: AutolabelConfig) -> None:
        """Set up the default evaluation metrics.

        AUROC is appended only when confidence scores are enabled in the
        config.
        """
        super().__init__(config)
        self.metrics = [
            AccuracyMetric(),
            SupportMetric(),
            CompletionRateMetric(),
            ClassificationReportMetric(),
        ]

        if self.config.confidence():
            self.metrics.append(AUROCMetric())

    def construct_prompt(self, input: Dict, examples: List[Dict]) -> str:
        """Build the full labeling prompt for ``input``.

        ``examples`` are rendered as few-shot seed examples; the label and
        explanation columns of ``input`` are blanked out so the model fills
        them in.
        """
        # Copy over the input so that we can modify it
        input = input.copy()

        # prepare task guideline
        labels_list = self.config.labels_list()
        num_labels = len(labels_list)
        fmt_task_guidelines = self.task_guidelines.format(
            num_labels=num_labels, labels="\n".join(labels_list)
        )

        # prepare seed examples
        example_template = self.config.example_template()
        label_column = self.config.label_column()
        fmt_examples = []
        for eg in examples:
            eg_copy = eg.copy()
            # If chain of thought is enabled, the label is serialized as a
            # small JSON object so the parser can recover it.
            if label_column and self.config.chain_of_thought():
                eg_copy[label_column] = json.dumps({"label": eg[label_column]})
            fmt_examples.append(example_template.format_map(defaultdict(str, eg_copy)))

        # populate the current example in the prompt
        if label_column:
            input[label_column] = ""

        # populate the explanation column with empty string for current example
        explanation_column = self.config.explanation_column()
        if explanation_column:
            input[explanation_column] = ""

        # populate the current example in the prompt
        current_example = example_template.format_map(defaultdict(str, input))

        if self._is_few_shot_mode():
            return self.prompt_template.format(
                task_guidelines=fmt_task_guidelines,
                output_guidelines=self.output_guidelines,
                seed_examples="\n\n".join(fmt_examples),
                current_example=current_example,
            )
        else:
            return self.prompt_template.format(
                task_guidelines=fmt_task_guidelines,
                output_guidelines=self.output_guidelines,
                current_example=current_example,
            )

    def get_explanation_prompt(self, example: Dict) -> str:
        """Build the prompt asking the model to explain a labeled example."""
        pt = PromptTemplate(
            input_variables=get_format_variables(self.GENERATE_EXPLANATION_PROMPT),
            template=self.GENERATE_EXPLANATION_PROMPT,
        )

        # prepare task guideline
        labels_list = self.config.labels_list()
        num_labels = len(labels_list)
        fmt_task_guidelines = self.task_guidelines.format(
            num_labels=num_labels, labels="\n".join(labels_list)
        )

        # prepare labeled example
        example_template = self.config.example_template()
        fmt_example = example_template.format_map(defaultdict(str, example))

        return pt.format(
            task_guidelines=fmt_task_guidelines,
            labeled_example=fmt_example,
        )

    def get_generate_dataset_prompt(self, label: str) -> str:
        """Build the prompt asking the model to synthesize a csv dataset of
        inputs that all belong to ``label``."""
        pt = PromptTemplate(
            input_variables=get_format_variables(self.GENERATE_DATASET_TEMPLATE),
            template=self.GENERATE_DATASET_TEMPLATE,
        )

        # prepare task guideline
        labels_list = self.config.labels_list()
        num_labels = len(labels_list)
        fmt_task_guidelines = self.task_guidelines.format(
            num_labels=num_labels, labels="\n".join(labels_list)
        )
        fmt_guidelines = self.dataset_generation_guidelines.format(
            task_guidelines=fmt_task_guidelines
        )

        # prepare columns (the label column is generated, not provided)
        columns = get_format_variables(self.config.example_template())
        columns.remove(self.config.label_column())

        # prepare label descriptions, appending the optional per-label
        # description from the config when one exists
        fmt_label_descriptions = self.LABEL_DESCRIPTIONS_PROMPT.format(
            num_labels=num_labels
        )

        for i, l in enumerate(labels_list):
            fmt_label_descriptions += f"\n{i+1}. {l}{': ' + self.config.label_descriptions()[l] if self.config.label_descriptions() is not None and l in self.config.label_descriptions() else ''}"

        # prepare format: three placeholder rows illustrate the csv shape
        example_rows = "\n".join(
            [",".join([f'"{column}_{i+1}"' for column in columns]) for i in range(3)]
        )
        fmt_format_guidelines = self.GENERATE_DATASET_FORMAT_GUIDELINES.format(
            columns=",".join(columns), example_rows=example_rows
        )

        # prepare output guidelines
        fmt_output_guidelines = self.GENERATE_DATASET_OUTPUT_GUIDELINES.format(
            num_rows=self.config.dataset_generation_num_rows(), label=label
        )

        return pt.format(
            guidelines=fmt_guidelines,
            columns=columns,
            label_descriptions=fmt_label_descriptions,
            format_guidelines=fmt_format_guidelines,
            output_guidelines=fmt_output_guidelines,
        )

    def eval(
        self,
        llm_labels: List[LLMAnnotation],
        gt_labels: List[str],
        additional_metrics: List[BaseMetric] = [],  # NOTE(review): mutable default; only iterated here, but a None default would be safer
    ) -> List[MetricResult]:
        """Evaluate the LLM generated labels by comparing them against ground truth

        Args:
            llm_labels (List[LLMAnnotation]): labels predicted by the LLM
            gt_labels (List[str]): ground truth labels
            additional_metrics (List[BaseMetric], optional): List of additional metrics to run. Defaults to [].

        Returns:
            List[MetricResult]: list of metrics and corresponding values
        """

        eval_metrics = []

        for metric in self.metrics + additional_metrics:
            eval_metrics.extend(metric.compute(llm_labels, gt_labels))

        return eval_metrics
from collections import defaultdict
from typing import List, Dict, Tuple, Optional
import json
from langchain.prompts.prompt import PromptTemplate
from sklearn.metrics import accuracy_score
from autolabel.confidence import ConfidenceCalculator
from autolabel.configs import AutolabelConfig
from autolabel.schema import LLMAnnotation, MetricType, MetricResult, F1Type
from autolabel.tasks import BaseTask
from autolabel.tasks.utils import normalize_text
from autolabel.utils import get_format_variables
from autolabel.tasks.utils import filter_unlabeled_examples
from autolabel.metrics import (
AccuracyMetric,
AUROCMetric,
SupportMetric,
CompletionRateMetric,
F1Metric,
BaseMetric,
)
class QuestionAnsweringTask(BaseTask):
    """Labeling task: answer a question by choosing among provided options.

    Prompts are assembled from the config's example template plus the
    guideline string constants below. Dataset generation is not supported
    for this task type.
    """

    DEFAULT_OUTPUT_GUIDELINES = (
        'You will return the answer one element: "the correct label"\n'
    )
    DEFAULT_TASK_GUIDELINES = "Your job is to answer the following questions using the options provided for each question. Choose the best answer for the question.\n"
    # Sentinel emitted when the model could not produce a label.
    NULL_LABEL_TOKEN = "NO_LABEL"

    GENERATE_EXPLANATION_PROMPT = "You are an expert at providing a well reasoned explanation for the output of a given task. \n\nBEGIN TASK DESCRIPTION\n{task_guidelines}\nEND TASK DESCRIPTION\nYou will be given an input example and the corresponding output. You will be given a question and an answer. Your job is to provide an explanation for why the answer is correct for the task above.\nThink step by step and generate an explanation. The last line of the explanation should be - So, the answer is <label>.\n{labeled_example}\nExplanation: "

    def __init__(self, config: AutolabelConfig) -> None:
        """Set up the default evaluation metrics.

        Uses text-mode F1 (token overlap) alongside accuracy/support/
        completion-rate; AUROC is appended only when confidence scores are
        enabled in the config.
        """
        super().__init__(config)
        self.metrics = [
            AccuracyMetric(),
            SupportMetric(),
            CompletionRateMetric(),
            F1Metric(
                type=F1Type.TEXT,
            ),
        ]

        if self.config.confidence():
            self.metrics.append(AUROCMetric())

    def construct_prompt(self, input: Dict, examples: List[Dict]) -> str:
        """Build the full labeling prompt for ``input``.

        ``examples`` are rendered as few-shot seed examples; the label and
        explanation columns of ``input`` are blanked out so the model fills
        them in.
        """
        # Copy over the input so that we can modify it
        input = input.copy()

        # prepare seed examples
        example_template = self.config.example_template()
        label_column = self.config.label_column()
        fmt_examples = []
        for eg in examples:
            eg_copy = eg.copy()
            # If chain of thought is enabled, serialize the label as a small
            # JSON object so the parser can recover it.
            if label_column and self.config.chain_of_thought():
                eg_copy[label_column] = json.dumps({"label": eg[label_column]})
            fmt_examples.append(example_template.format_map(defaultdict(str, eg_copy)))

        # populate the current example in the prompt
        if label_column:
            input[label_column] = ""

        # populate the explanation column with empty string for current example
        explanation_column = self.config.explanation_column()
        if explanation_column:
            input[explanation_column] = ""

        # populate the current example in the prompt
        current_example = example_template.format_map(defaultdict(str, input))

        if self._is_few_shot_mode():
            return self.prompt_template.format(
                task_guidelines=self.task_guidelines,
                output_guidelines=self.output_guidelines,
                seed_examples="\n\n".join(fmt_examples),
                current_example=current_example,
            )
        else:
            return self.prompt_template.format(
                task_guidelines=self.task_guidelines,
                output_guidelines=self.output_guidelines,
                current_example=current_example,
            )

    def get_explanation_prompt(self, example: Dict) -> str:
        """Build the prompt asking the model to explain a labeled example."""
        pt = PromptTemplate(
            input_variables=get_format_variables(self.GENERATE_EXPLANATION_PROMPT),
            template=self.GENERATE_EXPLANATION_PROMPT,
        )
        example_template = self.config.example_template()
        fmt_example = example_template.format_map(defaultdict(str, example))

        return pt.format(
            task_guidelines=self.task_guidelines,
            labeled_example=fmt_example,
        )

    def get_generate_dataset_prompt(
        self, label: str, num_rows: int, guidelines: str = None
    ) -> str:
        raise NotImplementedError("Dataset generation not implemented for this task")

    def eval(
        self,
        llm_labels: List[LLMAnnotation],
        gt_labels: List[str],
        additional_metrics: Optional[List[BaseMetric]] = None,
    ) -> List[MetricResult]:
        """Evaluate the LLM generated labels by comparing them against ground truth

        Args:
            llm_labels (List[LLMAnnotation]): labels predicted by the LLM
            gt_labels (List[str]): ground truth labels
            additional_metrics (Optional[List[BaseMetric]], optional): extra
                metrics to run on top of the defaults. Defaults to None.

        Returns:
            List[MetricResult]: list of metrics and corresponding values
        """
        # Default is None (not a shared mutable []), which matches the
        # Optional annotation: previously, explicitly passing None raised a
        # TypeError on `self.metrics + None`.
        eval_metrics = []

        for metric in self.metrics + (additional_metrics or []):
            eval_metrics.extend(metric.compute(llm_labels, gt_labels))

        return eval_metrics
from collections import defaultdict
from typing import List, Dict, Tuple
from langchain.prompts.prompt import PromptTemplate
from autolabel.configs import AutolabelConfig
from autolabel.schema import LLMAnnotation, MetricType, MetricResult, F1Type
from autolabel.tasks import BaseTask
from autolabel.utils import get_format_variables
import json
from autolabel.metrics import (
AccuracyMetric,
AUROCMetric,
SupportMetric,
CompletionRateMetric,
F1Metric,
BaseMetric,
)
class MultilabelClassificationTask(BaseTask):
    """Labeling task: assign one or more categories to each input.

    The model returns a separator-joined list of labels (the separator
    comes from the config). Evaluation uses multi-label F1 in addition to
    the standard metrics.
    """

    DEFAULT_OUTPUT_GUIDELINES = 'You will return the answer as a semicolon-separated list of labels. For example: "label1;label2;label3"'
    DEFAULT_TASK_GUIDELINES = "Your job is to correctly label the provided input example into one or more of the following {num_labels} categories.\nCategories:\n{labels}\n"

    GENERATE_EXPLANATION_PROMPT = "You are an expert at providing a well reasoned explanation for the output of a given task. \n\nBEGIN TASK DESCRIPTION\n{task_guidelines}\nEND TASK DESCRIPTION\nYou will be given an input example and the corresponding output. Your job is to provide an explanation for why the output is correct for the task above.\nThink step by step and generate an explanation. The last line of the explanation should be - So, the answer is <label>.\n{labeled_example}\nExplanation: "

    def __init__(self, config: AutolabelConfig) -> None:
        """Set up the default evaluation metrics.

        AUROC is appended only when confidence scores are enabled in the
        config.
        """
        super().__init__(config)
        self.metrics = [
            AccuracyMetric(),
            SupportMetric(),
            CompletionRateMetric(),
            F1Metric(
                type=F1Type.MULTI_LABEL,
                labels=self.config.labels_list(),
                sep=self.config.label_separator(),
                average=[MetricType.F1_MACRO, MetricType.F1_WEIGHTED],
            ),
        ]

        if self.config.confidence():
            self.metrics.append(AUROCMetric())

    def construct_prompt(self, input: Dict, examples: List) -> str:
        """Build the full labeling prompt for ``input``.

        ``examples`` are rendered as few-shot seed examples; the label and
        explanation columns of ``input`` are blanked out so the model fills
        them in.
        """
        # Copy over the input so that we can modify it
        input = input.copy()

        # prepare task guideline
        labels_list = self.config.labels_list()
        num_labels = len(labels_list)
        fmt_task_guidelines = self.task_guidelines.format(
            num_labels=num_labels, labels="\n".join(labels_list)
        )

        # prepare seed examples
        example_template = self.config.example_template()
        label_column = self.config.label_column()
        fmt_examples = []
        for eg in examples:
            eg_copy = eg.copy()
            # If chain of thought is enabled
            # NOTE(review): sibling tasks serialize under the fixed key
            # "label"; here the key is the label column name — confirm the
            # downstream parser expects this.
            if label_column and self.config.chain_of_thought():
                eg_copy[label_column] = json.dumps({label_column: eg[label_column]})
            fmt_examples.append(example_template.format_map(defaultdict(str, eg_copy)))

        # populate the current example in the prompt
        if label_column:
            input[label_column] = ""

        # populate the explanation column with empty string for current example
        explanation_column = self.config.explanation_column()
        if explanation_column:
            input[explanation_column] = ""

        # populate the current example in the prompt
        current_example = example_template.format_map(defaultdict(str, input))

        if self._is_few_shot_mode():
            return self.prompt_template.format(
                task_guidelines=fmt_task_guidelines,
                output_guidelines=self.output_guidelines,
                seed_examples="\n\n".join(fmt_examples),
                current_example=current_example,
            )
        else:
            return self.prompt_template.format(
                task_guidelines=fmt_task_guidelines,
                output_guidelines=self.output_guidelines,
                current_example=current_example,
            )

    def get_explanation_prompt(self, example: Dict) -> str:
        """Build the prompt asking the model to explain a labeled example."""
        pt = PromptTemplate(
            input_variables=get_format_variables(self.GENERATE_EXPLANATION_PROMPT),
            template=self.GENERATE_EXPLANATION_PROMPT,
        )

        # prepare task guideline
        labels_list = self.config.labels_list()
        num_labels = len(labels_list)
        fmt_task_guidelines = self.task_guidelines.format(
            num_labels=num_labels, labels="\n".join(labels_list)
        )

        # prepare labeled example
        example_template = self.config.example_template()
        fmt_example = example_template.format_map(defaultdict(str, example))

        return pt.format(
            task_guidelines=fmt_task_guidelines,
            labeled_example=fmt_example,
        )

    def get_generate_dataset_prompt(
        self, label: str, num_rows: int, guidelines: str = None
    ) -> str:
        raise NotImplementedError("Dataset generation not implemented for this task")

    def eval(
        self,
        llm_labels: List[LLMAnnotation],
        gt_labels: List[str],
        additional_metrics: List[BaseMetric] = [],  # NOTE(review): mutable default; only iterated here, but a None default would be safer
    ) -> List[MetricResult]:
        """Evaluate the LLM generated labels by comparing them against ground truth

        Args:
            llm_labels (List[LLMAnnotation]): list of LLM generated labels
            gt_labels (List[str]): list of ground truth labels
            additional_metrics (List[BaseMetric], optional): list of additional metrics to compute. Defaults to [].

        Returns:
            List[MetricResult]: list of metrics and corresponding values
        """
        eval_metrics = []

        for metric in self.metrics + additional_metrics:
            eval_metrics.extend(metric.compute(llm_labels, gt_labels))

        return eval_metrics
from collections import defaultdict
from typing import List, Dict, Tuple
from langchain.prompts.prompt import PromptTemplate
from sklearn.metrics import accuracy_score
from autolabel.confidence import ConfidenceCalculator
from autolabel.configs import AutolabelConfig
from autolabel.schema import LLMAnnotation, MetricType, MetricResult
from autolabel.tasks import BaseTask
from autolabel.utils import get_format_variables
from autolabel.tasks.utils import filter_unlabeled_examples
from autolabel.metrics import (
AccuracyMetric,
AUROCMetric,
SupportMetric,
CompletionRateMetric,
ClassificationReportMetric,
BaseMetric,
)
import json
class ClassificationTask(BaseTask):
    """Labeling task: assign exactly one category to each input.

    The model must answer with exactly one of the configured labels.
    Prompts are assembled from the config's example template plus the
    guideline/format string constants below.
    """

    DEFAULT_OUTPUT_GUIDELINES = (
        'You will return the answer with just one element: "the correct label"'
    )
    DEFAULT_TASK_GUIDELINES = "Your job is to correctly label the provided input example into one of the following {num_labels} categories.\nCategories:\n{labels}\n"

    GENERATE_EXPLANATION_PROMPT = "You are an expert at providing a well reasoned explanation for the output of a given task. \n\nBEGIN TASK DESCRIPTION\n{task_guidelines}\nEND TASK DESCRIPTION\nYou will be given an input example and the corresponding output. Your job is to provide an explanation for why the output is correct for the task above.\nThink step by step and generate an explanation. The last line of the explanation should be - So, the answer is <label>.\n{labeled_example}\nExplanation: "

    GENERATE_DATASET_TEMPLATE = "{guidelines}\n\nThe inputs must be diverse, covering a wide range of scenarios. You will not generate duplicate inputs. These inputs should be organized in rows in csv format with the columns {columns}.\n\n{label_descriptions}\n\n{format_guidelines}\n\n{output_guidelines}\n\n```csv"
    DEFAULT_DATASET_GENERATION_GUIDELINES = "You are an expert at generating plausible inputs for a given task.\n\nBEGIN TASK DESCRIPTION\n{task_guidelines}\nEND TASK DESCRIPTION"
    LABEL_DESCRIPTIONS_PROMPT = "Each input should fall into one of these {num_labels} categories. These are the only categories that the inputs can belong to."
    GENERATE_DATASET_FORMAT_GUIDELINES = "Your response should be in csv format with the following columns: {columns}.\n\nHere is a template you can follow for your output:\n```csv\n{columns}\n{example_rows}\n```\n\nMake sure to replace the placeholder variables with your own values."
    GENERATE_DATASET_OUTPUT_GUIDELINES = 'Now I want you to generate {num_rows} excerpts that follow the guidelines and all belong to the "{label}" category. They should not belong to any of the other categories.'

    def __init__(self, config: AutolabelConfig) -> None:
        """Set up the default evaluation metrics.

        AUROC is appended only when confidence scores are enabled in the
        config.
        """
        super().__init__(config)
        self.metrics = [
            AccuracyMetric(),
            SupportMetric(),
            CompletionRateMetric(),
            ClassificationReportMetric(),
        ]

        if self.config.confidence():
            self.metrics.append(AUROCMetric())

    def construct_prompt(self, input: Dict, examples: List) -> str:
        """Build the full labeling prompt for ``input``.

        ``examples`` are rendered as few-shot seed examples; the label and
        explanation columns of ``input`` are blanked out so the model fills
        them in.
        """
        # Copy over the input so that we can modify it
        input = input.copy()

        # prepare task guideline
        labels_list = self.config.labels_list()
        num_labels = len(labels_list)
        fmt_task_guidelines = self.task_guidelines.format(
            num_labels=num_labels, labels="\n".join(labels_list)
        )

        # prepare seed examples
        example_template = self.config.example_template()
        label_column = self.config.label_column()
        fmt_examples = []
        for eg in examples:
            eg_copy = eg.copy()
            # If chain of thought is enabled, the label is serialized as a
            # small JSON object so the parser can recover it.
            if label_column and self.config.chain_of_thought():
                eg_copy[label_column] = json.dumps({"label": eg[label_column]})
            fmt_examples.append(example_template.format_map(defaultdict(str, eg_copy)))

        # populate the current example in the prompt
        if label_column:
            input[label_column] = ""

        # populate the explanation column with empty string for current example
        explanation_column = self.config.explanation_column()
        if explanation_column:
            input[explanation_column] = ""

        # populate the current example in the prompt
        current_example = example_template.format_map(defaultdict(str, input))

        if self._is_few_shot_mode():
            return self.prompt_template.format(
                task_guidelines=fmt_task_guidelines,
                output_guidelines=self.output_guidelines,
                seed_examples="\n\n".join(fmt_examples),
                current_example=current_example,
            )
        else:
            return self.prompt_template.format(
                task_guidelines=fmt_task_guidelines,
                output_guidelines=self.output_guidelines,
                current_example=current_example,
            )

    def get_explanation_prompt(self, example: Dict) -> str:
        """Build the prompt asking the model to explain a labeled example."""
        pt = PromptTemplate(
            input_variables=get_format_variables(self.GENERATE_EXPLANATION_PROMPT),
            template=self.GENERATE_EXPLANATION_PROMPT,
        )

        # prepare task guideline
        labels_list = self.config.labels_list()
        num_labels = len(labels_list)
        fmt_task_guidelines = self.task_guidelines.format(
            num_labels=num_labels, labels="\n".join(labels_list)
        )

        # prepare labeled example
        example_template = self.config.example_template()
        fmt_example = example_template.format_map(defaultdict(str, example))

        return pt.format(
            task_guidelines=fmt_task_guidelines,
            labeled_example=fmt_example,
        )

    def get_generate_dataset_prompt(self, label: str) -> str:
        """Build the prompt asking the model to synthesize a csv dataset of
        inputs that all belong to ``label``."""
        pt = PromptTemplate(
            input_variables=get_format_variables(self.GENERATE_DATASET_TEMPLATE),
            template=self.GENERATE_DATASET_TEMPLATE,
        )

        # prepare task guideline
        labels_list = self.config.labels_list()
        num_labels = len(labels_list)
        fmt_task_guidelines = self.task_guidelines.format(
            num_labels=num_labels, labels="\n".join(labels_list)
        )
        fmt_guidelines = self.dataset_generation_guidelines.format(
            task_guidelines=fmt_task_guidelines
        )

        # prepare columns (the label column is generated, not provided)
        columns = get_format_variables(self.config.example_template())
        columns.remove(self.config.label_column())

        # prepare label descriptions, appending the optional per-label
        # description from the config when one exists
        fmt_label_descriptions = self.LABEL_DESCRIPTIONS_PROMPT.format(
            num_labels=num_labels
        )

        for i, l in enumerate(labels_list):
            fmt_label_descriptions += f"\n{i+1}. {l}{': ' + self.config.label_descriptions()[l] if self.config.label_descriptions() is not None and l in self.config.label_descriptions() else ''}"

        # prepare format: three placeholder rows illustrate the csv shape
        example_rows = "\n".join(
            [",".join([f'"{column}_{i+1}"' for column in columns]) for i in range(3)]
        )
        fmt_format_guidelines = self.GENERATE_DATASET_FORMAT_GUIDELINES.format(
            columns=",".join(columns), example_rows=example_rows
        )

        # prepare output guidelines
        fmt_output_guidelines = self.GENERATE_DATASET_OUTPUT_GUIDELINES.format(
            num_rows=self.config.dataset_generation_num_rows(), label=label
        )

        return pt.format(
            guidelines=fmt_guidelines,
            columns=columns,
            label_descriptions=fmt_label_descriptions,
            format_guidelines=fmt_format_guidelines,
            output_guidelines=fmt_output_guidelines,
        )

    def eval(
        self,
        llm_labels: List[LLMAnnotation],
        gt_labels: List[str],
        additional_metrics: List[BaseMetric] = [],  # NOTE(review): mutable default; only iterated here, but a None default would be safer
    ) -> List[MetricResult]:
        """Evaluate the LLM generated labels by comparing them against ground truth

        Args:
            llm_labels (List[LLMAnnotation]): labels predicted by the LLM
            gt_labels (List[str]): ground truth labels
            additional_metrics (List[BaseMetric], optional): The additional metrics to run. Defaults to [].

        Returns:
            List[MetricResult]: list of metrics and corresponding values
        """
        eval_metrics = []

        for metric in self.metrics + additional_metrics:
            eval_metrics.extend(metric.compute(llm_labels, gt_labels))

        return eval_metrics
from __future__ import annotations
import heapq
from itertools import groupby
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type
from autolabel.database import create_db_engine
import numpy as np
import torch
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
from torch import Tensor
import pickle
from sqlalchemy.sql import text as sql_text
EMBEDDINGS_TABLE = "autolabel_embeddings"
def _results_to_docs_and_scores(results: Any) -> List[Tuple[Document, float]]:
    """Convert a raw query-result mapping into ``(Document, distance)`` pairs.

    ``results`` is expected to hold parallel lists under the keys
    "documents", "metadatas", and "distances", each wrapped in a
    single-query outer list (index ``[0]``).
    """
    texts = results["documents"][0]
    metadatas = results["metadatas"][0]
    distances = results["distances"][0]
    docs_and_scores = []
    for text, metadata, distance in zip(texts, metadatas, distances):
        # Missing/None metadata becomes an empty dict.
        docs_and_scores.append(
            (Document(page_content=text, metadata=metadata or {}), distance)
        )
    return docs_and_scores
def cos_sim(a: Tensor, b: Tensor) -> Tensor:
    """Compute the full pairwise cosine-similarity matrix of two batches.

    Accepts anything ``torch.tensor`` can convert; 1-D inputs are treated
    as a single row.

    Returns:
        Matrix ``out`` with ``out[i][j] = cos_sim(a[i], b[j])``.
    """

    def _to_matrix(t):
        # Coerce non-tensors and promote vectors to 1-row matrices.
        if not isinstance(t, torch.Tensor):
            t = torch.tensor(t)
        return t.unsqueeze(0) if t.dim() == 1 else t

    a_mat = _to_matrix(a)
    b_mat = _to_matrix(b)

    # Normalizing rows first lets a plain matrix product yield cosines.
    a_unit = torch.nn.functional.normalize(a_mat, p=2, dim=1)
    b_unit = torch.nn.functional.normalize(b_mat, p=2, dim=1)
    return a_unit @ b_unit.transpose(0, 1)
def semantic_search(
    query_embeddings: Tensor,
    corpus_embeddings: Tensor,
    query_chunk_size: int = 100,
    corpus_chunk_size: int = 500000,
    top_k: int = 10,
    score_function: Callable[[Tensor, Tensor], Tensor] = cos_sim,
):
    """Find the ``top_k`` most similar corpus entries for each query.

    Similarity is computed by ``score_function`` (cosine similarity by
    default). To bound memory use, queries and the corpus are processed
    ``query_chunk_size`` x ``corpus_chunk_size`` rows at a time, and a
    per-query min-heap of size ``top_k`` merges the best hits across
    corpus chunks.

    Implementation from this project: https://github.com/UKPLab/sentence-transformers

    Args:
        query_embeddings: 1-D or 2-D tensor (or numpy array / list of
            tensors) of query vectors.
        corpus_embeddings: tensor (or numpy array / list of tensors) of
            corpus vectors.
        query_chunk_size: number of queries scored per batch.
        corpus_chunk_size: number of corpus rows scored per batch.
        top_k: number of hits to keep per query.
        score_function: callable returning a (num_queries, num_corpus)
            score matrix.

    Returns:
        One list per query of ``{"corpus_id": int, "score": float}`` dicts,
        sorted by descending score.
    """
    # Normalize the query input to a 2-D torch tensor.
    if isinstance(query_embeddings, (np.ndarray, np.generic)):
        query_embeddings = torch.from_numpy(query_embeddings)
    elif isinstance(query_embeddings, list):
        query_embeddings = torch.stack(query_embeddings)

    if len(query_embeddings.shape) == 1:
        query_embeddings = query_embeddings.unsqueeze(0)

    # Normalize the corpus input to a torch tensor.
    if isinstance(corpus_embeddings, (np.ndarray, np.generic)):
        corpus_embeddings = torch.from_numpy(corpus_embeddings)
    elif isinstance(corpus_embeddings, list):
        corpus_embeddings = torch.stack(corpus_embeddings)

    # Check that corpus and queries are on the same device
    if corpus_embeddings.device != query_embeddings.device:
        query_embeddings = query_embeddings.to(corpus_embeddings.device)

    queries_result_list = [[] for _ in range(len(query_embeddings))]

    for query_start_idx in range(0, len(query_embeddings), query_chunk_size):
        # Iterate over chunks of the corpus
        for corpus_start_idx in range(0, len(corpus_embeddings), corpus_chunk_size):
            # Compute cosine similarities
            cos_scores = score_function(
                query_embeddings[query_start_idx : query_start_idx + query_chunk_size],
                corpus_embeddings[
                    corpus_start_idx : corpus_start_idx + corpus_chunk_size
                ],
            )

            # Get top-k scores within this chunk. ``sorted=False`` is fine:
            # the heap below merges and the final pass sorts.
            cos_scores_top_k_values, cos_scores_top_k_idx = torch.topk(
                cos_scores,
                min(top_k, len(cos_scores[0])),
                dim=1,
                largest=True,
                sorted=False,
            )
            cos_scores_top_k_values = cos_scores_top_k_values.cpu().tolist()
            cos_scores_top_k_idx = cos_scores_top_k_idx.cpu().tolist()

            for query_itr in range(len(cos_scores)):
                for sub_corpus_id, score in zip(
                    cos_scores_top_k_idx[query_itr], cos_scores_top_k_values[query_itr]
                ):
                    # Translate chunk-local indices back to global corpus ids.
                    corpus_id = corpus_start_idx + sub_corpus_id
                    query_id = query_start_idx + query_itr
                    if len(queries_result_list[query_id]) < top_k:
                        heapq.heappush(
                            queries_result_list[query_id], (score, corpus_id)
                        )  # heapq orders tuples by their first element (the score), so the worst hit sits at the root
                    else:
                        # Heap is full: push the new hit and evict the worst.
                        heapq.heappushpop(
                            queries_result_list[query_id], (score, corpus_id)
                        )

    # change the data format and sort by descending score
    for query_id in range(len(queries_result_list)):
        for doc_itr in range(len(queries_result_list[query_id])):
            score, corpus_id = queries_result_list[query_id][doc_itr]
            queries_result_list[query_id][doc_itr] = {
                "corpus_id": corpus_id,
                "score": score,
            }
        queries_result_list[query_id] = sorted(
            queries_result_list[query_id], key=lambda x: x["score"], reverse=True
        )

    return queries_result_list
class VectorStoreWrapper(VectorStore):
    def __init__(
        self,
        embedding_function: Optional[Embeddings] = None,
        corpus_embeddings: Optional[Tensor] = None,
        texts: Optional[List[str]] = None,
        metadatas: Optional[List[Dict[str, str]]] = None,
        cache: bool = True,
    ) -> None:
        """In-memory vector store with an optional persistent embedding cache.

        Args:
            embedding_function: embedder used to turn texts into vectors.
            corpus_embeddings: pre-computed corpus vectors, if any.
            texts: corpus texts corresponding to ``corpus_embeddings``.
            metadatas: per-text metadata dicts.
            cache: when True, embeddings are memoized in the
                ``autolabel_embeddings`` table of the project database so
                repeated runs skip re-embedding identical texts.
        """
        self._embedding_function = embedding_function
        self._corpus_embeddings = corpus_embeddings
        self._texts = texts
        self._metadatas = metadatas
        if cache:
            # Create the cache table up front so later lookups can assume
            # it exists. create_db_engine() is project-provided; the
            # backing database is not visible from this module.
            self._db_engine = create_db_engine()
            with self._db_engine.connect() as conn:
                query = f"CREATE TABLE IF NOT EXISTS {EMBEDDINGS_TABLE} (embedding_function TEXT, text TEXT, embedding BLOB)"
                conn.execute(sql_text(query))
                conn.commit()
        else:
            # No engine: every embedding lookup recomputes from scratch.
            self._db_engine = None
    def _get_embeddings(self, texts: Iterable[str]) -> List[List[float]]:
        """Get embeddings from the database. If not found, compute them and add them to the database.
        If no database is used, compute the embeddings and return them.

        Args:
            texts (Iterable[str]): Iterable of texts to embed.

        Returns:
            List[List[float]]: List of embeddings, in the same order as ``texts``.
        """
        if self._db_engine:
            with self._db_engine.connect() as conn:
                embeddings = []
                uncached_texts = []
                uncached_texts_indices = []
                # One SELECT per text; ``None`` placeholders mark cache
                # misses so computed embeddings can be written back by
                # index below, preserving input order.
                for idx, text in enumerate(texts):
                    query = sql_text(
                        f"SELECT embedding FROM {EMBEDDINGS_TABLE} WHERE embedding_function = :x AND text = :y",
                    )
                    # Cache key is the embedder's model identifier; the
                    # HuggingFace wrapper exposes it as ``model_name``
                    # rather than ``model``.
                    params = {
                        "x": self._embedding_function.model
                        if self._embedding_function.__class__.__name__
                        != "HuggingFaceEmbeddings"
                        else self._embedding_function.model_name,
                        "y": text,
                    }
                    result = conn.execute(query, params).fetchone()
                    if result:
                        # Embeddings are stored pickled in the BLOB column.
                        embeddings.append(pickle.loads(result[0]))
                    else:
                        embeddings.append(None)
                        uncached_texts.append(text)
                        uncached_texts_indices.append(idx)

                # NOTE(review): embed_documents is invoked even when every
                # text was cached (uncached_texts == []) — presumably it
                # handles an empty list cheaply; confirm.
                uncached_embeddings = self._embedding_function.embed_documents(
                    uncached_texts
                )
                self._add_embeddings_to_cache(uncached_texts, uncached_embeddings)

                # Fill the cache-miss slots in their original positions.
                for idx, embedding in zip(uncached_texts_indices, uncached_embeddings):
                    embeddings[idx] = embedding
                return embeddings
        else:
            return self._embedding_function.embed_documents(list(texts))
    def _add_embeddings_to_cache(
        self, texts: Iterable[str], embeddings: List[List[float]]
    ) -> None:
        """Save embeddings to the database. If self._db_engine is None, do nothing.

        Args:
            texts (Iterable[str]): Iterable of texts.
            embeddings (List[List[float]]): List of embeddings, parallel to ``texts``.
        """
        if self._db_engine:
            with self._db_engine.connect() as conn:
                for text, embedding in zip(texts, embeddings):
                    query = sql_text(
                        f"INSERT INTO {EMBEDDINGS_TABLE} (embedding_function, text, embedding) VALUES (:x, :y, :z)"
                    )
                    # Same cache-key convention as _get_embeddings: the
                    # HuggingFace wrapper names its model ``model_name``.
                    params = {
                        "x": self._embedding_function.model
                        if self._embedding_function.__class__.__name__
                        != "HuggingFaceEmbeddings"
                        else self._embedding_function.model_name,
                        "y": text,
                        # Embeddings are pickled into the BLOB column.
                        "z": pickle.dumps(embedding),
                    }
                    conn.execute(query, params)
                # Single commit covers all inserted rows.
                conn.commit()
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[Dict[str, str]]] = None,
) -> List[str]:
"""Run texts through the embeddings and add to the vectorstore. Currently, the vectorstore is reinitialized each time, because we do not require a persistent vector store for example selection.
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
Returns:
List[str]: List of IDs of the added texts.
"""
if self._embedding_function is not None:
embeddings = self._get_embeddings(texts)
self._corpus_embeddings = torch.tensor(embeddings)
self._texts = texts
self._metadatas = metadatas
return metadatas
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Run semantic similarity search.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of documents most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_score(query, k, filter=filter)
return [doc for doc, _ in docs_and_scores]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Run semantic similarity search and retrieve distances.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of documents most similar to the query
text with distance in float.
"""
query_embeddings = torch.tensor([self._get_embeddings([query])[0]])
result_ids_and_scores = semantic_search(
corpus_embeddings=self._corpus_embeddings,
query_embeddings=query_embeddings,
top_k=k,
)
result_ids = [result["corpus_id"] for result in result_ids_and_scores[0]]
scores = [result["score"] for result in result_ids_and_scores[0]]
results = {}
results["documents"] = [[self._texts[index] for index in result_ids]]
results["distances"] = [scores]
results["metadatas"] = [[self._metadatas[index] for index in result_ids]]
return _results_to_docs_and_scores(results)
def label_diversity_similarity_search(
self,
query: str,
label_key: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Run semantic similarity search.
Args:
query (str): Query text to search for.
k (int): Number of results to return per label.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of documents most similar to the query text.
"""
docs_and_scores = self.label_diversity_similarity_search_with_score(
query, label_key, k, filter=filter
)
return [doc for doc, _ in docs_and_scores]
def label_diversity_similarity_search_with_score(
self,
query: str,
label_key: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Run semantic similarity search and retrieve distances.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of documents most similar to the query
text with distance in float.
"""
query_embeddings = torch.tensor([self._get_embeddings([query])[0]])
data = []
data = zip(self._corpus_embeddings, self._texts, self._metadatas)
sorted_data = sorted(data, key=lambda item: item[2].get(label_key))
documents = []
scores = []
metadatas = []
for label, label_examples in groupby(
sorted_data, key=lambda item: item[2].get(label_key)
):
label_examples_list = list(label_examples)
label_embeddings = list(
map(lambda label_example: label_example[0], label_examples_list)
)
label_texts = list(
map(lambda label_example: label_example[1], label_examples_list)
)
label_metadatas = list(
map(lambda label_example: label_example[2], label_examples_list)
)
result_ids_and_scores = semantic_search(
corpus_embeddings=label_embeddings,
query_embeddings=query_embeddings,
top_k=k,
)
result_ids = [result["corpus_id"] for result in result_ids_and_scores[0]]
documents.extend([label_texts[index] for index in result_ids])
metadatas.extend([label_metadatas[index] for index in result_ids])
scores.extend([result["score"] for result in result_ids_and_scores[0]])
results = {}
results["documents"] = [documents]
results["distances"] = [scores]
results["metadatas"] = [metadatas]
return _results_to_docs_and_scores(results)
def max_marginal_relevance_search_by_vector(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
query_embedding = self._get_embeddings([query])[0]
query_embeddings = torch.tensor([query_embedding])
result_ids_and_scores = semantic_search(
corpus_embeddings=self._corpus_embeddings,
query_embeddings=query_embeddings,
top_k=fetch_k,
)
result_ids = [result["corpus_id"] for result in result_ids_and_scores[0]]
scores = [result["score"] for result in result_ids_and_scores[0]]
fetched_embeddings = torch.index_select(
input=self._corpus_embeddings, dim=0, index=torch.tensor(result_ids)
).tolist()
mmr_selected = maximal_marginal_relevance(
np.array([query_embedding], dtype=np.float32),
fetched_embeddings,
k=k,
lambda_mult=lambda_mult,
)
selected_result_ids = [result_ids[i] for i in mmr_selected]
selected_scores = [scores[i] for i in mmr_selected]
results = {}
results["documents"] = [[self._texts[index] for index in selected_result_ids]]
results["distances"] = [selected_scores]
results["metadatas"] = [
[self._metadatas[index] for index in selected_result_ids]
]
return _results_to_docs_and_scores(results)
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
docs_and_scores = self.max_marginal_relevance_search_by_vector(
query, k, fetch_k, lambda_mult=lambda_mult
)
return [doc for doc, _ in docs_and_scores]
@classmethod
def from_texts(
cls: Type[VectorStoreWrapper],
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
cache: bool = True,
**kwargs: Any,
) -> VectorStoreWrapper:
"""Create a vectorstore from raw text.
The data will be ephemeral in-memory.
Args:
texts (List[str]): List of texts to add to the collection.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
cache (bool): Whether to cache the embeddings. Defaults to True.
Returns:
vector_store: Vectorstore with seedset embeddings
"""
vector_store = cls(
embedding_function=embedding,
corpus_embeddings=None,
texts=None,
cache=cache,
**kwargs,
)
vector_store.add_texts(texts=texts, metadatas=metadatas)
return vector_store | /refuel-autolabel-0.0.15.tar.gz/refuel-autolabel-0.0.15/src/autolabel/few_shot/vector_store.py | 0.918015 | 0.394901 | vector_store.py | pypi |
from __future__ import annotations
import math
from itertools import groupby
from operator import itemgetter
from typing import Any, Dict, List, Optional, Type
from langchain.embeddings.base import Embeddings
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.vectorstores.base import VectorStore
from pydantic import BaseModel, Extra
def sorted_values(values: Dict[str, str]) -> List[Any]:
"""Return a list of values in dict sorted by key."""
return [values[val] for val in sorted(values)]
class LabelDiversityRandomExampleSelector(BaseExampleSelector, BaseModel):
"""Example selector that selects examples based on label diversity at random."""
examples: List[dict]
"""A list of the examples that the prompt template expects."""
k: int = 4
"""Number of examples to select."""
label_key: str
"""Name of the label column/key."""
num_labels: int
"""Number of different labels."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def add_example(self, example: Dict[str, str]) -> None:
self.examples.append(example)
def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
selected_examples = []
sorted_examples = sorted(self.examples, key=itemgetter(self.label_key))
num_examples_per_label = math.ceil(self.k / self.num_labels)
for label, label_examples in groupby(
sorted_examples, key=itemgetter(self.label_key)
):
label_examples_list = list(label_examples)
selected_examples.extend(label_examples_list[:num_examples_per_label])
return selected_examples[: self.k]
@classmethod
def from_examples(
cls,
examples: List[dict],
label_key: str,
num_labels: int,
k: int = 4,
) -> LabelDiversityRandomExampleSelector:
"""Create label diversity example selector using example list and embeddings.
Args:
examples: List of examples to use in the prompt.
k: Number of examples to select per label
label_key: Determines which variable corresponds to the example's label
Returns:
The ExampleSelector instantiated
"""
return cls(k=k, examples=examples, label_key=label_key, num_labels=num_labels)
class LabelDiversitySimilarityExampleSelector(BaseExampleSelector, BaseModel):
"""ExampleSelector that selects examples based on label diversity, while choosing the most similar examples for each label"""
vectorstore: VectorStore
"""VectorStore than contains information about examples."""
k: int = 4
"""Number of examples to select."""
input_keys: Optional[List[str]] = None
"""Optional keys to filter input to. If provided, the search is based on
the input variables instead of all variables."""
label_key: str
"""Name of the label column/key."""
num_labels: int
"""Number of different labels."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def add_example(self, example: Dict[str, str]) -> str:
"""Add new example to vectorstore."""
if self.input_keys:
string_example = " ".join(
sorted_values({key: example[key] for key in self.input_keys})
)
else:
string_example = " ".join(sorted_values(example))
ids = self.vectorstore.add_texts([string_example], metadatas=[example])
return ids[0]
def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
"""Select which examples to use based on label diversity and semantic similarity."""
# Get the docs with the highest similarity for each label.
if self.input_keys:
input_variables = {key: input_variables[key] for key in self.input_keys}
query = " ".join(sorted_values(input_variables))
num_examples_per_label = math.ceil(self.k / self.num_labels)
example_docs = self.vectorstore.label_diversity_similarity_search(
query, self.label_key, k=num_examples_per_label
)
# Get the examples from the metadata.
# This assumes that examples are stored in metadata.
examples = [dict(e.metadata) for e in example_docs]
return examples[: self.k]
@classmethod
def from_examples(
cls,
examples: List[dict],
label_key: str,
embeddings: Embeddings,
vectorstore_cls: Type[VectorStore],
num_labels: int,
k: int = 4,
input_keys: Optional[List[str]] = None,
**vectorstore_cls_kwargs: Any,
) -> LabelDiversitySimilarityExampleSelector:
"""Create k-shot example selector using example list and embeddings, taking both label diversity and semantic similarity into account.
Args:
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select
input_keys: If provided, the search is based on the input variables
instead of all variables.
label_key: The column name corresponding to the label
vectorstore_cls_kwargs: optional kwargs containing url for vector store
Returns:
The ExampleSelector instantiated, backed by a vector store.
"""
if input_keys:
string_examples = [
" ".join(sorted_values({k: eg[k] for k in input_keys}))
for eg in examples
]
else:
string_examples = [" ".join(sorted_values(eg)) for eg in examples]
vectorstore = vectorstore_cls.from_texts(
string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs
)
return cls(
vectorstore=vectorstore,
k=k,
input_keys=input_keys,
label_key=label_key,
num_labels=num_labels,
) | /refuel-autolabel-0.0.15.tar.gz/refuel-autolabel-0.0.15/src/autolabel/few_shot/label_diversity_example_selector.py | 0.956074 | 0.509276 | label_diversity_example_selector.py | pypi |
import logging
from typing import Dict, List
from autolabel.configs import AutolabelConfig
from autolabel.schema import FewShotAlgorithm, ModelProvider
from langchain.embeddings import (
CohereEmbeddings,
HuggingFaceEmbeddings,
OpenAIEmbeddings,
VertexAIEmbeddings,
)
from langchain.embeddings.base import Embeddings
from langchain.prompts.example_selector import (
MaxMarginalRelevanceExampleSelector,
SemanticSimilarityExampleSelector,
)
from langchain.prompts.example_selector.base import BaseExampleSelector
from .fixed_example_selector import FixedExampleSelector
from .label_diversity_example_selector import (
LabelDiversityRandomExampleSelector,
LabelDiversitySimilarityExampleSelector,
)
from .vector_store import VectorStoreWrapper
ALGORITHM_TO_IMPLEMENTATION: Dict[FewShotAlgorithm, BaseExampleSelector] = {
FewShotAlgorithm.FIXED: FixedExampleSelector,
FewShotAlgorithm.SEMANTIC_SIMILARITY: SemanticSimilarityExampleSelector,
FewShotAlgorithm.MAX_MARGINAL_RELEVANCE: MaxMarginalRelevanceExampleSelector,
FewShotAlgorithm.LABEL_DIVERSITY_RANDOM: LabelDiversityRandomExampleSelector,
FewShotAlgorithm.LABEL_DIVERSITY_SIMILARITY: LabelDiversitySimilarityExampleSelector,
}
DEFAULT_EMBEDDING_PROVIDER = OpenAIEmbeddings
PROVIDER_TO_MODEL: Dict[ModelProvider, Embeddings] = {
ModelProvider.OPENAI: OpenAIEmbeddings,
ModelProvider.GOOGLE: VertexAIEmbeddings,
ModelProvider.HUGGINGFACE_PIPELINE: HuggingFaceEmbeddings,
ModelProvider.COHERE: CohereEmbeddings,
}
logger = logging.getLogger(__name__)
class ExampleSelectorFactory:
CANDIDATE_EXAMPLES_FACTOR = 5
MAX_CANDIDATE_EXAMPLES = 100
@staticmethod
def initialize_selector(
config: AutolabelConfig,
examples: List[Dict],
columns: List[str],
cache: bool = True,
) -> BaseExampleSelector:
algorithm = config.few_shot_algorithm()
if not algorithm:
return None
try:
algorithm = FewShotAlgorithm(algorithm)
except ValueError as e:
logger.error(
f"{algorithm} is not in the list of supported few-shot algorithms: \
{ALGORITHM_TO_IMPLEMENTATION.keys()}"
)
return None
num_examples = config.few_shot_num_examples()
params = config.vector_store_params()
params["examples"] = examples
params["k"] = num_examples
if algorithm in [
FewShotAlgorithm.SEMANTIC_SIMILARITY,
FewShotAlgorithm.MAX_MARGINAL_RELEVANCE,
FewShotAlgorithm.LABEL_DIVERSITY_SIMILARITY,
]:
model_provider = config.embedding_provider()
embedding_model_class = PROVIDER_TO_MODEL.get(
model_provider, DEFAULT_EMBEDDING_PROVIDER
)
model_name = config.embedding_model_name()
if model_name:
embedding_model = embedding_model_class(model_name=model_name)
else:
embedding_model = embedding_model_class()
params["embeddings"] = embedding_model
params["vectorstore_cls"] = VectorStoreWrapper
input_keys = [
x
for x in columns
if x not in [config.label_column(), config.explanation_column()]
]
params["input_keys"] = input_keys
if algorithm == FewShotAlgorithm.MAX_MARGINAL_RELEVANCE:
params["fetch_k"] = min(
ExampleSelectorFactory.MAX_CANDIDATE_EXAMPLES,
ExampleSelectorFactory.CANDIDATE_EXAMPLES_FACTOR * params["k"],
)
if algorithm in [
FewShotAlgorithm.LABEL_DIVERSITY_RANDOM,
FewShotAlgorithm.LABEL_DIVERSITY_SIMILARITY,
]:
params["label_key"] = config.label_column()
params["num_labels"] = len(config.labels_list())
example_cls = ALGORITHM_TO_IMPLEMENTATION[algorithm]
if algorithm not in [
FewShotAlgorithm.FIXED,
FewShotAlgorithm.LABEL_DIVERSITY_RANDOM,
]:
params["cache"] = cache
return example_cls.from_examples(**params) | /refuel-autolabel-0.0.15.tar.gz/refuel-autolabel-0.0.15/src/autolabel/few_shot/__init__.py | 0.639624 | 0.194483 | __init__.py | pypi |
from sqlalchemy.orm import sessionmaker
from typing import Dict, Any
from autolabel.transforms.schema import TransformCacheEntry
from autolabel.database import create_db_engine
from autolabel.data_models import Base
from typing import Optional
from autolabel.data_models import TransformCacheEntryModel
from .base import BaseCache
import logging
logger = logging.getLogger(__name__)
class SQLAlchemyTransformCache(BaseCache):
"""
A cache system implemented with SQL Alchemy for storing the output of transforms.
This cache system is used to avoid re-computing the output of transforms that have already been computed.
This currently stores the input and the outputs of the transform.
Caching is based on the transform name, params and input.
"""
def __init__(self):
self.engine = create_db_engine()
self.base = Base
self.session = None
self.initialize()
def initialize(self):
self.base.metadata.create_all(self.engine)
self.session = sessionmaker(bind=self.engine)()
def lookup(self, entry: TransformCacheEntry) -> Optional[Dict[str, Any]]:
"""Retrieves an entry from the Cache. Returns None if not found.
Args:
entry: TransformCacheEntry we wish to retrieve from the Cache
Returns:
result: The output of the transform for this input. None if not found.
"""
cache_entry = TransformCacheEntryModel.get(self.session, entry)
if cache_entry is None:
return None
return cache_entry.output
def update(self, entry: TransformCacheEntry) -> None:
"""Inserts the provided TransformCacheEntry into the Cache, overriding it if it already exists
Args:
entry: TransformCacheEntry we wish to put into the Cache
"""
TransformCacheEntryModel.insert(self.session, entry)
def clear(self, use_ttl: bool = True) -> None:
"""Clears the entire Cache based on ttl"""
TransformCacheEntryModel.clear(self.session, use_ttl=use_ttl) | /refuel-autolabel-0.0.15.tar.gz/refuel-autolabel-0.0.15/src/autolabel/cache/sqlalchemy_transform_cache.py | 0.91637 | 0.242878 | sqlalchemy_transform_cache.py | pypi |
from functools import cached_property
from typing import Dict, List, Union
from jsonschema import validate
from .base import BaseConfig
class AutolabelConfig(BaseConfig):
"""Class to parse and store configs passed to Autolabel agent."""
# Top-level config keys
TASK_NAME_KEY = "task_name"
TASK_TYPE_KEY = "task_type"
DATASET_CONFIG_KEY = "dataset"
MODEL_CONFIG_KEY = "model"
EMBEDDING_CONFIG_KEY = "embedding"
PROMPT_CONFIG_KEY = "prompt"
DATASET_GENERATION_CONFIG_KEY = "dataset_generation"
# Dataset config keys (config["dataset"][<key>])
LABEL_COLUMN_KEY = "label_column"
LABEL_SEPARATOR_KEY = "label_separator"
EXPLANATION_COLUMN_KEY = "explanation_column"
TEXT_COLUMN_KEY = "text_column"
DELIMITER_KEY = "delimiter"
# Model config keys (config["model"][<key>])
PROVIDER_KEY = "provider"
MODEL_NAME_KEY = "name"
MODEL_PARAMS_KEY = "params"
COMPUTE_CONFIDENCE_KEY = "compute_confidence"
LOGIT_BIAS_KEY = "logit_bias"
# Embedding config keys (config["embedding"][<key>])
EMBEDDING_PROVIDER_KEY = "provider"
EMBEDDING_MODEL_NAME_KEY = "model"
# Prompt config keys (config["prompt"][<key>])
TASK_GUIDELINE_KEY = "task_guidelines"
VALID_LABELS_KEY = "labels"
FEW_SHOT_EXAMPLE_SET_KEY = "few_shot_examples"
FEW_SHOT_SELECTION_ALGORITHM_KEY = "few_shot_selection"
FEW_SHOT_NUM_KEY = "few_shot_num"
VECTOR_STORE_PARAMS_KEY = "vector_store_params"
EXAMPLE_TEMPLATE_KEY = "example_template"
OUTPUT_GUIDELINE_KEY = "output_guidelines"
OUTPUT_FORMAT_KEY = "output_format"
CHAIN_OF_THOUGHT_KEY = "chain_of_thought"
TRANSFORM_KEY = "transforms"
# Dataset generation config keys (config["dataset_generation"][<key>])
DATASET_GENERATION_GUIDELINES_KEY = "guidelines"
DATASET_GENERATION_NUM_ROWS_KEY = "num_rows"
def __init__(self, config: Union[str, Dict], validate: bool = True) -> None:
super().__init__(config, validate=validate)
def _validate(self) -> bool:
"""Returns true if the config settings are valid"""
from autolabel.configs.schema import schema
validate(
instance=self.config,
schema=schema,
)
return True
@cached_property
def _dataset_config(self) -> Dict:
"""Returns information about the dataset being used for labeling (e.g. label_column, text_column, delimiter)"""
return self.config.get(self.DATASET_CONFIG_KEY, {})
@cached_property
def _model_config(self) -> Dict:
"""Returns information about the model being used for labeling (e.g. provider name, model name, parameters)"""
return self.config[self.MODEL_CONFIG_KEY]
@cached_property
def _embedding_config(self) -> Dict:
"""Returns information about the model being used for computing embeddings (e.g. provider name, model name)"""
return self.config.get(self.EMBEDDING_CONFIG_KEY, {})
@cached_property
def _prompt_config(self) -> Dict:
"""Returns information about the prompt we are passing to the model (e.g. task guidelines, examples, output formatting)"""
return self.config[self.PROMPT_CONFIG_KEY]
@cached_property
def _dataset_generation_config(self) -> Dict:
"""Returns information about the prompt for synthetic dataset generation"""
return self.config.get(self.DATASET_GENERATION_CONFIG_KEY, {})
# project and task definition config
def task_name(self) -> str:
return self.config[self.TASK_NAME_KEY]
def task_type(self) -> str:
"""Returns the type of task we have configured the labeler to perform (e.g. Classification, Question Answering)"""
return self.config[self.TASK_TYPE_KEY]
# Dataset config
def label_column(self) -> str:
"""Returns the name of the column containing labels for the dataset. Used for comparing accuracy of autolabel results vs ground truth"""
return self._dataset_config.get(self.LABEL_COLUMN_KEY, None)
def label_separator(self) -> str:
"""Returns the token used to seperate multiple labels in the dataset. Defaults to a semicolon ';'"""
return self._dataset_config.get(self.LABEL_SEPARATOR_KEY, ";")
def text_column(self) -> str:
"""Returns the name of the column containing text data we intend to label"""
return self._dataset_config.get(self.TEXT_COLUMN_KEY, None)
def explanation_column(self) -> str:
"""Returns the name of the column containing an explanation as to why the data is labeled a certain way"""
return self._dataset_config.get(self.EXPLANATION_COLUMN_KEY, None)
def delimiter(self) -> str:
"""Returns the token used to seperate cells in the dataset. Defaults to a comma ','"""
return self._dataset_config.get(self.DELIMITER_KEY, ",")
# Model config
def provider(self) -> str:
"""Returns the name of the entity that provides the currently configured model (e.g. OpenAI, Anthropic, Refuel)"""
return self._model_config[self.PROVIDER_KEY]
def model_name(self) -> str:
"""Returns the name of the model being used for labeling (e.g. gpt-4, claude-v1)"""
return self._model_config[self.MODEL_NAME_KEY]
def model_params(self) -> Dict:
"""Returns a dict of configured settings for the model (e.g. hyperparameters)"""
return self._model_config.get(self.MODEL_PARAMS_KEY, {})
def confidence(self) -> bool:
"""Returns true if the model is able to return a confidence score along with its predictions"""
return self._model_config.get(self.COMPUTE_CONFIDENCE_KEY, False)
def logit_bias(self) -> float:
"""Returns the logit bias for the labels specified in the config"""
return self._model_config.get(self.LOGIT_BIAS_KEY, 0.0)
# Embedding config
def embedding_provider(self) -> str:
"""Returns the name of the entity that provides the model used for computing embeddings"""
return self._embedding_config.get(self.EMBEDDING_PROVIDER_KEY, self.provider())
def embedding_model_name(self) -> str:
"""Returns the name of the model being used for computing embeddings (e.g. sentence-transformers/all-mpnet-base-v2)"""
return self._embedding_config.get(self.EMBEDDING_MODEL_NAME_KEY, None)
# Prompt config
def task_guidelines(self) -> str:
return self._prompt_config.get(self.TASK_GUIDELINE_KEY, "")
def labels_list(self) -> List[str]:
"""Returns a list of valid labels"""
if isinstance(self._prompt_config.get(self.VALID_LABELS_KEY, []), List):
return self._prompt_config.get(self.VALID_LABELS_KEY, [])
else:
return self._prompt_config.get(self.VALID_LABELS_KEY, {}).keys()
def label_descriptions(self) -> Dict[str, str]:
"""Returns a dict of label descriptions"""
if isinstance(self._prompt_config.get(self.VALID_LABELS_KEY, []), List):
return {}
else:
return self._prompt_config.get(self.VALID_LABELS_KEY, {})
def few_shot_example_set(self) -> Union[str, List]:
"""Returns examples of how data should be labeled, used to guide context to the model about the task it is performing"""
return self._prompt_config.get(self.FEW_SHOT_EXAMPLE_SET_KEY, [])
def few_shot_algorithm(self) -> str:
"""Returns which algorithm is being used to construct the set of examples being given to the model about the labeling task"""
return self._prompt_config.get(self.FEW_SHOT_SELECTION_ALGORITHM_KEY, None)
def few_shot_num_examples(self) -> int:
"""Returns how many examples should be given to the model in its instruction prompt"""
return self._prompt_config.get(self.FEW_SHOT_NUM_KEY, 0)
def vector_store_params(self) -> Dict:
"""Returns any parameters to be passed to the vector store"""
return self._prompt_config.get(self.VECTOR_STORE_PARAMS_KEY, {})
def example_template(self) -> str:
"""Returns a string containing a template for how examples will be formatted in the prompt"""
example_template = self._prompt_config.get(self.EXAMPLE_TEMPLATE_KEY, None)
if not example_template:
raise ValueError("An example template needs to be specified in the config.")
return example_template
def output_format(self) -> str:
return self._prompt_config.get(self.OUTPUT_FORMAT_KEY, None)
def output_guidelines(self) -> str:
return self._prompt_config.get(self.OUTPUT_GUIDELINE_KEY, None)
def chain_of_thought(self) -> bool:
"""Returns true if the model is able to perform chain of thought reasoning."""
return self._prompt_config.get(self.CHAIN_OF_THOUGHT_KEY, False)
def transforms(self) -> List[Dict]:
"""Returns a list of transforms to apply to the data before sending to the model."""
return self.config.get(self.TRANSFORM_KEY, [])
def dataset_generation_guidelines(self) -> str:
"""Returns a string containing guidelines for how to generate a synthetic dataset"""
return self._dataset_generation_config.get(
self.DATASET_GENERATION_GUIDELINES_KEY, ""
)
def dataset_generation_num_rows(self) -> int:
"""Returns the number of rows to generate for the synthetic dataset"""
return self._dataset_generation_config.get(
self.DATASET_GENERATION_NUM_ROWS_KEY, 1
) | /refuel-autolabel-0.0.15.tar.gz/refuel-autolabel-0.0.15/src/autolabel/configs/config.py | 0.850251 | 0.18228 | config.py | pypi |
from aws_lambda_builders.architecture import X86_64
def set_funcdef_defaults(funcdef: dict):
spec: dict = funcdef.get("spec", {})
build: dict = spec.get("build", {})
if not build.get("architecture"):
build["architecture"] = X86_64
return funcdef
funcdef_schema = {
"type": "object",
"properties": {
"metadata": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"namespace": {
"type": "string"
}
},
"required": ["name", "namespace"]
},
"spec": {
"type": "object",
"properties": {
"build": {
"type": "object",
"properties": {
"source": {
"type": "string"
},
"manifest": {
"type": "string"
},
"language": {
"type": "string",
"enum": ["python", "go"]
},
"architecture": {
"type": "string",
"enum": [X86_64]
}
},
"required": ["source", "manifest", "language"]
},
"handler": {
"type": "string"
},
"timeout": {
"type": "integer",
"minimum": 0
},
"runtime": {
"type": "string",
"enum": ["python3.7", "python3.8", "python3.9", "python3.10", "golang1.x", "go1.x"]
}
},
"required": ["build", "handler", "timeout", "runtime"]
}
},
"required": ["metadata", "spec"]
} | /refunc-cli-1.2.3.tar.gz/refunc-cli-1.2.3/rfctl/schema.py | 0.627609 | 0.288124 | schema.py | pypi |
[](https://github.com/pleiszenburg/refuse/blob/master/LICENSE) [](https://github.com/pleiszenburg/refuse/milestone/3) [](https://pypi.python.org/pypi/refuse) [](https://pypi.python.org/pypi/refuse)

## Synopsis
`refuse` is a Python module implemented using [`ctypes`](https://docs.python.org/3/library/ctypes.html) that provides a simple cross-platform interface to:
- [libfuse](https://github.com/libfuse/libfuse)
- [FUSE for macOS](https://osxfuse.github.io/)
- [WinFsp](https://github.com/billziss-gh/winfsp)
`refuse` originated as a fork of [`fusepy`](https://github.com/fusepy/fusepy). This fork will break with its origins in (at least) the following aspects:
- [x] Dropping Python 2 support
- [ ] Dropping the monolithic single-file-design
- [ ] Adding ``libfuse3`` support
- [ ] Marking ``libfuse2`` support as deprecated
- [ ] A test suite
**If you have a pending pull request against `fusepy` that you would like to see included into `refuse` please open an issue here.**
**If you want to contribute to `refuse`, please have a look at the [contributing guidelines](https://github.com/pleiszenburg/refuse/blob/develop/CONTRIBUTING.md).**
## Project status
THIS PROJECT HAS **ALPHA** STATUS.
The high level API has been tested through [`LoggedFS-python`](https://github.com/pleiszenburg/loggedfs-python) with [`pjdfstest`](https://github.com/pjd/pjdfstest/) and [`fsx`](https://github.com/linux-test-project/ltp/blob/master/testcases/kernel/fs/fsx-linux/fsx-linux.c) on x86_64 Linux only, but not in all possible modes of operation. The low level API is completely untested at this point.
## Installation
`refuse` requires `libfuse` 2.8 or 2.9 (highly recommended), `FUSE for macOS` or `WinFsp`. The [`master` branch](https://github.com/pleiszenburg/refuse/tree/master) of its git repository is always kept at the latest *preview release*. It should be "sort of stable" (still ALPHA). Development happens in the [`develop` branch](https://github.com/pleiszenburg/refuse/tree/develop).
You can install the *preview releases* from PyPI:
```bash
pip install refuse
```
You can alternatively also install the current `HEAD`, most likely very unstable:
```bash
pip install git+https://github.com/pleiszenburgrefuse.git@develop
```
`refuse` (theoretically) runs on:
<table>
<tr>
<th>OS</th><th colspan="2">API</th><th colspan="6">arch</th>
</tr>
<tr>
<th></th><th>level</th><th>version</th>
<th>i386</th><th>x86_64</th><th>PPC</th><th>PPC64</th><th>arm64</th><th>MIPS</th>
</tr>
<tr>
<td rowspan="4">Linux</td><td rowspan="2">high</td><td>2</td>
<td>yes</td><td>yes</td><td>yes</td><td>yes</td><td>yes</td><td>yes</td>
</tr>
<tr>
<td>3</td>
<td>no</td><td>no</td><td>no</td><td>no</td><td>no</td><td>no</td>
</tr>
<tr>
<td rowspan="2">low</td><td>2</td>
<td>yes</td><td>yes</td><td>yes</td><td>yes</td><td>yes</td><td>yes</td>
</tr>
<tr>
<td>3</td>
<td>no</td><td>no</td><td>no</td><td>no</td><td>no</td><td>no</td>
</tr>
<tr>
<td rowspan="4">Mac OS X</td><td rowspan="2">high</td><td>2</td>
<td>yes</td><td>yes</td><td>yes</td><td>yes</td><td></td><td></td>
</tr>
<tr>
<td>3</td>
<td>no</td><td>no</td><td>no</td><td>no</td><td></td><td></td>
</tr>
<tr>
<td rowspan="2">low</td><td>2</td>
<td>yes</td><td>yes</td><td>yes</td><td>yes</td><td></td><td></td>
</tr>
<tr>
<td>3</td>
<td>no</td><td>no</td><td>no</td><td>no</td><td></td><td></td>
</tr>
<tr>
<td rowspan="4">FreeBSD</td><td rowspan="2">high</td><td>2</td>
<td>yes</td><td>yes</td><td>no</td><td>no</td><td>no</td><td>no</td>
</tr>
<tr>
<td>3</td>
<td>no</td><td>no</td><td>no</td><td>no</td><td>no</td><td>no</td>
</tr>
<tr>
<td rowspan="2">low</td><td>2</td>
<td>yes</td><td>yes</td><td>no</td><td>no</td><td>no</td><td>no</td>
</tr>
<tr>
<td>3</td>
<td>no</td><td>no</td><td>no</td><td>no</td><td>no</td><td>no</td>
</tr>
<tr>
<td rowspan="4">OpenBSD</td><td rowspan="2">high</td><td>2</td>
<td>yes</td><td>yes</td><td>yes</td><td>yes</td><td>yes</td><td>yes</td>
</tr>
<tr>
<td>3</td>
<td>no</td><td>no</td><td>no</td><td>no</td><td>no</td><td>no</td>
</tr>
<tr>
<td rowspan="2">low</td><td>2</td>
<td>no</td><td>no</td><td>no</td><td>no</td><td>no</td><td>no</td>
</tr>
<tr>
<td>3</td>
<td>no</td><td>no</td><td>no</td><td>no</td><td>no</td><td>no</td>
</tr>
<tr>
<td rowspan="4">Windows</td><td rowspan="2">high</td><td>2</td>
<td>yes</td><td>yes</td><td></td><td></td><td>no</td><td></td>
</tr>
<tr>
<td>3</td>
<td>no</td><td>no</td><td></td><td></td><td>no</td><td></td>
</tr>
<tr>
<td rowspan="2">low</td><td>2</td>
<td>no</td><td>no</td><td></td><td></td><td>no</td><td></td>
</tr>
<tr>
<td>3</td>
<td>no</td><td>no</td><td></td><td></td><td>no</td><td></td>
</tr>
<tr>
<td rowspan="4">Windows/Cygwin</td><td rowspan="2">high</td><td>2</td>
<td>yes</td><td>yes</td><td></td><td></td><td>no</td><td></td>
</tr>
<tr>
<td>3</td>
<td>no</td><td>no</td><td></td><td></td><td>no</td><td></td>
</tr>
<tr>
<td rowspan="2">low</td><td>2</td>
<td>no</td><td>no</td><td></td><td></td><td>no</td><td></td>
</tr>
<tr>
<td>3</td>
<td>no</td><td>no</td><td></td><td></td><td>no</td><td></td>
</tr>
</table>
## Porting a project from `fusepy` to `refuse`
[See documentation](https://github.com/pleiszenburg/refuse/blob/master/docs/porting.md).
## Miscellaneous
- [Authors](https://github.com/pleiszenburg/refuse/blob/master/AUTHORS.md) (credit where credit is due)
- [Change log (current)](https://github.com/pleiszenburg/refuse/blob/develop/CHANGES.md) (changes in development branch since last release)
- [Change log (past)](https://github.com/pleiszenburg/refuse/blob/master/CHANGES.md) (release history)
- [Contributing](https://github.com/pleiszenburg/refuse/blob/develop/CONTRIBUTING.md) (**Contributions are highly welcomed!**)
- [Documentation](https://github.com/pleiszenburg/refuse/tree/master/docs) (mostly notes at this point)
- [License](https://github.com/pleiszenburg/refuse/blob/master/LICENSE) (**ISCL**)
| /refuse-0.0.4.tar.gz/refuse-0.0.4/README.md | 0.501465 | 0.936807 | README.md | pypi |
import sys
from functools import partial
import numpy as np
from .util import generate_evenly_spaced_data_set
from .util import generate_uniform_data_set
from .util import generator_from_helper
"""
Sets of "Improving Symbolic Regression with Interval Arithmetic and Linear Scaling" by Maarten Keijzer
DOI: 10.1007/3-540-36599-0_7
"""
def keijzer_func4(x):
    """Keijzer-1/2/3 target function: 0.3 * x * sin(2*pi*x)."""
    angle = 2.0 * np.pi * x
    return 0.3 * x * np.sin(angle)
def keijzer_func5(x):
    """Keijzer-4 target function: x^3 e^-x cos(x) sin(x) (sin^2(x) cos(x) - 1)."""
    sine = np.sin(x)
    cosine = np.cos(x)
    inner = sine ** 2 * cosine - 1.0
    return x ** 3 * np.exp(-x) * cosine * sine * inner
def keijzer_func6(x, y, z):
    """Keijzer-5 target function: 30 x z / ((x - 10) y^2)."""
    numerator = 30.0 * x * z
    return numerator / ((x - 10.0) * y ** 2)
def keijzer_func7(x):
    """Keijzer-6 target function: the harmonic number H(n) = sum_{i=1}^{n} 1/i.

    Accepts a non-negative scalar or a numpy array; each value is truncated
    to int before summation.

    Raises
    ------
    ValueError
        If any input value is negative.
    """
    # range() is half-open, so the upper bound must be int(n) + 1 to include
    # the n-th term (the previous version silently computed H(n-1)).
    f = lambda n: sum(1.0 / i for i in range(1, int(n) + 1))
    # np.any handles scalars and arrays alike; the builtin any() raises
    # TypeError on the scalar path this function explicitly supports below.
    if np.any(np.asarray(x) < 0):
        raise ValueError
    if isinstance(x, np.ndarray):
        return np.array([f(n) for n in x])
    else:
        return f(x)
def keijzer_func8(x):
    """Keijzer-7 target function: natural logarithm."""
    return np.log(x)
def keijzer_func9(x):
    """Keijzer-8 target function: square root."""
    return np.sqrt(x)
def keijzer_func10(x):
    """Keijzer-9 target function: inverse hyperbolic sine."""
    return np.arcsinh(x)
def keijzer_func11(x, y):
    """Keijzer-10 target function: x raised to the power y."""
    return x ** y
def keijzer_func12(x, y):
    """Keijzer-11 target function: x*y + sin((x-1)(y-1))."""
    phase = (x - 1.0) * (y - 1.0)
    return x * y + np.sin(phase)
def keijzer_func13(x, y):
    """Keijzer-12 target function: x^4 - x^3 + y^2/2 - y."""
    return x ** 4 - x ** 3 + 0.5 * y ** 2 - y
def keijzer_func14(x, y):
    """Keijzer-13 target function: 6 sin(x) cos(x); ``y`` is accepted but unused."""
    return 6.0 * np.sin(x) * np.cos(x)
def keijzer_func15(x, y):
    """Keijzer-14 target function: 8 / (2 + x^2 + y^2)."""
    return 8.0 / (2.0 + x ** 2 + y ** 2)
def keijzer_func16(x, y):
    """Keijzer-15 target function: x^3/3 + y^3/2 - y - x."""
    return x ** 3 / 3.0 + y ** 3 / 2.0 - y - x
def _keijzer1_3_helper(step, ranges):
    """Shared sampler for Keijzer-1..3: evenly spaced samples of keijzer_func4."""
    return generate_evenly_spaced_data_set(keijzer_func4, step, ranges)
def generate_keijzer1():
    """Keijzer-1: train/test grids of keijzer_func4 on [-1, 1]."""
    bounds = (-1, 1)
    return _keijzer1_3_helper(0.1, bounds), _keijzer1_3_helper(0.001, bounds)
def generate_keijzer2():
    """Keijzer-2: train/test grids of keijzer_func4 on [-2, 2]."""
    bounds = (-2, 2)
    return _keijzer1_3_helper(0.1, bounds), _keijzer1_3_helper(0.001, bounds)
def generate_keijzer3():
    """Keijzer-3: train/test grids of keijzer_func4 on [-4, 4]."""
    bounds = (-4, 4)
    return _keijzer1_3_helper(0.1, bounds), _keijzer1_3_helper(0.001, bounds)
def generate_keijzer4():
    """Keijzer-4: evenly spaced samples of keijzer_func5; test grid offset by 0.05."""
    grid = partial(generate_evenly_spaced_data_set, keijzer_func5, 0.05)
    return grid((0, 10)), grid((0.05, 10.05))
def generate_keijzer5(rng=np.random):
    """Keijzer-5: uniform random samples of keijzer_func6 (1000 train / 10000 test)."""
    bounds = [(-1, 1), (1, 2), (-1, 1)]
    train = generate_uniform_data_set(keijzer_func6, 1000, bounds, rng=rng)
    test = generate_uniform_data_set(keijzer_func6, 10000, bounds, rng=rng)
    return train, test
def generate_keijzer6():
    """Keijzer-6: integer grids of the harmonic-number target keijzer_func7."""
    grid = partial(generate_evenly_spaced_data_set, keijzer_func7, 1.0)
    return grid((1, 50)), grid((1, 120))
def generate_keijzer7():
    """Keijzer-7: log(x) on [1, 100]; coarse train grid, dense test grid."""
    grid = partial(generate_evenly_spaced_data_set, keijzer_func8)
    return grid(1.0, (1, 100)), grid(0.01, (1, 100))
def generate_keijzer8():
    """Keijzer-8: sqrt(x) on [0, 100]; coarse train grid, dense test grid."""
    grid = partial(generate_evenly_spaced_data_set, keijzer_func9)
    return grid(1.0, (0, 100)), grid(0.01, (0, 100))
def generate_keijzer9():
    """Keijzer-9: arcsinh(x) on [0, 100]; coarse train grid, dense test grid."""
    grid = partial(generate_evenly_spaced_data_set, keijzer_func10)
    return grid(1.0, (0, 100)), grid(0.01, (0, 100))
def generate_keijzer10(rng=np.random):
    """Keijzer-10: x**y; 100 uniform train points, dense evenly spaced test grid."""
    return (
        generate_uniform_data_set(keijzer_func11, 100, (0, 1), rng=rng),
        generate_evenly_spaced_data_set(keijzer_func11, 0.01, (0, 1)),
    )
def _keijzer11_15_helper(func, rng=np.random):
    """Shared sampler for Keijzer-11..15: 20 uniform train points, dense test grid on [-3, 3]."""
    return (
        generate_uniform_data_set(func, 20, (-3, 3), rng=rng),
        generate_evenly_spaced_data_set(func, 0.01, (-3, 3)),
    )
# Create generate_keijzer12..15 from the shared helper; presumably
# generator_from_helper injects them into this module — TODO confirm against .util.
generator_from_helper(_keijzer11_15_helper, shift=-1, i=list(range(12, 16)))
# Collect every generator defined in this module into a single lookup table.
current_module = sys.modules[__name__]
all_problems = {name: getattr(current_module, name) for name in locals() if "generate_keijzer" in name}
import functools
import inspect
import sys
import numpy as np
from sklearn.datasets.base import Bunch
from ..utils import make_register
from .integrate import generate_ode_data
# Registry of ODE right-hand-side factories, populated by the @register_ode
# decorator below; metadata such as all_ode[ode]["arity"] is read back later
# (see make_load).
all_ode = {}
register_ode = make_register(all_ode)
@register_ode(2, "linear", "polynomial")
def harmonic_oscillator(omega=1.0):
    """Return the right-hand side of a 2-D harmonic oscillator."""
    @functools.wraps(harmonic_oscillator)
    def dy(y, t):
        position, velocity = y[0], y[1]
        return [velocity, -omega ** 2 * position]
    return dy
@register_ode(2, "polynomial")
def anharmonic_oscillator(omega=1.0, c=1.0, l=1.0):
    """Return the right-hand side of a damped anharmonic oscillator."""
    @functools.wraps(anharmonic_oscillator)
    def dy(y, t):
        position, velocity = y[0], y[1]
        acceleration = -omega ** 2 * position - l * position ** 2 - c * velocity
        return [velocity, acceleration]
    return dy
@register_ode(3, "polynomial")
def lorenz(s=10.0, r=28.0, b=8.0 / 3.0):
    """Return the right-hand side of the Lorenz system."""
    @functools.wraps(lorenz)
    def dy(y, t):
        x, yy, z = y[0], y[1], y[2]
        return [s * (yy - x), r * x - yy - x * z, x * yy - b * z]
    return dy
@register_ode(2, "polynomial")
def van_der_pol(omega=1.0, a=0.1, b=0.01):
    """Return the right-hand side of the Van der Pol oscillator."""
    @functools.wraps(van_der_pol)
    def dy(y, t):
        position, velocity = y
        acceleration = -omega ** 2 * position + a * velocity * (1 - b * position ** 2)
        return velocity, acceleration
    return dy
@register_ode(2)
def michaelis_menten(vmax=0.25, Km=0.1, rho=1.0):
    """Return the right-hand side of Michaelis-Menten substrate/product kinetics."""
    @functools.wraps(michaelis_menten)
    def dy(y, t):
        substrate, _product = y
        rate = vmax * substrate ** rho / (Km + substrate ** rho)
        # Substrate is consumed at exactly the product-formation rate.
        return [-rate, rate]
    return dy
@register_ode(3, "polynomial")
def rössler(a=0.15, b=0.20, c=10.0):
    """Return the right-hand side of the Rössler system."""
    @functools.wraps(rössler)
    def dy_(state, t):
        x, y, z = state
        return [-y - z, x + a * y, b + (x - c) * z]
    return dy_
@register_ode(2, "polynomial")
def brusselator(a=1.0, b=3.0):
    """Return the right-hand side of the Brusselator reaction system."""
    @functools.wraps(brusselator)
    def dy_(state, t):
        x, y = state
        autocatalytic = x ** 2 * y
        return a + autocatalytic - (b + 1) * x, b * x - autocatalytic
    return dy_
@register_ode(2)
def magnets(K=0.25):
    """Return the right-hand side of two coupled magnet angle equations."""
    @functools.wraps(magnets)
    def dy(state, t):
        theta1, theta2 = state
        d1 = K * np.sin(theta1 - theta2) - np.sin(theta1)
        d2 = K * np.sin(theta2 - theta1) - np.sin(theta2)
        return d1, d2
    return dy
@register_ode(2)
def predator_prey(a=0.5, b=0.5):
    """Return the right-hand side of a predator-prey model with saturating interaction."""
    @functools.wraps(predator_prey)
    def dy_(state, t):
        prey, predator = state
        d_prey = prey * (b - prey - predator / (1.0 + prey))
        d_predator = predator * (prey / (1 + prey) - a * predator)
        return d_prey, d_predator
    return dy_
@register_ode(2)
def bacterial_respiration(a=0.1, b=0.2, q=1.0):
    """Return the right-hand side of a bacterial-respiration model."""
    @functools.wraps(bacterial_respiration)
    def dy_(state, t):
        x, y = state
        consumption = x * y / (1 + q * x ** 2)
        return b - x - consumption, a - consumption
    return dy_
@register_ode(2)
def glider(d=1.0):
    """Return the right-hand side of glider dynamics in (speed, angle) coordinates."""
    @functools.wraps(glider)
    def dy(state, t):
        speed, angle = state
        d_speed = -np.sin(angle) - d * speed ** 2
        d_angle = -np.cos(angle) / speed + speed
        return d_speed, d_angle
    return dy
@register_ode(2)
def shear_flow(a=0.3):
    """Return the right-hand side of orientation dynamics in a shear flow."""
    @functools.wraps(shear_flow)
    def dy(state, t):
        theta, phi = state
        d_theta = np.tan(phi) ** (-1) * np.cos(theta)
        d_phi = (np.cos(phi) ** 2 + a * np.sin(phi) ** 2) * np.sin(theta)
        return d_theta, d_phi
    return dy
def make_bunch(data_config):
    """Integrate the configured ODE and wrap the result in a sklearn Bunch.

    ``data_config`` must contain ``problem`` (an ODE factory), ``x0`` and
    ``t``; ``ode_params`` is filled in from the factory's keyword defaults
    when absent.  NOTE: the dict is mutated in place, so loaders that share
    one config (see make_load) compute the defaults only once.
    """
    # Default parameters = the factory function's own keyword defaults.
    default_params = lambda: {
        p.name: p.default for p in inspect.signature(data_config["problem"]).parameters.values()
    }
    # NOTE(review): default_params() is evaluated even when "ode_params" is
    # already present — harmless but wasteful; confirm before changing.
    data_config["ode_params"] = data_config.get("ode_params", default_params())
    x, dx = generate_ode_data(**data_config)
    return Bunch(data=x, target=dx, x0=data_config["x0"], params=data_config["ode_params"], t=data_config["t"])
def make_load(ode, t=np.linspace(0, 100, 10001, endpoint=True), x0=1):
    """Build and register a ``load_<name>`` loader for the given ODE factory.

    The loader integrates the ODE from a constant initial state of ``x0``
    (broadcast to the ODE's arity) over the time grid ``t`` and returns a
    Bunch (see make_bunch).  The loader is also injected into the module
    that *called* this function, discovered via stack inspection.
    """
    arity = all_ode[ode]["arity"]
    # One shared config dict per loader; make_bunch caches ode_params into it.
    data_config = dict(problem=ode, x0=np.ones(arity) * x0, t=t)
    def loader():
        return make_bunch(data_config)
    loader.__name__ = "load_" + all_ode[ode]["name"]
    # register loader to the simple_ode module
    caller = inspect.getframeinfo(inspect.stack()[1][0]) # find current_module by looking up caller in stack
    name = inspect.getmodulename(caller.filename)
    current_module = [mod for mname, mod in sys.modules.items() if name == mname.split(".")[-1]][0]
    if loader.__name__ not in dir(current_module):
        setattr(current_module, loader.__name__, loader)
    return loader
# Eagerly build one loader per registered ODE, keyed by its registered name.
all_loaders = {all_ode[ode]["name"]: make_load(ode) for ode in all_ode}
class resampler:
    """Bin a continuous regression target into pseudo-classes so that
    classification-style re-samplers (e.g. imbalanced-learn) can be applied
    to regression data, then map the re-sampled rows back into X/y."""
    def __init__(self):
        # Dependencies are imported lazily and kept on the instance.
        import pandas as pd
        from sklearn.preprocessing import LabelEncoder
        from collections import Counter
        import numpy as np
        self.bins = 3
        self.pd = pd
        self.LabelEncoder = LabelEncoder
        self.Counter = Counter
        # 0 doubles as a "not fitted yet" sentinel (checked in resample()).
        self.X = 0
        self.Y_classes = 0
        self.target = 0
        self.np = np
    # This function adds classes to each sample and returns the class list as a dataframe/numpy array (as per input)
    # It also merges classes as and when required
    def fit(self, X, target, bins=3, min_n_samples=6, balanced_binning=False, verbose=2):
        """Bin the target column of ``X`` into ``bins`` classes and return
        the per-sample class labels as a numpy array.

        ``target`` is either a column name (pandas input) or a column index,
        possibly negative (numpy input).  Classes smaller than
        ``min_n_samples`` are merged into the preceding class.
        """
        self.bins = bins
        tmp = target
        # If data is numpy, then convert it into pandas
        if type(target) == int:
            if target < 0:
                target = X.shape[1]+target
            tmp = target
            self.X = self.pd.DataFrame()
            for i in range(X.shape[1]):
                if i!=target:
                    self.X[str(i)] = X[:,i]
            self.X["target"] = X[:,target]
            target = "target"
        else:
            self.X = X.copy()
        # Use qcut if balanced binning is required
        if balanced_binning:
            self.Y_classes = self.pd.qcut(self.X[target], q=self.bins, precision=0)
        else:
            self.Y_classes = self.pd.cut(self.X[target], bins=self.bins)
        # Pandas outputs ranges after binning. Convert ranges to classes
        le = self.LabelEncoder()
        self.Y_classes = le.fit_transform(self.Y_classes)
        # Merge classes if number of neighbours is more than the number of samples
        classes_count = list(map(list, self.Counter(self.Y_classes).items()))
        classes_count = sorted(classes_count, key = lambda x: x[0])
        mid_point = len(classes_count)  # NOTE(review): unused — confirm before removing
        # Logic for merging
        for i in range(len(classes_count)):
            if classes_count[i][1] < min_n_samples:
                # Move all members of the undersized class into the previous
                # class label (for i == 0 this wraps to the last class).
                self.Y_classes[self.np.where(self.Y_classes == classes_count[i][0])[0]] = classes_count[i-1][0]
                if verbose > 0:
                    print("INFO: Class " + str(classes_count[i][0]) + " has been merged into Class " + str(classes_count[i-1][0]) + " due to low number of samples")
                classes_count[i][0] = classes_count[i-1][0]
        if verbose > 0:
            print()
        # Perform label-encoding once again
        # Avoids class skipping after merging
        le = self.LabelEncoder()
        self.Y_classes = le.fit_transform(self.Y_classes)
        # Pretty print
        if verbose > 1:
            print("Class Distribution:\n-------------------")
            classes_count = list(map(list, self.Counter(self.Y_classes).items()))
            classes_count = sorted(classes_count, key = lambda x: x[0])
            for class_, count in classes_count:
                print(str(class_)+": "+str(count))
            print()
        # Finally concatenate and return as dataframe or numpy
        # Based on what type of target was sent
        self.X["classes"] = self.Y_classes
        if type(tmp) == int:
            self.target = tmp
        else:
            self.target = target
        return self.Y_classes
    # This function performs the re-sampling
    def resample(self, sampler_obj, trainX, trainY):
        """Run ``sampler_obj.fit_resample`` and split the result back into
        features and target (numpy arrays for numpy input, frames otherwise)."""
        # If classes haven't yet been created, then run the "fit" function
        if type(self.Y_classes) == int:
            print("Error! Run fit method first!!")
            return None
        # Finally, perform the re-sampling
        resampled_data, _ = sampler_obj.fit_resample(trainX, trainY)
        if type(resampled_data).__module__ == 'numpy':
            resampled_data = self.pd.DataFrame(resampled_data, columns=self.X.drop("classes", axis=1).columns)
        # Return the correct X and Y
        if type(self.target) == int:
            return resampled_data.drop("target", axis=1).values, resampled_data["target"].values
        else:
            return resampled_data.drop(self.target, axis=1), resampled_data[self.target]
import pandas as pd
import numpy as np
import statsmodels.stats.api as sms
from statsmodels.stats.outliers_influence import variance_inflation_factor
import statsmodels.api as sm
from typing import List, Tuple, Any, Union
from dataclasses import dataclass
@dataclass
class TreatedModelResults:
    """
    Container for a model's results after treatment.

    Parameters:
    - metric_list (List[str]): The metrics returned by the model
    - model (sm.Logit | sm.OLS): The final model after treatment
    """

    metric_list: List[str]
    model: Union[sm.Logit, sm.OLS]

    def __repr__(self) -> str:
        # str() (not repr()) of the fields, matching the original output.
        return f"TreatedModelResults(metric_list={self.metric_list}, model={self.model})"
def treat_regression_model(
    X: pd.DataFrame,
    y: pd.DataFrame,
    threshhold_vif: float = 5,
    threshold_pval: float = 0.05,
    reg_type: str = "OLS",
) -> TreatedModelResults:
    """
    Run the full treatment pipeline: first drop multicollinear features
    (VIF above ``threshhold_vif``), then iteratively drop features whose
    p-values exceed ``threshold_pval``.

    Parameters
    ----------
    X : pd.DataFrame
        The independent variable(s).
    y : pd.DataFrame
        The target variable.
    threshhold_vif : float, optional
        Variance-inflation-factor cutoff. Default is 5.
    threshold_pval : float, optional
        P-value cutoff. Default is 0.05.
    reg_type : str, optional
        The regression type; must be "OLS" or "logit".

    Returns
    -------
    TreatedModelResults
        The surviving metrics and the final fitted model.

    Raises
    ------
    ValueError
        If ``reg_type``, ``threshhold_vif`` or ``threshold_pval`` is invalid.

    Examples
    --------
    >>> X = pd.DataFrame({'feature1': [1, 2, 3], 'feature2': [4, 5, 6]})
    >>> y = pd.DataFrame({'target': [0, 1, 0]})
    >>> result = treat_regression_model(X, y, threshhold_vif=3, threshold_pval=0.1)
    """
    # Validate all inputs up front, before any fitting work.
    if reg_type not in ("OLS", "logit"):
        raise ValueError("Invalid 'reg_type'. Must be 'OLS' or 'logit'.")
    if threshhold_vif < 0:
        raise ValueError("Invalid 'threshhold_vif'. Must be a non-negative number.")
    if threshold_pval < 0 or threshold_pval > 1:
        raise ValueError(
            "Invalid 'threshold_pval'. Must be greater than 0 and less than 1"
        )
    # Stage 1: multicollinearity; stage 2: p-value pruning on the survivors.
    surviving = treat_multicollinearity(X, y, threshhold_vif, reg_type)
    return treat_pvalue(X[surviving.metric_list], y, threshold_pval, reg_type)
def treat_multicollinearity(
    X: pd.DataFrame, y: pd.DataFrame, threshhold_vif: float = 5, reg_type: str = "OLS"
) -> TreatedModelResults:
    """
    Treat multicollinearity in a linear or logistic regression model.

    Iteratively drops the column with the highest variance inflation factor
    until every remaining VIF is below ``threshhold_vif``, refitting the
    model (with an added constant) after each drop.

    Parameters
    ----------
    X : pd.DataFrame
        The independent variable(s).
    y : pd.DataFrame
        The target variable.
    threshhold_vif : float, optional
        The VIF threshold. Default is 5.
    reg_type : str, optional
        The regression type. Must be "OLS" or "logit".

    Returns
    -------
    TreatedModelResults
        The surviving columns and the final fitted model.

    Raises
    ------
    Exception
        If ``X`` cannot be cast to float (object dtypes).
    ValueError
        If ``reg_type`` or ``threshhold_vif`` is invalid.
    """
    regression_mapping = {
        "OLS": (sm.OLS, {}),
        "logit": (sm.Logit, {}),
    }
    try:
        X = X.astype(float)
    except (TypeError, ValueError) as exc:
        # Chain the cause instead of swallowing it with a bare except.
        raise Exception("Invalid dataset dtypes. Cannot have object dtypes") from exc
    if reg_type not in regression_mapping:
        raise ValueError("Invalid 'reg_type'. Must be 'OLS' or 'logit'.")
    if threshhold_vif < 0:
        raise ValueError("Invalid 'threshhold_vif'. Must be a positive number.")
    model_class, model_args = regression_mapping[reg_type]

    def _vif(frame):
        # One VIF per column, index-aligned with the frame's columns.
        return pd.Series(
            [variance_inflation_factor(frame.values, i) for i in range(frame.shape[1])],
            index=frame.columns,
            dtype=float,
        )

    def _fit(frame):
        # Always fit with an explicit constant, as the original did.
        return model_class(y, sm.add_constant(frame).astype(float), **model_args).fit(disp=False)

    vif_series = _vif(X)
    model = _fit(X)
    # `not (max < threshold)` (rather than `max >= threshold`) preserves the
    # original break condition's handling of NaN VIFs.
    while not vif_series.max() < threshhold_vif:
        worst = vif_series.sort_values(ascending=False).index[0]
        X = X.drop(worst, axis=1)
        model = _fit(X)
        vif_series = _vif(X)
    return TreatedModelResults(vif_series.index.tolist(), model)
def treat_pvalue(
    X: pd.DataFrame,
    y: pd.DataFrame,
    threshold_pval: float = 0.05,
    reg_type: str = "OLS",
) -> TreatedModelResults:
    """
    Iteratively drop features based on p-values in a linear regression or
    logistic regression model.

    Parameters
    ----------
    X : pd.DataFrame
        The independent variable(s).
    y : pd.DataFrame
        The target variable.
    threshold_pval : float, optional
        Features with p-values greater than this threshold are dropped.
        Default is 0.05.
    reg_type : str, optional
        The regression type. Must be "OLS" or "logit".

    Returns
    -------
    TreatedModelResults
        The surviving columns and the final fitted model.

    Raises
    ------
    ValueError
        If ``reg_type`` is not valid, or ``threshold_pval`` is outside [0, 1].

    Examples
    --------
    >>> X = pd.DataFrame({'feature1': [1, 2, 3], 'feature2': [4, 5, 6]})
    >>> y = pd.DataFrame({'target': [0, 1, 0]})
    >>> result = treat_pvalue(X, y, threshold_pval=0.1, reg_type='OLS')
    """
    regression_mapping = {
        "OLS": (sm.OLS, {}),
        "logit": (sm.Logit, {}),
    }
    if reg_type not in regression_mapping:
        raise ValueError("Invalid 'reg_type'. Must be 'OLS' or 'logit'.")
    if threshold_pval < 0 or threshold_pval > 1:
        raise ValueError(
            "Invalid 'threshold_pval'. Must be greater than 0 and less than 1"
        )
    model_class, model_args = regression_mapping[reg_type]
    cols = X.columns.tolist()

    def _fit(columns):
        return model_class(y, sm.add_constant(X[columns]).astype(float), **model_args).fit(disp=False)

    model = _fit(cols)
    # BUGFIX: restrict to feature p-values from the very first iteration.
    # The original used all p-values (including 'const') on iteration one,
    # so an insignificant intercept crashed `cols.remove('const')`.
    p_values = model.pvalues[cols]
    # `not (max < threshold)` preserves the original break semantics for NaN.
    while not max(p_values) < threshold_pval:
        cols.remove(p_values.idxmax())
        model = _fit(cols)
        p_values = model.pvalues[cols]
    return TreatedModelResults(cols, model)
import warnings
from reg_tables.utils import compare, align_latex_table, PanelOLSNew
from statsmodels.tools.tools import add_constant
import copy
import io
import pandas as pd
import numpy as np
import re
from varname import argname
from varname.utils import ImproperUseError, UsingExecWarning
from ast import Subscript
from tqdm import tqdm as tq
class Spec():
    """
    Contains specification of regression

    Parameters
    ----------
    data : {np.ndarray, pd.DataFrame}
        Dataset from which 'x' and 'y' variables are going to be sourced from
    y : str
        Name of the column with 'y' variable
    x_vars : {str,list, dict, set, tuple, np.ndarray, pd.core.series.Series}
        Name of the columns with 'x' variables
    entity_effects : bool
        Peform regression with entity effects
    time_effects : bool
        Peform regression with time effects
    other_effects : {str,list, dict, set, tuple, np.ndarray, pd.core.series.Series}
        Category codes to use for any effects that are not entity or time effects. Each variable is treated as an effect.
    cluster_entity : bool
        Cluster standard errors by entity
    cluster_time : bool
        Cluster standard errors by time
    double_cluster : bool
        Cluster standard errors bith by entity and time
    intercept : bool
        Include intercept in the regression
    check_rank : bool
        Check rank during regression
    """
    def __init__(self,
                 data, y, x_vars,
                 entity_effects=False, time_effects=False,
                 cluster_entity=False, other_effects=None, cluster_time=False,
                 double_cluster=False, intercept=True, check_rank=True, data_name=None
                 ):
        # PanelOLS supports at most two fixed effects in total.
        if ((time_effects == True) and (entity_effects == True)) and (other_effects != None):
            raise ValueError('At most two fixed effects are supported.')
        self.data = data
        if data_name != None:
            self.data_name = data_name
        else:
            try:
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore", category=UsingExecWarning)
                    self.data_name = argname('data')
            except ImproperUseError:
                print("Can't retrieve name of dataset. Using default value. Rename the dataset or provide a 'data_name' argument.")
                self.data_name = 'data'
        self.y = y
        # Normalise a scalar x_vars into a one-element list.
        if isinstance(x_vars, (list, dict, set, tuple,
                               np.ndarray, pd.core.series.Series)) != True:
            x_vars = [x_vars]
        self.x_vars = x_vars
        self.entity_effects = entity_effects
        self.time_effects = time_effects
        if other_effects == []:
            self.other_effects = None
        else:
            self.other_effects = other_effects
        self.cluster_entity = cluster_entity
        self.cluster_time = cluster_time
        self.double_cluster = double_cluster
        self.intercept = intercept
        # NOTE(review): check_rank is stored but never forwarded in run() —
        # confirm whether PanelOLSNew should receive it.
        self.check_rank = check_rank
        # BUGFIX: the original assigned the *local* `intercept` variable here,
        # which had no effect.  Fixed effects absorb the constant, so the
        # spec's intercept flag must be cleared (mirrors Model.add_spec).
        if time_effects or entity_effects:
            self.intercept = False

    def __repr__(self):
        # One-line summary of the full specification.
        return (
            f'dataset : {self.data_name} '
            f'x-vars: {self.x_vars}, y: {self.y}, '
            f'Entity Effects: {self.entity_effects}, '
            f'Time Effects: {self.time_effects}, '
            f'Other Effects: {self.other_effects}, '
            f'Cluster Entity: {self.cluster_entity}, '
            f'Cluster Time: {self.cluster_time}, '
            f'Double Cluster: {self.double_cluster}, '
            f'Intercept: {self.intercept}, '
            f'Check rank: {self.check_rank}'
        )

    def to_model(self, model):
        """
        Adds the current spec to specified model
        """
        if isinstance(model, Model):
            # NOTE(review): Model.add_spec accepts keyword arguments only, so
            # passing a Spec positionally raises TypeError — confirm intended API.
            model.add_spec(self)
        else:
            raise TypeError('Please provide a Model object as argument')

    def run(self):
        """
        Run this regression

        Returns
        -------
        PanelEffectsResults
            The panel effects results object.
        """
        # The two original branches differed only in whether other_effects was
        # passed; build the optional kwargs once instead of duplicating the fit.
        extra = {}
        if self.other_effects != None:
            extra['other_effects'] = self.data[self.other_effects]
        exog = self.data[self.x_vars]
        if self.intercept == True:
            exog = add_constant(exog)
        reg = PanelOLSNew(
            self.data[[self.y]],
            exog,
            entity_effects=self.entity_effects,
            time_effects=self.time_effects,
            **extra,
        ).fit(
            cov_type='clustered',
            cluster_entity=(self.cluster_entity | self.double_cluster),
            cluster_time=(self.cluster_time | self.double_cluster)
        )
        return reg
class Model():
"""
Contains multiple Spec objects
Parameters
----------
baseline : Spec
First regression of the model
rename_dict : dict
Rename columns with the variables
time_entity_effects : bool
Peform all regressions both with entity and time effects
"""
def __init__(self, baseline, rename_dict={}, time_entity_effects=False):
self._rename_dict = rename_dict#add check
baseline.intercept = True
self.baseline = baseline
self.specs = []
if time_entity_effects:
for comb in [(False, False), (True, False), (False, True), (True, True)]:
new_spec = copy.deepcopy(self.baseline)
new_spec.entity_effects = comb[0]
new_spec.time_effects = comb[1]
self.specs.append(new_spec)
else:
new_spec = copy.deepcopy(self.baseline)
self.specs.append(new_spec)
def __repr__(self):
strr = ''
for idx, spec in enumerate(self.specs):
strr = strr+(f'Spec {idx+1}: '+ spec.__repr__()+'\n')
return strr
    def remove_spec (self, idx1, idx2=None):
        """
        Remove regression from the model

        Parameters
        ----------
        idx1 : {float, int}
            Index of the model that needs to be removed
            (numeration starts from 1)
        idx2 : {float, int}
            If passed a slice of [idx1:idx2] will be removed
            (numeration starts from 1)
        """
        # NOTE(review): the slice end is idx2-1 (exclusive), so an inclusive
        # 1-indexed range [idx1, idx2] would need [idx1-1:idx2] instead —
        # confirm the intended semantics with callers before changing.
        if idx2 != None:del self.specs[idx1-1:idx2-1]
        else:
            del self.specs[idx1-1]
def add_spec(self, **kwargs):
"""
Add specs to the model
Parameters
----------
**kwargs:
kwargs describing the models. Possible arguments :
data : {np.ndarray, pd.DataFrame}
Dataset from which 'x' and 'y' variables are going to be sourced from
y : str
Name of the column with 'y' variable
x_vars : {str,list, dict, set, tuple, np.ndarray, pd.core.series.Series}
Name of the columns with 'x' variables
entity_effects : bool
Peform regression with entity effects
time_effects : bool
Peform regression with time effects
time_entity_effects : bool
Peform regression both with entity and time effects
other_effects : {str,list, dict, set, tuple, np.ndarray, pd.core.series.Series}
Category codes to use for any effects that are not entity or time effects. Each variable is treated as an effect.
cluster_entity : bool
Cluster standard errors by entity
cluster_time : bool
Cluster standard errors by time
double_cluster : bool
Cluster standard errors bith by entity and time
intercept : bool
Include intercept in the regression
check_rank : bool
Check rank during regression
Examples
--------
>>> model.add_spec(y='y2', entity_effects=True)
>>> model.add_spec(y='y2', time_effects=True)
"""
new_spec = copy.deepcopy(self.baseline)
try:
if isinstance(kwargs[x_vars],(list,dict,set,tuple,np.ndarray,pd.core.series.Series)) != True:
kwargs[x_vars] = [x_vars]
except:pass
if 'data' in kwargs.keys():
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UsingExecWarning)
new_spec.data_name = argname('kwargs[data]')
if isinstance(new_spec.data_name, Subscript):
if 'data_name' not in kwargs.keys():
new_spec.data_name = 'data'
print("Can't retrieve name of dataset. Using default value. Rename the dataset or provide a 'data_name' argument.")
if (('time_entity_effects' in kwargs) and ('other_effects' in kwargs)) or\
((
('time_effects' in kwargs) and
('entity_effects' in kwargs)
)\
and ('other_effects' in kwargs)):
raise ValueError('At most two fixed effects are supported.')
if 'other_effects' in kwargs:
if kwargs['other_effects'] == []:
kwargs['other_effects'] = None
for key in kwargs: setattr(new_spec, key, kwargs[key])
if (new_spec.time_effects or new_spec.entity_effects): new_spec.intercept = False
if 'time_entity_effects' in kwargs:
for comb in [(False, False), (True, False), (False, True), (True, True)]:
variation = copy.deepcopy(new_spec)
variation.entity_effects = comb[0]
variation.time_effects = comb[1]
variation.intercept = False
self.specs.append(variation)
else:self.specs.append(new_spec)
def rename(self, rename_dict):
"""
Rename the variables in the output table
Parameters
----------
rename_dict : dict
Rename columns with the variables
"""
for key in rename_dict.keys(): self._rename_dict[key] = rename_dict[key]
def run(self,coeff_decimals=2,latex_path=None,
time_fe_name='Time FEs', entity_fe_name='Entity FEs',
other_fe_name = 'Other FEs',
custom_row=None, display_datasets=False,
rsquared='Inclusive'):
"""
Run all regressions in the models
Parameters
----------
coeff_decimals : int
Display the numbers in the results table
with certain number of fraction digits
latex_path : str
Write the table in LaTeX format to specified path
time_fe_name : str
Name for time fixed effects column
entity_fe_name : str
Name for entity fixed effects column
custom_row : str
Adds a custom row to the end of the table
display_datasets : {bool,list}
Display the names of databases in the results table.
If the value is 'True' then use original names of variables.
Pass a list of strings to define custom names for databases.
rsquared : str
Type of R-squared. Default is 'Inclusive'. Other supported types - 'Standard', 'Between',
'Overall', 'Within'.
Returns
-------
pd.DataFrame
Table with the results of the regressions
"""
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message="In a future version of pandas all arguments of concat\
except for the argument 'objs' will be keyword-only.")
if custom_row !=None:
if isinstance (custom_row,list)!=True:
print('Custom row is not a list')
regs = []
for spec in tq(self.specs, 'Running regressions'):
regs.append(spec.run())
if rsquared == "Inclusive":
rsquared_func = lambda reg: reg.rsquared_inclusive
elif rsquared == "Standard":
rsquared_func = lambda reg: reg.rsquared
elif rsquared == "Between":
rsquared_func = lambda reg: reg.rsquared_between
elif rsquared == "Overall":
rsquared_func = lambda reg: reg.rsquared_overall
elif rsquared == "Within":
rsquared_func = lambda reg: reg.rsquared_within
R2s = list(map(rsquared_func, regs))
regs = compare(regs, stars=True, precision='tstats')
csv = regs.summary.as_csv()
tab = pd.read_csv(io.StringIO(csv), skiprows=1)
other_eff_dict = {}
for idx, spec in enumerate(self.specs):
if spec.other_effects != None:
other_eff_dict[idx] = spec.other_effects
if other_eff_dict != {}:
rows = tab.shape[0]
cols = tab.shape[1]
last_row = tab[rows-1:rows].copy()
tab = tab[:rows-1]
tab.loc[tab.shape[0]] = ['Other Effects'] + ['']*(cols-1)
tab = pd.concat([tab,last_row])
for key, value in other_eff_dict.items():
if isinstance (value, list):
renamed_items = []
for item in value:
if item in self._rename_dict.keys():
item = self._rename_dict[item]
renamed_items.append(item)
tab.iat[tab.shape[0]-2,int(key)+1] = ', '.join(renamed_items)
else:
if value in self._rename_dict.keys():
value = self._rename_dict[value]
tab.iat[tab.shape[0]-2,int(key)+1] = value
tab = tab.set_index([tab.columns[0]])
col_dict = dict(zip(tab.columns.to_list(),
list(map(lambda x:'('+str(int(x.replace(' ','').replace('Model',''))+1)+')',
tab.columns.to_list())))
)
coeff_borders = []
observ = int()
r2 = int()
const = int()
for idx, x in enumerate(tab.index):
if 'No. Observations' in x:observ = idx
if 'const' in x:const = idx
if re.match('R-squared ',x) != None:
r2 = idx
if '===' in x:coeff_borders.append(idx)
tab.rename(index={tab.index[observ]:'Observations', tab.index[const]:'Intercept'}, columns=col_dict, inplace=True)
tab.loc['Observations'] = ["{0:0,.0f}".format(float(x)) for x in tab.loc['Observations']]
try:coeffs = tab[coeff_borders[0]+1:coeff_borders[1]].copy()
except:coeffs = tab[coeff_borders[0]+1:-1].copy()
if coeff_decimals != None:
def change_decimals(cell):
try:
if '(' in cell:
sub_str = float(re.search('\(-?[0-9]*\.[0-9]*' ,cell)[0][1:])
return '('+re.sub('\(-?[0-9]*\.[0-9]*', f'{sub_str:.{coeff_decimals}f}', cell)
else:
sub_str = float(re.search('^-?[0-9]*\.[0-9]*', cell)[0])
return re.sub('^-?[0-9]*\.[0-9]*', f'{sub_str:.{coeff_decimals}f}', cell)
except TypeError:
return cell
coeffs = coeffs.applymap(change_decimals)
s = "{0:0."+str(coeff_decimals)+"f}"
R2s = [ s.format(x) for x in R2s ]
else:
R2s = [ "{0:0.4f}".format(x) for x in R2s ]
if const != 0:
coeffs = pd.concat([coeffs[2:], coeffs[0:2]])
coeffs_dict = {}
for idx,name in enumerate(coeffs.index):
if re.sub('[ \t]+$','',name) in self._rename_dict.keys():
coeffs_dict[name] = self._rename_dict[re.sub('[ \t]+$', '', name)]
coeffs.rename(index=coeffs_dict, inplace=True)
final = pd.concat([tab.head(1), coeffs])
for idx,name in enumerate(final.iloc[0]):
if re.sub('[ \t]+$','', name) in self._rename_dict.keys():
final.iat[0,idx] = self._rename_dict[re.sub('[ \t]+$','', name)]
# Add spacing
final = pd.concat([final.iloc[:1], pd.DataFrame(index=[' ']), final.iloc[1:]])
final = pd.concat([final, pd.DataFrame(index=[' '])])
for line in [observ,r2]:
final = pd.concat([final, tab[line:].head(1)])
# Inclusive R2s (including fixed effects)
final.iloc[-1] = R2s
effects = pd.DataFrame(index=[time_fe_name, entity_fe_name, other_fe_name])
some_effects = False
for column in tab.columns:
for x in tab[column]:
if re.search('Time', str(x))!=None: effects.loc[time_fe_name,column]='Yes'; some_effects = True
if re.search('Entity', str(x))!=None: effects.loc[entity_fe_name,column]='Yes'; some_effects = True
if other_eff_dict != {}:
for x in tab.iloc[tab.shape[0]-2,:].index:
effects.at[other_fe_name, x] = tab.iloc[tab.shape[0]-2,:][x]
some_effects = True
else:
effects = effects[:2]
if some_effects: final=pd.concat([final,effects.dropna(how='all')])
clustering = pd.DataFrame(index=['SEs clustering:'], columns=final.columns)
for idx, spec in enumerate(self.specs):
if spec.cluster_entity == True:
clustering.iat[0, idx] = 'Entity'
if spec.cluster_time == True:
clustering.iat[0, idx] = 'Time'
if ((spec.cluster_time == True)\
& (spec.cluster_entity == True))\
| spec.double_cluster == True:
clustering.iat[0, idx] = 'Entity and Time'
clustering.dropna(how='all',inplace=True)
if clustering.shape[0] == 1:
final = pd.concat([final,clustering])
if display_datasets != False:
if display_datasets == True:
data_info = pd.DataFrame(data=[[spec.data_name for spec in self.specs]]
, columns=final.columns, index=['Dataset'])
else:
data_info = pd.DataFrame(data=[display_datasets]
, columns=final.columns, index=['Dataset'])
final = pd.concat([final, data_info])
if custom_row != None:
custom = pd.DataFrame(index=[custom_row[0]])
for idx,item in enumerate(custom_row[1:]):
custom.at[custom_row[0],final.columns[idx]]=item
final = pd.concat([final,custom])
final.fillna('', inplace=True)
final.drop(index=' ', inplace=True)
if latex_path != None:
latex_string = final.style.to_latex(column_format = 'l'+ ((final.shape[1])*'c'), hrules=True)
# latex_string = re.sub('(?<=\{tabular\}\{l)(.*?)(?=\})',
# 'c'*len(re.search('(?<=\{tabular\}\{l)(.*?)(?=\})',
# final.style.to_latex())[0]),final.style.to_latex())
# latex_string = re.sub('{lcccc}\n','{lcccc}\n\\\\toprule\n{}', latex_string)
# latex_string = re.sub('\nD','\n\\\midrule\nD', latex_string)
# latex_string = re.sub('\n\\\end{tabular}\n','\n\\\\bottomrule\n\\\end{tabular}\n', latex_string)
latex_string = align_latex_table(latex_string).replace('\\\\ \n',' \\\\\n')
with open(latex_path, 'w') as f:
f.write(latex_string)
return final | /reg_tables-0.1.34.tar.gz/reg_tables-0.1.34/reg_tables/main.py | 0.629091 | 0.393647 | main.py | pypi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.