File size: 8,557 Bytes
b4b665d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 |
import abc
import os
from dataclasses import dataclass, field
from typing import Hashable, List, Optional, TypeVar
from .consts import (COMPLETE_ALPHABET, WHITESPACE_CHARACTERS, DEFAULT_MAX_CONSECUTIVE_WHITESPACES,
DEFAULT_FORCE_JSON_FIELD_ORDER, CONFIG_ENV_VAR_MAX_CONSECUTIVE_WHITESPACES,
CONFIG_ENV_VAR_STRICT_JSON_FIELD_ORDER, CONFIG_ENV_VAR_MAX_JSON_ARRAY_LENGTH,
DEFAULT_MAX_JSON_ARRAY_LENGTH)
def _parse_bool(s: str) -> bool:
return s and (s.strip().lower() in ['true', '1'])
def _env_or_default_field(env_var: str, default_val):
default_val_type = type(default_val)
parser_func = _parse_bool if default_val_type == bool else default_val_type
def factory_func():
return parser_func(os.environ.get(env_var, str(default_val)))
return field(default_factory=factory_func)
@dataclass
class CharacterLevelParserConfig:
    """Configuration shared by all character-level parsers.

    Each default below (except ``alphabet``) is read lazily from an
    environment variable at instantiation time via ``_env_or_default_field``.
    """
    alphabet: str = COMPLETE_ALPHABET
    max_consecutive_whitespaces: int = _env_or_default_field(CONFIG_ENV_VAR_MAX_CONSECUTIVE_WHITESPACES,
                                                             DEFAULT_MAX_CONSECUTIVE_WHITESPACES)
    """How many consecutive whitespaces the JsonSchemaParser will allow"""
    force_json_field_order: bool = _env_or_default_field(CONFIG_ENV_VAR_STRICT_JSON_FIELD_ORDER,
                                                         DEFAULT_FORCE_JSON_FIELD_ORDER)
    """Whether the JsonSchemaParser will force fields to appear in the
    order of the 'required' field in the schema"""
    max_json_array_length: int = _env_or_default_field(CONFIG_ENV_VAR_MAX_JSON_ARRAY_LENGTH,
                                                       DEFAULT_MAX_JSON_ARRAY_LENGTH)
    """What is the maximum json array length if not specified by the schema. Helps the LLM
    avoid infinite loops."""
class CharacterLevelParser(abc.ABC):
    """Abstract base for parsers that consume a string one character at a
    time and can report, at every step, which characters may come next."""

    def __init__(self, config: Optional[CharacterLevelParserConfig] = None):
        # Fall back to a default configuration when none is supplied.
        self._config = config if config is not None else CharacterLevelParserConfig()

    @property
    def config(self) -> CharacterLevelParserConfig:
        """The active parser configuration."""
        return self._config

    @config.setter
    def config(self, new_config: CharacterLevelParserConfig):
        self._config = new_config
        return self

    @abc.abstractmethod
    def add_character(self, new_character: str) -> 'CharacterLevelParser':
        """Consume one character and return the resulting parser state.

        This must be an immutable operation: ``self`` is never modified; a
        new parser describing the post-character state is returned instead.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def get_allowed_characters(self) -> str:
        """Return every character that is legal at the current parsing
        position, concatenated into a single string."""
        raise NotImplementedError()

    @abc.abstractmethod
    def can_end(self) -> bool:
        """Return True when the parse could legally terminate right now
        (the desired structure is potentially complete)."""
        raise NotImplementedError()

    def shortcut_key(self) -> Optional[Hashable]:
        """Optional hook: a key marking this as a repeating state whose full
        tree traversal should be avoided. None means no shortcut."""
        return None

    def cache_key(self) -> Optional[Hashable]:
        """Optional hook: a key marking this as a repeating state whose
        results may be cached if it is visited again. None disables caching."""
        return None
class StringParser(CharacterLevelParser):
    """StringParser is an example CharacterLevelParser that only allows an exact string. It is a debugging / learning tool
    to show how CharacterLevelParser works together with TokenizerPrefixTree to filter the allowed tokens (some of whom
    may contain multiple characters).

    Fixes vs. the previous version: the docstring named the wrong class
    ('RegexParser'), the base-class __init__ was never called (so the
    inherited ``config`` property raised AttributeError), and the mismatch
    error path raised IndexError instead of ValueError once the target
    string was exhausted.
    """

    def __init__(self, string: str, config: Optional[CharacterLevelParserConfig] = None):
        # Initialize the base class so the inherited `config` property works.
        super().__init__(config)
        self.target_str = string

    def add_character(self, new_character: str) -> CharacterLevelParser:
        """Consume `new_character` if it is the next expected prefix of the target."""
        if self.target_str.startswith(new_character):
            return StringParser(self.target_str[len(new_character):], self.config)
        # Guard against an already-exhausted target string, which would
        # otherwise raise IndexError while building the message.
        expected = self.target_str[0] if self.target_str else '<end of string>'
        raise ValueError(f"Expected '{expected}' but got '{new_character}'")

    def get_allowed_characters(self) -> str:
        """Only the next character of the target string is allowed (nothing when done)."""
        return self.target_str[0] if self.target_str else ""

    def can_end(self) -> bool:
        """The parse may end once the entire target string has been consumed."""
        return not self.target_str
class ForceStopParser(CharacterLevelParser):
    """A parser that permits no characters at all (optionally whitespace
    only), so the language model is forced to emit its stop token."""

    def __init__(self, allow_whitespace: bool = False):
        self.allow_whitespace = allow_whitespace

    def add_character(self, new_character: str) -> CharacterLevelParser:
        # State never changes: whatever arrives, we remain in "stop" mode.
        return self

    def get_allowed_characters(self) -> str:
        if self.allow_whitespace:
            return WHITESPACE_CHARACTERS
        return ""

    def can_end(self) -> bool:
        # This parser is always in a terminal state.
        return True
class UnionParser(CharacterLevelParser):
    """Accepts any string that at least one of several child parsers accepts."""

    def __init__(self, parsers: List[CharacterLevelParser]):
        self.parsers = parsers

    def add_character(self, new_character: str) -> CharacterLevelParser:
        # Calling get_allowed_characters() again here is a small performance
        # hit, but lets us advance only the children that accept the character.
        advanced = [
            child.add_character(new_character)
            for child in self.parsers
            if new_character in child.get_allowed_characters()
        ]
        # A single surviving child no longer needs the union wrapper.
        return advanced[0] if len(advanced) == 1 else UnionParser(advanced)

    def get_allowed_characters(self) -> str:
        # Deduplicate across children; the character order is irrelevant.
        distinct = set()
        for child in self.parsers:
            distinct.update(child.get_allowed_characters())
        return "".join(distinct)

    def can_end(self) -> bool:
        return any(child.can_end() for child in self.parsers)

    def shortcut_key(self) -> Optional[Hashable]:
        # A shortcut is only meaningful when every child agrees on one key.
        keys = {child.shortcut_key() for child in self.parsers}
        if len(keys) == 1:
            return keys.pop()
        return None

    def cache_key(self) -> Optional[Hashable]:
        # Cacheable only when every child state is itself cacheable.
        child_keys = tuple(child.cache_key() for child in self.parsers)
        if any(key is None for key in child_keys):
            return None
        return ('union', child_keys)
class SequenceParser(CharacterLevelParser):
    """A parser that is a sequence of multiple parsers."""
    def __init__(self, parsers: List[CharacterLevelParser]):
        # parsers[0] is the currently-active parser; the rest run after it ends.
        self.parsers = parsers
    def add_character(self, new_character: str) -> CharacterLevelParser:
        """Feed the character to every parser that could currently be active."""
        legal_parsers = []
        # Tricky edge case: if the first parser can both end and accept the character,
        # and the second parser can also accept, we don't know which scenario we are dealing
        # with, so we need to return a UnionParser.
        for idx, parser in enumerate(self.parsers):
            if new_character in parser.get_allowed_characters():
                updated_parser = parser.add_character(new_character)
                # The parsers after idx still need to run once this one ends.
                next_parsers = [updated_parser] + self.parsers[idx+1:]
                if len(next_parsers) == 1:
                    legal_parsers.append(next_parsers[0])
                else:
                    legal_parsers.append(SequenceParser(next_parsers))
            # A parser that cannot end yet blocks the parsers after it from
            # receiving the character, so stop considering them.
            if not parser.can_end():
                break
        if len(legal_parsers) == 1:
            return legal_parsers[0]
        return UnionParser(legal_parsers)
    def get_allowed_characters(self) -> str:
        """Union of the allowed characters of each parser up to (and
        including) the first one that cannot yet end."""
        allowed_characters = set()
        for parser in self.parsers:
            allowed_characters.update(parser.get_allowed_characters())
            if not parser.can_end():
                break
        return "".join(allowed_characters)
    def can_end(self) -> bool:
        # The whole sequence may end only when every remaining parser can end.
        return all([parser.can_end() for parser in self.parsers])
    def shortcut_key(self) -> Optional[str]:
        # Only the degenerate single-parser sequence can delegate a shortcut.
        return self.parsers[0].shortcut_key() if len(self.parsers) == 1 else None
    def cache_key(self) -> Optional[Hashable]:
        # Cacheable only when every child state is itself cacheable.
        all_cache_keys = tuple(parser.cache_key() for parser in self.parsers)
        if all(key is not None for key in all_cache_keys):
            return ('sequence', all_cache_keys)
        return None
|