Coverage for tinytroupe/enrichment/tiny_styler.py: 0%

26 statements  

« prev     ^ index     » next       coverage.py v7.13.4, created at 2026-02-28 17:48 +0000

1from tinytroupe.enrichment import logger 

2from tinytroupe.utils import JsonSerializableRegistry 

3from tinytroupe.utils.llm import LLMChat 

4import tinytroupe.utils as utils 

5 

6 

class TinyStyler(JsonSerializableRegistry):
    """
    Applies a requested writing or speaking style to a piece of content
    while keeping all of the information the content originally carried.
    """

    def __init__(self, use_past_results_in_context=False) -> None:
        """
        Create a new TinyStyler.

        Args:
            use_past_results_in_context (bool): Whether earlier styling results
                should be fed back as context into subsequent styling calls.
        """
        self.use_past_results_in_context = use_past_results_in_context
        # Accumulates {"original", "style", "styled"} records across calls
        # when use_past_results_in_context is enabled.
        self.context_cache = []

    def apply_style(self, content: str, style: str, content_type: str = None,
                    context_info: str = "", context_cache: list = None, verbose: bool = False,
                    temperature: float = 0.7):
        """
        Rewrite `content` in the requested `style`, preserving all the original information.

        Args:
            content (str): The content to style.
            style (str): The style to apply (e.g., "professional", "casual", "technical", etc.).
            content_type (str, optional): The type of content (e.g., "email", "report", "conversation").
            context_info (str, optional): Additional context information.
            context_cache (list, optional): Previous styling results to use as context.
            verbose (bool, optional): Whether to print debug information.
            temperature (float, optional): The temperature to use for the LLM generation.

        Returns:
            str: The styled content, or None when the model produced no result.
        """
        # Fall back to the instance-level cache when the caller supplied no
        # context and past results are configured to be part of the context.
        if context_cache is None and self.use_past_results_in_context:
            context_cache = self.context_cache

        template_variables = dict(
            content=content,
            style=style,
            content_type=content_type,
            context_info=context_info,
            context_cache=context_cache,
        )

        # Build the chat session from the styler prompt templates.
        llm = LLMChat(system_template_name="styler.system.mustache",
                      user_template_name="styler.user.mustache",
                      base_module_folder="enrichment",
                      temperature=temperature)

        raw_result = llm.call(**template_variables)

        message = f"Styling result: {raw_result}"
        logger.debug(message)
        if verbose:
            print(message)

        # Nothing came back from the model.
        if raw_result is None:
            return None

        # Prefer the contents of a fenced code block; fall back to the raw text.
        styled = utils.extract_code_block(raw_result) or raw_result

        # Remember this styling round for future calls when caching is enabled.
        if self.use_past_results_in_context:
            self.context_cache.append({
                "original": content,
                "style": style,
                "styled": styled
            })

        return styled