# NOTE: removed non-Python scrape artifacts that preceded the code
# (file-size header, commit hashes, and a line-number gutter from a file viewer).
from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool
import datetime
import requests
import pytz
import yaml
import random
from tools.final_answer import FinalAnswerTool

from Gradio_UI import GradioUI

@tool
def get_word_prompt(die_roll: str) -> str:
    """A tool that generates a random word prompt based on a 3d6 roll.

    Args:

        die_roll: A string representing the result of the roll (e.g. "111", "222", "333", ..., "666").

    """
    # Build the roll -> word lookup from word_prompts.csv, one "roll,word"
    # pair per line. Blank or malformed lines (no comma) are skipped instead
    # of raising IndexError as the old split(",")[1] did.
    dict_word_prompts = {}
    with open("word_prompts.csv", "r") as f:
        for line in f:
            key, sep, value = line.partition(",")
            if sep:
                dict_word_prompts[key] = value.strip()

    # get the word prompt based on the die roll
    word_prompt = dict_word_prompts.get(die_roll)
    if word_prompt:
        return f"Your word prompt is: \"{word_prompt}\""
    else:
        return f"Invalid die roll '{die_roll}'. Please provide a valid 3d6 roll (e.g., '111', '222', '333', ..., '666')."
    
@tool
def get_oracle_result(likelihood_of_favorable_outcome: str) -> str:
    """A tool that provides a yes/no answer based on a likelihood of a favorable outcome.

    Args:

        likelihood_of_favorable_outcome: A string representing the likelihood of a favorable outcome. Choose only: "low", "medium", or "high".

    """
    # Better odds -> more d6 rolled; the single best die decides the outcome.
    # (The old code mixed str and int dice: "high" crashed on str(a, b, c),
    # "low" crashed on '"5" >= 4', and "medium" could never match roll == "6".)
    num_dice = {"low": 1, "medium": 2, "high": 3}.get(likelihood_of_favorable_outcome)
    if num_dice is None:
        # Previously an unknown likelihood fell through to a NameError on
        # the undefined `roll`; report the misuse cleanly instead.
        return f"Invalid likelihood '{likelihood_of_favorable_outcome}'. Choose only: 'low', 'medium', or 'high'."

    rolls = [random.randint(1, 6) for _ in range(num_dice)]
    roll = max(rolls)
    # critical is 2 6s (impossible on a single die, so "low" never crits)
    is_critical = rolls.count(6) >= 2

    # now we have the roll, let's return the result
    if roll == 6:
        if is_critical:
            return "Yes, and it's a critical success! You did it, plus another favorable outcome."
        else:
            return "Yes, and it's a success! You did it without any negative consequences."
    elif roll >= 4:
        return "Yes, but it's a partial success. You did it, but there are some minor negative consequences"
    elif roll == 3:
        return "No, because... Work with the GM to find out why."
    elif roll == 2:
        return "No, it failed. There are negative consequences."
    else:
        return "No. That's a critical failure. There are negative consequences, plus something else goes wrong."


@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.

    Args:

        timezone: A string representing a valid timezone (e.g., 'America/New_York').

    """
    try:
        # pytz raises UnknownTimeZoneError for bad names; any failure is
        # reported back to the agent as a string rather than propagated.
        tz = pytz.timezone(timezone)
        stamp = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
    return f"The current local time in {timezone} is: {stamp}"


# Required terminal tool: the agent calls this to emit its final answer.
final_answer = FinalAnswerTool()

# If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud' 

model = HfApiModel(
max_tokens=2096,  # NOTE(review): 2096 looks like a typo for 2048 — confirm intended limit
temperature=0.5,
model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
custom_role_conversions=None,
)


# Import tool from Hub
# (trust_remote_code=True runs code from the Hub repo — only safe because
# the source is the official agents-course space)
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

# System/planning prompt templates for the CodeAgent.
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

# Wire the model and tools (final_answer must be included) into the agent.
agent = CodeAgent(
    model=model,
    tools=[final_answer, image_generation_tool, get_oracle_result, get_word_prompt],
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates
)


# Launch the Gradio chat UI wrapped around the agent (blocks here).
GradioUI(agent).launch()