tongilcoto committed on
Commit
9a93d63
·
1 Parent(s): 81917a3

First try

Browse files
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
37
+ *.png filter=lfs diff=lfs merge=lfs -text
agent_OAI_03.py ADDED
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import requests
3
+ import inspect
4
+ import base64
5
+ from mimetypes import guess_type
6
+ from dotenv import load_dotenv
7
+ from typing import TypedDict, Annotated, List
8
+ from langgraph.graph.message import add_messages
9
+ from langchain_core.messages import AnyMessage
10
+ from langchain_openai import ChatOpenAI
11
+ from langgraph.prebuilt import ToolNode
12
+ from langgraph.graph import START, StateGraph
13
+ from langgraph.prebuilt import tools_condition
14
+ from langchain_tavily import TavilySearch
15
+ from langchain_community.tools import RequestsGetTool
16
+ from langchain_community.utilities.requests import TextRequestsWrapper
17
+ from openai import OpenAI, audio
18
+ import pandas as pd, openpyxl
19
+ from langchain_experimental.tools.python.tool import PythonREPLTool
20
+
21
+ load_dotenv()
22
+
23
+ # Initialize our LLM
24
+ gpt1 = 'gpt-4o'
25
+ gpt2 = 'gpt-4.1-2025-04-14'
26
+ gpt3 = 'o3-mini'
27
+ gptmm = "gpt-4-vision-preview"
28
+ #model = ChatOpenAI(model=gpt2, temperature=0)
29
+ model = ChatOpenAI(model=gpt3)
30
+
31
def integer_comparison(numb1: int, numb2: int) -> int:
    """
    Three-way comparison of two integers.

    Args:
        numb1: first integer operand.
        numb2: second integer operand.

    Returns:
        0 when numb1 == numb2, 1 when numb1 > numb2, -1 when numb1 < numb2.
    """
    # Classic "cmp" idiom: each boolean is 0 or 1, so the difference is
    # exactly -1, 0 or 1 depending on the ordering of the operands.
    return (numb1 > numb2) - (numb1 < numb2)
47
+
48
def local_image_to_data_url(image_path: str) -> str:
    """Return the file at *image_path* encoded as a base64 ``data:`` URL.

    The MIME type is guessed from the file extension; unknown extensions
    fall back to ``application/octet-stream``.
    """
    mime, _encoding = guess_type(image_path)
    mime = mime or "application/octet-stream"
    with open(image_path, "rb") as fh:
        encoded = base64.b64encode(fh.read()).decode("utf-8")
    return f"data:{mime};base64,{encoded}"
58
+
59
def describe_a_photo(file: str) -> str:
    """
    Describe an image from the local ``assets/`` folder using a vision model.

    Args:
        file: file name of the image (resolved as ``assets/{file}``).

    Returns:
        The model's textual description of the image.
    """
    data_url = local_image_to_data_url(f"assets/{file}")
    client = OpenAI()  # reads OPENAI_API_KEY from the environment
    messages = [
        {
            "role": "user",
            "content": [
                # Fix: every element of a multi-part content list must be a
                # typed part object; the original passed a bare string here,
                # which the Chat Completions API rejects.
                {"type": "text", "text": "Describe what you see in this image:"},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": data_url,
                        "detail": "auto",  # optional: "low", "high", or "auto"
                    },
                },
            ],
        }
    ]
    resp = client.chat.completions.create(model="gpt-4o", messages=messages)
    return resp.choices[0].message.content
85
+
86
def transcript_an_audio(file: str) -> str:
    """
    Transcribe an audio file from the local ``assets/`` folder with Whisper.

    Args:
        file: file name of the audio file (resolved as ``assets/{file}``).

    Returns:
        The transcription text.
    """
    # Use an explicit client, consistent with describe_a_photo, instead of
    # the module-level `openai.audio` proxy (which relies on implicit
    # global client state).
    client = OpenAI()
    with open(f"assets/{file}", "rb") as audio_file:
        resp = client.audio.transcriptions.create(
            model="whisper-1",
            file=audio_file,
            # optionally: prompt="...", response_format="verbose_json", temperature=0, language="en"
        )
    return resp.text
102
+
103
def read_an_excel(file: str) -> str:
    """
    Read an Excel workbook from ``assets/`` and serialize its rows.

    Args:
        file: file name of the Excel workbook (resolved as ``assets/{file}``).

    Returns:
        A JSON array string with one object per row. (The docstring always
        promised JSON; the original returned Python repr text, which is not
        valid JSON — single quotes, Timestamp(...), etc.)
    """
    import json  # local import: keeps module-level dependencies unchanged

    df = pd.read_excel(f"assets/{file}")
    records = df.to_dict(orient="records")
    # default=str stringifies non-JSON-native cells (Timestamps, NaT, ...).
    return json.dumps(records, ensure_ascii=False, default=str)
113
+
114
def load_python_script(file: str) -> str:
    """
    Load a Python script from ``assets/`` and return its source text.

    Args:
        file: file name of the script (resolved as ``assets/{file}``).

    Returns:
        The file content as plain text. (The original read the file in
        binary mode and returned ``str(bytes)``, i.e. the "b'...'" repr
        with escaped newlines — unreadable for the LLM.)
    """
    with open(f"assets/{file}", "r", encoding="utf-8") as f:
        return f.read()
124
+
125
# HTTP layer for the page-fetch tool; customize headers/proxy here if needed.
requests_wrapper = TextRequestsWrapper()  # or customize headers/proxy if needed

# noinspection PyArgumentList
visit_tool = RequestsGetTool(
    requests_wrapper=requests_wrapper,
    # Opt-in flag: the LLM may request arbitrary URLs through this tool.
    allow_dangerous_requests=True  # PyCharm may flag this, ignore inspection
)

# Add to your tools list:
#visit_tool = RequestsGetTool(allow_dangerous_requests=True)
# Tool belt handed to the model: web search, page fetch, the local helper
# functions defined above, and a Python REPL for on-the-fly computation.
tools = [TavilySearch(max_results=5),
         visit_tool,
         integer_comparison,
         describe_a_photo,
         transcript_an_audio,
         read_an_excel,
         load_python_script,
         PythonREPLTool()]

#llm_with_tools = model.bind_tools(tools, parallel_tool_calls=False)
llm_with_tools = model.bind_tools(tools)
147
+
148
+
149
+ # Generate the AgentState and Agent graph
150
class AgentState(TypedDict):
    # Graph state: the running conversation. The add_messages reducer makes
    # LangGraph append messages returned by nodes instead of replacing the list.
    messages: Annotated[List[AnyMessage], add_messages]
152
+
153
def assistant(state: AgentState):
    """LLM node: run the tool-bound model over the accumulated messages.

    Returns a partial state update; add_messages appends the reply to the
    conversation.
    """
    reply = llm_with_tools.invoke(state["messages"])
    return {"messages": [reply]}
157
+
158
def create_and_compile_oai_agent():
    """
    Build and compile the ReAct-style LangGraph agent.

    The graph alternates between the ``assistant`` LLM node and the
    ``tools`` node until the model stops requesting tool calls.

    Returns:
        A compiled LangGraph runnable; call ``.invoke({"messages": [...]})``.
    """
    # Removed: leftover debug code that constructed an OpenAI client and
    # listed available models on every call — an unused network round-trip
    # with unused local imports.

    ## The graph
    builder = StateGraph(AgentState)

    # Define nodes: these do the work
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))

    # Define edges: these determine how the control flow moves
    builder.add_edge(START, "assistant")
    builder.add_conditional_edges(
        "assistant",
        # If the latest message requires a tool, route to tools
        # Otherwise, provide a direct response
        tools_condition,
    )
    builder.add_edge("tools", "assistant")
    return builder.compile()
186
+
app.py CHANGED
@@ -1,23 +1,14 @@
1
  import os
2
  import gradio as gr
3
  import requests
4
- import inspect
5
  import pandas as pd
 
 
6
 
7
  # (Keep Constants as is)
8
  # --- Constants ---
9
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
10
 
11
- # --- Basic Agent Definition ---
12
- # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
13
- class BasicAgent:
14
- def __init__(self):
15
- print("BasicAgent initialized.")
16
- def __call__(self, question: str) -> str:
17
- print(f"Agent received question (first 50 chars): {question[:50]}...")
18
- fixed_answer = "This is a default answer."
19
- print(f"Agent returning fixed answer: {fixed_answer}")
20
- return fixed_answer
21
 
22
  def run_and_submit_all( profile: gr.OAuthProfile | None):
23
  """
@@ -40,7 +31,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
40
 
41
  # 1. Instantiate Agent ( modify this part to create your agent)
42
  try:
43
- agent = BasicAgent()
44
  except Exception as e:
45
  print(f"Error instantiating agent: {e}")
46
  return f"Error initializing agent: {e}", None
@@ -50,6 +41,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
50
 
51
  # 2. Fetch Questions
52
  print(f"Fetching questions from: {questions_url}")
 
53
  try:
54
  response = requests.get(questions_url, timeout=15)
55
  response.raise_for_status()
@@ -72,6 +64,36 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
72
  # 3. Run your Agent
73
  results_log = []
74
  answers_payload = []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
  print(f"Running agent on {len(questions_data)} questions...")
76
  for item in questions_data:
77
  task_id = item.get("task_id")
@@ -80,7 +102,18 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
80
  print(f"Skipping item with missing task_id or question: {item}")
81
  continue
82
  try:
83
- submitted_answer = agent(question_text)
 
 
 
 
 
 
 
 
 
 
 
84
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
85
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
86
  except Exception as e:
 
1
  import os
2
  import gradio as gr
3
  import requests
 
4
  import pandas as pd
5
+ from langchain_core.messages import HumanMessage, SystemMessage
6
+ from agent_OAI_03 import create_and_compile_oai_agent
7
 
8
  # (Keep Constants as is)
9
  # --- Constants ---
10
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
11
 
 
 
 
 
 
 
 
 
 
 
12
 
13
  def run_and_submit_all( profile: gr.OAuthProfile | None):
14
  """
 
31
 
32
  # 1. Instantiate Agent ( modify this part to create your agent)
33
  try:
34
+ agent = create_and_compile_oai_agent()
35
  except Exception as e:
36
  print(f"Error instantiating agent: {e}")
37
  return f"Error initializing agent: {e}", None
 
41
 
42
  # 2. Fetch Questions
43
  print(f"Fetching questions from: {questions_url}")
44
+ response = None
45
  try:
46
  response = requests.get(questions_url, timeout=15)
47
  response.raise_for_status()
 
64
  # 3. Run your Agent
65
  results_log = []
66
  answers_payload = []
67
+
68
+ prompt = f"""
69
+ As Alfred the butler:
70
+ 1.- analyze the human user question
71
+ 2.- design a plan for getting the solution, including the use of the provided tools (described below)
72
+ 3.- and, based on your reasoning and the information gathered using the available tools,
73
+ provide exactly the answer in the explained format
74
+ (there be always a specific format for a direct response: a number, a word, etc.)
75
+
76
+ You are sported with great tools:
77
+ - Based on the words of the human you can look for related pages in Internet with TavilySearch: it will
78
+ give you a list of URL as well the pages sneak peak of its content. Since the sneak peaks rarely contain the
79
+ final answer, in any case it will help you to choose what URL to visit next using the next tool:
80
+ - RequestsGetTool, for capturing the text, along the page DOM, so you can complete your knowledge and make a new decision
81
+ - Describe a Photo, for depicting the content of a picture for further questions. The tool just needs the file name,
82
+ it will take care of loading it from file system and send it to the VLM.
83
+ - Transcript an Audio, for transcripting the content of an audio file for further questions. The tool just needs
84
+ the file name, it will take care of loading it from file system and send it to the transcriptor.
85
+ - Read an excel file, for attaching the content in a text string to the messages. The tool just needs name, it will
86
+ take care of loading it from file system and send it to the agent.
87
+
88
+ Very important, just answer with the specific information in the specified format.
89
+ It will be parsed with a deterministic program and it will evaluate your skills.
90
+ For example
91
+ - User: "How many items .....?"
92
+ - Your answer: "5"
93
+ - User: "comma separated list of ....."
94
+ - Your answer: "item1, item2, item3" (use a blank space on the right of the comma)
95
+ """
96
+
97
  print(f"Running agent on {len(questions_data)} questions...")
98
  for item in questions_data:
99
  task_id = item.get("task_id")
 
102
  print(f"Skipping item with missing task_id or question: {item}")
103
  continue
104
  try:
105
+ question_content = question_text
106
+ if item.get('file_name'):
107
+ if (item.get('file_name').endswith('.png') or
108
+ item.get('file_name').endswith('.mp3') or
109
+ item.get('file_name').endswith('.xlsx') or
110
+ item.get('file_name').endswith('.py')):
111
+ question_content = [
112
+ {"type": "text", "text": question_text},
113
+ {"type": "text", "text": f"the file name is: {item.get('file_name')}"}
114
+ ]
115
+ messages = [SystemMessage(content=prompt), HumanMessage(content=question_content)]
116
+ submitted_answer = agent.invoke({"messages": messages})
117
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
118
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
119
  except Exception as e:
assets/1f975693-876d-457b-a649-393859e79bf3.mp3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:200f767e732b49efef5c05d128903ee4d2c34e66fdce7f5593ac123b2e637673
3
+ size 280868
assets/7bd855d8-463d-4ed5-93ca-5fe35145f733.xlsx ADDED
Binary file (5.29 kB). View file
 
assets/99c9cc74-fdc8-46c6-8f8d-3ce2d3bfeea3.mp3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b218c951c1f888f0bbe6f46c080f57afc7c9348fffc7ba4da35749ff1e2ac40f
3
+ size 179304
assets/cca530fc-4052-43b2-b130-b30968d8aa44.png ADDED

Git LFS Details

  • SHA256: daaa417b9746471ec313c3233bb63175908d49de0859b5bce99431392e45efd8
  • Pointer size: 130 Bytes
  • Size of remote file: 63.1 kB
assets/f918266a-b3e0-4914-865d-4faa564f1aef.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# NOTE(review): this file is an evaluation asset (the agent is asked to reason
# about what it prints). Its executable behavior must not be changed.
from random import randint
import time

class UhOh(Exception):
    pass

class Hmm:
    def __init__(self):
        # Uniform random value in [-100, 100].
        self.value = randint(-100, 100)

    def Yeah(self):
        # Returns True only when value == 0; any other draw raises UhOh.
        if self.value == 0:
            return True
        else:
            raise UhOh()

def Okay():
    # Infinite generator of fresh Hmm instances.
    while True:
        yield Hmm()

def keep_trying(go, first_try=True):
    # Draws Hmm instances until one has value 0, so when it finally returns,
    # the result is always 0. The status messages print only on the first failure.
    maybe = next(go)
    try:
        if maybe.Yeah():
            return maybe.value
    except UhOh:
        if first_try:
            print("Working...")
        print("Please wait patiently...")
        time.sleep(0.1)
        return keep_trying(go, first_try=False)

if __name__ == "__main__":
    go = Okay()
    print(f"{keep_trying(go)}")
runner_endpoints.py ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ import sys
3
+ from pathlib import Path
4
+ from langchain_core.messages import HumanMessage, SystemMessage
5
+ from langchain_core.runnables import RunnableConfig
6
+ from langfuse.callback import CallbackHandler
7
+
8
+
9
def run(provider, questions_data: list, question_number: int):
    """Run the selected agent over the first *question_number* questions.

    Args:
        provider: "openai" or "deepseek" — selects which agent module to load.
        questions_data: list of question dicts (keys: task_id, question,
            optionally file_name).
        question_number: how many questions from the head of the list to run.

    Returns:
        None on a normal run; an (error-string, None) tuple if agent
        instantiation fails.
    """
    # 1. Instantiate Agent ( modify this part to create your agent)
    try:
        if provider == "openai":
            # from agent_OAI_01 import create_and_compile_oai_agent
            from agent_OAI_03 import create_and_compile_oai_agent
            agent = create_and_compile_oai_agent()
        elif provider == "deepseek":
            from agent_DS_01 import create_and_compile_ds_agent
            agent = create_and_compile_ds_agent()
        # elif provider == "claude":
        print('agent compiled')
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None

    # 3. Run your Agent
    results_log = []
    answers_payload = []

    # System prompt handed to every question; it describes the available
    # tools and the strict answer-format contract used by the grader.
    prompt = f"""
    As Alfred the butler:
    1.- analyze the human user question
    2.- design a plan for getting the solution, including the use of the provided tools (described below)
    3.- and, based on your reasoning and the information gathered using the available tools,
    provide exactly the answer in the explained format
    (there be always a specific format for a direct response: a number, a word, etc.)

    You are sported with great tools:
    - Based on the words of the human you can look for related pages in Internet with TavilySearch: it will
    give you a list of URL as well the pages sneak peak of its content. Since the sneak peaks rarely contain the
    final answer, in any case it will help you to choose what URL to visit next using the next tool:
    - RequestsGetTool, for capturing the text, along the page DOM, so you can complete your knowledge and make a new decision
    - Describe a Photo, for depicting the content of a picture for further questions. The tool just needs the file name,
    it will take care of loading it from file system and send it to the VLM.
    - Transcript an Audio, for transcripting the content of an audio file for further questions. The tool just needs
    the file name, it will take care of loading it from file system and send it to the transcriptor.
    - Read an excel file, for attaching the content in a text string to the messages. The tool just needs name, it will
    take care of loading it from file system and send it to the agent.

    **Very important**, just answer with the specific information in the specified format.
    It will be parsed with a deterministic program and it will evaluate your skills.
    For example
    - User: "How many items .....?"
    - Your answer: "5"
    - User: "comma separated list of ....."
    - Your answer: "item1, item2, item3" (use a blank space on the right of the comma)
    Another format rule for your answer: The first letter of the final answer should be upper case unless the word is in the question.
    """

    # NOTE(review): `match` is never incremented — the comparison code below
    # is commented out — so the final tally always prints 0.
    match = 0

    for i, item in enumerate(questions_data[:question_number]):
        print(item)
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            question_content = question_text
            # If the question references an attached file of a supported
            # type, append its name so the agent can route to the right tool.
            if item.get('file_name'):
                if (item.get('file_name').endswith('.png') or
                        item.get('file_name').endswith('.mp3') or
                        item.get('file_name').endswith('.xlsx') or
                        item.get('file_name').endswith('.py')):
                    question_content = [
                        {"type": "text", "text": question_text},
                        {"type": "text", "text": f"the file name is: {item.get('file_name')}"}
                    ]
            messages = [SystemMessage(content=prompt), HumanMessage(content=question_content)]
            print('invocando')

            # WORKS!!!
            # TEMPORARY DISABLED WHILE TESTING THE COURSE TOPIC
            # Langfuse: https://cloud.langfuse.com/
            # Initialize Langfuse CallbackHandler for LangGraph/Langchain (tracing)
            langfuse_handler = CallbackHandler()

            # Invoke the compiled graph; tracing callbacks go via the config.
            submitted_answer = agent.invoke(
                # input={"messages": messages, "llm": get_model(item.get('file_name'))},
                input={"messages": messages},
                config=RunnableConfig(callbacks=[langfuse_handler])
            )

            # submitted_answer = agent.invoke({"messages": messages})
            print('response')
            # The agent's final reply is the last message in the state.
            print(submitted_answer['messages'][-1].content)
            #print("Expected: ", item.get("Final answer"))
            #if submitted_answer['messages'][-1].content == item.get("Final answer"):
            #    print("***** MATCH !!! *****")
            #    match += 1
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer['messages'][-1].content})
            results_log.append({"Task ID": task_id, "Question": question_text,
                                "Submitted Answer": submitted_answer['messages'][-1].content})
        except Exception as e:
            # Best-effort: record the failure and keep processing remaining questions.
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    print('**** OUTPUT ***')
    print(answers_payload)
    print('\n\n ===== ---- TOTAL MATCHES:', match)
    return None
+
114
+
115
+ if __name__ == "__main__":
116
+
117
+ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
118
+ api_url = DEFAULT_API_URL
119
+ questions_url = f"{api_url}/questions"
120
+ submit_url = f"{api_url}/submit"
121
+
122
+ # 2. Fetch Questions
123
+ print(f"Fetching questions from: {questions_url}")
124
+ try:
125
+ response = requests.get(questions_url, timeout=15)
126
+ response.raise_for_status()
127
+ questions_data = response.json()
128
+ if not questions_data:
129
+ print("Fetched questions list is empty.")
130
+ sys.exit("Fetched questions list is empty or invalid format.")
131
+ print(f"Fetched {len(questions_data)} questions.")
132
+ except requests.exceptions.RequestException as e:
133
+ print(f"Error fetching questions: {e}")
134
+ sys.exit(f"Error fetching questions: {e}")
135
+ except requests.exceptions.JSONDecodeError as e:
136
+ print(f"Error decoding JSON response from questions endpoint: {e}")
137
+ print(f"Response text: {response.text[:500]}")
138
+ sys.exit(f"Error decoding server response for questions: {e}")
139
+ except Exception as e:
140
+ print(f"An unexpected error occurred fetching questions: {e}")
141
+ sys.exit(f"An unexpected error occurred fetching questions: {e}")
142
+
143
+
144
+ run("openai", questions_data, question_number=20)
145
+ # run(provider, [q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12], question_number=args.max_questions)
146
+