try:
code_action = self.parse_code_blob(code_action)
except Exception as e:
error_msg = f"Error in code parsing: {e}. Be sure to provide correct code"
self.logger.error(error_msg, exc_info=1)
return error_msg
# Execute
self.log_rationale_code_action(rationale, code_action)
try:
available_tools = {**BASE_PYTHON_TOOLS.copy(), **self.toolbox.tools}
output = self.python_evaluator(
code_action,
static_tools=available_tools,
custom_tools={},
state=self.state,
authorized_imports=self.authorized_imports,
)
self.logger.info(self.state["print_outputs"])
return output
except Exception as e:
error_msg = f"Error in execution: {e}. Be sure to provide correct code."
self.logger.error(error_msg, exc_info=1)
return error_msg
class ReactAgent(Agent):
"""
This agent solves the given task step by step, using the ReAct framework:
While the objective is not reached, the agent will perform a cycle of thinking and acting.
The action will be parsed from the LLM output: it consists of calls to tools from the toolbox, with arguments chosen by the LLM engine.
"""
def __init__(
self,
tools: List[Tool],
llm_engine: Optional[Callable] = None,
system_prompt: Optional[str] = None,
tool_description_template: Optional[str] = None,
grammar: Optional[Dict[str, str]] = None,
plan_type: Optional[str] = None,
planning_interval: Optional[int] = None,
**kwargs,
):
if llm_engine is None:
llm_engine = HfApiEngine()
if system_prompt is None:
system_prompt = DEFAULT_REACT_CODE_SYSTEM_PROMPT
if tool_description_template is None:
tool_description_template = DEFAULT_TOOL_DESCRIPTION_TEMPLATE
if plan_type is None:
plan_type = SUPPORTED_PLAN_TYPES[0]
else:
assert plan_type in SUPPORTED_PLAN_TYPES, f"plan type {plan_type} is not supported"
super().__init__(
tools=tools,
llm_engine=llm_engine,
system_prompt=system_prompt,
tool_description_template=tool_description_template,
grammar=grammar,
**kwargs,
)
self.planning_interval = planning_interval
self.plan_type = plan_type
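A minimal sketch of how these planning options reach this constructor through a concrete subclass (the `ReactJsonAgent` defined further below); the task string is illustrative:
```py
from transformers.agents import ReactJsonAgent

# `planning_interval` is forwarded to ReactAgent.__init__; with
# planning_interval=2, a planning step runs before iterations 0, 2, 4, ...
agent = ReactJsonAgent(tools=[], planning_interval=2)
agent.run("How many seconds are there in a leap year?")
```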
def provide_final_answer(self, task) -> str:
"""
This method provides a final answer to the task, based on the logs of the agent's interactions.
"""
self.prompt = [
{
"role": MessageRole.SYSTEM,
"content": "An agent tried to answer an user query but it got stuck and failed to do so. You are tasked with providing an answer instead. Here is the agent's memory:",
}
]
self.prompt += self.write_inner_memory_from_logs()[1:]
self.prompt += [
{
"role": MessageRole.USER,
"content": f"Based on the above, please provide an answer to the following user request:\n{task}",
}
]
try:
return self.llm_engine(self.prompt)
except Exception as e:
return f"Error in generating final llm output: {e}." | 385 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/agents.py |
def run(self, task: str, stream: bool = False, reset: bool = True, **kwargs):
"""
Runs the agent for the given task.
Args:
task (`str`): The task to perform
Example:
```py
from transformers.agents import ReactCodeAgent
agent = ReactCodeAgent(tools=[])
agent.run("What is the result of 2 power 3.7384?")
```
"""
self.task = task
if len(kwargs) > 0:
self.task += f"\nYou have been provided with these initial arguments: {str(kwargs)}."
self.state = kwargs.copy()
if reset:
self.initialize_for_run()
else:
self.logs.append({"task": task})
if stream:
return self.stream_run(task)
else:
return self.direct_run(task)
def stream_run(self, task: str):
"""
Runs the agent in streaming mode, yielding steps as they are executed: should only be called from the `run` method.
"""
final_answer = None
iteration = 0
while final_answer is None and iteration < self.max_iterations:
step_start_time = time.time()
step_log_entry = {"iteration": iteration, "start_time": step_start_time}
try:
self.step(step_log_entry)
if "final_answer" in step_log_entry:
final_answer = step_log_entry["final_answer"]
except AgentError as e:
self.logger.error(e, exc_info=1)
step_log_entry["error"] = e
finally:
step_end_time = time.time()
step_log_entry["step_end_time"] = step_end_time
step_log_entry["step_duration"] = step_end_time - step_start_time
self.logs.append(step_log_entry)
for callback in self.step_callbacks:
callback(step_log_entry)
iteration += 1
yield step_log_entry
if final_answer is None and iteration == self.max_iterations:
error_message = "Reached max iterations."
final_step_log = {"error": AgentMaxIterationsError(error_message)}
self.logs.append(final_step_log)
self.logger.error(error_message, exc_info=1)
final_answer = self.provide_final_answer(task)
final_step_log["final_answer"] = final_answer
final_step_log["step_duration"] = 0
for callback in self.step_callbacks:
callback(final_step_log)
yield final_step_log
yield final_answer
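Because `stream_run` is a generator, `run(..., stream=True)` yields one log entry per step and finally the answer itself. A minimal consumption sketch (the task is illustrative):
```py
from transformers.agents import ReactCodeAgent

agent = ReactCodeAgent(tools=[])
for entry in agent.run("What is 2 to the power 3.7384?", stream=True):
    # Intermediate entries are dicts carrying timing, rationale and errors;
    # the very last yielded value is the final answer itself.
    print(entry)
```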
def direct_run(self, task: str):
"""
Runs the agent in direct mode, returning outputs only at the end: should only be called from the `run` method.
"""
final_answer = None
iteration = 0
while final_answer is None and iteration < self.max_iterations:
step_start_time = time.time()
step_log_entry = {"iteration": iteration, "start_time": step_start_time}
try:
if self.planning_interval is not None and iteration % self.planning_interval == 0:
self.planning_step(task, is_first_step=(iteration == 0), iteration=iteration)
self.step(step_log_entry)
if "final_answer" in step_log_entry:
final_answer = step_log_entry["final_answer"]
except AgentError as e:
self.logger.error(e, exc_info=1)
step_log_entry["error"] = e
finally:
step_end_time = time.time()
step_log_entry["step_end_time"] = step_end_time
step_log_entry["step_duration"] = step_end_time - step_start_time
self.logs.append(step_log_entry)
for callback in self.step_callbacks:
callback(step_log_entry)
iteration += 1
if final_answer is None and iteration == self.max_iterations:
error_message = "Reached max iterations."
final_step_log = {"error": AgentMaxIterationsError(error_message)}
self.logs.append(final_step_log)
self.logger.error(error_message, exc_info=1)
final_answer = self.provide_final_answer(task)
final_step_log["final_answer"] = final_answer
final_step_log["step_duration"] = 0
for callback in self.step_callbacks:
callback(final_step_log)
return final_answer
def planning_step(self, task, is_first_step: bool = False, iteration: int = None):
"""
Used periodically by the agent to plan the next steps to reach the objective.
Args:
task (`str`): The task to perform
is_first_step (`bool`): If this step is not the first one, the plan should be an update over a previous plan.
iteration (`int`): The number of the current step, used as an indication for the LLM.
"""
if is_first_step:
message_prompt_facts = {"role": MessageRole.SYSTEM, "content": SYSTEM_PROMPT_FACTS}
message_prompt_task = {
"role": MessageRole.USER,
"content": f"""Here is the task:
```
{task}
```
Now begin!""",
}
answer_facts = self.llm_engine([message_prompt_facts, message_prompt_task])
message_system_prompt_plan = {
"role": MessageRole.SYSTEM,
"content": PROMPTS_FOR_INITIAL_PLAN[self.plan_type]["system"],
}
message_user_prompt_plan = {
"role": MessageRole.USER,
"content": PROMPTS_FOR_INITIAL_PLAN[self.plan_type]["user"].format(
task=task,
tool_descriptions=self._toolbox.show_tool_descriptions(self.tool_description_template),
managed_agents_descriptions=(
show_agents_descriptions(self.managed_agents) if self.managed_agents is not None else ""
),
answer_facts=answer_facts,
),
}
answer_plan = self.llm_engine(
[message_system_prompt_plan, message_user_prompt_plan], stop_sequences=["<end_plan>"]
)
final_plan_redaction = f"""Here is the plan of action that I will follow to solve the task:
```
{answer_plan}
```"""
final_facts_redaction = f"""Here are the facts that I know so far:
```
{answer_facts}
```""".strip()
self.logs.append({"plan": final_plan_redaction, "facts": final_facts_redaction})
self.logger.log(36, "===== Initial plan =====")
self.logger.log(35, final_plan_redaction)
else: # update plan
agent_memory = self.write_inner_memory_from_logs(
summary_mode=False
) # This will not log the plan but will log facts
# Redact updated facts
facts_update_system_prompt = {
"role": MessageRole.SYSTEM,
"content": SYSTEM_PROMPT_FACTS_UPDATE,
}
facts_update_message = {
"role": MessageRole.USER,
"content": USER_PROMPT_FACTS_UPDATE,
}
facts_update = self.llm_engine([facts_update_system_prompt] + agent_memory + [facts_update_message])
# Redact updated plan
plan_update_message = {
"role": MessageRole.SYSTEM,
"content": PROMPTS_FOR_PLAN_UPDATE[self.plan_type]["system"].format(task=task),
}
plan_update_message_user = {
"role": MessageRole.USER,
"content": PROMPTS_FOR_PLAN_UPDATE[self.plan_type]["user"].format(
task=task,
tool_descriptions=self._toolbox.show_tool_descriptions(self.tool_description_template),
managed_agents_descriptions=(
show_agents_descriptions(self.managed_agents) if self.managed_agents is not None else ""
),
facts_update=facts_update,
remaining_steps=(self.max_iterations - iteration),
),
}
plan_update = self.llm_engine(
[plan_update_message] + agent_memory + [plan_update_message_user], stop_sequences=["<end_plan>"] | 385 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/agents.py |
)
# Log final facts and plan
final_plan_redaction = PLAN_UPDATE_FINAL_PLAN_REDACTION.format(task=task, plan_update=plan_update)
final_facts_redaction = f"""Here is the updated list of the facts that I know:
```
{facts_update}
```"""
self.logs.append({"plan": final_plan_redaction, "facts": final_facts_redaction})
self.logger.log(36, "===== Updated plan =====")
self.logger.log(35, final_plan_redaction)
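A short sketch of when `planning_step` fires in `direct_run`, assuming `planning_interval=3`; the agent and task are illustrative:
```py
from transformers.agents import ReactCodeAgent

# Iteration 0 -> planning_step(is_first_step=True): initial facts and plan.
# Iterations 3, 6, ... -> planning_step(is_first_step=False): facts and plan updates.
agent = ReactCodeAgent(tools=[], planning_interval=3)
agent.run("Compute 2**3.7384 and explain each step of your approach.")
```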
class ReactJsonAgent(ReactAgent):
"""
This agent solves the given task step by step, using the ReAct framework:
While the objective is not reached, the agent will perform a cycle of thinking and acting.
The tool calls will be formulated by the LLM in JSON format, then parsed and executed.
"""
def __init__(
self,
tools: List[Tool],
llm_engine: Optional[Callable] = None,
system_prompt: Optional[str] = None,
tool_description_template: Optional[str] = None,
grammar: Optional[Dict[str, str]] = None,
planning_interval: Optional[int] = None,
**kwargs,
):
if llm_engine is None:
llm_engine = HfApiEngine()
if system_prompt is None:
system_prompt = DEFAULT_REACT_JSON_SYSTEM_PROMPT
if tool_description_template is None:
tool_description_template = DEFAULT_TOOL_DESCRIPTION_TEMPLATE
super().__init__(
tools=tools,
llm_engine=llm_engine,
system_prompt=system_prompt,
tool_description_template=tool_description_template,
grammar=grammar,
planning_interval=planning_interval,
**kwargs,
)
def step(self, log_entry: Dict[str, Any]):
"""
Perform one step in the ReAct framework: the agent thinks, acts, and observes the result.
Errors are raised here; they are caught and logged in the run() method.
"""
agent_memory = self.write_inner_memory_from_logs()
self.prompt = agent_memory
self.logger.debug("===== New step =====")
# Add new step in logs
log_entry["agent_memory"] = agent_memory.copy()
self.logger.info("===== Calling LLM with this last message: =====")
self.logger.info(self.prompt[-1])
try:
additional_args = {"grammar": self.grammar} if self.grammar is not None else {}
llm_output = self.llm_engine(
self.prompt, stop_sequences=["<end_action>", "Observation:"], **additional_args
)
except Exception as e:
raise AgentGenerationError(f"Error in generating llm output: {e}.")
self.logger.debug("===== Output message of the LLM: =====")
self.logger.debug(llm_output)
log_entry["llm_output"] = llm_output
# Parse
self.logger.debug("===== Extracting action =====")
rationale, action = self.extract_action(llm_output=llm_output, split_token="Action:")
try:
tool_name, arguments = self.tool_parser(action)
except Exception as e:
raise AgentParsingError(f"Could not parse the given action: {e}.")
log_entry["rationale"] = rationale
log_entry["tool_call"] = {"tool_name": tool_name, "tool_arguments": arguments} | 386 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/agents.py |
# Execute
self.logger.warning("=== Agent thoughts:")
self.logger.log(31, rationale)
self.logger.warning(f">>> Calling tool: '{tool_name}' with arguments: {arguments}")
if tool_name == "final_answer":
if isinstance(arguments, dict):
if "answer" in arguments:
answer = arguments["answer"]
if (
isinstance(answer, str) and answer in self.state.keys()
): # if the answer is a state variable, return the value
answer = self.state[answer]
else:
answer = arguments
else:
answer = arguments
log_entry["final_answer"] = answer
return answer
else:
if arguments is None:
arguments = {}
observation = self.execute_tool_call(tool_name, arguments)
observation_type = type(observation)
if observation_type in [AgentImage, AgentAudio]:
if observation_type == AgentImage:
observation_name = "image.png"
elif observation_type == AgentAudio:
observation_name = "audio.mp3"
# TODO: observation naming could allow for different names of same type
self.state[observation_name] = observation
updated_information = f"Stored '{observation_name}' in memory."
else:
updated_information = str(observation).strip()
self.logger.info(updated_information)
log_entry["observation"] = updated_information
return log_entry
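For reference, a plausible shape of the LLM output that this `step` method parses, assuming the default JSON system prompt. The snippet below replays the split-and-parse contract on a hand-written output; the tool name and arguments are illustrative, and the real `tool_parser` may differ in details:
```py
import json

llm_output = """Thought: I should search the web for this.
Action:
{
  "action": "web_search",
  "action_input": {"query": "capital of France"}
}<end_action>"""

rationale, action = llm_output.split("Action:", 1)
blob = json.loads(action.replace("<end_action>", "").strip())
print(blob["action"], blob["action_input"])  # web_search {'query': 'capital of France'}
```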
class ReactCodeAgent(ReactAgent):
"""
This agent solves the given task step by step, using the ReAct framework:
While the objective is not reached, the agent will perform a cycle of thinking and acting.
The tool calls will be formulated by the LLM in code format, then parsed and executed.
"""
def __init__(
self,
tools: List[Tool],
llm_engine: Optional[Callable] = None,
system_prompt: Optional[str] = None,
tool_description_template: Optional[str] = None,
grammar: Optional[Dict[str, str]] = None,
additional_authorized_imports: Optional[List[str]] = None,
planning_interval: Optional[int] = None,
**kwargs,
):
if llm_engine is None:
llm_engine = HfApiEngine()
if system_prompt is None:
system_prompt = DEFAULT_REACT_CODE_SYSTEM_PROMPT
if tool_description_template is None:
tool_description_template = DEFAULT_TOOL_DESCRIPTION_TEMPLATE
super().__init__(
tools=tools,
llm_engine=llm_engine,
system_prompt=system_prompt,
tool_description_template=tool_description_template,
grammar=grammar,
planning_interval=planning_interval,
**kwargs,
)
if not is_pygments_available():
transformers_logging.warning_once(
logger,
"pygments isn't installed. Installing pygments will enable color syntax highlighting in the "
"ReactCodeAgent.",
)
self.python_evaluator = evaluate_python_code
self.additional_authorized_imports = additional_authorized_imports if additional_authorized_imports else []
self.authorized_imports = list(set(LIST_SAFE_MODULES) | set(self.additional_authorized_imports))
self.system_prompt = self.system_prompt.replace("<<authorized_imports>>", str(self.authorized_imports))
self.custom_tools = {}
def step(self, log_entry: Dict[str, Any]):
"""
Perform one step in the ReAct framework: the agent thinks, acts, and observes the result.
Errors are raised here; they are caught and logged in the run() method.
"""
agent_memory = self.write_inner_memory_from_logs()
self.prompt = agent_memory.copy()
self.logger.debug("===== New step =====")
# Add new step in logs
log_entry["agent_memory"] = agent_memory.copy()
self.logger.info("===== Calling LLM with these last messages: =====")
self.logger.info(self.prompt[-2:])
try:
additional_args = {"grammar": self.grammar} if self.grammar is not None else {}
llm_output = self.llm_engine(
self.prompt, stop_sequences=["<end_action>", "Observation:"], **additional_args
)
except Exception as e:
raise AgentGenerationError(f"Error in generating llm output: {e}.")
self.logger.debug("=== Output message of the LLM:")
self.logger.debug(llm_output)
log_entry["llm_output"] = llm_output | 387 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/agents.py |
# Parse
self.logger.debug("=== Extracting action ===")
try:
rationale, raw_code_action = self.extract_action(llm_output=llm_output, split_token="Code:")
except Exception as e:
self.logger.debug(f"Error in extracting action, trying to parse the whole output. Error trace: {e}")
rationale, raw_code_action = llm_output, llm_output
try:
code_action = parse_code_blob(raw_code_action)
except Exception as e:
error_msg = f"Error in code parsing: {e}. Make sure to provide correct code"
raise AgentParsingError(error_msg)
log_entry["rationale"] = rationale
log_entry["tool_call"] = {"tool_name": "code interpreter", "tool_arguments": code_action} | 387 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/agents.py |
# Execute
self.log_rationale_code_action(rationale, code_action)
try:
static_tools = {
**BASE_PYTHON_TOOLS.copy(),
**self.toolbox.tools,
}
if self.managed_agents is not None:
static_tools = {**static_tools, **self.managed_agents}
result = self.python_evaluator(
code_action,
static_tools=static_tools,
custom_tools=self.custom_tools,
state=self.state,
authorized_imports=self.authorized_imports,
)
self.logger.warning("Print outputs:")
self.logger.log(32, self.state["print_outputs"])
observation = "Print outputs:\n" + self.state["print_outputs"]
if result is not None:
self.logger.warning("Last output from code snippet:")
self.logger.log(32, str(result))
observation += "Last output from code snippet:\n" + str(result)[:100000]
log_entry["observation"] = observation
except Exception as e:
error_msg = f"Code execution failed due to the following error:\n{str(e)}"
if "'dict' object has no attribute 'read'" in str(e):
error_msg += "\nYou get this error because you passed a dict as input for one of the arguments instead of a string."
raise AgentExecutionError(error_msg)
for line in code_action.split("\n"):
if line[: len("final_answer")] == "final_answer":
self.logger.log(33, "Final answer:")
self.logger.log(32, result)
log_entry["final_answer"] = result
return result
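For reference, a plausible ReactCodeAgent turn, assuming the default code system prompt: a rationale followed by a fenced Python snippet after the `Code:` token. The regex below is a rough stand-in for `parse_code_blob`, not the actual implementation:
```py
import re

fence = "`" * 3  # avoids embedding a literal code fence in this example
llm_output = (
    "Thought: I can compute this directly.\n"
    "Code:\n"
    f"{fence}py\nresult = 2 ** 3.7384\nfinal_answer(result)\n{fence}<end_action>"
)

rationale, raw_code = llm_output.split("Code:", 1)
pattern = fence + r"(?:py|python)?\n(.*?)" + fence
code = re.search(pattern, raw_code, re.DOTALL).group(1)
print(code)
```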
class ManagedAgent:
def __init__(self, agent, name, description, additional_prompting=None, provide_run_summary=False):
self.agent = agent
self.name = name
self.description = description
self.additional_prompting = additional_prompting
self.provide_run_summary = provide_run_summary
def write_full_task(self, task):
full_task = f"""You're a helpful agent named '{self.name}'.
You have been submitted this task by your manager.
---
Task:
{task}
---
You're helping your manager solve a wider task: so make sure not to provide a one-line answer, but give as much information as possible so that they have a clear understanding of the answer.
Your final_answer WILL HAVE to contain these parts:
### 1. Task outcome (short version):
### 2. Task outcome (extremely detailed version):
### 3. Additional context (if relevant):
Put all these in your final_answer tool, everything that you do not pass as an argument to final_answer will be lost.
And even if your task resolution is not successful, please return as much context as possible, so that your manager can act upon this feedback.
<<additional_prompting>>"""
if self.additional_prompting:
full_task = full_task.replace("\n<<additional_prompting>>", self.additional_prompting).strip()
else:
full_task = full_task.replace("\n<<additional_prompting>>", "").strip()
return full_task
def __call__(self, request, **kwargs):
full_task = self.write_full_task(request)
output = self.agent.run(full_task, **kwargs)
if self.provide_run_summary:
answer = f"Here is the final answer from your managed agent '{self.name}':\n"
answer += str(output)
answer += f"\n\nFor more detail, find below a summary of this agent's work:\nSUMMARY OF WORK FROM AGENT '{self.name}':\n"
for message in self.agent.write_inner_memory_from_logs(summary_mode=True):
content = message["content"]
if len(str(content)) < LENGTH_TRUNCATE_REPORTS or "[FACTS LIST]" in str(content):
answer += "\n" + str(content) + "\n---"
else:
answer += (
"\n"
+ str(content)[:LENGTH_TRUNCATE_REPORTS]
+ "\n(...Step was truncated because too long)...\n---"
)
answer += f"\nEND OF SUMMARY OF WORK FROM AGENT '{self.name}'."
return answer
else:
return output
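A minimal usage sketch: wrapping one agent so that a manager (or plain Python code) can call it by name. The names and task are illustrative, and `provide_run_summary=True` appends the truncated memory dump built above:
```py
from transformers.agents import ReactCodeAgent
from transformers.agents.agents import ManagedAgent

worker = ReactCodeAgent(tools=[])
managed = ManagedAgent(
    agent=worker,
    name="calculator",
    description="Performs numeric computations.",
    provide_run_summary=True,
)
print(managed("Compute 2**3.7384 and report the result in detail."))
```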
class TextToSpeechTool(PipelineTool):
default_checkpoint = "microsoft/speecht5_tts"
description = (
"This is a tool that reads an English text out loud. It returns a waveform object containing the sound."
)
name = "text_to_speech"
pre_processor_class = SpeechT5Processor
model_class = SpeechT5ForTextToSpeech
post_processor_class = SpeechT5HifiGan
inputs = {"text": {"type": "string", "description": "The text to read out loud (in English)"}}
output_type = "audio"
def setup(self):
if self.post_processor is None:
self.post_processor = "microsoft/speecht5_hifigan"
super().setup()
def encode(self, text, speaker_embeddings=None):
inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("Datasets needs to be installed if not passing speaker embeddings.") | 389 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/text_to_speech.py |
embeddings_dataset = load_dataset(
"Matthijs/cmu-arctic-xvectors", split="validation", trust_remote_code=True
)
speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def forward(self, inputs):
with torch.no_grad():
return self.model.generate_speech(**inputs)
def decode(self, outputs):
with torch.no_grad():
return self.post_processor(outputs).cpu().detach()
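A usage sketch for the tool above, assuming the base tool call lazily loads the checkpoints and returns the generated waveform (possibly wrapped in the agent audio type); SpeechT5 produces 16 kHz audio:
```py
import soundfile as sf
import torch
from transformers.agents.text_to_speech import TextToSpeechTool

tts = TextToSpeechTool()
audio = tts("Hello, this is a test.")
# Unwrap if the output is an AgentAudio wrapper rather than a bare tensor.
waveform = audio.to_raw() if hasattr(audio, "to_raw") else audio
sf.write("speech.wav", torch.as_tensor(waveform).cpu().numpy(), samplerate=16_000)
```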
class DocumentQuestionAnsweringTool(PipelineTool):
default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
description = "This is a tool that answers a question about an document (pdf). It returns a string that contains the answer to the question."
name = "document_qa"
pre_processor_class = AutoProcessor
model_class = VisionEncoderDecoderModel
inputs = {
"document": {
"type": "image",
"description": "The image containing the information. Can be a PIL Image or a string path to the image.",
},
"question": {"type": "string", "description": "The question in English"},
}
output_type = "string"
def __init__(self, *args, **kwargs):
if not is_vision_available():
raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
super().__init__(*args, **kwargs)
def encode(self, document: "Image", question: str):
task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
prompt = task_prompt.replace("{user_input}", question)
decoder_input_ids = self.pre_processor.tokenizer(
prompt, add_special_tokens=False, return_tensors="pt"
).input_ids
if isinstance(document, str):
img = Image.open(document).convert("RGB")
img_array = np.array(img).transpose(2, 0, 1)
document = torch.from_numpy(img_array)
pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} | 390 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/document_question_answering.py |
def forward(self, inputs):
return self.model.generate(
inputs["pixel_values"].to(self.device),
decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
max_length=self.model.decoder.config.max_position_embeddings,
early_stopping=True,
pad_token_id=self.pre_processor.tokenizer.pad_token_id,
eos_token_id=self.pre_processor.tokenizer.eos_token_id,
use_cache=True,
num_beams=1,
bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
return_dict_in_generate=True,
).sequences
def decode(self, outputs):
sequence = self.pre_processor.batch_decode(outputs)[0]
sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token
sequence = self.pre_processor.token2json(sequence)
return sequence["answer"] | 390 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/document_question_answering.py |
class TranslationTool(PipelineTool):
"""
Example:
```py
from transformers.agents import TranslationTool
translator = TranslationTool()
translator("This is a super nice API!", src_lang="English", tgt_lang="French")
```
"""
lang_to_code = LANGUAGE_CODES
default_checkpoint = "facebook/nllb-200-distilled-600M"
description = (
"This is a tool that translates text from a language to another."
f"Both `src_lang`and `tgt_lang` should belong to this list of languages: {list(lang_to_code.keys())}."
)
name = "translator"
pre_processor_class = AutoTokenizer
model_class = AutoModelForSeq2SeqLM
inputs = {
"text": {"type": "string", "description": "The text to translate"},
"src_lang": {
"type": "string",
"description": "The language of the text to translate. Written in plain English, such as 'Romanian', or 'Albanian'",
},
"tgt_lang": {
"type": "string",
"description": "The language for the desired ouput language. Written in plain English, such as 'Romanian', or 'Albanian'",
},
}
output_type = "string" | 391 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/translation.py |
def encode(self, text, src_lang, tgt_lang):
if src_lang not in self.lang_to_code:
raise ValueError(f"{src_lang} is not a supported language.")
if tgt_lang not in self.lang_to_code:
raise ValueError(f"{tgt_lang} is not a supported language.")
src_lang = self.lang_to_code[src_lang]
tgt_lang = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
)
def forward(self, inputs):
return self.model.generate(**inputs)
def decode(self, outputs):
return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
@dataclass
class PreTool:
name: str
inputs: Dict[str, str]
output_type: type
task: str
description: str
repo_id: str
class PythonInterpreterTool(Tool):
name = "python_interpreter"
description = "This is a tool that evaluates python code. It can be used to perform calculations."
output_type = "string"
def __init__(self, *args, authorized_imports=None, **kwargs):
if authorized_imports is None:
self.authorized_imports = list(set(LIST_SAFE_MODULES))
else:
self.authorized_imports = list(set(LIST_SAFE_MODULES) | set(authorized_imports))
self.inputs = {
"code": {
"type": "string",
"description": (
"The code snippet to evaluate. All variables used in this snippet must be defined in this same snippet, "
f"else you will get an error. This code can only import the following python libraries: {authorized_imports}."
),
}
}
super().__init__(*args, **kwargs)
def forward(self, code):
output = str(
evaluate_python_code(code, static_tools=BASE_PYTHON_TOOLS, authorized_imports=self.authorized_imports)
)
return output
class FinalAnswerTool(Tool):
name = "final_answer"
description = "Provides a final answer to the given problem."
inputs = {"answer": {"type": "any", "description": "The final answer to the problem"}}
output_type = "any"
def forward(self, answer):
return answer
class DuckDuckGoSearchTool(Tool):
name = "web_search"
description = """Perform a web search based on your query (think a Google search) then returns the top search results as a list of dict elements.
Each result has keys 'title', 'href' and 'body'."""
inputs = {"query": {"type": "string", "description": "The search query to perform."}}
output_type = "any"
def forward(self, query: str) -> str:
try:
from duckduckgo_search import DDGS
except ImportError:
raise ImportError(
"You must install package `duckduckgo_search` to run this tool: for instance run `pip install duckduckgo-search`."
)
results = DDGS().text(query, max_results=7)
return results
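A usage sketch (requires `pip install duckduckgo-search`), assuming the base `Tool.__call__` forwards positional arguments to `forward`; each result dict carries the 'title', 'href' and 'body' keys named in the description:
```py
from transformers.agents.search import DuckDuckGoSearchTool

search = DuckDuckGoSearchTool()
for result in search("huggingface transformers agents"):
    print(result["title"], "->", result["href"])
```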
class VisitWebpageTool(Tool):
name = "visit_webpage"
description = "Visits a webpage at the given url and returns its content as a markdown string."
inputs = {
"url": {
"type": "string",
"description": "The url of the webpage to visit.",
}
}
output_type = "string"
def forward(self, url: str) -> str:
try:
from markdownify import markdownify
except ImportError:
raise ImportError(
"You must install package `markdownify` to run this tool: for instance run `pip install markdownify`."
)
try:
# Send a GET request to the URL
response = requests.get(url)
response.raise_for_status() # Raise an exception for bad status codes
# Convert the HTML content to Markdown
markdown_content = markdownify(response.text).strip()
# Remove multiple line breaks
markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content)
return markdown_content
except RequestException as e:
return f"Error fetching the webpage: {str(e)}"
except Exception as e:
return f"An unexpected error occurred: {str(e)}" | 396 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/search.py |
class InterpreterError(ValueError):
"""
An error raised when the interpreter cannot evaluate a Python expression, due to a syntax error or unsupported
operations.
"""
pass
class BreakException(Exception):
pass
class ContinueException(Exception):
pass
class ReturnException(Exception):
def __init__(self, value):
self.value = value
class AgentType:
"""
Abstract class to be reimplemented to define types that can be returned by agents.
These objects serve three purposes:
- They behave as if they were the type they're meant to be, e.g., a string for text, a PIL.Image for images
- They can be stringified: str(object) in order to return a string defining the object
- They should be displayed correctly in ipython notebooks/colab/jupyter
"""
def __init__(self, value):
self._value = value
def __str__(self):
return self.to_string()
def to_raw(self):
logger.error(
"This is a raw AgentType of unknown type. Display in notebooks and string conversion will be unreliable"
)
return self._value
def to_string(self) -> str:
logger.error(
"This is a raw AgentType of unknown type. Display in notebooks and string conversion will be unreliable"
)
return str(self._value)
class AgentText(AgentType, str):
"""
Text type returned by the agent. Behaves as a string.
"""
def to_raw(self):
return self._value
def to_string(self):
return str(self._value)
class AgentImage(AgentType, ImageType):
"""
Image type returned by the agent. Behaves as a PIL.Image.
"""
def __init__(self, value):
AgentType.__init__(self, value)
ImageType.__init__(self)
if not is_vision_available():
raise ImportError("PIL must be installed in order to handle images.")
self._path = None
self._raw = None
self._tensor = None
if isinstance(value, ImageType):
self._raw = value
elif isinstance(value, (str, pathlib.Path)):
self._path = value
elif isinstance(value, torch.Tensor):
self._tensor = value
elif isinstance(value, np.ndarray):
self._tensor = torch.from_numpy(value)
else:
raise TypeError(f"Unsupported type for {self.__class__.__name__}: {type(value)}") | 403 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/agent_types.py |
def _ipython_display_(self, include=None, exclude=None):
"""
Correctly displays this type in an ipython notebook (ipython, colab, jupyter, ...)
"""
from IPython.display import Image, display
display(Image(self.to_string()))
def to_raw(self):
"""
Returns the "raw" version of that object. In the case of an AgentImage, it is a PIL.Image.
"""
if self._raw is not None:
return self._raw
if self._path is not None:
self._raw = Image.open(self._path)
return self._raw
if self._tensor is not None:
array = self._tensor.cpu().detach().numpy()
return Image.fromarray((255 - array * 255).astype(np.uint8))
def to_string(self):
"""
Returns the stringified version of that object. In the case of an AgentImage, it is a path to the serialized
version of the image.
"""
if self._path is not None:
return self._path
if self._raw is not None:
directory = tempfile.mkdtemp()
self._path = os.path.join(directory, str(uuid.uuid4()) + ".png")
self._raw.save(self._path)
return self._path
if self._tensor is not None:
array = self._tensor.cpu().detach().numpy()
# There is likely a simpler way than loading the tensor into an image and then saving it
img = Image.fromarray((255 - array * 255).astype(np.uint8))
directory = tempfile.mkdtemp()
self._path = os.path.join(directory, str(uuid.uuid4()) + ".png")
img.save(self._path)
return self._path
def save(self, output_bytes, format, **params):
"""
Saves the image to a file.
Args:
output_bytes (bytes): The output bytes to save the image to.
format (str): The format to use for the output image. The format is the same as in PIL.Image.save.
**params: Additional parameters to pass to PIL.Image.save.
"""
img = self.to_raw()
img.save(output_bytes, format, **params)
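A small sketch of the tensor path through `to_string`: a float array in [0, 1] becomes a grayscale PNG in a temporary directory, and the returned path is what ends up in agent logs:
```py
import numpy as np
from transformers.agents.agent_types import AgentImage

arr = np.random.rand(64, 64)  # float image in [0, 1]
image = AgentImage(arr)       # stored internally as a torch tensor
print(image.to_string())      # path to a serialized PNG, e.g. /tmp/.../<uuid>.png
```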
class AgentAudio(AgentType, str):
"""
Audio type returned by the agent.
"""
def __init__(self, value, samplerate=16_000):
super().__init__(value)
if not is_soundfile_available():
raise ImportError("soundfile must be installed in order to handle audio.")
self._path = None
self._tensor = None
self.samplerate = samplerate
if isinstance(value, (str, pathlib.Path)):
self._path = value
elif is_torch_available() and isinstance(value, torch.Tensor):
self._tensor = value
elif isinstance(value, tuple):
self.samplerate = value[0]
if isinstance(value[1], np.ndarray):
self._tensor = torch.from_numpy(value[1])
else:
self._tensor = torch.tensor(value[1])
else:
raise ValueError(f"Unsupported audio type: {type(value)}") | 404 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/agent_types.py |
def _ipython_display_(self, include=None, exclude=None):
"""
Correctly displays this type in an ipython notebook (ipython, colab, jupyter, ...)
"""
from IPython.display import Audio, display
display(Audio(self.to_string(), rate=self.samplerate))
def to_raw(self):
"""
Returns the "raw" version of that object. It is a `torch.Tensor` object.
"""
if self._tensor is not None:
return self._tensor
if self._path is not None:
tensor, self.samplerate = sf.read(self._path)
self._tensor = torch.tensor(tensor)
return self._tensor
def to_string(self):
"""
Returns the stringified version of that object. In the case of an AgentAudio, it is a path to the serialized
version of the audio.
"""
if self._path is not None:
return self._path
if self._tensor is not None:
directory = tempfile.mkdtemp()
self._path = os.path.join(directory, str(uuid.uuid4()) + ".wav")
sf.write(self._path, self._tensor, samplerate=self.samplerate)
return self._path
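A small sketch of the tuple path: a `(samplerate, ndarray)` pair is accepted directly, and `to_string` serializes it to a temporary `.wav` file via soundfile:
```py
import numpy as np
from transformers.agents.agent_types import AgentAudio

sine = np.sin(2 * np.pi * 440 * np.linspace(0, 1, 16_000)).astype("float32")
audio = AgentAudio((16_000, sine))  # (samplerate, waveform) tuple
print(audio.to_string())            # path to a serialized .wav file
```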
class PipelineDataset(Dataset):
def __init__(self, dataset, process, params):
self.dataset = dataset
self.process = process
self.params = params
def __len__(self):
return len(self.dataset)
def __getitem__(self, i):
item = self.dataset[i]
processed = self.process(item, **self.params)
return processed
class PipelineIterator(IterableDataset):
def __init__(self, loader, infer, params, loader_batch_size=None):
"""
Roughly equivalent to
```
for item in loader:
yield infer(item, **params)
```
Arguments:
loader (`torch.utils.data.DataLoader` or `Iterable`):
The iterator that will be used to apply `infer` on.
infer (any function):
The function to apply to each element of `loader`.
params (`dict`):
The parameters passed to `infer` along with every item
loader_batch_size (`int`, *optional*):
If specified, the items of `loader` are supposed to come in batches, and are loader_batched here
making it roughly behave as
```
for items in loader:
for i in loader_batch_size:
item = items[i]
yield infer(item, **params)
```"""
self.loader = loader
self.infer = infer
self.params = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
loader_batch_size = None
self.loader_batch_size = loader_batch_size
# Internal bookkeeping
self._loader_batch_index = None
self._loader_batch_data = None
def __len__(self):
return len(self.loader)
def __iter__(self):
self.iterator = iter(self.loader)
return self
def loader_batch_item(self):
"""
Return item located at `loader_batch_index` within the current `loader_batch_data`.
"""
if isinstance(self._loader_batch_data, torch.Tensor):
# Batch data is simple tensor, just fetch the slice
result = self._loader_batch_data[self._loader_batch_index].unsqueeze(0)
else:
# Batch data is assumed to be BaseModelOutput (or dict)
loader_batched = {}
for k, element in self._loader_batch_data.items():
if isinstance(element, ModelOutput):
# Convert ModelOutput to tuple first
element = element.to_tuple()
if isinstance(element[0], torch.Tensor):
loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0], np.ndarray):
loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0], torch.Tensor):
loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0], np.ndarray):
loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
continue
if element is None:
# This can happen for optional data that get passed around
loader_batched[k] = None
elif isinstance(element[self._loader_batch_index], torch.Tensor):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
elif isinstance(element[self._loader_batch_index], np.ndarray):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
else:
# This is typically a list, so no need to `unsqueeze`.
loader_batched[k] = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
result = self._loader_batch_data.__class__(loader_batched)
self._loader_batch_index += 1
return result
def __next__(self):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
item = next(self.iterator)
processed = self.infer(item, **self.params)
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(processed, torch.Tensor):
first_tensor = processed
elif isinstance(processed, tuple):
first_tensor = processed[0]
else:
key = list(processed.keys())[0]
first_tensor = processed[key]
if isinstance(first_tensor, list):
observed_batch_size = len(first_tensor)
else:
observed_batch_size = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
self.loader_batch_size = observed_batch_size
# Setting internal index to unwrap the batch
self._loader_batch_data = processed[0] if isinstance(processed, tuple) else processed
self._loader_batch_index = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
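A toy check of the unbatching logic above, using a plain `DataLoader` as the loader: batches of four go through `infer`, and the iterator hands items back one at a time with a leading batch dimension of 1:
```py
import torch
from torch.utils.data import DataLoader
from transformers.pipelines.pt_utils import PipelineIterator

loader = DataLoader(torch.arange(8).float(), batch_size=4)
iterator = PipelineIterator(loader, lambda batch: batch * 2, {}, loader_batch_size=4)
print([x.item() for x in iterator])  # [0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0]
```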
class PipelineChunkIterator(PipelineIterator):
def __init__(self, loader, infer, params, loader_batch_size=None):
"""
Roughly equivalent to
```
for iterator in loader:
for item in iterator:
yield infer(item, **params)
```
Arguments:
loader (`torch.utils.data.DataLoader` or `Iterable`):
The iterator that will be used to apply `infer` on.
infer (any function):
The function to apply to each element of `loader`.
params (`dict`):
The parameters passed to `infer` along with every item
"""
super().__init__(loader, infer, params)
def __iter__(self):
self.iterator = iter(self.loader)
self.subiterator = None
return self
def __next__(self):
if self.subiterator is None:
# Subiterator being None means we haven't started a `preprocess` iterator yet, so start it
self.subiterator = self.infer(next(self.iterator), **self.params)
try:
# Try to return next item
processed = next(self.subiterator)
except StopIteration:
# When a preprocess iterator ends, we can start looking at the next item
# ChunkIterator will keep feeding until ALL elements of the iterator
# have created their subiterator and have been iterated through.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
self.subiterator = self.infer(next(self.iterator), **self.params)
processed = next(self.subiterator)
return processed
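A toy check of the flattening behavior: each loader element is expanded into its own sub-iterator, and the chunk iterator yields the concatenated stream:
```py
from transformers.pipelines.pt_utils import PipelineChunkIterator

chunks = PipelineChunkIterator([2, 3], lambda n: iter(range(n)), {})
print(list(chunks))  # [0, 1, 0, 1, 2]
```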
class PipelinePackIterator(PipelineIterator):
"""
Roughly equivalent to
```
packed = []
for item in loader:
packed.append(item)
if item["is_last"]:
yield packed
packed = []
```
but it also handles cases where `item` is batched (meaning it's a dict of Tensors with first dimension > 1). In
that case it does
```
packed = []
for batch in loader:
# item is batched
for item in batch:
packed.append(item)
if item["is_last"]:
yield packed
packed = []
```
Arguments:
loader (`torch.utils.data.DataLoader` or `Iterable`):
The iterator that will be used to apply `infer` on.
infer (any function):
The function to apply to each element of `loader`.
params (`dict`):
The parameters passed to `infer` along with every item
loader_batch_size (`int`, *optional*):
If specified, the items of `loader` are supposed to come in batches, and are loader_batched here making
it roughly behave as
```
for items in loader:
for i in loader_batch_size:
item = items[i]
yield infer(item, **params)
```"""
def __iter__(self):
self.iterator = iter(self.loader)
return self
def __next__(self):
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`.
# Because everything is flattened by `PipelineChunkIterator`, we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
# hits an `is_last` and then just passes it on to the caller.
is_last = False
accumulator = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
item = self.loader_batch_item()
is_last = item.pop("is_last")
accumulator.append(item)
if is_last:
return accumulator
while not is_last:
processed = self.infer(next(self.iterator), **self.params)
if self.loader_batch_size is not None:
if isinstance(processed, torch.Tensor):
first_tensor = processed
else:
key = list(processed.keys())[0]
first_tensor = processed[key]
if isinstance(first_tensor, list):
observed_batch_size = len(first_tensor)
else:
observed_batch_size = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
self.loader_batch_size = observed_batch_size
self._loader_batch_data = processed
self._loader_batch_index = 0
while self._loader_batch_index < self.loader_batch_size:
item = self.loader_batch_item()
is_last = item.pop("is_last")
accumulator.append(item)
if is_last:
return accumulator
else:
item = processed
is_last = item.pop("is_last")
accumulator.append(item)
return accumulator
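A toy check of the packing behavior with an identity `infer` and no loader batching: items accumulate until an entry with `is_last=True` closes the group:
```py
from transformers.pipelines.pt_utils import PipelinePackIterator

items = [
    {"chunk": 0, "is_last": False},
    {"chunk": 1, "is_last": True},
    {"chunk": 2, "is_last": True},
]
packed = PipelinePackIterator(items, lambda x: x, {})
print(list(packed))  # [[{'chunk': 0}, {'chunk': 1}], [{'chunk': 2}]]
```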
class KeyDataset(Dataset):
def __init__(self, dataset: Dataset, key: str):
self.dataset = dataset
self.key = key
def __len__(self):
return len(self.dataset)
def __getitem__(self, i):
return self.dataset[i][self.key]
class KeyPairDataset(Dataset):
def __init__(self, dataset: Dataset, key1: str, key2: str):
self.dataset = dataset
self.key1 = key1
self.key2 = key2
def __len__(self):
return len(self.dataset)
def __getitem__(self, i):
return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]} | 410 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/pt_utils.py |
class TextToAudioPipeline(Pipeline):
"""
Text-to-audio generation pipeline using any `AutoModelForTextToWaveform` or `AutoModelForTextToSpectrogram`. This
pipeline generates an audio file from an input text and optional other conditional inputs.
Example:
```python
>>> from transformers import pipeline
>>> pipe = pipeline(model="suno/bark-small")
>>> output = pipe("Hey it's HuggingFace on the phone!")
>>> audio = output["audio"]
>>> sampling_rate = output["sampling_rate"]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
<Tip>
You can specify parameters passed to the model by using [`TextToAudioPipeline.__call__.forward_params`] or
[`TextToAudioPipeline.__call__.generate_kwargs`].
Example:
```python
>>> from transformers import pipeline
>>> music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt") | 411 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/text_to_audio.py |
>>> # diversify the music generation by adding randomness with a high temperature and set a maximum music length
>>> generate_kwargs = {
... "do_sample": True,
... "temperature": 0.7,
... "max_new_tokens": 35,
... }
>>> outputs = music_generator("Techno music with high melodic riffs", generate_kwargs=generate_kwargs)
```
</Tip>
This pipeline can currently be loaded from [`pipeline`] using the following task identifiers: `"text-to-speech"` or
`"text-to-audio"`.
See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=text-to-speech).
"""
def __init__(self, *args, vocoder=None, sampling_rate=None, **kwargs):
super().__init__(*args, **kwargs)
if self.framework == "tf":
raise ValueError("The TextToAudioPipeline is only available in PyTorch.") | 411 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/text_to_audio.py |
self.vocoder = None
if self.model.__class__ in MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING.values():
self.vocoder = (
SpeechT5HifiGan.from_pretrained(DEFAULT_VOCODER_ID).to(self.model.device)
if vocoder is None
else vocoder
)
self.sampling_rate = sampling_rate
if self.vocoder is not None:
self.sampling_rate = self.vocoder.config.sampling_rate
if self.sampling_rate is None:
# get sampling_rate from config and generation config
config = self.model.config
gen_config = self.model.__dict__.get("generation_config", None)
if gen_config is not None:
config.update(gen_config.to_dict())
for sampling_rate_name in ["sample_rate", "sampling_rate"]:
sampling_rate = getattr(config, sampling_rate_name, None)
if sampling_rate is not None:
self.sampling_rate = sampling_rate
def preprocess(self, text, **kwargs):
if isinstance(text, str):
text = [text]
if self.model.config.model_type == "bark":
# bark Tokenizer is called with BarkProcessor which uses those kwargs
new_kwargs = {
"max_length": self.generation_config.semantic_config.get("max_input_semantic_length", 256),
"add_special_tokens": False,
"return_attention_mask": True,
"return_token_type_ids": False,
"padding": "max_length",
}
# priority is given to kwargs
new_kwargs.update(kwargs)
kwargs = new_kwargs
output = self.tokenizer(text, **kwargs, return_tensors="pt")
return output
def _forward(self, model_inputs, **kwargs):
# we expect some kwargs to be additional tensors which need to be on the right device
kwargs = self._ensure_tensor_on_device(kwargs, device=self.device)
forward_params = kwargs["forward_params"]
generate_kwargs = kwargs["generate_kwargs"]
if self.model.can_generate():
# we expect some kwargs to be additional tensors which need to be on the right device
generate_kwargs = self._ensure_tensor_on_device(generate_kwargs, device=self.device)
# User-defined `generation_config` passed to the pipeline call take precedence
if "generation_config" not in generate_kwargs:
generate_kwargs["generation_config"] = self.generation_config
# generate_kwargs get priority over forward_params
forward_params.update(generate_kwargs)
output = self.model.generate(**model_inputs, **forward_params)
else:
if len(generate_kwargs):
raise ValueError(
"You're using the `TextToAudioPipeline` with a forward-only model, but `generate_kwargs` is non "
"empty. For forward-only TTA models, please use `forward_params` instead of `generate_kwargs`. "
f"For reference, the `generate_kwargs` used here are: {generate_kwargs.keys()}"
)
output = self.model(**model_inputs, **forward_params)[0]
if self.vocoder is not None:
# in that case, the output is a spectrogram that needs to be converted into a waveform
output = self.vocoder(output)
return output
def __call__(self, text_inputs: Union[str, List[str]], **forward_params):
"""
Generates speech/audio from the inputs. See the [`TextToAudioPipeline`] documentation for more information.
Args:
text_inputs (`str` or `List[str]`):
The text(s) to generate.
forward_params (`dict`, *optional*):
Parameters passed to the model generation/forward method. `forward_params` are always passed to the
underlying model.
generate_kwargs (`dict`, *optional*):
The dictionary of ad-hoc parametrization of `generate_config` to be used for the generation call. For a
complete overview of generate, check the [following
guide](https://huggingface.co/docs/transformers/en/main_classes/text_generation). `generate_kwargs` are
only passed to the underlying model if the latter is a generative model.
Return:
A `dict` or a list of `dict`: The dictionaries have two keys:
- **audio** (`np.ndarray` of shape `(nb_channels, audio_length)`) -- The generated audio waveform.
- **sampling_rate** (`int`) -- The sampling rate of the generated audio waveform.
"""
return super().__call__(text_inputs, **forward_params)
def _sanitize_parameters(
self,
preprocess_params=None,
forward_params=None,
generate_kwargs=None,
):
if generate_kwargs is None:
generate_kwargs = {}
if self.assistant_model is not None:
generate_kwargs["assistant_model"] = self.assistant_model
if self.assistant_tokenizer is not None:
generate_kwargs["tokenizer"] = self.tokenizer
generate_kwargs["assistant_tokenizer"] = self.assistant_tokenizer
params = {
"forward_params": forward_params if forward_params else {},
"generate_kwargs": generate_kwargs if generate_kwargs else {},
}
if preprocess_params is None:
preprocess_params = {}
postprocess_params = {}
return preprocess_params, params, postprocess_params
def postprocess(self, waveform):
output_dict = {}
if isinstance(waveform, dict):
waveform = waveform["waveform"]
elif isinstance(waveform, tuple):
waveform = waveform[0]
output_dict["audio"] = waveform.cpu().float().numpy()
output_dict["sampling_rate"] = self.sampling_rate
return output_dict
class QuestionAnsweringArgumentHandler(ArgumentHandler):
"""
QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question & context) to be mapped to
internal [`SquadExample`].
QuestionAnsweringArgumentHandler manages all the possible ways to create a [`SquadExample`] from the command-line
supplied arguments.
"""
def normalize(self, item):
if isinstance(item, SquadExample):
return item
elif isinstance(item, dict):
for k in ["question", "context"]:
if k not in item:
raise KeyError("You need to provide a dictionary with keys {question:..., context:...}")
elif item[k] is None:
raise ValueError(f"`{k}` cannot be None")
elif isinstance(item[k], str) and len(item[k]) == 0:
raise ValueError(f"`{k}` cannot be empty") | 412 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/question_answering.py |
return QuestionAnsweringPipeline.create_sample(**item)
raise ValueError(f"{item} argument needs to be of type (SquadExample, dict)") | 412 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/question_answering.py |
def __call__(self, *args, **kwargs):
# Detect where the actual inputs are
if args is not None and len(args) > 0:
if len(args) == 1:
inputs = args[0]
elif len(args) == 2 and {type(el) for el in args} == {str}:
inputs = [{"question": args[0], "context": args[1]}]
else:
inputs = list(args)
# Generic compatibility with sklearn and Keras
# Batched data
elif "X" in kwargs:
warnings.warn(
"Passing the `X` argument to the pipeline is deprecated and will be removed in v5. Inputs should be passed using the `question` and `context` keyword arguments instead.",
FutureWarning,
)
inputs = kwargs["X"]
elif "data" in kwargs:
warnings.warn(
"Passing the `data` argument to the pipeline is deprecated and will be removed in v5. Inputs should be passed using the `question` and `context` keyword arguments instead.",
FutureWarning,
)
inputs = kwargs["data"]
elif "question" in kwargs and "context" in kwargs:
if isinstance(kwargs["question"], list) and isinstance(kwargs["context"], str):
inputs = [{"question": Q, "context": kwargs["context"]} for Q in kwargs["question"]]
elif isinstance(kwargs["question"], list) and isinstance(kwargs["context"], list):
if len(kwargs["question"]) != len(kwargs["context"]):
raise ValueError("Questions and contexts don't have the same lengths") | 412 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/question_answering.py |
inputs = [{"question": Q, "context": C} for Q, C in zip(kwargs["question"], kwargs["context"])]
elif isinstance(kwargs["question"], str) and isinstance(kwargs["context"], str):
inputs = [{"question": kwargs["question"], "context": kwargs["context"]}]
else:
raise ValueError("Arguments can't be understood")
else:
raise ValueError(f"Unknown arguments {kwargs}")
# When user is sending a generator we need to trust it's a valid example
generator_types = (types.GeneratorType, Dataset) if Dataset is not None else (types.GeneratorType,)
if isinstance(inputs, generator_types):
return inputs
# Normalize inputs
if isinstance(inputs, dict):
inputs = [inputs]
elif isinstance(inputs, Iterable):
# Copy to avoid overriding arguments
inputs = list(inputs)
else:
raise ValueError(f"Invalid arguments {kwargs}") | 412 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/question_answering.py |
for i, item in enumerate(inputs):
inputs[i] = self.normalize(item)
return inputs
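The handler above normalizes several equivalent call shapes into a list of `SquadExample`s; all of the calls in this sketch resolve to the same kind of input (the model matches the pipeline example below):
```py
from transformers import pipeline

qa = pipeline("question-answering", model="deepset/roberta-base-squad2")
context = "My name is Wolfgang and I live in Berlin"

qa(question="Where do I live?", context=context)          # keyword form
qa({"question": "Where do I live?", "context": context})  # dict form
qa("Where do I live?", context)                           # two positional strings
qa(question=["Where do I live?", "What is my name?"], context=context)  # broadcast context
```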
class QuestionAnsweringPipeline(ChunkPipeline):
"""
Question Answering pipeline using any `ModelForQuestionAnswering`. See the [question answering
examples](../task_summary#question-answering) for more information.
Example:
```python
>>> from transformers import pipeline
>>> oracle = pipeline(model="deepset/roberta-base-squad2")
>>> oracle(question="Where do I live?", context="My name is Wolfgang and I live in Berlin")
{'score': 0.9191, 'start': 34, 'end': 40, 'answer': 'Berlin'}
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This question answering pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"question-answering"`. | 413 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/question_answering.py |