Dataset columns (each record below repeats these four fields in order):
repo_id: string, length 15 to 132
file_path: string, length 34 to 176
content: string, length 2 to 3.52M
__index_level_0__: int64, always 0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-classification-accuracy/calculate_accuracy.py
from typing import List

from promptflow import log_metric, tool


@tool
def calculate_accuracy(grades: List[str]):
    result = []
    for index in range(len(grades)):
        grade = grades[index]
        result.append(grade)

    # calculate accuracy for each variant
    accuracy = round((result.count("Correct") / l...
0
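The tail of calculate_accuracy.py is truncated above. A minimal standalone sketch of how such an accuracy aggregation typically finishes, assuming the grades list holds "Correct"/"Incorrect" strings; promptflow's @tool decorator and log_metric are replaced with plain Python so the snippet runs on its own:

```python
from typing import List

def calculate_accuracy(grades: List[str]) -> float:
    # Hypothetical completion of the truncated tail: divide the number of
    # "Correct" grades by the total count.
    if not grades:
        return 0.0
    accuracy = round(grades.count("Correct") / len(grades), 2)
    print(f"accuracy: {accuracy}")  # stand-in for log_metric("accuracy", accuracy)
    return accuracy

print(calculate_accuracy(["Correct", "Incorrect", "Correct"]))  # 0.67
```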
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-classification-accuracy/README.md
# Classification Accuracy Evaluation

This is a flow illustrating how to evaluate the performance of a classification system. It involves comparing each prediction to the groundtruth, assigning a "Correct" or "Incorrect" grade, and aggregating the results to produce metrics such as accuracy, which reflects how good th...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-classification-accuracy/grade.py
from promptflow import tool


@tool
def grade(groundtruth: str, prediction: str):
    return "Correct" if groundtruth.lower() == prediction.lower() else "Incorrect"
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-classification-accuracy/requirements.txt
promptflow
promptflow-tools
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-classification-accuracy/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
  groundtruth:
    type: string
    description: Please specify the groundtruth column, which contains the true label to the outputs that your flow produces.
    default: APP
  prediction:
    type: string
    description: Pl...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-entity-match-rate/data.jsonl
{"entities": ["software engineer","CEO"],"ground_truth": "\"CEO, Software Engineer, Finance Manager\""} {"entities": ["Software Engineer","CEO", "Finance Manager"],"ground_truth": "\"CEO, Software Engineer, Finance Manager\""}
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-entity-match-rate/README.md
# Entity match rate evaluation

This flow evaluates the entity match rate.

Tools used in this flow:
- `python` tool

## Prerequisites

Install promptflow sdk and other dependencies:
```bash
pip install -r requirements.txt
```

### 1. Test flow/node

```bash
# test with default input value in flow.dag.yaml
pf flow te...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-entity-match-rate/match.py
from promptflow import tool
from typing import List


@tool
def match(answer: List[str], ground_truth: List[str]):
    exact_match = 0
    partial_match = 0
    if is_match(answer, ground_truth, ignore_case=True, ignore_order=True, allow_partial=False):
        exact_match = 1
    if is_match(answer, ground_truth, ig...
0
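The is_match helper that match.py calls is cut off above. A plausible standalone reconstruction, assuming it normalizes case and order and lets allow_partial accept the answer being a subset of the ground truth (consistent with the unit tests in is_match_test.py below):

```python
from typing import List

def is_match(answer: List[str], ground_truth: List[str],
             ignore_case: bool, ignore_order: bool, allow_partial: bool) -> bool:
    # Hypothetical reconstruction of the truncated helper.
    a, g = list(answer), list(ground_truth)
    if ignore_case:
        a = [x.lower() for x in a]
        g = [x.lower() for x in g]
    if ignore_order:
        a, g = sorted(a), sorted(g)
    if allow_partial:
        # Partial match: every answer entity appears in the ground truth.
        return all(x in g for x in a)
    return a == g

print(is_match(["a", "b"], ["B", "a"], True, True, False))   # True
print(is_match(["a", "b"], ["B", "a"], True, False, False))  # False
```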
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-entity-match-rate/cleansing.py
from typing import List

from promptflow import tool


@tool
def cleansing(entities_str: str) -> List[str]:
    # Split, remove leading and trailing spaces/tabs/dots
    parts = entities_str.split(",")
    cleaned_parts = [part.strip(" \t.\"") for part in parts]
    entities = [part for part in cleaned_parts if len(part...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-entity-match-rate/log_metrics.py
from promptflow import tool
from typing import List
from promptflow import log_metric

# The inputs section will change based on the arguments of the tool function, after you save the code
# Adding type to arguments and return value will help the system show the types properly
# Please update the function name/signatur...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-entity-match-rate/is_match_test.py
import unittest

from match import is_match


class IsMatchTest(unittest.TestCase):
    def test_normal(self):
        self.assertEqual(is_match(["a", "b"], ["B", "a"], True, True, False), True)
        self.assertEqual(is_match(["a", "b"], ["B", "a"], True, False, False), False)
        self.assertEqual(is_match(["a",...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-entity-match-rate/requirements.txt
promptflow
promptflow-tools
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-entity-match-rate/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
  entities:
    type: list
    default:
    - software engineer
    - CEO
  ground_truth:
    type: string
    default: '"CEO, Software Engineer, Finance Manager"'
outputs:
  match_cnt:
    type: object
    reference: ${match.outpu...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/data.jsonl
{"question": "What is the purpose of the LLM Grounding Score, and what does a higher score mean in this context?", "answer": "The LLM Grounding Score is a metric used in the context of in-context learning with large-scale pretrained language models (LLMs) [doc1]. It measures the ability of the LLM to understand and con...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/README.md
# Q&A Evaluation

This is a flow evaluating Q&A RAG (Retrieval Augmented Generation) systems by leveraging state-of-the-art Large Language Models (LLMs) to measure the quality and safety of responses. Utilizing a GPT model to assist with measurements aims to achieve a high agreement with human evaluations compare...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/validate_input.py
from promptflow import tool


def is_valid(input_item):
    return True if input_item and input_item.strip() else False


@tool
def validate_input(question: str, answer: str, documents: str, selected_metrics: dict) -> dict:
    input_data = {"question": is_valid(question), "answer": is_valid(answer), "documents": is_va...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/rag_groundedness_prompt.jinja2
system:
You are a helpful assistant.

user:
Your task is to check and rate whether the factual information in the chatbot's reply is all grounded in the retrieved documents. You will be given a question, the chatbot's response to the question, a chat history between this chatbot and a human, and a list of retrieved documents in json format. ...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/parse_groundedness_score.py
from promptflow import tool
import re


@tool
def parse_grounding_output(rag_grounding_score: str) -> str:
    try:
        numbers_found = re.findall(r"Quality score:\s*(\d+)\/\d", rag_grounding_score)
        score = float(numbers_found[0]) if len(numbers_found) > 0 else 0
    except Exception:
        score = float(...
0
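To illustrate what that regex extracts, a quick standalone check; the sample LLM output string is invented for the demonstration:

```python
import re

sample = "Quality score: 4/5\nReasoning: the reply cites doc1 for every claim."
numbers_found = re.findall(r"Quality score:\s*(\d+)\/\d", sample)
score = float(numbers_found[0]) if numbers_found else 0
print(score)  # 4.0
```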
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/concat_scores.py
from promptflow import tool
import numpy as np


@tool
def concat_results(rag_retrieval_score: dict = None, rag_grounding_score: dict = None, rag_generation_score: dict = None):
    load_list = [{'name': 'gpt_groundedness', 'result': rag_grounding_score},
                 {'name': 'gpt_retrieval_sco...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/requirements.txt
promptflow
promptflow-tools
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/parse_generation_score.py
from promptflow import tool
import re


@tool
def parse_generation_output(rag_generation_score: str) -> str:
    quality_score = float('nan')
    quality_reasoning = ''
    for sent in rag_generation_score.split('\n'):
        sent = sent.strip()
        if re.match(r"\s*(<)?Quality score:", sent):
            numbers_...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/rag_retrieval_prompt.jinja2
system:
You are a helpful assistant.

user:
A chat history between user and bot is shown below. A list of documents is shown below in json format, and each document has one unique id. These listed documents are used as context to answer the given question. The task is to score the relevance between the documents and the ...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/parse_retrival_score.py
from promptflow import tool
import re


@tool
def parse_retrieval_output(retrieval_output: str) -> str:
    score_response = [sent.strip() for sent in retrieval_output.strip("\"").split("# Result")[-1].strip().split('.') if sent.strip()]
    parsed_score_response = re.findall(r"\d+", score_respons...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
  metrics:
    type: string
    default: gpt_groundedness,gpt_relevance,gpt_retrieval_score
    is_chat_input: false
  answer:
    type: string
    default: Of the tents mentioned in the retrieved documents, the Alpine Explorer ...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/aggregate_variants_results.py
from typing import List

from promptflow import tool, log_metric
import numpy as np


@tool
def aggregate_variants_results(results: List[dict], metrics: List[str]):
    aggregate_results = {}
    for result in results:
        for name, value in result.items():
            if name not in aggregate_results.keys(): ...
0
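aggregate_variants_results is cut off above. A minimal standalone sketch of the usual pattern (collect each metric's values across lines, average them, and emit one number per metric); log_metric is replaced by print so it runs alone, and the original's exact aggregation may differ:

```python
from typing import Dict, List
import statistics

def aggregate_variants_results(results: List[dict], metrics: List[str]) -> Dict[str, float]:
    # Hypothetical completion: group values by metric name, then average.
    collected: Dict[str, List[float]] = {}
    for result in results:
        for name, value in result.items():
            if name in metrics:
                collected.setdefault(name, []).append(float(value))
    aggregated = {name: round(statistics.mean(vals), 2) for name, vals in collected.items()}
    for name, value in aggregated.items():
        print(f"{name}: {value}")  # stand-in for log_metric(name, value)
    return aggregated

aggregate_variants_results(
    [{"gpt_groundedness": 4}, {"gpt_groundedness": 5}],
    ["gpt_groundedness"],
)  # prints gpt_groundedness: 4.5
```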
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/rag_generation_prompt.jinja2
system:
You will be provided a question, a conversation history, fetched documents related to the question and a response to the question in the domain. Your task is to evaluate the quality of the provided response by following the steps below:
- Understand the context of the question based on the conversation history. ...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/select_metrics.py
from promptflow import tool


@tool
def select_metrics(metrics: str) -> str:
    supported_metrics = ('gpt_relevance', 'gpt_groundedness', 'gpt_retrieval_score')
    user_selected_metrics = [metric.strip() for metric in metrics.split(',') if metric]
    metric_selection_dict = {}
    for metric in supported_metrics: ...
0
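select_metrics is truncated inside its loop. A minimal sketch of a plausible completion, assuming it flags each supported metric as selected or not (the original's annotated return type is str; a dict is used here for clarity):

```python
def select_metrics(metrics: str) -> dict:
    # Hypothetical completion: mark each supported metric True/False
    # depending on whether the user listed it.
    supported_metrics = ('gpt_relevance', 'gpt_groundedness', 'gpt_retrieval_score')
    user_selected_metrics = [metric.strip() for metric in metrics.split(',') if metric]
    metric_selection_dict = {}
    for metric in supported_metrics:
        metric_selection_dict[metric] = metric in user_selected_metrics
    return metric_selection_dict

print(select_metrics("gpt_groundedness, gpt_relevance"))
# {'gpt_relevance': True, 'gpt_groundedness': True, 'gpt_retrieval_score': False}
```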
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-basic/data.jsonl
{"groundtruth": "Tomorrow's weather will be sunny.","prediction": "The weather will be sunny tomorrow."}
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-basic/README.md
# Basic Eval

This example shows how to create a basic evaluation flow.

Tools used in this flow:
- `python` tool

## Prerequisites

Install promptflow sdk and other dependencies in this folder:
```bash
pip install -r requirements.txt
```

## What you will learn

In this flow, you will learn
- how to compose a point ba...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-basic/line_process.py
from promptflow import tool


@tool
def line_process(groundtruth: str, prediction: str):
    """
    This tool processes the prediction of a single line and returns the processed result.

    :param groundtruth: the groundtruth of a single line.
    :param prediction: the prediction of a single line.
    """
    # Add...
0
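The body of line_process ends at the "# Add..." placeholder comment. A minimal sketch of one plausible point-based processor, assuming a simple case-insensitive exact-match grade; the real example's logic may differ:

```python
def line_process(groundtruth: str, prediction: str) -> str:
    # Hypothetical body: grade a single line by case-insensitive exact match.
    if groundtruth.strip().lower() == prediction.strip().lower():
        return "Correct"
    return "Incorrect"

print(line_process("Tomorrow's weather will be sunny.",
                   "The weather will be sunny tomorrow."))  # Incorrect
```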
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-basic/requirements.txt
promptflow
promptflow-tools
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-basic/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
  groundtruth:
    type: string
    default: groundtruth
  prediction:
    type: string
    default: prediction
outputs:
  results:
    type: string
    reference: ${line_process.output}
nodes:
- name: line_process
  type: python
  ...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-basic/aggregate.py
from typing import List

from promptflow import tool


@tool
def aggregate(processed_results: List[str]):
    """
    This tool aggregates the processed result of all lines to the variant level and log metric for each variant.

    :param processed_results: List of the output of line_process node.
    """
    # Add yo...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-groundedness/data.jsonl
{"question": "What is the name of the new language representation model introduced in the document?", "variant_id": "v1", "line_number":1, "answer":"The document mentions multiple language representation models, so it is unclear which one is being referred to as \"new\". Can you provide more specific information or con...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-groundedness/calc_groundedness.py
from promptflow import tool
import re


@tool
def parse_score(gpt_score: str):
    return float(extract_float(gpt_score))


def extract_float(s):
    match = re.search(r"[-+]?\d*\.\d+|\d+", s)
    if match:
        return float(match.group())
    else:
        return None
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-groundedness/gpt_groundedness.md
user:
# Instructions
* There are many chatbots that can answer users' questions based on the context given from different sources like search results, or snippets from books/papers. They try to understand the user's question and then get context by either performing a search in search engines, databases or books/papers fo...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-groundedness/README.md
# Groundedness Evaluation

This flow leverages an LLM to evaluate groundedness: whether the answer states facts that are all present in the given context.

Tools used in this flow:
- `python` tool
- built-in `llm` tool

### 0. Setup connection

Prepare your Azure Open AI resource following this [instruction](https://learn.mi...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-groundedness/requirements.txt
promptflow
promptflow-tools
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-groundedness/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
environment:
  python_requirements_txt: requirements.txt
inputs:
  question:
    type: string
    default: What is the name of the new language representation model introduced in the document?
  answer:
    type: string
    default: ...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-groundedness/aggregate.py
from typing import List

from promptflow import tool


@tool
def aggregate(groundedness_scores: List[float]):
    """
    This tool aggregates the processed result of all lines to the variant level and log metric for each variant.

    :param processed_results: List of the output of line_process node.
    :param variant...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-chat-math/data.jsonl
{"groundtruth": "10","prediction": "10"} {"groundtruth": "253","prediction": "506"} {"groundtruth": "1/3","prediction": "2/6"}
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-chat-math/README.md
# Eval chat math

This example shows how to evaluate answers to math questions by comparing the output results with the standard answers numerically.

Learn more on the corresponding [tutorials](../../../tutorials/flow-fine-tuning-evaluation/promptflow-quality-improvement.md)

Tools used in this flow:
- `python` t...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-chat-math/line_process.py
from promptflow import tool


def string_to_number(raw_string: str) -> float:
    '''
    Try to parse the prediction string and groundtruth string to a float number.
    Supports parsing int, float, and fraction, and recognizes non-numeric strings with wrong format.
    Wrong format cases: 'the answer is \box{2/3}', '0, 5, or any num...
0
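string_to_number is truncated right after its docstring. A minimal standalone sketch, assuming it parses ints, floats, and 'a/b' fractions and returns None for unparseable strings, as the docstring and the sample data ('1/3' vs '2/6') suggest:

```python
def string_to_number(raw_string: str):
    # Hypothetical body: parse int/float directly, fall back to a/b fractions.
    raw_string = raw_string.strip()
    try:
        return float(raw_string)
    except ValueError:
        pass
    if "/" in raw_string:
        num, _, den = raw_string.partition("/")
        try:
            return float(num) / float(den)
        except (ValueError, ZeroDivisionError):
            return None
    return None

print(string_to_number("1/3") == string_to_number("2/6"))  # True, matching the sample data
```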
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-chat-math/requirements.txt
promptflow
promptflow-tools
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-chat-math/flow.dag.yaml
inputs:
  groundtruth:
    type: string
    default: "10"
    is_chat_input: false
  prediction:
    type: string
    default: "10"
    is_chat_input: false
outputs:
  score:
    type: string
    reference: ${line_process.output}
nodes:
- name: line_process
  type: python
  source:
    type: code
    path: line_process...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-chat-math/aggregate.py
from typing import List

from promptflow import tool
from promptflow import log_metric


@tool
def accuracy_aggregate(processed_results: List[int]):
    num_exception = 0
    num_correct = 0
    for i in range(len(processed_results)):
        if processed_results[i] == -1:
            num_exception += 1
        elif p...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-perceived-intelligence/gpt_perceived_intelligence.md
user:
# Instructions
* There are many chatbots that can answer users' questions based on the context given from different sources like search results, or snippets from books/papers. They try to understand the user's question and then get context by either performing a search in search engines, databases or books/papers fo...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-perceived-intelligence/data.jsonl
{"question": "What is the name of the new language representation model introduced in the document?", "variant_id": "v1", "line_number":1, "answer":"The document mentions multiple language representation models, so it is unclear which one is being referred to as \"new\". Can you provide more specific information or con...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-perceived-intelligence/README.md
# Perceived Intelligence Evaluation

This flow leverages an LLM to evaluate perceived intelligence. Perceived intelligence is the degree to which a bot can impress the user with its responses, by showing originality, insight, creativity, knowledge, and adaptability.

Tools used in this flow:
- `python` tool
- built-in `ll...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-perceived-intelligence/requirements.txt
promptflow
promptflow-tools
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-perceived-intelligence/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
environment:
  python_requirements_txt: requirements.txt
inputs:
  question:
    type: string
    default: What is the name of the new language representation model introduced in the document?
  answer:
    type: string
    default: ...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-perceived-intelligence/aggregate.py
from typing import List

from promptflow import tool


@tool
def aggregate(perceived_intelligence_score: List[float]):
    aggregated_results = {"perceived_intelligence_score": 0.0, "count": 0}

    # Calculate average perceived_intelligence_score
    for i in range(len(perceived_intelligence_score)):
        aggregated...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-perceived-intelligence/parse_score.py
from promptflow import tool
import re


@tool
def parse_score(gpt_score: str):
    return float(extract_float(gpt_score))


def extract_float(s):
    match = re.search(r"[-+]?\d*\.\d+|\d+", s)
    if match:
        return float(match.group())
    else:
        return None
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-accuracy-maths-to-code/test_data.jsonl
{"question": "What is the sum of 5 and 3?", "groundtruth": "8", "answer": "8"} {"question": "Subtract 7 from 10.", "groundtruth": "3", "answer": "3"} {"question": "Multiply 6 by 4.", "groundtruth": "24", "answer": "24"} {"question": "Divide 20 by 5.", "groundtruth": "4", "answer": "4"} {"question": "What is the square ...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-accuracy-maths-to-code/line_process.py
from promptflow import tool


@tool
def line_process(groundtruth: str, prediction: str) -> int:
    processed_result = 0
    if prediction == "JSONDecodeError" or prediction.startswith("Unknown Error:"):
        processed_result = -1
        return processed_result
    try:
        groundtruth = float(groundtruth)
        ...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-accuracy-maths-to-code/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
  groundtruth:
    type: string
    default: "1"
  prediction:
    type: string
    default: "2"
outputs:
  score:
    type: string
    reference: ${line_process.output}
nodes:
- name: line_process
  type: python
  source:
    type...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-accuracy-maths-to-code/aggregate.py
from typing import List

from promptflow import tool
from promptflow import log_metric


@tool
def accuracy_aggregate(processed_results: List[int]):
    num_exception = 0
    num_correct = 0
    for i in range(len(processed_results)):
        if processed_results[i] == -1:
            num_exception += 1
        elif p...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/data.jsonl
{"question":"Which tent is the most waterproof?","ground_truth":"The Alpine Explorer Tent has the highest rainfly waterproof rating at 3000m","answer":"The Alpine Explorer Tent is the most waterproof.","context":"From the our product list, the alpine explorer tent is the most waterproof. The Adventure Dining Table has ...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/README.md
# Q&A Evaluation

This is a flow evaluating Q&A systems by leveraging Large Language Models (LLMs) to measure the quality and safety of responses. Utilizing GPT and a GPT embedding model to assist with measurements aims to achieve a high agreement with human evaluations compared to traditional mathematical measuremen...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/ada_cosine_similarity_score.py
from promptflow import tool
import numpy as np
from numpy.linalg import norm


@tool
def compute_ada_cosine_similarity(a, b) -> float:
    return np.dot(a, b)/(norm(a)*norm(b))
0
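compute_ada_cosine_similarity is the standard cosine similarity, dot(a, b) / (norm(a) * norm(b)). A quick standalone usage check with toy vectors (real text-embedding-ada-002 vectors have 1536 dimensions):

```python
import numpy as np
from numpy.linalg import norm

def compute_ada_cosine_similarity(a, b) -> float:
    return np.dot(a, b) / (norm(a) * norm(b))

# Toy 3-dimensional "embeddings" for illustration only.
print(compute_ada_cosine_similarity([1.0, 0.0, 0.0], [1.0, 0.0, 0.0]))  # 1.0 (identical direction)
print(compute_ada_cosine_similarity([1.0, 0.0, 0.0], [0.0, 1.0, 0.0]))  # 0.0 (orthogonal)
```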
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/validate_input.py
from promptflow import tool


@tool
def validate_input(question: str, answer: str, context: str, ground_truth: str, selected_metrics: dict) -> dict:
    input_data = {"question": question, "answer": answer, "context": context, "ground_truth": ground_truth}
    expected_input_cols = set(input_data.keys())
    dict_metri...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/gpt_fluency_prompt.jinja2
system:
You are an AI assistant. You will be given the definition of an evaluation metric for assessing the quality of an answer in a question-answering task. Your job is to compute an accurate evaluation score using the provided evaluation metric.

user:
Fluency measures the quality of individual sentences in the answe...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/gpt_coherence_prompt.jinja2
system:
You are an AI assistant. You will be given the definition of an evaluation metric for assessing the quality of an answer in a question-answering task. Your job is to compute an accurate evaluation score using the provided evaluation metric.

user:
Coherence of an answer is measured by how well all the sentences...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/gpt_similarity_prompt.jinja2
system:
You are an AI assistant. You will be given the definition of an evaluation metric for assessing the quality of an answer in a question-answering task. Your job is to compute an accurate evaluation score using the provided evaluation metric.

user:
Equivalence, as a metric, measures the similarity between the pre...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/concat_scores.py
from promptflow import tool
import numpy as np
import re


@tool
def concat_results(gpt_coherence_score: str = None, gpt_similarity_score: str = None, gpt_fluency_score: str = None,
                   gpt_relevance_score: str = None, gpt_groundedness_score: str =...
0
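concat_results is cut off inside its parameter list. A minimal sketch of the typical merge step, converting each per-metric score string to a float with NaN for anything unparseable; parameter names follow the truncated signature, and the original's details may differ:

```python
def to_float_or_nan(score):
    # Convert one model-produced score string to a float, NaN on failure.
    try:
        return float(score)
    except (TypeError, ValueError):
        return float("nan")

def concat_results(gpt_coherence_score: str = None, gpt_similarity_score: str = None) -> dict:
    return {
        "gpt_coherence": to_float_or_nan(gpt_coherence_score),
        "gpt_similarity": to_float_or_nan(gpt_similarity_score),
    }

print(concat_results("4", None))  # {'gpt_coherence': 4.0, 'gpt_similarity': nan}
```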
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/requirements.txt
promptflow
promptflow-tools
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/gpt_relevance_prompt.jinja2
system:
You are an AI assistant. You will be given the definition of an evaluation metric for assessing the quality of an answer in a question-answering task. Your job is to compute an accurate evaluation score using the provided evaluation metric.

user:
Relevance measures how well the answer addresses the main aspects...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
  question:
    type: string
    default: Which tent is the most waterproof?
    is_chat_input: false
  answer:
    type: string
    default: The Alpine Explorer Tent is the most waterproof.
    is_chat_input: false
  context:
    ...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/aggregate_variants_results.py
from typing import List

from promptflow import tool, log_metric
import numpy as np


@tool
def aggregate_variants_results(results: List[dict], metrics: List[str]):
    aggregate_results = {}
    for result in results:
        for name, value in result.items():
            if name in metrics[0]:
                if name ...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/select_metrics.py
from promptflow import tool


@tool
def select_metrics(metrics: str) -> str:
    supported_metrics = ('gpt_coherence', 'gpt_similarity', 'gpt_fluency', 'gpt_relevance', 'gpt_groundedness', 'f1_score', 'ada_similarity')
    user_selected_metrics = [metric.strip() for metric in metrics.split(',')...
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/f1_score.py
from promptflow import tool
from collections import Counter


@tool
def compute_f1_score(ground_truth: str, answer: str) -> str:
    import string
    import re

    class QASplitTokenizer:
        def __call__(self, line):
            """Tokenizes an input line using split() on whitespace

            :param line: a s...
0
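compute_f1_score is truncated inside its tokenizer class. A minimal standalone sketch of token-level F1 as commonly used for QA evaluation (precision and recall over shared whitespace tokens via Counter); the original file's normalization steps may differ:

```python
from collections import Counter

def compute_f1_score(ground_truth: str, answer: str) -> float:
    # Token-level F1: 2PR / (P + R) over whitespace tokens.
    gt_tokens = ground_truth.lower().split()
    ans_tokens = answer.lower().split()
    common = Counter(gt_tokens) & Counter(ans_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(ans_tokens)
    recall = num_same / len(gt_tokens)
    return 2 * precision * recall / (precision + recall)

print(round(compute_f1_score("the tent is waterproof",
                            "the tent is very waterproof"), 2))  # 0.89
```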
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/gpt_groundedness_prompt.jinja2
system:
You are an AI assistant. You will be given the definition of an evaluation metric for assessing the quality of an answer in a question-answering task. Your job is to compute an accurate evaluation score using the provided evaluation metric.

user:
You will be presented with a CONTEXT and an ANSWER about that CON...
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/tool/generate_tool_package_template.py
import argparse
import os
import re

from jinja2 import Environment, FileSystemLoader


def make_pythonic_variable_name(input_string):
    variable_name = input_string.strip()
    variable_name = re.sub(r'\W|^(?=\d)', '_', variable_name)
    if not variable_name[0].isalpha() and variable_name[0] != '_':
        variable...
0
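make_pythonic_variable_name sanitizes arbitrary text into a valid Python identifier with a single regex: \W replaces every non-word character with an underscore, and the zero-width ^(?=\d) match inserts a leading underscore before an initial digit. A standalone demonstration of the visible part of that logic; the truncated branch is not reproduced:

```python
import re

def make_pythonic_variable_name(input_string: str) -> str:
    variable_name = input_string.strip()
    # \W replaces non-word characters; ^(?=\d) prefixes a leading digit with _.
    return re.sub(r'\W|^(?=\d)', '_', variable_name)

print(make_pythonic_variable_name("My Tool!"))  # My_Tool_
print(make_pythonic_variable_name("123 tool"))  # _123_tool
```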
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/tool/generate_connection_config.py
import argparse
import json
from pathlib import Path

from utils.secret_manager import get_secret, get_secret_client, list_secret_names

CONNECTION_FILE_NAME = "connections.json"
PROMPTFLOW_TOOLS_ROOT = Path(__file__) / "../../../src/promptflow-tools"
CONNECTION_TPL_FILE_PATH = PROMPTFLOW_TOOLS_ROOT / "connections.json...
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/tool/validate_tool_secret.py
import argparse

from utils.secret_manager import (
    get_secret_client,
    init_used_secret_names,
    validate_secret_name,
)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tenant_id",
        type=str,
        required=True,
        help="The tenant id of the...
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/tool/deploy_endpoint.py
import argparse

from utils.repo_utils import create_remote_branch_in_ADO_with_new_tool_pkg_version, deploy_test_endpoint

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tool_pkg_version", type=str, required=True)
    parser.add_argument("--ado_pat", type=str, required=True...
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/tool/generate_package_tool_meta.py
import argparse
import ast
import importlib
import json
import os
import sys

from ruamel.yaml import YAML

sys.path.append("src/promptflow-tools")
sys.path.append(os.getcwd())

from utils.generate_tool_meta_utils import generate_custom_llm_tools_in_module_as_dict, generate_python_tools_in_module_as_dict  # noqa: E402,...
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/tool/convert_image_to_data_url.py
import argparse
import base64
import os
import io

from PIL import Image

SUPPORT_IMAGE_TYPES = ["png", "jpg", "jpeg", "bmp"]


def get_image_size(image_path):
    with Image.open(image_path) as img:
        width, height = img.size
    return width, height


def get_image_storage_size(image_path):
    file_size_bytes ...
0
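The conversion step itself is not visible in the excerpt. A minimal sketch of the standard image-to-data-URL recipe (base64-encode the file bytes and prepend a MIME prefix), written independently of the truncated helpers above; the file path in the usage comment is hypothetical:

```python
import base64
import mimetypes

def convert_image_to_data_url(image_path: str) -> str:
    # A data URL is "data:<mime>;base64,<encoded bytes>".
    mime, _ = mimetypes.guess_type(image_path)
    with open(image_path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode("ascii")
    return f"data:{mime};base64,{encoded}"

# Usage (hypothetical file):
# print(convert_image_to_data_url("logo.png")[:48])
```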
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/tool/upload_tool_secret.py
import argparse

from utils.secret_manager import get_secret_client, upload_secret

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tenant_id",
        type=str,
        required=True,
        help="The tenant id of the service principal",
    )
    parser.add_argum...
0
promptflow_repo/promptflow/scripts/tool
promptflow_repo/promptflow/scripts/tool/templates/test_tool.py.j2
import pytest
import unittest

from promptflow.connections import CustomConnection
from {{ package_name }}.tools.{{ tool_name }} import {{ function_name }}


@pytest.fixture
def my_custom_connection() -> CustomConnection:
    my_custom_connection = CustomConnection(
        {
            "api-key" : "my-api-key",
            ...
0
promptflow_repo/promptflow/scripts/tool
promptflow_repo/promptflow/scripts/tool/templates/tool.yaml.j2
{{ package_name }}.tools.{{ tool_name }}.{{ function_name }}:
  function: {{ function_name }}
  inputs:
    connection:
      type:
      - CustomConnection
    input_text:
      type:
      - string
  module: {{ package_name }}.tools.{{ tool_name }}
  name: Hello World Tool
  description: This is hello world tool
  ty...
0
promptflow_repo/promptflow/scripts/tool
promptflow_repo/promptflow/scripts/tool/templates/setup.py.j2
from setuptools import find_packages, setup

PACKAGE_NAME = "{{ package_name }}"

setup(
    name=PACKAGE_NAME,
    version="0.0.1",
    description="This is my tools package",
    packages=find_packages(),
    entry_points={
        "package_tools": ["{{ tool_name }} = {{ package_name }}.tools.utils:list_package_tools...
0
promptflow_repo/promptflow/scripts/tool
promptflow_repo/promptflow/scripts/tool/templates/tool2.py.j2
from promptflow import ToolProvider, tool
import urllib.request


class {{ class_name }}(ToolProvider):
    def __init__(self, url: str):
        super().__init__()
        # Loading content from the url might be slow, so we do it in the __init__ method to make sure it is loaded only once.
        self.content = urllib.request.u...
0
promptflow_repo/promptflow/scripts/tool
promptflow_repo/promptflow/scripts/tool/templates/utils.py.j2
from ruamel.yaml import YAML
from pathlib import Path


def collect_tools_from_directory(base_dir) -> dict:
    tools = {}
    yaml = YAML()
    for f in Path(base_dir).glob("**/*.yaml"):
        with open(f, "r") as f:
            tools_in_file = yaml.load(f)
            for identifier, tool in tools_in_file.items(): ...
0
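collect_tools_from_directory is cut off inside its loop. A minimal standalone sketch of a plausible completion: glob every yaml file under a directory and merge each file's entries into one dict, matching the visible ruamel.yaml usage:

```python
from pathlib import Path

from ruamel.yaml import YAML

def collect_tools_from_directory(base_dir) -> dict:
    # Hypothetical completion: merge every yaml mapping found under base_dir.
    tools = {}
    yaml = YAML()
    for path in Path(base_dir).glob("**/*.yaml"):
        with open(path, "r") as stream:
            tools_in_file = yaml.load(stream)
        for identifier, tool in tools_in_file.items():
            tools[identifier] = tool
    return tools

# Usage: tools = collect_tools_from_directory("my_package/yamls")
```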
promptflow_repo/promptflow/scripts/tool
promptflow_repo/promptflow/scripts/tool/templates/MANIFEST.in.j2
include {{ package_name }}/yamls/*.yaml
0
promptflow_repo/promptflow/scripts/tool
promptflow_repo/promptflow/scripts/tool/templates/tool2.yaml.j2
{{ package_name }}.tools.{{ tool_name }}.{{ class_name }}.{{ function_name }}:
  class_name: {{ class_name }}
  function: {{ function_name }}
  inputs:
    url:
      type:
      - string
    query:
      type:
      - string
  module: {{ package_name }}.tools.{{ tool_name }}
  name: Hello World Tool
  description: Thi...
0
promptflow_repo/promptflow/scripts/tool
promptflow_repo/promptflow/scripts/tool/templates/test_tool2.py.j2
import pytest
import unittest

from {{ package_name }}.tools.{{ tool_name }} import {{ class_name }}


@pytest.fixture
def my_url() -> str:
    my_url = "https://www.bing.com"
    return my_url


@pytest.fixture
def my_tool_provider(my_url) -> {{ class_name }}:
    my_tool_provider = {{ class_name }}(my_url)
    return...
0
promptflow_repo/promptflow/scripts/tool
promptflow_repo/promptflow/scripts/tool/templates/tool.py.j2
from promptflow import tool
from promptflow.connections import CustomConnection


@tool
def {{ function_name }}(connection: CustomConnection, input_text: str) -> str:
    # Replace with your tool code.
    # Usually connection contains configs to connect to an API.
    # CustomConnection is a dict. You can use it l...
0
promptflow_repo/promptflow/scripts/tool
promptflow_repo/promptflow/scripts/tool/exceptions/secret_exceptions.py
class SecretNameAlreadyExistsException(Exception):
    pass


class SecretNameInvalidException(Exception):
    pass


class SecretNoSetPermissionException(Exception):
    pass
0
promptflow_repo/promptflow/scripts/tool
promptflow_repo/promptflow/scripts/tool/exceptions/__init__.py
from .secret_exceptions import SecretNameAlreadyExistsException, SecretNameInvalidException, SecretNoSetPermissionException # noqa: F401, E501
0
promptflow_repo/promptflow/scripts/tool
promptflow_repo/promptflow/scripts/tool/utils/generate_tool_meta_utils.py
""" This file can generate a meta file for the given prompt template or a python file. """ import inspect import types from dataclasses import asdict from utils.tool_utils import function_to_interface from promptflow.contracts.tool import Tool, ToolType # Avoid circular dependencies: Use import 'from promptflow._inte...
0
promptflow_repo/promptflow/scripts/tool
promptflow_repo/promptflow/scripts/tool/utils/repo_utils.py
import json
import os
import shutil
import subprocess
from datetime import datetime
from pathlib import Path

import requests

scripts_dir = os.path.join(os.getcwd(), "scripts")
index_url = "https://azuremlsdktestpypi.azureedge.net/test-promptflow/promptflow-tools"
ado_promptflow_repo_url_format = "https://{0}@dev.azur...
0
promptflow_repo/promptflow/scripts/tool
promptflow_repo/promptflow/scripts/tool/utils/secret_manager.py
import re

from azure.core.exceptions import HttpResponseError, ResourceExistsError
from azure.identity import ClientSecretCredential
from azure.keyvault.secrets import SecretClient

from exceptions import (
    SecretNameAlreadyExistsException,
    SecretNameInvalidException,
    SecretNoSetPermissionException,
)

key_...
0
promptflow_repo/promptflow/scripts/tool
promptflow_repo/promptflow/scripts/tool/utils/tool_utils.py
import inspect
from enum import Enum, EnumMeta
from typing import Callable, Union, get_args, get_origin

from promptflow.contracts.tool import ConnectionType, InputDefinition, ValueType, ToolType
from promptflow.contracts.types import PromptTemplate


def value_to_str(val):
    if val is inspect.Parameter.empty:
        ...
0
promptflow_repo/promptflow/scripts/tool/utils
promptflow_repo/promptflow/scripts/tool/utils/configs/deploy-endpoint-request-body.json
{ "stagesToSkip": [], "resources": { "repositories": { "self": { "refName": "refs/heads/dev-branch" } } }, "templateParameters": { "deployEndpoint": "True" }, "variables": { "model-file": { "value": "promptflow-g...
0
promptflow_repo/promptflow/scripts/tool/utils
promptflow_repo/promptflow/scripts/tool/utils/configs/promptflow-gallery-tool-test.yaml
storage:
  storage_account: promptflowgall5817910653
deployment:
  subscription_id: 96aede12-2f73-41cb-b983-6d11a904839b
  resource_group: promptflow
  workspace_name: promptflow-gallery
  endpoint_name: tool-test638236049123389546
  deployment_name: blue
  mt_service_endpoint: https://eastus2euap.api.azureml.ms
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/docs/promptflow.connections.rst
promptflow.connections package
==============================

.. autoclass:: promptflow.connections.AzureContentSafetyConnection
   :members:
   :undoc-members:
   :show-inheritance:
   :noindex:

.. autoclass:: promptflow.connections.AzureOpenAIConnection
   :members:
   :undoc-members:
   :show-inheritance:
   :noin...
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/docs/doc_generation.ps1
<#
.DESCRIPTION
Script to build doc site

.EXAMPLE
PS> ./doc_generation.ps1 -SkipInstall # skip pip install
PS> ./doc_generation.ps1 -BuildLinkCheck -WarningAsError:$true -SkipInstall
#>
[CmdletBinding()]
param(
    [switch]$SkipInstall,
    [switch]$WarningAsError = $false,
    [switch]$BuildLinkCheck = $false,
    ...
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/docs/conf.py
# -- Path setup --------------------------------------------------------------
import sys

# -- Project information -----------------------------------------------------
project = 'Prompt flow'
copyright = '2023, Microsoft'
author = 'Microsoft'

sys.path.append(".")
from gallery_directive import GalleryDirective  # no...
0
promptflow_repo/promptflow/scripts/docs
promptflow_repo/promptflow/scripts/docs/_static/custom.js
// Get the head element
let head = document.getElementsByTagName("head")[0];

// Create the script element
let script = document.createElement("script");
script.async = true;
script.src = "https://www.googletagmanager.com/gtag/js?id=G-KZXK5PFBZY";

// Create another script element for the gtag code
let script2 = docume...
0