id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
21,701 | import collections
import glob
import json
import logging
import math
import multiprocessing
import os
import pickle
import torch
from functools import partial
from typing import Tuple, List, Dict, Iterable, Optional
from torch import Tensor as T
from tqdm import tqdm
from dpr.utils.data_utils import Tensorizer, read_serialized_data_from_files
# One extracted answer candidate: the detokenized answer text, its combined
# start+end logit score, the passage-level relevance score, and the source
# passage (its index and full token-id list).
SpanPrediction = collections.namedtuple(
    "SpanPrediction",
    [
        "prediction_text",
        "span_score",
        "relevance_score",
        "passage_index",
        "passage_token_ids",
    ],
)
def _extend_span_to_full_words(
tensorizer: Tensorizer, tokens: List[int], span: Tuple[int, int]
) -> Tuple[int, int]:
start_index, end_index = span
max_len = len(tokens)
while start_index > 0 and tensorizer.is_sub_word_id(tokens[start_index]):
start_index -= 1
while end_index < max_len - 1 and tensorizer.is_sub_word_id(tokens[end_index + 1]):
end_index += 1
return start_index, end_index
class Tensorizer(object):
    """
    Component for all text to model input data conversions and related utility methods
    """

    # Note: title, if present, is supposed to be put before text (i.e. optional title + document body)
    def text_to_tensor(
        self,
        text: str,
        title: str = None,
        add_special_tokens: bool = True,
        apply_max_len: bool = True,
    ):
        """Convert text (optionally prefixed by title) into a model input tensor."""
        raise NotImplementedError

    def get_pair_separator_ids(self) -> T:
        """Token id(s) separating the two parts of a paired input."""
        raise NotImplementedError

    def get_pad_id(self) -> int:
        """Id of the padding token."""
        raise NotImplementedError

    def get_attn_mask(self, tokens_tensor: T):
        """Attention mask corresponding to a token-id tensor."""
        raise NotImplementedError

    def is_sub_word_id(self, token_id: int):
        """Whether `token_id` is a sub-word continuation piece (used to
        extend answer spans to whole-word boundaries)."""
        raise NotImplementedError

    def to_string(self, token_ids, skip_special_tokens=True):
        """Detokenize a sequence of ids back into a plain string."""
        raise NotImplementedError

    def set_pad_to_max(self, pad: bool):
        """Toggle padding of every sample to the maximum length."""
        raise NotImplementedError

    def get_token_id(self, token: str) -> int:
        """Vocabulary id of the given token string."""
        raise NotImplementedError
The provided code snippet includes necessary dependencies for implementing the `get_best_spans` function. Write a Python function `def get_best_spans( tensorizer: Tensorizer, start_logits: List, end_logits: List, ctx_ids: List, max_answer_length: int, passage_idx: int, relevance_score: float, top_spans: int = 1, ) -> List[SpanPrediction]` to solve the following problem:
Finds the best answer span for the extractive Q&A model
Here is the function:
def get_best_spans(
    tensorizer: Tensorizer,
    start_logits: List,
    end_logits: List,
    ctx_ids: List,
    max_answer_length: int,
    passage_idx: int,
    relevance_score: float,
    top_spans: int = 1,
) -> List[SpanPrediction]:
    """
    Finds the best answer span for the extractive Q&A model
    """
    # Score every candidate span whose length is at most max_answer_length.
    candidates = [
        ((begin, begin + offset), s_logit + e_logit)
        for begin, s_logit in enumerate(start_logits)
        for offset, e_logit in enumerate(end_logits[begin : begin + max_answer_length])
    ]
    candidates.sort(key=lambda item: item[1], reverse=True)

    picked_intervals = []
    results = []
    for (begin, end), span_score in candidates:
        assert begin <= end
        assert end - begin + 1 <= max_answer_length
        # Skip candidates nested inside (or fully containing) a picked span.
        overlaps = any(
            begin <= p_begin <= p_end <= end or p_begin <= begin <= end <= p_end
            for (p_begin, p_end) in picked_intervals
        )
        if overlaps:
            continue
        # extend bpe subtokens to full tokens
        begin, end = _extend_span_to_full_words(tensorizer, ctx_ids, (begin, end))
        answer_text = tensorizer.to_string(ctx_ids[begin : end + 1])
        results.append(
            SpanPrediction(
                answer_text, span_score, relevance_score, passage_idx, ctx_ids
            )
        )
        picked_intervals.append((begin, end))
        if len(picked_intervals) == top_spans:
            break
    return results
21,702 | import collections
import csv
import json
import logging
import re
import unicodedata
import jsonlines
import spacy as spacy
from typing import List, Dict
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if logger.hasHandlers():
    logger.handlers.clear()
# `console` was referenced but never defined in the original, which raised
# NameError at import time. Attach a stderr StreamHandler explicitly.
console = logging.StreamHandler()
logger.addHandler(console)
def convert_jsonl_to_qas_tsv(path, out):
    """Extract (question, short_answers) pairs from a jsonl file and dump
    them as a tab-separated file."""
    pairs = []
    with jsonlines.open(path, mode="r") as jsonl_reader:
        for jline in jsonl_reader:
            question = jline["question"]
            short_answers = jline.get("short_answers", [])
            pairs.append((question, short_answers))
    with open(out, "w", newline="") as csvfile:
        tsv_writer = csv.writer(csvfile, delimiter="\t")
        for question, short_answers in pairs:
            tsv_writer.writerow([question, short_answers])
    logger.info("Saved to %s", out)
21,703 | import collections
import csv
import json
import logging
import re
import unicodedata
import jsonlines
import spacy as spacy
from typing import List, Dict
def tokenize(text):
    """Run the module-level spacy pipeline over `text` and return the
    lower-cased token texts."""
    return [token.text.lower() for token in nlp(text)]
def normalize(text):
    """Resolve different type of unicode encodings.

    NFD decomposition splits pre-composed characters into a base character
    plus combining marks, so visually identical strings compare equal.
    """
    return unicodedata.normalize("NFD", text)
The provided code snippet includes necessary dependencies for implementing the `has_prepared_answer` function. Write a Python function `def has_prepared_answer(prep_answers: List[List[str]], text)` to solve the following problem:
Check if a document contains an answer string.
Here is the function:
def has_prepared_answer(prep_answers: List[List[str]], text):
    """Check if a document contains an answer string."""
    # Normalize and tokenize the document the same way the answers were prepared.
    doc_tokens = tokenize(normalize(text))
    for answer_tokens in prep_answers:
        width = len(answer_tokens)
        # Slide a window of the answer's width over the document tokens.
        for start in range(len(doc_tokens) - width + 1):
            if doc_tokens[start : start + width] == answer_tokens:
                return True
    return False
21,704 | import collections
import csv
import json
import logging
import re
import unicodedata
import jsonlines
import spacy as spacy
from typing import List, Dict
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if logger.hasHandlers():
    logger.handlers.clear()
# `console` was referenced but never defined in the original, which raised
# NameError at import time. Attach a stderr StreamHandler explicitly.
console = logging.StreamHandler()
logger.addHandler(console)
class NQTableParser(object):
    """Rebuilds Table objects from a flat NQ token stream.

    `is_html_mask` is a list parallel to `tokens` flagging which tokens are
    HTML tags (<Table>, <Tr>, <Td>/<Th> and their closers); all other tokens
    are cell/caption content. Nested tables are handled via a stack.
    """

    def __init__(self, tokens, is_html_mask, title):
        self.tokens = tokens
        self.is_html_mask = is_html_mask
        self.max_idx = len(self.tokens)
        self.all_tables = []
        # Table currently being filled (innermost one when nested).
        self.current_table: Table = None
        # Enclosing tables saved while a nested <Table> is being parsed.
        self.tables_stack = collections.deque()
        self.title = title

    def parse(self) -> List[Table]:
        """Single pass over the token stream; returns every table found
        (the outermost table first, nested tables after it)."""
        self.all_tables = []
        self.tables_stack = collections.deque()
        for i in range(self.max_idx):
            t = self.tokens[i]
            if not self.is_html_mask[i]:
                # cell content
                self._on_content(t)
                continue
            if "<Table" in t:
                self._on_table_start()
            elif t == "</Table>":
                self._on_table_end()
            elif "<Tr" in t:
                self._onRowStart()
            elif t == "</Tr>":
                self._onRowEnd()
            elif "<Td" in t or "<Th" in t:
                self._onCellStart()
            elif t in ["</Td>", "</Th>"]:
                self._on_cell_end()
        return self.all_tables

    def _on_table_start(self):
        # New (possibly nested) table. Its caption is the page title, or —
        # for a nested table — the parent caption plus the enclosing cell text.
        caption = self.title
        parent_table = self.current_table
        if parent_table:
            self.tables_stack.append(parent_table)
            caption = parent_table.caption
            if parent_table.body and parent_table.body[-1].cells:
                current_cell = self.current_table.body[-1].cells[-1]
                caption += " | " + " ".join(current_cell.value_tokens)
        t = Table()
        t.caption = caption
        self.current_table = t
        self.all_tables.append(t)

    def _on_table_end(self):
        t = self.current_table
        if t:
            if self.tables_stack:  # t is a nested table
                self.current_table = self.tables_stack.pop()
                if self.current_table.body:
                    # Attach the finished nested table to the enclosing cell.
                    current_cell = self.current_table.body[-1].cells[-1]
                    current_cell.nested_tables.append(t)
        else:
            logger.error("table end without table object")

    def _onRowStart(self):
        self.current_table.body.append(Row())

    def _onRowEnd(self):
        pass

    def _onCellStart(self):
        current_row = self.current_table.body[-1]
        current_row.cells.append(Cell())

    def _on_cell_end(self):
        pass

    def _on_content(self, token):
        # Append content to the currently open cell, or to the caption when
        # no row has started yet.
        if self.current_table.body:
            current_row = self.current_table.body[-1]
            current_cell = current_row.cells[-1]
            current_cell.value_tokens.append(token)
        else:  # tokens outside of row/cells. Just append to the table caption.
            self.current_table.caption += " " + token
def read_nq_tables_jsonl(path: str, out_file: str = None) -> Dict[str, Table]:
    """Parse an NQ tables jsonl file into a dict of de-duplicated Table
    objects keyed by Table.get_key().

    Pages carrying the wikipedia 'multiple issues' banner are skipped, and
    tables with at most one non-empty row are counted but not kept. When
    `out_file` is given, the result is also dumped as CSV for Lucene.
    """
    tables_with_issues = 0
    single_row_tables = 0
    nested_tables = 0
    regular_tables = 0
    total_tables = 0
    total_rows = 0
    tables_dict = {}
    with jsonlines.open(path, mode="r") as jsonl_reader:
        for jline in jsonl_reader:
            tokens = jline["tokens"]
            # Skip wikipedia maintenance-banner pages entirely.
            if "( hide ) This section has multiple issues" in " ".join(tokens):
                tables_with_issues += 1
                continue
            mask = jline["html_mask"]
            page_url = jline["doc_url"]
            title = jline["title"]
            p = NQTableParser(tokens, mask, title)
            tables = p.parse()
            # Every table beyond the first returned by the parser is nested.
            nested_tables += len(tables[1:])
            for t in tables:
                total_tables += 1
                # calc amount of non empty rows
                non_empty_rows = sum(
                    [
                        1
                        for r in t.body
                        if r.cells and any([True for c in r.cells if c.value_tokens])
                    ]
                )
                if non_empty_rows <= 1:
                    single_row_tables += 1
                else:
                    regular_tables += 1
                    total_rows += len(t.body)
                    # De-duplicate tables by their string fingerprint.
                    if t.get_key() not in tables_dict:
                        tables_dict[t.get_key()] = t
            if len(tables_dict) % 1000 == 0:
                logger.info("tables_dict %d", len(tables_dict))
    print("regular tables", regular_tables)
    print("tables_with_issues", tables_with_issues)
    print("single_row_tables", single_row_tables)
    print("nested_tables", nested_tables)
    if out_file:
        convert_to_csv_for_lucene(tables_dict, out_file)
    return tables_dict
def prepare_answers(answers) -> List[List[str]]:
    """Normalize every answer string and split it on single spaces, producing
    token lists ready for exact subsequence matching."""
    return [normalize(answer).lower().split(" ") for answer in answers]
def has_prepared_answer2(prep_answers: List[List[str]], text: List[str]):
    """True when any prepared answer token sequence occurs contiguously in
    `text` (already tokenized); tokens are normalized and lower-cased first."""
    doc = [normalize(tok).lower() for tok in text]
    for answer in prep_answers:
        width = len(answer)
        for pos in range(len(doc) - width + 1):
            if doc[pos : pos + width] == answer:
                return True
    return False
def has_answer(answers, text, regMatxh=False):
    """Check if a document contains an answer string."""
    text = normalize(text)
    if regMatxh:
        # Regex mode: only the first answer is treated as a pattern.
        return bool(regex_match(text, normalize(answers[0])))
    # Token mode: look for any answer as a contiguous token subsequence.
    doc_tokens = tokenize(text)
    for answer in answers:
        answer_tokens = tokenize(normalize(answer))
        width = len(answer_tokens)
        for pos in range(len(doc_tokens) - width + 1):
            if doc_tokens[pos : pos + width] == answer_tokens:
                return True
    return False
def convert_search_res_to_dpr_and_eval(
    res_file, all_tables_file_jsonl, nq_table_file, out_file, gold_res_file: str = None
):
    """Convert BM25 search results over NQ tables into DPR-format jsonl.

    Retrieved tables that contain an answer become positives; the rest become
    hard negatives (capped at 10 positives / 30 negatives per question).
    Optionally injects gold matches from `gold_res_file` at position 0, and
    always appends the gold tables parsed from `nq_table_file`. Writes the
    resulting records to `out_file`.
    """
    # Build an integer-keyed table db; ids are 1-based and must correspond to
    # the ids used in the BM25 result file.
    db = {}
    id = 0
    tables_dict = read_nq_tables_jsonl(all_tables_file_jsonl)
    for _, v in tables_dict.items():
        id += 1
        db[id] = v
    logger.info("db size %s", len(db))
    total = 0
    dpr_results = {}
    import torch

    bm25_per_topk_hits = torch.tensor([0] * 100)
    qas = []
    with open(res_file) as tsvfile:
        reader = csv.reader(tsvfile, delimiter="\t")
        # file format: id, text
        for row in reader:
            total += 1
            q = row[0]
            # SECURITY NOTE(review): eval() on file content. row[1] is expected
            # to be a python-literal list of answer strings; ast.literal_eval
            # would be safer if this file can come from an untrusted source.
            answers = eval(row[1])
            prep_answers = prepare_answers(answers)
            qas.append((q, prep_answers))
            question_hns = []
            question_positives = []
            answers_table_links = []
            for k, bm25result in enumerate(row[2:]):
                score, id = bm25result.split(",")
                table = db[int(id)]
                answer_locations = []

                # Collects (row, cell) positions where an answer occurs.
                def check_answer(tokens, row_idx: int, cell_idx: int):
                    if has_prepared_answer2(prep_answers, tokens):
                        answer_locations.append((row_idx, cell_idx))

                # Stop once enough positives and hard negatives were gathered.
                if (len(question_positives) >= 10 and len(question_hns) >= 10) or (
                    len(question_hns) >= 30
                ):
                    break
                table.visit(check_answer)
                has_answer = len(answer_locations) > 0
                if has_answer:
                    question_positives.append(table)
                    answers_table_links.append(answer_locations)
                else:
                    question_hns.append(table)
            dpr_results[q] = (question_positives, question_hns, answers_table_links)
            if len(dpr_results) % 100 == 0:
                logger.info("dpr_results %s", len(dpr_results))
    logger.info("dpr_results size %s", len(dpr_results))
    logger.info("total %s", total)
    logger.info("bm25_per_topk_hits %s", bm25_per_topk_hits)
    if gold_res_file:
        logger.info("Processing gold_res_file")
        with open(gold_res_file) as cFile:
            csvReader = csv.reader(cFile, delimiter=",")
            for row in csvReader:
                # row format: question id, then "psg_id ..." fields.
                q_id = int(row[0])
                qas_tuple = qas[q_id]
                prep_answers = qas_tuple[1]
                question_gold_positive_match = None
                q = qas_tuple[0]
                answers_links = None
                for field in row[1:]:
                    psg_id = int(field.split()[0])
                    table = db[psg_id]
                    answer_locations = []

                    def check_answer(tokens, row_idx: int, cell_idx: int):
                        if has_prepared_answer2(prep_answers, tokens):
                            answer_locations.append((row_idx, cell_idx))

                    table.visit(check_answer)
                    has_answer = len(answer_locations) > 0
                    # Keep the first gold table that actually contains an answer.
                    if has_answer and question_gold_positive_match is None:
                        question_gold_positive_match = table
                        question_gold_positive_match.gold_match = True
                        answers_links = answer_locations
                if question_gold_positive_match is None:
                    logger.info("No gold match for q=%s, q_id=%s", q, q_id)
                else:  # inject into ctx+ at the first position
                    question_positives, hns, ans_links = dpr_results[q]
                    question_positives.insert(0, question_gold_positive_match)
                    ans_links.insert(0, answers_links)
    out_results = []
    with jsonlines.open(nq_table_file, mode="r") as jsonl_reader:
        for jline in jsonl_reader:
            q = jline["question"]
            gold_positive_table = jline["contexts"][0]
            mask = gold_positive_table["html_mask"]
            title = jline["title"]
            p = NQTableParser(gold_positive_table["tokens"], mask, title)
            tables = p.parse()
            # select the one with the answer(s)
            prep_answers = prepare_answers(jline["short_answers"])
            tables_with_answers = []
            tables_answer_locations = []
            for t in tables:
                answer_locations = []

                def check_answer(tokens, row_idx: int, cell_idx: int):
                    if has_prepared_answer2(prep_answers, tokens):
                        answer_locations.append((row_idx, cell_idx))

                t.visit(check_answer)
                has_answer = len(answer_locations) > 0
                if has_answer:
                    tables_with_answers.append(t)
                    tables_answer_locations.append(answer_locations)
            if not tables_with_answers:
                logger.info("No answer in gold table(s) for q=%s", q)
            positive_ctxs, hard_neg_ctxs, answers_table_links = dpr_results[q]
            positive_ctxs = positive_ctxs + tables_with_answers
            tables_answer_locations = answers_table_links + tables_answer_locations
            assert len(positive_ctxs) == len(tables_answer_locations)
            positive_ctxs = [t.to_dpr_json() for t in positive_ctxs]
            # set has_answer attributes
            for i, ctx_json in enumerate(positive_ctxs):
                answer_links = tables_answer_locations[i]
                ctx_json["answer_pos"] = answer_links
            hard_neg_ctxs = [t.to_dpr_json() for t in hard_neg_ctxs]
            out_results.append(
                {
                    "question": q,
                    "id": jline["example_id"],
                    "answers": jline["short_answers"],
                    "positive_ctxs": positive_ctxs,
                    "hard_negative_ctxs": hard_neg_ctxs,
                }
            )
    logger.info("out_results size %s", len(out_results))
    with jsonlines.open(
        out_file, mode="w"
    ) as writer:  # encoding="utf-8", .encode('utf-8')
        for r in out_results:
            writer.write(r)
    logger.info("Saved to %s", out_file)
21,705 | import collections
import csv
import json
import logging
import re
import unicodedata
import jsonlines
import spacy as spacy
from typing import List, Dict
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if logger.hasHandlers():
    logger.handlers.clear()
# `console` was referenced but never defined in the original, which raised
# NameError at import time. Attach a stderr StreamHandler explicitly.
console = logging.StreamHandler()
logger.addHandler(console)
class NQTableParser(object):
    """Rebuilds Table objects from a flat NQ token stream.

    `is_html_mask` is a list parallel to `tokens` flagging which tokens are
    HTML tags (<Table>, <Tr>, <Td>/<Th> and their closers); all other tokens
    are cell/caption content. Nested tables are handled via a stack.
    """

    def __init__(self, tokens, is_html_mask, title):
        self.tokens = tokens
        self.is_html_mask = is_html_mask
        self.max_idx = len(self.tokens)
        self.all_tables = []
        # Table currently being filled (innermost one when nested).
        self.current_table: Table = None
        # Enclosing tables saved while a nested <Table> is being parsed.
        self.tables_stack = collections.deque()
        self.title = title

    def parse(self) -> List[Table]:
        """Single pass over the token stream; returns every table found
        (the outermost table first, nested tables after it)."""
        self.all_tables = []
        self.tables_stack = collections.deque()
        for i in range(self.max_idx):
            t = self.tokens[i]
            if not self.is_html_mask[i]:
                # cell content
                self._on_content(t)
                continue
            if "<Table" in t:
                self._on_table_start()
            elif t == "</Table>":
                self._on_table_end()
            elif "<Tr" in t:
                self._onRowStart()
            elif t == "</Tr>":
                self._onRowEnd()
            elif "<Td" in t or "<Th" in t:
                self._onCellStart()
            elif t in ["</Td>", "</Th>"]:
                self._on_cell_end()
        return self.all_tables

    def _on_table_start(self):
        # New (possibly nested) table. Its caption is the page title, or —
        # for a nested table — the parent caption plus the enclosing cell text.
        caption = self.title
        parent_table = self.current_table
        if parent_table:
            self.tables_stack.append(parent_table)
            caption = parent_table.caption
            if parent_table.body and parent_table.body[-1].cells:
                current_cell = self.current_table.body[-1].cells[-1]
                caption += " | " + " ".join(current_cell.value_tokens)
        t = Table()
        t.caption = caption
        self.current_table = t
        self.all_tables.append(t)

    def _on_table_end(self):
        t = self.current_table
        if t:
            if self.tables_stack:  # t is a nested table
                self.current_table = self.tables_stack.pop()
                if self.current_table.body:
                    # Attach the finished nested table to the enclosing cell.
                    current_cell = self.current_table.body[-1].cells[-1]
                    current_cell.nested_tables.append(t)
        else:
            logger.error("table end without table object")

    def _onRowStart(self):
        self.current_table.body.append(Row())

    def _onRowEnd(self):
        pass

    def _onCellStart(self):
        current_row = self.current_table.body[-1]
        current_row.cells.append(Cell())

    def _on_cell_end(self):
        pass

    def _on_content(self, token):
        # Append content to the currently open cell, or to the caption when
        # no row has started yet.
        if self.current_table.body:
            current_row = self.current_table.body[-1]
            current_cell = current_row.cells[-1]
            current_cell.value_tokens.append(token)
        else:  # tokens outside of row/cells. Just append to the table caption.
            self.current_table.caption += " " + token
def convert_long_ans_to_dpr(nq_table_file, out_file):
    """Convert NQ long-answer examples into DPR-format jsonl records with a
    single positive context, no answers and no hard negatives."""
    out_results = []
    with jsonlines.open(nq_table_file, mode="r") as jsonl_reader:
        for jline in jsonl_reader:
            ctx = jline["contexts"]
            parser = NQTableParser(
                ctx["la_ans_tokens"], ctx["la_ans_tokens_html_mask"], jline["title"]
            )
            tables = parser.parse()
            # The outermost parsed table becomes the single positive context.
            out_results.append(
                {
                    "question": jline["question"],
                    "id": jline["example_id"],
                    "answers": [],
                    "positive_ctxs": [tables[0].to_dpr_json()],
                    "hard_negative_ctxs": [],
                }
            )
    logger.info("out_results size %s", len(out_results))
    with jsonlines.open(out_file, mode="w") as writer:
        for r in out_results:
            writer.write(r)
    logger.info("Saved to %s", out_file)
21,706 | import collections
import csv
import json
import logging
import re
import unicodedata
import jsonlines
import spacy as spacy
from typing import List, Dict
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if logger.hasHandlers():
    logger.handlers.clear()
# `console` was referenced but never defined in the original, which raised
# NameError at import time. Attach a stderr StreamHandler explicitly.
console = logging.StreamHandler()
logger.addHandler(console)
def parse_qa_csv_file(location):
def calc_questions_overlap(tables_file, regular_file, dev_file):
    """Report how many questions appear both in the tables dataset and in the
    regular (plus optional dev) dataset."""

    def _collect(path, dest):
        # Questions may come from a qas .csv file or a DPR-style .json file.
        if path[-4:] == ".csv":
            for qa in parse_qa_csv_file(path):
                dest.add(qa[0])
        else:
            with open(path, "r", encoding="utf-8") as f:
                logger.info("Reading file %s" % path)
                for item in json.load(f):
                    dest.add(item["question"])

    tab_questions = set()
    with jsonlines.open(tables_file, mode="r") as jsonl_reader:
        logger.info("Reading file %s" % tables_file)
        for jline in jsonl_reader:
            tab_questions.add(jline["question"])

    reg_questions = set()
    _collect(regular_file, reg_questions)
    if dev_file:
        _collect(dev_file, reg_questions)

    logger.info("tab_questions %d", len(tab_questions))
    logger.info("reg_questions %d", len(reg_questions))
    logger.info("overlap %d", len(tab_questions.intersection(reg_questions)))
21,707 | import collections
import csv
import json
import logging
import re
import unicodedata
import jsonlines
import spacy as spacy
from typing import List, Dict
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if logger.hasHandlers():
    logger.handlers.clear()
# `console` was referenced but never defined in the original, which raised
# NameError at import time. Attach a stderr StreamHandler explicitly.
console = logging.StreamHandler()
logger.addHandler(console)
def convert_train_jsonl_to_ctxmatch(path: str, out_file: str):
    """Turn DPR train jsonl into (question, linearized-table) pairs and save
    them as jsonl shards of 3000 records each."""

    def _linearize(table: dict):
        # Caption followed by every row's cell values, ' . '-separated.
        text = table["caption"] + " . "
        for row in table["rows"]:
            text += " . ".join(cell["value"] for cell in row["columns"])
            text += " . "
        return text

    pairs = []
    with jsonlines.open(path, mode="r") as jsonl_reader:
        for jline in jsonl_reader:
            if len(jline["positive_ctxs"]) == 0:
                continue
            pairs.append((jline["question"], _linearize(jline["positive_ctxs"][0])))
            if len(pairs) % 1000 == 0:
                logger.info("results %d", len(pairs))

    shard_size = 3000
    for shard, start in enumerate(range(0, len(pairs), shard_size)):
        shard_file = out_file + ".shard_{}".format(shard)
        with jsonlines.open(shard_file, mode="w") as writer:
            logger.info("Saving to %s", shard_file)
            for offset, (question, context) in enumerate(
                pairs[start : start + shard_size]
            ):
                writer.write(
                    {"id": start + offset, "question": question, "context": context}
                )
21,708 | import collections
import logging
import string
import unicodedata
from multiprocessing import Pool as ProcessPool
import regex as re
from functools import partial
from typing import Tuple, List, Dict
from dpr.data.retriever_data import TableChunk
from dpr.utils.tokenizers import SimpleTokenizer
def _normalize_answer(s):
def exact_match_score(prediction, ground_truth):
    """True when prediction and ground truth coincide after normalization."""
    return _normalize_answer(ground_truth) == _normalize_answer(prediction)
21,709 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
import hydra
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
from dpr.data.tables import Table
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer
def download(resource_key: str, out_dir: str = None):
    """Download the dataset resource(s) registered under `resource_key`.

    A key absent from RESOURCES_MAP is treated as a prefix, and every
    matching resource is downloaded recursively. Returns the list of local
    file paths for a direct key.
    NOTE(review): the prefix branch discards the recursive results and
    returns [] — confirm callers don't rely on those paths.
    """
    if resource_key not in RESOURCES_MAP:
        # match by prefix
        resources = [k for k in RESOURCES_MAP.keys() if k.startswith(resource_key)]
        if resources:
            for key in resources:
                download(key, out_dir)
        else:
            logger.info("no resources found for specified key")
        return []
    download_info = RESOURCES_MAP[resource_key]
    s3_url = download_info["s3_url"]
    save_root_dir = None
    data_files = []
    if isinstance(s3_url, list):
        # Multi-part resource: fetch each shard under a per-shard key suffix.
        for i, url in enumerate(s3_url):
            save_root_dir, local_file = download_resource(
                url,
                download_info["original_ext"],
                download_info["compressed"],
                "{}_{}".format(resource_key, i),
                out_dir,
            )
            data_files.append(local_file)
    else:
        save_root_dir, local_file = download_resource(
            s3_url,
            download_info["original_ext"],
            download_info["compressed"],
            resource_key,
            out_dir,
        )
        data_files.append(local_file)
    license_files = download_info.get("license_files", None)
    if license_files:
        # License resources come as a (LICENSE, README) pair of URLs.
        download_file(license_files[0], save_root_dir, "LICENSE")
        download_file(license_files[1], save_root_dir, "README")
    return data_files
def get_dpr_files(source_name) -> List[str]:
    """Resolve `source_name` to a list of local files.

    Treats `source_name` as a glob pattern first; if nothing matches but the
    path literally exists (e.g. it contains glob metacharacters such as `[`),
    the literal path is returned. The original returned [] in that case
    because it re-ran glob after the existence check. Otherwise falls back to
    the DPR resource downloader keyed by `source_name`.
    """
    matched = glob.glob(source_name)
    if matched:
        return matched
    if os.path.exists(source_name):
        # Literal path exists but glob missed it (metacharacters in the name).
        return [source_name]
    # try to use data downloader
    from dpr.data.download_data import download

    return download(source_name)
21,710 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
import hydra
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
from dpr.data.tables import Table
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer
def normalize_passage(ctx_text: str):
    """Flatten newlines to spaces and standardize curly apostrophes."""
    return ctx_text.replace("\n", " ").replace("’", "'")
21,711 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
import hydra
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
from dpr.data.tables import Table
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer
def normalize_question(question: str) -> str:
    """Replace curly apostrophes with plain ASCII ones."""
    return question.replace("’", "'")
21,712 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
import hydra
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
from dpr.data.tables import Table
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer
logger = logging.getLogger(__name__)
class Table(object):
    """In-memory representation of an HTML table: a caption plus a list of
    Row objects. `gold_match` marks tables matched against gold results."""

    def __init__(self, caption=""):
        self.caption = caption
        self.body: List[Row] = []  # table rows, in document order
        self.key = None  # lazily computed string identity (see get_key)
        self.gold_match = False

    def __str__(self):
        table_str = "<T>: {}\n".format(self.caption)
        table_str += " rows:\n"
        for i, r in enumerate(self.body):
            table_str += " row #{}: {}\n".format(i, str(r))
        return table_str

    def get_key(self) -> str:
        """String fingerprint of the table (its full textual rendering),
        cached after the first call."""
        if not self.key:
            self.key = str(self)
        return self.key

    def visit(self, tokens_function, include_caption: bool = False) -> bool:
        # Apply tokens_function(tokens, row_idx, cell_idx) to every cell;
        # the caption is visited with indices (-1, -1) when requested.
        # NOTE(review): annotated -> bool but nothing is returned — confirm
        # callers ignore the result.
        if include_caption:
            tokens_function(self.caption, -1, -1)
        for i, r in enumerate(self.body):
            r.visit(tokens_function, i)

    def to_dpr_json(self):
        """Serialize to the DPR table-json format ({caption, rows, [gold_match]})."""
        r = {
            "caption": self.caption,
            "rows": [r.to_dpr_json(i) for i, r in enumerate(self.body)],
        }
        if self.gold_match:
            r["gold_match"] = 1
        return r
class NQTableParser(object):
    """Rebuilds Table objects from a flat NQ token stream.

    `is_html_mask` is a list parallel to `tokens` flagging which tokens are
    HTML tags (<Table>, <Tr>, <Td>/<Th> and their closers); all other tokens
    are cell/caption content. Nested tables are handled via a stack.
    """

    def __init__(self, tokens, is_html_mask, title):
        self.tokens = tokens
        self.is_html_mask = is_html_mask
        self.max_idx = len(self.tokens)
        self.all_tables = []
        # Table currently being filled (innermost one when nested).
        self.current_table: Table = None
        # Enclosing tables saved while a nested <Table> is being parsed.
        self.tables_stack = collections.deque()
        self.title = title

    def parse(self) -> List[Table]:
        """Single pass over the token stream; returns every table found
        (the outermost table first, nested tables after it)."""
        self.all_tables = []
        self.tables_stack = collections.deque()
        for i in range(self.max_idx):
            t = self.tokens[i]
            if not self.is_html_mask[i]:
                # cell content
                self._on_content(t)
                continue
            if "<Table" in t:
                self._on_table_start()
            elif t == "</Table>":
                self._on_table_end()
            elif "<Tr" in t:
                self._onRowStart()
            elif t == "</Tr>":
                self._onRowEnd()
            elif "<Td" in t or "<Th" in t:
                self._onCellStart()
            elif t in ["</Td>", "</Th>"]:
                self._on_cell_end()
        return self.all_tables

    def _on_table_start(self):
        # New (possibly nested) table. Its caption is the page title, or —
        # for a nested table — the parent caption plus the enclosing cell text.
        caption = self.title
        parent_table = self.current_table
        if parent_table:
            self.tables_stack.append(parent_table)
            caption = parent_table.caption
            if parent_table.body and parent_table.body[-1].cells:
                current_cell = self.current_table.body[-1].cells[-1]
                caption += " | " + " ".join(current_cell.value_tokens)
        t = Table()
        t.caption = caption
        self.current_table = t
        self.all_tables.append(t)

    def _on_table_end(self):
        t = self.current_table
        if t:
            if self.tables_stack:  # t is a nested table
                self.current_table = self.tables_stack.pop()
                if self.current_table.body:
                    # Attach the finished nested table to the enclosing cell.
                    current_cell = self.current_table.body[-1].cells[-1]
                    current_cell.nested_tables.append(t)
        else:
            logger.error("table end without table object")

    def _onRowStart(self):
        self.current_table.body.append(Row())

    def _onRowEnd(self):
        pass

    def _onCellStart(self):
        current_row = self.current_table.body[-1]
        current_row.cells.append(Cell())

    def _on_cell_end(self):
        pass

    def _on_content(self, token):
        # Append content to the currently open cell, or to the caption when
        # no row has started yet.
        if self.current_table.body:
            current_row = self.current_table.body[-1]
            current_cell = current_row.cells[-1]
            current_cell.value_tokens.append(token)
        else:  # tokens outside of row/cells. Just append to the table caption.
            self.current_table.caption += " " + token
class Table(object):
    """In-memory representation of an HTML table: a caption plus a list of
    Row objects. `gold_match` marks tables matched against gold results."""

    def __init__(self, caption=""):
        self.caption = caption
        self.body: List[Row] = []  # table rows, in document order
        self.key = None  # lazily computed string identity (see get_key)
        self.gold_match = False

    def __str__(self):
        table_str = "<T>: {}\n".format(self.caption)
        table_str += " rows:\n"
        for i, r in enumerate(self.body):
            table_str += " row #{}: {}\n".format(i, str(r))
        return table_str

    def get_key(self) -> str:
        """String fingerprint of the table (its full textual rendering),
        cached after the first call."""
        if not self.key:
            self.key = str(self)
        return self.key

    def visit(self, tokens_function, include_caption: bool = False) -> bool:
        # Apply tokens_function(tokens, row_idx, cell_idx) to every cell;
        # the caption is visited with indices (-1, -1) when requested.
        # NOTE(review): annotated -> bool but nothing is returned — confirm
        # callers ignore the result.
        if include_caption:
            tokens_function(self.caption, -1, -1)
        for i, r in enumerate(self.body):
            r.visit(tokens_function, i)

    def to_dpr_json(self):
        """Serialize to the DPR table-json format ({caption, rows, [gold_match]})."""
        r = {
            "caption": self.caption,
            "rows": [r.to_dpr_json(i) for i, r in enumerate(self.body)],
        }
        if self.gold_match:
            r["gold_match"] = 1
        return r
def read_nq_tables_jsonl(path: str) -> Dict[str, Table]:
    """Parse an NQ tables jsonl file into a dict of de-duplicated Table
    objects keyed by Table.get_key().

    Pages carrying the wikipedia 'multiple issues' banner are skipped, and
    tables with at most one non-empty row are counted but not kept.
    """
    tables_with_issues = 0
    single_row_tables = 0
    nested_tables = 0
    regular_tables = 0
    total_tables = 0
    total_rows = 0
    tables_dict = {}
    with jsonlines.open(path, mode="r") as jsonl_reader:
        for jline in jsonl_reader:
            tokens = jline["tokens"]
            # Skip wikipedia maintenance-banner pages entirely.
            if "( hide ) This section has multiple issues" in " ".join(tokens):
                tables_with_issues += 1
                continue
            mask = jline["html_mask"]
            # page_url = jline["doc_url"]
            title = jline["title"]
            p = NQTableParser(tokens, mask, title)
            tables = p.parse()
            # Every table beyond the first returned by the parser is nested.
            nested_tables += len(tables[1:])
            for t in tables:
                total_tables += 1
                # calc amount of non empty rows
                non_empty_rows = sum(
                    [
                        1
                        for r in t.body
                        if r.cells and any([True for c in r.cells if c.value_tokens])
                    ]
                )
                if non_empty_rows <= 1:
                    single_row_tables += 1
                else:
                    regular_tables += 1
                    total_rows += len(t.body)
                    # De-duplicate tables by their string fingerprint.
                    if t.get_key() not in tables_dict:
                        tables_dict[t.get_key()] = t
            if len(tables_dict) % 1000 == 0:
                logger.info("tables_dict %d", len(tables_dict))
    logger.info("regular tables %d", regular_tables)
    logger.info("tables_with_issues %d", tables_with_issues)
    logger.info("single_row_tables %d", single_row_tables)
    logger.info("nested_tables %d", nested_tables)
    return tables_dict
21,713 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
import hydra
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
from dpr.data.tables import Table
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer
class Table(object):
    """In-memory representation of an HTML table: a caption plus a list of
    Row objects. `gold_match` marks tables matched against gold results."""

    def __init__(self, caption=""):
        self.caption = caption
        self.body: List[Row] = []  # table rows, in document order
        self.key = None  # lazily computed string identity (see get_key)
        self.gold_match = False

    def __str__(self):
        table_str = "<T>: {}\n".format(self.caption)
        table_str += " rows:\n"
        for i, r in enumerate(self.body):
            table_str += " row #{}: {}\n".format(i, str(r))
        return table_str

    def get_key(self) -> str:
        """String fingerprint of the table (its full textual rendering),
        cached after the first call."""
        if not self.key:
            self.key = str(self)
        return self.key

    def visit(self, tokens_function, include_caption: bool = False) -> bool:
        # Apply tokens_function(tokens, row_idx, cell_idx) to every cell;
        # the caption is visited with indices (-1, -1) when requested.
        # NOTE(review): annotated -> bool but nothing is returned — confirm
        # callers ignore the result.
        if include_caption:
            tokens_function(self.caption, -1, -1)
        for i, r in enumerate(self.body):
            r.visit(tokens_function, i)

    def to_dpr_json(self):
        """Serialize to the DPR table-json format ({caption, rows, [gold_match]})."""
        r = {
            "caption": self.caption,
            "rows": [r.to_dpr_json(i) for i, r in enumerate(self.body)],
        }
        if self.gold_match:
            r["gold_match"] = 1
        return r
class Table(object):
    """In-memory representation of an HTML table: a caption plus a list of
    Row objects. `gold_match` marks tables matched against gold results."""

    def __init__(self, caption=""):
        self.caption = caption
        self.body: List[Row] = []  # table rows, in document order
        self.key = None  # lazily computed string identity (see get_key)
        self.gold_match = False

    def __str__(self):
        table_str = "<T>: {}\n".format(self.caption)
        table_str += " rows:\n"
        for i, r in enumerate(self.body):
            table_str += " row #{}: {}\n".format(i, str(r))
        return table_str

    def get_key(self) -> str:
        """String fingerprint of the table (its full textual rendering),
        cached after the first call."""
        if not self.key:
            self.key = str(self)
        return self.key

    def visit(self, tokens_function, include_caption: bool = False) -> bool:
        # Apply tokens_function(tokens, row_idx, cell_idx) to every cell;
        # the caption is visited with indices (-1, -1) when requested.
        # NOTE(review): annotated -> bool but nothing is returned — confirm
        # callers ignore the result.
        if include_caption:
            tokens_function(self.caption, -1, -1)
        for i, r in enumerate(self.body):
            r.visit(tokens_function, i)

    def to_dpr_json(self):
        """Serialize to the DPR table-json format ({caption, rows, [gold_match]})."""
        r = {
            "caption": self.caption,
            "rows": [r.to_dpr_json(i) for i, r in enumerate(self.body)],
        }
        if self.gold_match:
            r["gold_match"] = 1
        return r
def get_table_string_for_answer_check(table: Table):  # this doesn't use caption
    """Linearize the table body for answer matching: cells of each row joined
    by ' . ', a trailing ' . ' after every row; the caption is excluded."""
    pieces = []
    for row in table.body:
        pieces.append(" . ".join(" ".join(c.value_tokens) for c in row.cells))
        pieces.append(" . ")
    return "".join(pieces)
21,714 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
import hydra
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
from dpr.data.tables import Table
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer
logger = logging.getLogger(__name__)
class Table(object):
def __init__(self, caption=""):
def __str__(self):
def get_key(self) -> str:
def visit(self, tokens_function, include_caption: bool = False) -> bool:
def to_dpr_json(self):
class JsonLTablesQADataset(Dataset):
def __init__(
self,
file: str,
is_train_set: bool,
selector: DictConfig = None,
shuffle_positives: bool = False,
max_negatives: int = 1,
seed: int = 0,
max_len=100,
split_type: str = "type1",
):
def load_data(self):
def __getitem__(self, index) -> BiEncoderSample:
def __len__(self):
def get_lin_func(cls, split_type: str):
def split_table(cls, t: dict, max_length: int):
def _linearize_table(self, t: dict, is_positive: bool) -> str:
def _linearize_row(cls, row: dict) -> Tuple[str, int]:
class Table(object):
def __init__(self, caption=""):
def __str__(self):
def get_key(self) -> str:
def visit(self, tokens_function, include_caption: bool = False) -> bool:
def to_dpr_json(self):
def split_tables_to_chunks(
    tables_dict: Dict[str, Table], max_table_len: int, split_type: str = "type1"
) -> List[Tuple[int, str, str, int]]:
    """Linearize every table into text chunks of at most *max_table_len* tokens.

    :param tables_dict: mapping of table key -> Table object
    :param max_table_len: maximum length of a single linearized chunk
    :param split_type: splitting strategy; only "type1" is implemented
    :return: list of (chunk_id, chunk_text, table_caption, table_index) tuples
    """
    tables_as_dicts = [tbl.to_dpr_json() for tbl in tables_dict.values()]

    chunks = []
    next_chunk_id = 0
    for table_idx, table_json in enumerate(tables_as_dicts):
        # TODO: support other types
        assert split_type == "type1"
        title = table_json["caption"]
        for chunk_text in JsonLTablesQADataset.split_table(table_json, max_table_len):
            # chunk id , text, title, external_id
            chunks.append((next_chunk_id, chunk_text, title, table_idx))
            next_chunk_id += 1
        if table_idx % 1000 == 0:
            logger.info("Splitted %d tables to %d chunks", table_idx, len(chunks))
    return chunks
21,715 | import logging
import numpy as np
import os
import random
import socket
import torch
from omegaconf import DictConfig
The provided code snippet includes necessary dependencies for implementing the `set_cfg_params_from_state` function. Write a Python function `def set_cfg_params_from_state(state: dict, cfg: DictConfig)` to solve the following problem:
Overrides some of the encoder config parameters from a given state object
Here is the function:
def set_cfg_params_from_state(state: dict, cfg: DictConfig):
    """
    Overrides some of the encoder config parameters from a given state object.
    No-op when *state* is empty or None.
    """
    if not state:
        return
    cfg.do_lower_case = state["do_lower_case"]
    encoder_cfg = cfg.encoder
    encoder_cfg.pretrained_model_cfg = state["pretrained_model_cfg"]
    encoder_cfg.encoder_model_type = state["encoder_model_type"]
    encoder_cfg.pretrained_file = state["pretrained_file"]
    encoder_cfg.projection_dim = state["projection_dim"]
    encoder_cfg.sequence_length = state["sequence_length"]
21,716 | import logging
import numpy as np
import os
import random
import socket
import torch
from omegaconf import DictConfig
The provided code snippet includes necessary dependencies for implementing the `get_encoder_params_state_from_cfg` function. Write a Python function `def get_encoder_params_state_from_cfg(cfg: DictConfig)` to solve the following problem:
Selects the param values to be saved in a checkpoint, so that a trained model can be used for downstream tasks without the need to specify these parameters again. :return: Dict of params to memorize in a checkpoint
Here is the function:
def get_encoder_params_state_from_cfg(cfg: DictConfig):
    """
    Selects the param values to be saved in a checkpoint, so that a trained model
    can be used for downstream tasks without the need to specify these parameters
    again.
    :return: Dict of params to memorize in a checkpoint
    """
    encoder_cfg = cfg.encoder
    return {
        "do_lower_case": cfg.do_lower_case,
        "pretrained_model_cfg": encoder_cfg.pretrained_model_cfg,
        "encoder_model_type": encoder_cfg.encoder_model_type,
        "pretrained_file": encoder_cfg.pretrained_file,
        "projection_dim": encoder_cfg.projection_dim,
        "sequence_length": encoder_cfg.sequence_length,
    }
21,717 | import logging
import numpy as np
import os
import random
import socket
import torch
from omegaconf import DictConfig
def set_seed(args):
    """Seed the python, numpy and torch RNGs from args.seed; when args.n_gpu > 0,
    also seeds all CUDA devices for reproducibility."""
    seed_value = args.seed
    # The three generators are independent; seed each one with the same value.
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed_value)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed_value)
21,718 | import logging
import numpy as np
import os
import random
import socket
import torch
from omegaconf import DictConfig
logger = logging.getLogger()
The provided code snippet includes necessary dependencies for implementing the `setup_cfg_gpu` function. Write a Python function `def setup_cfg_gpu(cfg)` to solve the following problem:
Setup params for CUDA, GPU & distributed training
Here is the function:
def setup_cfg_gpu(cfg):
    """
    Setup params for CUDA, GPU & distributed training
    """
    # Mutates and returns cfg: fills in distributed_world_size, n_gpu and device.
    logger.info("args.local_rank %s", cfg.local_rank)
    # WORLD_SIZE is set by the distributed launcher; absent means single-process.
    ws = os.environ.get("WORLD_SIZE")
    cfg.distributed_world_size = int(ws) if ws else 1
    logger.info("WORLD_SIZE %s", ws)
    if cfg.local_rank == -1 or cfg.no_cuda:  # single-node multi-gpu (or cpu) mode
        device = str(
            torch.device(
                "cuda" if torch.cuda.is_available() and not cfg.no_cuda else "cpu"
            )
        )
        cfg.n_gpu = torch.cuda.device_count()
    else:  # distributed mode
        # Each process owns exactly one GPU selected by its local rank.
        torch.cuda.set_device(cfg.local_rank)
        device = str(torch.device("cuda", cfg.local_rank))
        torch.distributed.init_process_group(backend="nccl")
        cfg.n_gpu = 1
    cfg.device = device
    logger.info(
        "Initialized host %s as d.rank %d on device=%s, n_gpu=%d, world size=%d",
        socket.gethostname(),
        cfg.local_rank,
        cfg.device,
        cfg.n_gpu,
        cfg.distributed_world_size,
    )
    logger.info("16-bits training: %s ", cfg.fp16)
    return cfg
21,719 | import logging
import numpy as np
import os
import random
import socket
import torch
from omegaconf import DictConfig
def setup_logger(logger):
    """Configure *logger* for INFO-level console output.

    Any pre-existing handlers are removed first, so repeated calls do not
    duplicate output; a single StreamHandler with a thread/timestamp formatter
    is then attached.
    """
    logger.setLevel(logging.INFO)
    if logger.hasHandlers():
        logger.handlers.clear()
    console = logging.StreamHandler()
    console.setFormatter(
        logging.Formatter(
            "[%(thread)s] %(asctime)s [%(levelname)s] %(name)s: %(message)s"
        )
    )
    logger.addHandler(console)
21,720 | import logging
from typing import Tuple
import torch
from torch import Tensor as T
from torch import nn
from transformers.modeling_bert import BertConfig, BertModel
from transformers.optimization import AdamW
from transformers.tokenization_bert import BertTokenizer
from transformers.tokenization_roberta import RobertaTokenizer
from dpr.models.biencoder import BiEncoder
from dpr.utils.data_utils import Tensorizer
from .reader import Reader
def get_bert_tensorizer(cfg, tokenizer=None):
    """Build a BertTensorizer from *cfg*.

    Creates a BERT tokenizer from cfg.encoder.pretrained_model_cfg when none is
    supplied, and registers cfg.special_tokens (if any) before wrapping the
    tokenizer with the configured sequence length.
    """
    sequence_length = cfg.encoder.sequence_length
    pretrained_model_cfg = cfg.encoder.pretrained_model_cfg
    if not tokenizer:
        tokenizer = get_bert_tokenizer(
            pretrained_model_cfg, do_lower_case=cfg.do_lower_case
        )
    if cfg.special_tokens:
        _add_special_tokens(tokenizer, cfg.special_tokens)
    return BertTensorizer(tokenizer, sequence_length)
def get_optimizer(
    model: nn.Module,
    learning_rate: float = 1e-5,
    adam_eps: float = 1e-8,
    weight_decay: float = 0.0,
) -> torch.optim.Optimizer:
    """Create an AdamW optimizer for *model*.

    Parameters whose names contain "bias" or "LayerNorm.weight" are placed in a
    group with weight decay disabled; all others use *weight_decay*.
    """
    no_decay = ["bias", "LayerNorm.weight"]

    def _decays(param_name: str) -> bool:
        # True for parameters that should receive weight decay.
        return not any(marker in param_name for marker in no_decay)

    decay_params = [p for n, p in model.named_parameters() if _decays(n)]
    no_decay_params = [p for n, p in model.named_parameters() if not _decays(n)]
    optimizer_grouped_parameters = [
        {"params": decay_params, "weight_decay": weight_decay},
        {"params": no_decay_params, "weight_decay": 0.0},
    ]
    return AdamW(optimizer_grouped_parameters, lr=learning_rate, eps=adam_eps)
class HFBertEncoder(BertModel):
    """HuggingFace BERT encoder with an optional linear projection applied to the
    pooled (representation-token) output."""

    def __init__(self, config, project_dim: int = 0):
        BertModel.__init__(self, config)
        assert config.hidden_size > 0, "Encoder hidden_size can't be zero"
        # Optional projection from hidden_size down/up to project_dim.
        self.encode_proj = (
            nn.Linear(config.hidden_size, project_dim) if project_dim != 0 else None
        )
        self.init_weights()

    # NOTE(review): first parameter is `cls` — this was very likely a
    # @classmethod whose decorator was lost in this copy; confirm upstream.
    def init_encoder(
        cls,
        cfg_name: str,
        projection_dim: int = 0,
        dropout: float = 0.1,
        pretrained: bool = True,
        **kwargs
    ) -> BertModel:
        """Instantiate the encoder from a HF config name, optionally loading
        pretrained weights and overriding both dropout probabilities."""
        cfg = BertConfig.from_pretrained(cfg_name if cfg_name else "bert-base-uncased")
        if dropout != 0:
            cfg.attention_probs_dropout_prob = dropout
            cfg.hidden_dropout_prob = dropout
        if pretrained:
            return cls.from_pretrained(
                cfg_name, config=cfg, project_dim=projection_dim, **kwargs
            )
        else:
            return HFBertEncoder(cfg, project_dim=projection_dim)

    def forward(
        self,
        input_ids: T,
        token_type_ids: T,
        attention_mask: T,
        representation_token_pos=0,
    ) -> Tuple[T, ...]:
        """Run BERT and return (sequence_output, pooled_output, hidden_states).

        pooled_output is taken from the position given by representation_token_pos:
        either one fixed index (int, default 0 = [CLS]) or a per-sample position
        tensor of shape (batch, 2) whose column 1 holds the token index.
        """
        if self.config.output_hidden_states:
            sequence_output, pooled_output, hidden_states = super().forward(
                input_ids=input_ids,
                token_type_ids=token_type_ids,
                attention_mask=attention_mask,
            )
        else:
            hidden_states = None
            sequence_output, pooled_output = super().forward(
                input_ids=input_ids,
                token_type_ids=token_type_ids,
                attention_mask=attention_mask,
            )
        if isinstance(representation_token_pos, int):
            pooled_output = sequence_output[:, representation_token_pos, :]
        else:  # treat as a tensor
            bsz = sequence_output.size(0)
            assert (
                representation_token_pos.size(0) == bsz
            ), "query bsz={} while representation_token_pos bsz={}".format(
                bsz, representation_token_pos.size(0)
            )
            pooled_output = torch.stack(
                [
                    sequence_output[i, representation_token_pos[i, 1], :]
                    for i in range(bsz)
                ]
            )
        if self.encode_proj:
            pooled_output = self.encode_proj(pooled_output)
        return sequence_output, pooled_output, hidden_states

    def get_out_size(self):
        # Output dimension seen by callers: projection size if present, else hidden size.
        if self.encode_proj:
            return self.encode_proj.out_features
        return self.config.hidden_size
class BiEncoder(nn.Module):
    """Bi-Encoder model component. Encapsulates query/question and context/passage encoders."""

    def __init__(
        self,
        question_model: nn.Module,
        ctx_model: nn.Module,
        fix_q_encoder: bool = False,
        fix_ctx_encoder: bool = False,
    ):
        """
        :param question_model: encoder applied to questions/queries
        :param ctx_model: encoder applied to contexts/passages
        :param fix_q_encoder: run the question encoder under no_grad (frozen)
        :param fix_ctx_encoder: run the context encoder under no_grad (frozen)
        """
        super(BiEncoder, self).__init__()
        self.question_model = question_model
        self.ctx_model = ctx_model
        self.fix_q_encoder = fix_q_encoder
        self.fix_ctx_encoder = fix_ctx_encoder

    # fix: first parameter is the sub-model, not self — without @staticmethod,
    # self.get_representation(encoder, ...) would pass the BiEncoder instance
    # as sub_model and break forward().
    @staticmethod
    def get_representation(
        sub_model: nn.Module,
        ids: T,
        segments: T,
        attn_mask: T,
        fix_encoder: bool = False,
        representation_token_pos=0,
    ) -> (T, T, T):
        """Run one encoder and return (sequence_output, pooled_output, hidden_states).

        All three are None when ids is None. With fix_encoder=True the pass runs
        under no_grad; in training mode grads are re-enabled on the outputs so
        downstream layers can still backprop.
        """
        sequence_output = None
        pooled_output = None
        hidden_states = None
        if ids is not None:
            if fix_encoder:
                with torch.no_grad():
                    sequence_output, pooled_output, hidden_states = sub_model(
                        ids,
                        segments,
                        attn_mask,
                        representation_token_pos=representation_token_pos,
                    )
                if sub_model.training:
                    sequence_output.requires_grad_(requires_grad=True)
                    pooled_output.requires_grad_(requires_grad=True)
            else:
                sequence_output, pooled_output, hidden_states = sub_model(
                    ids,
                    segments,
                    attn_mask,
                    representation_token_pos=representation_token_pos,
                )
        return sequence_output, pooled_output, hidden_states

    def forward(
        self,
        question_ids: T,
        question_segments: T,
        question_attn_mask: T,
        context_ids: T,
        ctx_segments: T,
        ctx_attn_mask: T,
        encoder_type: str = None,
        representation_token_pos=0,
    ) -> Tuple[T, T]:
        """Encode questions and contexts; returns their pooled representations.

        encoder_type may swap the towers ("question" / "ctx"); by default the
        question tower encodes questions and the ctx tower encodes contexts.
        """
        q_encoder = (
            self.question_model
            if encoder_type is None or encoder_type == "question"
            else self.ctx_model
        )
        _q_seq, q_pooled_out, _q_hidden = self.get_representation(
            q_encoder,
            question_ids,
            question_segments,
            question_attn_mask,
            self.fix_q_encoder,
            representation_token_pos=representation_token_pos,
        )

        ctx_encoder = (
            self.ctx_model
            if encoder_type is None or encoder_type == "ctx"
            else self.question_model
        )
        _ctx_seq, ctx_pooled_out, _ctx_hidden = self.get_representation(
            ctx_encoder, context_ids, ctx_segments, ctx_attn_mask, self.fix_ctx_encoder
        )

        return q_pooled_out, ctx_pooled_out

    # TODO delete once moved to the new method
    @classmethod  # fix: first parameter is `cls` — decorator restored
    def create_biencoder_input(
        cls,
        samples: List,
        tensorizer: Tensorizer,
        insert_title: bool,
        num_hard_negatives: int = 0,
        num_other_negatives: int = 0,
        shuffle: bool = True,
        shuffle_positives: bool = False,
        hard_neg_fallback: bool = True,
    ) -> BiEncoderBatch:
        """
        Creates a batch of the biencoder training tuple.
        :param samples: list of data items (from json) to create the batch for
        :param tensorizer: components to create model input tensors from a text sequence
        :param insert_title: enables title insertion at the beginning of the context sequences
        :param num_hard_negatives: amount of hard negatives per question (taken from samples' pools)
        :param num_other_negatives: amount of other negatives per question (taken from samples' pools)
        :param shuffle: shuffles negative passages pools
        :param shuffle_positives: shuffles positive passages pools
        :return: BiEncoderBatch tuple
        """
        question_tensors = []
        ctx_tensors = []
        positive_ctx_indices = []
        hard_neg_ctx_indices = []
        for sample in samples:
            # ctx+ & [ctx-] composition
            # as of now, take the first(gold) ctx+ only
            if shuffle and shuffle_positives:
                positive_ctxs = sample["positive_ctxs"]
                positive_ctx = positive_ctxs[np.random.choice(len(positive_ctxs))]
            else:
                positive_ctx = sample["positive_ctxs"][0]

            neg_ctxs = sample["negative_ctxs"]
            hard_neg_ctxs = sample["hard_negative_ctxs"]
            # fix: `question` was read below but never assigned (NameError at
            # runtime); mirror create_biencoder_input2, which takes the query
            # from the sample.
            question = sample["question"]

            if shuffle:
                random.shuffle(neg_ctxs)
                random.shuffle(hard_neg_ctxs)

            if hard_neg_fallback and len(hard_neg_ctxs) == 0:
                hard_neg_ctxs = neg_ctxs[0:num_hard_negatives]

            neg_ctxs = neg_ctxs[0:num_other_negatives]
            hard_neg_ctxs = hard_neg_ctxs[0:num_hard_negatives]

            all_ctxs = [positive_ctx] + neg_ctxs + hard_neg_ctxs
            hard_negatives_start_idx = 1
            hard_negatives_end_idx = 1 + len(hard_neg_ctxs)

            current_ctxs_len = len(ctx_tensors)

            sample_ctxs_tensors = [
                tensorizer.text_to_tensor(
                    ctx["text"],
                    title=ctx["title"] if (insert_title and "title" in ctx) else None,
                )
                for ctx in all_ctxs
            ]

            ctx_tensors.extend(sample_ctxs_tensors)
            positive_ctx_indices.append(current_ctxs_len)
            hard_neg_ctx_indices.append(
                [
                    i
                    for i in range(
                        current_ctxs_len + hard_negatives_start_idx,
                        current_ctxs_len + hard_negatives_end_idx,
                    )
                ]
            )

            question_tensors.append(tensorizer.text_to_tensor(question))

        ctxs_tensor = torch.cat([ctx.view(1, -1) for ctx in ctx_tensors], dim=0)
        questions_tensor = torch.cat([q.view(1, -1) for q in question_tensors], dim=0)

        ctx_segments = torch.zeros_like(ctxs_tensor)
        question_segments = torch.zeros_like(questions_tensor)

        return BiEncoderBatch(
            questions_tensor,
            question_segments,
            ctxs_tensor,
            ctx_segments,
            positive_ctx_indices,
            hard_neg_ctx_indices,
            "question",
        )

    @classmethod  # fix: first parameter is `cls` — decorator restored
    def create_biencoder_input2(
        cls,
        samples: List[BiEncoderSample],
        tensorizer: Tensorizer,
        insert_title: bool,
        num_hard_negatives: int = 0,
        num_other_negatives: int = 0,
        shuffle: bool = True,
        shuffle_positives: bool = False,
        hard_neg_fallback: bool = True,
        query_token: str = None,
    ) -> BiEncoderBatch:
        """
        Creates a batch of the biencoder training tuple.
        :param samples: list of BiEncoderSample-s to create the batch for
        :param tensorizer: components to create model input tensors from a text sequence
        :param insert_title: enables title insertion at the beginning of the context sequences
        :param num_hard_negatives: amount of hard negatives per question (taken from samples' pools)
        :param num_other_negatives: amount of other negatives per question (taken from samples' pools)
        :param shuffle: shuffles negative passages pools
        :param shuffle_positives: shuffles positive passages pools
        :return: BiEncoderBatch tuple
        """
        question_tensors = []
        ctx_tensors = []
        positive_ctx_indices = []
        hard_neg_ctx_indices = []
        for sample in samples:
            # ctx+ & [ctx-] composition
            # as of now, take the first(gold) ctx+ only
            if shuffle and shuffle_positives:
                positive_ctxs = sample.positive_passages
                positive_ctx = positive_ctxs[np.random.choice(len(positive_ctxs))]
            else:
                positive_ctx = sample.positive_passages[0]

            neg_ctxs = sample.negative_passages
            hard_neg_ctxs = sample.hard_negative_passages
            question = sample.query
            # question = normalize_question(sample.query)

            if shuffle:
                random.shuffle(neg_ctxs)
                random.shuffle(hard_neg_ctxs)

            if hard_neg_fallback and len(hard_neg_ctxs) == 0:
                hard_neg_ctxs = neg_ctxs[0:num_hard_negatives]

            neg_ctxs = neg_ctxs[0:num_other_negatives]
            hard_neg_ctxs = hard_neg_ctxs[0:num_hard_negatives]

            all_ctxs = [positive_ctx] + neg_ctxs + hard_neg_ctxs
            hard_negatives_start_idx = 1
            hard_negatives_end_idx = 1 + len(hard_neg_ctxs)

            current_ctxs_len = len(ctx_tensors)

            sample_ctxs_tensors = [
                tensorizer.text_to_tensor(
                    ctx.text, title=ctx.title if (insert_title and ctx.title) else None
                )
                for ctx in all_ctxs
            ]

            ctx_tensors.extend(sample_ctxs_tensors)
            positive_ctx_indices.append(current_ctxs_len)
            hard_neg_ctx_indices.append(
                [
                    i
                    for i in range(
                        current_ctxs_len + hard_negatives_start_idx,
                        current_ctxs_len + hard_negatives_end_idx,
                    )
                ]
            )

            if query_token:
                # TODO: tmp workaround for EL, remove or revise
                if query_token == "[START_ENT]":
                    query_span = _select_span_with_token(
                        question, tensorizer, token_str=query_token
                    )
                    question_tensors.append(query_span)
                else:
                    question_tensors.append(
                        tensorizer.text_to_tensor(" ".join([query_token, question]))
                    )
            else:
                question_tensors.append(tensorizer.text_to_tensor(question))

        ctxs_tensor = torch.cat([ctx.view(1, -1) for ctx in ctx_tensors], dim=0)
        questions_tensor = torch.cat([q.view(1, -1) for q in question_tensors], dim=0)

        ctx_segments = torch.zeros_like(ctxs_tensor)
        question_segments = torch.zeros_like(questions_tensor)

        return BiEncoderBatch(
            questions_tensor,
            question_segments,
            ctxs_tensor,
            ctx_segments,
            positive_ctx_indices,
            hard_neg_ctx_indices,
            "question",
        )

    def load_state(self, saved_state: CheckpointState):
        """Load model weights from a checkpoint state, dropping HF position-id
        buffers that newer transformers versions no longer expect."""
        # TODO: make a long term HF compatibility fix
        if "question_model.embeddings.position_ids" in saved_state.model_dict:
            del saved_state.model_dict["question_model.embeddings.position_ids"]
            del saved_state.model_dict["ctx_model.embeddings.position_ids"]
        self.load_state_dict(saved_state.model_dict)

    def get_state_dict(self):
        """Return the full state dict of both towers."""
        return self.state_dict()
def get_bert_biencoder_components(cfg, inference_only: bool = False, **kwargs):
    """Build (tensorizer, biencoder, optimizer) for HF-BERT based DPR training.

    Question and context towers are created from the same encoder config.
    The optimizer is None when inference_only is True.
    """
    dropout = cfg.encoder.dropout if hasattr(cfg.encoder, "dropout") else 0.0

    def _make_encoder():
        return HFBertEncoder.init_encoder(
            cfg.encoder.pretrained_model_cfg,
            projection_dim=cfg.encoder.projection_dim,
            dropout=dropout,
            pretrained=cfg.encoder.pretrained,
            **kwargs
        )

    # args evaluate left-to-right: question encoder first, then ctx encoder
    fix_ctx_encoder = cfg.fix_ctx_encoder if hasattr(cfg, "fix_ctx_encoder") else False
    biencoder = BiEncoder(
        _make_encoder(), _make_encoder(), fix_ctx_encoder=fix_ctx_encoder
    )

    optimizer = None
    if not inference_only:
        optimizer = get_optimizer(
            biencoder,
            learning_rate=cfg.train.learning_rate,
            adam_eps=cfg.train.adam_eps,
            weight_decay=cfg.train.weight_decay,
        )

    tensorizer = get_bert_tensorizer(cfg)
    return tensorizer, biencoder, optimizer
21,721 | import logging
from typing import Tuple
import torch
from torch import Tensor as T
from torch import nn
from transformers.modeling_bert import BertConfig, BertModel
from transformers.optimization import AdamW
from transformers.tokenization_bert import BertTokenizer
from transformers.tokenization_roberta import RobertaTokenizer
from dpr.models.biencoder import BiEncoder
from dpr.utils.data_utils import Tensorizer
from .reader import Reader
def get_bert_tensorizer(cfg, tokenizer=None):
def get_optimizer(
model: nn.Module,
learning_rate: float = 1e-5,
adam_eps: float = 1e-8,
weight_decay: float = 0.0,
) -> torch.optim.Optimizer:
class HFBertEncoder(BertModel):
def __init__(self, config, project_dim: int = 0):
def init_encoder(
cls,
cfg_name: str,
projection_dim: int = 0,
dropout: float = 0.1,
pretrained: bool = True,
**kwargs
) -> BertModel:
def forward(
self,
input_ids: T,
token_type_ids: T,
attention_mask: T,
representation_token_pos=0,
) -> Tuple[T, ...]:
def get_out_size(self):
class Reader(nn.Module):
def __init__(self, encoder: nn.Module, hidden_size):
def forward(self, input_ids: T, attention_mask: T, start_positions=None, end_positions=None, answer_mask=None):
def _forward(self, input_ids, attention_mask):
def get_bert_reader_components(cfg, inference_only: bool = False, **kwargs):
    """Build (tensorizer, reader, optimizer) for the extractive Reader model.

    The optimizer is None when inference_only is True.
    """
    dropout = cfg.encoder.dropout if hasattr(cfg.encoder, "dropout") else 0.0
    encoder = HFBertEncoder.init_encoder(
        cfg.encoder.pretrained_model_cfg,
        projection_dim=cfg.encoder.projection_dim,
        dropout=dropout,
        pretrained=cfg.encoder.pretrained,
        **kwargs
    )

    reader = Reader(encoder, encoder.config.hidden_size)

    optimizer = None
    if not inference_only:
        optimizer = get_optimizer(
            reader,
            learning_rate=cfg.train.learning_rate,
            adam_eps=cfg.train.adam_eps,
            weight_decay=cfg.train.weight_decay,
        )

    tensorizer = get_bert_tensorizer(cfg)
    return tensorizer, reader, optimizer
21,722 | import logging
from typing import Tuple
import torch
from pytext.models.representations.transformer_sentence_encoder import TransformerSentenceEncoder
from pytext.optimizer.optimizers import AdamW
from torch import Tensor as T
from torch import nn
from .biencoder import BiEncoder
def get_optimizer(model: nn.Module, learning_rate: float = 1e-5, adam_eps: float = 1e-8,
                  weight_decay: float = 0.0) -> torch.optim.Optimizer:
    """Create a pytext AdamW optimizer for *model* from the given scalar
    hyperparameters (lr, eps, weight decay)."""
    adamw_cfg = AdamW.Config()
    adamw_cfg.lr = learning_rate
    adamw_cfg.eps = adam_eps
    adamw_cfg.weight_decay = weight_decay
    return AdamW.from_config(adamw_cfg, model)
class PytextBertEncoder(TransformerSentenceEncoder):
    """Pytext transformer sentence encoder with an optional linear projection
    on the pooled output, mirroring the HF-based encoder's interface."""

    def __init__(self, config: TransformerSentenceEncoder.Config,
                 padding_idx: int,
                 vocab_size: int,
                 projection_dim: int = 0,
                 *args,
                 **kwarg
                 ):
        # Second positional arg (False) disables the base class's output_dropout flag
        # per its constructor signature — confirm against the pytext version in use.
        TransformerSentenceEncoder.__init__(self, config, False, padding_idx, vocab_size, *args, **kwarg)

        assert config.embedding_dim > 0, 'Encoder hidden_size can\'t be zero'
        # Optional projection from embedding_dim to projection_dim.
        self.encode_proj = nn.Linear(config.embedding_dim, projection_dim) if projection_dim != 0 else None

    # NOTE(review): first parameter is `cls` — very likely a @classmethod whose
    # decorator was lost in this copy; confirm upstream.
    def init_encoder(cls, pretrained_file: str = None, projection_dim: int = 0, dropout: float = 0.1,
                     vocab_size: int = 0,
                     padding_idx: int = 0, **kwargs):
        """Build the encoder from the BERT-base pytext config, overriding all
        dropout probabilities and optionally loading a pretrained state file."""
        cfg = get_pytext_bert_base_cfg()

        if dropout != 0:
            cfg.dropout = dropout
            cfg.attention_dropout = dropout
            cfg.activation_dropout = dropout

        encoder = cls(cfg, padding_idx, vocab_size, projection_dim, **kwargs)

        if pretrained_file:
            logger.info('Loading pre-trained pytext encoder state from %s', pretrained_file)
            state = torch.load(pretrained_file)
            encoder.load_state_dict(state)
        return encoder

    def forward(self, input_ids: T, token_type_ids: T, attention_mask: T) -> Tuple[T, ...]:
        # Returns (None, pooled_output, None) to match the HF encoder's 3-tuple shape.
        pooled_output = super().forward((input_ids, attention_mask, token_type_ids, None))[0]
        if self.encode_proj:
            pooled_output = self.encode_proj(pooled_output)

        return None, pooled_output, None

    def get_out_size(self):
        # Output dimension seen by callers: projection size if present, else the
        # base encoder's representation dimension.
        if self.encode_proj:
            return self.encode_proj.out_features
        return self.representation_dim
class BiEncoder(nn.Module):
    """Bi-Encoder model component. Encapsulates query/question and context/passage encoders."""

    def __init__(
        self,
        question_model: nn.Module,
        ctx_model: nn.Module,
        fix_q_encoder: bool = False,
        fix_ctx_encoder: bool = False,
    ):
        # fix_*_encoder=True runs the corresponding tower under no_grad (frozen).
        super(BiEncoder, self).__init__()
        self.question_model = question_model
        self.ctx_model = ctx_model
        self.fix_q_encoder = fix_q_encoder
        self.fix_ctx_encoder = fix_ctx_encoder

    # NOTE(review): first parameter is the sub-model, not self — upstream this is
    # declared @staticmethod; the decorator appears lost in this copy. As written,
    # self.get_representation(encoder, ...) would bind self as sub_model. Confirm.
    def get_representation(
        sub_model: nn.Module,
        ids: T,
        segments: T,
        attn_mask: T,
        fix_encoder: bool = False,
        representation_token_pos=0,
    ) -> (T, T, T):
        # Runs one encoder; returns (sequence_output, pooled_output, hidden_states),
        # all None when ids is None. With fix_encoder=True the pass runs under
        # no_grad, and grads are re-enabled on the outputs in training mode.
        sequence_output = None
        pooled_output = None
        hidden_states = None
        if ids is not None:
            if fix_encoder:
                with torch.no_grad():
                    sequence_output, pooled_output, hidden_states = sub_model(
                        ids,
                        segments,
                        attn_mask,
                        representation_token_pos=representation_token_pos,
                    )
                if sub_model.training:
                    sequence_output.requires_grad_(requires_grad=True)
                    pooled_output.requires_grad_(requires_grad=True)
            else:
                sequence_output, pooled_output, hidden_states = sub_model(
                    ids,
                    segments,
                    attn_mask,
                    representation_token_pos=representation_token_pos,
                )

        return sequence_output, pooled_output, hidden_states

    def forward(
        self,
        question_ids: T,
        question_segments: T,
        question_attn_mask: T,
        context_ids: T,
        ctx_segments: T,
        ctx_attn_mask: T,
        encoder_type: str = None,
        representation_token_pos=0,
    ) -> Tuple[T, T]:
        # Encodes both inputs and returns their pooled representations; encoder_type
        # ("question"/"ctx") can swap which tower handles which input.
        q_encoder = (
            self.question_model
            if encoder_type is None or encoder_type == "question"
            else self.ctx_model
        )
        _q_seq, q_pooled_out, _q_hidden = self.get_representation(
            q_encoder,
            question_ids,
            question_segments,
            question_attn_mask,
            self.fix_q_encoder,
            representation_token_pos=representation_token_pos,
        )

        ctx_encoder = (
            self.ctx_model
            if encoder_type is None or encoder_type == "ctx"
            else self.question_model
        )
        _ctx_seq, ctx_pooled_out, _ctx_hidden = self.get_representation(
            ctx_encoder, context_ids, ctx_segments, ctx_attn_mask, self.fix_ctx_encoder
        )

        return q_pooled_out, ctx_pooled_out

    # TODO delete once moved to the new method
    # NOTE(review): first parameter is `cls` — upstream this is a @classmethod;
    # the decorator appears lost in this copy. Also, `question` is read near the
    # end of the loop below but never assigned here (NameError at runtime);
    # compare create_biencoder_input2 which sets question = sample.query. Confirm
    # and fix upstream.
    def create_biencoder_input(
        cls,
        samples: List,
        tensorizer: Tensorizer,
        insert_title: bool,
        num_hard_negatives: int = 0,
        num_other_negatives: int = 0,
        shuffle: bool = True,
        shuffle_positives: bool = False,
        hard_neg_fallback: bool = True,
    ) -> BiEncoderBatch:
        """
        Creates a batch of the biencoder training tuple.
        :param samples: list of data items (from json) to create the batch for
        :param tensorizer: components to create model input tensors from a text sequence
        :param insert_title: enables title insertion at the beginning of the context sequences
        :param num_hard_negatives: amount of hard negatives per question (taken from samples' pools)
        :param num_other_negatives: amount of other negatives per question (taken from samples' pools)
        :param shuffle: shuffles negative passages pools
        :param shuffle_positives: shuffles positive passages pools
        :return: BiEncoderBatch tuple
        """
        question_tensors = []
        ctx_tensors = []
        positive_ctx_indices = []
        hard_neg_ctx_indices = []
        for sample in samples:
            # ctx+ & [ctx-] composition
            # as of now, take the first(gold) ctx+ only
            if shuffle and shuffle_positives:
                positive_ctxs = sample["positive_ctxs"]
                positive_ctx = positive_ctxs[np.random.choice(len(positive_ctxs))]
            else:
                positive_ctx = sample["positive_ctxs"][0]

            neg_ctxs = sample["negative_ctxs"]
            hard_neg_ctxs = sample["hard_negative_ctxs"]

            if shuffle:
                random.shuffle(neg_ctxs)
                random.shuffle(hard_neg_ctxs)

            # Fall back to regular negatives when a sample has no hard negatives.
            if hard_neg_fallback and len(hard_neg_ctxs) == 0:
                hard_neg_ctxs = neg_ctxs[0:num_hard_negatives]

            neg_ctxs = neg_ctxs[0:num_other_negatives]
            hard_neg_ctxs = hard_neg_ctxs[0:num_hard_negatives]

            all_ctxs = [positive_ctx] + neg_ctxs + hard_neg_ctxs
            hard_negatives_start_idx = 1
            hard_negatives_end_idx = 1 + len(hard_neg_ctxs)

            current_ctxs_len = len(ctx_tensors)

            sample_ctxs_tensors = [
                tensorizer.text_to_tensor(
                    ctx["text"],
                    title=ctx["title"] if (insert_title and "title" in ctx) else None,
                )
                for ctx in all_ctxs
            ]

            ctx_tensors.extend(sample_ctxs_tensors)
            positive_ctx_indices.append(current_ctxs_len)
            hard_neg_ctx_indices.append(
                [
                    i
                    for i in range(
                        current_ctxs_len + hard_negatives_start_idx,
                        current_ctxs_len + hard_negatives_end_idx,
                    )
                ]
            )

            question_tensors.append(tensorizer.text_to_tensor(question))

        ctxs_tensor = torch.cat([ctx.view(1, -1) for ctx in ctx_tensors], dim=0)
        questions_tensor = torch.cat([q.view(1, -1) for q in question_tensors], dim=0)

        ctx_segments = torch.zeros_like(ctxs_tensor)
        question_segments = torch.zeros_like(questions_tensor)

        return BiEncoderBatch(
            questions_tensor,
            question_segments,
            ctxs_tensor,
            ctx_segments,
            positive_ctx_indices,
            hard_neg_ctx_indices,
            "question",
        )

    # NOTE(review): first parameter is `cls` — upstream this is a @classmethod;
    # the decorator appears lost in this copy. Confirm.
    def create_biencoder_input2(
        cls,
        samples: List[BiEncoderSample],
        tensorizer: Tensorizer,
        insert_title: bool,
        num_hard_negatives: int = 0,
        num_other_negatives: int = 0,
        shuffle: bool = True,
        shuffle_positives: bool = False,
        hard_neg_fallback: bool = True,
        query_token: str = None,
    ) -> BiEncoderBatch:
        """
        Creates a batch of the biencoder training tuple.
        :param samples: list of BiEncoderSample-s to create the batch for
        :param tensorizer: components to create model input tensors from a text sequence
        :param insert_title: enables title insertion at the beginning of the context sequences
        :param num_hard_negatives: amount of hard negatives per question (taken from samples' pools)
        :param num_other_negatives: amount of other negatives per question (taken from samples' pools)
        :param shuffle: shuffles negative passages pools
        :param shuffle_positives: shuffles positive passages pools
        :return: BiEncoderBatch tuple
        """
        question_tensors = []
        ctx_tensors = []
        positive_ctx_indices = []
        hard_neg_ctx_indices = []
        for sample in samples:
            # ctx+ & [ctx-] composition
            # as of now, take the first(gold) ctx+ only
            if shuffle and shuffle_positives:
                positive_ctxs = sample.positive_passages
                positive_ctx = positive_ctxs[np.random.choice(len(positive_ctxs))]
            else:
                positive_ctx = sample.positive_passages[0]

            neg_ctxs = sample.negative_passages
            hard_neg_ctxs = sample.hard_negative_passages
            question = sample.query
            # question = normalize_question(sample.query)

            if shuffle:
                random.shuffle(neg_ctxs)
                random.shuffle(hard_neg_ctxs)

            # Fall back to regular negatives when a sample has no hard negatives.
            if hard_neg_fallback and len(hard_neg_ctxs) == 0:
                hard_neg_ctxs = neg_ctxs[0:num_hard_negatives]

            neg_ctxs = neg_ctxs[0:num_other_negatives]
            hard_neg_ctxs = hard_neg_ctxs[0:num_hard_negatives]

            all_ctxs = [positive_ctx] + neg_ctxs + hard_neg_ctxs
            hard_negatives_start_idx = 1
            hard_negatives_end_idx = 1 + len(hard_neg_ctxs)

            current_ctxs_len = len(ctx_tensors)

            sample_ctxs_tensors = [
                tensorizer.text_to_tensor(
                    ctx.text, title=ctx.title if (insert_title and ctx.title) else None
                )
                for ctx in all_ctxs
            ]

            ctx_tensors.extend(sample_ctxs_tensors)
            positive_ctx_indices.append(current_ctxs_len)
            hard_neg_ctx_indices.append(
                [
                    i
                    for i in range(
                        current_ctxs_len + hard_negatives_start_idx,
                        current_ctxs_len + hard_negatives_end_idx,
                    )
                ]
            )

            if query_token:
                # TODO: tmp workaround for EL, remove or revise
                if query_token == "[START_ENT]":
                    query_span = _select_span_with_token(
                        question, tensorizer, token_str=query_token
                    )
                    question_tensors.append(query_span)
                else:
                    question_tensors.append(
                        tensorizer.text_to_tensor(" ".join([query_token, question]))
                    )
            else:
                question_tensors.append(tensorizer.text_to_tensor(question))

        ctxs_tensor = torch.cat([ctx.view(1, -1) for ctx in ctx_tensors], dim=0)
        questions_tensor = torch.cat([q.view(1, -1) for q in question_tensors], dim=0)

        ctx_segments = torch.zeros_like(ctxs_tensor)
        question_segments = torch.zeros_like(questions_tensor)

        return BiEncoderBatch(
            questions_tensor,
            question_segments,
            ctxs_tensor,
            ctx_segments,
            positive_ctx_indices,
            hard_neg_ctx_indices,
            "question",
        )

    def load_state(self, saved_state: CheckpointState):
        # Loads weights from a checkpoint state, first dropping HF position-id
        # buffers that newer transformers versions no longer expect.
        # TODO: make a long term HF compatibility fix
        if "question_model.embeddings.position_ids" in saved_state.model_dict:
            del saved_state.model_dict["question_model.embeddings.position_ids"]
            del saved_state.model_dict["ctx_model.embeddings.position_ids"]
        self.load_state_dict(saved_state.model_dict)

    def get_state_dict(self):
        # Full state dict covering both towers.
        return self.state_dict()
class BertTensorizer(Tensorizer):
    """Tensorizer backed by a HF BertTokenizer: converts (title, text) pairs into
    fixed-length token-id tensors with optional padding to max_length."""

    def __init__(
        self, tokenizer: BertTokenizer, max_length: int, pad_to_max: bool = True
    ):
        self.tokenizer = tokenizer
        self.max_length = max_length  # target sequence length for truncation/padding
        self.pad_to_max = pad_to_max  # when True, shorter sequences are padded out

    def text_to_tensor(
        self,
        text: str,
        title: str = None,
        add_special_tokens: bool = True,
        apply_max_len: bool = True,
    ):
        """Encode text (optionally prefixed by title as the first segment) into a
        1-D LongTensor, truncated/padded according to apply_max_len/pad_to_max."""
        text = text.strip()
        # tokenizer automatic padding is explicitly disabled since its inconsistent behavior
        # TODO: move max len to methods params?

        if title:
            token_ids = self.tokenizer.encode(
                title,
                text_pair=text,
                add_special_tokens=add_special_tokens,
                # 10000 effectively disables truncation when apply_max_len=False
                max_length=self.max_length if apply_max_len else 10000,
                pad_to_max_length=False,
                truncation=True,
            )
        else:
            token_ids = self.tokenizer.encode(
                text,
                add_special_tokens=add_special_tokens,
                max_length=self.max_length if apply_max_len else 10000,
                pad_to_max_length=False,
                truncation=True,
            )
        seq_len = self.max_length
        if self.pad_to_max and len(token_ids) < seq_len:
            token_ids = token_ids + [self.tokenizer.pad_token_id] * (
                seq_len - len(token_ids)
            )
        if len(token_ids) >= seq_len:
            token_ids = token_ids[0:seq_len] if apply_max_len else token_ids
            # Ensure a truncated sequence still ends with [SEP].
            token_ids[-1] = self.tokenizer.sep_token_id

        return torch.tensor(token_ids)

    def get_pair_separator_ids(self) -> T:
        # [SEP] id used to separate sequence pairs.
        return torch.tensor([self.tokenizer.sep_token_id])

    def get_pad_id(self) -> int:
        return self.tokenizer.pad_token_id

    def get_attn_mask(self, tokens_tensor: T) -> T:
        # Attention mask: True for every non-pad position.
        return tokens_tensor != self.get_pad_id()

    def is_sub_word_id(self, token_id: int):
        # WordPiece continuation tokens are prefixed with "##".
        token = self.tokenizer.convert_ids_to_tokens([token_id])[0]
        return token.startswith("##") or token.startswith(" ##")

    def to_string(self, token_ids, skip_special_tokens=True):
        return self.tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens)

    def set_pad_to_max(self, do_pad: bool):
        self.pad_to_max = do_pad

    def get_token_id(self, token: str) -> int:
        return self.tokenizer.vocab[token]
def get_bert_biencoder_components(args, inference_only: bool = False):
    """Assemble (tensorizer, biencoder, optimizer) built on pytext BERT encoders.

    :param args: hyperparameter namespace; reads pretrained_model_cfg,
        do_lower_case, pretrained_file, projection_dim, dropout,
        learning_rate, adam_eps, weight_decay, sequence_length
    :param inference_only: when True no optimizer is created (None is returned)
    :return: (tensorizer, biencoder, optimizer) tuple

    NOTE(review): `PytextBertEncoder`, `BiEncoder` and `get_optimizer` are not
    imported in this snippet as shown -- presumably supplied by the enclosing
    module; verify before use.
    """
    # since bert tokenizer is the same in HF and pytext/fairseq, just use HF's implementation here for now
    from .hf_models import get_tokenizer, BertTensorizer

    tokenizer = get_tokenizer(args.pretrained_model_cfg, do_lower_case=args.do_lower_case)

    question_encoder = PytextBertEncoder.init_encoder(args.pretrained_file,
                                                      projection_dim=args.projection_dim, dropout=args.dropout,
                                                      vocab_size=tokenizer.vocab_size,
                                                      padding_idx=tokenizer.pad_token_type_id
                                                      )

    ctx_encoder = PytextBertEncoder.init_encoder(args.pretrained_file,
                                                 projection_dim=args.projection_dim, dropout=args.dropout,
                                                 vocab_size=tokenizer.vocab_size,
                                                 padding_idx=tokenizer.pad_token_type_id
                                                 )

    biencoder = BiEncoder(question_encoder, ctx_encoder)

    # Optimizer is only needed for training.
    optimizer = get_optimizer(biencoder,
                              learning_rate=args.learning_rate,
                              adam_eps=args.adam_eps, weight_decay=args.weight_decay,
                              ) if not inference_only else None

    tensorizer = BertTensorizer(tokenizer, args.sequence_length)
    return tensorizer, biencoder, optimizer
21,723 | import logging
from typing import Tuple
import torch
from pytext.models.representations.transformer_sentence_encoder import TransformerSentenceEncoder
from pytext.optimizer.optimizers import AdamW
from torch import Tensor as T
from torch import nn
from .biencoder import BiEncoder
def get_pytext_bert_base_cfg():
    """Return a TransformerSentenceEncoder config set to BERT-base hyperparameters."""
    # BERT-base settings, applied in a single pass; insertion order of the
    # dict matches the original assignment order.
    bert_base_settings = {
        "embedding_dim": 768,
        "ffn_embedding_dim": 3072,
        "num_encoder_layers": 12,
        "num_attention_heads": 12,
        "num_segments": 2,
        "use_position_embeddings": True,
        "offset_positions_by_padding": True,
        "apply_bert_init": True,
        "encoder_normalize_before": True,
        "activation_fn": "gelu",
        "projection_dim": 0,
        "max_seq_len": 512,
        "multilingual": False,
        "freeze_embeddings": False,
        "n_trans_layers_to_freeze": 0,
        "use_torchscript": False,
    }
    cfg = TransformerSentenceEncoder.Config()
    for name, value in bert_base_settings.items():
        setattr(cfg, name, value)
    return cfg
21,724 | import collections
import logging
import random
from typing import Tuple, List
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor as T
from torch import nn
from dpr.data.biencoder_data import BiEncoderSample
from dpr.utils.data_utils import Tensorizer
from dpr.utils.model_utils import CheckpointState
The provided code snippet includes necessary dependencies for implementing the `dot_product_scores` function. Write a Python function `def dot_product_scores(q_vectors: T, ctx_vectors: T) -> T` to solve the following problem:
calculates q->ctx scores for every row in ctx_vector :param q_vector: :param ctx_vector: :return:
Here is the function:
def dot_product_scores(q_vectors: T, ctx_vectors: T) -> T:
    """
    calculates q->ctx scores for every row in ctx_vector
    :param q_vectors: question embeddings, shape (n1, D)
    :param ctx_vectors: context embeddings, shape (n2, D)
    :return: score matrix of shape (n1, n2); entry (i, j) is q_i . ctx_j
    """
    # One matmul against the transposed context matrix yields all pairwise
    # dot products at once.
    return torch.matmul(q_vectors, ctx_vectors.transpose(0, 1))
21,725 | import collections
import logging
import random
from typing import Tuple, List
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor as T
from torch import nn
from dpr.data.biencoder_data import BiEncoderSample
from dpr.utils.data_utils import Tensorizer
from dpr.utils.model_utils import CheckpointState
def cosine_scores(q_vector: T, ctx_vectors: T):
    """Row-wise cosine similarity between question and context vectors.

    NOTE(review): despite the original "n1 x n2" comment, dim=1 similarity is
    element-wise per row (or broadcasts a single q_vector) -- it does not
    produce a full pairwise matrix; confirm intended usage at call sites.
    """
    # torch.cosine_similarity is the same underlying op as F.cosine_similarity.
    return torch.cosine_similarity(q_vector, ctx_vectors, dim=1)
21,726 | import logging
from typing import Tuple
from fairseq.models.roberta.hub_interface import RobertaHubInterface
from fairseq.models.roberta.model import RobertaModel as FaiseqRobertaModel
from fairseq.optim.adam import FairseqAdam
from torch import Tensor as T
from torch import nn
from dpr.models.hf_models import get_roberta_tensorizer
from .biencoder import BiEncoder
def get_fairseq_adamw_optimizer(model: nn.Module, args):
    """Create a fairseq Adam optimizer over all model parameters.

    :param model: module whose parameters are optimized
    :param args: fairseq-style namespace; must provide learning_rate and the
        Adam hyperparameters FairseqAdam reads from it
    """
    # why: fairseq expects args.lr to be a list of per-stage learning rates.
    # Note this mutates the caller's args namespace.
    setattr(args, 'lr', [args.learning_rate])
    return FairseqAdam(args, model.parameters()).optimizer
class RobertaEncoder(nn.Module):
    """Thin wrapper exposing a fairseq RoBERTa model through the DPR encoder interface."""

    def __init__(self, fairseq_roberta_hub: RobertaHubInterface):
        super(RobertaEncoder, self).__init__()
        self.fairseq_roberta = fairseq_roberta_hub

    @classmethod
    def from_pretrained(cls, pretrained_dir_path: str):
        """Alternate constructor: load fairseq RoBERTa weights from a directory.

        BUG FIX: the @classmethod decorator was missing, so calling
        RobertaEncoder.from_pretrained(path) bound the path to `cls` and then
        failed with a missing-argument TypeError.
        """
        model = FaiseqRobertaModel.from_pretrained(pretrained_dir_path)
        return cls(model)

    def forward(self, input_ids: T, token_type_ids: T, attention_mask: T) -> Tuple[T, ...]:
        """Run RoBERTa and return (sequence_output, cls_vector, None).

        token_type_ids / attention_mask are accepted for interface parity but
        are not forwarded -- fairseq's extract_features works from input_ids.
        """
        roberta_out = self.fairseq_roberta.extract_features(input_ids)
        # The first-position ([CLS]-equivalent) vector is the pooled output.
        cls_out = roberta_out[:, 0, :]
        return roberta_out, cls_out, None

    def get_out_size(self):
        """Output embedding dimensionality -- not implemented for this wrapper."""
        raise NotImplementedError
def get_roberta_tensorizer(args, tokenizer=None):
    """Build a RobertaTensorizer, constructing a default RoBERTa tokenizer when none is supplied.

    :param args: reads pretrained_model_cfg, do_lower_case, sequence_length
    :param tokenizer: optional pre-built tokenizer to reuse

    NOTE(review): `get_roberta_tokenizer` and `RobertaTensorizer` are not
    imported in this snippet as shown -- presumably they come from
    dpr.models.hf_models; verify the imports before use.
    """
    if not tokenizer:
        tokenizer = get_roberta_tokenizer(
            args.pretrained_model_cfg, do_lower_case=args.do_lower_case
        )
    return RobertaTensorizer(tokenizer, args.sequence_length)
class BiEncoder(nn.Module):
    """Bi-Encoder model component. Encapsulates query/question and context/passage encoders."""

    def __init__(
        self,
        question_model: nn.Module,
        ctx_model: nn.Module,
        fix_q_encoder: bool = False,
        fix_ctx_encoder: bool = False,
    ):
        # fix_*_encoder: when True that encoder is run frozen (no gradients).
        super(BiEncoder, self).__init__()
        self.question_model = question_model
        self.ctx_model = ctx_model
        self.fix_q_encoder = fix_q_encoder
        self.fix_ctx_encoder = fix_ctx_encoder

    # NOTE(review): takes no `self` and has no @staticmethod decorator --
    # presumably a decorator was lost in extraction; confirm against the
    # original module before invoking via an instance.
    def get_representation(
        sub_model: nn.Module,
        ids: T,
        segments: T,
        attn_mask: T,
        fix_encoder: bool = False,
        representation_token_pos=0,
    ) -> (T, T, T):
        """Run one encoder; return (sequence_output, pooled_output, hidden_states).

        With fix_encoder=True the forward pass runs under no_grad; in training
        mode the outputs are then re-marked as requiring grad so downstream
        layers can still backpropagate up to (but not into) the frozen encoder.
        All three results are None when `ids` is None.
        """
        sequence_output = None
        pooled_output = None
        hidden_states = None
        if ids is not None:
            if fix_encoder:
                with torch.no_grad():
                    sequence_output, pooled_output, hidden_states = sub_model(
                        ids,
                        segments,
                        attn_mask,
                        representation_token_pos=representation_token_pos,
                    )

                if sub_model.training:
                    sequence_output.requires_grad_(requires_grad=True)
                    pooled_output.requires_grad_(requires_grad=True)
            else:
                sequence_output, pooled_output, hidden_states = sub_model(
                    ids,
                    segments,
                    attn_mask,
                    representation_token_pos=representation_token_pos,
                )

        return sequence_output, pooled_output, hidden_states

    def forward(
        self,
        question_ids: T,
        question_segments: T,
        question_attn_mask: T,
        context_ids: T,
        ctx_segments: T,
        ctx_attn_mask: T,
        encoder_type: str = None,
        representation_token_pos=0,
    ) -> Tuple[T, T]:
        """Encode questions and contexts; return their pooled representations.

        :param encoder_type: "question"/"ctx" selects which sub-encoder handles
            each input; None uses the default pairing
        :return: (question pooled output, context pooled output)
        """
        q_encoder = (
            self.question_model
            if encoder_type is None or encoder_type == "question"
            else self.ctx_model
        )
        _q_seq, q_pooled_out, _q_hidden = self.get_representation(
            q_encoder,
            question_ids,
            question_segments,
            question_attn_mask,
            self.fix_q_encoder,
            representation_token_pos=representation_token_pos,
        )

        ctx_encoder = (
            self.ctx_model
            if encoder_type is None or encoder_type == "ctx"
            else self.question_model
        )
        _ctx_seq, ctx_pooled_out, _ctx_hidden = self.get_representation(
            ctx_encoder, context_ids, ctx_segments, ctx_attn_mask, self.fix_ctx_encoder
        )

        return q_pooled_out, ctx_pooled_out

    # TODO delete once moved to the new method
    # NOTE(review): takes `cls` but has no @classmethod decorator --
    # presumably lost in extraction; confirm before calling via the class.
    def create_biencoder_input(
        cls,
        samples: List,
        tensorizer: Tensorizer,
        insert_title: bool,
        num_hard_negatives: int = 0,
        num_other_negatives: int = 0,
        shuffle: bool = True,
        shuffle_positives: bool = False,
        hard_neg_fallback: bool = True,
    ) -> BiEncoderBatch:
        """
        Creates a batch of the biencoder training tuple.
        :param samples: list of data items (from json) to create the batch for
        :param tensorizer: components to create model input tensors from a text sequence
        :param insert_title: enables title insertion at the beginning of the context sequences
        :param num_hard_negatives: amount of hard negatives per question (taken from samples' pools)
        :param num_other_negatives: amount of other negatives per question (taken from samples' pools)
        :param shuffle: shuffles negative passages pools
        :param shuffle_positives: shuffles positive passages pools
        :return: BiEncoderBatch tuple
        """
        question_tensors = []
        ctx_tensors = []
        positive_ctx_indices = []
        hard_neg_ctx_indices = []

        for sample in samples:
            # ctx+ & [ctx-] composition
            # as of now, take the first(gold) ctx+ only
            if shuffle and shuffle_positives:
                positive_ctxs = sample["positive_ctxs"]
                positive_ctx = positive_ctxs[np.random.choice(len(positive_ctxs))]
            else:
                positive_ctx = sample["positive_ctxs"][0]

            neg_ctxs = sample["negative_ctxs"]
            hard_neg_ctxs = sample["hard_negative_ctxs"]

            if shuffle:
                random.shuffle(neg_ctxs)
                random.shuffle(hard_neg_ctxs)

            # why: fall back to ordinary negatives when no hard negatives exist.
            if hard_neg_fallback and len(hard_neg_ctxs) == 0:
                hard_neg_ctxs = neg_ctxs[0:num_hard_negatives]

            neg_ctxs = neg_ctxs[0:num_other_negatives]
            hard_neg_ctxs = hard_neg_ctxs[0:num_hard_negatives]

            # Layout per sample: [positive, other negatives..., hard negatives...]
            all_ctxs = [positive_ctx] + neg_ctxs + hard_neg_ctxs
            hard_negatives_start_idx = 1
            hard_negatives_end_idx = 1 + len(hard_neg_ctxs)

            current_ctxs_len = len(ctx_tensors)

            sample_ctxs_tensors = [
                tensorizer.text_to_tensor(
                    ctx["text"],
                    title=ctx["title"] if (insert_title and "title" in ctx) else None,
                )
                for ctx in all_ctxs
            ]

            ctx_tensors.extend(sample_ctxs_tensors)
            positive_ctx_indices.append(current_ctxs_len)
            hard_neg_ctx_indices.append(
                [
                    i
                    for i in range(
                        current_ctxs_len + hard_negatives_start_idx,
                        current_ctxs_len + hard_negatives_end_idx,
                    )
                ]
            )

            # NOTE(review): `question` is never assigned in this method (compare
            # create_biencoder_input2, which sets question = sample.query) --
            # as written this raises NameError; a line was presumably lost.
            question_tensors.append(tensorizer.text_to_tensor(question))

        ctxs_tensor = torch.cat([ctx.view(1, -1) for ctx in ctx_tensors], dim=0)
        questions_tensor = torch.cat([q.view(1, -1) for q in question_tensors], dim=0)

        ctx_segments = torch.zeros_like(ctxs_tensor)
        question_segments = torch.zeros_like(questions_tensor)

        return BiEncoderBatch(
            questions_tensor,
            question_segments,
            ctxs_tensor,
            ctx_segments,
            positive_ctx_indices,
            hard_neg_ctx_indices,
            "question",
        )

    # NOTE(review): takes `cls` but has no @classmethod decorator -- see above.
    def create_biencoder_input2(
        cls,
        samples: List[BiEncoderSample],
        tensorizer: Tensorizer,
        insert_title: bool,
        num_hard_negatives: int = 0,
        num_other_negatives: int = 0,
        shuffle: bool = True,
        shuffle_positives: bool = False,
        hard_neg_fallback: bool = True,
        query_token: str = None,
    ) -> BiEncoderBatch:
        """
        Creates a batch of the biencoder training tuple.
        :param samples: list of BiEncoderSample-s to create the batch for
        :param tensorizer: components to create model input tensors from a text sequence
        :param insert_title: enables title insertion at the beginning of the context sequences
        :param num_hard_negatives: amount of hard negatives per question (taken from samples' pools)
        :param num_other_negatives: amount of other negatives per question (taken from samples' pools)
        :param shuffle: shuffles negative passages pools
        :param shuffle_positives: shuffles positive passages pools
        :param query_token: optional token prepended to (or marking a span in) every query
        :return: BiEncoderBatch tuple
        """
        question_tensors = []
        ctx_tensors = []
        positive_ctx_indices = []
        hard_neg_ctx_indices = []

        for sample in samples:
            # ctx+ & [ctx-] composition
            # as of now, take the first(gold) ctx+ only
            if shuffle and shuffle_positives:
                positive_ctxs = sample.positive_passages
                positive_ctx = positive_ctxs[np.random.choice(len(positive_ctxs))]
            else:
                positive_ctx = sample.positive_passages[0]

            neg_ctxs = sample.negative_passages
            hard_neg_ctxs = sample.hard_negative_passages
            question = sample.query
            # question = normalize_question(sample.query)

            if shuffle:
                random.shuffle(neg_ctxs)
                random.shuffle(hard_neg_ctxs)

            # why: fall back to ordinary negatives when no hard negatives exist.
            if hard_neg_fallback and len(hard_neg_ctxs) == 0:
                hard_neg_ctxs = neg_ctxs[0:num_hard_negatives]

            neg_ctxs = neg_ctxs[0:num_other_negatives]
            hard_neg_ctxs = hard_neg_ctxs[0:num_hard_negatives]

            # Layout per sample: [positive, other negatives..., hard negatives...]
            all_ctxs = [positive_ctx] + neg_ctxs + hard_neg_ctxs
            hard_negatives_start_idx = 1
            hard_negatives_end_idx = 1 + len(hard_neg_ctxs)

            current_ctxs_len = len(ctx_tensors)

            sample_ctxs_tensors = [
                tensorizer.text_to_tensor(
                    ctx.text, title=ctx.title if (insert_title and ctx.title) else None
                )
                for ctx in all_ctxs
            ]

            ctx_tensors.extend(sample_ctxs_tensors)
            positive_ctx_indices.append(current_ctxs_len)
            hard_neg_ctx_indices.append(
                [
                    i
                    for i in range(
                        current_ctxs_len + hard_negatives_start_idx,
                        current_ctxs_len + hard_negatives_end_idx,
                    )
                ]
            )

            if query_token:
                # TODO: tmp workaround for EL, remove or revise
                if query_token == "[START_ENT]":
                    query_span = _select_span_with_token(
                        question, tensorizer, token_str=query_token
                    )
                    question_tensors.append(query_span)
                else:
                    question_tensors.append(
                        tensorizer.text_to_tensor(" ".join([query_token, question]))
                    )
            else:
                question_tensors.append(tensorizer.text_to_tensor(question))

        ctxs_tensor = torch.cat([ctx.view(1, -1) for ctx in ctx_tensors], dim=0)
        questions_tensor = torch.cat([q.view(1, -1) for q in question_tensors], dim=0)

        ctx_segments = torch.zeros_like(ctxs_tensor)
        question_segments = torch.zeros_like(questions_tensor)

        return BiEncoderBatch(
            questions_tensor,
            question_segments,
            ctxs_tensor,
            ctx_segments,
            positive_ctx_indices,
            hard_neg_ctx_indices,
            "question",
        )

    def load_state(self, saved_state: CheckpointState):
        """Load checkpoint weights, dropping stale HF position_ids buffers."""
        # TODO: make a long term HF compatibility fix
        if "question_model.embeddings.position_ids" in saved_state.model_dict:
            del saved_state.model_dict["question_model.embeddings.position_ids"]
            del saved_state.model_dict["ctx_model.embeddings.position_ids"]
        self.load_state_dict(saved_state.model_dict)

    def get_state_dict(self):
        """Return the module's state dict for checkpointing."""
        return self.state_dict()
def get_roberta_biencoder_components(args, inference_only: bool = False, **kwargs):
    """Assemble (tensorizer, biencoder, optimizer) built on fairseq RoBERTa encoders.

    :param args: reads pretrained_file (used for both encoders) plus the
        tensorizer/optimizer hyperparameters
    :param inference_only: when True no optimizer is created (None is returned)
    :return: (tensorizer, biencoder, optimizer) tuple
    """
    question_encoder = RobertaEncoder.from_pretrained(args.pretrained_file)
    ctx_encoder = RobertaEncoder.from_pretrained(args.pretrained_file)
    biencoder = BiEncoder(question_encoder, ctx_encoder)
    # Optimizer is only needed for training.
    optimizer = get_fairseq_adamw_optimizer(biencoder, args) if not inference_only else None

    tensorizer = get_roberta_tensorizer(args)

    return tensorizer, biencoder, optimizer
21,727 | import collections
import logging
from typing import List
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor as T
from torch.nn import CrossEntropyLoss
from dpr.data.reader_data import ReaderSample, ReaderPassage
from dpr.utils.model_utils import init_weights
def _calc_mml(loss_tensor):
    """Marginal maximum likelihood loss over multiple candidate answer spans.

    `loss_tensor` holds per-span cross-entropy losses where 0 marks padded
    (invalid) entries; requires CUDA (builds a helper tensor via .cuda()).
    """
    # exp(-loss) converts losses to likelihoods; subtracting 1e10 for entries
    # with loss == 0 zeroes out padded spans so they don't contribute.
    marginal_likelihood = torch.sum(torch.exp(
        - loss_tensor - 1e10 * (loss_tensor == 0).float()), 1)
    # why: rows with no valid span get likelihood bumped to 1 so log() is 0
    # instead of -inf.
    return -torch.sum(torch.log(marginal_likelihood +
                                torch.ones(loss_tensor.size(0)).cuda() * (marginal_likelihood == 0).float()))
def compute_loss(start_positions, end_positions, answer_mask, start_logits, end_logits, relevance_logits, N, M):
    """Extractive-reader loss: span MML loss plus passage-selection (switch) loss.

    :param N: number of questions in the batch
    :param M: passages per question
    Inputs arrive in (N, M, ...) or flattened layouts and are reshaped to
    (N*M, ...) here. Requires CUDA (.cuda() calls below).
    """
    start_positions = start_positions.view(N * M, -1)
    end_positions = end_positions.view(N * M, -1)
    answer_mask = answer_mask.view(N * M, -1)

    start_logits = start_logits.view(N * M, -1)
    end_logits = end_logits.view(N * M, -1)
    relevance_logits = relevance_logits.view(N * M)

    answer_mask = answer_mask.type(torch.FloatTensor).cuda()

    # why: positions clamped to seq_len coincide with ignore_index, so padded
    # answer slots contribute zero loss.
    ignored_index = start_logits.size(1)
    start_positions.clamp_(0, ignored_index)
    end_positions.clamp_(0, ignored_index)
    loss_fct = CrossEntropyLoss(reduce=False, ignore_index=ignored_index)

    # compute switch loss -- the gold passage is at index 0 of every group.
    relevance_logits = relevance_logits.view(N, M)
    switch_labels = torch.zeros(N, dtype=torch.long).cuda()
    switch_loss = torch.sum(loss_fct(relevance_logits, switch_labels))

    # compute span loss: per-answer start/end CE, masked by answer validity.
    start_losses = [(loss_fct(start_logits, _start_positions) * _span_mask)
                    for (_start_positions, _span_mask)
                    in zip(torch.unbind(start_positions, dim=1), torch.unbind(answer_mask, dim=1))]

    end_losses = [(loss_fct(end_logits, _end_positions) * _span_mask)
                  for (_end_positions, _span_mask)
                  in zip(torch.unbind(end_positions, dim=1), torch.unbind(answer_mask, dim=1))]
    loss_tensor = torch.cat([t.unsqueeze(1) for t in start_losses], dim=1) + \
                  torch.cat([t.unsqueeze(1) for t in end_losses], dim=1)

    # Max over the per-question passage dimension, then marginalize over spans.
    loss_tensor = loss_tensor.view(N, M, -1).max(dim=1)[0]
    span_loss = _calc_mml(loss_tensor)
    return span_loss + switch_loss
21,728 | import collections
import logging
from typing import List
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor as T
from torch.nn import CrossEntropyLoss
from dpr.data.reader_data import ReaderSample, ReaderPassage
from dpr.utils.model_utils import init_weights
logger = logging.getLogger()
ReaderBatch = collections.namedtuple('ReaderBatch', ['input_ids', 'start_positions', 'end_positions', 'answers_mask'])
def _create_question_passages_tensors(positives: List[ReaderPassage], negatives: List[ReaderPassage], total_size: int,
                                      empty_ids: T,
                                      max_n_answers: int,
                                      pad_token_id: int,
                                      is_train: bool,
                                      is_random: bool = True):
    """Build the fixed-size (total_size passages) input block for one question.

    Train mode: one sampled positive at row 0 plus negatives, together with
    answer start/end/mask tensors (only row 0 is populated). Eval mode:
    negatives only, with None answer tensors. Returns None when no usable
    positive exists in train mode.
    """
    max_len = empty_ids.size(0)
    if is_train:
        # select just one positive
        positive_idx = _get_positive_idx(positives, max_len, is_random)
        if positive_idx is None:
            return None

        positive_a_spans = _get_answer_spans(positive_idx, positives, max_len)[0: max_n_answers]

        answer_starts = [span[0] for span in positive_a_spans]
        answer_ends = [span[1] for span in positive_a_spans]

        assert all(s < max_len for s in answer_starts)
        assert all(e < max_len for e in answer_ends)

        positive_input_ids = _pad_to_len(positives[positive_idx].sequence_ids, pad_token_id, max_len)

        # Answer tensors are (total_size, max_n_answers); only row 0 (the
        # selected positive) carries real values.
        answer_starts_tensor = torch.zeros((total_size, max_n_answers)).long()
        answer_starts_tensor[0, 0:len(answer_starts)] = torch.tensor(answer_starts)

        answer_ends_tensor = torch.zeros((total_size, max_n_answers)).long()
        answer_ends_tensor[0, 0:len(answer_ends)] = torch.tensor(answer_ends)

        answer_mask = torch.zeros((total_size, max_n_answers), dtype=torch.long)
        answer_mask[0, 0:len(answer_starts)] = torch.tensor([1 for _ in range(len(answer_starts))])

        positives_selected = [positive_input_ids]

    else:
        positives_selected = []
        answer_starts_tensor = None
        answer_ends_tensor = None
        answer_mask = None

    positives_num = len(positives_selected)

    negative_idxs = np.random.permutation(range(len(negatives))) if is_random else range(
        len(negatives) - positives_num)

    negative_idxs = negative_idxs[:total_size - positives_num]

    negatives_selected = [_pad_to_len(negatives[i].sequence_ids, pad_token_id, max_len) for i in negative_idxs]

    # why: pad with all-pad "empty" passages so every question contributes a
    # block of exactly total_size passages.
    while len(negatives_selected) < total_size - positives_num:
        negatives_selected.append(empty_ids.clone())

    input_ids = torch.stack([t for t in positives_selected + negatives_selected], dim=0)

    return input_ids, answer_starts_tensor, answer_ends_tensor, answer_mask
class ReaderSample(object):
    """
    Container to collect all Q&A passages data per single question
    """

    # BUG FIX: the passage parameters used mutable default arguments ([]), so
    # every ReaderSample created without explicit lists shared the same list
    # objects across instances. Defaults are now None with a fresh list per
    # instance; the resulting attribute values are unchanged.
    # (Annotations are quoted so the class does not require ReaderPassage to be
    # importable at definition time.)
    def __init__(
        self,
        question: str,
        answers: List,
        positive_passages: "List[ReaderPassage]" = None,
        negative_passages: "List[ReaderPassage]" = None,
        passages: "List[ReaderPassage]" = None,
    ):
        self.question = question
        self.answers = answers
        self.positive_passages = positive_passages if positive_passages is not None else []
        self.negative_passages = negative_passages if negative_passages is not None else []
        self.passages = passages if passages is not None else []

    def on_serialize(self):
        """Prepare every held passage for pickling (delegates to the passage)."""
        for passage in self.passages + self.positive_passages + self.negative_passages:
            passage.on_serialize()

    def on_deserialize(self):
        """Restore every held passage after unpickling (delegates to the passage)."""
        for passage in self.passages + self.positive_passages + self.negative_passages:
            passage.on_deserialize()
The provided code snippet includes necessary dependencies for implementing the `create_reader_input` function. Write a Python function `def create_reader_input(pad_token_id: int, samples: List[ReaderSample], passages_per_question: int, max_length: int, max_n_answers: int, is_train: bool, shuffle: bool, ) -> ReaderBatch` to solve the following problem:
Creates a reader batch instance out of a list of ReaderSample-s :param pad_token_id: id of the padding token :param samples: list of samples to create the batch for :param passages_per_question: amount of passages for every question in a batch :param max_length: max model input sequence length :param max_n_answers: max num of answers per single question :param is_train: if the samples are for a train set :param shuffle: should passages selection be randomized :return: ReaderBatch instance
Here is the function:
def create_reader_input(pad_token_id: int,
                        samples: List[ReaderSample],
                        passages_per_question: int,
                        max_length: int,
                        max_n_answers: int,
                        is_train: bool,
                        shuffle: bool,
                        ) -> ReaderBatch:
    """
    Creates a reader batch instance out of a list of ReaderSample-s
    :param pad_token_id: id of the padding token
    :param samples: list of samples to create the batch for
    :param passages_per_question: amount of passages for every question in a batch
    :param max_length: max model input sequence length
    :param max_n_answers: max num of answers per single question
    :param is_train: if the samples are for a train set
    :param shuffle: should passages selection be randomized
    :return: ReaderBatch instance
    """
    input_ids = []
    start_positions = []
    end_positions = []
    answers_masks = []
    # Template passage made entirely of padding; reused for under-filled questions.
    empty_sequence = torch.Tensor().new_full((max_length,), pad_token_id, dtype=torch.long)

    for sample in samples:
        positive_ctxs = sample.positive_passages
        # why: at eval time there is no positive/negative split -- all
        # retrieved passages are candidates.
        negative_ctxs = sample.negative_passages if is_train else sample.passages

        sample_tensors = _create_question_passages_tensors(positive_ctxs,
                                                           negative_ctxs,
                                                           passages_per_question,
                                                           empty_sequence,
                                                           max_n_answers,
                                                           pad_token_id,
                                                           is_train,
                                                           is_random=shuffle)
        if not sample_tensors:
            # Questions with no usable positive passage are dropped from the batch.
            logger.warning('No valid passages combination for question=%s ', sample.question)
            continue
        sample_input_ids, starts_tensor, ends_tensor, answer_mask = sample_tensors
        input_ids.append(sample_input_ids)
        if is_train:
            start_positions.append(starts_tensor)
            end_positions.append(ends_tensor)
            answers_masks.append(answer_mask)

    input_ids = torch.cat([ids.unsqueeze(0) for ids in input_ids], dim=0)

    if is_train:
        start_positions = torch.stack(start_positions, dim=0)
        end_positions = torch.stack(end_positions, dim=0)
        answers_masks = torch.stack(answers_masks, dim=0)

    return ReaderBatch(input_ids, start_positions, end_positions, answers_masks)
21,729 | import json
import logging
import pickle
import random
import itertools
import math
import torch
from torch import Tensor as T
from typing import List, Iterator, Callable, Tuple
logger = logging.getLogger()


def read_serialized_data_from_files(paths: List[str]) -> List:
    """Load and concatenate pickled lists from each of the given files, in order."""
    results = []
    for path in paths:
        with open(path, "rb") as reader:
            logger.info("Reading file %s", path)
            results.extend(pickle.load(reader))
            logger.info("Aggregated data size: {}".format(len(results)))
    logger.info("Total data size: {}".format(len(results)))
    return results
21,730 | import json
import logging
import pickle
import random
import itertools
import math
import torch
from torch import Tensor as T
from typing import List, Iterator, Callable, Tuple
logger = logging.getLogger()


def read_data_from_json_files(paths: List[str]) -> List:
    """Load and concatenate JSON lists from each of the given files, in order.

    :param paths: JSON file paths; each file is expected to hold a list of samples
    :return: single list with the contents of every file
    """
    results = []
    for i, path in enumerate(paths):
        with open(path, "r", encoding="utf-8") as f:
            logger.info("Reading file %s" % path)
            data = json.load(f)
            # BUG FIX: previously `results = data`, which discarded every file
            # except the last one while the log still claimed an aggregate size.
            results.extend(data)
            logger.info("Aggregated data size: {}".format(len(results)))
    return results
21,731 | import collections
import glob
import logging
import os
from typing import List
import torch
from torch import nn
from torch.optim.lr_scheduler import LambdaLR
from torch.serialization import default_restore_location
def setup_for_distributed_mode(
    model: nn.Module,
    optimizer: torch.optim.Optimizer,
    device: object,
    n_gpu: int = 1,
    local_rank: int = -1,
    fp16: bool = False,
    fp16_opt_level: str = "O1",
) -> (nn.Module, torch.optim.Optimizer):
    """Move the model to `device` and wrap it for multi-GPU / distributed / fp16 training.

    :param local_rank: -1 disables DistributedDataParallel wrapping
    :param fp16: enable NVIDIA apex mixed precision (apex must be installed)
    :param fp16_opt_level: apex AMP optimization level (e.g. "O1")
    :return: (possibly wrapped model, possibly amp-initialized optimizer)
    """
    model.to(device)
    if fp16:
        try:
            import apex
            from apex import amp

            # why: einsum needs an explicit half-precision registration under apex AMP.
            apex.amp.register_half_function(torch, "einsum")
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
            )

        model, optimizer = amp.initialize(model, optimizer, opt_level=fp16_opt_level)

    if n_gpu > 1:
        model = torch.nn.DataParallel(model)

    if local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[device if device else local_rank],
            output_device=local_rank,
            find_unused_parameters=True,
        )
    return model, optimizer
21,732 | import collections
import glob
import logging
import os
from typing import List
import torch
from torch import nn
from torch.optim.lr_scheduler import LambdaLR
from torch.serialization import default_restore_location
def move_to_cuda(sample):
    """Recursively move every tensor inside `sample` onto the default CUDA device.

    Containers (dict/list/tuple) are rebuilt with the same structure; non-tensor
    leaves are returned unchanged.

    NOTE: an empty sized input returns {} regardless of its original type --
    preserved for backward compatibility with existing callers.
    """
    if len(sample) == 0:
        return {}

    def _move_to_cuda(maybe_tensor):
        if torch.is_tensor(maybe_tensor):
            return maybe_tensor.cuda()
        elif isinstance(maybe_tensor, dict):
            return {key: _move_to_cuda(value) for key, value in maybe_tensor.items()}
        elif isinstance(maybe_tensor, list):
            return [_move_to_cuda(x) for x in maybe_tensor]
        elif isinstance(maybe_tensor, tuple):
            # BUG FIX: tuples used to come back as lists, which broke callers
            # that unpack fixed-arity tuples or need hashable results.
            return tuple(_move_to_cuda(x) for x in maybe_tensor)
        else:
            return maybe_tensor

    return _move_to_cuda(sample)
21,733 | import collections
import glob
import logging
import os
from typing import List
import torch
from torch import nn
from torch.optim.lr_scheduler import LambdaLR
from torch.serialization import default_restore_location
The provided code snippet includes necessary dependencies for implementing the `get_schedule_linear` function. Write a Python function `def get_schedule_linear( optimizer, warmup_steps, total_training_steps, steps_shift=0, last_epoch=-1, )` to solve the following problem:
Create a schedule with a learning rate that decreases linearly after linearly increasing during a warmup period.
Here is the function:
def get_schedule_linear(
    optimizer,
    warmup_steps,
    total_training_steps,
    steps_shift=0,
    last_epoch=-1,
):
    """Create a schedule with a learning rate that decreases linearly after
    linearly increasing during a warmup period.

    :param steps_shift: offset added to the step counter (resuming mid-schedule)
    :param last_epoch: passed through to LambdaLR
    """

    def lr_lambda(current_step):
        step = current_step + steps_shift
        if step < warmup_steps:
            # Linear ramp 0 -> 1 across the warmup window.
            return step / max(1, warmup_steps)
        decay = (total_training_steps - step) / max(1, total_training_steps - warmup_steps)
        # why: floor at 1e-7 so the multiplier never hits zero or goes negative.
        return max(1e-7, decay)

    return LambdaLR(optimizer, lr_lambda, last_epoch)
21,734 | import collections
import glob
import logging
import os
from typing import List
import torch
from torch import nn
from torch.optim.lr_scheduler import LambdaLR
from torch.serialization import default_restore_location
def init_weights(modules: List):
    """BERT-style in-place initialization for a list of submodules."""
    for layer in modules:
        is_linear = isinstance(layer, nn.Linear)
        if is_linear or isinstance(layer, nn.Embedding):
            # BERT init: zero-mean normal, std 0.02, for weight matrices.
            layer.weight.data.normal_(mean=0.0, std=0.02)
        elif isinstance(layer, nn.LayerNorm):
            # LayerNorm starts as the identity transform.
            layer.bias.data.zero_()
            layer.weight.data.fill_(1.0)
        if is_linear and layer.bias is not None:
            layer.bias.data.zero_()
21,735 | import collections
import glob
import logging
import os
from typing import List
import torch
from torch import nn
from torch.optim.lr_scheduler import LambdaLR
from torch.serialization import default_restore_location
def get_model_obj(model: nn.Module):
    """Unwrap a DataParallel/DDP container; return the model itself otherwise."""
    return getattr(model, "module", model)
21,736 | import collections
import glob
import logging
import os
from typing import List
import torch
from torch import nn
from torch.optim.lr_scheduler import LambdaLR
from torch.serialization import default_restore_location
logger = logging.getLogger()


def get_model_file(args, file_prefix) -> str:
    """Pick the checkpoint file to load.

    An explicitly-configured args.model_file wins when it exists on disk;
    otherwise the newest file in args.output_dir matching `file_prefix*` is
    chosen. Returns None when nothing is found.
    """
    if args.model_file and os.path.exists(args.model_file):
        return args.model_file

    candidates = (
        glob.glob(os.path.join(args.output_dir, file_prefix + "*"))
        if args.output_dir
        else []
    )
    logger.info("Checkpoint files %s", candidates)
    if not candidates:
        return None
    # why: the most recently created checkpoint is assumed to be the latest.
    return max(candidates, key=os.path.getctime)
21,737 | import collections
import glob
import logging
import os
from typing import List
import torch
from torch import nn
from torch.optim.lr_scheduler import LambdaLR
from torch.serialization import default_restore_location
logger = logging.getLogger()

# Immutable view over the fields a training checkpoint dict must contain.
CheckpointState = collections.namedtuple(
    "CheckpointState",
    [
        "model_dict",
        "optimizer_dict",
        "scheduler_dict",
        "offset",
        "epoch",
        "encoder_params",
    ],
)


def _restore_to_cpu(storage, location):
    """torch.load hook: land every tensor on CPU regardless of where it was saved."""
    return default_restore_location(storage, "cpu")


def load_states_from_checkpoint(model_file: str) -> CheckpointState:
    """Deserialize a checkpoint file saved as a dict of CheckpointState fields."""
    logger.info("Reading saved model from %s", model_file)
    state_dict = torch.load(model_file, map_location=_restore_to_cpu)
    logger.info("model_state_dict keys %s", state_dict.keys())
    return CheckpointState(**state_dict)
21,738 | import glob
import json
import logging
import pickle
import time
from typing import List, Tuple, Dict, Iterator
import hydra
import numpy as np
import torch
from omegaconf import DictConfig, OmegaConf
from torch import Tensor as T
from torch import nn
from dpr.data.biencoder_data import RepTokenSelector
from dpr.data.qa_validation import calculate_matches, calculate_chunked_matches
from dpr.data.retriever_data import KiltCsvCtxSrc, TableChunk
from dpr.indexer.faiss_indexers import (
DenseIndexer,
)
from dpr.models import init_biencoder_components
from dpr.models.biencoder import BiEncoder, _select_span_with_token
from dpr.options import setup_logger, setup_cfg_gpu, set_cfg_params_from_state
from dpr.utils.data_utils import Tensorizer
from dpr.utils.model_utils import (
setup_for_distributed_mode,
get_model_obj,
load_states_from_checkpoint,
)
logger = logging.getLogger()
class RepTokenSelector(object):
    """Strategy interface: choose which token position(s) supply the sequence representation."""

    def get_positions(self, input_ids: T, tenzorizer: Tensorizer):
        # Subclasses return the per-sample token position(s) whose encoder
        # output is used as the fixed-size representation vector.
        raise NotImplementedError
class BiEncoder(nn.Module):
    """Bi-Encoder model component. Encapsulates query/question and context/passage encoders."""

    def __init__(
        self,
        question_model: nn.Module,
        ctx_model: nn.Module,
        fix_q_encoder: bool = False,
        fix_ctx_encoder: bool = False,
    ):
        # fix_* flags freeze the corresponding encoder's parameters during training
        # (see get_representation: the frozen pass runs under no_grad).
        super(BiEncoder, self).__init__()
        self.question_model = question_model
        self.ctx_model = ctx_model
        self.fix_q_encoder = fix_q_encoder
        self.fix_ctx_encoder = fix_ctx_encoder

    # NOTE(review): declared without `self` -- upstream DPR marks this as a
    # @staticmethod and the decorator appears lost in extraction. As a plain
    # method, `self.get_representation(...)` in forward() would pass one
    # positional argument too many. Confirm and restore the decorator.
    def get_representation(
        sub_model: nn.Module,
        ids: T,
        segments: T,
        attn_mask: T,
        fix_encoder: bool = False,
        representation_token_pos=0,
    ) -> (T, T, T):
        """Run one encoder over a batch.

        Returns (sequence_output, pooled_output, hidden_states); all None when
        `ids` is None. With fix_encoder=True the pass runs under no_grad; grads
        are re-enabled on the outputs while training so layers above still learn.
        """
        sequence_output = None
        pooled_output = None
        hidden_states = None
        if ids is not None:
            if fix_encoder:
                with torch.no_grad():
                    sequence_output, pooled_output, hidden_states = sub_model(
                        ids,
                        segments,
                        attn_mask,
                        representation_token_pos=representation_token_pos,
                    )
                if sub_model.training:
                    # keep the autograd graph usable above the frozen encoder
                    sequence_output.requires_grad_(requires_grad=True)
                    pooled_output.requires_grad_(requires_grad=True)
            else:
                sequence_output, pooled_output, hidden_states = sub_model(
                    ids,
                    segments,
                    attn_mask,
                    representation_token_pos=representation_token_pos,
                )
        return sequence_output, pooled_output, hidden_states

    def forward(
        self,
        question_ids: T,
        question_segments: T,
        question_attn_mask: T,
        context_ids: T,
        ctx_segments: T,
        ctx_attn_mask: T,
        encoder_type: str = None,
        representation_token_pos=0,
    ) -> Tuple[T, T]:
        """Encode questions and contexts; returns their pooled representations.

        encoder_type deliberately allows swapping the two encoders: "ctx" routes
        questions through the context model and vice versa.
        """
        q_encoder = (
            self.question_model
            if encoder_type is None or encoder_type == "question"
            else self.ctx_model
        )
        _q_seq, q_pooled_out, _q_hidden = self.get_representation(
            q_encoder,
            question_ids,
            question_segments,
            question_attn_mask,
            self.fix_q_encoder,
            representation_token_pos=representation_token_pos,
        )
        ctx_encoder = (
            self.ctx_model
            if encoder_type is None or encoder_type == "ctx"
            else self.question_model
        )
        _ctx_seq, ctx_pooled_out, _ctx_hidden = self.get_representation(
            ctx_encoder, context_ids, ctx_segments, ctx_attn_mask, self.fix_ctx_encoder
        )
        return q_pooled_out, ctx_pooled_out

    # TODO delete once moved to the new method
    # NOTE(review): takes `cls` -- upstream declares this @classmethod; the
    # decorator appears lost in extraction. Verify.
    def create_biencoder_input(
        cls,
        samples: List,
        tensorizer: Tensorizer,
        insert_title: bool,
        num_hard_negatives: int = 0,
        num_other_negatives: int = 0,
        shuffle: bool = True,
        shuffle_positives: bool = False,
        hard_neg_fallback: bool = True,
    ) -> BiEncoderBatch:
        """
        Creates a batch of the biencoder training tuple.
        :param samples: list of data items (from json) to create the batch for
        :param tensorizer: components to create model input tensors from a text sequence
        :param insert_title: enables title insertion at the beginning of the context sequences
        :param num_hard_negatives: amount of hard negatives per question (taken from samples' pools)
        :param num_other_negatives: amount of other negatives per question (taken from samples' pools)
        :param shuffle: shuffles negative passages pools
        :param shuffle_positives: shuffles positive passages pools
        :return: BiEncoderBatch tuple
        """
        question_tensors = []
        ctx_tensors = []
        positive_ctx_indices = []
        hard_neg_ctx_indices = []
        for sample in samples:
            # ctx+ & [ctx-] composition
            # as of now, take the first(gold) ctx+ only
            if shuffle and shuffle_positives:
                positive_ctxs = sample["positive_ctxs"]
                positive_ctx = positive_ctxs[np.random.choice(len(positive_ctxs))]
            else:
                positive_ctx = sample["positive_ctxs"][0]
            neg_ctxs = sample["negative_ctxs"]
            hard_neg_ctxs = sample["hard_negative_ctxs"]
            if shuffle:
                random.shuffle(neg_ctxs)
                random.shuffle(hard_neg_ctxs)
            # promote ordinary negatives when no mined hard negatives exist
            if hard_neg_fallback and len(hard_neg_ctxs) == 0:
                hard_neg_ctxs = neg_ctxs[0:num_hard_negatives]
            neg_ctxs = neg_ctxs[0:num_other_negatives]
            hard_neg_ctxs = hard_neg_ctxs[0:num_hard_negatives]
            all_ctxs = [positive_ctx] + neg_ctxs + hard_neg_ctxs
            hard_negatives_start_idx = 1
            hard_negatives_end_idx = 1 + len(hard_neg_ctxs)
            current_ctxs_len = len(ctx_tensors)
            sample_ctxs_tensors = [
                tensorizer.text_to_tensor(
                    ctx["text"],
                    title=ctx["title"] if (insert_title and "title" in ctx) else None,
                )
                for ctx in all_ctxs
            ]
            ctx_tensors.extend(sample_ctxs_tensors)
            positive_ctx_indices.append(current_ctxs_len)
            hard_neg_ctx_indices.append(
                [
                    i
                    for i in range(
                        current_ctxs_len + hard_negatives_start_idx,
                        current_ctxs_len + hard_negatives_end_idx,
                    )
                ]
            )
            # BUG(review): `question` is never assigned in this method (upstream
            # derives it from sample["question"]); as written this raises
            # NameError. Left unfixed here -- see create_biencoder_input2.
            question_tensors.append(tensorizer.text_to_tensor(question))
        ctxs_tensor = torch.cat([ctx.view(1, -1) for ctx in ctx_tensors], dim=0)
        questions_tensor = torch.cat([q.view(1, -1) for q in question_tensors], dim=0)
        ctx_segments = torch.zeros_like(ctxs_tensor)
        question_segments = torch.zeros_like(questions_tensor)
        return BiEncoderBatch(
            questions_tensor,
            question_segments,
            ctxs_tensor,
            ctx_segments,
            positive_ctx_indices,
            hard_neg_ctx_indices,
            "question",
        )

    # NOTE(review): takes `cls` -- upstream declares this @classmethod; the
    # decorator appears lost in extraction. Verify.
    def create_biencoder_input2(
        cls,
        samples: List[BiEncoderSample],
        tensorizer: Tensorizer,
        insert_title: bool,
        num_hard_negatives: int = 0,
        num_other_negatives: int = 0,
        shuffle: bool = True,
        shuffle_positives: bool = False,
        hard_neg_fallback: bool = True,
        query_token: str = None,
    ) -> BiEncoderBatch:
        """
        Creates a batch of the biencoder training tuple.
        :param samples: list of BiEncoderSample-s to create the batch for
        :param tensorizer: components to create model input tensors from a text sequence
        :param insert_title: enables title insertion at the beginning of the context sequences
        :param num_hard_negatives: amount of hard negatives per question (taken from samples' pools)
        :param num_other_negatives: amount of other negatives per question (taken from samples' pools)
        :param shuffle: shuffles negative passages pools
        :param shuffle_positives: shuffles positive passages pools
        :return: BiEncoderBatch tuple
        """
        question_tensors = []
        ctx_tensors = []
        positive_ctx_indices = []
        hard_neg_ctx_indices = []
        for sample in samples:
            # ctx+ & [ctx-] composition
            # as of now, take the first(gold) ctx+ only
            if shuffle and shuffle_positives:
                positive_ctxs = sample.positive_passages
                positive_ctx = positive_ctxs[np.random.choice(len(positive_ctxs))]
            else:
                positive_ctx = sample.positive_passages[0]
            neg_ctxs = sample.negative_passages
            hard_neg_ctxs = sample.hard_negative_passages
            question = sample.query
            # question = normalize_question(sample.query)
            if shuffle:
                random.shuffle(neg_ctxs)
                random.shuffle(hard_neg_ctxs)
            # promote ordinary negatives when no mined hard negatives exist
            if hard_neg_fallback and len(hard_neg_ctxs) == 0:
                hard_neg_ctxs = neg_ctxs[0:num_hard_negatives]
            neg_ctxs = neg_ctxs[0:num_other_negatives]
            hard_neg_ctxs = hard_neg_ctxs[0:num_hard_negatives]
            all_ctxs = [positive_ctx] + neg_ctxs + hard_neg_ctxs
            hard_negatives_start_idx = 1
            hard_negatives_end_idx = 1 + len(hard_neg_ctxs)
            current_ctxs_len = len(ctx_tensors)
            sample_ctxs_tensors = [
                tensorizer.text_to_tensor(
                    ctx.text, title=ctx.title if (insert_title and ctx.title) else None
                )
                for ctx in all_ctxs
            ]
            ctx_tensors.extend(sample_ctxs_tensors)
            positive_ctx_indices.append(current_ctxs_len)
            hard_neg_ctx_indices.append(
                [
                    i
                    for i in range(
                        current_ctxs_len + hard_negatives_start_idx,
                        current_ctxs_len + hard_negatives_end_idx,
                    )
                ]
            )
            if query_token:
                # TODO: tmp workaround for EL, remove or revise
                if query_token == "[START_ENT]":
                    # entity linking: keep the [START_ENT] marker inside the
                    # truncated query window
                    query_span = _select_span_with_token(
                        question, tensorizer, token_str=query_token
                    )
                    question_tensors.append(query_span)
                else:
                    question_tensors.append(
                        tensorizer.text_to_tensor(" ".join([query_token, question]))
                    )
            else:
                question_tensors.append(tensorizer.text_to_tensor(question))
        ctxs_tensor = torch.cat([ctx.view(1, -1) for ctx in ctx_tensors], dim=0)
        questions_tensor = torch.cat([q.view(1, -1) for q in question_tensors], dim=0)
        ctx_segments = torch.zeros_like(ctxs_tensor)
        question_segments = torch.zeros_like(questions_tensor)
        return BiEncoderBatch(
            questions_tensor,
            question_segments,
            ctxs_tensor,
            ctx_segments,
            positive_ctx_indices,
            hard_neg_ctx_indices,
            "question",
        )

    def load_state(self, saved_state: CheckpointState):
        """Load model weights from a checkpoint state, dropping HF position_ids buffers."""
        # TODO: make a long term HF compatibility fix
        if "question_model.embeddings.position_ids" in saved_state.model_dict:
            del saved_state.model_dict["question_model.embeddings.position_ids"]
            del saved_state.model_dict["ctx_model.embeddings.position_ids"]
        self.load_state_dict(saved_state.model_dict)

    def get_state_dict(self):
        """Return the module's state dict (checkpoint-saving hook)."""
        return self.state_dict()
def _select_span_with_token(
    text: str, tensorizer: Tensorizer, token_str: str = "[START_ENT]"
) -> T:
    """Tensorize *text* so the special marker `token_str` survives max-len truncation.

    If the marker falls outside the default (truncated) tensorization, the text is
    re-tokenized without a length cap and a window around the marker is selected,
    with a small random shift to avoid overfitting to a fixed marker position.

    :raises RuntimeError: if the marker token does not occur in the text at all
    """
    token_id = tensorizer.get_token_id(token_str)
    query_tensor = tensorizer.text_to_tensor(text)
    if token_id not in query_tensor:
        query_tensor_full = tensorizer.text_to_tensor(text, apply_max_len=False)
        token_indexes = (query_tensor_full == token_id).nonzero()
        if token_indexes.size(0) > 0:
            start_pos = token_indexes[0, 0].item()
            # add some randomization to avoid overfitting to a specific token position
            left_shift = int(tensorizer.max_length / 2)
            rnd_shift = int((rnd.random() - 0.5) * left_shift / 2)
            left_shift += rnd_shift
            query_tensor = query_tensor_full[start_pos - left_shift :]
            # re-attach [CLS] if the window chopped it off
            cls_id = tensorizer.tokenizer.cls_token_id
            if query_tensor[0] != cls_id:
                query_tensor = torch.cat([torch.tensor([cls_id]), query_tensor], dim=0)
            from dpr.models.reader import _pad_to_len

            query_tensor = _pad_to_len(
                query_tensor, tensorizer.get_pad_id(), tensorizer.max_length
            )
            # make sure the sequence ends with [SEP] after padding/truncation
            query_tensor[-1] = tensorizer.tokenizer.sep_token_id
            assert token_id in query_tensor, "query_tensor={}".format(query_tensor)
            return query_tensor
        else:
            raise RuntimeError(
                "[START_ENT] token not found for Entity Linking sample query={}".format(
                    text
                )
            )
    else:
        return query_tensor
class Tensorizer(object):
    """
    Component for all text to model input data conversions and related utility methods
    """

    # Note: title, if present, is supposed to be put before text (i.e. optional title + document body)
    def text_to_tensor(
        self,
        text: str,
        title: str = None,
        add_special_tokens: bool = True,
        apply_max_len: bool = True,
    ):
        """Tokenize text (optionally prefixed by title) into a tensor of token ids."""
        raise NotImplementedError

    def get_pair_separator_ids(self) -> T:
        """Return the token id(s) used to separate the two texts of a pair."""
        raise NotImplementedError

    def get_pad_id(self) -> int:
        """Return the padding token id."""
        raise NotImplementedError

    def get_attn_mask(self, tokens_tensor: T):
        """Build the attention mask (non-pad positions) for a batch of token ids."""
        raise NotImplementedError

    def is_sub_word_id(self, token_id: int):
        """True if token_id encodes a word-continuation sub-token."""
        raise NotImplementedError

    def to_string(self, token_ids, skip_special_tokens=True):
        """Detokenize a sequence of token ids back into text."""
        raise NotImplementedError

    def set_pad_to_max(self, pad: bool):
        """Toggle padding of every sequence up to the maximum length."""
        raise NotImplementedError

    def get_token_id(self, token: str) -> int:
        """Return the vocabulary id of a single token string."""
        raise NotImplementedError
def generate_question_vectors(
    question_encoder: torch.nn.Module,
    tensorizer: Tensorizer,
    questions: List[str],
    bsz: int,
    query_token: str = None,
    selector: RepTokenSelector = None,
) -> T:
    """Encode questions into dense query vectors in batches of *bsz*.

    query_token, when set, is prepended to every question; the special value
    "[START_ENT]" instead selects a span around that marker (entity linking).
    selector, when given, chooses the representation token position(s).
    NOTE: batches are moved with .cuda(), so this requires a GPU.
    """
    n = len(questions)
    query_vectors = []
    with torch.no_grad():
        for j, batch_start in enumerate(range(0, n, bsz)):
            batch_questions = questions[batch_start : batch_start + bsz]
            if query_token:
                # TODO: tmp workaround for EL, remove or revise
                if query_token == "[START_ENT]":
                    batch_token_tensors = [
                        _select_span_with_token(q, tensorizer, token_str=query_token)
                        for q in batch_questions
                    ]
                else:
                    batch_token_tensors = [
                        tensorizer.text_to_tensor(" ".join([query_token, q]))
                        for q in batch_questions
                    ]
            else:
                batch_token_tensors = [
                    tensorizer.text_to_tensor(q) for q in batch_questions
                ]
            q_ids_batch = torch.stack(batch_token_tensors, dim=0).cuda()
            q_seg_batch = torch.zeros_like(q_ids_batch).cuda()
            q_attn_mask = tensorizer.get_attn_mask(q_ids_batch)
            if selector:
                # custom representation token (e.g. for entity linking)
                rep_positions = selector.get_positions(q_ids_batch, tensorizer)
                _, out, _ = BiEncoder.get_representation(
                    question_encoder,
                    q_ids_batch,
                    q_seg_batch,
                    q_attn_mask,
                    representation_token_pos=rep_positions,
                )
            else:
                _, out, _ = question_encoder(q_ids_batch, q_seg_batch, q_attn_mask)
            query_vectors.extend(out.cpu().split(1, dim=0))
            if len(query_vectors) % 100 == 0:
                logger.info("Encoded queries %d", len(query_vectors))
    query_tensor = torch.cat(query_vectors, dim=0)
    logger.info("Total encoded queries tensor %s", query_tensor.size())
    assert query_tensor.size(0) == len(questions)
    return query_tensor
21,739 | import glob
import json
import logging
import pickle
import time
from typing import List, Tuple, Dict, Iterator
import hydra
import numpy as np
import torch
from omegaconf import DictConfig, OmegaConf
from torch import Tensor as T
from torch import nn
from dpr.data.biencoder_data import RepTokenSelector
from dpr.data.qa_validation import calculate_matches, calculate_chunked_matches
from dpr.data.retriever_data import KiltCsvCtxSrc, TableChunk
from dpr.indexer.faiss_indexers import (
DenseIndexer,
)
from dpr.models import init_biencoder_components
from dpr.models.biencoder import BiEncoder, _select_span_with_token
from dpr.options import setup_logger, setup_cfg_gpu, set_cfg_params_from_state
from dpr.utils.data_utils import Tensorizer
from dpr.utils.model_utils import (
setup_for_distributed_mode,
get_model_obj,
load_states_from_checkpoint,
)
logger = logging.getLogger()
def calculate_matches(
    all_docs: Dict[object, Tuple[str, str]],
    answers: List[List[str]],
    closest_docs: List[Tuple[List[object], List[float]]],
    workers_num: int,
    match_type: str,
) -> QAMatchStats:
    """Evaluate answer presence in retrieved documents over a large collection.

    Forks a pool of worker processes to match answers against the top documents
    of every question, then merges the per-question results.

    :param all_docs: entire documents database, doc_id -> (doc_text, title)
    :param answers: one list of acceptable answers per question
    :param closest_docs: top retrieved document ids with their scores
    :param workers_num: number of parallel worker processes
    :param match_type: answer-matching mode (see has_answer)
    :return: QAMatchStats with cumulative top-k hit counts and the detailed
        per-question, per-document match flags
    """
    # worker processes read the documents db through this module-level global
    global dpr_all_documents
    dpr_all_documents = all_docs
    logger.info("dpr_all_documents size %d", len(dpr_all_documents))

    tokenizer = SimpleTokenizer(**{})
    workers = ProcessPool(processes=workers_num)
    logger.info("Matching answers in top docs...")
    scores = workers.map(
        partial(check_answer, match_type=match_type, tokenizer=tokenizer),
        zip(answers, closest_docs),
    )
    logger.info("Per question validation results len=%d", len(scores))

    # cumulative hits: a hit at rank r counts for every k >= r
    n_docs = len(closest_docs[0][0])
    top_k_hits = [0] * n_docs
    for question_hits in scores:
        best_hit = next((rank for rank, hit in enumerate(question_hits) if hit), None)
        if best_hit is not None:
            for k in range(best_hit, n_docs):
                top_k_hits[k] += 1
    return QAMatchStats(top_k_hits, scores)
def validate(
    passages: Dict[object, Tuple[str, str]],
    answers: List[List[str]],
    result_ctx_ids: List[Tuple[List[object], List[float]]],
    workers_num: int,
    match_type: str,
) -> List[List[bool]]:
    """Run answer matching over retrieval results and log top-k hit rates.

    Returns the detailed per-question, per-document answer-hit flags.
    """
    stats = calculate_matches(
        passages, answers, result_ctx_ids, workers_num, match_type
    )
    hits = stats.top_k_hits
    logger.info("Validation results: top k documents hits %s", hits)
    accuracy = [v / len(result_ctx_ids) for v in hits]
    logger.info("Validation results: top k documents hits accuracy %s", accuracy)
    return stats.questions_doc_hits
21,740 | import glob
import json
import logging
import pickle
import time
from typing import List, Tuple, Dict, Iterator
import hydra
import numpy as np
import torch
from omegaconf import DictConfig, OmegaConf
from torch import Tensor as T
from torch import nn
from dpr.data.biencoder_data import RepTokenSelector
from dpr.data.qa_validation import calculate_matches, calculate_chunked_matches
from dpr.data.retriever_data import KiltCsvCtxSrc, TableChunk
from dpr.indexer.faiss_indexers import (
DenseIndexer,
)
from dpr.models import init_biencoder_components
from dpr.models.biencoder import BiEncoder, _select_span_with_token
from dpr.options import setup_logger, setup_cfg_gpu, set_cfg_params_from_state
from dpr.utils.data_utils import Tensorizer
from dpr.utils.model_utils import (
setup_for_distributed_mode,
get_model_obj,
load_states_from_checkpoint,
)
logger = logging.getLogger()
def save_results(
    passages: Dict[object, Tuple[str, str]],
    questions: List[str],
    answers: List[List[str]],
    top_passages_and_scores: List[Tuple[List[object], List[float]]],
    per_question_hits: List[List[bool]],
    out_file: str,
):
    """Join retrieved passage texts with their scores and answer-hit labels, dump to JSON.

    Writes one record per question: {"question", "answers", "ctxs": [...]}.
    """
    merged_data = []
    for idx, question in enumerate(questions):
        doc_ids, doc_scores = top_passages_and_scores[idx]
        hits = per_question_hits[idx]
        docs = [passages[doc_id] for doc_id in doc_ids]
        merged_data.append(
            {
                "question": question,
                "answers": answers[idx],
                "ctxs": [
                    {
                        "id": doc_ids[c],
                        "title": docs[c][1],
                        "text": docs[c][0],
                        "score": str(doc_scores[c]),
                        "has_answer": hits[c],
                    }
                    for c in range(len(hits))
                ],
            }
        )
    with open(out_file, "w") as writer:
        writer.write(json.dumps(merged_data, indent=4) + "\n")
    logger.info("Saved results * scores to %s", out_file)
21,741 | import glob
import json
import logging
import pickle
import time
from typing import List, Tuple, Dict, Iterator
import hydra
import numpy as np
import torch
from omegaconf import DictConfig, OmegaConf
from torch import Tensor as T
from torch import nn
from dpr.data.biencoder_data import RepTokenSelector
from dpr.data.qa_validation import calculate_matches, calculate_chunked_matches
from dpr.data.retriever_data import KiltCsvCtxSrc, TableChunk
from dpr.indexer.faiss_indexers import (
DenseIndexer,
)
from dpr.models import init_biencoder_components
from dpr.models.biencoder import BiEncoder, _select_span_with_token
from dpr.options import setup_logger, setup_cfg_gpu, set_cfg_params_from_state
from dpr.utils.data_utils import Tensorizer
from dpr.utils.model_utils import (
setup_for_distributed_mode,
get_model_obj,
load_states_from_checkpoint,
)
logger = logging.getLogger()
def iterate_encoded_files(
    vector_files: list, path_id_prefixes: List = None
) -> Iterator[Tuple]:
    """Stream (id, vector, ...) items out of pickled embedding files.

    When path_id_prefixes is given, the i-th prefix is prepended to every
    document id from the i-th file unless the id already starts with it.
    """
    for file_idx, vector_file in enumerate(vector_files):
        logger.info("Reading file %s", vector_file)
        id_prefix = path_id_prefixes[file_idx] if path_id_prefixes else None
        with open(vector_file, "rb") as reader:
            for doc in pickle.load(reader):
                doc = list(doc)
                if id_prefix and not str(doc[0]).startswith(id_prefix):
                    doc[0] = id_prefix + str(doc[0])
                yield doc
21,742 | import glob
import json
import logging
import pickle
import time
from typing import List, Tuple, Dict, Iterator
import hydra
import numpy as np
import torch
from omegaconf import DictConfig, OmegaConf
from torch import Tensor as T
from torch import nn
from dpr.data.biencoder_data import RepTokenSelector
from dpr.data.qa_validation import calculate_matches, calculate_chunked_matches
from dpr.data.retriever_data import KiltCsvCtxSrc, TableChunk
from dpr.indexer.faiss_indexers import (
DenseIndexer,
)
from dpr.models import init_biencoder_components
from dpr.models.biencoder import BiEncoder, _select_span_with_token
from dpr.options import setup_logger, setup_cfg_gpu, set_cfg_params_from_state
from dpr.utils.data_utils import Tensorizer
from dpr.utils.model_utils import (
setup_for_distributed_mode,
get_model_obj,
load_states_from_checkpoint,
)
logger = logging.getLogger()
def calculate_chunked_matches(
    all_docs: Dict[object, TableChunk],
    answers: List[List[str]],
    closest_docs: List[Tuple[List[object], List[float]]],
    workers_num: int,
    match_type: str,
) -> QATableMatchStats:
    # NOTE(review): the implementation body appears to have been lost in
    # extraction -- only this namedtuple statement follows the signature.
    # Upstream DPR (dpr/data/qa_validation.py) computes per-chunk and per-table
    # hit statistics here; restore the implementation from there.
    TableChunk = collections.namedtuple("TableChunk", ["text", "title", "table_id"])
def validate_tables(
    passages: Dict[object, TableChunk],
    answers: List[List[str]],
    result_ctx_ids: List[Tuple[List[object], List[float]]],
    workers_num: int,
    match_type: str,
) -> List[List[bool]]:
    """Validate table retrieval results at chunk and whole-table granularity.

    Logs raw hit counts and accuracies for both levels; returns the cumulative
    per-rank chunk hit counts.
    """
    stats = calculate_chunked_matches(
        passages, answers, result_ctx_ids, workers_num, match_type
    )
    chunk_hits = stats.top_k_chunk_hits
    table_hits = stats.top_k_table_hits

    logger.info("Validation results: top k documents hits %s", chunk_hits)
    chunk_accuracy = [v / len(result_ctx_ids) for v in chunk_hits]
    logger.info("Validation results: top k table chunk hits accuracy %s", chunk_accuracy)

    logger.info("Validation results: top k tables hits %s", table_hits)
    table_accuracy = [v / len(result_ctx_ids) for v in table_hits]
    logger.info("Validation results: top k tables accuracy %s", table_accuracy)
    return stats.top_k_chunk_hits
21,743 | import logging
import math
import os
import pathlib
import pickle
from typing import List, Tuple
import hydra
import numpy as np
import torch
from omegaconf import DictConfig, OmegaConf
from torch import nn
from dpr.data.biencoder_data import BiEncoderPassage
from dpr.models import init_biencoder_components
from dpr.options import set_cfg_params_from_state, setup_cfg_gpu, setup_logger
from dpr.utils.data_utils import Tensorizer
from dpr.utils.model_utils import (
setup_for_distributed_mode,
get_model_obj,
load_states_from_checkpoint,
move_to_device,
)
logger = logging.getLogger()
BiEncoderPassage = collections.namedtuple("BiEncoderPassage", ["text", "title"])
class Tensorizer(object):
    """
    Component for all text to model input data conversions and related utility methods
    """

    # Note: title, if present, is supposed to be put before text (i.e. optional title + document body)
    def text_to_tensor(
        self,
        text: str,
        title: str = None,
        add_special_tokens: bool = True,
        apply_max_len: bool = True,
    ):
        """Tokenize text (optionally prefixed by title) into a tensor of token ids."""
        raise NotImplementedError

    def get_pair_separator_ids(self) -> T:
        """Return the token id(s) used to separate the two texts of a pair."""
        raise NotImplementedError

    def get_pad_id(self) -> int:
        """Return the padding token id."""
        raise NotImplementedError

    def get_attn_mask(self, tokens_tensor: T):
        """Build the attention mask (non-pad positions) for a batch of token ids."""
        raise NotImplementedError

    def is_sub_word_id(self, token_id: int):
        """True if token_id encodes a word-continuation sub-token."""
        raise NotImplementedError

    def to_string(self, token_ids, skip_special_tokens=True):
        """Detokenize a sequence of token ids back into text."""
        raise NotImplementedError

    def set_pad_to_max(self, pad: bool):
        """Toggle padding of every sequence up to the maximum length."""
        raise NotImplementedError

    def get_token_id(self, token: str) -> int:
        """Return the vocabulary id of a single token string."""
        raise NotImplementedError
def move_to_device(sample, device):
    """Recursively move every tensor inside a (possibly nested) sample to *device*.

    Dicts, lists and tuples are traversed; non-tensor leaves pass through
    untouched. NOTE: tuples come back as lists and an empty sample yields {},
    mirroring long-standing behavior that callers may depend on.
    """
    if len(sample) == 0:
        return {}

    def _convert(obj):
        if torch.is_tensor(obj):
            return obj.to(device)
        if isinstance(obj, dict):
            return {key: _convert(value) for key, value in obj.items()}
        if isinstance(obj, (list, tuple)):
            return [_convert(item) for item in obj]
        return obj

    return _convert(sample)
def gen_ctx_vectors(
    cfg: DictConfig,
    ctx_rows: List[Tuple[object, BiEncoderPassage]],
    model: nn.Module,
    tensorizer: Tensorizer,
    insert_title: bool = True,
) -> List[Tuple[object, np.array]]:
    """Encode passages into dense vectors in batches of cfg.batch_size.

    Returns one (passage_id, vector, *extra) tuple per input row; extra fields
    are forwarded only for rows longer than 3 elements.
    """
    bsz = cfg.batch_size
    results = []
    total = 0
    for batch_start in range(0, len(ctx_rows), bsz):
        batch = ctx_rows[batch_start : batch_start + bsz]
        token_tensors = [
            tensorizer.text_to_tensor(
                row[1].text, title=row[1].title if insert_title else None
            )
            for row in batch
        ]
        ids_batch = move_to_device(torch.stack(token_tensors, dim=0), cfg.device)
        seg_batch = move_to_device(torch.zeros_like(ids_batch), cfg.device)
        attn_mask = move_to_device(tensorizer.get_attn_mask(ids_batch), cfg.device)
        with torch.no_grad():
            _, out, _ = model(ids_batch, seg_batch, attn_mask)
        out = out.cpu()

        ctx_ids = [row[0] for row in batch]
        assert len(ctx_ids) == out.size(0)
        total += len(ctx_ids)

        extra_info = [row[3:] for row in batch] if len(batch[0]) > 3 else []
        # TODO: refactor to avoid 'if'
        if extra_info:
            results.extend(
                (ctx_ids[i], out[i].view(-1).numpy(), *extra_info[i])
                for i in range(out.size(0))
            )
        else:
            results.extend(
                (ctx_ids[i], out[i].view(-1).numpy()) for i in range(out.size(0))
            )
        if total % 10 == 0:
            logger.info("Encoded passages %d", total)
    return results
21,744 | import sys
import statistics
from collections import Counter
def load_reference(path_to_reference):
    """Load the reference set of relevant passages from a file.

    Args:
        path_to_reference (str): path to a file to load.

    Returns:
        dict: query_id (int) -> list of relevant passage ids (int).
    """
    with open(path_to_reference, 'r') as f:
        return load_reference_from_stream(f)
def load_candidate(path_to_candidate):
    """Load ranked candidate passages from a file.

    Args:
        path_to_candidate (str): path to file to load.

    Returns:
        dict: query_id (int) -> list of up to 1000 passage ids (int),
        ranked by relevance and importance.
    """
    with open(path_to_candidate, 'r') as f:
        return load_candidate_from_stream(f)
def quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
    """Perform quality checks on the candidate ranking.

    Verifies that no passage id (other than the 0 padding id) is ranked more
    than once for the same query.

    Args:
        qids_to_relevant_passageids (dict): qid -> relevant passage ids,
            as read in with load_reference / load_reference_from_stream.
            Currently unused but kept for interface compatibility.
        qids_to_ranked_candidate_passages (dict): qid -> ranked candidate passage ids.

    Returns:
        (bool, str): whether the candidate set is allowed, and a message to be
        shown in case of a problem ('' when everything is fine).
    """
    # Fix: removed the unused candidate_set/ref_set locals the original built.
    message = ''
    allowed = True
    for qid in qids_to_ranked_candidate_passages:
        # count repeated pids; pid 0 is padding and may legitimately repeat
        duplicate_pids = set(
            item
            for item, count in Counter(qids_to_ranked_candidate_passages[qid]).items()
            if count > 1
        )
        if len(duplicate_pids - set([0])) > 0:
            message = "Cannot rank a passage multiple times for a single query. QID={qid}, PID={pid}".format(
                qid=qid, pid=list(duplicate_pids)[0])
            allowed = False
    return allowed, message
def compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
    """Compute MRR@10 and Recall@10 over the candidate rankings.

    Relies on the module-level MaxMRRRank constant for the rank cutoff.

    Args:
        qids_to_relevant_passageids (dict): qid -> relevant passage ids,
            as read in with load_reference / load_reference_from_stream.
        qids_to_ranked_candidate_passages (dict): qid -> ranked candidate passage ids.

    Returns:
        dict: {'MRR @10': ..., 'Recal @10': ..., 'QueriesRanked': ...}
        (the 'Recal' spelling is kept for downstream compatibility).

    Raises:
        IOError: if no candidate qid appears in the reference set.
    """
    all_scores = {}
    MRR = 0
    recall_hits = 0
    ranking = []
    for qid in qids_to_ranked_candidate_passages:
        if qid in qids_to_relevant_passageids:
            ranking.append(0)
            target_pid = qids_to_relevant_passageids[qid]
            candidate_pid = qids_to_ranked_candidate_passages[qid]
            # Fix: cap the scan at the candidate list length -- the original
            # indexed candidate_pid[i] blindly up to MaxMRRRank and raised
            # IndexError for queries with fewer than MaxMRRRank candidates.
            for i in range(0, min(MaxMRRRank, len(candidate_pid))):
                if candidate_pid[i] in target_pid:
                    MRR += 1 / (i + 1)
                    recall_hits += 1
                    # record the 1-based rank of the first relevant hit
                    ranking.pop()
                    ranking.append(i + 1)
                    break
    if len(ranking) == 0:
        raise IOError("No matching QIDs found. Are you sure you are scoring the evaluation set?")
    denominator = len(qids_to_ranked_candidate_passages)  # qids_to_relevant_passageids
    all_scores['MRR @10'] = MRR / denominator
    all_scores['Recal @10'] = recall_hits / denominator
    all_scores['QueriesRanked'] = denominator
    return all_scores
The provided code snippet includes necessary dependencies for implementing the `compute_metrics_from_files` function. Write a Python function `def compute_metrics_from_files(path_to_reference, path_to_candidate, perform_checks=True)` to solve the following problem:
Compute MRR metric Args: p_path_to_reference_file (str): path to reference file. Reference file should contain lines in the following format: QUERYID\tPASSAGEID Where PASSAGEID is a relevant passage for a query. Note QUERYID can repeat on different lines with different PASSAGEIDs p_path_to_candidate_file (str): path to candidate file. Candidate file should contain lines in the following format: QUERYID\tPASSAGEID1\tRank If a user wishes to use the TREC format please run the script with a -t flag at the end. If this flag is used the expected format is QUERYID\tITER\tDOCNO\tRANK\tSIM\tRUNID Where the values are separated by tabs and ranked in order of relevance Returns: dict: dictionary of metrics {'MRR': <MRR Score>}
Here is the function:
def compute_metrics_from_files(path_to_reference, path_to_candidate, perform_checks=True):
    """Compute the MRR metric from reference and candidate files.

    Args:
        path_to_reference (str): reference file with lines "QUERYID\tPASSAGEID",
            where PASSAGEID is relevant for QUERYID (QUERYID may repeat on
            different lines with different PASSAGEIDs).
        path_to_candidate (str): candidate file with lines
            "QUERYID\tPASSAGEID1\tRank" (or, with the -t flag, the TREC format
            "QUERYID\tITER\tDOCNO\tRANK\tSIM\tRUNID"), tab-separated and ranked
            in order of relevance.
        perform_checks (bool): run duplicate-passage sanity checks first and
            print any problem found.

    Returns:
        dict: dictionary of metrics {'MRR': <MRR Score>}
    """
    reference = load_reference(path_to_reference)
    candidates = load_candidate(path_to_candidate)
    if perform_checks:
        _, message = quality_checks_qids(reference, candidates)
        if message:
            print(message)
    return compute_metrics(reference, candidates)
21,745 | import argparse
import glob
import os
import re
import subprocess
from distutils.dir_util import copy_tree
from typing import List
from bs4 import BeautifulSoup
from packaging import version
The provided code snippet includes necessary dependencies for implementing the `parse_args` function. Write a Python function `def parse_args()` to solve the following problem:
Setup and parse command line arguments for using the script
Here is the function:
def parse_args():
    """
    Setup and parse command line arguments for using the script
    """
    arg_parser = argparse.ArgumentParser(
        description="Create and package documentation for the repository"
    )
    # both directories are mandatory string arguments
    for flag, help_text in (
        ("--src", "the source directory to read the source for the docs from"),
        ("--dest", "the destination directory to put the built docs"),
    ):
        arg_parser.add_argument(flag, type=str, required=True, help=help_text)
    return arg_parser.parse_args()
21,746 | import argparse
import glob
import os
import re
import subprocess
from distutils.dir_util import copy_tree
from typing import List
from bs4 import BeautifulSoup
from packaging import version
The provided code snippet includes necessary dependencies for implementing the `create_docs` function. Write a Python function `def create_docs(src: str, dest: str)` to solve the following problem:
Run the sphinx command to create the docs from src into dest. :param src: the source directory for docs :type src: str :param dest: the destination directory for docs :type dest: str
Here is the function:
def create_docs(src: str, dest: str):
    """
    Run the sphinx command to create the docs from src into dest.
    :param src: the source directory for docs
    :type src: str
    :param dest: the destination directory for docs
    :type dest: str
    """
    print("running sphinx-multiversion")
    result = subprocess.run(["sphinx-multiversion", src, dest])
    if result.returncode != 0:
        # NOTE(review): stdout/stderr are None here since the call does not
        # capture output, so the exception text reads "None None".
        raise Exception(f"{result.stdout} {result.stderr}")
    print("completed sphinx build")
21,747 | import argparse
import glob
import os
import re
import subprocess
from distutils.dir_util import copy_tree
from typing import List
from bs4 import BeautifulSoup
from packaging import version
def _get_docs_folders(dest: str) -> List[str]:
folders = os.listdir(dest)
return folders
def _get_latest_folder(folders: List[str]) -> str:
    """Pick the newest vX.Y.Z folder; fall back to 'main', then the lexicographic max."""
    semver = re.compile(r"^v[0-9]+\.[0-9]+\.[0-9]+$")
    versioned = sorted(
        ((name, version.parse(name[1:])) for name in folders if semver.match(name)),
        key=lambda pair: pair[1],
    )
    # newest tagged release wins
    if versioned:
        return versioned[-1][0]
    # fall back on main if available as default
    if "main" in folders:
        return "main"
    # otherwise take the lexicographically greatest folder
    folders.sort()
    return folders[-1]
def _copy_to_root(dest: str, latest: str):
latest_path = os.path.join(dest, latest)
copy_tree(latest_path, dest)
def _fix_html_files_version_links(dest: str, folders: List[str]):
    """Rewrite version links in every HTML file under *dest* that is NOT inside
    one of the per-version build folders (i.e. only the root copy is touched)."""
    for html_file in glob.glob(os.path.join(dest, "**", "*.html"), recursive=True):
        top_level_dir = os.path.relpath(html_file, dest).split(os.sep)[0]
        if top_level_dir not in folders:
            _fix_html_version_links(html_file)
The provided code snippet includes necessary dependencies for implementing the `package_docs` function. Write a Python function `def package_docs(dest: str)` to solve the following problem:
Run any extra packaging commands to prep the docs for release. Ex: copies the latest version to the root so if a version isn't specified will load. :param dest: the destination directory the docs were built in :type dest: str
Here is the function:
def package_docs(dest: str):
"""
Run any extra packaging commands to prep the docs for release.
Ex: copies the latest version to the root so if a version isn't specified will load.
:param dest: the destination directory the docs were built in
:type dest: str
"""
print(f"packaging docs at {dest}")
folders = _get_docs_folders(dest)
print(f"found {len(folders)} docs folders from build")
latest = _get_latest_folder(folders)
print(f"found latest version `{latest}`, copying to {dest}")
_copy_to_root(dest, latest)
print(f"copied version {latest} to root as default")
print("fixing root links")
_fix_html_files_version_links(dest, folders)
print("root links fixed") | Run any extra packaging commands to prep the docs for release. Ex: copies the latest version to the root so if a version isn't specified will load. :param dest: the destination directory the docs were built in :type dest: str |
21,748 | import argparse
import os
import re
from typing import Dict
import numpy as np
import tensorflow
import torch
import torchvision.transforms as transforms
from PIL import Image
from sparseml.keras.datasets import ImageNetDataset, SplitsTransforms
from sparseml.keras.models import ModelRegistry as KRModelRegistry
from sparseml.keras.utils import ModelExporter, keras
from sparseml.pytorch.models import ModelRegistry as PTModelRegistry
from sparseml.utils.datasets import IMAGENET_RGB_MEANS, IMAGENET_RGB_STDS
def parse_args():
parser = argparse.ArgumentParser(description="Convert Pytorch models to Keras")
parser.add_argument(
"--arch-key",
type=str,
required=True,
help="Arch key of model to convert (e.g., 'resnet50')",
)
parser.add_argument(
"--model-type",
type=str,
required=True,
help="Type of model to convert (e.g., 'base-none', 'pruned-moderate')",
)
parser.add_argument(
"--imagenet-dir",
type=str,
required=True,
help="The root path to where the Imagenet dataset is stored",
)
parser.add_argument(
"--test-image-file-path",
type=str,
required=True,
help="Path to an image used for comparing inference results between "
"Pytorch and Keras",
)
parser.add_argument(
"--save-dir",
type=str,
required=True,
help="The path to the directory for saving results",
)
return parser.parse_args() | null |
21,749 | import argparse
import os
import re
from typing import Dict
import numpy as np
import tensorflow
import torch
import torchvision.transforms as transforms
from PIL import Image
from sparseml.keras.datasets import ImageNetDataset, SplitsTransforms
from sparseml.keras.models import ModelRegistry as KRModelRegistry
from sparseml.keras.utils import ModelExporter, keras
from sparseml.pytorch.models import ModelRegistry as PTModelRegistry
from sparseml.utils.datasets import IMAGENET_RGB_MEANS, IMAGENET_RGB_STDS
The provided code snippet includes necessary dependencies for implementing the `verify_keras_model_with_pytorch` function. Write a Python function `def verify_keras_model_with_pytorch(kr_model, imagenet_dir)` to solve the following problem:
Verify the converted models using ImageNet's data pipeline in Pytorch Assumption: the validation pipeline is enhanced with the following permutation class my_permuter: def __call__(self, img): return img.permute(1, 2, 0) to fit into the default data format "channels_last" by Keras
Here is the function:
def verify_keras_model_with_pytorch(kr_model, imagenet_dir):
"""
Verify the converted models using ImageNet's data pipeline in Pytorch
Assumption: the validation pipeline is enhanced with the following permutation
class my_permuter:
def __call__(self, img):
return img.permute(1, 2, 0)
to fit into the default data format "channels_last" by Keras
"""
from torch.utils.data import DataLoader
from sparseml.pytorch.datasets import ImageNetDataset as PTImageNetDataset
batch_size = 128
val_dataset = PTImageNetDataset(imagenet_dir, train=False)
val_loader = DataLoader(
val_dataset, batch_size, shuffle=False, pin_memory=True, num_workers=8
)
val_acc_metric = keras.metrics.CategoricalAccuracy()
for x_batch_val, y_batch_val in val_loader:
val_logits = kr_model(x_batch_val, training=False)
# Update val metrics
val_acc_metric.update_state(
torch.nn.functional.one_hot(y_batch_val, num_classes=1000), val_logits
)
val_acc = val_acc_metric.result()
val_acc_metric.reset_states()
print("Validation acc: %.4f" % (float(val_acc),)) | Verify the converted models using ImageNet's data pipeline in Pytorch Assumption: the validation pipeline is enhanced with the following permutation class my_permuter: def __call__(self, img): return img.permute(1, 2, 0) to fit into the default data format "channels_last" by Keras |
21,750 | import argparse
import os
import re
from typing import Dict
import numpy as np
import tensorflow
import torch
import torchvision.transforms as transforms
from PIL import Image
from sparseml.keras.datasets import ImageNetDataset, SplitsTransforms
from sparseml.keras.models import ModelRegistry as KRModelRegistry
from sparseml.keras.utils import ModelExporter, keras
from sparseml.pytorch.models import ModelRegistry as PTModelRegistry
from sparseml.utils.datasets import IMAGENET_RGB_MEANS, IMAGENET_RGB_STDS
def convert_pytorch_to_keras(
arch_key: str,
type_: str,
class_type: str,
layer_mapping: Dict[str, str],
output_dir: str,
imagenet_dir: str,
test_image_file_path: str,
):
model_dir = os.path.join(output_dir, "{}-{}".format(arch_key, type_))
if not os.path.exists(model_dir):
os.makedirs(model_dir)
model_file_path = os.path.join(model_dir, "model.h5")
# Convert and save
kr_model = _convert_model(
arch_key, type_, class_type, layer_mapping, test_image_file_path
)
kr_model.save(model_file_path)
verify_keras_model(kr_model, imagenet_dir)
# verify_keras_model_with_pytorch(kr_model, imagenet_dir)
# Export to ONNX
exporter = ModelExporter(kr_model, model_dir)
exporter.export_onnx(name="model.onnx", debug_mode=False)
# Samples
n_samples = 20
samples_dir = os.path.join(model_dir, "samples")
if not os.path.exists(samples_dir):
os.makedirs(samples_dir)
val_dataset = ImageNetDataset(imagenet_dir, train=False)
val_dataset = val_dataset.build(
batch_size=n_samples, shuffle_buffer_size=None, repeat_count=1
)
for img_batch, label_batch in val_dataset.take(1):
output_batch = kr_model(img_batch)
np.save(os.path.join(samples_dir, "inputs.npy"), img_batch)
np.save(os.path.join(samples_dir, "outputs.npy"), output_batch)
np.save(os.path.join(samples_dir, "labels.npy"), label_batch)
def zero_padding_image():
def _zero_padding_image(image):
max_image_size = 1024
return tensorflow.image.pad_to_bounding_box(
image, 1, 1, max_image_size, max_image_size
)
return _zero_padding_image
pad_val_dataset = ImageNetDataset(
imagenet_dir,
image_size=None,
train=False,
pre_resize_transforms=SplitsTransforms(train=None, val=(zero_padding_image(),)),
post_resize_transforms=SplitsTransforms(train=None, val=None),
)
pad_val_dataset = pad_val_dataset.build(
batch_size=n_samples, shuffle_buffer_size=None, repeat_count=1
)
for padded_img_batch, _ in pad_val_dataset.take(1):
np.save(os.path.join(samples_dir, "originals.npy"), padded_img_batch)
def convert_resnets_for_keras(args):
output_dir = args.save_dir
imagenet_dir = args.imagenet_dir
test_image_file_path = args.test_image_file_path
resnet_mapping = {
"input.conv": "input.conv",
"input.bn": "input.bn",
"sections.([0-9]+).([0-9]+).conv([0-9]+)": "sections.{}.{}.conv{}",
"sections.([0-9]+).([0-9]+).bn([0-9]+)": "sections.{}.{}.bn{}",
"sections.([0-9]+).([0-9]+).identity.conv": "sections.{}.{}.identity.conv",
"sections.([0-9]+).([0-9]+).identity.bn": "sections.{}.{}.identity.bn",
"classifier.fc": "classifier.fc",
}
convert_pytorch_to_keras(
args.arch_key,
args.model_type,
"single",
resnet_mapping,
output_dir,
imagenet_dir,
test_image_file_path,
) | null |
21,751 | import argparse
import glob
import os
import sys
from typing import List, NamedTuple
QUALITY_COMMAND = "quality"
STYLE_COMMAND = "style"
The provided code snippet includes necessary dependencies for implementing the `parse_args` function. Write a Python function `def parse_args()` to solve the following problem:
Setup and parse command line arguments for using the script
Here is the function:
def parse_args():
"""
Setup and parse command line arguments for using the script
"""
parser = argparse.ArgumentParser(
description=(
"Add Neuralmagic copyright to the beginning of all "
"files under the given glob patterns. "
"Currently assumes Python files using '#' as the commenting prefix."
)
)
subparsers = parser.add_subparsers(dest="command")
quality_parser = subparsers.add_parser(
QUALITY_COMMAND,
description=(
"Run check across the files in the given patterns and "
"fail if any do not have a copyright in them"
),
)
style_parser = subparsers.add_parser(
STYLE_COMMAND,
description=(
"Add the copyright to any files in the given patterns if it is not present"
),
)
for sub in [quality_parser, style_parser]:
sub.add_argument(
"patterns",
type=str,
default=[],
nargs="+",
help="the patterns to search through",
)
return parser.parse_args() | Setup and parse command line arguments for using the script |
21,752 | import argparse
import glob
import os
import sys
from typing import List, NamedTuple
def _get_files(patterns: List[str]) -> List[str]:
files = []
for pattern in patterns:
for file in glob.glob(pattern, recursive=True):
files.append(os.path.abspath(os.path.expanduser(file)))
files.sort()
return files
def _dont_copyright(file_path: str) -> bool:
with open(file_path, "r") as file:
content = file.read()
try:
content.index(NO_COPYRIGHT_LINE)
return True
except ValueError:
return False
def _contains_copyright(file_path: str) -> bool:
with open(file_path, "r") as file:
content = file.read()
try:
for line in COPYRIGHT_LINES:
content.index(line)
return True
except ValueError:
return False
The provided code snippet includes necessary dependencies for implementing the `quality` function. Write a Python function `def quality(patterns: List[str])` to solve the following problem:
Run a quality check across all files in the given glob patterns. This checks to make sure all matching files have the NM copyright present. If any do not, it will list them out and exit with an error. :param patterns: The glob file patterns to run quality check on
Here is the function:
def quality(patterns: List[str]):
"""
Run a quality check across all files in the given glob patterns.
This checks to make sure all matching files have the NM copyright present.
If any do not, it will list them out and exit with an error.
:param patterns: The glob file patterns to run quality check on
"""
check_files = _get_files(patterns)
error_files = []
for file in check_files:
if not _dont_copyright(file) and not _contains_copyright(file):
print(f"would add copyright to {file}")
error_files.append(file)
if error_files:
sys.exit(
f"{len(error_files)} would be copyrighted, "
f"{len(check_files) - len(error_files)} would be left unchanged."
)
else:
print(f"{len(check_files)} files have copyrights") | Run a quality check across all files in the given glob patterns. This checks to make sure all matching files have the NM copyright present. If any do not, it will list them out and exit with an error. :param patterns: The glob file patterns to run quality check on |
21,753 | import argparse
import glob
import os
import sys
from typing import List, NamedTuple
def _get_files(patterns: List[str]) -> List[str]:
files = []
for pattern in patterns:
for file in glob.glob(pattern, recursive=True):
files.append(os.path.abspath(os.path.expanduser(file)))
files.sort()
return files
def _dont_copyright(file_path: str) -> bool:
with open(file_path, "r") as file:
content = file.read()
try:
content.index(NO_COPYRIGHT_LINE)
return True
except ValueError:
return False
def _contains_copyright(file_path: str) -> bool:
with open(file_path, "r") as file:
content = file.read()
try:
for line in COPYRIGHT_LINES:
content.index(line)
return True
except ValueError:
return False
def _add_copyright(file_path: str):
file_type = _file_type(file_path)
if file_type == "unknown":
raise ValueError(
f"unsupported file_type given to be copyrighted at {file_path}"
)
with open(file_path, "r+") as file:
lines = file.readlines()
header_info = _file_header_info(lines, file_type)
inject_index = 0
if header_info.end_index > -1:
# if there is already a header, we want to inject the copyright after it
# additionally we'll need a new line between the prev header and copyright
inject_index = header_info.end_index + 1
lines.insert(inject_index, "\n")
inject_index += 1
# add the copyright at the inject index
file_copyright = _file_copyright(file_type)
lines.insert(inject_index, file_copyright)
if not header_info.new_line_after:
# if there wasn't a new line after the header,
# add in a new line after to create space between the code and copyright
inject_index += 1
lines.insert(inject_index, "\n")
file.seek(0)
file.writelines(lines)
file.truncate()
The provided code snippet includes necessary dependencies for implementing the `style` function. Write a Python function `def style(patterns: List[str])` to solve the following problem:
Run a style application across all files in the given glob patterns. This checks to make sure all matching files have the NM copyright present. If any do not, it will append the copyright to above the file after any already contained headers such as shebang lines. :param patterns: The glob file patterns to run quality check on
Here is the function:
def style(patterns: List[str]):
"""
Run a style application across all files in the given glob patterns.
This checks to make sure all matching files have the NM copyright present.
If any do not, it will append the copyright to above the file after
any already contained headers such as shebang lines.
:param patterns: The glob file patterns to run quality check on
"""
check_files = _get_files(patterns)
copyrighted_files = []
for file in check_files:
if not _dont_copyright(file) and not _contains_copyright(file):
_add_copyright(file)
print(f"copyrighted {file}")
copyrighted_files.append(file)
if copyrighted_files:
print(
f"{len(copyrighted_files)} file(s) copyrighted, "
f"{len(check_files) - len(copyrighted_files)} files unchanged"
)
else:
print(f"{len(check_files)} files unchanged") | Run a style application across all files in the given glob patterns. This checks to make sure all matching files have the NM copyright present. If any do not, it will append the copyright to above the file after any already contained headers such as shebang lines. :param patterns: The glob file patterns to run quality check on |
21,754 | import codecs
import os
import re
from typing import List
import setuptools
from setuptools import find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
def parse_requirements(file_name: str) -> List[str]:
with open(file_name) as f:
return [
require.strip() for require in f
if require.strip() and not require.startswith('#')
] | null |
21,755 | import codecs
import os
import re
from typing import List
import setuptools
from setuptools import find_packages
def read(*parts):
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.") | null |
21,756 | import streamlit as st
from PIL import Image
import os
import io
import base64
from io import BytesIO
import requests
from gptcache import cache
from gptcache.manager import get_data_manager, CacheBase, VectorBase, ObjectBase
from gptcache.adapter import openai
from gptcache.processor.pre import get_prompt
from gptcache.embedding import Onnx
from gptcache.similarity_evaluation import ExactMatchEvaluation
data_manager = initialize_configuration()
def get_prompt(data: Dict[str, Any], **_: Dict[str, Any]) -> Any:
"""get the prompt of the llm request params
:param data: the user llm request data
:type data: Dict[str, Any]
Example:
.. code-block:: python
from gptcache.processor.pre import get_prompt
content = get_prompt({"prompt": "foo"})
# "foo"
"""
return data.get("prompt")
def Onnx(model="GPTCache/paraphrase-albert-onnx"):
return onnx.Onnx(model)
def ExactMatchEvaluation():
return exact_match.ExactMatchEvaluation()
def initialize_configuration():
onnx = Onnx()
data_manager = get_data_manager(CacheBase('sqlite', sql_url='sqlite:///./local/gptcache10.db'),
VectorBase('faiss', dimension=onnx.dimension, index_path='./local/faiss10.index'),
ObjectBase('local', path='./local'))
cache.init(
pre_embedding_func=get_prompt,
embedding_func=onnx.to_embeddings,
data_manager=data_manager,
similarity_evaluation=ExactMatchEvaluation(),
)
return data_manager | null |
21,757 | import streamlit as st
from PIL import Image
import os
import io
import base64
from io import BytesIO
import requests
from gptcache import cache
from gptcache.manager import get_data_manager, CacheBase, VectorBase, ObjectBase
from gptcache.adapter import openai
from gptcache.processor.pre import get_prompt
from gptcache.embedding import Onnx
from gptcache.similarity_evaluation import ExactMatchEvaluation
import openai
def api_call(text_input, open_ai_key):
os.environ['CURL_CA_BUNDLE'] = ''
response = openai.Image.create(
prompt=text_input,
n=1,
size='256x256',
api_key=open_ai_key
)
image_url = response['data'][0]['url']
is_cached = response.get('gptcache', False)
if is_cached is False:
response = requests.get(image_url)
img = Image.open(BytesIO(response.content))
else:
img = Image.open(image_url)
return img, is_cached | null |
21,758 | import streamlit as st
import os
import uuid
from gptcache import cache
from gptcache.manager import get_data_manager, CacheBase, VectorBase, ObjectBase
from gptcache.adapter import openai
from gptcache.processor.pre import get_file_name
from gptcache.embedding import Data2VecAudio
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
data_manager = initialize_configuration()
def get_file_name(data: Dict[str, Any], **_: Dict[str, Any]) -> str:
def Data2VecAudio(model="facebook/data2vec-audio-base-960h"):
class SearchDistanceEvaluation(SimilarityEvaluation):
def __init__(self, max_distance=4.0, positive=False):
def evaluation(
self, src_dict: Dict[str, Any], cache_dict: Dict[str, Any], **_
) -> float:
def range(self) -> Tuple[float, float]:
def initialize_configuration():
data2vec = Data2VecAudio()
data_manager = get_data_manager(CacheBase('sqlite', sql_url='sqlite:///./local/gptcache20.db'),
VectorBase('faiss', dimension=data2vec.dimension, index_path='./local/faiss20.index'),
ObjectBase('local', path='./local'))
cache.init(
pre_embedding_func=get_file_name,
embedding_func=data2vec.to_embeddings,
data_manager=data_manager,
similarity_evaluation=SearchDistanceEvaluation(),
)
return data_manager | null |
21,759 | import streamlit as st
import os
import uuid
from gptcache import cache
from gptcache.manager import get_data_manager, CacheBase, VectorBase, ObjectBase
from gptcache.adapter import openai
from gptcache.processor.pre import get_file_name
from gptcache.embedding import Data2VecAudio
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
import openai
def api_call(audio_bytes, open_ai_key):
os.environ['OPENAI_API_KEY'] = open_ai_key
os.environ['CURL_CA_BUNDLE'] = ''
os.environ['KMP_DUPLICATE_LIB_OK']='TRUE'
cache.set_openai_key()
transcript = openai.Audio.transcribe('whisper-1', audio_bytes, api_key=open_ai_key)
is_cached = transcript.get('gptcache', False)
return transcript['text'], is_cached | null |
21,760 | from gptcache import cache
from gptcache.session import Session
from gptcache.adapter import openai
class Session:
"""
Session for gptcache. Session can isolate the context of each connection, and can also filter the results after recall,
and if not satisfied will re-request rather than return the cache results directly.
:param name: the name of the session, defaults to `uuid.uuid4().hex`.
:type name: str
:param data_manager: the DataManager of the session, defaults to cache.data_manager with the initialized cache.
:type data_manager: DataManager
:param check_hit_func: a Callable to check the hit, defaults to `processor.check_hit.check_hit_session`,which will not return cached data
if you ask the same or similar question in the same session.
:type check_hit_func: Callable
Example:
.. code-block:: python
from gptcache import cache
from gptcache.session import Session
# init gptcache
cache.init()
cache.set_openai_key()
session = Session()
from gptcache.adapter import openai
# run ChatCompletion model with gptcache on session
response = openai.ChatCompletion.create(
model='gpt-3.5-turbo',
messages=[
{
'role': 'user',
'content': "what's github"
}],
session=session
)
response_content = response['choices'][0]['message']['content']
"""
def __init__(
self,
name: Optional[str] = None,
data_manager: Optional[DataManager] = None,
check_hit_func: Optional[Callable] = None,
):
self._name = uuid.uuid4().hex if not name else name
self._data_manager = cache.data_manager if not data_manager else data_manager
self.check_hit_func = (
check_hit_session if not check_hit_func else check_hit_func
)
def name(self):
return self._name
def __enter__(self):
gptcache_log.warning(
"The `with` method will delete the session data directly on exit."
)
return self
def __exit__(self, *_):
self.drop()
def drop(self):
"""Drop the session and delete all data in the session"""
self._data_manager.delete_session(self.name)
gptcache_log.info("Deleting data in the session: %s.", self.name)
import openai
def run_session():
session = Session()
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "user",
"content": "what's github?"
}],
session=session
)
response_content = response["choices"][0]["message"]["content"]
print(response_content) | null |
21,761 | from gptcache import cache
from gptcache.session import Session
from gptcache.adapter import openai
class Session:
"""
Session for gptcache. Session can isolate the context of each connection, and can also filter the results after recall,
and if not satisfied will re-request rather than return the cache results directly.
:param name: the name of the session, defaults to `uuid.uuid4().hex`.
:type name: str
:param data_manager: the DataManager of the session, defaults to cache.data_manager with the initialized cache.
:type data_manager: DataManager
:param check_hit_func: a Callable to check the hit, defaults to `processor.check_hit.check_hit_session`,which will not return cached data
if you ask the same or similar question in the same session.
:type check_hit_func: Callable
Example:
.. code-block:: python
from gptcache import cache
from gptcache.session import Session
# init gptcache
cache.init()
cache.set_openai_key()
session = Session()
from gptcache.adapter import openai
# run ChatCompletion model with gptcache on session
response = openai.ChatCompletion.create(
model='gpt-3.5-turbo',
messages=[
{
'role': 'user',
'content': "what's github"
}],
session=session
)
response_content = response['choices'][0]['message']['content']
"""
def __init__(
self,
name: Optional[str] = None,
data_manager: Optional[DataManager] = None,
check_hit_func: Optional[Callable] = None,
):
self._name = uuid.uuid4().hex if not name else name
self._data_manager = cache.data_manager if not data_manager else data_manager
self.check_hit_func = (
check_hit_session if not check_hit_func else check_hit_func
)
def name(self):
return self._name
def __enter__(self):
gptcache_log.warning(
"The `with` method will delete the session data directly on exit."
)
return self
def __exit__(self, *_):
self.drop()
def drop(self):
"""Drop the session and delete all data in the session"""
self._data_manager.delete_session(self.name)
gptcache_log.info("Deleting data in the session: %s.", self.name)
import openai
def run_custom_session():
def my_check_hit(cur_session_id, cache_session_ids, cache_questions, cache_answer):
print(cur_session_id, cache_session_ids, cache_questions, cache_answer)
if "GitHub" in cache_answer:
return True
return False
session = Session(name="my-session", check_hit_func=my_check_hit)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "user",
"content": "what's github?"
}],
session=session
)
response_content = response["choices"][0]["message"]["content"]
print(response_content) | null |
21,762 | from gptcache import cache, Config, Cache
from gptcache.adapter.api import put, get, init_similar_cache
from gptcache.processor.post import nop
from gptcache.processor.pre import get_prompt
def put(prompt: str, data: Any, **kwargs) -> None:
"""put api, put qa pair information to GPTCache
Please make sure that the `pre_embedding_func` param is `get_prompt` when initializing the cache
:param prompt: the cache data key, usually question text
:type prompt: str
:param data: the cache data value, usually answer text
:type data: Any
:param kwargs: list of user-defined parameters
:type kwargs: Dict
Example:
.. code-block:: python
from gptcache.adapter.api import put
from gptcache.processor.pre import get_prompt
cache.init(pre_embedding_func=get_prompt)
put("hello", "foo")
"""
def llm_handle(*llm_args, **llm_kwargs): # pylint: disable=W0613
return data
adapt(
llm_handle,
_cache_data_converter,
_update_cache_callback,
cache_skip=True,
prompt=prompt,
**kwargs,
)
def get(prompt: str, **kwargs) -> Any:
"""get api, get the cache data according to the `prompt`
Please make sure that the `pre_embedding_func` param is `get_prompt` when initializing the cache
:param prompt: the cache data key, usually question text
:type prompt: str
:param kwargs: list of user-defined parameters
:type kwargs: Dict
Example:
.. code-block:: python
from gptcache.adapter.api import put, get
from gptcache.processor.pre import get_prompt
cache.init(pre_embedding_func=get_prompt)
put("hello", "foo")
print(get("hello"))
"""
res = adapt(
_llm_handle_none,
_cache_data_converter,
_update_cache_callback_none,
prompt=prompt,
**kwargs,
)
return res
def get_prompt(data: Dict[str, Any], **_: Dict[str, Any]) -> Any:
"""get the prompt of the llm request params
:param data: the user llm request data
:type data: Dict[str, Any]
Example:
.. code-block:: python
from gptcache.processor.pre import get_prompt
content = get_prompt({"prompt": "foo"})
# "foo"
"""
return data.get("prompt")
def run_basic():
cache.init(pre_embedding_func=get_prompt)
put("hello", "foo")
print(get("hello"))
# output: foo | null |
21,763 | from gptcache import cache, Config, Cache
from gptcache.adapter.api import put, get, init_similar_cache
from gptcache.processor.post import nop
from gptcache.processor.pre import get_prompt
def put(prompt: str, data: Any, **kwargs) -> None:
"""put api, put qa pair information to GPTCache
Please make sure that the `pre_embedding_func` param is `get_prompt` when initializing the cache
:param prompt: the cache data key, usually question text
:type prompt: str
:param data: the cache data value, usually answer text
:type data: Any
:param kwargs: list of user-defined parameters
:type kwargs: Dict
Example:
.. code-block:: python
from gptcache.adapter.api import put
from gptcache.processor.pre import get_prompt
cache.init(pre_embedding_func=get_prompt)
put("hello", "foo")
"""
def llm_handle(*llm_args, **llm_kwargs): # pylint: disable=W0613
return data
adapt(
llm_handle,
_cache_data_converter,
_update_cache_callback,
cache_skip=True,
prompt=prompt,
**kwargs,
)
def get(prompt: str, **kwargs) -> Any:
"""get api, get the cache data according to the `prompt`
Please make sure that the `pre_embedding_func` param is `get_prompt` when initializing the cache
:param prompt: the cache data key, usually question text
:type prompt: str
:param kwargs: list of user-defined parameters
:type kwargs: Dict
Example:
.. code-block:: python
from gptcache.adapter.api import put, get
from gptcache.processor.pre import get_prompt
cache.init(pre_embedding_func=get_prompt)
put("hello", "foo")
print(get("hello"))
"""
res = adapt(
_llm_handle_none,
_cache_data_converter,
_update_cache_callback_none,
prompt=prompt,
**kwargs,
)
return res
def init_similar_cache(
data_dir: str = "api_cache",
cache_obj: Optional[Cache] = None,
pre_func: Callable = get_prompt,
embedding: Optional[BaseEmbedding] = None,
data_manager: Optional[DataManager] = None,
evaluation: Optional[SimilarityEvaluation] = None,
post_func: Callable = temperature_softmax,
config: Config = Config(),
):
"""Provide a quick way to initialize cache for api service
:param data_dir: cache data storage directory
:type data_dir: str
:param cache_obj: specify to initialize the Cache object, if not specified, initialize the global object
:type cache_obj: Optional[Cache]
:param pre_func: pre-processing of the cache input text
:type pre_func: Callable
:param embedding: embedding object
:type embedding: BaseEmbedding
:param data_manager: data manager object
:type data_manager: DataManager
:param evaluation: similarity evaluation object
:type evaluation: SimilarityEvaluation
:param post_func: post-processing of the cached result list, the most similar result is taken by default
:type post_func: Callable[[List[Any]], Any]
:param config: cache configuration, the core is similar threshold
:type config: Config
:return: None
Example:
.. code-block:: python
from gptcache.adapter.api import put, get, init_similar_cache
init_similar_cache()
put("hello", "foo")
print(get("hello"))
"""
if not embedding:
embedding = Onnx()
if not data_manager:
data_manager = manager_factory(
"sqlite,faiss",
data_dir=data_dir,
vector_params={"dimension": embedding.dimension},
)
if not evaluation:
evaluation = SearchDistanceEvaluation()
cache_obj = cache_obj if cache_obj else cache
cache_obj.init(
pre_embedding_func=pre_func,
embedding_func=embedding.to_embeddings,
data_manager=data_manager,
similarity_evaluation=evaluation,
post_process_messages_func=post_func,
config=config,
)
def nop(messages: List[Any]) -> Any:
"""No change after evaluation.
:param messages: A list of candidate outputs.
:type messages: List[Any]
Example:
.. code-block:: python
from gptcache.processor.post import nop
messages = ["message 1", "message 2", "message 3"]
answer = nop(messages)
assert answer = messages
"""
return messages
def run_similar_match():
inner_cache = Cache()
init_similar_cache(
cache_obj=inner_cache, post_func=nop, config=Config(similarity_threshold=0)
)
put("hello1", "foo1", cache_obj=inner_cache)
put("hello2", "foo2", cache_obj=inner_cache)
put("hello3", "foo3", cache_obj=inner_cache)
messages = get("hello", cache_obj=inner_cache, top_k=3)
print(messages)
# output: ['foo1', 'foo2', 'foo3'] | null |
21,764 | import os
from langchain import Cohere
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
from gptcache.adapter.langchain_models import LangChainLLMs
from gptcache import cache
from gptcache.processor.pre import get_prompt
from gptcache.adapter.langchain_models import LangChainChat
OpenAI.api_key = os.getenv("OPENAI_API_KEY")
Cohere.cohere_api_key = os.getenv("COHERE_API_KEY")
class LangChainLLMs(LLM):
    """LangChain LLM Wrapper.

    Routes LangChain completion calls through GPTCache's ``adapt`` so repeated
    prompts can be served from the cache instead of the model.

    :param llm: LLM from langchain.llms.
    :type llm: Any

    Example:
        .. code-block:: python

            from gptcache import cache
            from gptcache.processor.pre import get_prompt
            # init gptcache
            cache.init(pre_embedding_func=get_prompt)
            cache.set_openai_key()

            from langchain.llms import OpenAI
            from gptcache.adapter.langchain_models import LangChainLLMs
            # run llm with gptcache
            llm = LangChainLLMs(llm=OpenAI(temperature=0))
            llm("Hello world")
    """

    llm: Any                 # the wrapped langchain LLM instance
    session: Session = None  # default GPTCache session; a per-call "session" kwarg overrides it
    tmp_args: Any = None     # kwargs stashed by generate()/agenerate() for _call() to consume

    def _llm_type(self) -> str:
        # Delegate the LangChain type tag to the wrapped model.
        return self.llm._llm_type

    def _identifying_params(self) -> Mapping[str, Any]:
        # Delegate identification to the wrapped model.
        return self.llm._identifying_params

    def __str__(self) -> str:
        return str(self.llm)

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        _: Optional[CallbackManagerForLLMRun] = None,
    ) -> str:
        # A per-call "session" (stashed in tmp_args by generate()) wins over the attribute.
        session = (
            self.session
            if "session" not in self.tmp_args
            else self.tmp_args.pop("session")
        )
        cache_obj = self.tmp_args.pop("cache_obj", cache)
        # adapt() consults the cache first and only invokes the wrapped llm on a miss.
        return adapt(
            self.llm,
            _cache_data_convert,
            _update_cache_callback,
            prompt=prompt,
            stop=stop,
            cache_obj=cache_obj,
            session=session,
            **self.tmp_args,
        )

    async def _acall(self, prompt: str, stop: Optional[List[str]] = None,
                     run_manager: Optional[AsyncCallbackManagerForLLMRun] = None) -> str:
        # NOTE(review): the async path defers to LangChain's default and appears to
        # bypass the cache entirely — confirm whether that is intentional.
        return await super()._acall(prompt, stop=stop, run_manager=run_manager)

    def generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
        **kwargs,
    ) -> LLMResult:
        # Stash extra kwargs so _call() (invoked internally by LangChain) can see them.
        self.tmp_args = kwargs
        return super().generate(prompts, stop=stop, callbacks=callbacks)

    async def agenerate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
        **kwargs,
    ) -> LLMResult:
        self.tmp_args = kwargs
        return await super().agenerate(prompts, stop=stop, callbacks=callbacks)

    def __call__(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
        **kwargs,
    ) -> str:
        """Check Cache and run the LLM on the given prompt and input."""
        return (
            self.generate([prompt], stop=stop, callbacks=callbacks, **kwargs)
            .generations[0][0]
            .text
        )
def get_prompt(data: Dict[str, Any], **_: Dict[str, Any]) -> Any:
    """Return the ``prompt`` field of an LLM request payload (``None`` when absent).

    :param data: the user llm request data

    Example:
        .. code-block:: python

            from gptcache.processor.pre import get_prompt

            content = get_prompt({"prompt": "foo"})
            # "foo"
    """
    request = data
    return request.get("prompt")
def run_llm():
    """Ask the same question through two LangChain-wrapped backends (OpenAI, Cohere)."""
    cache.init(pre_embedding_func=get_prompt)
    question = "what is chatgpt"
    # TODO install cohere auto
    for backend in (OpenAI(model_name="text-ada-001"), Cohere()):
        wrapped = LangChainLLMs(llm=backend)
        print(wrapped(prompt=question))
21,765 | import os
from langchain import Cohere
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
from gptcache.adapter.langchain_models import LangChainLLMs
from gptcache import cache
from gptcache.processor.pre import get_prompt
from gptcache.adapter.langchain_models import LangChainChat
# NOTE(review): everything below is a signature-only excerpt — the bodies were
# stripped when this snippet was extracted, so this section is not runnable as-is.
def get_msg(data, **_):
    # presumably extracts the chat ``messages`` payload from ``data`` — confirm upstream.

class LangChainChat(BaseChatModel):
    # GPTCache wrapper around a LangChain chat model (methods stubbed here).
    def _llm_type(self) -> str:
    def _generate(
        self,
        messages: Any,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> ChatResult:
    async def _agenerate(
        self,
        messages: List[List[BaseMessage]],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> ChatResult:
    def generate(
        self,
        messages: List[List[BaseMessage]],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
        **kwargs,
    ) -> LLMResult:
    async def agenerate(
        self,
        messages: List[List[BaseMessage]],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
        **kwargs,
    ) -> LLMResult:
    def _identifying_params(self):
    def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
    def get_num_tokens(self, text: str) -> int:
    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
    def __call__(self, messages: Any, stop: Optional[List[str]] = None, **kwargs):
def run_chat_model():
    """Send one translation request through a GPTCache-backed ChatOpenAI model."""
    cache.init(pre_embedding_func=get_msg)
    chat = LangChainChat(chat=ChatOpenAI(temperature=0))
    request = HumanMessage(
        content="Translate this sentence from English to Chinese. I love programming."
    )
    answer = chat(messages=[request])
    print(answer)
21,766 | from gptcache.adapter import openai
from gptcache import cache
from gptcache.manager import get_data_manager, CacheBase, VectorBase
from gptcache.embedding import Onnx as EmbeddingOnnx
from gptcache.similarity_evaluation import OnnxModelEvaluation
import openai
def OnnxModelEvaluation(model="GPTCache/albert-duplicate-onnx"):
    """Factory forwarding to :class:`onnx.OnnxModelEvaluation` (lazy-import indirection)."""
    evaluator = onnx.OnnxModelEvaluation(model)
    return evaluator
def run():
    """Answer one chat question with GPTCache (ONNX embeddings + faiss/sqlite + ONNX evaluator)."""
    embedder = EmbeddingOnnx()
    evaluator = OnnxModelEvaluation()
    store = VectorBase('faiss', dimension=embedder.dimension)
    manager = get_data_manager('sqlite', store)
    cache.init(
        embedding_func=embedder.to_embeddings,
        data_manager=manager,
        similarity_evaluation=evaluator,
    )
    cache.set_openai_key()
    reply = openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=[{'role': 'user', 'content': 'what is chatgpt'}],
    )
    print(reply)
21,767 | from gptcache.adapter import openai
from gptcache import cache
from gptcache.similarity_evaluation.exact_match import ExactMatchEvaluation
import openai
class ExactMatchEvaluation(SimilarityEvaluation):
    """Binary similarity: two requests match only when their ``question``
    strings are identical character for character.

    Example:
        .. code-block:: python

            from gptcache.similarity_evaluation import ExactMatchEvaluation

            evaluation = ExactMatchEvaluation()
            score = evaluation.evaluation(
                {"question": "What is the color of sky?"},
                {"question": "What is the color of sky?"},
            )
    """

    def __init__(self):
        pass

    def evaluation(
        self, src_dict: Dict[str, Any], cache_dict: Dict[str, Any], **_
    ) -> float:
        """Return 1 when the ``question`` fields are equal, otherwise 0.

        :param src_dict: the query dictionary to evaluate with cache_dict.
        :param cache_dict: the cache dictionary.
        :return: evaluation score.
        """
        identical = cache_dict["question"] == src_dict["question"]
        return 1 if identical else 0

    def range(self) -> Tuple[float, float]:
        """Score bounds: ``(0, 1)``."""
        return 0, 1
def run():
    """Exact-match cached chat completion example."""
    cache.init(similarity_evaluation=ExactMatchEvaluation())
    cache.set_openai_key()
    reply = openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=[{'role': 'user', 'content': 'what is chatgpt'}],
    )
    print(reply)
21,768 | from gptcache.adapter import openai
from gptcache import cache
from gptcache.manager import get_data_manager, VectorBase
from gptcache.similarity_evaluation import SequenceMatchEvaluation
from gptcache.processor.pre import concat_all_queries
from gptcache.embedding import Onnx
from gptcache import Config
import openai
def SequenceMatchEvaluation(weights, embedding_extractor, embedding_config: Dict[str, Any] = None):
    """Factory forwarding to :class:`sequence_match.SequenceMatchEvaluation` (lazy-import indirection)."""
    evaluator = sequence_match.SequenceMatchEvaluation(
        weights, embedding_extractor, embedding_config=embedding_config
    )
    return evaluator
def concat_all_queries(data: Dict[str, Any], **params: Dict[str, Any]) -> Any:
    """Join the tail of a chat transcript into one ``ROLE: content`` string.

    Keeps at most ``2 * cache_config.context_len`` trailing messages, skips
    roles listed in ``cache_config.skip_list``, and separates entries with
    newlines.

    :param data: the user llm request data

    Example:
        .. code-block:: python

            from gptcache.processor.pre import concat_all_queries

            content = concat_all_queries({"messages": [{"role": "system", "content": "hello"},
                {"role": "user", "content": "world"},
                {"role": "assistant", "content": "alice"}]})
    """
    cfg = params.get("cache_config", None)
    roles_to_skip = cfg.skip_list
    # context_len counts user/assistant *pairs*, hence the doubling
    window = cfg.context_len * 2
    history = data.get("messages")
    keep = min(window, len(history))
    tail = history[len(history) - keep:]
    last = len(tail) - 1
    pieces = []
    for idx, msg in enumerate(tail):
        if msg["role"] in roles_to_skip:
            continue
        line = f'{msg["role"].upper()}: {msg["content"]}'
        # NOTE: a trailing "\n" survives when the final tail message is skipped —
        # quirk preserved from the original implementation.
        pieces.append(line if idx == last else line + "\n")
    return "".join(pieces)
def Onnx(model="GPTCache/paraphrase-albert-onnx"):
    """Factory forwarding to :class:`onnx.Onnx` (lazy-import indirection)."""
    embedder = onnx.Onnx(model)
    return embedder
def run():
    """Context-aware cached chat example using SequenceMatchEvaluation."""
    embedder = Onnx()
    store = VectorBase('faiss', dimension=embedder.dimension)
    manager = get_data_manager('sqlite', store)
    cache.init(
        embedding_func=embedder.to_embeddings,
        pre_embedding_func=concat_all_queries,
        data_manager=manager,
        similarity_evaluation=SequenceMatchEvaluation([0.1, 0.2, 0.7], 'onnx'),
        config=Config(context_len=3, skip_list=['system', 'assistant']),
    )
    cache.set_openai_key()
    transcript = [
        {'role': 'system', 'content': 'you are a helpful chatbot.'},
        {'role': 'user', 'content': 'query1'},
        {'role': 'assistant', 'content': 'answer1'},
        {'role': 'user', 'content': 'query2'},
        {'role': 'assistant', 'content': 'answer2'},
        {'role': 'user', 'content': 'query3'},
        {'role': 'assistant', 'content': 'answer3'},
    ]
    print(openai.ChatCompletion.create(model='gpt-3.5-turbo', messages=transcript))
21,769 | from gptcache.adapter import openai
from gptcache import cache
from gptcache.manager import get_data_manager, VectorBase
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
from gptcache.embedding import Onnx
import openai
class SearchDistanceEvaluation(SimilarityEvaluation):
    """Turn an ANN search distance into a similarity score.

    The retrieval stage stores its raw distance in ``cache_dict["search_result"]``.
    That distance is clamped to ``[0, max_distance]``; when ``positive`` is False
    (the default) the clamped value is subtracted from ``max_distance`` so a
    *smaller* distance yields a *larger* score.

    :param max_distance: the bound of maximum distance.
    :type max_distance: float
    :param positive: True when a larger distance already means "more similar".
    :type positive: bool

    Example:
        .. code-block:: python

            from gptcache.similarity_evaluation import SearchDistanceEvaluation

            evaluation = SearchDistanceEvaluation()
            score = evaluation.evaluation({}, {"search_result": (1, None)})
    """

    def __init__(self, max_distance=4.0, positive=False):
        self.max_distance = max_distance
        self.positive = positive

    def evaluation(
        self, src_dict: Dict[str, Any], cache_dict: Dict[str, Any], **_
    ) -> float:
        """Score the pair from the stored search distance (``src_dict`` is unused).

        :param cache_dict: the cache dictionary holding ``search_result``.
        :return: evaluation score.
        """
        raw, _unused = cache_dict["search_result"]
        clamped = min(max(raw, 0), self.max_distance)
        return clamped if self.positive else self.max_distance - clamped

    def range(self) -> Tuple[float, float]:
        """Score bounds: ``(0.0, max_distance)``."""
        return 0.0, self.max_distance
def Onnx(model="GPTCache/paraphrase-albert-onnx"):
return onnx.Onnx(model)
def run():
    """Cached chat example: ONNX embeddings + faiss/sqlite + search-distance scoring."""
    embedder = Onnx()
    store = VectorBase('faiss', dimension=embedder.dimension)
    manager = get_data_manager('sqlite', store)
    cache.init(
        embedding_func=embedder.to_embeddings,
        data_manager=manager,
        similarity_evaluation=SearchDistanceEvaluation(),
    )
    cache.set_openai_key()
    reply = openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=[{'role': 'user', 'content': 'what is chatgpt'}],
    )
    print(reply)
21,770 | import argparse
import gradio as gr
from gptcache import cache
from gptcache.processor.pre import get_image, get_image_question
from gptcache.embedding import Timm
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
from gptcache.manager.factory import manager_factory
from gptcache.adapter.minigpt4 import MiniGPT4
# --- module-level cache bootstrap for the MiniGPT-4 demo ---
# NOTE(review): parse_args() is called here but defined later in this excerpt;
# presumably the original file defines it above this point — confirm ordering.
args = parse_args()
if args.map:
    # Exact-match cache: image+question keys stored in a plain map.
    data_manager = manager_factory("map", args.dir)
    cache.init(
        pre_embedding_func=get_image_question,
        data_manager=data_manager
    )  # init with map method
else:
    # Similarity cache: Timm image embeddings + sqlite/faiss + distance scoring.
    timm = Timm()
    data_manager = manager_factory("sqlite,faiss", args.dir, vector_params={"dimension": timm.dimension})
    cache.init(
        pre_embedding_func=get_image,
        data_manager=data_manager,
        embedding_func=timm.to_embeddings,
        similarity_evaluation=SearchDistanceEvaluation()
    )
# Gradio page header shown above the demo UI.
description = """<h3>This is the demo of MiniGPT-4 and GPTCache. Upload your images and ask question, and it will be cached.</h3>"""
def parse_args():
    """Parse command-line options for the MiniGPT-4 + GPTCache demo.

    ``--map`` (the default) selects the exact-match map cache; ``--no-map``
    selects the sqlite+faiss similarity cache.
    """
    parser = argparse.ArgumentParser(description="Demo")
    parser.add_argument("--cfg-path", required=True, help="path to configuration file.")
    parser.add_argument("--gpu-id", type=int, default=0, help="specify the gpu to load the model.")
    parser.add_argument("--dir", type=str, default=".", help="path for data storage.")
    # --map / --no-map write to the same destination; set_defaults makes map=True the default.
    parser.add_argument("--map", action='store_true', help="use map for exact match cache.")
    parser.add_argument('--no-map', dest='map', action='store_false', help="use sqlite and faiss for similar search cache.")
    parser.set_defaults(map=True)
    parser.add_argument(
        "--options",
        nargs="+",
        help="override some settings in the used config, the key-value pair "
        "in xxx=yyy format will be merged into config file (deprecate), "
        "change to --cfg-options instead.",
    )
    args = parser.parse_args()
    return args
21,771 | from gptcache.adapter import openai
from gptcache import cache
from gptcache.manager.factory import get_data_manager
from gptcache.manager import get_data_manager, CacheBase, VectorBase
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
from gptcache.embedding import Onnx
import openai
# NOTE(review): signature-only excerpt — the bodies were stripped during
# extraction; full definitions of these names appear elsewhere in this file.
def get_data_manager(
    cache_base: Union[CacheBase, str] = None,
    vector_base: Union[VectorBase, str] = None,
    object_base: Union[ObjectBase, str] = None,
    eviction_base: Union[EvictionBase, str] = None,
    max_size: int = 1000,
    clean_size=None,
    eviction: str = "LRU",
    data_path: str = "data_map.txt",
    get_data_container: Callable = None,
):

class SearchDistanceEvaluation(SimilarityEvaluation):
    def __init__(self, max_distance=4.0, positive=False):
    def evaluation(
        self, src_dict: Dict[str, Any], cache_dict: Dict[str, Any], **_
    ) -> float:
    def range(self) -> Tuple[float, float]:

def Onnx(model="GPTCache/paraphrase-albert-onnx"):
def run():
    """Cached chat example wiring CacheBase + VectorBase through get_data_manager."""
    embedder = Onnx()
    scalar_store = CacheBase('sqlite')
    vec_store = VectorBase('faiss', dimension=embedder.dimension)
    manager = get_data_manager(scalar_store, vec_store)
    cache.init(
        embedding_func=embedder.to_embeddings,
        data_manager=manager,
        similarity_evaluation=SearchDistanceEvaluation(),
    )
    cache.set_openai_key()
    print(openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=[{'role': 'user', 'content': 'what is chatgpt'}],
    ))
21,772 | from gptcache.adapter import openai
from gptcache import cache
from gptcache.embedding.string import to_embeddings as string_embedding
import openai
def run():
    """Cached chat example using the exact string-embedding function."""
    cache.init(embedding_func=string_embedding)
    cache.set_openai_key()
    reply = openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=[{'role': 'user', 'content': 'what is chatgpt'}],
    )
    print(reply)
21,773 | from gptcache.adapter import openai
from gptcache import cache
from gptcache.manager.factory import get_data_manager
from gptcache.manager import get_data_manager, CacheBase, VectorBase
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
from gptcache.embedding import PaddleNLP
import openai
def get_data_manager(
    cache_base: Union[CacheBase, str] = None,
    vector_base: Union[VectorBase, str] = None,
    object_base: Union[ObjectBase, str] = None,
    eviction_base: Union[EvictionBase, str] = None,
    max_size: int = 1000,
    clean_size=None,
    eviction: str = "LRU",
    data_path: str = "data_map.txt",
    get_data_container: Callable = None,
):
    """Build the data manager for the cache.

    With neither ``cache_base`` nor ``vector_base`` given, a
    :class:`MapDataManager` (an LRU map persisted at ``data_path``) is
    returned. Otherwise both must be provided — string names are resolved to
    their base objects — and an :class:`SSDataManager` combining scalar,
    vector and optional object/eviction storage is returned.

    :param cache_base: CacheBase object or storage name: 'sqlite', 'duckdb',
        'postgresql', 'mysql', 'mariadb', 'sqlserver' or 'oracle'.
    :param vector_base: VectorBase object or storage name: 'milvus', 'faiss'
        or 'chromadb'.
    :param object_base: object storage; supports local path and s3.
    :param eviction_base: EvictionBase object or name: 'memory', 'redis' or
        'no_op_eviction' (use 'no_op_eviction' when the cache base, e.g.
        redis, already evicts internally).
    :param max_size: LRU capacity, defaults to 1000.
    :param clean_size: LRU clean batch size, defaults to None.
    :param eviction: eviction policy, defaults to 'LRU'.
    :param data_path: persistence path for the map manager, defaults to
        'data_map.txt'.
    :param get_data_container: optional factory for the map's container.
    :return: SSDataManager or MapDataManager.

    Example:
        .. code-block:: python

            from gptcache.manager import get_data_manager, CacheBase, VectorBase

            data_manager = get_data_manager(CacheBase('sqlite'), VectorBase('faiss', dimension=128))
    """
    # No storage backends requested at all -> plain persisted LRU map.
    if not cache_base and not vector_base:
        return MapDataManager(data_path, max_size, get_data_container)

    # Resolve string shorthands into their base objects.
    def _resolve(value, factory):
        return factory(name=value) if isinstance(value, str) else value

    cache_base = _resolve(cache_base, CacheBase)
    vector_base = _resolve(vector_base, VectorBase)
    object_base = _resolve(object_base, ObjectBase)
    eviction_base = _resolve(eviction_base, EvictionBase)
    # Scalar and vector storage must both be present past this point.
    assert cache_base and vector_base
    return SSDataManager(cache_base, vector_base, object_base, eviction_base, max_size, clean_size, eviction)
class SearchDistanceEvaluation(SimilarityEvaluation):
    """Turn an ANN search distance into a similarity score.

    The retrieval stage stores its raw distance in ``cache_dict["search_result"]``.
    That distance is clamped to ``[0, max_distance]``; when ``positive`` is False
    (the default) the clamped value is subtracted from ``max_distance`` so a
    *smaller* distance yields a *larger* score.

    :param max_distance: the bound of maximum distance.
    :type max_distance: float
    :param positive: True when a larger distance already means "more similar".
    :type positive: bool

    Example:
        .. code-block:: python

            from gptcache.similarity_evaluation import SearchDistanceEvaluation

            evaluation = SearchDistanceEvaluation()
            score = evaluation.evaluation({}, {"search_result": (1, None)})
    """

    def __init__(self, max_distance=4.0, positive=False):
        self.max_distance = max_distance
        self.positive = positive

    def evaluation(
        self, src_dict: Dict[str, Any], cache_dict: Dict[str, Any], **_
    ) -> float:
        """Score the pair from the stored search distance (``src_dict`` is unused).

        :param cache_dict: the cache dictionary holding ``search_result``.
        :return: evaluation score.
        """
        raw, _unused = cache_dict["search_result"]
        clamped = min(max(raw, 0), self.max_distance)
        return clamped if self.positive else self.max_distance - clamped

    def range(self) -> Tuple[float, float]:
        """Score bounds: ``(0.0, max_distance)``."""
        return 0.0, self.max_distance
def PaddleNLP(model="ernie-3.0-medium-zh"):
    """Factory forwarding to :class:`paddlenlp.PaddleNLP` (lazy-import indirection)."""
    embedder = paddlenlp.PaddleNLP(model)
    return embedder
def run():
    """Cached chat example using PaddleNLP embeddings."""
    embedder = PaddleNLP()
    scalar_store = CacheBase('sqlite')
    vec_store = VectorBase('faiss', dimension=embedder.dimension)
    manager = get_data_manager(scalar_store, vec_store)
    cache.init(
        embedding_func=embedder.to_embeddings,
        data_manager=manager,
        similarity_evaluation=SearchDistanceEvaluation(),
    )
    cache.set_openai_key()
    print(openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=[{'role': 'user', 'content': 'what is chatgpt'}],
    ))
21,774 | from gptcache.adapter import openai
from gptcache import cache
from gptcache.manager import get_data_manager, CacheBase, VectorBase
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
import numpy as np
# Embedding dimensionality shared by the mock embedder and the faiss index below.
d = 8


def mock_embeddings(data, **kwargs):
    """Return a random float32 vector of length ``d`` (stand-in embedding; ignores ``data``)."""
    return np.random.random(d).astype('float32')
import openai
class SearchDistanceEvaluation(SimilarityEvaluation):
    """Turn an ANN search distance into a similarity score.

    The retrieval stage stores its raw distance in ``cache_dict["search_result"]``.
    That distance is clamped to ``[0, max_distance]``; when ``positive`` is False
    (the default) the clamped value is subtracted from ``max_distance`` so a
    *smaller* distance yields a *larger* score.

    :param max_distance: the bound of maximum distance.
    :type max_distance: float
    :param positive: True when a larger distance already means "more similar".
    :type positive: bool

    Example:
        .. code-block:: python

            from gptcache.similarity_evaluation import SearchDistanceEvaluation

            evaluation = SearchDistanceEvaluation()
            score = evaluation.evaluation({}, {"search_result": (1, None)})
    """

    def __init__(self, max_distance=4.0, positive=False):
        self.max_distance = max_distance
        self.positive = positive

    def evaluation(
        self, src_dict: Dict[str, Any], cache_dict: Dict[str, Any], **_
    ) -> float:
        """Score the pair from the stored search distance (``src_dict`` is unused).

        :param cache_dict: the cache dictionary holding ``search_result``.
        :return: evaluation score.
        """
        raw, _unused = cache_dict["search_result"]
        clamped = min(max(raw, 0), self.max_distance)
        return clamped if self.positive else self.max_distance - clamped

    def range(self) -> Tuple[float, float]:
        """Score bounds: ``(0.0, max_distance)``."""
        return 0.0, self.max_distance
def run():
    """Cached chat example driven by the mock random embedder."""
    scalar_store = CacheBase('sqlite')
    vec_store = VectorBase('faiss', dimension=d)
    manager = get_data_manager(scalar_store, vec_store)
    cache.init(
        embedding_func=mock_embeddings,
        data_manager=manager,
        similarity_evaluation=SearchDistanceEvaluation(),
    )
    cache.set_openai_key()
    print(openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=[{'role': 'user', 'content': 'what is chatgpt'}],
    ))
21,775 | import time
from gptcache.adapter.llama_cpp import Llama
from gptcache.manager import manager_factory
from gptcache import Cache
from gptcache.embedding import Onnx
from gptcache.processor.pre import get_prompt
class Llama(llama_cpp.Llama):
    """llama.cpp wrapper

    You should have the llama-cpp-python library installed.
    https://github.com/abetlen/llama-cpp-python

    Example:
        .. code-block:: python

            onnx = Onnx()
            m = manager_factory('sqlite,faiss,local', data_dir=root, vector_params={"dimension": onnx.dimension})
            llm_cache = Cache()
            llm_cache.init(
                pre_embedding_func=get_prompt,
                data_manager=m,
                embedding_func=onnx.to_embeddings
            )
            llm = Llama('./models/7B/ggml-model.bin')
            answer = llm(prompt=question, cache_obj=llm_cache)
    """

    def __call__(
        self,
        prompt: str,
        **kwargs
    ):
        # Persist the finished completion (or the concatenation of all streamed
        # chunks) into the cache once the model has produced it.
        def update_cache_callback(llm_data, update_cache_func, *args, **kwargs):  # pylint: disable=unused-argument
            if not isinstance(llm_data, Iterator):
                # Non-streaming: a single response dict; cache its text directly.
                update_cache_func(Answer(llm_data["choices"][0]["text"], DataType.STR))
                return llm_data
            else:
                # Streaming: pass chunks through untouched while accumulating the
                # full text, then cache it once the generator is exhausted.
                def stream_answer(it):
                    total_answer = ""
                    for item in it:
                        total_answer += item["choices"][0]["text"]
                        yield item
                    update_cache_func(Answer(total_answer, DataType.STR))
                return stream_answer(llm_data)

        # On a cache hit, shape the stored answer like a real llama.cpp response
        # (streaming or not, matching what the caller asked for).
        def cache_data_convert(cache_data):
            if kwargs.get("stream", False):
                return _construct_stream_resp_from_cache(cache_data)
            return _construct_resp_from_cache(cache_data)

        # adapt() checks the cache first and only calls create_completion on a miss.
        return adapt(
            self.create_completion,
            cache_data_convert,
            update_cache_callback,
            prompt=prompt,
            **kwargs
        )
def Onnx(model="GPTCache/paraphrase-albert-onnx"):
    """Thin factory around :class:`onnx.Onnx` so the heavy import stays lazy."""
    embedder = onnx.Onnx(model)
    return embedder
def get_prompt(data: Dict[str, Any], **_: Dict[str, Any]) -> Any:
    """Extract the ``prompt`` field from an LLM request (``None`` when missing).

    :param data: the user llm request data

    Example:
        .. code-block:: python

            from gptcache.processor.pre import get_prompt

            content = get_prompt({"prompt": "foo"})
            # "foo"
    """
    return data["prompt"] if "prompt" in data else None
def llama_cpp_base_usage():
    """Ask llama.cpp the same question twice; the second call should hit the cache."""
    embedder = Onnx()
    manager = manager_factory("sqlite,faiss,local", data_dir="./llamacpp_basic", vector_params={"dimension": embedder.dimension})
    llm_cache = Cache()
    llm_cache.init(
        pre_embedding_func=get_prompt,
        data_manager=manager,
        embedding_func=embedder.to_embeddings
    )
    llm = Llama("./ggml-model-q4_0.bin")
    for _ in range(2):
        started = time.time()
        answer = llm(prompt="Q: Name the planets in the solar system? A: ", stop=["Q:", "\n"], cache_obj=llm_cache)
        print("Time consuming: {:.2f}s".format(time.time() - started))
        print(f"Received: {answer['choices'][0]['text']}")
        print(f"Hit cache: {answer.get('gptcache', False)}")
21,776 | import time
from gptcache.adapter.llama_cpp import Llama
from gptcache.manager import manager_factory
from gptcache import Cache
from gptcache.embedding import Onnx
from gptcache.processor.pre import get_prompt
# NOTE(review): signature-only excerpt — bodies were stripped during extraction;
# the complete Llama adapter appears earlier in this file.
class Llama(llama_cpp.Llama):
    def __call__(
        self,
        prompt: str,
        **kwargs
    ):
        def update_cache_callback(llm_data, update_cache_func, *args, **kwargs):
            def stream_answer(it):
        def cache_data_convert(cache_data):

def Onnx(model="GPTCache/paraphrase-albert-onnx"):

def get_prompt(data: Dict[str, Any], **_: Dict[str, Any]) -> Any:
def llama_cpp_stream_usage():
    """Stream llama.cpp completions twice, assembling the chunks into a full answer."""
    embedder = Onnx()
    manager = manager_factory("sqlite,faiss,local", data_dir="./llamacpp_stream", vector_params={"dimension": embedder.dimension})
    llm_cache = Cache()
    llm_cache.init(
        pre_embedding_func=get_prompt,
        data_manager=manager,
        embedding_func=embedder.to_embeddings
    )
    llm = Llama("./ggml-model-q4_0.bin")
    for _ in range(2):
        started = time.time()
        stream = llm(prompt="Q: Name the planets in the solar system? A: ", stop=["Q:", "\n"], stream=True, cache_obj=llm_cache)
        answer = ''.join(chunk['choices'][0]['text'] for chunk in stream)
        print("Time consuming: {:.2f}s".format(time.time() - started))
        print(f"Received: {answer}")
21,777 | import os
import time
import openai
from gptcache import cache
from gptcache.adapter import openai
from gptcache import cache
from gptcache.adapter import openai
from gptcache.embedding import Onnx
from gptcache.manager import get_data_manager, VectorBase
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
def response_text(openai_resp):
    """Pull the assistant message text out of a (non-stream) ChatCompletion response."""
    message = openai_resp['choices'][0]['message']
    return message['content']
21,778 | import os
import time
from gptcache.manager import get_data_manager, VectorBase
from gptcache import cache, Cache
from gptcache.embedding import Onnx
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
from gptcache.adapter import openai
def cache_init():
    """Initialise the global GPTCache with the default (map) data manager and set the OpenAI key.

    Fix: removed the unused ``dir_name, _ = os.path.split(os.path.abspath(__file__))``
    computation — its result was never read.
    """
    cache.init(data_manager=get_data_manager())
    # NOTE: replace the placeholder with a real key, or export OPENAI_API_KEY before running.
    os.environ['OPENAI_API_KEY'] = 'API KEY'
    cache.set_openai_key()
21,779 | import os
import time
from gptcache.manager import get_data_manager, VectorBase
from gptcache import cache, Cache
from gptcache.embedding import Onnx
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
from gptcache.adapter import openai
def response_text(openai_resp):
    """Return the assistant's reply text from a ChatCompletion response dict."""
    first_choice = openai_resp['choices'][0]
    return first_choice['message']['content']
import openai
def base_request():
    """Issue the same counting request twice and report the latency of each call."""
    prompt = 'Count to 5, with a comma between each number and no newlines. E.g., 1, 2, 3, ...'
    for _ in range(2):
        started = time.time()
        response = openai.ChatCompletion.create(
            model='gpt-3.5-turbo',
            messages=[{'role': 'user', 'content': prompt}],
            temperature=0,
        )
        print('Time consuming: {:.2f}s'.format(time.time() - started))
        print(f'Received: {response_text(response)}')
21,780 | import os
import time
from gptcache.manager import get_data_manager, VectorBase
from gptcache import cache, Cache
from gptcache.embedding import Onnx
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
from gptcache.adapter import openai
import openai
def stream_request():
    """Issue a streaming request twice, collecting delta chunks into the full reply."""
    for _ in range(2):
        started = time.time()
        response = openai.ChatCompletion.create(
            model='gpt-3.5-turbo',
            messages=[{'role': 'user', 'content': 'what is 1+1? Answer in one word.'}],
            temperature=0,
            stream=True  # this time, we set stream=True
        )
        collected_chunks = []
        deltas = []
        for chunk in response:
            collected_chunks.append(chunk)            # save the event response
            deltas.append(chunk['choices'][0]['delta'])  # save the message delta
        full_reply_content = ''.join(m.get('content', '') for m in deltas)
        finished = time.time()
        print('Time consuming: {:.2f}s'.format(finished - started))
        print(f'Full conversation received: {full_reply_content}')
21,781 | import os
import time
from gptcache.manager import get_data_manager, VectorBase
from gptcache import cache, Cache
from gptcache.embedding import Onnx
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
from gptcache.adapter import openai
def response_text(openai_resp):
    """Extract the reply text from choice 0 of an OpenAI chat response."""
    choices = openai_resp['choices']
    return choices[0]['message']['content']
def Onnx(model="GPTCache/paraphrase-albert-onnx"):
    # Factory indirection: defer the heavy onnx import until actually used.
    return onnx.Onnx(model)
class SearchDistanceEvaluation(SimilarityEvaluation):
    """Turn an ANN search distance into a similarity score.

    The retrieval stage stores its raw distance in ``cache_dict["search_result"]``.
    That distance is clamped to ``[0, max_distance]``; when ``positive`` is False
    (the default) the clamped value is subtracted from ``max_distance`` so a
    *smaller* distance yields a *larger* score.

    :param max_distance: the bound of maximum distance.
    :type max_distance: float
    :param positive: True when a larger distance already means "more similar".
    :type positive: bool

    Example:
        .. code-block:: python

            from gptcache.similarity_evaluation import SearchDistanceEvaluation

            evaluation = SearchDistanceEvaluation()
            score = evaluation.evaluation({}, {"search_result": (1, None)})
    """

    def __init__(self, max_distance=4.0, positive=False):
        self.max_distance = max_distance
        self.positive = positive

    def evaluation(
        self, src_dict: Dict[str, Any], cache_dict: Dict[str, Any], **_
    ) -> float:
        """Score the pair from the stored search distance (``src_dict`` is unused).

        :param cache_dict: the cache dictionary holding ``search_result``.
        :return: evaluation score.
        """
        raw, _unused = cache_dict["search_result"]
        clamped = min(max(raw, 0), self.max_distance)
        return clamped if self.positive else self.max_distance - clamped

    def range(self) -> Tuple[float, float]:
        """Score bounds: ``(0.0, max_distance)``."""
        return 0.0, self.max_distance
import openai
def similar_request():
    """Demonstrate semantic cache hits: ask two similarly-phrased questions;
    the second should be answered from the GPTCache instead of OpenAI (and
    print a much smaller elapsed time).
    """
    # ONNX model produces the sentence embeddings used as vector keys.
    onnx = Onnx()
    # Faiss index sized to the embedding dimension; SQLite scalar store.
    vector_base = VectorBase('faiss', dimension=onnx.dimension)
    data_manager = get_data_manager('sqlite', vector_base)
    one_cache = Cache()
    one_cache.init(embedding_func=onnx.to_embeddings,
                   data_manager=data_manager,
                   similarity_evaluation=SearchDistanceEvaluation(),
                   )
    question1 = 'what do you think about chatgpt'
    question2 = 'what do you feel like chatgpt'
    # First request: expected cache miss, goes to the OpenAI API.
    start_time = time.time()
    answer = openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=[
            {'role': 'user', 'content': question1}
        ],
        cache_obj=one_cache
    )
    end_time = time.time()
    print('Time consuming: {:.2f}s'.format(end_time - start_time))
    print(f'Received: {response_text(answer)}')
    # Second request: semantically similar question, expected cache hit.
    start_time = time.time()
    answer = openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=[
            {'role': 'user', 'content': question2}
        ],
        cache_obj=one_cache
    )
    end_time = time.time()
    print('Time consuming: {:.2f}s'.format(end_time - start_time))
    print(f'Received: {response_text(answer)}')
21,782 | import os
from langchain import Cohere
from langchain.llms import OpenAI
from gptcache.adapter.langchain_models import LangChainLLMs
from gptcache import cache, Cache
from gptcache.processor.pre import get_prompt
OpenAI.api_key = os.getenv("OPENAI_API_KEY")
Cohere.cohere_api_key = os.getenv("COHERE_API_KEY")
class LangChainLLMs(LLM):
    """LangChain LLM wrapper that routes every call through GPTCache.

    :param llm: LLM from langchain.llms.
    :type llm: Any

    Example:
        .. code-block:: python

            from gptcache import cache
            from gptcache.processor.pre import get_prompt
            # init gptcache
            cache.init(pre_embedding_func=get_prompt)
            cache.set_openai_key()

            from langchain.llms import OpenAI
            from gptcache.adapter.langchain_models import LangChainLLMs
            # run llm with gptcache
            llm = LangChainLLMs(llm=OpenAI(temperature=0))
            llm("Hello world")
    """

    # The wrapped langchain LLM instance.
    llm: Any
    # Optional GPTCache session; may be overridden per call via kwargs.
    session: Session = None
    # Scratch slot: kwargs captured by generate()/agenerate() and consumed by
    # _call().  NOTE(review): this makes instances non-reentrant — concurrent
    # generate() calls on one instance would clobber each other's kwargs.
    tmp_args: Any = None

    # NOTE(review): in langchain's LLM interface, `_llm_type` and
    # `_identifying_params` are `@property` members; the decorators appear to
    # have been stripped in this extract — confirm against the original module.
    def _llm_type(self) -> str:
        return self.llm._llm_type

    def _identifying_params(self) -> Mapping[str, Any]:
        return self.llm._identifying_params

    def __str__(self) -> str:
        return str(self.llm)

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        _: Optional[CallbackManagerForLLMRun] = None,
    ) -> str:
        # A per-call "session" kwarg takes precedence over the instance field.
        session = (
            self.session
            if "session" not in self.tmp_args
            else self.tmp_args.pop("session")
        )
        # Default to the global gptcache `cache` unless one was passed in.
        cache_obj = self.tmp_args.pop("cache_obj", cache)
        # Route through the GPTCache adapter: check the cache first, fall back
        # to the wrapped LLM, then store the fresh answer.
        return adapt(
            self.llm,
            _cache_data_convert,
            _update_cache_callback,
            prompt=prompt,
            stop=stop,
            cache_obj=cache_obj,
            session=session,
            **self.tmp_args,
        )

    async def _acall(self, prompt: str, stop: Optional[List[str]] = None,
                     run_manager: Optional[AsyncCallbackManagerForLLMRun] = None) -> str:
        # Async path simply defers to the base implementation (no caching here).
        return await super()._acall(prompt, stop=stop, run_manager=run_manager)

    def generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
        **kwargs,
    ) -> LLMResult:
        # Stash extra kwargs (cache_obj, session, ...) for _call to consume.
        self.tmp_args = kwargs
        return super().generate(prompts, stop=stop, callbacks=callbacks)

    async def agenerate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
        **kwargs,
    ) -> LLMResult:
        self.tmp_args = kwargs
        return await super().agenerate(prompts, stop=stop, callbacks=callbacks)

    def __call__(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
        **kwargs,
    ) -> str:
        """Check Cache and run the LLM on the given prompt and input."""
        return (
            self.generate([prompt], stop=stop, callbacks=callbacks, **kwargs)
            .generations[0][0]
            .text
        )
def get_prompt(data: Dict[str, Any], **_: Dict[str, Any]) -> Any:
    """Pre-embedding hook: pull the ``prompt`` field out of an LLM request.

    :param data: the user llm request data
    :type data: Dict[str, Any]
    :return: the value stored under ``"prompt"``, or None when absent

    Example:
        .. code-block:: python

            from gptcache.processor.pre import get_prompt

            content = get_prompt({"prompt": "foo"})
            # "foo"
    """
    return data["prompt"] if "prompt" in data else None
def run():
    """Populate a prompt-keyed exact-match cache and query it through the
    LangChain wrappers for both OpenAI and Cohere.

    The cache is keyed on the raw prompt (``get_prompt``), so only an exact
    prompt repeat is a hit; network calls happen only on misses.
    """
    data_file = "data_map.txt"
    has_data = os.path.isfile(data_file)
    llm_cache = Cache()
    llm_cache.init(
        pre_embedding_func=get_prompt,
    )
    if not has_data:
        # Seed ten canned question/answer pairs.  Bug fix: seed the cache that
        # is actually queried below (`llm_cache`), not the uninitialized
        # global `cache` object.
        for i in range(10):
            question = f"foo{i}"
            answer = f"receiver the foo {i}"
            llm_cache.data_manager.save(
                question, answer, llm_cache.embedding_func(question)
            )
    question = "foo0"
    langchain_openai = OpenAI(model_name="text-ada-001")
    llm = LangChainLLMs(llm=langchain_openai)
    # Both calls use the seeded prompt, so both should be served from cache.
    answer = llm(prompt=question, cache_obj=llm_cache)
    print(answer)
    answer = llm(prompt=question, cache_obj=llm_cache)
    print(answer)

    # TODO install cohere auto
    langchain_cohere = Cohere()
    llm = LangChainLLMs(llm=langchain_cohere)
    answer = llm(prompt=question, cache_obj=llm_cache)
    print(answer)
21,783 | import time
from langchain import OpenAI
from langchain.chains.question_answering import load_qa_chain
from langchain.schema import Document
from gptcache import cache
from gptcache.adapter.api import init_similar_cache
from gptcache.adapter.langchain_models import LangChainLLMs
def get_content_func(data, **_):
    """Pre-embedding hook: return the text after the final "Question:" marker
    in the request prompt (the whole prompt when the marker is absent)."""
    prompt_text = data.get("prompt")
    return prompt_text.rpartition("Question:")[2]
21,784 | import time
import torch
from transformers import pipeline
from gptcache.processor.pre import get_inputs
from gptcache.manager import manager_factory
from gptcache import Cache
from gptcache.embedding import Onnx
from gptcache.adapter.dolly import Dolly
def get_inputs(data: Dict[str, Any], **_: Dict[str, Any]):
    """Pre-embedding hook: pull the ``inputs`` field out of an LLM request.

    :param data: the user llm request data
    :type data: Dict[str, Any]
    :return: the value stored under ``"inputs"``, or None when absent

    Example:
        .. code-block:: python

            from gptcache.processor.pre import get_inputs

            content = get_inputs({"inputs": "hello"})
            # "hello"
    """
    return data["inputs"] if "inputs" in data else None
def Onnx(model="GPTCache/paraphrase-albert-onnx"):
    # Factory shortcut: build the ONNX sentence-embedding backend for the
    # given model name.  Delegates to the `onnx` module in scope (presumably
    # gptcache.embedding.onnx — confirm against the file's imports).
    return onnx.Onnx(model)
class Dolly:
    """Wrapper for Dolly (https://github.com/databrickslabs/dolly.git).

    Routes generation through the GPTCache adapter so repeated prompts can be
    served from the cache instead of re-running the model.

    Example using from_model:
        .. code-block:: python

            from gptcache import cache
            from gptcache.processor.pre import get_inputs
            cache.init(pre_embedding_func=get_inputs)

            from gptcache.adapter.dolly import Dolly
            dolly = Dolly.from_model(
                model="databricks/dolly-v2-12b", torch_dtype=torch.bfloat16, trust_remote_code=True, device=0
            )

    Example passing pipeline in directly:
        .. code-block:: python

            import torch
            from transformers import pipeline
            from gptcache import cache
            from gptcache.processor.pre import get_inputs
            cache.init(pre_embedding_func=get_inputs)

            from gptcache.adapter.dolly import Dolly
            pipe = pipeline(
                model="databricks/dolly-v2-12b", torch_dtype=torch.bfloat16, trust_remote_code=True, device=0
            )
            dolly = Dolly(pipe)
    """

    def __init__(self, dolly_pipeline: Any):
        # Underlying transformers text-generation pipeline.
        self._dolly_pipeline = dolly_pipeline

    # Bug fix: `from_model` takes `cls` and the docstring calls it as
    # `Dolly.from_model(...)`, but the `@classmethod` decorator was missing,
    # so that call raised TypeError (no value bound to `cls`).
    @classmethod
    def from_model(cls, model: str, **kwargs):
        """Alternate constructor: build the transformers pipeline by model name.

        :param model: HuggingFace model name, e.g. "databricks/dolly-v2-12b".
        :param kwargs: forwarded verbatim to ``transformers.pipeline``.
        """
        pipe = pipeline(model=model, **kwargs)
        return cls(pipe)

    def __call__(self, prompt: str, **kwargs):
        """Generate a completion for ``prompt``, consulting/updating the cache.

        Extra kwargs (e.g. ``cache_obj``) are forwarded to the adapter.
        """
        return adapt(
            self._dolly_pipeline,
            _cache_data_convert,
            _update_cache_callback,
            inputs=prompt,
            **kwargs
        )
def dolly_base_usage():
    """Run the same prompt twice through a cached Dolly model built via
    ``Dolly.from_model``; the second call should be served from the cache."""
    onnx = Onnx()
    # SQLite scalar store + Faiss vector index + local object store.
    m = manager_factory("sqlite,faiss,local", data_dir="./dolly", vector_params={"dimension": onnx.dimension})
    llm_cache = Cache()
    llm_cache.init(
        pre_embedding_func=get_inputs,
        data_manager=m,
        embedding_func=onnx.to_embeddings
    )
    llm = Dolly.from_model(model="databricks/dolly-v2-3b", torch_dtype=torch.bfloat16, trust_remote_code=True, device=0)

    context = """George Washington (February 22, 1732[b] – December 14, 1799) was an American military officer, statesman,
    and Founding Father who served as the first president of the United States from 1789 to 1797."""
    for _ in range(2):
        start_time = time.time()
        answer = llm(context, cache_obj=llm_cache)
        print("Time consuming: {:.2f}s".format(time.time() - start_time))
        print(f"Received: {answer[0]['generated_text']}")
        # The adapter marks cached responses with a truthy 'gptcache' key.
        print(f"Hit cache: {answer[0].get('gptcache', False)}")
21,785 | import time
import torch
from transformers import pipeline
from gptcache.processor.pre import get_inputs
from gptcache.manager import manager_factory
from gptcache import Cache
from gptcache.embedding import Onnx
from gptcache.adapter.dolly import Dolly
def get_inputs(data: Dict[str, Any], **_: Dict[str, Any]):
    """Pre-embedding hook: pull the ``inputs`` field out of an LLM request.

    :param data: the user llm request data
    :type data: Dict[str, Any]
    :return: the value stored under ``"inputs"``, or None when absent

    Example:
        .. code-block:: python

            from gptcache.processor.pre import get_inputs

            content = get_inputs({"inputs": "hello"})
            # "hello"
    """
    return data["inputs"] if "inputs" in data else None
def Onnx(model="GPTCache/paraphrase-albert-onnx"):
    # Factory shortcut: build the ONNX sentence-embedding backend for the
    # given model name.  Delegates to the `onnx` module in scope (presumably
    # gptcache.embedding.onnx — confirm against the file's imports).
    return onnx.Onnx(model)
class Dolly:
    """Wrapper for Dolly (https://github.com/databrickslabs/dolly.git).

    Routes generation through the GPTCache adapter so repeated prompts can be
    served from the cache instead of re-running the model.

    Example using from_model:
        .. code-block:: python

            from gptcache import cache
            from gptcache.processor.pre import get_inputs
            cache.init(pre_embedding_func=get_inputs)

            from gptcache.adapter.dolly import Dolly
            dolly = Dolly.from_model(
                model="databricks/dolly-v2-12b", torch_dtype=torch.bfloat16, trust_remote_code=True, device=0
            )

    Example passing pipeline in directly:
        .. code-block:: python

            import torch
            from transformers import pipeline
            from gptcache import cache
            from gptcache.processor.pre import get_inputs
            cache.init(pre_embedding_func=get_inputs)

            from gptcache.adapter.dolly import Dolly
            pipe = pipeline(
                model="databricks/dolly-v2-12b", torch_dtype=torch.bfloat16, trust_remote_code=True, device=0
            )
            dolly = Dolly(pipe)
    """

    def __init__(self, dolly_pipeline: Any):
        # Underlying transformers text-generation pipeline.
        self._dolly_pipeline = dolly_pipeline

    # Bug fix: `from_model` takes `cls` and the docstring calls it as
    # `Dolly.from_model(...)`, but the `@classmethod` decorator was missing,
    # so that call raised TypeError (no value bound to `cls`).
    @classmethod
    def from_model(cls, model: str, **kwargs):
        """Alternate constructor: build the transformers pipeline by model name.

        :param model: HuggingFace model name, e.g. "databricks/dolly-v2-12b".
        :param kwargs: forwarded verbatim to ``transformers.pipeline``.
        """
        pipe = pipeline(model=model, **kwargs)
        return cls(pipe)

    def __call__(self, prompt: str, **kwargs):
        """Generate a completion for ``prompt``, consulting/updating the cache.

        Extra kwargs (e.g. ``cache_obj``) are forwarded to the adapter.
        """
        return adapt(
            self._dolly_pipeline,
            _cache_data_convert,
            _update_cache_callback,
            inputs=prompt,
            **kwargs
        )
def dolly_from_hugggingface():
    """Run the same prompt twice through a cached Dolly pipeline constructed
    directly with ``transformers.pipeline``; the second call should hit cache.

    NOTE(review): the function name carries the original "hugggingface" typo;
    it is kept so existing callers are not broken.
    """
    onnx = Onnx()
    # SQLite scalar store + Faiss vector index + local object store.
    m = manager_factory("sqlite,faiss,local", data_dir="./dolly_hg", vector_params={"dimension": onnx.dimension})
    llm_cache = Cache()
    llm_cache.init(
        pre_embedding_func=get_inputs,
        data_manager=m,
        embedding_func=onnx.to_embeddings
    )
    pipe = pipeline(model="databricks/dolly-v2-3b", torch_dtype=torch.bfloat16,
                    trust_remote_code=True, device=0, return_full_text=True)
    llm = Dolly(pipe)

    context = """George Washington (February 22, 1732[b] – December 14, 1799) was an American military officer, statesman,
    and Founding Father who served as the first president of the United States from 1789 to 1797."""
    for _ in range(2):
        start_time = time.time()
        answer = llm(context, cache_obj=llm_cache)
        print("Time consuming: {:.2f}s".format(time.time() - start_time))
        print(f"Received: {answer[0]['generated_text']}")
        # The adapter marks cached responses with a truthy 'gptcache' key.
        print(f"Hit cache: {answer[0].get('gptcache', False)}")
21,786 | import os
from gptcache.manager import get_data_manager
from gptcache.adapter import openai
from gptcache import cache
import openai
def run():
    """Exact-match caching demo using the simple map data manager.

    Answers are persisted in ``data_map.txt`` next to this script; repeated
    runs with the same prompt are served from that file instead of OpenAI.
    """
    dir_name, _ = os.path.split(os.path.abspath(__file__))
    data_file = dir_name + '/data_map.txt'
    # Map-based manager: exact-string keys, capped at 10 entries.
    data_manager = get_data_manager(data_path=data_file, max_size=10)
    cache.init(data_manager=data_manager)
    cache.set_openai_key()
    answer = openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=[
            {'role': 'user', 'content': 'what is chatgpt'}
        ],
    )
    print(answer)
21,787 | import os
import numpy as np
from gptcache import cache
from gptcache.adapter import openai
from gptcache.manager import get_data_manager, CacheBase, VectorBase
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
# Embedding dimensionality shared by the mock embedder and the vector index.
d = 8


def mock_embeddings(data, **kwargs):
    """Stand-in embedding function: ignore the input text and return a random
    d-dimensional float32 vector (cache behavior then depends only on the
    distance evaluation, not on real semantics)."""
    random_vector = np.random.random((d, ))
    return random_vector.astype('float32')
import openai
class SearchDistanceEvaluation(SimilarityEvaluation):
    """Use the ANN search distance to score the similarity of a sentence pair.

    This evaluator compares two embeddings according to the distance already
    computed in the embedding-retrieval stage: `search_result` is the distance
    from the approximate-nearest-neighbor search and has been put into
    `cache_dict`.  `max_distance` bounds that distance to [0, `max_distance`].
    `positive` indicates the distance is directly proportional to the
    similarity of the two entities; when `positive` is False, the distance is
    subtracted from `max_distance` to get the final score (larger = more
    similar).

    :param max_distance: the upper bound applied to the raw distance.
    :type max_distance: float
    :param positive: True if a larger distance indicates more similar
                     entities; otherwise False.
    :type positive: bool

    Example:
        .. code-block:: python

            from gptcache.similarity_evaluation import SearchDistanceEvaluation

            evaluation = SearchDistanceEvaluation()
            score = evaluation.evaluation(
                {},
                {
                    "search_result": (1, None)
                }
            )
    """

    def __init__(self, max_distance: float = 4.0, positive: bool = False):
        self.max_distance = max_distance
        self.positive = positive

    def evaluation(
        self, src_dict: Dict[str, Any], cache_dict: Dict[str, Any], **_
    ) -> float:
        """Evaluate the similarity score of a pair.

        :param src_dict: the query dictionary to evaluate with cache (unused
                         here; the score comes from the stored search result).
        :type src_dict: Dict
        :param cache_dict: the cache dictionary; must contain ``search_result``
                           as a ``(distance, data)`` tuple.
        :type cache_dict: Dict

        :return: evaluation score, within the bounds reported by ``range()``.
        """
        distance, _ = cache_dict["search_result"]
        # Clamp the raw ANN distance into [0, max_distance].
        if distance < 0:
            distance = 0
        elif distance > self.max_distance:
            distance = self.max_distance
        if self.positive:
            return distance
        # Invert so that a smaller distance yields a higher score.
        return self.max_distance - distance

    def range(self) -> Tuple[float, float]:
        """Range of similarity score.

        :return: minimum and maximum of similarity score.
        """
        return 0.0, self.max_distance
def run():
    """Smoke-test semantic caching against each supported scalar store.

    For every scalar backend the Faiss index is rebuilt from scratch and the
    same question is asked twice: the first call goes to OpenAI, the second
    should be answered from the cache.
    """
    scalar_stores = [
        CacheBase('sqlite', sql_url='sqlite:///./sqlite.db'),
        CacheBase('postgresql', sql_url='postgresql+psycopg2://postgres:123456@127.0.0.1:5432/postgres'),
        CacheBase('mysql', sql_url='mysql+pymysql://root:123456@127.0.0.1:3306/mysql'),
        CacheBase('mariadb', sql_url='mariadb+pymysql://root:123456@127.0.0.1:3307/mysql'),
        # Bug fix: the SQLAlchemy dialect for SQL Server is "mssql", not "ssql".
        CacheBase('sqlserver', sql_url='mssql+pyodbc://sa:Strongpsw_123@127.0.0.1:1434/msdb?driver=ODBC+Driver+17+for+SQL+Server'),
        CacheBase('oracle', sql_url='oracle+cx_oracle://oracle:123456@127.0.0.1:1521/?service_name=helowin&encoding=UTF-8&nencoding=UTF-8'),
        CacheBase('dynamo'),
    ]
    for scalar_store in scalar_stores:
        # Start each backend with an empty vector index so earlier iterations
        # cannot leak cache hits into later ones.
        if os.path.exists('faiss.index'):
            os.remove('faiss.index')
        vector_base = VectorBase('faiss', dimension=d)
        data_manager = get_data_manager(scalar_store, vector_base)
        cache.init(embedding_func=mock_embeddings,
                   data_manager=data_manager,
                   similarity_evaluation=SearchDistanceEvaluation(),
                   )
        cache.set_openai_key()
        answer = openai.ChatCompletion.create(
            model='gpt-3.5-turbo',
            messages=[
                {'role': 'user', 'content': 'what is chatgpt'}
            ],
        )
        print('answer:', answer)
        # Identical question: should now be served from the cache.
        answer = openai.ChatCompletion.create(
            model='gpt-3.5-turbo',
            messages=[
                {'role': 'user', 'content': 'what is chatgpt'}
            ],
        )
        print('answer cached:', answer)
21,788 | import numpy as np
from gptcache import cache
from gptcache.adapter import openai
from gptcache.manager import CacheBase, VectorBase, get_data_manager
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
# Embedding dimensionality shared by the mock embedder and the vector index.
d = 8


def mock_embeddings(data, **kwargs):
    """Stand-in embedding function: ignore the input text and return a random
    d-dimensional float32 vector (cache behavior then depends only on the
    distance evaluation, not on real semantics)."""
    random_vector = np.random.random((d, ))
    return random_vector.astype('float32')
import openai
class SearchDistanceEvaluation(SimilarityEvaluation):
    """Use the ANN search distance to score the similarity of a sentence pair.

    This evaluator compares two embeddings according to the distance already
    computed in the embedding-retrieval stage: `search_result` is the distance
    from the approximate-nearest-neighbor search and has been put into
    `cache_dict`.  `max_distance` bounds that distance to [0, `max_distance`].
    `positive` indicates the distance is directly proportional to the
    similarity of the two entities; when `positive` is False, the distance is
    subtracted from `max_distance` to get the final score (larger = more
    similar).

    :param max_distance: the upper bound applied to the raw distance.
    :type max_distance: float
    :param positive: True if a larger distance indicates more similar
                     entities; otherwise False.
    :type positive: bool

    Example:
        .. code-block:: python

            from gptcache.similarity_evaluation import SearchDistanceEvaluation

            evaluation = SearchDistanceEvaluation()
            score = evaluation.evaluation(
                {},
                {
                    "search_result": (1, None)
                }
            )
    """

    def __init__(self, max_distance: float = 4.0, positive: bool = False):
        self.max_distance = max_distance
        self.positive = positive

    def evaluation(
        self, src_dict: Dict[str, Any], cache_dict: Dict[str, Any], **_
    ) -> float:
        """Evaluate the similarity score of a pair.

        :param src_dict: the query dictionary to evaluate with cache (unused
                         here; the score comes from the stored search result).
        :type src_dict: Dict
        :param cache_dict: the cache dictionary; must contain ``search_result``
                           as a ``(distance, data)`` tuple.
        :type cache_dict: Dict

        :return: evaluation score, within the bounds reported by ``range()``.
        """
        distance, _ = cache_dict["search_result"]
        # Clamp the raw ANN distance into [0, max_distance].
        if distance < 0:
            distance = 0
        elif distance > self.max_distance:
            distance = self.max_distance
        if self.positive:
            return distance
        # Invert so that a smaller distance yields a higher score.
        return self.max_distance - distance

    def range(self) -> Tuple[float, float]:
        """Range of similarity score.

        :return: minimum and maximum of similarity score.
        """
        return 0.0, self.max_distance
def run():
    """Smoke-test semantic caching against each supported vector store.

    For every vector backend a fresh sqlite + vector-store data manager is
    initialized and one ChatCompletion request is sent through the cached API.
    """
    vector_stores = [
        'faiss',
        'milvus',
        'chromadb',
        'docarray',
        'redis',
        'weaviate',
    ]
    for vector_store in vector_stores:
        cache_base = CacheBase('sqlite')
        # Vector index sized to the mock embedding dimension `d`.
        vector_base = VectorBase(vector_store, dimension=d)
        data_manager = get_data_manager(cache_base, vector_base)
        cache.init(
            embedding_func=mock_embeddings,
            data_manager=data_manager,
            similarity_evaluation=SearchDistanceEvaluation(),
        )
        cache.set_openai_key()
        answer = openai.ChatCompletion.create(
            model='gpt-3.5-turbo',
            messages=[{'role': 'user', 'content': 'what is chatgpt'}],
        )
        print(answer)
21,789 | from gptcache import Cache
from gptcache.embedding import Onnx
from gptcache.manager.eviction import EvictionBase
from gptcache.manager import get_data_manager, CacheBase, VectorBase, manager_factory
def Onnx(model="GPTCache/paraphrase-albert-onnx"):
    # Factory shortcut: build the ONNX sentence-embedding backend (delegates
    # to the `onnx` module in scope — presumably gptcache.embedding.onnx).
    return onnx.Onnx(model)
def EvictionBase(name: str, **kwargs):
    """Generate a specific eviction store with the given configuration.

    Thin factory that forwards to ``eviction_manager.EvictionBase.get``.

    :param name: the name of the eviction, like: memory
    :type name: str
    :param policy: eviction strategy
    :type policy: str
    :param maxsize: the maxsize of cache data
    :type maxsize: int
    :param clean_size: will clean the size of data when the size of cache data reaches the max size
    :type clean_size: int
    :param on_evict: the function for cleaning the data in the store
    :type on_evict: Callable[[List[Any]], None]

    Example:
        .. code-block:: python

            from gptcache.manager import EvictionBase

            cache_base = EvictionBase('memory', policy='lru', maxsize=10, clean_size=2, on_evict=lambda x: print(x))
    """
    return eviction_manager.EvictionBase.get(name, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `get_data_manager_example` function. Write a Python function `def get_data_manager_example()` to solve the following problem:
This example shows how to create a data manager with MongoDB as the scalar storage, a Faiss vector base, and a Redis eviction base. This type of configuration can be used to scale GPTCache horizontally: keys are maintained in the Redis key-value store instead of in memory, and key eviction is handled by Redis according to its eviction policy.
Here is the function:
def get_data_manager_example():
    """
    Create a data manager with MongoDB as the scalar storage, a Faiss vector
    base, and a Redis eviction base, then seed it with one Q/A pair.

    This configuration can be used to scale GPTCache horizontally: keys are
    maintained in the Redis key-value store instead of in memory, and key
    eviction is handled by Redis according to its eviction policy.
    """
    onnx = Onnx()
    data_manager = get_data_manager(cache_base=CacheBase("mongo", url="mongodb://localhost:27017/"),
                                    vector_base=VectorBase("faiss", dimension=onnx.dimension),
                                    eviction_base=EvictionBase("redis",
                                                               maxmemory="100mb",
                                                               policy="allkeys-lru",
                                                               ttl=100))
    cache = Cache()
    cache.init(data_manager=data_manager)
    question = "What is github?"
    answer = "Online platform for version control and code collaboration."
    # Embed the question and load the pair directly into the cache.
    embedding = onnx.to_embeddings(question)
    cache.import_data([question], [answer], [embedding])
21,790 | from gptcache import Cache
from gptcache.embedding import Onnx
from gptcache.manager.eviction import EvictionBase
from gptcache.manager import get_data_manager, CacheBase, VectorBase, manager_factory
def Onnx(model="GPTCache/paraphrase-albert-onnx"):
    # Factory shortcut: build the ONNX sentence-embedding backend (delegates
    # to the `onnx` module in scope — presumably gptcache.embedding.onnx).
    return onnx.Onnx(model)
def EvictionBase(name: str, **kwargs):
    """Generate a specific eviction store with the given configuration.

    Thin factory that forwards to ``eviction_manager.EvictionBase.get``.

    :param name: the name of the eviction, like: memory
    :type name: str
    :param policy: eviction strategy
    :type policy: str
    :param maxsize: the maxsize of cache data
    :type maxsize: int
    :param clean_size: will clean the size of data when the size of cache data reaches the max size
    :type clean_size: int
    :param on_evict: the function for cleaning the data in the store
    :type on_evict: Callable[[List[Any]], None]

    Example:
        .. code-block:: python

            from gptcache.manager import EvictionBase

            cache_base = EvictionBase('memory', policy='lru', maxsize=10, clean_size=2, on_evict=lambda x: print(x))
    """
    return eviction_manager.EvictionBase.get(name, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `get_manager_example_redis_only` function. Write a Python function `def get_manager_example_redis_only()` to solve the following problem:
Note: since `RedisScalarStorage` can be configured to handle key TTLs and eviction internally, `no_op_eviction` is used as the eviction base here; it will not add any keys or update their TTLs. This example shows how to create a data manager with Redis as both the scalar storage and the eviction base. This type of configuration can be used to scale GPTCache horizontally: keys are maintained in the Redis key-value store instead of in memory, and key eviction is handled by Redis according to its eviction policy.
Here is the function:
def get_manager_example_redis_only():
    """
    Create a data manager with Redis as the scalar storage plus a no-op
    eviction base, then seed it with one Q/A pair.

    Note: the ``RedisScalarStorage`` itself is configured to handle key TTLs
    and eviction, so ``no_op_eviction`` is used as the eviction base — it
    never adds keys or updates their TTLs.  This configuration can be used to
    scale GPTCache horizontally: keys are maintained in the Redis key-value
    store instead of in memory, and eviction is handled by Redis according to
    its eviction policy.
    """
    onnx = Onnx()
    data_manager = get_data_manager(cache_base=CacheBase("redis", maxmemory="100mb", policy="allkeys-lru", ttl=100),
                                    vector_base=VectorBase("faiss", dimension=onnx.dimension),
                                    eviction_base=EvictionBase("no_op_eviction"))
    cache = Cache()
    cache.init(data_manager=data_manager)
    question = "What is github?"
    answer = "Online platform for version control and code collaboration."
    # Embed the question and load the pair directly into the cache.
    embedding = onnx.to_embeddings(question)
    cache.import_data([question], [answer], [embedding])
21,791 | from gptcache import Cache
from gptcache.embedding import Onnx
from gptcache.manager.eviction import EvictionBase
from gptcache.manager import get_data_manager, CacheBase, VectorBase, manager_factory
def Onnx(model="GPTCache/paraphrase-albert-onnx"):
return onnx.Onnx(model)
def manager_factory_example():
    """Build a Redis(scalar) + Faiss(vector) data manager through
    ``manager_factory``, with Redis-backed eviction (1-second TTL for
    demonstration), then seed it with one Q/A pair.
    """
    onnx = Onnx()
    data_manager = manager_factory("redis,faiss",
                                   eviction_manager="redis",
                                   scalar_params={"url": "redis://localhost:6379"},
                                   vector_params={"dimension": onnx.dimension},
                                   eviction_params={"maxmemory": "100mb",
                                                    "policy": "allkeys-lru",
                                                    "ttl": 1}
                                   )
    cache = Cache()
    cache.init(data_manager=data_manager)
    question = "What is github?"
    answer = "Online platform for version control and code collaboration."
    # Embed the question and load the pair directly into the cache.
    embedding = onnx.to_embeddings(question)
    cache.import_data([question], [answer], [embedding])
21,792 | import os
import time
from gptcache import cache
from gptcache.adapter import openai
from gptcache.embedding import Onnx
from gptcache.manager import manager_factory
from gptcache.processor.context import SummarizationContextProcess
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
def response_text(openai_resp):
    """Extract the assistant message text from an OpenAI ChatCompletion
    response.

    NOTE(review): the body of this helper appears to have been lost during
    extraction; the intact copy later in this file returns
    ``openai_resp["choices"][0]["message"]["content"]`` — restore it before
    running this example.
    """


def cache_init():
    """Initialize the global GPTCache instance for this example.

    NOTE(review): the body of this function appears to have been lost during
    extraction; see the intact ``cache_init`` later in this file (context
    processor + ONNX embeddings + sqlite/faiss manager + distance evaluation)
    for the intended setup.
    """
import openai
def base_request():
    """Send the same multi-turn conversation twice through the cached
    ChatCompletion API; the second round should be answered from the cache
    (and print a much smaller elapsed time)."""
    cache_init()
    for _ in range(2):
        start_time = time.time()
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[
                {
                    "role": "user",
                    "content": "Can you give me some tips for staying focused while working from home?",
                },
                {
                    "role": "system",
                    "content": "Sure! Here are some tips: create a designated workspace, set a schedule, take breaks, minimize distractions, and practice good time management.",
                },
                {
                    "role": "user",
                    "content": "Those are all great suggestions. Do you have any tips for maintaining a healthy work-life balance while working from home?",
                },
                {
                    "role": "system",
                    "content": "Definitely! Setting clear boundaries between work and personal time, scheduling regular breaks throughout the day, and finding ways to disconnect from work after hours can help. Additionally, make time for hobbies and other activities you enjoy outside of work to help you relax and recharge.",
                },
                {"role": "user", "content": "can you give meore tips?"},
            ],
            # temperature=0 keeps the (uncached) completion deterministic.
            temperature=0,
        )
        print("Time consuming: {:.2f}s".format(time.time() - start_time))
        print(f"Received: {response_text(response)}")
21,793 | import os
import time
from gptcache import cache
from gptcache.adapter import openai
from gptcache.embedding import Onnx
from gptcache.manager import manager_factory
from gptcache.processor.context import SelectiveContextProcess
from gptcache.similarity_evaluation import SearchDistanceEvaluation
from gptcache.utils import import_selective_context
def response_text(openai_resp):
    """Return the assistant message text from an OpenAI ChatCompletion response."""
    first_choice = openai_resp["choices"][0]
    return first_choice["message"]["content"]
def cache_init():
    """Configure the global cache for context-aware semantic caching.

    Uses SelectiveContextProcess to compress the chat history before
    embedding, ONNX sentence embeddings, a sqlite+faiss data manager, and
    search-distance similarity evaluation.
    """
    context_processor = SelectiveContextProcess()
    onnx = Onnx()
    data_manager = manager_factory(
        "sqlite,faiss", vector_params={"dimension": onnx.dimension}
    )
    evaluation = SearchDistanceEvaluation()
    cache.init(
        pre_embedding_func=context_processor.pre_process,
        embedding_func=onnx.to_embeddings,
        data_manager=data_manager,
        similarity_evaluation=evaluation,
    )
    # Placeholder credential: replace "API KEY" with a real key before running.
    os.environ["OPENAI_API_KEY"] = "API KEY"
    cache.set_openai_key()
import openai
def base_request():
    """Send the same multi-turn conversation twice through the cached
    ChatCompletion API; the second round should be answered from the cache
    (and print a much smaller elapsed time)."""
    cache_init()
    for _ in range(2):
        start_time = time.time()
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[
                {
                    "role": "user",
                    "content": "Can you give me some tips for staying focused while working from home?",
                },
                {
                    "role": "system",
                    "content": "Sure! Here are some tips: create a designated workspace, set a schedule, take breaks, minimize distractions, and practice good time management.",
                },
                {
                    "role": "user",
                    "content": "Those are all great suggestions. Do you have any tips for maintaining a healthy work-life balance while working from home?",
                },
                {
                    "role": "system",
                    "content": "Definitely! Setting clear boundaries between work and personal time, scheduling regular breaks throughout the day, and finding ways to disconnect from work after hours can help. Additionally, make time for hobbies and other activities you enjoy outside of work to help you relax and recharge.",
                },
                {"role": "user", "content": "can you give meore tips?"},
            ],
            # temperature=0 keeps the (uncached) completion deterministic.
            temperature=0,
        )
        print("Time consuming: {:.2f}s".format(time.time() - start_time))
        print(f"Received: {response_text(response)}")
21,794 | import json
import os
import time
from gptcache.adapter import openai
from gptcache import cache, Config
from gptcache.manager import get_data_manager, CacheBase, VectorBase
from gptcache.similarity_evaluation.onnx import OnnxModelEvaluation
from gptcache.embedding import Onnx as EmbeddingOnnx
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
import openai
class SearchDistanceEvaluation(SimilarityEvaluation):
    """Use the ANN search distance to score the similarity of a sentence pair.

    This evaluator compares two embeddings according to the distance already
    computed in the embedding-retrieval stage: `search_result` is the distance
    from the approximate-nearest-neighbor search and has been put into
    `cache_dict`.  `max_distance` bounds that distance to [0, `max_distance`].
    `positive` indicates the distance is directly proportional to the
    similarity of the two entities; when `positive` is False, the distance is
    subtracted from `max_distance` to get the final score (larger = more
    similar).

    :param max_distance: the upper bound applied to the raw distance.
    :type max_distance: float
    :param positive: True if a larger distance indicates more similar
                     entities; otherwise False.
    :type positive: bool

    Example:
        .. code-block:: python

            from gptcache.similarity_evaluation import SearchDistanceEvaluation

            evaluation = SearchDistanceEvaluation()
            score = evaluation.evaluation(
                {},
                {
                    "search_result": (1, None)
                }
            )
    """

    def __init__(self, max_distance: float = 4.0, positive: bool = False):
        self.max_distance = max_distance
        self.positive = positive

    def evaluation(
        self, src_dict: Dict[str, Any], cache_dict: Dict[str, Any], **_
    ) -> float:
        """Evaluate the similarity score of a pair.

        :param src_dict: the query dictionary to evaluate with cache (unused
                         here; the score comes from the stored search result).
        :type src_dict: Dict
        :param cache_dict: the cache dictionary; must contain ``search_result``
                           as a ``(distance, data)`` tuple.
        :type cache_dict: Dict

        :return: evaluation score, within the bounds reported by ``range()``.
        """
        distance, _ = cache_dict["search_result"]
        # Clamp the raw ANN distance into [0, max_distance].
        if distance < 0:
            distance = 0
        elif distance > self.max_distance:
            distance = self.max_distance
        if self.positive:
            return distance
        # Invert so that a smaller distance yields a higher score.
        return self.max_distance - distance

    def range(self) -> Tuple[float, float]:
        """Range of similarity score.

        :return: minimum and maximum of similarity score.
        """
        return 0.0, self.max_distance
def run():
    """Benchmark semantic-cache accuracy and latency on mock question pairs.

    Each mock pair maps an "origin" question (cached with its list index as
    the answer) to a "similar" paraphrase.  Every paraphrase is then asked
    through the cached ChatCompletion API; an answer equal to the pair's id
    counts as a correct (positive) cache hit.
    """
    with open("mock_data.json", "r") as mock_file:
        mock_data = json.load(mock_file)
    embedding_onnx = EmbeddingOnnx()

    # if you want more accurate results,
    # you can use onnx's results to evaluate the model,
    # it will make the results more accurate, but the cache hit rate will decrease
    # evaluation_onnx = EvaluationOnnx()
    # class WrapEvaluation(SearchDistanceEvaluation):
    #
    #     def __init__(self):
    #         self.evaluation_onnx = EvaluationOnnx()
    #
    #     def evaluation(self, src_dict, cache_dict, **kwargs):
    #         rank1 = super().evaluation(src_dict, cache_dict, **kwargs)
    #         if rank1 <= 0.5:
    #             rank2 = evaluation_onnx.evaluation(src_dict, cache_dict, **kwargs)
    #             return rank2 if rank2 != 0 else 1
    #         return 0
    #
    #     def range(self):
    #         return 0.0, 1.0

    class WrapEvaluation(SearchDistanceEvaluation):
        """Thin pass-through around SearchDistanceEvaluation (placeholder for
        the ONNX re-ranking variant sketched in the comment above)."""

        def evaluation(self, src_dict, cache_dict, **kwargs):
            return super().evaluation(src_dict, cache_dict, **kwargs)

        def range(self):
            return super().range()

    sqlite_file = "sqlite.db"
    faiss_file = "faiss.index"
    # Skip the (slow) data import when both stores already exist on disk.
    has_data = os.path.isfile(sqlite_file) and os.path.isfile(faiss_file)

    cache_base = CacheBase("sqlite")
    vector_base = VectorBase("faiss", dimension=embedding_onnx.dimension)
    data_manager = get_data_manager(cache_base, vector_base, max_size=100000)
    cache.init(
        embedding_func=embedding_onnx.to_embeddings,
        data_manager=data_manager,
        similarity_evaluation=WrapEvaluation(),
        config=Config(similarity_threshold=0.95),
    )

    # Use each pair's list index as its id; the id doubles as the cached
    # "answer" so a hit can be verified exactly.
    i = 0
    for pair in mock_data:
        pair["id"] = str(i)
        i += 1

    if not has_data:
        print("insert data")
        start_time = time.time()
        questions, answers = map(
            list, zip(*((pair["origin"], pair["id"]) for pair in mock_data))
        )
        cache.import_data(questions=questions, answers=answers)
        print(
            "end insert data, time consuming: {:.2f}s".format(time.time() - start_time)
        )

    all_time = 0.0
    hit_cache_positive, hit_cache_negative = 0, 0
    fail_count = 0
    for pair in mock_data:
        mock_messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": pair["similar"]},
        ]
        try:
            start_time = time.time()
            res = openai.ChatCompletion.create(
                model="gpt-3.5-turbo",
                messages=mock_messages,
            )
            res_text = openai.get_message_from_openai_answer(res)
            if res_text == pair["id"]:
                hit_cache_positive += 1
            else:
                hit_cache_negative += 1
            consume_time = time.time() - start_time
            all_time += consume_time
            print("cache hint time consuming: {:.2f}s".format(consume_time))
        # Bug fix: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit, making the benchmark impossible to
        # abort cleanly; catch Exception instead.
        except Exception:
            fail_count += 1

    print("average time: {:.2f}s".format(all_time / len(mock_data)))
    print("cache_hint_positive:", hit_cache_positive)
    print("hit_cache_negative:", hit_cache_negative)
    print("fail_count:", fail_count)
    print("average embedding time: ", cache.report.average_embedding_time())
    print("average search time: ", cache.report.average_search_time())
21,795 | import re
import string
from typing import Dict, Any
The provided code snippet includes necessary dependencies for implementing the `last_content` function. Write a Python function `def last_content(data: Dict[str, Any], **_: Dict[str, Any]) -> Any` to solve the following problem:
get the last content of the message list :param data: the user llm request data :type data: Dict[str, Any] Example: .. code-block:: python from gptcache.processor.pre import last_content content = last_content({"messages": [{"content": "foo1"}, {"content": "foo2"}]}) # content = "foo2"
Here is the function:
def last_content(data: Dict[str, Any], **_: Dict[str, Any]) -> Any:
    """Return the ``content`` of the final message in the request.

    :param data: the user llm request data; must contain a ``messages`` list
                 whose entries each carry a ``content`` key.
    :type data: Dict[str, Any]

    Example:
        .. code-block:: python

            from gptcache.processor.pre import last_content

            content = last_content({"messages": [{"content": "foo1"}, {"content": "foo2"}]})
            # content = "foo2"
    """
    messages = data.get("messages")
    final_message = messages[-1]
    return final_message["content"]
21,796 | import re
import string
from typing import Dict, Any
The provided code snippet includes necessary dependencies for implementing the `last_content_without_prompt` function. Write a Python function `def last_content_without_prompt(data: Dict[str, Any], **params: Dict[str, Any]) -> Any` to solve the following problem:
get the last content of the message list without prompts content :param data: the user llm request data :type data: Dict[str, Any] :param params: the special gptcache params, like prompts param in the cache object :type params: Dict[str, Any] Example: .. code-block:: python from gptcache.processor.pre import last_content_without_prompt content = last_content_without_prompt( {"messages": [{"content": "foo1"}, {"content": "foo2"}]}, prompts=["foo"] ) # content = "2"
Here is the function:
def last_content_without_prompt(data: Dict[str, Any], **params: Dict[str, Any]) -> Any:
    """Return the last message's content with all prompt strings stripped out.

    :param data: the user llm request data; must contain a ``messages`` list
                 whose entries each carry a ``content`` key.
    :type data: Dict[str, Any]
    :param params: the special gptcache params; reads the ``prompts`` list.
                   Each prompt is treated as a LITERAL string to remove
                   (regex metacharacters in prompts are escaped).
    :type params: Dict[str, Any]

    Example:
        .. code-block:: python

            from gptcache.processor.pre import last_content_without_prompt

            content = last_content_without_prompt(
                {"messages": [{"content": "foo1"}, {"content": "foo2"}]}, prompts=["foo"]
            )
            # content = "2"
    """
    last_content_str = data.get("messages")[-1]["content"]
    prompts = params.get("prompts", [])
    # No prompts (None or empty) -> nothing to strip.
    if not prompts:
        return last_content_str
    # re.escape each prompt so characters like '.', '(', '+' are matched
    # literally instead of being interpreted as regex operators (the
    # previous unescaped join could delete unintended text or raise re.error).
    pattern = "|".join(re.escape(prompt) for prompt in prompts)
    return re.sub(pattern, "", last_content_str)
21,797 | import re
import string
from typing import Dict, Any
def _get_pattern_value(pattern_str: str, value_str: str):
literal_text_arr = []
field_name_arr = []
for literal_text, field_name, _, _ in string.Formatter().parse(pattern_str):
literal_text_arr.append(literal_text)
if field_name is not None:
field_name_arr.append(
field_name if field_name else str(len(field_name_arr))
)
pattern_values = {}
last_end = 0
for i, literal_text in enumerate(literal_text_arr):
start = value_str.find(literal_text, last_end)
if i == len(literal_text_arr) - 1:
end = len(value_str)
else:
end = value_str.find(literal_text_arr[i + 1], start + 1)
if start == -1 or end == -1:
break
start += len(literal_text)
pattern_values[field_name_arr[i]] = value_str[start:end]
last_end = end
return pattern_values
The provided code snippet includes necessary dependencies for implementing the `last_content_without_template` function. Write a Python function `def last_content_without_template(data: Dict[str, Any], **params: Dict[str, Any]) -> Any` to solve the following problem:
get the last content's template values of the message list without template content. When considering a cache agent or chain, the majority of the content consists of template content, while the essential information is simply a list of parameters within the template. In this way, the cache key is composed of a string made up of all the parameter values in the list. WARNING: Two parameters without intervals cannot appear in the template, for example: template = "{foo}{hoo}" is not supported, but template = "{foo}:{hoo}" is supported :param data: the user llm request data :type data: Dict[str, Any] :Example with str template: .. code-block:: python from gptcache import Config from gptcache.processor.pre import last_content_without_template template_obj = "tell me a joke about {subject}" prompt = template_obj.format(subject="animal") value = last_content_without_template( data={"messages": [{"content": prompt}]}, cache_config=Config(template=template_obj) ) print(value) # ['animal'] :Example with langchain template: .. code-block:: python from langchain import PromptTemplate from gptcache import Config from gptcache.processor.pre import last_content_without_template template_obj = PromptTemplate.from_template("tell me a joke about {subject}") prompt = template_obj.format(subject="animal") value = last_content_without_template( data={"messages": [{"content": prompt}]}, cache_config=Config(template=template_obj.template), ) print(value) # ['animal'] NOTE: At present, only the simple PromptTemplate in langchain is supported. For ChatPromptTemplate, it needs to be adjusted according to the template array. If you need to use it, you need to pass in the final dialog template yourself. The reason why it cannot be advanced is that ChatPromptTemplate does not provide a method to directly return the template string.
Here is the function:
def last_content_without_template(data: Dict[str, Any], **params: Dict[str, Any]) -> Any:
    """Return the last message's template parameter values as a cache key.

    When a cache agent or chain is used, most of the content is boilerplate
    template text; the essential information is the list of parameter values
    filled into the template. The cache key is therefore the string form of
    that value list. If no template is configured, the raw content is returned.

    WARNING: two parameters without an interval cannot appear in the template,
    e.g. ``"{foo}{hoo}"`` is not supported, but ``"{foo}:{hoo}"`` is.

    :param data: the user llm request data
    :type data: Dict[str, Any]
    :param params: the special gptcache params; reads ``cache_config`` and
                   uses its ``template`` attribute when set.

    :Example with str template:
        .. code-block:: python

            from gptcache import Config
            from gptcache.processor.pre import last_content_without_template

            template_obj = "tell me a joke about {subject}"
            prompt = template_obj.format(subject="animal")
            value = last_content_without_template(
                data={"messages": [{"content": prompt}]}, cache_config=Config(template=template_obj)
            )
            print(value)
            # ['animal']

    :Example with langchain template:
        .. code-block:: python

            from langchain import PromptTemplate

            from gptcache import Config
            from gptcache.processor.pre import last_content_without_template

            template_obj = PromptTemplate.from_template("tell me a joke about {subject}")
            prompt = template_obj.format(subject="animal")
            value = last_content_without_template(
                data={"messages": [{"content": prompt}]},
                cache_config=Config(template=template_obj.template),
            )
            print(value)
            # ['animal']

    NOTE: at present only the simple ``PromptTemplate`` in langchain is
    supported. For ``ChatPromptTemplate`` the final dialog template must be
    passed in by the caller, since ``ChatPromptTemplate`` does not expose a
    method returning the template string directly.
    """
    content = data.get("messages")[-1]["content"]
    cache_config = params.get("cache_config", None)
    # Without a configured template there is nothing to strip.
    if not cache_config or not cache_config.template:
        return content
    extracted = _get_pattern_value(cache_config.template, content)
    return str(list(extracted.values()))
21,798 | import re
import string
from typing import Dict, Any
The provided code snippet includes necessary dependencies for implementing the `all_content` function. Write a Python function `def all_content(data: Dict[str, Any], **_: Dict[str, Any]) -> Any` to solve the following problem:
get all content of the message list :param data: the user llm request data :type data: Dict[str, Any] :Example: .. code-block:: python from gptcache.processor.pre import all_content content = all_content( {"messages": [{"content": "foo1"}, {"content": "foo2"}]} ) # content = "foo1\\nfoo2"
Here is the function:
def all_content(data: Dict[str, Any], **_: Dict[str, Any]) -> Any:
    """Join every message's ``content`` with newlines into a single string.

    :param data: the user llm request data; must contain a ``messages`` list
                 whose entries each carry a ``content`` key.
    :type data: Dict[str, Any]

    :Example:
        .. code-block:: python

            from gptcache.processor.pre import all_content

            content = all_content(
                {"messages": [{"content": "foo1"}, {"content": "foo2"}]}
            )
            # content = "foo1\\nfoo2"
    """
    messages = data.get("messages")
    return "\n".join(message["content"] for message in messages)
21,799 | import re
import string
from typing import Dict, Any
The provided code snippet includes necessary dependencies for implementing the `get_file_bytes` function. Write a Python function `def get_file_bytes(data: Dict[str, Any], **_: Dict[str, Any]) -> bytes` to solve the following problem:
get the file bytes of the llm request params :param data: the user llm request data :type data: Dict[str, Any] Example: .. code-block:: python from gptcache.processor.pre import get_file_bytes content = get_file_bytes({"file": open("test.txt", "rb")})
Here is the function:
def get_file_bytes(data: Dict[str, Any], **_: Dict[str, Any]) -> bytes:
    """Return the buffered bytes of the ``file`` request parameter.

    Uses ``peek()`` so the stream position is not advanced.

    :param data: the user llm request data; must contain a ``file`` entry
                 that is a buffered binary stream (supports ``peek``).
    :type data: Dict[str, Any]

    Example:
        .. code-block:: python

            from gptcache.processor.pre import get_file_bytes

            content = get_file_bytes({"file": open("test.txt", "rb")})
    """
    file_obj = data.get("file")
    return file_obj.peek()
21,800 | import re
import string
from typing import Dict, Any
The provided code snippet includes necessary dependencies for implementing the `get_input_str` function. Write a Python function `def get_input_str(data: Dict[str, Any], **_: Dict[str, Any]) -> str` to solve the following problem:
get the image and question str of the llm request params :param data: the user llm request data :type data: Dict[str, Any] Example: .. code-block:: python from gptcache.processor.pre import get_input_str content = get_input_str({"input": {"image": open("test.png", "rb"), "question": "foo"}})
Here is the function:
def get_input_str(data: Dict[str, Any], **_: Dict[str, Any]) -> str:
    """Build a key string from the ``image`` bytes plus the ``question`` text.

    The image's buffered bytes are read via ``peek()`` (stream position is not
    advanced), stringified with ``str()``, and concatenated with the question.

    :param data: the user llm request data; must contain an ``input`` dict
                 with an ``image`` buffered binary stream and a ``question`` str.
    :type data: Dict[str, Any]

    Example:
        .. code-block:: python

            from gptcache.processor.pre import get_input_str

            content = get_input_str({"input": {"image": open("test.png", "rb"), "question": "foo"}})
    """
    input_data = data.get("input")
    image_bytes = input_data["image"].peek()
    return str(image_bytes) + input_data["question"]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.