import json

import requests as req
import streamlit as st
|
|
| |
| |
|
|
|
|
# Page header for the demo app.
st.title('Question answering help desk application')


# Section 1: run extractive QA against a Wikipedia-sourced paragraph.
st.subheader('1. A simple question')


# Wikipedia API request pieces, concatenated later into one GET URL:
# plain-text extract of the "BERT (language model)" article.
WIKI_URL = 'https://en.wikipedia.org/w/api.php'
WIKI_QUERY = "?format=json&action=query&prop=extracts&explaintext=1"
WIKI_BERT = "&titles=BERT_(language_model)"
WIKI_METHOD = 'GET'
|
|
# Fetch the BERT article extract from the Wikipedia API to use as the
# default QA context paragraph.
response = req.request(WIKI_METHOD, f'{WIKI_URL}{WIKI_QUERY}{WIKI_BERT}')
resp_json = json.loads(response.content.decode("utf-8"))
# The API keys results by numeric page id. Take the first (only) page
# instead of hard-coding the id '62026514', which is fragile across
# redirects / title changes.
pages = resp_json['query']['pages']
wiki_bert = next(iter(pages.values()))['extract']
paragraph = wiki_bert


# Editable context passage: any user edit replaces the fetched text.
written_passage = st.text_area(
    'Paragraph used for QA (you can also edit, or copy/paste new content)',
    paragraph,
    height=250
)
if written_passage:
    paragraph = written_passage
|
|
# Default question shown in the input box; any user edit takes precedence.
default_question = 'How many languages does bert understand?'
written_question = st.text_input(
    'Question used for QA (you can also edit, and experiment with the answers)',
    default_question
)
question = written_question if written_question else default_question
|
|
# Hugging Face hosted inference endpoint for extractive QA (SQuAD2 model).
QA_URL = "https://api-inference.huggingface.co/models/deepset/roberta-base-squad2"
QA_METHOD = 'POST'


if st.button('Run QA inference (get answer prediction)'):
    if paragraph and question:
        # POST the question/context pair as a JSON payload.
        inputs = {'question': question, 'context': paragraph}
        payload = json.dumps(inputs)
        prediction = req.request(QA_METHOD, QA_URL, data=payload)
        answer = json.loads(prediction.content.decode("utf-8"))
        if isinstance(answer, dict) and 'answer' in answer:
            answer_span = answer["answer"]
            answer_score = answer["score"]
            st.write(f'Answer: **{answer_span}**')
            # Show the answer inside a window of surrounding text,
            # bolding the span; clamp the window to the paragraph bounds.
            start_par = max(0, answer["start"] - 86)
            stop_para = min(answer["end"] + 90, len(paragraph))
            answer_context = paragraph[start_par:stop_para].replace(answer_span, f'**{answer_span}**')
            st.write(f'Answer context (and score): ... _{answer_context}_ ... (score: {answer_score:.3f})')
            st.write('Answer JSON: ')
            st.write(answer)
        else:
            # The hosted API returns e.g. {"error": "Model ... is currently
            # loading"} while warming up; surface that instead of raising
            # KeyError on the missing "answer" field.
            st.write('Inference API did not return an answer:')
            st.write(answer)
    else:
        st.write('Write some passage of text and a question')
        st.stop()
|
|
|
|
# NOTE(review): dead experimental code kept as a bare module-level string
# literal. Streamlit's "magic" may render bare literals in the app, so this
# text is potentially user-visible — consider deleting it or converting it
# to real # comments. Left byte-identical here.
"""
from transformers import pipeline

x = st.slider('Select a value')
st.write(x, 'squared is', x * x)

question_answerer = pipeline("question-answering")

context = r" Extractive Question Answering is the task of extracting an answer from a text given a question.
An example of a question answering dataset is the SQuAD dataset, which is entirely based on that task.
If you would like to fine-tune a model on a SQuAD task, you may leverage the
examples/pytorch/question-answering/run_squad.py script."
question = "What is extractive question answering?" #"What is a good example of a question answering dataset?"
result = question_answerer(question=question, context=context)
answer = result['answer']
score = round(result['score'], 4)
span = f"start: {result['start']}, end: {result['end']}"

st.write(answer)
st.write(f"score: {score}")
st.write(f"span: {span}")
"""