# NOTE(review): removed HTML-scraping artifacts that preceded the source —
# a file-size banner, repeated commit hashes (b66f8ad / b5568be), and the
# file viewer's gutter line numbers. None of it is part of the Python program.
import torch
import gradio as gr

from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer ,pipeline

# --- Model setup (runs at import time; downloads weights from the HF Hub) ---
# NOTE(review): `config` is never read in the visible file — presumably loaded
# only to validate the adapter repo; confirm before removing.
config = PeftConfig.from_pretrained("ShishuTripathi/entity_coder")
# Base causal-LM (sharded bf16 Falcon-7B) onto which the LoRA adapter is applied.
model = AutoModelForCausalLM.from_pretrained("ybelkada/falcon-7b-sharded-bf16")
# Wrap the base model with the fine-tuned PEFT adapter weights.
model = PeftModel.from_pretrained(model, "ShishuTripathi/entity_coder")
tokenizer = AutoTokenizer.from_pretrained("ShishuTripathi/entity_coder")
# Text-generation pipeline used by text_generation(); max_length=50 caps
# prompt + completion tokens, so long narratives leave little room for output.
generator = pipeline('text-generation' , model = model, tokenizer =tokenizer, max_length = 50)

def text_generation(input_text):
    """Extract a MedDRA preferred term from an adverse-event narrative.

    Args:
        input_text: Free-text narrative or phrase describing the event.

    Returns:
        The model's completion after the "### Reported Term:" marker
        (falling back to the full generated text if the marker is absent),
        with the end-of-text special token removed and whitespace stripped.
    """
    marker = "### Reported Term:"
    prompt = f"### Narrative: {input_text} \n {marker}"
    out = generator(prompt)
    generated = out[0]['generated_text']
    # The pipeline echoes the prompt in generated_text; keep only the
    # completion after the marker so the UI shows just the extracted term.
    if marker in generated:
        generated = generated.split(marker, 1)[1]
    # Bug fix: the special token is '<|endoftext|>' (GPT-style EOS). The
    # previous '|endoftext|' pattern missed the angle brackets and left
    # stray '<' '>' characters in the output.
    return generated.replace('<|endoftext|>', ' ').strip()

# --- UI metadata and app entry point ---
title = "Preferred Term Extractor and Coder"
description = (
    "The term used to describe an adverse event in the Database of Adverse "
    "Event Notifications - medicines is the MedDRA 'preferred term', which "
    "describes a single medical concept"
)

# Build and launch the Gradio UI: one multi-line text input (the narrative)
# mapped through text_generation to one text output (the preferred term).
# Legacy gr.inputs/gr.outputs API kept to match the installed gradio version.
gr.Interface(
    text_generation,
    [gr.inputs.Textbox(lines=2, label="Enter Narrative or Phrase")],
    # Bug fix: corrected user-facing label typo "Preffered" -> "Preferred".
    [gr.outputs.Textbox(type="auto", label="Extracted Preferred Term")],
    title=title,
    description=description,
    theme="huggingface"
).launch()