Gagan2209 committed on
Commit
6a3d488
·
verified ·
1 Parent(s): 1b12d98

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -6
app.py CHANGED
@@ -303,7 +303,7 @@ def soap_pipeline(text: str):
303
 
304
  INSTRUCTIONS = {
305
  "NER" : """
306
- ** NOTE -> First Inference Could take upto 1 minute due to loading of heavy dependencies. (Further Inferences will be quick) I have used LRU Cache**
307
 
308
  **Paste the transcript in the below format:**
309
 
@@ -316,15 +316,14 @@ INSTRUCTIONS = {
316
 
317
  """,
318
  "Sentiment & Intent": """
319
- ** NOTE -> First Inference Could take upto 1 minute due to loading of heavy dependencies. (Further Inferences will be quick) I have used LRU Cache**
320
-
321
- **Paste the patient transcript in the below format:**
322
 
323
  Patient: That’s great to hear. So, I don’t need to worry about this affecting me in the future?
324
  """,
325
 
326
  "SOAP Generation" : """
327
- ** NOTE -> First Inference Could take upto 1 minute due to loading of heavy dependencies. (Further Inferences will be quick) I have used LRU Cache**
328
 
329
 
330
  SOAP generation is currently not fully accurate.
@@ -336,7 +335,7 @@ INSTRUCTIONS = {
336
  }
337
 
338
  with gr.Blocks() as demo:
339
- gr.Markdown("# 🩺 Physician Notetaker Toolkit")
340
 
341
  with gr.Row():
342
  task = gr.Radio(
 
303
 
304
  INSTRUCTIONS = {
305
  "NER" : """
306
+ **NOTE -> First Inference Could take upto 1 minute due to loading of heavy dependencies. (Further Inferences will be quick) I have used LRU Cache**
307
 
308
  **Paste the transcript in the below format:**
309
 
 
316
 
317
  """,
318
  "Sentiment & Intent": """
319
+ **NOTE -> First Inference Could take upto 1 minute due to loading of heavy dependencies. (Further Inferences will be quick) I have used LRU Cache**
320
+ **Paste the patient transcript in the below format:**
 
321
 
322
  Patient: That’s great to hear. So, I don’t need to worry about this affecting me in the future?
323
  """,
324
 
325
  "SOAP Generation" : """
326
+ **NOTE -> First Inference Could take upto 1 minute due to loading of heavy dependencies. (Further Inferences will be quick) I have used LRU Cache**
327
 
328
 
329
  SOAP generation is currently not fully accurate.
 
335
  }
336
 
337
  with gr.Blocks() as demo:
338
+ gr.Markdown("Physician Notetaker")
339
 
340
  with gr.Row():
341
  task = gr.Radio(