developer3000 committed on
Commit
300cc8d
·
1 Parent(s): 48032cb

Add application file

Browse files
Files changed (1) hide show
  1. app.py +20 -43
app.py CHANGED
@@ -1,49 +1,26 @@
 
 
1
  import gradio as gr
2
- import torch
3
- from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
4
- from threading import Thread
5
 
6
- #tokenizer = AutoTokenizer.from_pretrained("togethercomputer/RedPajama-INCITE-Chat-3B-v1")
7
- #model = AutoModelForCausalLM.from_pretrained("togethercomputer/RedPajama-INCITE-Chat-3B-v1", torch_dtype=torch.float16)
8
 
9
- tokenizer = AutoTokenizer.from_pretrained("aaditya/OpenBioLLM-Llama3-8B-GGUF")
10
- model = AutoModelForCausalLM.from_pretrained("aaditya/openbiollm-llama3-8b.Q5_K_M.gguf", torch_dtype=torch.float16)
 
 
 
 
11
 
12
- class StopOnTokens(StoppingCriteria):
13
- def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
14
- stop_ids = [29, 0]
15
- for stop_id in stop_ids:
16
- if input_ids[0][-1] == stop_id:
17
- return True
18
- return False
19
 
20
- def predict(message, history):
21
- history_transformer_format = history + [[message, ""]]
22
- stop = StopOnTokens()
 
 
 
23
 
24
- messages = "".join(["".join(["\n<human>:"+item[0], "\n<bot>:"+item[1]])
25
- for item in history_transformer_format])
26
-
27
- model_inputs = tokenizer([messages], return_tensors="pt")
28
- streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
29
- generate_kwargs = dict(
30
- model_inputs,
31
- streamer=streamer,
32
- max_new_tokens=1024,
33
- do_sample=True,
34
- top_p=0.95,
35
- top_k=1000,
36
- temperature=1.0,
37
- num_beams=1,
38
- stopping_criteria=StoppingCriteriaList([stop])
39
- )
40
- t = Thread(target=model.generate, kwargs=generate_kwargs)
41
- t.start()
42
-
43
- partial_message = ""
44
- for new_token in streamer:
45
- if new_token != '<':
46
- partial_message += new_token
47
- yield partial_message
48
-
49
- gr.ChatInterface(predict).launch()
 
# Download the GGUF-quantized OpenBioLLM-Llama3-8B weights from the Hugging
# Face Hub and load them with llama.cpp for local inference.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
import gradio as gr

# Hub repository id and the specific quantized weight file to fetch from it.
model_name = "aaditya/OpenBioLLM-Llama3-8B-GGUF"
model_file = "openbiollm-llama3-8b.Q5_K_M.gguf"

# NOTE(review): '/content' is the Google Colab working directory — confirm it
# exists (and is writable) in the actual deployment environment.
model_path = hf_hub_download(model_name,
                             filename=model_file,
                             local_dir='/content')
print("My model path: ", model_path)
# Global model handle used by the inference function below.
# n_gpu_layers=-1 requests offloading all layers to the GPU — presumably the
# llama-cpp-python "offload everything" convention; verify against its docs.
llm = Llama(model_path=model_path,
            n_gpu_layers=-1)
 
15
+ def my_inference_function(Question):
16
+ prompt = f"You are an expert and experienced from the healthcare and biomedical domain with extensive medical knowledge and practical experience. Your name is OpenBioLLM, and you were developed by Saama AI Labs with Open Life Science AI. who's willing to help answer the user's query with explanation. In your explanation, leverage your deep medical expertise such as relevant anatomical structures, physiological processes, diagnostic criteria, treatment guidelines, or other pertinent medical concepts. Use precise medical terminology while still aiming to make the explanation clear and accessible to a general audience. Medical Question: {Question} Medical Answer:"
17
+ response = llm(prompt, max_tokens=4000)['choices'][0]['text']
18
+ return response
 
 
 
19
 
20
+ gradio_interface = gradio.Interface(
21
+ fn = my_inference_function,
22
+ inputs = "text",
23
+ outputs = "text"
24
+ )
25
+ gradio_interface.launch()
26