binqiangliu commited on
Commit
fe4f27c
·
1 Parent(s): d687e0e

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +54 -60
main.py CHANGED
@@ -1,63 +1,57 @@
1
- from fastapi import FastAPI
2
- import pickle
3
- import uvicorn
4
- import pandas as pd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
  app = FastAPI()
7
 
8
- # @app.get("/")
9
- # def read_root():
10
- # return {"Hello": "World!"}
11
-
12
-
13
- # Function to load pickle file
14
- def load_pickle(filename):
15
- with open(filename, 'rb') as file:
16
- data = pickle.load(file)
17
- return data
18
-
19
- # Load pickle file
20
- ml_components = load_pickle('ml_sepsis.pkl')
21
-
22
- # Components in the pickle file
23
- ml_model = ml_components['model']
24
- pipeline_processing = ml_components['pipeline']
25
-
26
- #Endpoints
27
- #Root endpoints
28
- @app.get("/")
29
- def root():
30
- return {"API": "An API for Sepsis Prediction."}
31
-
32
- @app.get('/Predict_Sepsis')
33
- async def predict(Plasma_glucose: int, Blood_Work_Result_1: int,
34
- Blood_Pressure: int, Blood_Work_Result_2: int,
35
- Blood_Work_Result_3: int, Body_mass_index: float,
36
- Blood_Work_Result_4: float,Age: int, Insurance:float):
37
-
38
- data = pd.DataFrame({'Plasma glucose': [Plasma_glucose], 'Blood Work Result-1': [Blood_Work_Result_1],
39
- 'Blood Pressure': [Blood_Pressure], 'Blood Work Result-2': [Blood_Work_Result_2],
40
- 'Blood Work Result-3': [Blood_Work_Result_3], 'Body mass index': [Body_mass_index],
41
- 'Blood Work Result-4': [Blood_Work_Result_4], 'Age': [Age], 'Insurance':[Insurance]})
42
-
43
- data_prepared = pipeline_processing.transform(data)
44
-
45
- model_output = ml_model.predict(data_prepared).tolist()
46
-
47
- prediction = make_prediction(model_output)
48
-
49
- return prediction
50
-
51
-
52
-
53
-
54
- def make_prediction(data_prepared):
55
-
56
- output_pred = data_prepared
57
-
58
- if output_pred == 0:
59
- output_pred = "Sepsis status is Negative"
60
- else:
61
- output_pred = "Sepsis status is Positive"
62
-
63
- return output_pred
 
# Standard library
import os
import sys
import uuid
from pathlib import Path

# Third-party
import numpy as np
import requests
import uvicorn  # restored: used by the __main__ guard but was dropped in this commit
from dotenv import load_dotenv
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
from huggingface_hub import InferenceClient
from pydantic import BaseModel

# LangChain (relative order preserved so the final name bindings are unchanged)
from langchain import PromptTemplate, LLMChain
from langchain.memory import StreamlitChatMessageHistory
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
from langchain import HuggingFaceHub

# Load .env so the os.getenv calls below see the configured values.
load_dotenv()
22
+
23
+
24
+ HUGGINGFACEHUB_API_TOKEN = os.getenv('HUGGINGFACEHUB_API_TOKEN')
25
+ repo_id = os.environ.get('repo_id')
26
+ port = os.getenv('port')
27
+
28
+ llm = HuggingFaceHub(repo_id=repo_id,
29
+ model_kwargs={"min_length":1024,
30
+ "max_new_tokens":5632, "do_sample":True,
31
+ "temperature":0.1,
32
+ "top_k":50,
33
+ "top_p":0.95, "eos_token_id":49155})
34
+
35
+ prompt_template = """
36
+ <<SYS>>You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
37
+ If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
38
+ In each conversation, question is placed after [INST] while your answer should be placed after [/INST].<</SYS>>
39
+ [INST] {user_question} [/INST]
40
+ assistant:
41
+ """
42
+
43
+ llm_chain = LLMChain(llm=llm, prompt=PromptTemplate.from_template(prompt_template))
44
 
45
  app = FastAPI()
46
 
class ChatRequest(BaseModel):
    """Request body for POST /api/chat."""
    # The raw user question; substituted for {user_question} in the prompt template.
    user_question: str
49
+
50
+ @app.post('/api/chat')
51
+ async def chat(request: Request, chat_request: ChatRequest):
52
+ user_query = chat_request.user_question
53
+ initial_response = llm_chain.run(user_query)
54
+ return JSONResponse({'response': initial_response})
55
+
56
+ if __name__ == '__main__':
57
+ uvicorn.run(app, host='0.0.0.0')