binqiangliu commited on
Commit
17e4567
·
1 Parent(s): ea9c4b2

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +61 -62
main.py CHANGED
@@ -1,63 +1,62 @@
from fastapi import FastAPI
import pickle
import uvicorn
import pandas as pd

app = FastAPI()


def load_pickle(filename):
    """Deserialize and return the object stored in *filename*.

    NOTE(review): ``pickle.load`` can execute arbitrary code; only load
    artifacts produced by this project (here: ``ml_sepsis.pkl``).
    """
    with open(filename, 'rb') as file:
        return pickle.load(file)


# Trained artifacts: a fitted model plus its preprocessing pipeline.
ml_components = load_pickle('ml_sepsis.pkl')
ml_model = ml_components['model']
pipeline_processing = ml_components['pipeline']


def make_prediction(model_output):
    """Map a raw model output to a human-readable sepsis status string.

    Accepts either a bare label or the list produced by
    ``model.predict(...).tolist()``. The original compared the whole
    list to 0 (never true), so every request reported "Positive";
    fixed here by unwrapping the first element of a list input.
    """
    label = model_output[0] if isinstance(model_output, list) else model_output
    if label == 0:
        return "Sepsis status is Negative"
    return "Sepsis status is Positive"


# Endpoints
@app.get("/")
def root():
    """Root endpoint: short description of the service."""
    return {"API": "An API for Sepsis Prediction."}


@app.get('/Predict_Sepsis')
async def predict(Plasma_glucose: int, Blood_Work_Result_1: int,
                  Blood_Pressure: int, Blood_Work_Result_2: int,
                  Blood_Work_Result_3: int, Body_mass_index: float,
                  Blood_Work_Result_4: float, Age: int, Insurance: float):
    """Predict sepsis status from nine clinical query parameters.

    The parameters are assembled into a one-row DataFrame whose column
    names match the training data, transformed by the fitted pipeline,
    and scored by the model; the label is rendered as a status string.
    """
    data = pd.DataFrame({'Plasma glucose': [Plasma_glucose],
                         'Blood Work Result-1': [Blood_Work_Result_1],
                         'Blood Pressure': [Blood_Pressure],
                         'Blood Work Result-2': [Blood_Work_Result_2],
                         'Blood Work Result-3': [Blood_Work_Result_3],
                         'Body mass index': [Body_mass_index],
                         'Blood Work Result-4': [Blood_Work_Result_4],
                         'Age': [Age], 'Insurance': [Insurance]})

    data_prepared = pipeline_processing.transform(data)
    model_output = ml_model.predict(data_prepared).tolist()
    return make_prediction(model_output)
 
 
 
 
import uvicorn  # referenced only by the commented-out launch stub below
from flask import Flask, request, jsonify
import numpy as np  # NOTE(review): unused in this module -- candidate for removal
import os
from dotenv import load_dotenv
from pathlib import Path  # NOTE(review): unused -- candidate for removal
import requests  # NOTE(review): unused -- candidate for removal
from huggingface_hub import InferenceClient  # NOTE(review): unused -- candidate for removal
from langchain import HuggingFaceHub
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory  # NOTE(review): unused -- candidate for removal
from langchain.memory import StreamlitChatMessageHistory  # NOTE(review): unused; duplicate import path dropped

load_dotenv()

# Chatbot initialisation: credentials and model id come from the environment.
HUGGINGFACEHUB_API_TOKEN = os.getenv('HUGGINGFACEHUB_API_TOKEN')
repo_id = os.environ.get('repo_id')
port = os.getenv('port')  # NOTE(review): read but unused while the launch stub is commented out

llm = HuggingFaceHub(repo_id=repo_id,
                     model_kwargs={"min_length": 1024,
                                   "max_new_tokens": 5632, "do_sample": True,
                                   "temperature": 0.1,
                                   "top_k": 50,
                                   "top_p": 0.95, "eos_token_id": 49155})

# Llama-2-style chat template: system rules inside <<SYS>>, user turn in [INST].
prompt_template = """
<<SYS>>You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
In each conversation, question is placed after [INST] while your answer should be placed after [/INST].<</SYS>>
[INST] {user_question} [/INST]
assistant:
"""

llm_chain = LLMChain(llm=llm, prompt=PromptTemplate.from_template(prompt_template))

# API endpoint definition
app = Flask(__name__)


@app.route('/api/chat', methods=['POST'])
def chat():
    """POST /api/chat -- run the user's question through the LLM chain.

    Expects a JSON body with a 'user_question' key (callers must send
    data = {'user_question': ...}; the key name must match on both
    sides). Returns {'response': <model text>}.
    """
    data = request.get_json()
    if not data or 'user_question' not in data:
        # Original raised KeyError (HTTP 500) on malformed input;
        # report a client error instead.
        return jsonify({'error': "missing 'user_question' in JSON body"}), 400
    user_query = data['user_question']
    # Run the chatbot chain on the raw question text.
    initial_response = llm_chain.run(user_query)
    return jsonify({'response': initial_response})


# NOTE(review): uvicorn serves ASGI apps, but Flask is WSGI -- re-enabling
# this stub as written would fail. Use app.run(host='0.0.0.0',
# port=int(port or 5000)) if local serving is needed.
#if __name__ == "__main__":
#    uvicorn.run(app, host='0.0.0.0', port=5000)