#import uvicorn
#import streamlit as st
import flask
# flask was not imported before
from flask import Flask, request, jsonify
#from langchain.memory import StreamlitChatMessageHistory
#import numpy as np
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory
#from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
from langchain import HuggingFaceHub
import os
from dotenv import load_dotenv
load_dotenv()
#from pathlib import Path
from huggingface_hub import InferenceClient
import requests
#from pydantic import BaseModel
import uuid
import sys
# Initialize the chatbot
hf_token = os.environ.get('HUGGINGFACEHUB_API_TOKEN')
repo_id = os.environ.get('repo_id')
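# Expected .env entries (placeholder values; the real token and repo come
# from the deployment environment, not from this file):
# HUGGINGFACEHUB_API_TOKEN=hf_...
# repo_id=<model repo on the Hugging Face Hub>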
#port = os.getenv('port')
llm = HuggingFaceHub(repo_id=repo_id,
                     #huggingfacehub_api_token="hf_p***K",
                     huggingfacehub_api_token=hf_token,
                     model_kwargs={"min_length": 1024,
                                   "max_new_tokens": 5632,
                                   "do_sample": True,
                                   "temperature": 0.1,
                                   "top_k": 50,
                                   "top_p": 0.95,
                                   "eos_token_id": 49155})
#prompt_template = """
#<<SYS>>You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
#If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
#In each conversation, question is placed after [INST] while your answer should be placed after [/INST].<</SYS>>
#[INST] {user_question} [/INST]
#assistant:
#"""
| prompt_template = """ | |
| <<SYS>>You are a helpful, respectful and honest assistant. If you don't know the answer to a question, please don't share false information.In each conversation, question is placed after [INST] while your answer should be placed after [/INST].<</SYS>> | |
| [INST] {user_question} [/INST] | |
| assistant: | |
| """ | |
llm_chain = LLMChain(llm=llm, prompt=PromptTemplate.from_template(prompt_template))
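# Quick local sanity check for the chain (hypothetical question; run() fills
# the {user_question} slot of the template and returns the model's text):
# print(llm_chain.run("What is the capital of France?"))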
# Define the API endpoints
app = Flask(__name__)

# NOTE: the original file registered no routes, so neither view was
# reachable; the paths below are assumptions. Adjust them to your deployment.
@app.route('/', methods=['POST'])
def home_api():
    data = request.get_json()
    user_query = data['user_question']
    print(user_query)
    return {"Message": "Flask Home API Deploy Success on HF"}
@app.route('/chat', methods=['POST'])
def chat():
    #async def chat():
    # async is not supported here
    data = request.get_json()
    #user_query = data['query']
    # The key in ['query'] can be renamed, e.g. to user_question; the client
    # code must then send data = {'user_question': user_query} with the same key.
    user_query = data['user_question']
    # Call the chatbot
    initial_response = llm_chain.run(user_query)
    #return jsonify({'response': initial_response})
    # Found the issue: jsonify is not supported on Hugging Face, but deploying
    # from GitHub to Render works fine!
    return {'response': initial_response}
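# Example client call (host, port, and the /chat path are assumptions for a
# local run; the JSON key must match the one read above):
# import requests
# resp = requests.post("http://localhost:5000/chat",
#                      json={"user_question": "Hello, who are you?"})
# print(resp.json()['response'])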
#if __name__ == "__main__":
#    uvicorn.run(app, host='0.0.0.0', port=5000)
#app.run(host='0.0.0.0')
#app.run(host='0.0.0.0', port=10000)
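# On Hugging Face Spaces the platform usually starts the server itself
# (default port 7860), which is presumably why app.run stays commented out.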