weizhiwang committed
Commit 171eb84 · Parent(s): 6ac3aa5

add system prompt

Files changed (1): example/backend/server.py (+4, -2)
example/backend/server.py CHANGED
@@ -125,7 +125,9 @@ async def generate_response(request: QueryRequest):
         context, paper_objects = format_papers_context(relevant_papers, similarities)
 
         # Prepare the prompt with context
-        full_prompt = f"{context}\n\nQuestion: {request.message}\nAnswer:"
+        # full_prompt = f"{context}\n\nQuestion: {request.message}\nAnswer:"
+        system_prompt = "You are a helpful assistant on the topic of materials science. If the question is not related to the scientific papers and research, please answer the question as a general assistant. For the scientific research questions, please only answer the question using the given retrieved papers and their contents. Do not generate the paper title and contents which do not exist in the retrieved context."
+        full_prompt = f"{system_prompt}\n\n{context}\n\nQuestion: {request.message}\n\nAnswer:"
 
         # Generate response using Llama model
         inputs = llama_tokenizer(full_prompt, return_tensors="pt").to(llama_model.device)
@@ -169,4 +171,4 @@ async def generate_response(request: QueryRequest):
         raise HTTPException(status_code=500, detail=f"An error occurred: {str(e)}")
 
 if __name__ == "__main__":
-    uvicorn.run("server:app", host="0.0.0.0", port=8000, reload=True)
+    uvicorn.run("server:app", host="0.0.0.0", port=8000, reload=True)
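For readers wiring this into their own setup, below is a minimal, self-contained sketch of the prompt flow this commit introduces. It assumes llama_model and llama_tokenizer are a standard Hugging Face AutoModelForCausalLM / AutoTokenizer pair; the checkpoint name, the sample context string, and the build_prompt helper are placeholders for illustration, not taken from this repo.

# Sketch only: assumes a plain transformers setup; the checkpoint name
# below is a placeholder, not necessarily the model this repo loads.
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "meta-llama/Llama-2-7b-hf"  # placeholder checkpoint

llama_tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
llama_model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, device_map="auto")

# Abridged; the full instruction text is in the diff above.
system_prompt = "You are a helpful assistant on the topic of materials science. ..."

def build_prompt(context: str, question: str) -> str:
    # Same layout the commit introduces: system prompt first, then the
    # retrieved paper context, then the user's question.
    return f"{system_prompt}\n\n{context}\n\nQuestion: {question}\n\nAnswer:"

# Stand-in for the output of format_papers_context(...).
context = "Title: Example Paper\nAbstract: ..."
prompt = build_prompt(context, "What limits perovskite solar cell stability?")

inputs = llama_tokenizer(prompt, return_tensors="pt").to(llama_model.device)
output_ids = llama_model.generate(**inputs, max_new_tokens=256)

# Decode only the newly generated tokens, not the echoed prompt.
answer = llama_tokenizer.decode(
    output_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
)
print(answer)

One design note: plain string concatenation like this works for base checkpoints, but instruction-tuned Llama variants expect their chat template, and there tokenizer.apply_chat_template with an explicit system-role message is the more idiomatic way to inject a system prompt.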