Sam-Oliveira committed on
Commit
9f1f697
·
1 Parent(s): e05c9d5

Change ideation prompt

Browse files
Files changed (2) hide show
  1. src/ideate.py +3 -3
  2. src/summarise.py +3 -5
src/ideate.py CHANGED
@@ -6,9 +6,9 @@ from typing import Optional,List
6
  from helpers import rows_by_tag
7
 
8
  IDEA_PROMPT = (
9
- " You are a senior ML researcher. For each of the papers in CONTEXT, propose THREE fresh research projects."
10
- "For each give a new **Title**, one-sentence **Motivation**, two-sentence **Method idea**, "
11
- "and one-sentence **Evaluation method**.\n"
12
  "===CONTEXT===\n"
13
  "{context}\n"
14
  "===PROJECT IDEAS===\n"
 
6
  from helpers import rows_by_tag
7
 
8
  IDEA_PROMPT = (
9
+ " You are a senior ML researcher. CONTEXT provides a list of papers. From this list of papers, propose THREE new research projects."
10
+ "For each research project proposed, give a new Title, one-sentence on Motivation and background, two-sentences on the new method, "
11
+ "and one-sentence on Evaluation method.\n"
12
  "===CONTEXT===\n"
13
  "{context}\n"
14
  "===PROJECT IDEAS===\n"
src/summarise.py CHANGED
@@ -18,16 +18,14 @@ PROMPT = (
18
 
19
  # ---------------------------------------------------------------------- #
20
  def load_pipe():
21
- # Cache directories are already set up in streamlit_app.py
22
  cache_dir = pathlib.Path(tempfile.gettempdir()) / "hf_cache"
23
-
24
  model = AutoModelForCausalLM.from_pretrained(
25
  MODEL_NAME,
26
- cache_dir=str(cache_dir),
27
- load_in_4bit=True,
28
  device_map="auto"
29
  )
30
- tok = AutoTokenizer.from_pretrained(MODEL_NAME, cache_dir=str(cache_dir))
31
  tok.pad_token = tok.eos_token
32
  return pipeline(
33
  "text-generation",
 
18
 
19
  # ---------------------------------------------------------------------- #
20
  def load_pipe():
 
21
  cache_dir = pathlib.Path(tempfile.gettempdir()) / "hf_cache"
 
22
  model = AutoModelForCausalLM.from_pretrained(
23
  MODEL_NAME,
24
+ cache_dir=cache_dir,
25
+ #load_in_4bit=True,
26
  device_map="auto"
27
  )
28
+ tok = AutoTokenizer.from_pretrained(MODEL_NAME, cache_dir=cache_dir)
29
  tok.pad_token = tok.eos_token
30
  return pipeline(
31
  "text-generation",