Kotta committed
Commit 45c8251 · 1 Parent(s): d95975e

feature(#200): implemented decoupled prompts for each platform.

Brain/src/rising_plugin/risingplugin.py CHANGED
@@ -19,7 +19,14 @@ from firebase_admin import storage
 
 from .csv_embed import get_embed
 from .llm.falcon_llm import FalconLLM
-from .llm.llms import get_llm, GPT_4, FALCON_7B, get_llm_chain, MOBILE_PROMPT
+from .llm.llms import (
+    get_llm,
+    GPT_4,
+    FALCON_7B,
+    get_llm_chain,
+    MOBILE_PROMPT,
+    EXTENSION_PROMPT,
+)
 from .pinecone_engine import init_pinecone
 from .rails_validate import validate_rails
 from ..common.brain_exception import BrainException
@@ -351,7 +358,9 @@ def ask_question(
     docs = []
 
     if ACTION_FLAG:
-        docs.append(Document(page_content=MOBILE_PROMPT, metadata=""))
+        # apply the proper prompt for each platform
+        prompt_template = EXTENSION_PROMPT if is_browser else MOBILE_PROMPT
+        docs.append(Document(page_content=prompt_template, metadata=""))
     # temperature should be 0.
     chain_data = get_llm_chain(
         model=DEFAULT_GPT_MODEL, setting=setting, temperature=0.0
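
For context, the change reduces to a conditional over two module-level prompt constants. Below is a minimal, self-contained sketch of that pattern; the prompt strings and the pick_prompt helper are placeholders for illustration, since only the constant names MOBILE_PROMPT / EXTENSION_PROMPT and the is_browser flag appear in this commit (the real templates live in Brain/src/rising_plugin/llm/llms.py and are not shown here).

    # Hypothetical sketch of the platform-decoupled prompt selection.
    # Only MOBILE_PROMPT, EXTENSION_PROMPT, and is_browser come from the
    # commit; the text of the prompts and pick_prompt are assumptions.

    MOBILE_PROMPT = "You are an assistant running inside the mobile app ..."
    EXTENSION_PROMPT = "You are an assistant running inside the browser extension ..."


    def pick_prompt(is_browser: bool) -> str:
        """Return the prompt template matching the caller's platform."""
        # Mirrors the branch added in ask_question() above.
        return EXTENSION_PROMPT if is_browser else MOBILE_PROMPT


    if __name__ == "__main__":
        print(pick_prompt(is_browser=True))   # extension prompt
        print(pick_prompt(is_browser=False))  # mobile prompt

Keeping both templates as constants in llms.py means new platforms can be added by introducing another constant and extending the conditional, without touching the chain-construction code in risingplugin.py.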