alx-d committed on
Commit
ebed6ef
·
verified ·
1 Parent(s): 882fc81

Update cbt.py

Browse files
Files changed (1) hide show
  1. cbt.py +11 -5
cbt.py CHANGED
@@ -23,14 +23,20 @@ RATE_LIMIT = 3
23
  @limits(calls=RATE_LIMIT, period=1)
24
  def create_service_context():
25
 
26
- #constraint parameters
 
 
 
 
 
 
 
27
  max_input_size = 4096
28
  num_outputs = 512
29
  max_chunk_overlap = 20
30
- chunk_size_limit = 600
31
-
32
- #allows the user to explicitly set certain constraint parameters
33
- prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
34
 
35
  #LLMPredictor is a wrapper class around LangChain's LLMChain that allows easy integration into LlamaIndex
36
  llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo", max_tokens=num_outputs))
 
23
  @limits(calls=RATE_LIMIT, period=1)
24
  def create_service_context():
25
 
26
+ # Constraint parameters ORIGINAL
27
+ # max_input_size = 4096
28
+ # num_outputs = 512
29
+ # max_chunk_overlap = 20
30
+ # chunk_size_limit = 600
31
+
32
+ # Allows the user to explicitly set certain constraint parameters
33
+ # prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
34
  max_input_size = 4096
35
  num_outputs = 512
36
  max_chunk_overlap = 20
37
+ chunk_size_limit = 600
38
+ prompt_helper = PromptHelper(max_input_size, num_outputs, chunk_overlap_ratio= 0.1, chunk_size_limit=chunk_size_limit)
39
+ # llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.7, model_name="gpt-4", max_tokens=num_outputs))
 
40
 
41
  #LLMPredictor is a wrapper class around LangChain's LLMChain that allows easy integration into LlamaIndex
42
  llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo", max_tokens=num_outputs))