Ahmed-14 committed on
Commit
c6c272e
·
1 Parent(s): bdf2669

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -9
app.py CHANGED
@@ -12,17 +12,13 @@ from langchain import OpenAI
12
 
13
 
14
 
15
- token = 'api_org_***REDACTED***'  # SECURITY: a live Hugging Face org token was committed on this line; deleting it in a later commit does not remove it from history — revoke/rotate the token immediately
16
- from huggingface_hub import HfFileSystem
17
- fs = HfFileSystem(token=token)
18
-
19
- text_list = fs.ls("datasets/GoChat/Gochat247_Data/Data", detail=False)
20
 
 
 
 
21
  from llama_index import Document
22
- documents = [Document(t) for t in text_list]
23
-
24
- # Setup your LLM
25
-
26
 
27
  # define LLM
28
  llm_predictor = LLMPredictor(llm=OpenAI(temperature=0,
@@ -35,6 +31,7 @@ max_input_size = 4096
35
  num_output = 256
36
  # set maximum chunk overlap
37
  max_chunk_overlap = 20
 
38
  prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
39
 
40
  service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
 
12
 
13
 
14
 
15
+ from datasets import load_dataset
 
 
 
 
16
 
17
+ dataset = load_dataset("GoChat/Gochat247_Data")
18
+ data = dataset['train']["text"]
19
+ data_text = ' '.join(data)
20
  from llama_index import Document
21
+ documents = Document(data_text)
 
 
 
22
 
23
  # define LLM
24
  llm_predictor = LLMPredictor(llm=OpenAI(temperature=0,
 
31
  num_output = 256
32
  # set maximum chunk overlap
33
  max_chunk_overlap = 20
34
+
35
  prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
36
 
37
  service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)