3v324v23 committed on
Commit
e1119a2
·
2 Parent(s): 9a8e19f 72fafe8

Merge remote-tracking branch 'hub/main'

Browse files
Files changed (4) hide show
  1. .gitattributes +34 -0
  2. README.md +13 -0
  3. app.py +75 -0
  4. pre-requirements.txt +8 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Smart
3
+ emoji: 📈
4
+ colorFrom: pink
5
+ colorTo: red
6
+ sdk: gradio
7
+ sdk_version: 3.28.0
8
+ app_file: app.py
9
+ pinned: false
10
+ license: other
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from glob import glob
2
+ from gpt_index import SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
3
+ from langchain.chat_models import ChatOpenAI
4
+ import gradio as gr
5
+ import sys
6
+ import os
7
+ import zipfile
8
+
9
+
10
+
11
# NOTE(review): the key is read from the 'token' env var (an HF Space secret)
# but is never passed to openai/langchain in this file — presumably ChatOpenAI
# falls back to an OPENAI_API_KEY environment variable; verify that this
# assignment actually results in an authenticated client.
OPENAI_API_KEY = os.getenv('token')
12
+
13
+
14
+
15
def construct_index(zip_path="tema.zip"):
    """Build and persist a GPT vector index from local documents.

    Extracts ``zip_path`` (best effort) into the working directory, loads
    every file there via ``SimpleDirectoryReader``, builds a
    ``GPTSimpleVectorIndex`` and saves it to ``index.json`` so that
    ``chatbot`` can reload it without rebuilding.

    Args:
        zip_path: Archive containing the source documents
            (default ``"tema.zip"``, matching the original hard-coded path).

    Returns:
        The constructed ``GPTSimpleVectorIndex``.
    """
    # Best-effort extraction: a missing or corrupt archive is reported but
    # does not abort indexing — the documents may already be on disk.
    try:
        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            zip_ref.extractall(".")
    except Exception as e:
        print("Erro ao extrair o arquivo zip:", e)

    # Prompt-sizing parameters for the LLM predictor.
    max_input_size = 3500
    num_outputs = 512
    max_chunk_overlap = 20
    chunk_size_limit = 600

    prompt_helper = PromptHelper(
        max_input_size, num_outputs, max_chunk_overlap,
        chunk_size_limit=chunk_size_limit)

    llm_predictor = LLMPredictor(
        llm=ChatOpenAI(temperature=0.3, model_name="gpt-3.5-turbo",
                       max_tokens=num_outputs))

    documents = SimpleDirectoryReader(".").load_data()

    index = GPTSimpleVectorIndex(
        documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper)

    # Persist so chatbot() can load_from_disk('index.json') later.
    index.save_to_disk('index.json')

    return index
41
+
42
+
43
def chatbot(input_text):
    """Answer ``input_text`` using the persisted vector index, restricted to
    the configured theme.

    Reads the theme name from the first line of ``tema.txt``, concatenates
    the local ``.txt`` documents as extra context, and queries the index
    previously saved by ``construct_index``.

    Args:
        input_text: The user's question.

    Returns:
        The index's textual response (``response.response``).
    """
    index = GPTSimpleVectorIndex.load_from_disk('index.json')

    # Read and concatenate the local documents as the relevant context.
    # BUG FIX: glob() performs no brace expansion, so the original pattern
    # "*.{txt,pdf}" matched nothing and the context was always empty.
    # Plain-text files are globbed explicitly instead.
    # NOTE(review): .pdf files are binary and cannot be read with
    # open(..., "r"); extracting their text would require PyPDF2 (already in
    # pre-requirements.txt) — TODO if PDF context is actually needed.
    parts = []
    for file_path in glob(os.path.join(".", "*.txt")):
        with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
            parts.append(f.read())
    contexto = " ".join(parts).strip()

    # Combine the theme prefix, the question and the context into the prompt.
    with open('tema.txt', 'r') as f:
        texto_prefixo = f.readline().strip()
    texto_entrada = f"Dentro do assunto {texto_prefixo} me responda: {input_text}{contexto} se não for {texto_prefixo} não responda"
    print(texto_entrada)

    response = index.query(texto_entrada, response_mode="compact")
    return response.response
61
+
62
+
63
# ---------------------------------------------------------------------------
# Gradio UI wiring: one textbox feeding chatbot(), plain-text output.
# ---------------------------------------------------------------------------
description = """
A IA foi treinada com materiais enviados e responde perguntas sobre o tema definido!
"""

question_box = gr.components.Textbox(lines=7, label="Como podemos te ajudar?")

iface = gr.Interface(
    fn=chatbot,
    inputs=question_box,
    outputs="text",
    description=description,
    title="Demonstração Chat OpenAI",
)

# Build (or rebuild) the index at startup, then serve the app locally only.
index = construct_index()
iface.launch(share=False)
pre-requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ openai
2
+ gpt_index==0.4.24
3
+ transformers
4
+ PyPDF2
5
+ PyCryptodome
6
+ spacy
7
+ gensim
8
+ https://github.com/explosion/spacy-models/releases/download/pt_core_news_sm-3.5.0/pt_core_news_sm-3.5.0.tar.gz