File size: 10,268 Bytes
5692274
 
 
 
 
 
 
 
 
 
 
 
 
 
7432f2e
5692274
 
7432f2e
5692274
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7432f2e
5692274
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e71e760
 
 
 
d7cb3fe
e71e760
5692274
d7cb3fe
 
5692274
d7cb3fe
 
 
 
 
 
 
5692274
 
e71e760
d7cb3fe
e71e760
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d7cb3fe
5692274
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e71e760
aea603d
 
e71e760
 
 
 
 
aea603d
 
5692274
 
 
 
 
 
 
 
 
aea603d
5692274
 
 
 
 
 
 
 
 
 
e446536
5692274
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
import pickle
import openai
import re
import numpy as np
import tensorflow_hub as hub
import openai
import os
import gradio as gr
import pandas as pd
import io
import fitz
from sklearn.neighbors import NearestNeighbors
import warnings
warnings.filterwarnings(action='ignore')

# --- Corpus and embedding setup -------------------------------------------
# Pickled text corpora (lists of per-page strings) and their precomputed
# Universal Sentence Encoder embeddings, split by audience.
indi_pkl_file_path = "individual_list.pkl"   # individual-taxation corpus
busi_pkl_file_path = "business_list.pkl"     # business-taxation corpus

indi_loaded_array_file_path = "individual_numpy.npy"
busi_loaded_array_file_path = "business_numpy.npy"

with open(indi_pkl_file_path, "rb") as f:
    indi_texts = pickle.load(f)

with open(busi_pkl_file_path, "rb") as f:
    busi_texts = pickle.load(f)

# Embedding matrices aligned row-for-row with the chunk lists built below.
# (The previous `global` statements here were no-ops: `global` has no effect
# at module level, so they were removed.)
indi_loaded_array = np.load(indi_loaded_array_file_path)
busi_loaded_array = np.load(busi_loaded_array_file_path)

# SECURITY FIX: two live-looking API keys were previously hard-coded here
# (one in code, one in a comment). A committed key is a leaked key — it must
# be rotated. Read the key from the environment instead.
openAI_key = os.environ.get("OPENAI_API_KEY", "")

# Page number assigned to the first page of each corpus when chunking.
start_page = 1


def text_to_chunks(texts, word_length=150, start_page=1):
    """Split per-page texts into ~``word_length``-word chunks tagged with page numbers.

    A trailing fragment shorter than ``word_length`` is prepended to the
    next page's words (when a next page exists) instead of being emitted on
    its own, so no chunk ends on a tiny mid-document remainder. The final
    page's short tail, having nowhere to go, is emitted as-is.

    Args:
        texts: list of page strings (one entry per PDF page).
        word_length: target number of words per chunk.
        start_page: page number assigned to ``texts[0]`` in the chunk tag.

    Returns:
        list[str]: chunks formatted as ``[<page>] "<words>"``.
    """
    pages = [page.split(' ') for page in texts]
    chunks = []
    for page_idx, words in enumerate(pages):
        pos = 0
        while pos < len(words):
            piece = words[pos:pos + word_length]
            is_last_page = page_idx == len(pages) - 1
            # Carry a short tail over to the following page rather than
            # emitting an undersized chunk.
            if (not is_last_page
                    and pos + word_length > len(words)
                    and len(piece) < word_length):
                pages[page_idx + 1] = piece + pages[page_idx + 1]
                break  # nothing left on this page
            body = ' '.join(piece).strip()
            chunks.append(f'[{page_idx + start_page}] "{body}"')
            pos += word_length
    return chunks

class SemanticSearch:
    """Nearest-neighbour retrieval over precomputed sentence embeddings.

    Queries are embedded with the Universal Sentence Encoder and matched
    against the rows of ``loaded_array`` (assumed aligned with ``data``)
    using sklearn's ``NearestNeighbors``.
    """

    def __init__(self, data, loaded_array, batch=1000, n_neighbors=5):
        # NOTE(review): hub.load fetches the encoder (from cache or the
        # network) on every construction — confirm this cost is acceptable,
        # since output_generation builds a new instance per query.
        self.use = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4')
        self.fitted = False
        self.data = data
        self.embeddings = loaded_array
        # Never request more neighbours than there are embedding rows.
        k = min(n_neighbors, len(self.embeddings))
        self.nn = NearestNeighbors(n_neighbors=k)
        self.nn.fit(self.embeddings)
        self.fitted = True

    def __call__(self, text, return_data=True):
        """Return the stored chunks nearest to ``text`` (or raw indices)."""
        query_emb = self.use([text])
        hits = self.nn.kneighbors(query_emb, return_distance=False)[0]
        if not return_data:
            return hits
        return [self.data[i] for i in hits]

def generate_text(openAI_key, prompt, engine="text-davinci-003"):
    """Run a single completion request and return the generated text.

    NOTE(review): this uses the legacy ``openai.Completion`` endpoint and
    the ``text-davinci-003`` engine, both deprecated by OpenAI — confirm
    the pinned ``openai`` package version still supports them.
    """
    openai.api_key = openAI_key
    response = openai.Completion.create(
        engine=engine,
        prompt=prompt,
        max_tokens=512,
        n=1,
        stop=None,
        temperature=0,  # deterministic output for advisory answers
    )
    return response.choices[0].text

def generate_answer(question, openAI_key):
    """Answer ``question`` from the chunks retrieved by the global recommender.

    Builds a prompt of the top-N retrieved chunks plus answering
    instructions, then completes it via ``generate_text``.

    Relies on the module-level ``recommender`` being set beforehand
    (done by ``output_generation`` after classifying the query).
    """
    topn_chunks = recommender(question)

    prompt = 'search results:\n\n'
    for chunk in topn_chunks:
        prompt += chunk + '\n\n'

    # BUG FIX: the instruction text previously ended with a literal,
    # un-substituted "\n\nQuery: {question}\nAnswer:" — it was a plain
    # string, not an f-string — so the model saw the query block twice,
    # once with the "{question}" placeholder verbatim. That dead tail is
    # removed; the real query is appended exactly once below.
    prompt += (
        "Instructions: Compose a comprehensive reply to the query using the search results given. "
        "Consider the provided information from Australian Taxation Office (ATO) and answer only when context is clear to you. For example : Tax, Australia etc."
        "Keep The tone conversational "
        "Provide example from the text to make the answer understandable."
        "If there are multiple points, use bullets to write those points."
        "If there is any related hyperlink, include that in the answer."
        "If the search results mention multiple subjects with the same name, create separate answers for each. "
        "Only include information found in the results and don't add any additional information."
        "Make sure the answer is correct and don't output false content. "
        "If the text does not relate to the query, simply state 'Are you sure this is a taxation related query? If not kindly ask taxation related queries'. "
        "Ignore outlier search results which has nothing to do with the question. Only answer what is asked."
        "The answer should be short and concise. Answer step-by-step. "
    )

    prompt += f"\n\nQuery: {question}\nAnswer:"
    return generate_text(openAI_key, prompt, "text-davinci-003")

def classify_chatgpt(question, openAI_key, engine='gpt-3.5-turbo-0301'):
      """Classify a user query as a Business or Individual taxation query.

      Sends ``question`` to the chat model with a fixed system prompt and
      returns the model's raw label string — expected to be
      'Business Taxation Query' or 'Individual Taxation Query', though the
      model sometimes appends a trailing period (callers match both forms).
      """
      # NOTE(review): 6-space body indentation is unusual but consistent
      # within this function; left untouched to keep this change doc-only.
      openai.api_key = openAI_key
      message = [{'role': 'system', 'content': '''We have a user query related to taxes from Australia and your task is to classify the query into one of the category from the following given two categories only:
      
      1. Business Taxation Query
      2. Individual Taxation Query

      Context: Individual Taxation Query are those queries which are related to the individual taxation of the residents of a country (here from Australia).\n
              Business Taxation Query are those queries which are related from the Business perspective.

              
      Instructions:
      1. Understand the query very well.\n
      2. Return the result only as a string. \n
      3. Do not put any punctuation mark after classifying it. \n
      4. Do not write any extra information and just do the Classification from the mentioned two categories.'''},
                                {'role': 'user', 'content': question}]

      # NOTE(review): gpt-3.5-turbo-0301 is a dated snapshot model and this
      # is the legacy ChatCompletion endpoint — confirm both are still
      # served by the pinned openai package/account.
      completions = openai.ChatCompletion.create(
          model=engine,
          messages=message,
          max_tokens=512,
          n=1,
          stop=None,
          temperature=0.7,
      )
      # Rebinds `message` from the request payload to the reply text.
      message = completions.choices[0].message['content']
      #print(message)
      return message

def question_answer(question, openAI_key):
    """Thin wrapper around :func:`generate_answer`, kept for call-site clarity."""
    return generate_answer(question, openAI_key)


# Pre-chunk both corpora once at startup; these lists are what the
# SemanticSearch recommender returns entries from at query time.
indi_chunks = text_to_chunks(indi_texts, start_page=start_page)
busi_chunks = text_to_chunks(busi_texts, start_page=start_page)




def output_generation(typed_ques):
    """Route a chat message to the scripted login flow or the QA pipeline.

    Returns a ``(reply, query_type)`` tuple. ``query_type`` is the
    classifier's label for taxation questions and the placeholder 'dummy'
    for the scripted login steps.
    """
    type_of_query = 'dummy'

    # Scripted demo-login conversation steps, matched literally.
    if typed_ques == "Hello":
        return "Do you want to continue with myGovID or as Guest.", type_of_query

    if typed_ques == "myGovID":
        return "Hi User, Please Enter Your Username", type_of_query

    # A username like "first.last" contains exactly one dot.
    if len(typed_ques.split('.')) == 2:
        return "Please enter your password", type_of_query

    # An all-digit message is treated as the password; "Guest" skips login.
    if typed_ques.isdigit() or typed_ques == "Guest":
        return "Welcome to Tax GenAI, Please ask your question", type_of_query

    # Anything else is a real question: classify it, then retrieve from the
    # matching corpus. The recommender is published as a module global
    # because generate_answer reads it from there.
    global recommender
    type_of_query = classify_chatgpt(typed_ques, openAI_key)

    # Normalise the classifier output (it sometimes appends a period).
    label = type_of_query.strip().rstrip('.')
    if label == 'Individual Taxation Query':
        recommender = SemanticSearch(data=indi_chunks, loaded_array=indi_loaded_array)
        return question_answer(question=typed_ques, openAI_key=openAI_key), type_of_query
    if label == 'Business Taxation Query':
        recommender = SemanticSearch(data=busi_chunks, loaded_array=busi_loaded_array)
        return question_answer(question=typed_ques, openAI_key=openAI_key), type_of_query

    # BUG FIX: an unrecognised label previously fell off the end of the
    # function and returned None, crashing the caller's tuple unpack.
    return ("Are you sure this is a taxation related query? "
            "If not kindly ask taxation related queries"), type_of_query


# --- Static UI copy -------------------------------------------------------
title = 'CDI Citizen Intelligence 360 Tool: Tax Advisory'
# Earlier, longer blurb kept for reference.
#description = """ Citizen Intelligence 360 sources data from various government agencies and makes it accessible to citizens in a user-friendly format. This can help citizens better understand how their local government works and stay informed about important initiatives and changes in their communities. Citizen Intelligence 360 is a citizen intelligence tool that uses mapping technology to provide citizens with access to government data, performance report and other civic information. It helps to increase transparency and accountability in local government, and empowers citizens to make informed decisions and participate in the democratic process."""
description = """Welcome to Citizen Intelligence 360 Tool, your intelligent tax companion. Harnessing the power of advanced GEN AI, the tax advisory tool is here to revolutionize your tax experience. With its unrivaled expertise and personalized guidance, the tool simplifies complex tax matters, providing accurate answers to your questions instantly. Say goodbye to confusing endless research and experience the future of tax advisory with Citizen Intelligence 360 Tool - the intelligent solution for your tax needs."""
# Inline HTML <img> banner; the raw HTML/CSS string is passed straight
# through to the page via gr.Markdown below.
img_ = '''<html><head><style>
    .image {
      width: 500px; 
      height: 150px;
      position: static;
    }</style></head><body><img src= https://blog.ipleaders.in/wp-content/uploads/2020/08/HDFC_Life_Filed_Your_Returns_Here%D0%A2s_How_You_Can_Check_to_Be_Sure_Sept19-1.jpg  class=image></body></html>'''


def gradio_chatbox(input, history):
    """Gradio event handler: append ``(message, reply)`` to the chat history.

    Returns the history twice — once to refresh the Chatbot display and
    once to persist it in the State component.
    """
    history = history or []  # first turn: Gradio passes None for state
    reply, _query_type = output_generation(input)
    history.append((input, str(reply)))
    return history, history

   

# --- Gradio app layout and launch -----------------------------------------
block = gr.Blocks()


with block:
    gr.Markdown(f'<center><h1>{title}</h1></center>')
    # The banner HTML string is injected through Markdown as well.
    gr.Markdown(f'<center><h1>{img_}</h1></center>')
    gr.Markdown(description)
    chatbot = gr.Chatbot(label="Tax GenAI")
    # NOTE(review): the adjacent ""Hello"" quotes collapse under Python
    # string concatenation, so this placeholder renders without quotes:
    # 'Type Hello to continue.' — confirm whether quoted "Hello" was intended.
    message = gr.Textbox(label='Please ask your question',placeholder = "Welcome! This is Tax GenAI. Type ""Hello"" to continue.")
    state = gr.State()  # holds the (user, bot) history tuples between submits
    
    submit = gr.Button("SEND")
    #submit1 = gr.Button("SEND")
    #submit2 = gr.Button("SEND")
    # NOTE(review): Button.style() was deprecated and later removed in newer
    # Gradio releases — confirm the pinned gradio version still supports it.
    submit.style(full_width=None, size='lg' )
    #submit1.style(full_width=None, size='lg')
    #submit2.style(full_width=None, size='lg')
    submit.click(gradio_chatbox, inputs=[message, state], outputs=[chatbot, state])


# Blocking call: starts the local web server for the app.
block.launch()