rajatchaudhari committed
Commit 09d7751 · 1 Parent(s): 65688d9

Update space

Files changed (2)
  1. app.py +163 -42
  2. requirements.txt +11 -1
app.py CHANGED
@@ -1,61 +1,182 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
+from transformers import AutoTokenizer
+import os
+
+import torch
+from llama_index.llms.huggingface import HuggingFaceLLM
+
+# Optional quantization to 4bit
+from transformers import BitsAndBytesConfig
+from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+from llama_index.core import Settings
+
+import faiss
+from llama_index.core import (
+    load_index_from_storage,
+    StorageContext,
+)
+from llama_index.vector_stores.faiss import FaissVectorStore
+from llama_index.core.tools import QueryEngineTool, ToolMetadata
+
+import json
+from typing import Sequence, List
+
+from llama_index.core.llms import ChatMessage
+from llama_index.core.tools import BaseTool, FunctionTool
+from llama_index.core.agent import ReActAgent
+
+import nest_asyncio
+
+HF_TOKEN = os.environ.get("HF_TOKEN", None)
+
+DESCRIPTION = '''
+<div>
+<h1 style="text-align: center;">Gemma 1.1 2B IT</h1>
+<p>This Space demonstrates agent-based RAG over multiple documents using Gemma 1.1 2B IT and LlamaIndex.</p>
+</div>
+'''
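+# NOTE: DESCRIPTION is defined here but is not currently passed to the ChatInterface below.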
+
+tokenizer = AutoTokenizer.from_pretrained(
+    "google/gemma-1.1-2b-it",
+    token=HF_TOKEN,
+)
+
+stopping_ids = [
+    tokenizer.eos_token_id,
+    tokenizer.convert_tokens_to_ids("<end_of_turn>"),
+]
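+# <end_of_turn> is the token Gemma emits at the end of a chat turn; stopping on it
+# (in addition to EOS) keeps generations from running past the model's reply.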
+
+quantization_config = BitsAndBytesConfig(
+    load_in_4bit=True,
+    bnb_4bit_compute_dtype=torch.float16,
+    bnb_4bit_quant_type="nf4",
+    bnb_4bit_use_double_quant=True,
+)
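+# NF4 4-bit weights with double quantization (the quantization constants are themselves
+# quantized) cut weight memory to roughly a quarter of fp16, while matmuls run in fp16.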
+
+llm = HuggingFaceLLM(
+    model_name="google/gemma-1.1-2b-it",
+    model_kwargs={
+        "token": HF_TOKEN,
+        "torch_dtype": torch.bfloat16,  # comment this line and uncomment below to use 4bit
+        # "quantization_config": quantization_config,
+    },
+    generate_kwargs={
+        "do_sample": True,
+        "temperature": 0.6,
+        "top_p": 0.9,
+    },
+    tokenizer_name="google/gemma-1.1-2b-it",
+    tokenizer_kwargs={"token": HF_TOKEN},
+    stopping_ids=stopping_ids,
+)
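+# HuggingFaceLLM loads the model in-process via transformers, so generation runs on the
+# Space's own hardware rather than through the hosted Inference API.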
+
+embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-large-en-v1.5")
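+# The embedding model must be the same one used when the persisted indexes were built,
+# or query vectors will not line up with the stored document vectors.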
+
+# dimensions of bge-large-en-v1.5 obtained from https://huggingface.co/BAAI/bge-large-en-v1.5
+d = 1024
+faiss_index = faiss.IndexFlatL2(d)
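+# A fresh IndexFlatL2 is only needed when building a store from scratch; the persisted
+# stores loaded below ship with their own serialized FAISS indexes.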
+
+nest_asyncio.apply()
+
+# bge embedding model
+Settings.embed_model = embed_model
+
+# GPU - Llama-3-8B-Instruct model
+# CPU - Gemma 1.1 2B it instruct
+Settings.llm = llm
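+# Settings is LlamaIndex's global configuration: the indexes and query engines created
+# below pick up this LLM and embedding model implicitly.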
+
+# rebuild storage contexts and reload the persisted FAISS-backed indexes
+
+geoVectorStore = FaissVectorStore.from_persist_dir("./geoindex/")
+
+geoStorageContext = StorageContext.from_defaults(
+    vector_store=geoVectorStore, persist_dir="./geoindex/"
+)
+
+geoindex = load_index_from_storage(storage_context=geoStorageContext)
+
+bioVectorStore = FaissVectorStore.from_persist_dir("./bioindex/")
+
+bioStorageContext = StorageContext.from_defaults(
+    vector_store=bioVectorStore, persist_dir="./bioindex/"
+)
+
+bioindex = load_index_from_storage(storage_context=bioStorageContext)
+
+geo_engine = geoindex.as_query_engine(similarity_top_k=3)
+bio_engine = bioindex.as_query_engine(similarity_top_k=3)
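+# similarity_top_k=3 retrieves the three nearest chunks (by L2 distance in FAISS) for each query.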
+
+query_engine_tools = [
+    QueryEngineTool(
+        query_engine=geo_engine,
+        metadata=ToolMetadata(
+            name="geography",
+            description=(
+                "This is a geography textbook; it provides information about geography. "
+                "Use a detailed plain text question as input to the tool."
+            ),
+        ),
+    ),
+    QueryEngineTool(
+        query_engine=bio_engine,
+        metadata=ToolMetadata(
+            name="biology",
+            description=(
+                "This is a biology textbook; it provides information about biology. "
+                "Use a detailed plain text question as input to the tool."
+            ),
+        ),
+    ),
+]
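+# The ReAct agent routes each question by matching it against these tool names and
+# descriptions, so the descriptions double as routing instructions.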
+
+agent = ReActAgent.from_tools(
+    query_engine_tools,
+    llm=llm,
+    verbose=False,
+)
+
+def respond(
+    message,
+    history,  # supplied by gr.ChatInterface; unused here
+    # system_message,
+    # max_tokens,
+    # temperature,
+    # top_p,
+):
+    prompt = f'''Analyze the question: {message} and use the appropriate tool to get the relevant context and answer the question; do not answer on your own, and output only the Observation.'''
+    response = agent.chat(prompt)
+    return str(response)
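+# respond() drives one ReAct pass per user message: agent.chat reasons, calls a tool,
+# reads the observation, and the final answer string is rendered by the chat UI below.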
 
 """
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 """
 demo = gr.ChatInterface(
     respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
+    # additional_inputs=[
+    #     gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+    #     gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+    #     gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+    #     gr.Slider(
+    #         minimum=0.1,
+    #         maximum=1.0,
+    #         value=0.95,
+    #         step=0.05,
+    #         label="Top-p (nucleus sampling)",
+    #     ),
+    # ],
+    examples=[
+        ["What are different types of rural settlement?"],
+        ["Explain Urbanisation in India?"],
+        ["What was the level of urbanisation in India in 2011?"],
+        ["List the religious and cultural towns in India?"],
     ],
+    cache_examples=False,
 )
requirements.txt CHANGED
@@ -1 +1,11 @@
-huggingface_hub==0.22.2
+huggingface_hub==0.22.2
+llama-index
+llama-index-llms-huggingface
+llama-index-embeddings-huggingface
+transformers
+accelerate
+bitsandbytes
+llama-index-readers-file
+pymupdf
+llama-index-vector-stores-faiss
+faiss-cpu