cmarley314 commited on
Commit
ce48500
·
verified ·
1 Parent(s): 4a6cff3

Upload 8 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Solar[[:space:]]Eclipse[[:space:]]Information.pdf filter=lfs diff=lfs merge=lfs -text
37
+ Solar[[:space:]]Eclipse[[:space:]]Table.pdf filter=lfs diff=lfs merge=lfs -text
Solar Eclipse Information.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5bb79220fd02ad5e690cdbdedd4e8adfe14af816ada1b3f788b29784d8d243e6
3
+ size 1640826
Solar Eclipse Table.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:64e43d97461076f828796edbda176477ebc9b2726b6f3e58237dfcaa1a7754c8
3
+ size 10125700
agents2.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from api_interface import API_Interface
2
+
3
+ ai = API_Interface(chunk_size=1200)
4
+
5
class Obnoxious_Agent:
    """Flags user queries that the chat model judges to be obnoxious."""

    def check_query(self, query, need_answer=False) -> bool:
        """Return True when the model deems *query* obnoxious.

        When *need_answer* is set, the model's full reply is printed
        for debugging.
        """
        prompt = f"""
        Is this query obnoxious? Answer using 'Yes' or 'No', and explain.

        Query: {query}
        """
        reply = ai.client_chat([{"role": "user", "content": prompt}])
        if need_answer:
            print(reply)
        # The model is asked to lead with Yes/No; any "yes" counts.
        return "yes" in reply.lower()
19
+
20
+
21
class Query_Agent:
    """Retrieves relevant documents from the vector store."""

    def query_vector_store(self, query, k_docs=3, k_tables=4):
        """Return (docs, doc_scores, tables, table_scores) for *query*."""
        doc_part, table_part = ai.query_pinecone_vector_store(
            query, top_k_docs=k_docs, top_k_tbls=k_tables)
        docs, doc_scores = doc_part
        tables, table_scores = table_part
        return docs, doc_scores, tables, table_scores
27
+
28
+
29
class Answering_Agent:
    """Generates responses for user queries.

    Keeps a speaking-style ``mode`` ("concise" or "talkative") that is
    injected into every answering prompt.
    """

    def __init__(self, mode="concise"):
        # Current speaking style; mutated by check_mode().
        self.mode = mode

    def check_mode(self, query):
        """Update self.mode when *query* asks for a style change.

        Returns True when the mode was changed, False otherwise.
        """
        prompt = f"""
        If the query asks for a change in speaking style, which most closely matches the user's needs - "concise" or "talkative"?
        If the query does not ask for a change in speaking style, respond with "N/A"?

        Query: {query}
        """
        messages = [{"role": "user", "content": prompt}]
        response = ai.client_chat(messages)

        new_mode = False
        if response.lower().find("concise") != -1:
            self.mode = "concise"
            new_mode = True
        elif response.lower().find("talkative") != -1:
            # BUG FIX: previously stored the misspelled mode "talktative",
            # which generate_response would then echo verbatim into its
            # prompt ("Answer ... in a talktative manner").
            self.mode = "talkative"
            new_mode = True
        return new_mode

    def generate_response(self, query: str, documents: list, conv_history: list, context: str = ""):
        """Generate a reply for *query* grounded in *documents*.

        Returns (response_text, extended_conv_history). The history is the
        caller's list with the new developer message appended; only the
        fresh developer message is actually sent to the model.
        """
        if context != "":
            context = f"\nYou are given the following conversation context: {context}\n\n"

        new_messages = [{"role": "developer", "content": f"""
        You are given the following information:

        {documents}
        {context}

        Do not answer any unrelated questions in the query.
        Answer the following query in a {self.mode} manner:

        {query}
        """}]

        conv_history.extend(new_messages)
        response = ai.client_chat(new_messages)

        return response, conv_history  # return conv_history in case it's useful

    def requires_context(self, query, need_answer=False) -> bool:
        """Return True when the model says *query* needs further context."""
        prompt = f"""
        Does this query require further context? Answer using "Yes" or "No", then explain.

        Query: {query}
        """
        messages = [{"role": "user", "content": prompt}]
        response = ai.client_chat(messages)
        if need_answer:
            print(response)
        return response.lower().find("yes") != -1
88
+
89
+
90
class Relevant_Documents_Agent:
    """Determines if documents are relevant to the query."""

    def get_relevance(self, query, documents, need_answer=False) -> bool:
        """Return True when at least one of *documents* is relevant to *query*."""
        relevances = [self.__get_doc_relevance(query, doc, need_answer) for doc in documents]
        if need_answer:
            # BUG FIX: this debug dump was printed unconditionally; every
            # other diagnostic in these agents is gated on need_answer.
            print("\n\n", relevances)
        return any(relevances)

    def __get_doc_relevance(self, query, document, need_answer=False) -> bool:
        """Ask the model whether one *document* is on-topic for *query*.

        Restricts "relevant" to solar eclipses, numerical data, or astronomy.
        """
        prompt = f"""Does any portion of the following text or context share the
        same or similar topic as the following query, and is the query related
        to solar eclipses, numerical data, or astronomy? Answer with "Yes" or "No", and explain why or why not.

        Text: {document.page_content}

        Query: {query}"""

        messages = [{"role": "user", "content": prompt}]
        response = ai.client_chat(messages)
        if need_answer:
            # Trim the prompt echo so the log stays readable.
            print("Relevance prompt:", prompt[250:1000].replace("\n", " <> "))
            print("Relevant response:", response, "\n\n\n")

        return response.lower().find("yes") != -1
114
+
115
class Greeting_Agent:
    """Recognises greetings and produces a friendly direct reply."""

    def check_greeting(self, query):
        """Return True when *query* reads as a greeting or opener."""
        prompt = f"""
        Would the following statement be regarded as a general
        greeting or friendly conversation opener? Answer using 'Yes' or 'No'.

        Statement: {query}
        """
        verdict = ai.client_chat([{"role": "user", "content": prompt}])
        return "yes" in verdict.lower()

    def get_greeting_response(self, query):
        """Answer the greeting by forwarding it straight to the chat model."""
        return ai.client_chat([{"role": "user", "content": query}])
130
+
131
class Head_Agent:
    """Orchestrates the sub-agents: moderation, greeting, retrieval, answering."""

    def __init__(self):
        self.setup_sub_agents()

    def setup_sub_agents(self):
        """Instantiate every sub-agent used by generate_response."""
        self.a_obnoxious = Obnoxious_Agent()
        self.a_query = Query_Agent()
        self.a_answering = Answering_Agent()
        self.a_relevant = Relevant_Documents_Agent()
        self.a_greeting = Greeting_Agent()

    def generate_response(self, query, conv_history: list = None):
        """Route *query* through the pipeline and return the reply text.

        conv_history is the caller's message list; the current user prompt
        is expected to be its last entry (see app.py), so the context
        transcript below deliberately excludes it.
        """
        conv_history = conv_history or []
        if self.a_obnoxious.check_query(query):
            return "Please do not ask obnoxious questions."

        if self.a_greeting.check_greeting(query):
            return self.a_greeting.get_greeting_response(query)

        ret = ""

        if self.a_answering.check_mode(query):
            ret = "I have updated my communication style to better suit your needs!\n\n"

        # Plain-text transcript of the last few turns, minus hidden
        # developer messages and the current prompt.
        context = ""
        for msg in conv_history[-8:-1]:
            if msg["role"] != "developer":
                # BUG FIX: nested double quotes inside an f-string are a
                # SyntaxError on Python < 3.12; use single quotes inside.
                context += f"{msg['role']}: {msg['content']}\n"

        no_rel = ("No relevant information found. "
                  "Please refine your query or ask another question pertaining to solar eclipses on Earth.")

        # BUG FIX: retrieval previously searched with only the prior-context
        # string, which is empty on the first turn; include the current
        # query so the vector store always sees the actual question.
        documents, scores, tables, tscores = self.a_query.query_vector_store(context + query)
        context_plus = "This is our prior conversation for additional context:\n\n" + context + f"Latest user query: {query}"
        docs_is_c_relevant = self.a_relevant.get_relevance(context_plus, documents + tables, need_answer=True)
        print(scores, "\n", tscores, "\n\n", context_plus, "\n", "#" * 50)

        if not docs_is_c_relevant:
            return no_rel
        print("Using context to generate response.")
        # The extended history returned by the answering agent is unused here.
        response, _ = self.a_answering.generate_response(query, documents + tables, conv_history, context)

        ret += response
        return ret
api_interface.py ADDED
@@ -0,0 +1,261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_openai import OpenAIEmbeddings
2
+ from langchain_community.vectorstores import Pinecone
3
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
4
+ from langchain_pinecone import PineconeVectorStore
5
+ from pinecone import Pinecone
6
+ from tqdm import tqdm
7
+ from openai import OpenAI
8
+ import string
9
+ import pickle
10
+ import os
11
+ import time
12
+
13
+ from langchain_community.document_loaders import PyMuPDFLoader
14
+
15
class API_Interface:
    """Wraps the OpenAI and Pinecone APIs for the eclipse chatbot.

    On construction this chunks the two source PDFs, (re)builds the Pinecone
    namespaces when they are missing or empty, and exposes helpers for
    similarity search and chat completion.
    """

    def __init__(self, chunk_size: int = 1500, embed_model: str = "text-embedding-3-small", chat_model: str = "gpt-3.5-turbo"):
        self.chunk_size = chunk_size
        self.embed_model = embed_model
        self.chat_model = chat_model

        # API keys are read from local text files (first line of each).
        with open("open_ai_key.txt") as infile:
            OPEN_AI_KEY = infile.readline().strip()
        with open("pinecone_key.txt") as infile:
            PINECONE_KEY = infile.readline().strip()

        self.__client = OpenAI(api_key=OPEN_AI_KEY)
        self.__pc = Pinecone(api_key=PINECONE_KEY)
        self.__index = self.__pc.Index('eep596mp2')

        print("Chunking documents.")
        self.chunked_texts, self.chunked_pnums = self.__chunk_document()
        self.table_texts, self.table_pnums = self.__chunk_tables()
        print("Initializing vector store.")
        self.namespace, self.vectorstore = self.__init_vectorstore(OPEN_AI_KEY)
        print("Initializing table store.")
        self.tablespace, self.tablestore = self.__init_tablestore(OPEN_AI_KEY)

    def __chunk_document(self) -> tuple[list[str], list[int]]:
        """Split the information PDF into chunks spanning page boundaries.

        Returns parallel lists: chunk texts and the page number each chunk
        came from.
        """
        loader = PyMuPDFLoader(file_path="Solar Eclipse Information.pdf", mode="page")
        docs = loader.load()

        page_texts = [page.page_content for page in docs]
        page_numbers = [page.metadata["page"] for page in docs]

        splitter = RecursiveCharacterTextSplitter(chunk_size=self.chunk_size, chunk_overlap=500)

        chunked_texts, chunk_page_numbers = [], []
        previous_page_tail = ""

        for text, pnum in zip(page_texts, page_numbers):
            # Prepend the previous page's tail so chunks can straddle pages.
            chunks = splitter.split_text(previous_page_tail + " " + text)
            chunked_texts.extend(chunks[:-1])
            chunk_page_numbers.extend([pnum] * (len(chunks) - 1))
            previous_page_tail = chunks[-1]
            # NOTE(review): the tail chunk is both kept here and re-prepended
            # to the next page, so its text appears twice — presumably a
            # deliberate extra overlap; confirm.
            chunked_texts.append(chunks[-1])
            chunk_page_numbers.append(pnum)

        return chunked_texts, chunk_page_numbers

    def __chunk_tables(self):
        """Parse the table PDF into one CSV-ish text blob per page.

        Returns parallel lists of per-page table texts and page numbers.
        """
        tabler = PyMuPDFLoader(file_path="Solar Eclipse Table.pdf", mode="page")
        tables = tabler.load()

        HEADER = "Catalog Number, Canon Plate, Calendar Date, Terrestrial Dynamical Time of Greatest Eclipse, UT - TD (s), Luna Number, Saros Number, Eclipse Type, QLE, Gamma, Eclipse Magnitude, Latitude, Longitude, Sun Altitude, Sun Azimuth, Path Width (km), Central Line Duration"

        table_texts = []
        for page in tables:
            c = page.page_content
            # Values start after the "km" token that ends the column headers.
            i = c.find("km")
            values = c[i + 3:].split("\n")
            text = ""
            idv = 0                   # position of the current value within a row
            dates = [None, None]      # first and last year seen on the page
            partial_flag = False      # row describes a partial ("P...") eclipse
            for val in values:
                if idv == 2:
                    # Third field of a row holds the year component of the date.
                    year = val
                    if dates[0] is None:
                        dates[0] = year
                    else:
                        dates[1] = year
                if idv % 16 == 4:
                    # NOTE(review): collapses a double space in this one
                    # field; presumably a PDF extraction artifact — confirm.
                    val = val.replace("  ", " ")
                text += val + " "
                idv += 1
                if val.startswith("P"):
                    partial_flag = True
                if val.endswith("W") or val.endswith("E"):
                    # Longitude marks the end of the positional fields; partial
                    # eclipses lack the final two (width/duration) columns.
                    if partial_flag:
                        idv = -2
                    else:
                        idv = -4
                if idv == 0:
                    # Row complete: start a new line and reset the row state.
                    text += "\n"
                    partial_flag = False
            table_texts.append(f"Solar eclipses between {dates[0]} and {dates[1]}:\n\n" + HEADER + "\n" + text)
        table_numbers = [page.metadata["page"] for page in tables]

        return table_texts, table_numbers

    def __namespace_needs_build(self, namespace):
        # True when *namespace* is absent, or present but empty (an empty
        # namespace is deleted so it gets rebuilt from scratch).
        _ns = self.__index.describe_index_stats()['namespaces'].get(namespace)
        if _ns is not None and _ns.get('vector_count') in (None, 0):
            self.__index.delete(delete_all=True, namespace=namespace)
            _ns = None
        return _ns is None

    def __upsert_records(self, namespace, texts, pnums, embeddings):
        # Batch-upsert (text, page, embedding) triples into *namespace*,
        # then block until Pinecone reports the namespace as present.
        records = []
        for i, (text, pnum, embedding) in enumerate(zip(texts, pnums, embeddings)):
            records.append({
                "id": f"chunk{i}",
                "values": embedding,
                "metadata": {
                    "text": text,
                    "page_number": pnum
                }
            })

        print(len(records))
        batch_size = 180
        print("... upsertting records.")
        for b in tqdm(range((len(records) - 1) // batch_size + 1)):
            self.__index.upsert(records[b * batch_size:(b + 1) * batch_size], namespace=namespace)

        while self.__index.describe_index_stats()['namespaces'].get(namespace) is None:
            time.sleep(1)

    def __make_store(self, OPEN_AI_KEY):
        # LangChain vector-store wrapper over the shared Pinecone index.
        openaiembs = OpenAIEmbeddings(api_key=OPEN_AI_KEY, model=self.embed_model)
        return PineconeVectorStore(self.__index, embedding=openaiembs)

    def __init_vectorstore(self, OPEN_AI_KEY):
        """Ensure the document namespace is populated; return (name, store).

        REFACTOR: the build/upsert/wait sequence that was duplicated with
        __init_tablestore now lives in the shared private helpers above.
        """
        NAMESPACE = f"ns_eclipse_{self.chunk_size}"

        if self.__namespace_needs_build(NAMESPACE):
            print("... generating embeddings.")
            self.__generate_embeddings()
            self.__upsert_records(NAMESPACE, self.chunked_texts, self.chunked_pnums, self.embeddings)

        print("Index stats:", self.__index.describe_index_stats())
        return NAMESPACE, self.__make_store(OPEN_AI_KEY)

    def __init_tablestore(self, OPEN_AI_KEY):
        """Ensure the table namespace is populated; return (name, store)."""
        NAMESPACE = "ts_eclipse"  # was an f-string with no placeholder

        if self.__namespace_needs_build(NAMESPACE):
            print("... generating table embeddings.")
            self.__generate_table_embeddings()
            self.__upsert_records(NAMESPACE, self.table_texts, self.table_pnums, self.tmbeddings)

        print("Index stats:", self.__index.describe_index_stats())
        return NAMESPACE, self.__make_store(OPEN_AI_KEY)

    def __generate_embeddings(self) -> None:
        """Populate self.embeddings, caching them in a local pickle file."""
        EMBED_PATH = f"eclipse_text_embeddings_{self.chunk_size}.pkl"

        def get_embedding(text):
            # One embedding per chunk; newlines are flattened first.
            text = text.replace("\n", " ")
            response = self.__client.embeddings.create(input=[text], model=self.embed_model)
            return response.data[0].embedding

        if not os.path.exists(EMBED_PATH):
            self.embeddings = []
            for text in tqdm(self.chunked_texts):
                self.embeddings.append(get_embedding(text))

            with open(EMBED_PATH, "wb") as outfile:
                pickle.dump(self.embeddings, outfile)
        else:
            print("--- found existing embeddings file. Shortcutting.")
            with open(EMBED_PATH, "rb") as infile:
                self.embeddings: list[list[float]] = pickle.load(infile)

    def __generate_table_embeddings(self) -> None:
        """Populate self.tmbeddings, caching them in a local pickle file."""
        # (removed an unused local HEADER constant that duplicated the one
        # in __chunk_tables)
        TABLE_PATH = "eclipse_table_embeddings.pkl"

        def get_embedding(text):
            text = text.replace("\n", " ")
            response = self.__client.embeddings.create(input=[text], model=self.embed_model)
            return response.data[0].embedding

        if not os.path.exists(TABLE_PATH):
            self.tmbeddings = []

            for table in tqdm(self.table_texts):
                self.tmbeddings.append(get_embedding(table))

            with open(TABLE_PATH, "wb") as outfile:
                pickle.dump(self.tmbeddings, outfile)
        else:
            print("--- found existing embeddings file. Shortcutting.")
            with open(TABLE_PATH, "rb") as infile:
                self.tmbeddings: list[list[float]] = pickle.load(infile)

    def query_pinecone_vector_store(self, query: str, top_k_docs: int = 5, top_k_tbls: int = 5,
                                    namespace: str = None, tablespace: str = None):
        """Similarity-search both namespaces for *query*.

        Returns [(docs, doc_scores), (tables, table_scores)], each element a
        tuple-of-tuples produced by zipping the (Document, score) hits.
        """
        namespace = namespace or self.namespace
        tablespace = tablespace or self.tablespace
        # Hoisted: one stats call instead of two identical ones.
        known = self.__index.describe_index_stats().get('namespaces')
        assert namespace in known
        assert tablespace in known

        doc_hits = self.vectorstore.similarity_search_with_relevance_scores(query=query,
                                                                            k=top_k_docs,
                                                                            namespace=namespace)
        table_hits = self.vectorstore.similarity_search_with_relevance_scores(query=query,
                                                                              k=top_k_tbls,
                                                                              namespace=tablespace)

        return [tuple(zip(*doc_hits)), tuple(zip(*table_hits))]

    def client_chat(self, messages, model=None):
        """Send *messages* to the chat model and return the reply text."""
        model = model or self.chat_model
        response = self.__client.chat.completions.create(messages=messages, model=model)
        return response.choices[0].message.content
255
+
256
if __name__ == "__main__":
    # Smoke test: run an off-topic query and dump the retrieved chunks.
    tester = API_Interface()
    my_query = "What is the backpropogation algorithm?"
    # BUG FIX: query_pinecone_vector_store returns a list of two
    # (documents, scores) pairs; the old loop iterated that list directly
    # and crashed calling .metadata on a tuple. Unpack it instead.
    (docs, scores), (tables, tscores) = tester.query_pinecone_vector_store(my_query)
    for doc in docs + tables:
        print(doc.metadata["page_number"], doc.page_content, "\n\n")
app.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st

st.set_page_config(layout="wide")
st.title("Mini Project 2 Part 3: Streamlit Chatbot")
if "messages" not in st.session_state:
    # Conversation transcript, persisted across Streamlit reruns.
    st.session_state.messages = []

with st.spinner("Please wait while the chat bot is loading!"):
    # Importing agents2 triggers API_Interface construction (PDF chunking,
    # embedding generation), hence the spinner.
    from agents2 import *


# Display existing chat messages; hidden developer prompts are skipped.
for message in st.session_state.messages:
    if message["role"] != "developer":
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

# BUG FIX: Head_Agent was constructed unconditionally, so Streamlit rebuilt
# every sub-agent on each rerun (every user interaction). Build it once and
# keep it in session state.
if "head_agent" not in st.session_state:
    st.session_state.head_agent = Head_Agent()
a_h = st.session_state.head_agent

# Wait for user input
if prompt := st.chat_input("What would you like to chat about?"):
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("user"):
        st.markdown(prompt)
    with st.chat_message("assistant"):
        ai_message = a_h.generate_response(prompt, st.session_state.messages)
        st.markdown(ai_message)

    st.session_state.messages.append({"role": "assistant", "content": ai_message})
eclipse_table_embeddings.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:34dc5992431812d25a06c722e5edde59aa3fac59f7309389f5798f665801e401
3
+ size 2752555
eclipse_text_embeddings_1200.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e49f944552529449a2eb875e803a4864e28d6445598eb32551325e5d74854fd0
3
+ size 3126019
eclipse_text_embeddings_1500.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:95b96bed648622c3e801011cfa8e586c9bd18447dfd5af698c28d4240f3c0c87
3
+ size 2185453