siddh4rth commited on
Commit
f5592d1
·
1 Parent(s): 5f21a53

first commit

Browse files
Files changed (1) hide show
  1. app.py +96 -0
app.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

import gradio as gr

import openai
import pinecone

# SECURITY: a Pinecone API key was previously hardcoded on this line and
# committed — that credential is compromised and must be rotated.  Read the
# key from the environment instead of embedding it in source.
pinecone.init(
    api_key=os.environ["PINECONE_API_KEY"],
    environment="us-east1-gcp",
)

# Maximum size (in characters, not tokens) of the retrieved-context section
# packed into a completion prompt.
limit = 5000
# 3750

# OpenAI model used to embed user queries for similarity search.
embed_model = "text-embedding-ada-002"

# Pinecone index holding the pre-embedded document chunks.
index_name = 'gen-qa'
index = pinecone.Index(index_name)
18
+
19
# retrieve relevant answers
def retrieve(query):
    """Embed *query*, fetch the most similar contexts from Pinecone, and
    return a completion prompt with those contexts inlined.

    The retrieved context chunks are joined with ``---`` separators and
    truncated so the joined context stays under the module-level ``limit``
    (measured in characters, not tokens).

    :param query: the user's natural-language question.
    :returns: a prompt string ending in ``Question: ...\\nAnswer:``.
    """
    res = openai.Embedding.create(
        input=[query],
        engine=embed_model,
    )

    # Dense embedding vector for the query.
    xq = res['data'][0]['embedding']

    # Top matching chunks; the chunk text is stored in each match's metadata.
    res = index.query(xq, top_k=3, include_metadata=True)
    contexts = [
        x['metadata']['text'] for x in res['matches']
    ]

    # Build the prompt with the retrieved contexts included.
    prompt_start = (
        "Answer the question based on the context below.\n\n"+
        "Context:\n"
    )
    prompt_end = (
        f"\n\nQuestion: {query}\nAnswer:"
    )

    # BUGFIX: the original loop ran ``range(1, len(contexts))`` and only
    # assigned ``prompt`` inside the loop, so 0 or 1 retrieved contexts
    # raised UnboundLocalError, and the full join was never checked against
    # ``limit``.  Walk every prefix instead: keep the longest prefix whose
    # joined text stays under the limit, defaulting to all contexts.
    separator = "\n\n---\n\n"
    included = contexts
    for i in range(1, len(contexts) + 1):
        if len(separator.join(contexts[:i])) >= limit:
            included = contexts[:i - 1]
            break
    prompt = prompt_start + separator.join(included) + prompt_end
    return prompt
60
+
61
# then we complete the context-infused query
def complete(prompt):
    """Send *prompt* to the text-davinci-003 completion endpoint and return
    the model's answer with surrounding whitespace stripped."""
    # Deterministic completion: temperature 0, no sampling penalties.
    request = dict(
        engine='text-davinci-003',
        prompt=prompt,
        temperature=0,
        max_tokens=500,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
        stop=None,
    )
    response = openai.Completion.create(**request)
    # A ChatCompletion variant was sketched here originally:
    # openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=prompt)
    return response['choices'][0]['text'].strip()
79
+
80
+ # query = (
81
+ # input("Question: ")
82
+ # )
83
+
84
+
85
+ # print(result)
86
+
87
def greet(query):
    """Gradio handler: answer *query* via retrieval-augmented completion."""
    # Build the context-infused prompt from Pinecone, then ask the
    # completion model to answer from it; return only the answer text.
    return complete(retrieve(query))
93
+
94
# Wire the handler into a simple text-in / text-out Gradio UI and serve it.
# NOTE: this launches at import time — there is no __main__ guard.
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()
+