rohan112 committed on
Commit
70498c9
·
1 Parent(s): 0c57bb4
Files changed (1) hide show
  1. app.py +91 -0
app.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""## RetrievalQA with LLaMA 2-70B on Together API"""

# import libraries
import os
import together
import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, Field, root_validator
from langchain.llms.base import LLM
from langchain.vectorstores import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import RetrievalQA
from langchain.document_loaders import TextLoader
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders import DirectoryLoader
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.chains.question_answering import load_qa_chain
import gradio as gr

# SECURITY: a live Together API key was previously hard-coded on this line and
# committed to the repository — that key is compromised and must be rotated.
# Read the key from the environment instead; fail fast with a clear message so
# the app does not start half-configured.
if "TOGETHER_API_KEY" not in os.environ:
    raise EnvironmentError(
        "Set the TOGETHER_API_KEY environment variable before running this app."
    )
together.api_key = os.environ["TOGETHER_API_KEY"]

# spin up the hosted llama-2-70b-chat endpoint on Together
together.Models.start("togethercomputer/llama-2-70b-chat")
class TogetherLLM(LLM):
    """LangChain LLM wrapper around the Together completions API."""

    # model endpoint to use
    model: str = "togethercomputer/llama-2-70b-chat"

    # Together API key. default_factory defers the environment lookup to
    # instantiation time; the original read os.environ at class-definition
    # time, which raised KeyError on import when the variable was unset.
    together_api_key: str = Field(default_factory=lambda: os.environ["TOGETHER_API_KEY"])

    # what sampling temperature to use
    temperature: float = 0.7

    # the maximum number of tokens to generate in the completion
    max_tokens: int = 512

    class Config:
        # reject unexpected constructor kwargs instead of silently ignoring them
        extra = Extra.forbid

    @property
    def _llm_type(self) -> str:
        """Return type of LLM."""
        return "together"

    def _call(
        self,
        prompt: str,
        **kwargs: Any,
    ) -> str:
        """Call the Together endpoint and return the generated text.

        Args:
            prompt: the fully-formed prompt string to complete.
            **kwargs: additional arguments accepted for LangChain
                compatibility; not forwarded to the API.

        Returns:
            The text of the first completion choice.
        """
        together.api_key = self.together_api_key
        output = together.Complete.create(
            prompt,
            model=self.model,
            max_tokens=self.max_tokens,
            temperature=self.temperature,
        )
        # response shape: {'output': {'choices': [{'text': ...}]}}
        text = output['output']['choices'][0]['text']
        return text
# Load and process the text files.
# TextLoader reads a single plain-text file; the commented DirectoryLoader
# variant below is the drop-in alternative for a folder of PDFs.
loader = TextLoader('data.txt')
# loader = DirectoryLoader('./folder/', glob="./*.pdf", loader_cls=PyPDFLoader)
documents = loader.load()

# Make a chain: a low temperature (0.1) keeps answers close to the source text.
llm = TogetherLLM(
    model= "togethercomputer/llama-2-70b-chat",
    temperature = 0.1,
    max_tokens = 1024
)

# chain — "stuff" concatenates all loaded documents into a single prompt,
# so this assumes the whole corpus fits in the model's context window.
chain = load_qa_chain(llm=llm, chain_type="stuff")
query1= "what is this story about?"
# NOTE(review): this call runs at import time and its result is discarded —
# presumably a warm-up/smoke test; confirm it is intentional.
chain.run(input_documents=documents, question=query1)
# gradio
description = "This is a chatbot application based on the llama2 70B model. Simply type an input to get started with chatting."
examples = [["what is your contact number?"], ["where you are currently working?"]]


def greet(query1, history):
    """Gradio chat callback: answer query1 via the QA chain.

    The chat history is accepted (ChatInterface passes it) but unused —
    every turn is answered independently from the loaded documents.
    """
    framing = "answer as if person responding. do not ask question back. \n Question: "
    return chain.run(input_documents=documents, question=framing + query1)


gr.ChatInterface(
    greet,
    title="Chat with my Bot",
    description=description,
    examples=examples,
).launch(debug=True)