Keith Bourne committed on
Commit
70228fa
·
1 Parent(s): 06f0b72

Add update

Browse files
Files changed (2) hide show
  1. app.py +28 -0
  2. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Streamlit front-end for the Meditron chat demo.

Currently this only renders the page title. The Hugging Face Inference
API call below is kept commented out until authentication is wired up.
"""
import streamlit as st
import requests  # used by the (currently disabled) Inference API call below

st.title('Meditron Chat')

# --- Disabled: Hugging Face Inference API query ------------------------------
# NOTE(review): re-enabling this block requires `import os` (it reads
# os.environ['auth']) and a valid API token in the environment; neither is
# present in this file yet — confirm before uncommenting.
#
# API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-2-70b-chat-hf"
# headers = {
#     "Accept": "application/json",
#     "Content-Type": "application/json",
# }
#
# auth = os.environ['auth']
# headers = {"Authorization": f"{auth}"}
#
# def query(payload):
#     """POST *payload* to the inference endpoint and return the decoded JSON."""
#     response = requests.post(API_URL, headers=headers, json=payload)
#     return response.json()
#
# output = query({
#     "inputs": "Can you get amnesia from anesthesia?",
#     "parameters": {},
# })
# st.write('Can you get amnesia from anesthesia?')
#
# st.write('Model is thinking...')
#
# print(f'Query response is {output}')
# st.write(f'Query response is {output}')
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
streamlit  # this worked, but example gave one below: st-gsheets-connection
requests
# langchain
# wget
# llama-index
# cohere
# llama-cpp-python