sergioska committed on
Commit
4dfbd04
·
1 Parent(s): 55dc66d

using pipeline

Browse files
Files changed (1) hide show
  1. app.py +4 -7
app.py CHANGED
@@ -1,13 +1,10 @@
1
  import streamlit as st
2
  # Load model directly
3
- from transformers import AutoTokenizer, AutoModelForCausalLM
4
 
5
- tokenizer = AutoTokenizer.from_pretrained("openchat/openchat")
6
- model = AutoModelForCausalLM.from_pretrained("openchat/openchat")
7
- prompt = ""
8
- model_input = tokenizer(prompt, return_tensors="pt").to("cuda")
9
- response = tokenizer.decode(model.generate(**model_input,temperature=0.1, max_length=5000)[0], skip_special_tokens=True)
10
 
11
  prompt = st.chat_input("Say something")
12
  if prompt:
13
- st.write(f"User has sent the following prompt: {prompt}")
 
import streamlit as st

# Load model directly
from transformers import pipeline

# Build the sentiment-analysis pipeline once at module import (Streamlit reruns
# the script on each interaction; the heavy model load still happens here, at top
# level, exactly as in the original — NOTE(review): consider @st.cache_resource).
classifier = pipeline("sentiment-analysis")


prompt = st.chat_input("Say something")
if prompt:
    # Bug fix: the original classified a hard-coded demo sentence at import time
    # and then printed that result while *claiming* to show the user's prompt.
    # Classify the actual prompt and show both the prompt and the model output.
    output = classifier(prompt)
    st.write(f"User has sent the following prompt: {prompt}")
    st.write(output)