lfernandopg commited on
Commit
d3a6db8
·
1 Parent(s): 818a96d

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -0
app.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import numpy as np
3
+ from transformers import BertTokenizer, TFBertForSequenceClassification
4
+ import torch
5
+
# NOTE(review): st.cache is deprecated in current Streamlit releases
# (st.cache_resource is the modern replacement for model objects) -- kept
# as-is to match the Streamlit API level this file was written against.
@st.cache(allow_output_mutation=True)
def get_model():
    """Load and cache the tokenizer/classifier pair used by the app.

    Returns:
        tuple: (tokenizer, model) -- the 'bert-base-uncased' BertTokenizer
        and the fine-tuned TFBertForSequenceClassification pulled from the
        Hugging Face Hub repo "lfernandopg/Proyecto-Transformers".
    """
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    model = TFBertForSequenceClassification.from_pretrained("lfernandopg/Proyecto-Transformers")
    return tokenizer, model
# Fetch the (cached) tokenizer/model pair once per session.
tokenizer, model = get_model()

# Minimal UI: a free-text box plus a button that triggers the analysis below.
user_input = st.text_area('Enter Text to Analyze')
button = st.button("Analyze")
# Class index -> profession label, mirroring the model's output head.
# NOTE: the trailing spaces on the last two labels are preserved exactly as
# the original app displayed them.
_PROFESSIONS = (
    'Accountant',
    'Actuary',
    'Biologist',
    'Chemist',
    'Civil engineer',
    'Computer programmer',
    'Data scientist',
    'Database administrator',
    'Dentist',
    'Economist',
    'Environmental engineer',
    'Financial analyst',
    'IT manager',
    'Mathematician',
    'Mechanical engineer',
    'Physician assistant',
    'Psychologist',
    'Statistician',
    'Systems analyst',
    'Technical writer ',
    'Web developer '
)
d = dict(enumerate(_PROFESSIONS))
41
+
42
+
if user_input and button:
    # BUG FIX: the model loaded by get_model() is a TensorFlow class
    # (TFBertForSequenceClassification), but the original code asked the
    # tokenizer for PyTorch tensors (return_tensors='pt') and then called
    # .detach(), which exists only on torch tensors -- that combination
    # fails at runtime. Use TF tensors and convert logits via .numpy().
    test_sample = tokenizer([user_input], padding=True, truncation=True,
                            max_length=512, return_tensors='tf')
    output = model(**test_sample)
    st.write("Logits: ", output.logits)
    # argmax over the class axis selects the highest-scoring profession id.
    y_pred = np.argmax(output.logits.numpy(), axis=1)
    st.write("Prediction: ", d[y_pred[0]])