File size: 1,497 Bytes
177fa2a
0591939
 
 
 
 
 
 
 
 
 
5bfdcc1
 
 
0591939
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5bfdcc1
0591939
 
 
5bfdcc1
0591939
 
 
5bfdcc1
0591939
 
 
 
 
 
5bfdcc1
0591939
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
import os

os.environ["HF_HOME"] = "/tmp/huggingface"
os.makedirs("/tmp/huggingface", exist_ok=True)
from fastapi import FastAPI
from pydantic import BaseModel
import torch
import numpy as np
from transformers import AutoTokenizer, AutoModel
from sklearn.linear_model import LogisticRegression
import uvicorn

app = FastAPI()

# Load Hugging Face model
# NOTE(review): model weights load eagerly at import time — first run
# downloads bert-base-uncased into HF_HOME (/tmp/huggingface, set above);
# confirm startup latency / disk use is acceptable for this deployment.
model_name = "bert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)

# Embedding helper shared by training (below) and the /predict endpoint.
def get_embedding(text):
    """Encode *text* with BERT and return its [CLS] vector(s) as a NumPy array."""
    encoded = tokenizer(
        text, return_tensors="pt", truncation=True, padding=True, max_length=512
    )
    # Inference only — disable autograd bookkeeping.
    with torch.no_grad():
        hidden = model(**encoded).last_hidden_state
    # Position 0 is the [CLS] token, used as the sequence-level embedding.
    return hidden[:, 0, :].numpy()

# Sample dataset: a tiny hand-labeled seed set, embedded once at startup.
texts = ["I love this!", "This is terrible.", "Fantastic experience!", "I hate it.", "Absolutely wonderful!", "Worst ever!"]
labels = [1, 0, 1, 0, 1, 0]  # 1 = Positive, 0 = Negative
# Embed all texts in ONE batched forward pass instead of one model call per
# text: the tokenizer accepts a list (padding=True pads to the longest item
# and the attention mask keeps pad positions out of the [CLS] vectors), so
# get_embedding(texts) returns the same (6, hidden) matrix as stacking six
# single-text calls — just ~6x fewer forward passes.
X = get_embedding(texts)
y = np.array(labels)

# Train model: a logistic-regression head over the frozen BERT embeddings.
clf = LogisticRegression()
clf.fit(X, y)

# Define request format
class InputText(BaseModel):
    # JSON body for /predict: {"text": "..."}; arbitrary length is accepted
    # (the embedding step truncates at 512 tokens).
    text: str

@app.post("/predict")
def predict_sentiment(data: InputText):
    """Embed the submitted text and classify it with the trained model."""
    embedding = get_embedding(data.text)
    label = clf.predict(embedding)[0]
    if label == 1:
        sentiment = "Positive 😊"
    else:
        sentiment = "Negative 😡"
    return {"sentiment": sentiment}

if __name__ == "__main__":
    # Bind to all interfaces; port 7860 is presumably chosen for Hugging Face
    # Spaces (its default app port) — confirm against the deployment target.
    uvicorn.run(app, host="0.0.0.0", port=7860)