Spaces:
Sleeping
Sleeping
import os

# Redirect the Hugging Face cache to a writable location (container
# filesystems such as HF Spaces are read-only outside /tmp). This must
# happen BEFORE importing transformers so the library picks up HF_HOME.
os.environ["HF_HOME"] = "/tmp/huggingface"
os.makedirs("/tmp/huggingface", exist_ok=True)

from fastapi import FastAPI
from pydantic import BaseModel
import torch
import numpy as np
from transformers import AutoTokenizer, AutoModel
from sklearn.linear_model import LogisticRegression
import uvicorn
app = FastAPI()

# Load the pretrained BERT encoder used to produce text embeddings.
# NOTE: this downloads weights on first run (network I/O at module import).
model_name = "bert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)
def get_embedding(text):
    """Return the [CLS] embedding of *text* as a (1, hidden_size) numpy array.

    Input is truncated/padded to at most 512 tokens (BERT's limit); the
    forward pass runs under ``torch.no_grad()`` since this is inference only.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
    # Position 0 is the [CLS] token, used here as the sentence representation.
    return outputs.last_hidden_state[:, 0, :].numpy()
# Tiny in-memory demo dataset; the classifier is fit once at startup.
texts = ["I love this!", "This is terrible.", "Fantastic experience!", "I hate it.", "Absolutely wonderful!", "Worst ever!"]
labels = [1, 0, 1, 0, 1, 0]  # 1 = Positive, 0 = Negative

# Stack the (1, hidden_size) embeddings into an (n_samples, hidden_size) matrix.
X = np.vstack([get_embedding(text) for text in texts])
y = np.array(labels)

# Train a simple logistic-regression head on top of the frozen BERT embeddings.
clf = LogisticRegression()
clf.fit(X, y)
# Request-body schema for the prediction endpoint.
class InputText(BaseModel):
    # The raw text to classify.
    text: str
# BUG FIX: the original function was never registered as a route, so the
# API exposed no prediction endpoint. Register it as POST /predict.
@app.post("/predict")
def predict_sentiment(data: InputText):
    """Classify ``data.text`` and return {"sentiment": "<label> <emoji>"}."""
    user_embedding = get_embedding(data.text)
    prediction = clf.predict(user_embedding)
    # The emoji in the original source were mojibake ("๐"/"๐ก" — mis-decoded
    # UTF-8 bytes); restored to the intended characters.
    sentiment = "Positive 😊" if prediction[0] == 1 else "Negative 😡"
    return {"sentiment": sentiment}
if __name__ == "__main__":
    # Port 7860 is the default expected by Hugging Face Spaces.
    uvicorn.run(app, host="0.0.0.0", port=7860)