File size: 2,490 Bytes
836237f
 
 
 
 
 
 
 
 
 
ba5b501
 
 
 
 
 
 
 
 
 
 
 
 
836237f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
from fastapi import FastAPI
import nest_asyncio
from pyngrok import ngrok
import uvicorn
import requests
import torch
# from transformers import DistilBertTokenizer, DistilBertForSequenceClassification

# tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
# model = DistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
import torch
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification

# Load the SST-2 fine-tuned DistilBERT sentiment tokenizer/model from the HF Hub.
# NOTE(review): this runs at import time and downloads weights on first run.
tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
model = DistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")

# One-off smoke test of the model on a fixed sentence (no gradient tracking needed).
inputs = tokenizer("Its very hot outside", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Index of the highest-scoring class (SST-2: NEGATIVE/POSITIVE per the model config).
predicted_class_id = logits.argmax().item()
# NOTE(review): this label lookup is evaluated and discarded — a notebook leftover;
# nothing below reads it.
model.config.id2label[predicted_class_id]


# FastAPI application instance; the route decorators below register against it.
app = FastAPI()

# RapidAPI sentiment-analysis endpoint used by call_sentiment_api() below.
url = "https://sentiment-analysis9.p.rapidapi.com/sentiment"
#user_input = input("Please enter a text for sentiment analysis: ")
def call_sentiment_api(user_input):
  """Send *user_input* to the RapidAPI sentiment service and return the parsed JSON reply.

  Parameters:
    user_input: text to analyze ("language": "en" is hard-coded, so English is assumed).

  Returns:
    The decoded JSON response body exactly as the API returns it.

  Raises:
    requests.RequestException on connection failure or timeout.
  """
  payload = [
    {
      "id": "1",
      "language": "en",
      "text": user_input
    }
  ]
  headers = {
    "content-type": "application/json",
    "Accept": "application/json",
    # SECURITY: hard-coded API key committed to source — move to an environment
    # variable (e.g. os.environ["RAPIDAPI_KEY"]) and rotate this credential.
    "X-RapidAPI-Key": "5cf8fcaf61msh613f010a34f3576p1953e5jsn110a1e6c667d",
    "X-RapidAPI-Host": "sentiment-analysis9.p.rapidapi.com"
  }

  # Timeout keeps the FastAPI endpoint from hanging forever on a stalled connection.
  response = requests.post(url, json=payload, headers=headers, timeout=30)

  # Parse the body once (the original called response.json() twice: once to print,
  # once to return).
  result = response.json()
  print(result)
  return result

# def sentiment_model_hf(user_input):
#   inputs = tokenizer(user_input, return_tensors="pt")
#   with torch.no_grad():
#     logits = model(**inputs).logits
#   predicted_class_id = logits.argmax().item()

#   return model.config.id2label[predicted_class_id]


@app.get('/sentiment_ra/{user_input}')
async def sentiment(user_input):
  """Proxy the path segment to the RapidAPI sentiment helper and relay its JSON reply."""
  result = call_sentiment_api(user_input)
  return result

# @app.get('/sentiment_hf/{user_input}')
# async def sentiment_hf(user_input):
#   return sentiment_model_hf(user_input)


@app.get('/a')
async def abc():
  """Demo endpoint: respond with a fixed greeting string."""
  return "Hello Atom Camp --  Our first Endpoint"

@app.get('/2nd')
async def atom():
  """Second demo endpoint: returns a fixed string."""
  return "its our 2nd endpoint"

@app.get('/{multiply}')
async def multiply_by_ten(multiply: int):
  """Catch-all path endpoint: interpret the path segment as an integer and return it * 10.

  NOTE(review): the original handler was also named `atom`, silently shadowing the
  '/2nd' handler defined above (the routes still worked, but the module name was
  clobbered). It also received `multiply` as a plain str, so `multiply*10` repeated
  the digits ten times (e.g. '3' -> '3333333333'). The `int` annotation makes FastAPI
  convert and validate the path parameter, so the arithmetic product is returned
  (non-integer paths now get a 422 instead of a nonsense string).
  """
  return multiply * 10

@app.get('/')
async def html():
  """Root endpoint: welcome message for the API."""
  return "Welcome to Our FastAPI Endpoints"

# Expose the local server publicly through an ngrok tunnel (typical Colab/notebook setup).
ngrok_tunnel = ngrok.connect(8000)
print('Public URL:', ngrok_tunnel.public_url)
# Patch asyncio so uvicorn can start inside an already-running event loop (Jupyter/Colab).
nest_asyncio.apply()
# Blocking call: serves `app` on port 8000 until interrupted.
uvicorn.run(app, port=8000)