musaashaikh commited on
Commit
c392d41
·
verified ·
1 Parent(s): 7f92a67

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -61
app.py CHANGED
@@ -1,64 +1,31 @@
1
- from azure.ai.textanalytics import TextAnalyticsClient
2
- from azure.core.credentials import AzureKeyCredential
3
- from transformers import AutoModelForCausalLM, AutoTokenizer
4
  import gradio as gr
5
-
6
-
7
- # Azure Text Analytics setup
8
- azure_endpoint = "https://t6langservice.cognitiveservices.azure.com/"
9
- azure_api_key = "<REDACTED — secret API key removed from review view; this key was committed publicly and must be rotated in Azure>"
10
-
11
- # Authenticate client
12
- text_analytics_client = TextAnalyticsClient(endpoint=azure_endpoint, credential=AzureKeyCredential(azure_api_key))
13
-
14
-
15
- # Load Hugging Face chatbot model
16
- model_name = "microsoft/DialoGPT-medium"
17
- tokenizer = AutoTokenizer.from_pretrained(model_name)
18
- chat_model = AutoModelForCausalLM.from_pretrained(model_name)
19
-
20
-
21
-
22
-
23
- print ("Defining funtion")
24
-
25
- def respond(
26
- message,
27
- history: list[tuple[str, str]],
28
- system_message,
29
- max_tokens
30
- ):
31
- print ("Response: ")
32
-
33
- messages = [{"role": "system", "content": system_message}]
34
-
35
- messages.append({"role": "user", "content": message})
36
-
37
- print ("Getting response", messages)
38
-
39
- # response = chat_model(messages)
40
-
41
- response = text_analytics_client.analyze_sentiment([messages])[0]
42
-
43
-
44
- print ("Got response", response)
45
-
46
- return response[-1]['generated_text'][-1]['content']
47
-
48
-
49
-
50
-
51
- """
52
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
53
- """
54
- demo = gr.ChatInterface(
55
- respond,
56
- additional_inputs=[
57
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
58
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
59
- ],
60
- )
61
-
62
 
63
  if __name__ == "__main__":
64
- demo.launch()
 
 
 
 
1
  import gradio as gr
2
+ from transformers import pipeline
3
+
4
def load_model():
    """Build and return the text-classification pipeline used by this app.

    Returns:
        A HuggingFace ``pipeline`` object for sentiment analysis, backed by
        DistilBERT fine-tuned on SST-2.
    """
    # The pipeline call downloads/caches the model weights on first use.
    return pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
8
+
9
def classify_text(model, text):
    """Apply *model* to *text* and return its raw prediction output.

    Args:
        model: Any callable classifier (here, a HuggingFace pipeline).
        text: The input string to classify.

    Returns:
        Whatever the model callable produces for ``text`` (for a sentiment
        pipeline, a list of label/score dicts).
    """
    return model(text)
13
+
14
def main():
    """Entry point: load the sentiment classifier and serve it via Gradio."""
    classifier = load_model()

    def _predict(text):
        # Bind the loaded model so Gradio only has to supply the text input.
        return classify_text(classifier, text)

    # Build the web UI and start the local Gradio server (blocks until exit).
    gr.Interface(
        fn=_predict,
        inputs=gr.Textbox(lines=2, placeholder="Enter Text Here..."),
        outputs="json",
        title="Text Classification with HuggingFace",
        description="This interface uses a HuggingFace model to classify text sentiments. Enter a sentence to see its classification.",
    ).launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
if __name__ == "__main__":
    # Launch the Gradio app only when executed as a script (not on import).
    main()