moza2025 commited on
Commit
beaff03
·
verified ·
1 Parent(s): 1d2b4c2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +133 -63
app.py CHANGED
@@ -1,64 +1,134 @@
 
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
-
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
-
9
-
10
- def respond(
11
- message,
12
- history: list[tuple[str, str]],
13
- system_message,
14
- max_tokens,
15
- temperature,
16
- top_p,
17
- ):
18
- messages = [{"role": "system", "content": system_message}]
19
-
20
- for val in history:
21
- if val[0]:
22
- messages.append({"role": "user", "content": val[0]})
23
- if val[1]:
24
- messages.append({"role": "assistant", "content": val[1]})
25
-
26
- messages.append({"role": "user", "content": message})
27
-
28
- response = ""
29
-
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
38
-
39
- response += token
40
- yield response
41
-
42
-
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- demo = gr.ChatInterface(
47
- respond,
48
- additional_inputs=[
49
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
52
- gr.Slider(
53
- minimum=0.1,
54
- maximum=1.0,
55
- value=0.95,
56
- step=0.05,
57
- label="Top-p (nucleus sampling)",
58
- ),
59
- ],
60
- )
61
-
62
-
63
- if __name__ == "__main__":
64
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
import gtts

# `google.colab` only exists inside Colab notebooks. This app also runs on
# Hugging Face Spaces, where an unconditional import would crash at startup.
# `userdata` is not referenced anywhere in this file, so import best-effort.
try:
    from google.colab import userdata
except ImportError:  # not running in Colab
    userdata = None
6
+
7
# === Load Pipeline ===
# Sentiment classifier used by the "Sentiment Analysis" tab.
sentiment_pipeline = pipeline(
    "sentiment-analysis",
    model="distilbert/distilbert-base-uncased-finetuned-sst-2-english",
    verbose=0,
)
9
+
10
# === Sentiment Analysis ===
def analyze_sentiment(text):
    """Classify *text* and return a "LABEL (Score: x.xx)" display string."""
    prediction = sentiment_pipeline(text)[0]
    return "{} (Score: {:.2f})".format(prediction["label"], prediction["score"])
14
+
15
+
16
# Initialize the summarization pipeline, tokenizer, and model
# Checkpoint for the "Summarization" tab: a distilled BART model.
model_name = "sshleifer/distilbart-cnn-12-6"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
# NOTE(review): summary_pipe is never used below — summarize_text() calls
# model.generate() directly. Consider removing it or routing through it.
summary_pipe = pipeline("summarization", model=model, tokenizer=tokenizer)
21
+
22
+
23
# Function to chunk text with an overlap
def chunk_text_with_overlap(tokens, max_length, overlap):
    """Split *tokens* into windows of up to *max_length* items.

    Consecutive windows share *overlap* trailing/leading items so that content
    cut at a boundary still appears whole in at least one window. Works on any
    sliceable sequence (list, 1-D tensor, ...); returns a list of slices.

    Raises ValueError if overlap >= max_length — the window would never
    advance (the old code silently returned [] for overlap > max_length
    because range() got a negative step).
    """
    if overlap >= max_length:
        raise ValueError("overlap must be smaller than max_length")
    stride = max_length - overlap
    return [tokens[i:i + max_length] for i in range(0, len(tokens), stride)]
30
+
31
+
32
# === Summarization ===
def summarize_text(text):
    """Summarize arbitrarily long *text* with the distilbart model.

    The input is tokenized without truncation, split into overlapping chunks
    that fit the encoder, each chunk is summarized independently, and the
    chunk summaries are concatenated into one string.
    """
    # Chunk size is bounded by the encoder's positional embeddings
    # (taken from the model config rather than hard-coded).
    max_length = model.config.max_position_embeddings

    # Tokens shared between consecutive chunks, so sentences cut at a chunk
    # boundary still appear whole in one of the chunks.
    overlap = 50  # Adjust overlap as needed

    # Tokenize without truncation; chunking is done manually below.
    # tokens is the 1-D tensor of input ids for the whole text.
    tokens = tokenizer(text, return_tensors='pt', truncation=False)['input_ids'][0]

    # Chunk the tokens with overlap
    chunks = chunk_text_with_overlap(tokens, max_length, overlap)

    # Summarize each chunk independently.
    summaries = []
    for chunk in chunks:
        input_ids = chunk.unsqueeze(0)  # add batch dimension
        summary_ids = model.generate(input_ids, max_length=max_length, num_beams=4, length_penalty=2.0, early_stopping=True)
        summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
        summaries.append(summary)

    # Combine the per-chunk summaries into the final summary.
    # (Removed a leftover debug print of max_length.)
    final_summary = ' '.join(summaries)

    return final_summary
60
+
61
+
62
# === Text-to-Speech ===
def text_to_speech(text):
    """Synthesize *text* with gTTS and return the path of the generated MP3.

    Writes to a unique temporary file per call: the previous fixed
    "output.mp3" path was shared by all users, so concurrent requests
    overwrote each other's audio.
    """
    import os
    import tempfile  # stdlib; local import keeps the module import block untouched

    tts = gtts.gTTS(text)
    fd, path = tempfile.mkstemp(suffix=".mp3")
    os.close(fd)  # gTTS reopens the path itself; release our handle first
    tts.save(path)
    return path
67
+
68
+
69
chat_pipeline = pipeline("text-generation", model="yasserrmd/Human-Like-Qwen2.5-1.5B-Instruct")

# === Chatbot ===
def chatbot(message, chat_history):
    """Gradio ChatInterface callback: generate a reply to *message*.

    chat_history is accepted because ChatInterface passes it, but it is not
    used — each turn is generated from the latest message alone.
    """
    # return_full_text=False makes the pipeline return only the completion.
    # By default "generated_text" echoes the prompt followed by the reply,
    # so the old code sent the user's own message back to them.
    # max_new_tokens=10 was a leftover test cap that cut replies mid-sentence.
    result = chat_pipeline(message, max_new_tokens=256, return_full_text=False)

    # Extract only the reply
    bot_reply = result[0]["generated_text"]

    return bot_reply
79
+
80
# Reuse the pipeline from Chatbot 1: it is the exact same checkpoint, and
# loading a second copy of a 1.5B-parameter model doubles memory for nothing.
chat_pipeline1 = chat_pipeline

# === Chatbot ===
def chatbot1(message, chat_history):
    """Gradio ChatInterface callback for the second chatbot tab.

    chat_history is accepted because ChatInterface passes it, but it is not
    used — each turn is generated from the latest message alone.
    """
    # return_full_text=False returns only the completion instead of
    # prompt + completion (see chatbot() above for the same fix).
    result = chat_pipeline1(message, max_new_tokens=256, return_full_text=False)

    # Extract only the reply
    bot_reply = result[0]["generated_text"]

    return bot_reply
91
+
92
+
93
# === Build Gradio Interface ===
with gr.Blocks() as demo:
    with gr.Tabs():
        with gr.Tab("Sentiment Analysis"):
            gr.Markdown("### 🔍 Sentiment Analysis")
            sentiment_input = gr.Textbox(label="Enter text", lines=3, placeholder="Type a sentence to analyze...")
            sentiment_button = gr.Button("Analyze")
            sentiment_output = gr.Textbox(label="Sentiment")
            sentiment_button.click(analyze_sentiment, inputs=sentiment_input, outputs=sentiment_output)

        with gr.Tab("Summarization"):
            gr.Markdown("### ✂️ Summarization")
            summary_input = gr.Textbox(label="Enter text", lines=8, placeholder="Paste long text here...")
            summary_button = gr.Button("Summarize")
            summary_output = gr.Textbox(label="Summary")
            summary_button.click(summarize_text, inputs=summary_input, outputs=summary_output)

        with gr.Tab("Text-to-Speech"):
            # Description used to claim "using Bark"; the implementation is gTTS.
            gr.Markdown("### 🗣️ Text-to-Speech (using gTTS)")
            tts_input = gr.Textbox(label="Enter text to speak", lines=3, placeholder="Try something like: 'Hello, how are you?'")
            tts_button = gr.Button("Generate Speech")
            # text_to_speech() returns a path to an MP3 file, so this output
            # component must be type="filepath" (it was "numpy").
            tts_output = gr.Audio(label="Generated Speech", type="filepath")
            tts_button.click(text_to_speech, inputs=tts_input, outputs=tts_output)

        with gr.TabItem("Chatbot 1"):
            gr.ChatInterface(
                chatbot,
                type="messages",
                title="Chatbot 1",
                description="Start the conversation in Chatbot 1!")

        with gr.TabItem("Chatbot 2"):
            gr.ChatInterface(
                chatbot1,
                type="messages",
                title="Chatbot 2",
                description="Start the conversation in Chatbot 2!")


# Guard the launch so importing this module (e.g. from tests) does not start
# a server; `python app.py` still launches as before.
if __name__ == "__main__":
    demo.launch()