Talha812 committed on
Commit
faf41a3
Β·
verified Β·
1 Parent(s): 732f284

Upload 5 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ brain_tumor_model.keras filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Entry point: combines three Gradio demos behind a single tabbed UI."""
import gradio as gr

from app1_groq_chat import groq_chat_app
from app2_speech_chat import app2_speech_chat
from app3_brain_tumor import app3_brain_tumor

# Build each sub-application once; each factory returns a Gradio
# Blocks/Interface object that can be rendered inside a Tab.
groq_chat = groq_chat_app()
speech_chat = app2_speech_chat()
tumor_detection = app3_brain_tumor()

with gr.Blocks() as main_app:
    gr.Markdown("# 🧠🎤🤖 Combined AI Applications Interface")
    with gr.Tab("🧠 Brain Tumor Detection"):
        tumor_detection.render()
    with gr.Tab("🎤 Speech-to-Speech ChatBot"):
        speech_chat.render()
    with gr.Tab("🤖 Groq Chatbot"):
        groq_chat.render()

if __name__ == "__main__":
    # Guard the launch so importing this module (e.g. from tests or another
    # entry point) does not start the web server as a side effect.
    main_app.launch(share=True)
app1_groq_chat.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from groq import Groq
import os

# One shared Groq client; the API key is read from the environment.
client = Groq(api_key=os.getenv("GROQ_API_KEY"))


def groq_chat_app():
    """Build and return a Gradio Blocks chat UI backed by the Groq API.

    Returns:
        gr.Blocks: a self-contained chat demo (textbox + submit/clear buttons)
        whose conversation state is a list of ``{"role", "content"}`` dicts.
    """

    def _to_pairs(history):
        # Render the message history as Chatbot (user, bot) tuples. Each
        # message becomes its own row, with the other side left as None.
        return [
            (m["content"] if m["role"] == "user" else None,
             m["content"] if m["role"] == "assistant" else None)
            for m in history if m["role"] != "system"
        ]

    def chat_with_groq(user_input, history):
        """Append the user turn, query Groq, and return (state, chatbot, textbox)."""
        if history is None:
            history = []

        # BUG FIX: the click handler declares three outputs
        # (state, chatbot, msg), so every return path must yield three
        # values. The original early return produced only two — a Gradio
        # error — and also blanked the visible chat by returning [].
        if not user_input.strip():
            return history, _to_pairs(history), ""

        history.append({"role": "user", "content": user_input})

        try:
            response = client.chat.completions.create(
                messages=history,
                model="llama-3.1-8b-instant"
            )
            reply = response.choices[0].message.content
            history.append({"role": "assistant", "content": reply})
            return history, _to_pairs(history), ""
        except Exception as e:
            # Surface the failure inside the chat window instead of
            # crashing the UI; state is kept so the user can retry.
            return history, [("Error", str(e))], ""

    with gr.Blocks() as demo:
        gr.Markdown("## 🤖 Groq Chatbot with Submit Button")

        chatbot = gr.Chatbot()
        state = gr.State([])
        msg = gr.Textbox(label="Your Message", placeholder="Type something...")
        submit_btn = gr.Button("📤 Submit")
        clear_btn = gr.ClearButton([msg, chatbot, state])

        submit_btn.click(
            fn=chat_with_groq,
            inputs=[msg, state],
            outputs=[state, chatbot, msg]
        )

    return demo
app2_speech_chat.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import tempfile
import gradio as gr
from groq import Groq
import whisper
from gtts import gTTS

# Module-level singletons: Groq client (key from env) and the Whisper
# "small" speech-recognition model, loaded once at import time.
client = Groq(api_key=os.getenv("GROQ_API_KEY"))
whisper_model = whisper.load_model("small")


def app2_speech_chat():
    """Build and return a speech-to-speech Gradio Interface.

    Pipeline: audio file -> Whisper transcription -> Groq chat reply ->
    gTTS-synthesized MP3 of the reply.
    """

    def speech_to_speech(audio_file):
        """Return (transcription, reply text, reply-audio path) for one clip."""
        try:
            result = whisper_model.transcribe(audio_file)
            user_text = result["text"]

            chat_completion = client.chat.completions.create(
                messages=[{"role": "user", "content": user_text}],
                model="llama-3.3-70b-versatile"
            )
            bot_reply = chat_completion.choices[0].message.content

            tts = gTTS(bot_reply)
            # FIX: tempfile.mktemp() is deprecated and race-prone (the name
            # can be claimed between creation and use). Create the file
            # atomically; delete=False keeps it on disk for Gradio to serve.
            with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as tmp:
                temp_audio_path = tmp.name
            tts.save(temp_audio_path)

            return user_text, bot_reply, temp_audio_path

        except Exception as e:
            # Best-effort UI: show the error text instead of raising.
            return f"Error: {str(e)}", "", None

    interface = gr.Interface(
        fn=speech_to_speech,
        inputs=gr.Audio(sources=["microphone", "upload"], type="filepath"),
        outputs=[
            gr.Textbox(label="Transcribed Text"),
            gr.Textbox(label="Groq Reply"),
            gr.Audio(label="Reply Audio")
        ],
        title="🎤 Speech-to-Speech ChatBot",
        description="Speak or upload audio → Whisper transcribes → Groq replies → gTTS speaks the answer."
    )

    return interface
app3_brain_tumor.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
import numpy as np
from PIL import Image

# FIX: the weights file ships at the repo root (brain_tumor_model.keras,
# tracked via Git LFS in this same commit); the old "/content/..." path
# only exists inside a Google Colab session and fails everywhere else.
model = load_model("brain_tumor_model.keras")

9
def predict_tumor(img):
    """Classify a single MRI image with the module-level Keras model.

    The image is converted to RGB, resized to the model's 150x150 input,
    scaled to [0, 1], and scored; a sigmoid output above 0.5 is read as
    "tumor present". Returns a human-readable result string.
    """
    rgb = img.convert('RGB').resize((150, 150))
    batch = np.expand_dims(image.img_to_array(rgb), axis=0)
    batch /= 255.0

    score = model.predict(batch)[0][0]
    if score > 0.5:
        return "Yes, Tumor Detected 😔"
    return "No Tumor Detected 😊"

19
def app3_brain_tumor():
    """Return a Gradio Interface wrapping :func:`predict_tumor`.

    Takes an uploaded MRI scan as a PIL image and shows the prediction
    string in a textbox.
    """
    return gr.Interface(
        fn=predict_tumor,
        inputs=gr.Image(type="pil", label="Upload MRI Scan"),
        outputs=gr.Textbox(label="Prediction"),
        title="🧠 Brain Tumor Detection",
        description="Upload an MRI scan to check for tumor presence."
    )
brain_tumor_model.keras ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f533c25bb87e76f2a0793a3508585f76b52a0bcd732d0bd49eef118bd50108b4
3
+ size 127677329