import gradio as gr
import asyncio
import random
import torch
from huggingface_hub import InferenceClient
from sentence_transformers import SentenceTransformer
# ---------------------- THEME ----------------------
# Ocean base theme with lime/teal accents. The '*primary_400'-style values
# are Gradio theme palette tokens resolved at render time, and the
# *_dark variants apply only in dark mode.
theme = gr.themes.Ocean(
    secondary_hue="lime",
    neutral_hue="teal",
    text_size="lg",
    spacing_size="lg",
).set(
    # Page background and body text (light / dark).
    body_background_fill='*primary_400',
    body_background_fill_dark='*primary_950',
    body_text_color='*primary_50',
    body_text_color_dark='*primary_50',
    # Panel / component backgrounds.
    background_fill_primary_dark='*secondary_500',
    background_fill_secondary='*primary_700',
    background_fill_secondary_dark='*primary_900',
    # Gradient styling for primary buttons (normal / dark / hover).
    button_primary_background_fill='linear-gradient(120deg, *secondary_800 0%, *primary_300 60%, *primary_800 100%)',
    button_primary_background_fill_dark='linear-gradient(120deg, *secondary_400 0%, *primary_400 60%, *primary_600 100%)',
    button_primary_background_fill_hover='linear-gradient(120deg, *secondary_400 0%, *primary_300 60%, *neutral_300 100%)',
)
# ---------------------- LOAD KNOWLEDGE BASE ----------------------
# Read the dermatology reference document once at startup; it is chunked
# and embedded below for retrieval-augmented answers.
# (Removed a debug print that dumped the entire document to stdout.)
with open("Skin_cancer_harvard.txt", "r", encoding="utf-8") as file:
    Skin_cancer_harvard_text = file.read()
def preprocess_text(text):
    """Split raw knowledge-base text into clean, non-empty chunks.

    Parameters
    ----------
    text : str
        Raw document text, one logical chunk per line.

    Returns
    -------
    list[str]
        One stripped string per non-blank line, in document order.
        Empty or whitespace-only input yields [].
    """
    # splitlines() handles \n, \r\n and other line endings uniformly.
    # (Removed debug prints of the full chunk list and its length.)
    lines = text.strip().splitlines()
    return [line.strip() for line in lines if line.strip()]
cleaned_chunks = preprocess_text(Skin_cancer_harvard_text)

# ---------------------- EMBEDDINGS ----------------------
# Sentence-embedding model used for semantic retrieval over the chunks.
model = SentenceTransformer('all-MiniLM-L6-v2')


def create_embeddings(text_chunks):
    """Encode every text chunk into a single embedding tensor.

    Parameters
    ----------
    text_chunks : list[str]
        The cleaned knowledge-base chunks.

    Returns
    -------
    torch.Tensor
        2-D tensor with one embedding row per chunk.
    """
    # convert_to_tensor=True keeps the result in torch so get_top_chunks()
    # can matmul against it directly.
    # (Removed a debug print of the tensor shape.)
    return model.encode(text_chunks, convert_to_tensor=True)


chunk_embeddings = create_embeddings(cleaned_chunks)
# ---------------------- SEMANTIC SEARCH ----------------------
def get_top_chunks(query, chunk_embeddings, text_chunks, k=3):
    """Return the chunks most semantically similar to *query*.

    Parameters
    ----------
    query : str
        The user's message.
    chunk_embeddings : torch.Tensor
        2-D tensor of chunk embeddings (one row per entry in *text_chunks*).
    text_chunks : list[str]
        The chunk texts corresponding row-for-row to *chunk_embeddings*.
    k : int, optional
        Maximum number of chunks to return (default 3, matching the
        original behavior).

    Returns
    -------
    list[str]
        Up to *k* chunks, ordered from most to least similar.
    """
    query_embedding = model.encode(query, convert_to_tensor=True)
    # Normalize both sides so the dot product below is cosine similarity.
    query_normalized = query_embedding / query_embedding.norm()
    chunks_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    similarities = torch.matmul(chunks_normalized, query_normalized)
    # Never ask topk for more results than there are chunks.
    top_indices = torch.topk(similarities, k=min(k, len(text_chunks))).indices
    # Fix: index the text_chunks *argument*, not the module-level
    # cleaned_chunks global the original accidentally closed over.
    return [text_chunks[i] for i in top_indices]
# ---------------------- LLM CLIENT ----------------------
# Hosted inference client for the chat model; requires network access.
client = InferenceClient("microsoft/phi-4")


# ---------------------- CHAT FUNCTION ----------------------
def respond(message, history):
    """ChatInterface callback: answer *message* using retrieved context.

    Parameters
    ----------
    message : str
        The user's latest message.
    history : list[dict]
        Prior turns in messages format ({'role', 'content'} dicts), as
        supplied by gr.ChatInterface(type="messages").

    Returns
    -------
    str
        The assistant's reply. ChatInterface appends it to the history
        itself; the old code mutated *history* with a (user, bot) tuple
        and returned (history, content), which is the wrong shape for
        messages-format history.
    """
    # Retrieve the most relevant knowledge-base chunks for this question.
    info = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
    messages = [
        {
            'role': 'system',
            'content': (
                f'You are a friendly chatbot using {info} to answer questions. '
                'You are always willing to help and want the best for the user. '
                'You need to emphasize that you are not a medical professional at the end '
                'of the message, but you are here to help to the best of your ability. '
                'Be confident and comforting to the users when helping them. '
                'In your response add suggestions for a couple follow-up questions '
                'to further the conversation with the chatbot.'
            )
        }
    ]
    if history:
        messages.extend(history)
    messages.append({'role': 'user', 'content': message})
    # chat_completion is a plain synchronous call, and Gradio already runs
    # this callback off the main thread — the original's
    # asyncio.run(asyncio.to_thread(...)) wrapper was a no-op, so call it
    # directly.
    response = client.chat_completion(messages, max_tokens=500, top_p=0.8)
    return response['choices'][0]['message']['content'].strip()
# ---------------------- GRADIO APP ----------------------
# Page layout: a banner row on top, then a two-column row with the
# mascot/teachable-machine panel (left, scale=1) and the chat interface
# (right, scale=3).
with gr.Blocks(theme=theme) as chatbot:
    with gr.Row(scale=1):
        # Full-width banner image.
        gr.Image("Capstone_Banner.png")
    with gr.Row():
        with gr.Column(scale=1):
            gr.Image("Aloe_the_Turtle.png")
            with gr.Row():
                gr.Markdown(
                    "Click the button below to access the teachable machine, an AI Visual Scanner to detect Skin Cancer. "
                    "The main purpose of this teachable machine is to check if you have a cancerous or non-cancerous mole. "
                    "Place your mole near your camera and the analysis will be represented below. "
                    "Note that these results are not 100% accurate, so be sure to consult a medical professional if you have any concerns."
                )
            with gr.Row(scale=1):
                # External link button — opens the Teachable Machine model.
                gr.Button(
                    value="AI Visual Testing Moles for Skin Cancer!",
                    link="https://teachablemachine.withgoogle.com/models/onfoEa0p-/"
                )
        with gr.Column(scale=3):
            # Chat UI wired to respond(); type="messages" means history is
            # passed as a list of {'role', 'content'} dicts.
            # NOTE(review): theme= on a nested ChatInterface is likely
            # redundant — the enclosing gr.Blocks already sets it; confirm
            # against the Gradio version in use.
            gr.ChatInterface(
                fn=respond,
                title="Your Personal Skin Chatbot!",
                description=(
                    "Welcome, my name is Aloe the Turtle and I am here to help you address any dermatology-related "
                    "questions you may have on topics such as Skin Cancer, Acne, Eczema, and much more. "
                    "Just remember, while I have comprehensive knowledge on skin concerns, I am not a medical professional!"
                ),
                type="messages",
                theme=theme,
                examples=[
                    "What ingredients should I use to clear my Acne?",
                    "What can I do to proactively prevent Skin Cancer?",
                    "How do I tell the difference between eczema and psoriasis?"
                ]
            )

chatbot.launch()