import gradio as gr
import random
from huggingface_hub import InferenceClient
from sentence_transformers import SentenceTransformer
import torch

# Spotify playlist embed code
spotify_embed_code = """
    <iframe data-testid="embed-iframe" style="border-radius:12px" src="https://open.spotify.com/embed/playlist/1v6ec9D4MLMDYOetq8z9kB?utm_source=generator" width="100%" height="352" frameBorder="0" allowfullscreen="" allow="autoplay; clipboard-write; encrypted-media; fullscreen; picture-in-picture" loading="lazy"></iframe>
"""
theme = gr.themes.Ocean(
    primary_hue="green",
    secondary_hue="cyan",
    neutral_hue="sky",
).set(
    body_background_fill="#f1eeccff",
    button_secondary_background_fill="#ABA8A6",
    button_primary_text_color="#004643",
    background_fill_primary="*neutral_200",
)

gradient_css = """
body {
    background: linear-gradient(135deg, #76c893, #1a759f);
}
"""

# SEMANTIC SEARCH STEP 2
with open("songs_knowledge_base_2.txt", "r", encoding="utf-8") as file:
  songs_knowledge_base_text = file.read()

# SEMANTIC SEARCH STEP 3
def preprocess_text(text):
  # Strip extra whitespace from the beginning and the end of the text
  cleaned_text = text.strip()

  # Split the cleaned_text on the "*" delimiter that separates entries
  chunks = cleaned_text.split("*")

  # Strip whitespace from each chunk and collect the results
  cleaned_chunks = [chunk.strip() for chunk in chunks]

  # Return the cleaned_chunks
  return cleaned_chunks

# Call the preprocess_text function and store the result in a cleaned_chunks variable
cleaned_chunks = preprocess_text(songs_knowledge_base_text)
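# Example with hypothetical data: if the file contained
#   "Espresso - Sabrina Carpenter * Feather - Sabrina Carpenter"
# cleaned_chunks would be ["Espresso - Sabrina Carpenter", "Feather - Sabrina Carpenter"].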


# SEMANTIC SEARCH STEP 4
# Load the pre-trained embedding model that converts text to vectors
model = SentenceTransformer('all-MiniLM-L6-v2')

def create_embeddings(text_chunks):
  # Convert each text chunk into a vector embedding and store as a tensor
  chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
  # chunk_embeddings is the numeric (vector) representation of each chunk

  # Print the shape of chunk_embeddings as a quick sanity check
  print(chunk_embeddings.shape)

  # Return the chunk_embeddings
  return chunk_embeddings

# Call the create_embeddings function and store the result in a new chunk_embeddings variable
chunk_embeddings = create_embeddings(cleaned_chunks)
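# Sanity note: all-MiniLM-L6-v2 produces 384-dimensional embeddings, so the
# printed shape should be (len(cleaned_chunks), 384).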

# SEMANTIC SEARCH STEP 5
# Find the text chunks most relevant to a query, given the chunk embeddings and chunk texts
def get_top_chunks(query, chunk_embeddings, text_chunks):
  # Convert the query text into a vector embedding
  query_embedding = model.encode(query, convert_to_tensor=True)

  # Normalize the query embedding to unit length for accurate similarity comparison
  query_embedding_normalized = query_embedding / query_embedding.norm()

  # Normalize all chunk embeddings to unit length for consistent comparison
  chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)

  # Calculate cosine similarity between query and all chunks using matrix multiplication
  similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)

  # Find the indices of the 10 chunks with the highest similarity scores
  # (capped at the number of available chunks)
  top_indices = torch.topk(similarities, k=min(10, len(text_chunks))).indices

  # Create an empty list to store the most relevant chunks
  top_chunks = []

  # Loop through the top indices and retrieve the corresponding text chunks
  for i in top_indices:
    top_chunks.append(text_chunks[i])

  # Return the list of most relevant chunks
  return top_chunks
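
# Quick usage sketch (hypothetical query, using the data loaded above):
# top = get_top_chunks("jazzy songs for studying", chunk_embeddings, cleaned_chunks)
# print(top[0])  # the single most similar chunk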

client = InferenceClient('Qwen/Qwen2.5-72B-Instruct')

def respond(message, history):
    info = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
    messages = [{'role': 'system', 'content': f'You are a friendly chatbot using {info} to answer questions. You love creating playlists and will give at least 10 songs as a response. You will also capitalize the first letters of the first and last names of every artist you name. Most importantly, make sure the songs closely match what the user asks for, considering factors like tempo, emotion, and lyrics.'}]

    if history:
        messages.extend(history)

    messages.append({"role": "user", "content": message})

    response = client.chat_completion(messages, max_tokens=600)

    return response.choices[0].message.content.strip()
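
# Note: with type="messages" in the ChatInterface below, Gradio supplies `history`
# as a list of {"role": ..., "content": ...} dicts, e.g.
#   [{"role": "user", "content": "hi"}, {"role": "assistant", "content": "Hello!"}]
# which is why it can be extended straight into `messages`.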

# Simple demo handlers left over from development; not wired into the UI below
def echo(message, history):
    return message

def yes_no(message, history):
    return random.choice(["Yes", "No"])
with gr.Blocks(theme=theme, css=gradient_css) as chatbot:
    with gr.Row(scale=1):
        with gr.Column(scale=4):
            gr.Image("music_banner_pic.png")
    with gr.Row(scale=3):
        with gr.Column(scale=1):
            gr.Image("White_AuxAI_logo .png")
        with gr.Column(scale=4):
            gr.ChatInterface(
                respond,
                type="messages",
                title="AuxAI",
                description="Hi! I’m AuxAI, your friendly music recommendation assistant. Tell me your mood, genre, or style and I’ll suggest some songs! AuxAI is on a mission to make music discovery personalized, inspiring, and effortless. Our software connects you to a playlist that perfectly matches your mood, vibe, or moment. We hope you uncover new favorites and rediscover old ones, turning each search into a seamless find!",
                examples=[
                    "Give me some jazzy songs to study and focus to",
                    "Give me a playlist of hype, upbeat songs to workout to",
                    "Give me songs like Espresso by Sabrina Carpenter",
                ],
            )
#__________________________________________
# PLAYLIST EMBED BELOW; TODO: add example images and links to resources
    with gr.Row(scale=1): 
        gr.Markdown("### Enjoy some of our favorite songs!")
    with gr.Row(scale=1):
        gr.HTML(spotify_embed_code)
    # with gr.Row(scale=1):
    #     with gr.Column():
    #         # resource links go here
#__________________________________________

chatbot.launch()