Update scripts/services/services.py
scripts/services/services.py +112 -89
scripts/services/services.py
CHANGED
@@ -1,89 +1,112 @@
# Importing necessary libraries:
import wave  # Handles WAV audio files
import io  # Input/Output operations, helps with working with data in memory

# Libraries for speech recognition and language processing:
import speech_recognition as sr  # Speech recognition capabilities
from groq import Groq  # Groq client for interacting with Groq AI platform
import numpy as np  # Powerful numerical computations (handling audio data)
from scipy.io.wavfile import write  # Writing audio files
import librosa  # Audio analysis (resampling audio)

# Vectorstore and language model interactions:
from langchain_community.vectorstores import FAISS  # Efficient similarity search for documents
from langchain_huggingface import HuggingFaceEmbeddings  # Embeddings for language models
import streamlit as st  # Creating interactive web applications

# Downsample audio from 96kHz to 44kHz (common for many applications)
def downsample_audio(audio_data):
    # Convert audio data to suitable format for librosa
    audio_data = audio_data.astype(np.float32)

    # Resample the audio, changing sample rate from 96000 to 44000
    resampled_audio = librosa.resample(audio_data, orig_sr=96000, target_sr=44000)

    # Save the resampled audio as a WAV file named 'aud.wav'
    write('aud.wav', 44000, resampled_audio.astype(np.int16))

# Save audio received as bytes (often from web interfaces) to a file
def save_audio_from_bytes(audio_bytes):
    # Convert audio bytes into a NumPy array (easier to work with)
    audio_array = np.frombuffer(audio_bytes, dtype=np.int16)

    # Save the original (high-sample-rate) audio as 'audio.wav'
    write('audio.wav', 96000, audio_array)

    # Downsample the audio for further processing
    downsample_audio(audio_array)

# Interact with the Groq AI platform to get responses from a language model
def run_groq(prompt, model):
    # Truncate the prompt when using the 'llama3-70b-8192' model
    if model == 'llama3-70b-8192':
        prompt = prompt[:8191]

    # Create a Groq client (you'll need an API key)
    client = Groq(api_key='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')

    # Send the prompt to the specified language model on Groq
    chat_completion = client.chat.completions.create(
        messages=[
            {'role': 'user', 'content': prompt}
        ],
        model=model
    )

    # Return the generated response from the model
    return chat_completion.choices[0].message.content

# Transcribe audio from the 'aud.wav' file (assumes Spanish language)
def transcript():
    # Create a speech recognizer object
    recognizer = sr.Recognizer()

    # Open the audio file 'aud.wav'
    with sr.AudioFile('aud.wav') as source:
        # Read the audio data from the file
        audio = recognizer.record(source)

    # Use Google's speech recognition to transcribe the audio (Spanish)
    text = recognizer.recognize_google(audio, language='es')

    # Return the transcribed text
    return text

# Perform Retrieval-Augmented Generation (RAG) to answer queries using context
def run_rag(query, vc):  # 'vc' is assumed to be a Vectorstore
    # Find similar documents in the Vectorstore based on the query
    similar_docs = vc.similarity_search(query, k=10)  # Get top 10 similar docs

    # Combine the content of these similar documents into a single context
    context = '\n'.join([doc.page_content for doc in similar_docs])

    # Display the context in the Streamlit sidebar
    st.sidebar.write(context)

    # Construct a prompt for the language model, including the context
    prompt = f'''
    Use the following context as your learned knowledge, inside <context></context> XML tags.
    <context>
    {context}
    </context>

    The context is taken from a set of PDFs.

    When answering the user:
    - If you don't know, just say that you don't know.
    - If you are not sure, ask for clarification.
    Avoid mentioning that you obtained the information from the context.
    Answer according to the language of the user's question.
    Make your answers detailed.

    - Return your answer in Spanish.

    Given the context information, answer the query.
    Query: {query}
    '''

    # Get a response from the 'llama3-70b-8192' model on Groq
    response = run_groq(prompt, 'llama3-70b-8192')

    # Return the generated response
    return response
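
The functions above are meant to be chained: raw recorder bytes are written and downsampled by save_audio_from_bytes, the resulting 'aud.wav' is transcribed by transcript, and the transcription is answered by run_rag against a vectorstore. A minimal sketch of that flow follows; the embedding model, the placeholder documents, the 'recording.raw' input file, and the scripts.services.services import path are illustrative assumptions, not part of this commit (the real app builds its vectorstore and records audio elsewhere).

# Illustrative sketch only: the documents, embedding model, and audio source are assumptions
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEmbeddings

from scripts.services.services import save_audio_from_bytes, transcript, run_rag

# Build a small FAISS vectorstore from placeholder texts (assumed embedding model)
embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')
vc = FAISS.from_texts(
    ['Texto de ejemplo extraído de un PDF.', 'Otro fragmento de ejemplo.'],
    embeddings,
)

# 'recording.raw' stands in for 16-bit PCM bytes recorded at 96 kHz by the web UI
with open('recording.raw', 'rb') as f:
    raw_bytes = f.read()

save_audio_from_bytes(raw_bytes)  # writes 'audio.wav' and the downsampled 'aud.wav'
question = transcript()           # Spanish speech -> text via Google recognition
answer = run_rag(question, vc)    # retrieve context and query Groq
print(answer)

Note that run_rag writes the retrieved context to the Streamlit sidebar, so this flow is intended to run inside the Streamlit app rather than as a bare script.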