Spaces:
Build error
Build error
Update app.py
Browse files
app.py
CHANGED
|
@@ -5,28 +5,34 @@ import streamlit as st
|
|
| 5 |
from sentence_transformers import SentenceTransformer
|
| 6 |
from groq import Groq
|
| 7 |
|
| 8 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
index_path = "faiss_index.idx"
|
| 10 |
model = SentenceTransformer('all-MiniLM-L6-v2')
|
| 11 |
|
| 12 |
-
# Load FAISS index or create if missing
|
| 13 |
if os.path.exists(index_path):
|
| 14 |
index = faiss.read_index(index_path)
|
| 15 |
else:
|
| 16 |
-
# Sample dataset (replace with real dataset)
|
| 17 |
-
data = [
|
| 18 |
-
"Alkaline electrolysis efficiency is 65-70%.",
|
| 19 |
-
"PEM electrolysis is 75-80% efficient but expensive.",
|
| 20 |
-
"SOEC electrolysis reaches 85% efficiency with heat.",
|
| 21 |
-
]
|
| 22 |
embeddings = model.encode(data).astype('float32')
|
| 23 |
dimension = embeddings.shape[1]
|
| 24 |
index = faiss.IndexFlatL2(dimension)
|
| 25 |
index.add(embeddings)
|
| 26 |
faiss.write_index(index, index_path)
|
| 27 |
|
| 28 |
-
#
|
| 29 |
-
|
|
|
|
|
|
|
|
|
|
| 30 |
|
| 31 |
def rag_pipeline(query):
|
| 32 |
"""Retrieves relevant information from FAISS and queries Llama 3 via Groq API."""
|
|
@@ -37,11 +43,13 @@ def rag_pipeline(query):
|
|
| 37 |
k = 3 # Retrieve top 3 relevant results
|
| 38 |
_, indices = index.search(query_embedding, k)
|
| 39 |
|
| 40 |
-
#
|
| 41 |
-
retrieved_texts = [data[i] for i in indices[0] if i < len(data)]
|
|
|
|
|
|
|
| 42 |
context = "\n".join(retrieved_texts)
|
| 43 |
|
| 44 |
-
# Generate response with Llama 3
|
| 45 |
response = client.chat.completions.create(
|
| 46 |
model="llama3-8b-32k",
|
| 47 |
messages=[
|
|
@@ -50,10 +58,9 @@ def rag_pipeline(query):
|
|
| 50 |
]
|
| 51 |
)
|
| 52 |
|
| 53 |
-
# ✅ Correct way to extract the response content
|
| 54 |
return response.choices[0].message.content  # ✅ Fix applied
|
| 55 |
|
| 56 |
-
# Streamlit UI
|
| 57 |
st.title("Green Hydrogen Techno-Economic Analyzer")
|
| 58 |
|
| 59 |
query = st.text_input("Enter your question about hydrogen production:")
|
|
|
|
| 5 |
from sentence_transformers import SentenceTransformer
|
| 6 |
from groq import Groq
|
| 7 |
|
| 8 |
+
# ✅ Sample dataset (Replace with actual dataset later)
|
| 9 |
+
data = [
|
| 10 |
+
"Alkaline electrolysis efficiency is 65-70%.",
|
| 11 |
+
"PEM electrolysis is 75-80% efficient but expensive.",
|
| 12 |
+
"SOEC electrolysis reaches 85% efficiency with heat.",
|
| 13 |
+
"Green hydrogen production costs depend on electricity price.",
|
| 14 |
+
"Solar-powered electrolysis is cost-effective in sunny regions.",
|
| 15 |
+
"Hydrogen storage is critical for economic viability.",
|
| 16 |
+
]
|
| 17 |
+
|
| 18 |
+
# Load FAISS index or create if missing
|
| 19 |
index_path = "faiss_index.idx"
|
| 20 |
model = SentenceTransformer('all-MiniLM-L6-v2')
|
| 21 |
|
|
|
|
| 22 |
if os.path.exists(index_path):
|
| 23 |
index = faiss.read_index(index_path)
|
| 24 |
else:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
embeddings = model.encode(data).astype('float32')
|
| 26 |
dimension = embeddings.shape[1]
|
| 27 |
index = faiss.IndexFlatL2(dimension)
|
| 28 |
index.add(embeddings)
|
| 29 |
faiss.write_index(index, index_path)
|
| 30 |
|
| 31 |
+
# ✅ Set Groq API key as an environment variable
|
| 32 |
+
os.environ["GROQ_API_KEY"] = "gsk_********************************"  # SECURITY(review): real key redacted — it was committed in plain text and must be revoked; set GROQ_API_KEY in the Space's secrets instead of hard-coding it
|
| 33 |
+
|
| 34 |
+
# Initialize Groq API using the environment variable
|
| 35 |
+
client = Groq(api_key=os.getenv("GROQ_API_KEY"))
|
| 36 |
|
| 37 |
def rag_pipeline(query):
|
| 38 |
"""Retrieves relevant information from FAISS and queries Llama 3 via Groq API."""
|
|
|
|
| 43 |
k = 3 # Retrieve top 3 relevant results
|
| 44 |
_, indices = index.search(query_embedding, k)
|
| 45 |
|
| 46 |
+
# ✅ Ensure retrieved indices are valid
|
| 47 |
+
retrieved_texts = [data[i] for i in indices[0] if 0 <= i < len(data)]
|
| 48 |
+
|
| 49 |
+
# Merge retrieved context
|
| 50 |
context = "\n".join(retrieved_texts)
|
| 51 |
|
| 52 |
+
# ✅ Generate response with Llama 3
|
| 53 |
response = client.chat.completions.create(
|
| 54 |
model="llama3-8b-32k",  # NOTE(review): "llama3-8b-32k" is not a valid Groq model id — Groq serves "llama3-8b-8192"; this call will fail with model_not_found at runtime
|
| 55 |
messages=[
|
|
|
|
| 58 |
]
|
| 59 |
)
|
| 60 |
|
|
|
|
| 61 |
return response.choices[0].message.content  # ✅ Fix applied
|
| 62 |
|
| 63 |
+
# ✅ Streamlit UI
|
| 64 |
st.title("Green Hydrogen Techno-Economic Analyzer")
|
| 65 |
|
| 66 |
query = st.text_input("Enter your question about hydrogen production:")
|