File size: 1,727 Bytes
75e78ee
 
 
5578f37
 
 
75e78ee
 
5578f37
 
 
 
 
75e78ee
5578f37
 
75e78ee
5578f37
 
75e78ee
 
5578f37
 
75e78ee
 
5578f37
 
75e78ee
5578f37
 
 
75e78ee
5578f37
 
 
75e78ee
5578f37
75e78ee
5578f37
 
75e78ee
 
5578f37
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the model and tokenizer from Hugging Face
@st.cache_resource
def load_model():
    """Load and cache the t5-small tokenizer and seq2seq model.

    Cached via st.cache_resource so the Hugging Face download and model
    construction happen once per server process, not on every rerun.

    Returns:
        tuple: (tokenizer, model) for the "t5-small" checkpoint.
    """
    checkpoint = "t5-small"  # compact checkpoint keeps the demo lightweight
    return (
        AutoTokenizer.from_pretrained(checkpoint),
        AutoModelForSeq2SeqLM.from_pretrained(checkpoint),
    )

# Load once at import time; cached by st.cache_resource across reruns.
tokenizer, model = load_model()

# Streamlit app — page title rendered at the top of the app.
st.title("Seismic Event Prediction App")

# File upload section: returns None until the user uploads a .csv file.
uploaded_file = st.file_uploader("Upload CSV File", type=["csv"])

if uploaded_file is not None:
    # Load the uploaded CSV into a DataFrame.
    data = pd.read_csv(uploaded_file)

    # Display the data
    st.write("## Uploaded Data")
    st.dataframe(data)

    if data.empty:
        # An empty CSV would make the index slider below invalid (max = -1).
        st.warning("The uploaded CSV contains no rows.")
    else:
        # Input slider for choosing an example (index between 0 and N-1)
        st.write("## Select an example to visualize:")
        if len(data) > 1:
            idx = st.slider("Choose an index", 0, len(data) - 1, 0)
        else:
            # st.slider raises when min == max, so skip it for a 1-row file.
            idx = 0

        # Show the selected row.
        st.write("### Selected example:", idx)
        st.write(data.iloc[idx])

        # Plot only when the expected columns exist; an arbitrary CSV would
        # otherwise raise an uncaught KeyError in the app.
        missing = [col for col in ("x", "prediction") if col not in data.columns]
        if missing:
            st.warning(f"Cannot plot: missing column(s) {missing}.")
        else:
            fig, ax = plt.subplots()
            ax.plot(data['x'], label="X-axis data", color="blue")
            # Vertical marker at the row's predicted event position.
            ax.axvline(x=data.iloc[idx]['prediction'], color="red", label="Predicted Earthquake")
            ax.legend()
            st.pyplot(fig)

        # Use the Hugging Face model to generate a simple summary or
        # prediction based on the selected row index.
        input_text = f"Predict seismic event for index {idx}."
        inputs = tokenizer.encode(input_text, return_tensors="pt")
        outputs = model.generate(inputs, max_length=50, num_beams=4, early_stopping=True)
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

        st.write("### Model Prediction:")
        st.write(generated_text)