import os
import json
import re

import pandas as pd
import plotly.express as px
import streamlit as st
from dotenv import load_dotenv
from together import Together

# -------------------#
# Secure API key load
# -------------------#
load_dotenv()
TOGETHER_API_KEY = os.getenv("TOGETHER_API_KEY")
client = Together(api_key=TOGETHER_API_KEY)

# -------------------#
# Streamlit UI setup
# -------------------#
st.set_page_config(page_title="FutureScope: Research Direction Explorer", layout="wide")

# -------------------#
# App Title
# -------------------#
st.markdown(
    "<h1 style='text-align: center;'>FutureScope: Research Direction Explorer</h1>"
    "<p style='text-align: center;'>Discover how your research area evolved and where it's heading next 🚀</p>",
    unsafe_allow_html=True,
)

# -------------------#
# User Input
# -------------------#
user_topic = st.text_input(
    "🔍 Enter your research topic",
    placeholder="e.g. Graph Neural Networks for Drug Discovery",
)

# -------------------#
# Main Logic
# -------------------#
if st.button("Generate Research Insights"):
    if not user_topic.strip():
        st.warning("⚠️ Please enter a valid research topic.")
    else:
        with st.spinner("Analyzing topic evolution and forecasting future directions... ⏳"):
            # Prompt Design
            prompt = f"""
You are a world-class AI research assistant specialized in analyzing research trends.
Given the topic: "{user_topic}", perform the following:
1. Summarize how this research area has evolved in the past 10–15 years.
2. Identify key milestones and subfields in a timeline format.
3. Predict 3–5 future research directions and explain why each matters.

Return the output strictly in JSON format like this:
{{
  "evolution_summary": "...",
  "timeline": [{{"year": ..., "trend": "..."}}, ...],
  "future_directions": [{{"title": "...", "reason": "..."}}, ...]
}}
"""

            # Call Together API
            response = client.chat.completions.create(
                model="meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
                messages=[{"role": "user", "content": prompt}],
            )
            raw_content = response.choices[0].message.content

            # -------------------#
            # JSON Cleaning & Parsing
            # -------------------#
            def extract_json(text):
                """Extract the valid JSON portion of the model response."""
                text = text.strip()
                # Remove Markdown code fences such as ```json ... ```
                text = re.sub(r"^```(?:json)?|```$", "", text).strip()
                # Fall back to the first {...} block if extra prose surrounds it
                match = re.search(r"\{.*\}", text, re.DOTALL)
                if match:
                    return match.group(0)
                return text

            cleaned = extract_json(raw_content)
            try:
                data = json.loads(cleaned)
            except Exception as e:
                st.error(f"⚠️ Failed to parse JSON: {e}")
                st.text_area("Raw Response", raw_content, height=300)
                st.stop()

            # -------------------#
            # Display Results
            # -------------------#
            st.markdown("## 🧩 Evolution Summary")
            st.markdown(data.get("evolution_summary", "No summary returned."))

            st.markdown("## 📅 Timeline of Key Trends")
            # Timeline rendered as a year-vs-trend scatter plot plus the raw table
            timeline_df = pd.DataFrame(data.get("timeline", []))
            if not timeline_df.empty:
                fig = px.scatter(timeline_df, x="year", y="trend", title="Key milestones by year")
                st.plotly_chart(fig, use_container_width=True)
                st.dataframe(timeline_df, use_container_width=True)

            st.markdown("## 🔮 Future Research Directions")
            for item in data.get("future_directions", []):
                st.markdown(f"**{item['title']}**")
                st.markdown(f"{item['reason']}")
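
# -------------------#
# Running the app (sketch; the file name "futurescope_app.py" is assumed, not from the original)
# -------------------#
#   pip install streamlit pandas plotly together python-dotenv
#   echo "TOGETHER_API_KEY=<your key>" > .env
#   streamlit run futurescope_app.py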