# NOTE(review): the lines here originally read "Spaces: / Sleeping / Sleeping" —
# residue from a Hugging Face Spaces status page scrape, not part of the program.
import logging

# Configure logging before importing heavier libraries so their loggers
# inherit the DEBUG level from the root configuration.
logging.basicConfig(level=logging.DEBUG)

import streamlit as st
import os
from openai import OpenAI

# API key is read from the environment — never hard-code credentials.
API_KEY = os.getenv("OPENAI_API_KEY")

# OpenAI-compatible client pointed at NVIDIA's hosted inference endpoint.
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=API_KEY,
)
def main() -> None:
    """Render the Streamlit UI and show an LLM-generated explanation of the
    context of a user-supplied paragraph.

    Reads user text from a text input; on button press, streams a chat
    completion from the module-level ``client`` and writes the accumulated
    explanation to the page. Empty/whitespace-only input gets a prompt to
    enter text instead.
    """
    st.title("Context Analyzer")
    st.write("This app analyzes the context of the text you input and provides a detailed explanation.")
    content = st.text_input("Please input a text:")

    if not st.button("Get Context"):
        return

    # Guard clause: reject empty or whitespace-only input early.
    if not content.strip():
        st.write("Please input some text.")
        return

    # stream=True yields incremental deltas; accumulate them into one string.
    stream = client.chat.completions.create(
        model="meta/llama-3.1-405b-instruct",
        messages=[{"role": "user", "content": f"Please explain the context of the paragraph: {content}"}],
        temperature=0.2,
        top_p=0.7,
        max_tokens=1024,
        stream=True,
    )
    # Delta content can be None on some chunks (e.g. role-only or final
    # chunks), so filter those out before joining.
    explanation = "".join(
        chunk.choices[0].delta.content
        for chunk in stream
        if chunk.choices[0].delta.content is not None
    )
    st.write(f"Explanation: {explanation}")
# Standard script entry point: run the app only when executed directly.
if __name__ == "__main__":
    main()