Spaces:
Runtime error
Runtime error
| import os | |
| from contextlib import contextmanager, redirect_stdout | |
| from io import StringIO | |
| from pathlib import Path | |
| from time import sleep | |
| import pandas as pd | |
| import plotly.graph_objects as go | |
| import streamlit as st | |
| from langchain import LLMChain, PromptTemplate | |
| from langchain.agents import create_pandas_dataframe_agent | |
| from langchain.llms import OpenAI | |
| from langchain.tools.python.tool import PythonAstREPLTool | |
| from langchain.utilities import PythonREPL | |
# --- Configuration ----------------------------------------------------------
# SECURITY: a live OpenAI API key was previously hard-coded here and committed
# to source — that key must be treated as compromised and revoked. Read the
# key from the environment instead and fail fast with a clear message.
if "OPENAI_API_KEY" not in os.environ:
    raise RuntimeError(
        "Set the OPENAI_API_KEY environment variable before running this app."
    )

# Folder containing the exported CSV data. The original hard-coded path is
# kept as the default; override with the DATA_FOLDER environment variable.
DATA_FOLDER = os.environ.get(
    "DATA_FOLDER", "/Users/eliebrosset/Documents/GitHub/HUMANITICS/data/aigle/export"
)

# Load the transaction table once at startup. low_memory=False reads the file
# in one pass, avoiding mixed-dtype chunk inference warnings on large CSVs.
df = pd.read_csv(f"{DATA_FOLDER}/transactiondescriptor.csv", low_memory=False)
## execution
# Sandboxed Python REPL that will run the LLM-generated plotting code.
# It is handed the loaded DataFrame and plotly.graph_objects by name so the
# generated snippets can reference `df` and `go` directly.
repl_namespace = {"df": df, "go": go}
execution = PythonAstREPLTool(locals=repl_namespace)

# Prompt context given to the LLM: a sample of the data (df.head()) plus the
# exact code skeleton the model is expected to produce.
context = f"""
You have been provided with a pandas DataFrame named df. Based on a specific request, generate a chart using Plotly Graph Objects, and write Python code to accomplish this task.
To begin, review the result of `print(df.head())` in the code snippet below, which displays a sample of the DataFrame's content:
{df.head()}
Based on the given data, you must identify the type of chart necessary to answer the request, as well as the columns that should be used in the chart. Be sure to consider the chart's readability, visual appeal, and informativeness when selecting the appropriate chart type.
Once you have identified the appropriate chart type and columns to use, write Python code to filter the DataFrame according to the request and generate the chart using Plotly Graph Objects.
The output of your code should be valid Python code with the following format:
# Filter the DataFrame based on the request
df_filtered = df[...]
# Generate a chart based on the filtered DataFrame
fig = go.Figure(...)
# Update the layout of the chart to make it more appealing
fig.update_layout(...)
"""
# template for an instruction with input
# The chain fills {instruction} with the user's request and {context} with
# the data/context prompt assembled above.
_TEMPLATE_TEXT = "{instruction}\n\nInput:\n{context}"
prompt_with_context = PromptTemplate(
    template=_TEMPLATE_TEXT,
    input_variables=["instruction", "context"],
)

##openai
# Completion model + chain wiring. temperature=0.3 keeps generations mostly
# deterministic; max_tokens=500 is enough for a short plotting snippet.
llm = OpenAI(temperature=0.3, model_name="text-davinci-003", max_tokens=500)
llm_context_chain = LLMChain(prompt=prompt_with_context, llm=llm)

# Debug trace of the assembled template, then preview the data in the UI.
print("prompt_with_context==>", prompt_with_context)
st.dataframe(df.head())
@contextmanager
def st_capture(output_func):
    """Context manager that mirrors everything printed to stdout into a UI
    callback.

    Parameters
    ----------
    output_func : callable
        Invoked with the full captured text after every write (e.g. a
        Streamlit placeholder's ``.info``), so the widget updates live.

    Fix: the original defined this generator WITHOUT the ``@contextmanager``
    decorator, so ``with st_capture(...):`` raised
    ``AttributeError: __enter__`` at runtime (the "Runtime error" reported
    above). ``contextmanager`` is already imported at the top of the file.
    """
    with StringIO() as stdout, redirect_stdout(stdout):
        old_write = stdout.write

        def new_write(string):
            # Write to the buffer first, then push the entire buffer so far
            # to the callback so the display always shows the full output.
            ret = old_write(string)
            output_func(stdout.getvalue())
            return ret

        stdout.write = new_write
        yield
# --- Main interaction loop --------------------------------------------------
# Ask the user for a charting request; `answer` is a placeholder for the
# final rendered result.
instruction = st.text_input("Enter instruction here:", "")
answer = st.empty()

if instruction:
    output = st.empty()
    # Mirror everything print()ed below into the `output` widget while the
    # code is generated and executed.
    with st_capture(output.info):
        generated = llm_context_chain.predict(
            context=context, instruction=instruction
        )
        code_generate = generated.lstrip()
        print("code_generate==>", code_generate)
        # Run the generated snippet in the sandboxed REPL (has df and go).
        response = execution.run(code_generate)
        answer = st.write(response)