import json
import os
import re

import requests
import plotly.graph_objs as go
from openai import OpenAI

# Read the OpenRouter API key from the environment; never hardcode secrets in source.
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")

or_client = OpenAI(api_key=OPENROUTER_API_KEY, base_url="https://openrouter.ai/api/v1")
def chat_with_llama(messages, model="meta-llama/llama-3-70b-instruct:nitro"):
    """Send a chat completion request to OpenRouter and return the reply text."""
    response = or_client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=4096,
    )
    response_message = response.choices[0].message.content
    print(response_message, "\n\n\n")
    return response_message
SysPrompt = """
You are a helpful expert ai assistant capable of executing python code in an interactive jupyter environment. Use the provided tools as needed to complete the given task. use markdown step to add detailed explanations in markdown format before the code. Use plotly as default charting library unless specified. Use IPython.display to render HTML and other compatible files.
You should output only in the following xml format to perform any of the following steps:
## Output Format:
<execute_python>python code here</execute_python>
<markdown>any explanations in markdown format</markdown>
these are the preinstalled Libraries in current environment:
pandas
matplotlib
plotly
yfinance
numpy
seaborn
scikit-learn
statsmodels
geopandas
geopy
folium
IPython
"""
# Execute code via the remote Jupyter execution service (a blocking HTTP call)
def execute_code(code):
    headers = {"accept": "application/json", "Content-Type": "application/json"}
    data = {"session_token": "", "code": code}
    response = requests.post(
        "https://pvanand-code-execution.hf.space/execute",
        headers=headers,
        data=json.dumps(data),
    )
    if response.status_code == 200:
        payload = response.json()
        if payload["status"] == "success":
            # Code execution returned results
            output = payload["value"]
        else:
            # Code execution failed; surface the error in the same output shape
            output = [
                {
                    "error": {
                        "ename": "Execution request failed",
                        "evalue": payload["value"],
                        "traceback": [],
                    }
                }
            ]
            print(payload["value"])
    else:
        output = []
    return output
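# A rough sketch of the shape execute_code returns on success, inferred from how
# process_execution_output consumes it below (the field names are assumptions,
# not a documented contract of the execution service):
#   [
#       {"text": "stdout or text/plain output"},
#       {"data": {"image/png": "<base64>", "text/html": "<div>...</div>"}},
#       {"error": {"ename": "...", "evalue": "...", "traceback": [...]}},
#   ]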
def extract_steps(text):
    steps = []
    pattern = re.compile(r"<(\w+)>(.*?)</\1>", re.DOTALL)
    matches = pattern.findall(text)
    for tag, content in matches:
        if tag == "execute_python":
            # Strip any markdown code fences the model wrapped around the code
            content = re.sub(r"```python|```", "", content).strip()
        steps.append({"type": tag, "content": content.strip()})
    return steps
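# Example of what extract_steps produces for a (hypothetical) model response:
#   >>> extract_steps("<markdown>Load data</markdown><execute_python>```python\nprint(1)\n```</execute_python>")
#   [{'type': 'markdown', 'content': 'Load data'},
#    {'type': 'execute_python', 'content': 'print(1)'}]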
def execute_llm_code(code):
    # Return None on any transport-level failure so the caller can skip rendering
    try:
        output = execute_code(code)
    except Exception:
        output = None
    return output
def call_llm(history, model="meta-llama/llama-3-70b-instruct:nitro"):
    # Prepend the system prompt if the conversation does not already include one
    if history[0]["role"] != "system":
        history.insert(0, {"role": "system", "content": SysPrompt})
    response = chat_with_llama(history, model=model)
    llm_steps = extract_steps(response)
    result = []
    python_code = []
    if llm_steps:
        for step in llm_steps:
            if step["type"] == "execute_python":
                python_code.append(step["content"])
                output = execute_llm_code(code=step["content"])
                if output is not None:
                    clean_output = process_execution_output(execution_output=output)
                    result += clean_output
            else:
                result.append({"type": "text", "content": str(step["content"])})
        return (result, response, python_code)
    else:
        return [response], response, python_code
def process_execution_output(execution_output):
    OUTPUT = []
    if isinstance(execution_output, str):
        # Plain-string payloads carry no renderable items
        pass
    else:
        for item in execution_output:
            if "text" in item:
                # Skip noisy text output (warnings, failed downloads, etc.)
                exclude_list = [
                    "%%",
                    "NoneType",
                    "YFTzMissingError",
                    "Failed download",
                    "FutureWarning",
                ]
                if not any(x in str(item["text"]) for x in exclude_list):
                    OUTPUT.append({"type": "text", "content": item["text"]})
            elif "data" in item:
                if "image/png" in item["data"]:
                    OUTPUT.append(
                        {"type": "image", "content": item["data"]["image/png"]}
                    )
                elif "application/vnd.plotly.v1+json" in item["data"]:
                    plotly_data = item["data"]["application/vnd.plotly.v1+json"]
                    if isinstance(plotly_data, str):
                        plotly_data = json.loads(plotly_data)
                    go_json = str(go.Figure(plotly_data).to_json())
                    OUTPUT.append({"type": "plotly", "content": go_json})
                # Folium maps arrive as a text/plain repr ("<folium.folium.Map at 0x...>")
                # alongside an HTML payload; match on the class name, not a literal
                # repr with a hardcoded memory address
                elif "folium.folium.Map" in item["data"].get("text/plain", ""):
                    OUTPUT.append(
                        {"type": "FoliumMap", "content": item["data"]["text/html"]}
                    )
                # None of the above and not a script-only payload: render as HTML
                elif "text/html" in item["data"]:
                    script_tag_only = (
                        item["data"]["text/html"].strip()[:7] == "<script"
                    )  # TODO: check the full script tag
                    if not script_tag_only:
                        OUTPUT.append(
                            {"type": "HTML", "content": item["data"]["text/html"]}
                        )
            elif "error" in item:
                # Execution errors are dropped from the rendered output
                pass
    return OUTPUT
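if __name__ == "__main__":
    # Minimal usage sketch, assuming OPENROUTER_API_KEY is set in the environment
    # and the hosted execution service above is reachable. The prompt is illustrative.
    history = [{"role": "user", "content": "Plot y = x**2 for x from 0 to 10."}]
    results, raw_response, code_blocks = call_llm(history)
    for item in results:
        # Items are rendered dicts ({"type": "text" | "image" | "plotly" | "HTML"
        # | "FoliumMap", "content": ...}) or, when no steps parsed, the raw string.
        print(item["type"] if isinstance(item, dict) else "raw response")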