ashishbangwal commited on
Commit
4bf0813
·
1 Parent(s): 6218447
.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ .venv
2
+ .env
3
+ __pycache__
Dockerfile ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Use an official Python runtime as a parent image
FROM python:3.10.9

# Set the working directory in the container
WORKDIR /app

# Copy only the dependency manifest first so the pip-install layer is cached
# and code-only edits don't force a reinstall
COPY requirements.txt /app

# Install any needed packages specified in requirements.txt
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application source into /app
COPY . /app

# Launch the FastAPI app; port 7860 is the Hugging Face Spaces convention
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
main.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from modules.functions import call_llm
2
+ from fastapi import FastAPI
3
+ from pydantic import BaseModel, Field
4
+ from typing import List, Set
5
+ from typing_extensions import TypedDict, Literal
6
+
7
# FastAPI application instance.
# NOTE(review): debug=True returns debug tracebacks to clients — confirm this
# service is not exposed publicly with debug enabled.
app = FastAPI(debug=True)
8
+
9
+
10
class Message(TypedDict):
    """One chat message in OpenAI chat-completions shape."""

    # Who produced the message; restricted to the three standard roles.
    role: Literal["system", "user", "assistant"]
    # The message text.
    content: str
13
+
14
+
15
class Output(TypedDict):
    """One renderable result item returned to the client."""

    # Kind of payload — values seen in this codebase: "text", "image",
    # "plotly", "FoliumMap", "HTML" (see modules/functions.py).
    type: str
    # Payload body: markdown text, base64 image, plotly JSON, or raw HTML.
    content: str
18
+
19
+
20
class History(BaseModel):
    """Request body for POST /response: the chat transcript so far."""

    # Ordered chat messages; the last entry is the user's newest turn.
    # `examples` only feeds the OpenAPI docs — it is not validation.
    history: List[Message] = Field(
        examples=[
            [
                {"role": "user", "content": "Tell me a joke."},
                {
                    "role": "assistant",
                    "content": "Why did the scarecrow win an award? Because he was outstanding in his field!",
                },
                {"role": "user", "content": "Tell me another joke."},
            ]
        ]
    )
33
+
34
+
35
class Response(BaseModel):
    """Response body for POST /response: a list of renderable outputs."""

    # Each item pairs a renderer hint ("text", "plotly", ...) with its content.
    # `examples` only feeds the OpenAPI docs — it is not validation.
    response: List[Output] = Field(
        examples=[
            [
                {
                    "type": "text",
                    "content": "### Nifty 50 Annual Return for Past 10 Years...",
                },
                {
                    "type": "plotly",
                    "content": '{"data":[{"x":[null,6.75517596225125.....}',
                },
            ]
        ]
    )
50
+
51
+
52
+ @app.post("/response")
53
+ async def get_response(history: History) -> Response:
54
+ print(history)
55
+ return call_llm(history.history) # type: ignore
modules/__pycache__/functions.cpython-310.pyc ADDED
Binary file (4.26 kB). View file
 
modules/functions.py ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Fix: `json` was imported twice; imports regrouped stdlib / third-party.
import json
import os
import re

import plotly.graph_objs as go
import requests
from openai import OpenAI


# OpenRouter-compatible client; the API key is read from the environment
# (None if unset — the client is still constructed, calls will fail later).
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
or_client = OpenAI(api_key=OPENROUTER_API_KEY, base_url="https://openrouter.ai/api/v1")
13
+
14
+
15
def chat_with_llama(messages, model="meta-llama/llama-3-70b-instruct:nitro"):
    """Send *messages* to the model via OpenRouter and return the reply text."""
    completion = or_client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=4096,
    )

    reply = completion.choices[0].message.content

    # Debug trace of the raw model reply.
    print(reply, "\n\n\n")

    return reply
27
+
28
+
29
+ SysPrompt = """
30
+ You are a helpful expert ai assistant capable of executing python code in an interactive jupyter environment. Use the provided tools as needed to complete the given task. use markdown step to add detailed explanations in markdown format before the code. Use plotly as default charting library unless specified. Use IPython.display to render HTML and other compatible files.
31
+ You should output only in the following xml format to perform any of the following steps:
32
+ ## Output Format:
33
+
34
+ <execute_python>python code here</execute_python>
35
+ <markdown>any explanations in markdown format</markdown>
36
+
37
+
38
+ these are the preinstalled Libraries in current environment:
39
+ pandas
40
+ matplotlib
41
+ plotly
42
+ yfinance
43
+ numpy
44
+ seaborn
45
+ scikit-learn
46
+ statsmodels
47
+ geopandas
48
+ geopy
49
+ folium
50
+ IPython
51
+ """
52
+
53
+
54
def execute_code(code, timeout=60):
    """Run *code* on the remote execution service and return its output items.

    Args:
        code: Python source to execute in the hosted sandbox.
        timeout: Seconds to wait for the HTTP call. New parameter (default 60);
            the original had no timeout and could hang indefinitely.

    Returns:
        The service's output list on success; a single-item list wrapping an
        error payload when execution fails; an empty list when the HTTP
        request itself does not return 200.

    Raises:
        requests.RequestException: on network failure or timeout.
    """
    headers = {"accept": "application/json", "Content-Type": "application/json"}

    data = {"session_token": "", "code": code}

    response = requests.post(
        "https://pvanand-code-execution.hf.space/execute",
        headers=headers,
        data=json.dumps(data),
        timeout=timeout,
    )

    if response.status_code != 200:
        return []

    payload = response.json()  # parse once instead of re-parsing per access
    if payload["status"] == "success":
        return payload["value"]

    # Execution failed: surface the service's message in the Jupyter-style
    # error shape that process_execution_output() understands.
    print(payload["value"])
    return [
        {
            "error": {
                "ename": "Execution request failed",
                "evalue": payload["value"],
                "traceback": [],
            }
        }
    ]
85
+
86
+
87
def extract_steps(text):
    """Parse ``<tag>body</tag>`` sections out of an LLM reply.

    Returns a list of ``{"type": tag, "content": body}`` dicts in document
    order; ``execute_python`` bodies additionally have markdown code fences
    stripped before the final strip.
    """
    tag_pattern = re.compile(r"<(\w+)>(.*?)</\1>", re.DOTALL)

    parsed = []
    for tag, body in tag_pattern.findall(text):
        if tag == "execute_python":
            # Drop ```python fences the model sometimes wraps code in.
            body = re.sub(r"```python|```", "", body).strip()
        parsed.append({"type": tag, "content": body.strip()})

    return parsed
98
+
99
+
100
def execute_llm_code(code):
    """Best-effort wrapper around execute_code(): returns None on any exception."""
    try:
        return execute_code(code)
    except Exception:
        # st.error("Exception occurred: " + str(e))
        return None
107
+
108
+
109
def call_llm(history, model="meta-llama/llama-3-70b-instruct:nitro"):
    """Run one LLM turn and execute any <execute_python> steps it produces.

    Args:
        history: Chat messages (role/content dicts). No longer mutated — the
            original inserted the system prompt into the caller's list.
        model: OpenRouter model identifier.

    Returns:
        {"response": [...]} where every item is a {"type", "content"} dict,
        matching the Output schema declared in main.py.
    """
    # Prepend the system prompt to a copy, leaving the caller's list intact.
    messages = [{"role": "system", "content": SysPrompt}] + list(history)
    response = chat_with_llama(messages, model=model)
    llm_steps = extract_steps(response)

    if not llm_steps:
        # Fix: the original returned the bare reply string here, which
        # violated the declared List[Output] response schema.
        return {"response": [{"type": "text", "content": response}]}

    result = []
    for step in llm_steps:
        if step["type"] == "execute_python":
            output = execute_llm_code(code=step["content"])
            if output is not None:
                result += process_execution_output(execution_output=output)
            # else: execution failed silently — best-effort by design
        else:
            result.append({"type": "text", "content": str(step["content"])})
    return {"response": result}
129
+
130
+
131
def process_execution_output(execution_output):
    """Convert Jupyter-style execution output items into renderable outputs.

    Args:
        execution_output: Either a plain string (ignored) or a list of
            Jupyter-style output items ({"text": ...}, {"data": {mime: ...}},
            or {"error": ...}) — assumes the remote executor returns standard
            mime bundles; TODO confirm against the execution service.

    Returns:
        List of {"type", "content"} dicts (text / image / plotly / FoliumMap /
        HTML). Errors and known noise are dropped.
    """
    OUTPUT = []
    if isinstance(execution_output, str):
        # Code Execution Output Only — nothing structured to render.
        pass
    else:
        for item in execution_output:
            if "text" in item:
                # Drop known noise: magics, yfinance failures, warnings.
                exclude_list = [
                    "%%",
                    "NoneType",
                    "YFTzMissingError",
                    "Failed download",
                    "FutureWarning",
                ]
                if not any(marker in str(item["text"]) for marker in exclude_list):
                    OUTPUT.append({"type": "text", "content": item["text"]})

            elif "data" in item:

                if "image/png" in item["data"]:
                    OUTPUT.append(
                        {"type": "image", "content": item["data"]["image/png"]}
                    )

                elif "application/vnd.plotly.v1+json" in item["data"]:
                    plotly_data = item["data"]["application/vnd.plotly.v1+json"]
                    if isinstance(plotly_data, str):
                        plotly_data = json.loads(plotly_data)
                    go_json = str(go.Figure(plotly_data).to_json())
                    OUTPUT.append({"type": "plotly", "content": go_json})

                # Fix: the original tested a hard-coded repr string (with a
                # fixed memory address, "...at 0x7f2aef096f50>") as a dict
                # key, so this branch could never match. Detect folium maps by
                # class name in the text/plain repr instead.
                elif "text/html" in item["data"] and "folium.folium.Map" in str(
                    item["data"].get("text/plain", "")
                ):
                    OUTPUT.append(
                        {"type": "FoliumMap", "content": item["data"]["text/html"]}
                    )

                # None of the above and not an empty script then render html
                elif "text/html" in item["data"]:
                    script_tag_only = (
                        item["data"]["text/html"].strip()[:7] == "<script"
                    )  # TODO: Check full script tag
                    if not script_tag_only:
                        OUTPUT.append(
                            {"type": "HTML", "content": item["data"]["text/html"]}
                        )

            elif "error" in item:
                # Execution errors are intentionally swallowed here.
                pass
    return OUTPUT
requirements.txt ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ annotated-types==0.7.0
2
+ anyio==4.4.0
3
+ certifi==2024.7.4
4
+ charset-normalizer==3.3.2
5
+ click==8.1.7
6
+ distro==1.9.0
7
+ dnspython==2.6.1
8
+ email_validator==2.2.0
9
+ exceptiongroup==1.2.2
10
+ fastapi==0.112.0
11
+ fastapi-cli==0.0.5
12
+ h11==0.14.0
13
+ httpcore==1.0.5
14
+ httptools==0.6.1
15
+ httpx==0.27.0
16
+ idna==3.7
17
+ Jinja2==3.1.4
18
+ kaleido==0.2.1
19
+ markdown-it-py==3.0.0
20
+ MarkupSafe==2.1.5
21
+ mdurl==0.1.2
22
+ openai==1.37.1
23
+ packaging==24.1
24
+ plotly==5.23.0
25
+ pydantic==2.8.2
26
+ pydantic_core==2.20.1
27
+ Pygments==2.18.0
28
+ python-dotenv==1.0.1
29
+ python-multipart==0.0.9
30
+ PyYAML==6.0.1
31
+ requests==2.32.3
32
+ rich==13.7.1
33
+ shellingham==1.5.4
34
+ sniffio==1.3.1
35
+ starlette==0.37.2
36
+ tenacity==9.0.0
37
+ tqdm==4.66.4
38
+ typer==0.12.3
39
+ typing_extensions==4.12.2
40
+ urllib3==2.2.2
41
+ uvicorn==0.30.5
42
+ uvloop==0.19.0
43
+ watchfiles==0.22.0
44
+ websockets==12.0