- .gitignore +171 -0
- Dockerfile +11 -0
- LICENSE +21 -0
- analyticsHub/__init__.py +0 -0
- analyticsHub/components/__init__.py +6 -0
- analyticsHub/components/codeGeneratorAgent.py +31 -0
- analyticsHub/components/failsafeAgent.py +31 -0
- analyticsHub/components/metadataGenerator.py +33 -0
- analyticsHub/components/queryRephraserAgent.py +43 -0
- analyticsHub/components/speechToText.py +23 -0
- analyticsHub/models/__init__.py +0 -0
- analyticsHub/models/requestModels.py +105 -0
- analyticsHub/pipelines/__init__.py +0 -0
- analyticsHub/pipelines/pipeline.py +78 -0
- analyticsHub/routers/__init__.py +2 -0
- analyticsHub/routers/authentication.py +245 -0
- analyticsHub/routers/blends.py +118 -0
- analyticsHub/routers/dashboard.py +104 -0
- analyticsHub/routers/dataLoader.py +174 -0
- analyticsHub/routers/projectManager.py +200 -0
- analyticsHub/routers/reportingTool.py +48 -0
- analyticsHub/routers/utilities.py +28 -0
- analyticsHub/utils/__init__.py +0 -0
- analyticsHub/utils/exceptions.py +33 -0
- analyticsHub/utils/functions.py +27 -0
- analyticsHub/utils/logger.py +28 -0
- analyticsHub/workflows/__init__.py +0 -0
- analyticsHub/workflows/reportingWorkflow.py +100 -0
- app.py +53 -0
- config.ini +23 -0
- params.yaml +834 -0
- requirements.txt +28 -0
- setup.py +28 -0
.gitignore
ADDED
@@ -0,0 +1,171 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# UV
+# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+#uv.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+.pdm.toml
+.pdm-python
+.pdm-build/
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+# PyPI configuration file
+.pypirc
Dockerfile
ADDED
@@ -0,0 +1,11 @@
+FROM python:3.10-slim
+
+WORKDIR /app
+
+COPY requirements.txt ./
+
+RUN pip install --no-cache-dir -r requirements.txt
+
+COPY . .
+
+CMD ["python", "app.py"]
LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 ReviveAnalyticsPvtLtd
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
analyticsHub/__init__.py
ADDED
File without changes
analyticsHub/components/__init__.py
ADDED
@@ -0,0 +1,6 @@
+from dataclasses import dataclass, field
+
+@dataclass
+class REPLManager:
+    manager: dict = field(default_factory=dict)
+replManager = REPLManager()
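
Note on intent: `replManager` is a module-level registry mapping a project id to its Python REPL session, shared by the pipeline and routers below. A minimal usage sketch, assuming a LangChain `PythonREPL` as the backing tool (the actual REPL class is not shown in this commit):

    # Hypothetical sketch; the real REPL implementation is not part of this diff.
    from langchain_experimental.utilities import PythonREPL  # assumed backing tool
    from analyticsHub.components import replManager

    projectId = "demo-project"
    replManager.manager[projectId] = PythonREPL()       # one session per project
    print(replManager.manager[projectId].run("1 + 1"))  # -> "2"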
analyticsHub/components/codeGeneratorAgent.py
ADDED
@@ -0,0 +1,31 @@
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.prompts import PromptTemplate
+from langchain_core.runnables import RunnablePassthrough
+from ..utils.functions import readYaml, getConfig
+from ..utils.exceptions import CustomException
+from langchain_cerebras import ChatCerebras
+from ..utils.logger import logger
+import os
+
+class CodeGenerator:
+    def __init__(self):
+        logger.info("Initializing CodeGenerator.")
+        self.yamlPath = os.path.join(os.getcwd(), "params.yaml")
+        self.config = getConfig(os.path.join(os.getcwd(), "config.ini"))
+
+    def getCodeGeneratorChain(self):
+        try:
+            logger.info("Constructing code generation chain.")
+            promptTemplate = readYaml(self.yamlPath)["codeGeneratorAgentPrompt"]
+            codeGeneratorPrompt = PromptTemplate.from_template(promptTemplate)
+            llm = ChatCerebras(
+                model=self.config.get("CODEGENERATOR", "model"),
+                temperature=self.config.getfloat("CODEGENERATOR", "temperature")
+            )
+            codeGeneratorParser = StrOutputParser()
+            codeGeneratorChain = RunnablePassthrough() | codeGeneratorPrompt | llm | codeGeneratorParser
+            logger.info("Code generation chain constructed successfully.")
+            return codeGeneratorChain
+        except Exception as e:
+            logger.error(f"Error constructing code generation chain: {e}")
+            raise CustomException(e)
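
For orientation: the chain returned above is a plain LCEL runnable, so it can be invoked with whatever variables `codeGeneratorAgentPrompt` expects. A minimal sketch, assuming the template takes `metadata` and `query` placeholders (the real placeholder names live in params.yaml, which is not shown in full here; `CEREBRAS_API_KEY` must be set):

    from analyticsHub.components.codeGeneratorAgent import CodeGenerator

    chain = CodeGenerator().getCodeGeneratorChain()
    # Placeholder names below are assumptions, not confirmed by this diff.
    code = chain.invoke({"metadata": "...", "query": "plot sales by month"})
    print(code)  # raw string output from StrOutputParser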
analyticsHub/components/failsafeAgent.py
ADDED
@@ -0,0 +1,31 @@
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.prompts import PromptTemplate
+from langchain_core.runnables import RunnablePassthrough
+from ..utils.functions import readYaml, getConfig
+from ..utils.exceptions import CustomException
+from langchain_cerebras import ChatCerebras
+from ..utils.logger import logger
+import os
+
+class FailsafeCodeGenerator:
+    def __init__(self):
+        logger.info("Initializing Failsafe.")
+        self.yamlPath = os.path.join(os.getcwd(), "params.yaml")
+        self.config = getConfig(os.path.join(os.getcwd(), "config.ini"))
+
+    def getFailsafeCodeGeneratorChain(self):
+        try:
+            logger.info("Constructing failsafe code generation chain.")
+            promptTemplate = readYaml(self.yamlPath)["codeDebuggerAgentPrompt"]
+            codeGeneratorPrompt = PromptTemplate.from_template(promptTemplate)
+            llm = ChatCerebras(
+                model=self.config.get("FAILSAFECODEGENERATOR", "model"),
+                temperature=self.config.getfloat("FAILSAFECODEGENERATOR", "temperature")
+            )
+            codeGeneratorParser = StrOutputParser()
+            failsafeCodeGeneratorChain = RunnablePassthrough() | codeGeneratorPrompt | llm | codeGeneratorParser
+            logger.info("Failsafe code generation chain constructed successfully.")
+            return failsafeCodeGeneratorChain
+        except Exception as e:
+            logger.error(f"Error constructing failsafe code generation chain: {e}")
+            raise CustomException(e)
analyticsHub/components/metadataGenerator.py
ADDED
@@ -0,0 +1,33 @@
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_core.runnables import RunnableLambda
+from ..utils.functions import readYaml, getConfig
+from ..utils.exceptions import CustomException
+from langchain_cerebras import ChatCerebras
+from ..utils.logger import logger
+import os
+
+class MetadataGenerator:
+    def __init__(self):
+        logger.info("Initializing MetadataGenerator.")
+        self.yamlPath = os.path.join(os.getcwd(), "params.yaml")
+        self.config = getConfig(os.path.join(os.getcwd(), "config.ini"))
+
+    def getMetadataChain(self):
+        try:
+            logger.info("Constructing metadata generation chain.")
+            promptTemplate = readYaml(self.yamlPath)["metadataGeneratorPrompt"]
+            prompt = ChatPromptTemplate.from_template(promptTemplate)
+            llm = ChatCerebras(
+                model=self.config.get("METADATAGENERATOR", "model"),
+                temperature=self.config.getfloat("METADATAGENERATOR", "temperature")
+            )
+            outputParser = StrOutputParser()
+            chain = {
+                "metadata": RunnableLambda(lambda x: x["metadata"])
+            } | prompt | llm | outputParser
+            logger.info("Metadata generation chain constructed successfully.")
+            return chain
+        except Exception as e:
+            logger.error(f"Error constructing metadata generation chain: {e}")
+            raise CustomException(e)
analyticsHub/components/queryRephraserAgent.py
ADDED
@@ -0,0 +1,43 @@
+from langchain_core.output_parsers import JsonOutputParser
+from langchain_core.prompts import PromptTemplate
+from ..utils.functions import readYaml, getConfig
+from ..utils.exceptions import CustomException
+from pydantic import Field, BaseModel
+from langchain_cerebras import ChatCerebras
+from ..utils.logger import logger
+import os
+
+class QueryRephraseOutput(BaseModel):
+    rephrasedOutput: str | None = Field(
+        description="A clear and concise rephrased version of the user's query. If the query is unclear, invalid, or requires clarification, this will be `None`."
+    )
+    doubt: str | None = Field(
+        description="A message indicating any doubt, required clarification, or reason why the input query is invalid. If the query is successfully rephrased, this will be `None`."
+    )
+
+class QueryRephaser:
+    def __init__(self):
+        logger.info("Initializing QueryRephraser.")
+        self.yamlPath = os.path.join(os.getcwd(), "params.yaml")
+        self.config = getConfig(os.path.join(os.getcwd(), "config.ini"))
+
+    def getQueryRephraserChain(self):
+        try:
+            logger.info("Constructing query rephraser chain.")
+            queryRephraseParser = JsonOutputParser(pydantic_object = QueryRephraseOutput)
+            queryRephrasePrompt = PromptTemplate(
+                template = readYaml(self.yamlPath)["queryRephraserAgentPrompt"],
+                input_variables = ["metadata", "query"],
+                partial_variables = {"format_instructions": queryRephraseParser.get_format_instructions()}
+            )
+            llm = ChatCerebras(
+                model=self.config.get("QUERYREPHRASER", "model"),
+                temperature=self.config.getfloat("QUERYREPHRASER", "temperature"),
+                max_tokens=self.config.getint("QUERYREPHRASER", "maxTokens")
+            )
+            queryRephraseChain = queryRephrasePrompt | llm | queryRephraseParser
+            logger.info("Query rephraser chain constructed successfully.")
+            return queryRephraseChain
+        except Exception as e:
+            logger.error(f"Error constructing query rephraser chain: {e}")
+            raise CustomException(e)
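
Per the `QueryRephraseOutput` schema, exactly one of `rephrasedOutput` and `doubt` is expected to be populated, so callers branch on which field came back. A minimal sketch of that contract (input keys follow the `input_variables` declared above):

    from analyticsHub.components.queryRephraserAgent import QueryRephaser

    chain = QueryRephaser().getQueryRephraserChain()
    result = chain.invoke({"metadata": "...", "query": "avg order value?"})
    # JsonOutputParser yields a plain dict, not a QueryRephraseOutput instance.
    if result.get("rephrasedOutput"):
        print("proceed with:", result["rephrasedOutput"])
    else:
        print("ask the user:", result["doubt"])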
analyticsHub/components/speechToText.py
ADDED
@@ -0,0 +1,23 @@
+from ..utils.functions import readYaml, getConfig
+from ..utils.exceptions import CustomException
+from ..utils.logger import logger
+from groq import Groq
+import os
+
+class SpeechToText:
+    def __init__(self):
+        logger.info("Initializing speech-to-text Model.")
+        self.config = getConfig(os.path.join(os.getcwd(), "config.ini"))
+        self.client = Groq()
+
+    def getTranscript(self, b64String: str) -> str:
+        try:
+            logger.info("Generating transcript.")
+            transcription = self.client.audio.transcriptions.create(
+                url = f'data:audio/webm;base64,{b64String}',
+                model = self.config.get("SPEECHTOTEXT", "model")
+            )
+            return transcription.text.strip()
+        except Exception as e:
+            logger.error(f"Error generating transcript: {e}")
+            raise CustomException(e)
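
The transcription call takes a base64 data URL, so a caller only needs to encode raw audio bytes. A minimal sketch, assuming the audio is webm (as the data-URL prefix implies) and GROQ_API_KEY is set; "recording.webm" is a hypothetical input file:

    import base64
    from analyticsHub.components.speechToText import SpeechToText

    with open("recording.webm", "rb") as f:  # hypothetical input file
        b64String = base64.b64encode(f.read()).decode("ascii")
    print(SpeechToText().getTranscript(b64String = b64String))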
analyticsHub/models/__init__.py
ADDED
File without changes
analyticsHub/models/requestModels.py
ADDED
@@ -0,0 +1,105 @@
+from pydantic import BaseModel
+
+class SignUp(BaseModel):
+    email: str
+    password: str
+
+class Login(BaseModel):
+    email: str
+    password: str
+
+class LoginWithProvider(BaseModel):
+    email: str
+    sub: str | None = None
+    id: str | None = None
+    nodeId: str | None = None
+    provider: str
+
+class OnboardingDetails(BaseModel):
+    usage: str
+    fullName: str
+    email: str
+    role: str
+    companyName: str
+    industryType: str
+    companySize: str
+    country: str
+    goals: str
+    source: str
+
+class NewCredentials(BaseModel):
+    newPassword: str
+
+class UpdateProjectState(BaseModel):
+    projectId: str
+    action: str
+
+class CreateProject(BaseModel):
+    projectName: str
+    projectDescription: str | None = None
+
+class GenerateChartInput(BaseModel):
+    inputQuery: str
+    projectId: str
+
+class SpeechToTextModel(BaseModel):
+    b64String: str
+
+class DeleteTable(BaseModel):
+    projectId: str
+    tableName: str
+
+class EditMetadata(BaseModel):
+    projectId: str
+    tableName: str
+    tableDescription: str | None = None
+    columnName: str | None = None
+    columnDescription: str | None = None
+
+class PanelChartDetails(BaseModel):
+    projectId: str
+    chartType: str
+    xAxis: str
+    yAxis: str
+    dataSource: str
+    aggregationMetric: str
+
+class CreateDataBlend(BaseModel):
+    projectId: str
+    blendOn: list[str]
+    blendName: str
+    tables: list[str]
+    joinTypes: list[str]
+
+class GetFieldsFromSources(BaseModel):
+    projectId: str
+    tableName: str
+
+class LoadMySQLorPostgreSQL(BaseModel):
+    projectId: str
+    user: str
+    password: str
+    host: str
+    port: int
+    db: str
+    table: str
+
+class LoadMongoDB(BaseModel):
+    projectId: str
+    connectionString: str
+    db: str
+    collection: str
+
+class CreatePage(BaseModel):
+    projectId: str
+    pageName: str
+
+class ExportToDashboard(BaseModel):
+    projectId: str
+    page: str
+    chartType: str
+    title: str
+    xLabels: list[str]
+    yLabels: list[str]
+    data: dict[str, list]
+    layout: dict[str, int]
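
These models double as request validation: FastAPI builds them from the JSON body and rejects missing or mistyped fields with a 422. A short sketch with illustrative values:

    from pydantic import ValidationError
    from analyticsHub.models.requestModels import CreateDataBlend

    payload = {
        "projectId": "p-123",            # illustrative values
        "blendName": "salesWithRegions",
        "tables": ["sales", "regions"],
        "joinTypes": ["inner"],
        "blendOn": ["regionId"],
    }
    try:
        blend = CreateDataBlend(**payload)
        print(blend.blendName)
    except ValidationError as e:
        print(e)                         # raised if a field is missing or mistyped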
analyticsHub/pipelines/__init__.py
ADDED
File without changes
analyticsHub/pipelines/pipeline.py
ADDED
@@ -0,0 +1,78 @@
+from ..components.metadataGenerator import MetadataGenerator
+from ..workflows.reportingWorkflow import reportingToolWorkflow
+from ..components.speechToText import SpeechToText
+from ..utils.exceptions import CustomException
+from ..utils.functions import readYaml
+from ..components import replManager
+from supabase import create_client
+from urllib.request import urlopen
+from ..utils.logger import logger
+import orjson
+import os
+
+class CompletePipeline:
+    def __init__(self):
+        logger.info("Initializing CompletePipeline components.")
+        self.speechToTextModule = SpeechToText()
+        self.metadataGenerator = MetadataGenerator()
+        self.yamlPath = os.path.join(os.getcwd(), "params.yaml")
+        self.supabaseClient = create_client(
+            supabase_url = os.environ["SUPABASE_URL"],
+            supabase_key = os.environ["SUPABASE_KEY"]
+        )
+
+    def generateMetadata(self, projectId: str) -> dict:
+        try:
+            dataFiles = [x.get("name") for x in self.supabaseClient.storage.from_("AnalyticsHub").list(path = projectId) if x.get("name").endswith(".parquet")]
+            results = ""
+            for fileName in dataFiles:
+                dataframeName = fileName.replace(".parquet", "")
+                codeString = readYaml(self.yamlPath)["attributeInfoCode"].format(dataframeName = dataframeName, projectId = projectId)
+                results += replManager.manager[projectId].run(codeString)
+            metadataChain = self.metadataGenerator.getMetadataChain()
+            metadata = metadataChain.invoke({"metadata": results})
+            metadataParts = metadata.split("```")
+            metadata = metadataParts[-2]
+            metadata = orjson.loads("\n".join(metadata.split("\n")[1:]).encode())
+            return metadata
+        except Exception as e:
+            logger.error(CustomException(e))
+            raise CustomException(e)
+
+    def generateChart(self, inputQuery: str, metadata: dict, projectId: str) -> dict:
+        try:
+            response = reportingToolWorkflow.invoke({
+                "inputQuery": inputQuery,
+                "metadata": metadata,
+                "projectId": projectId
+            })
+            return response
+        except Exception as e:
+            logger.error(CustomException(e))
+            raise CustomException(e)
+
+    def generateChartFromPanel(self, projectId: str, chartType: str, xAxis: str, yAxis: str, aggregationMetric: str, dataSource: str) -> dict:
+        try:
+            blendConfigUrl = os.environ["FILE_URL"].format(projectId = projectId, fileName = "blendConfig.json").replace(".parquet", "")
+            blendConfig = orjson.loads(urlopen(blendConfigUrl).read())
+            blendedTables = list(blendConfig.keys())
+            if dataSource in blendedTables:
+                tablesUsed = blendConfig[dataSource].get("tables")
+                joinTypes = blendConfig[dataSource].get("joinTypes")
+                blendOn = blendConfig[dataSource].get("blendOn")
+                response = replManager.manager[projectId].run(f"getDataForChart(projectId='{projectId}', chartType='{chartType}', xAxis='{xAxis}', yAxis='{yAxis}', aggregationMetric='{aggregationMetric}', tablesUsed={tablesUsed}, joinTypes={joinTypes}, blendOn={blendOn})")
+            else:
+                response = replManager.manager[projectId].run(f"getDataForChart(projectId='{projectId}', chartType='{chartType}', xAxis='{xAxis}', yAxis='{yAxis}', aggregationMetric='{aggregationMetric}', tablesUsed='{dataSource}')")
+            response = orjson.loads(response.encode())
+            return response
+        except Exception as e:
+            logger.error(CustomException(e))
+            raise CustomException(e)
+
+    def speechToText(self, b64String: str) -> str:
+        try:
+            return self.speechToTextModule.getTranscript(b64String = b64String)
+        except Exception as e:
+            logger.error(CustomException(e))
+            raise CustomException(e)
+
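
The metadata post-processing in `generateMetadata` relies on the LLM wrapping its JSON in a fenced block: `split("```")[-2]` grabs the last fenced segment, and the first line (the language tag) is dropped before `orjson.loads`. A standalone sketch of that extraction step:

    import orjson

    llmOutput = 'Here you go:\n```json\n{"sales": {"columns": []}}\n```\nDone.'
    fenced = llmOutput.split("```")[-2]          # 'json\n{"sales": ...}\n'
    payload = "\n".join(fenced.split("\n")[1:])  # drop the "json" language-tag line
    metadata = orjson.loads(payload.encode())
    print(metadata)                              # {'sales': {'columns': []}}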
analyticsHub/routers/__init__.py
ADDED
@@ -0,0 +1,2 @@
+from ..pipelines.pipeline import CompletePipeline
+pipeline = CompletePipeline()
analyticsHub/routers/authentication.py
ADDED
@@ -0,0 +1,245 @@
+from ..models.requestModels import SignUp, Login, LoginWithProvider, OnboardingDetails, NewCredentials
+from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
+from fastapi import APIRouter, HTTPException, Depends
+from supabase.lib.client_options import ClientOptions
+from fastapi.responses import JSONResponse
+from ..utils.functions import verifyToken
+from supabase import create_client
+from typing import Annotated
+from jose import jwt
+import pandas as pd
+import datetime
+import hashlib
+import os
+
+router = APIRouter()
+security = HTTPBearer()
+client = create_client(
+    supabase_url = os.environ["SUPABASE_URL"],
+    supabase_key = os.environ["SUPABASE_KEY"],
+    options=ClientOptions(
+        auto_refresh_token=False,
+        persist_session=False,
+    )
+)
+
+@router.post("/signUp")
+async def signup(signupDetails: SignUp):
+    try:
+        passwordString = signupDetails.password + os.environ["SECRET_KEY"]
+        hashedPassword = hashlib.md5(passwordString.encode("utf-8")).hexdigest()
+        allUsers = list()
+        page = 1
+        while True:
+            response = client.auth.admin.list_users(page = page, per_page = 1000)
+            if response == []:
+                break
+            else:
+                allUsers.extend(response)
+                page += 1
+        allUsers = [x.email for x in allUsers]
+        if signupDetails.email not in allUsers:
+            response = client.auth.sign_up(
+                {"email": signupDetails.email, "password": hashedPassword}
+            )
+            client.table(table_name = "Users").insert(
+                {
+                    "userId": response.user.id,
+                    "email": signupDetails.email,
+                    "password": hashedPassword
+                }
+            ).execute()
+            return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "userId": response.user.id})
+        else:
+            return JSONResponse(status_code = 409, content = {"status": "ERROR", "errorDetail": "User Already Exists"})
+    except Exception as e:
+        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")
+
+@router.get("/confirmMail/{userId}")
+async def confirmMail(userId: str):
+    try:
+        allUsers = list()
+        page = 1
+        while True:
+            response = client.auth.admin.list_users(page = page, per_page = 1000)
+            if response == []:
+                break
+            else:
+                allUsers.extend(response)
+                page += 1
+        email = list(filter(lambda x: True if x.id == userId else False, allUsers))[0].email
+        response = client.auth.resend({
+            "type": "signup",
+            "email": email,
+            "options": {
+                "email_redirect_to": "https://localhost:3000/login"
+            }
+        })
+        return JSONResponse(status_code = 200, content = {"status": "SUCCESS"})
+    except Exception as e:
+        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")
+
+@router.post("/login")
+async def login(loginDetails: Login):
+    try:
+        passwordString = loginDetails.password + os.environ["SECRET_KEY"]
+        hashedPassword = hashlib.md5(passwordString.encode("utf-8")).hexdigest()
+        allUsers = list()
+        page = 1
+        while True:
+            response = client.auth.admin.list_users(page = page, per_page = 1000)
+            if response == []:
+                break
+            else:
+                allUsers.extend(response)
+                page += 1
+        filteredResult = list(filter(lambda x: True if x.email == loginDetails.email else False, allUsers))
+        if filteredResult == []:
+            return JSONResponse(status_code = 404, content = {"status": "ERROR", "errorDetail": "User not found"})
+        elif filteredResult[0].user_metadata.get("email_verified") == False:
+            return JSONResponse(status_code = 401, content = {"status": "ERROR", "errorDetail": "Email not verified"})
+        else:
+            allData = pd.DataFrame(client.table("Users").select("userId", "email", "password", "onboarded").execute().data, columns = ["userId", "email", "password", "onboarded"])
+            dataSlice = allData[allData["email"] == loginDetails.email].iloc[0, :]
+            if dataSlice["password"] != hashedPassword:
+                return JSONResponse(status_code = 401, content = {"status": "ERROR", "errorDetail": "Invalid email or password"})
+            else:
+                sessionStartTime = str(datetime.datetime.utcnow())
+                dictItems = {
+                    "userId": dataSlice["userId"],
+                    "email": loginDetails.email,
+                    "password": hashedPassword,
+                    "sessionStartTime": sessionStartTime
+                }
+                accessToken = jwt.encode(dictItems, os.environ["SECRET_KEY"], "HS256")
+                client.table("Sessions").insert({
+                    "userId": dataSlice["userId"],
+                    "email": dataSlice["email"],
+                    "accessToken": accessToken,
+                    "sessionStartTime": sessionStartTime,
+                    "lastActivity": sessionStartTime
+                }).execute()
+                response = {
+                    "status": "SUCCESS",
+                    "userId": dataSlice["userId"],
+                    "email": dataSlice["email"],
+                    "accessToken": accessToken,
+                    "onboarded": int(dataSlice["onboarded"])
+                }
+                return JSONResponse(status_code = 200, content = response)
+    except Exception as e:
+        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")
+
+@router.post("/loginWithProvider")
+async def loginWithProvider(loginDetails: LoginWithProvider):
+    try:
+        passwordString = str(loginDetails.sub) + str(loginDetails.id) + str(loginDetails.nodeId) + os.environ["SECRET_KEY"]
+        hashedPassword = hashlib.md5(passwordString.encode("utf-8")).hexdigest()
+        registeredUsers = pd.DataFrame(client.table("Users").select("userId", "email", "password", "onboarded").execute().data, columns = ["userId", "email", "password", "onboarded"])
+        if loginDetails.email not in registeredUsers["email"].unique():
+            response = client.table(table_name = "Users").insert(
+                {
+                    "email": loginDetails.email,
+                    "password": hashedPassword
+                }
+            ).execute()
+            registeredUsers = pd.DataFrame(client.table("Users").select("userId", "email", "password", "onboarded").execute().data, columns = ["userId", "email", "password", "onboarded"])
+        else:
+            pass
+        dataSlice = registeredUsers[registeredUsers["email"] == loginDetails.email].iloc[0, :]
+        sessionStartTime = str(datetime.datetime.utcnow())
+        dictItems = {
+            "userId": dataSlice["userId"],
+            "email": loginDetails.email,
+            "password": hashedPassword,
+            "sessionStartTime": sessionStartTime
+        }
+        accessToken = jwt.encode(dictItems, os.environ["SECRET_KEY"], "HS256")
+        client.table("Sessions").insert({
+            "userId": dataSlice["userId"],
+            "email": dataSlice["email"],
+            "accessToken": accessToken,
+            "sessionStartTime": sessionStartTime,
+            "lastActivity": sessionStartTime
+        }).execute()
+        response = {
+            "status": "SUCCESS",
+            "userId": dataSlice["userId"],
+            "email": dataSlice["email"],
+            "accessToken": accessToken,
+            "onboarded": int(dataSlice["onboarded"])
+        }
+        return JSONResponse(status_code = 200, content = response)
+    except Exception as e:
+        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")
+
+@router.post("/onboarding")
+async def onboarding(onboardingDetails: OnboardingDetails, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
+    try:
+        if verifyToken(token = credentials.credentials):
+            dataToUpdate = {
+                "onboarded": 1,
+                "usage": onboardingDetails.usage,
+                "fullName": onboardingDetails.fullName,
+                "role": onboardingDetails.role,
+                "companyName": onboardingDetails.companyName,
+                "industryType": onboardingDetails.industryType,
+                "companySize": onboardingDetails.companySize,
+                "country": onboardingDetails.country,
+                "goals": onboardingDetails.goals,
+                "source": onboardingDetails.source
+            }
+            response = client.table("Users").update(dataToUpdate).eq("email", onboardingDetails.email).execute()
+            return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "message": "User onboarded successfully."})
+        else:
+            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
+    except Exception as e:
+        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")
+
+@router.get("/initiatePasswordReset")
+async def initiatePasswordReset(emailId: str):
+    try:
+        client.auth.reset_password_for_email(
+            emailId,
+            {
+                "redirect_to": "http://localhost:3000/login/create-new-password"
+            }
+        )
+        return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "message": "Password reset initiated successfully."})
+    except Exception as e:
+        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")
+
+@router.patch("/resetPassword")
+async def resetPassword(email: str, newCredentials: NewCredentials):
+    try:
+        passwordString = newCredentials.newPassword + os.environ["SECRET_KEY"]
+        hashedPassword = hashlib.md5(passwordString.encode("utf-8")).hexdigest()
+        allUsers = list()
+        page = 1
+        while True:
+            response = client.auth.admin.list_users(page = page, per_page = 1000)
+            if response == []:
+                break
+            else:
+                allUsers.extend(response)
+                page += 1
+        filteredResult = list(filter(lambda x: True if x.email == email else False, allUsers))[0]
+        response = client.auth.admin.update_user_by_id(
+            filteredResult.id,
+            {"password": hashedPassword}
+        )
+        response = client.table("Users").update({"password": hashedPassword}).eq("email", email).execute()
+        return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "message": "Password updated successfully!"})
+    except Exception as e:
+        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")
+
+@router.get("/logout")
+async def logout(credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
+    try:
+        if verifyToken(token = credentials.credentials):
+            response = client.table("Sessions").delete().eq("accessToken", credentials.credentials).execute()
+            return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "message": "Session logged out successfully"})
+        else:
+            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
+    except Exception as e:
+        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")
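
Every protected route above defers to `verifyToken` from analyticsHub/utils/functions.py, whose body is not visible in this excerpt. Based on how tokens are minted here (HS256 over userId/email/password/sessionStartTime) and stored in the Sessions table, a plausible sketch of the check is below; this is an assumption, not the committed implementation:

    # Assumed shape of verifyToken; the real one lives in analyticsHub/utils/functions.py.
    from jose import jwt, JWTError
    import os

    def verifyToken(token: str) -> bool:
        try:
            jwt.decode(token, os.environ["SECRET_KEY"], algorithms = ["HS256"])  # signature check
            # `client` here would be the module's Supabase client, as in the routers.
            session = client.table("Sessions").select("accessToken").eq("accessToken", token).execute()
            return bool(session.data)  # token must also map to a live session row
        except JWTError:
            return False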
analyticsHub/routers/blends.py
ADDED
@@ -0,0 +1,118 @@
+from ..models.requestModels import CreateDataBlend, GetFieldsFromSources
+from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
+from fastapi.exceptions import HTTPException
+from fastapi.responses import JSONResponse
+from ..utils.functions import verifyToken
+from fastapi import APIRouter, Depends
+from urllib.request import urlopen
+from supabase import create_client
+from typing import Annotated
+import json
+import os
+import io
+
+router = APIRouter()
+security = HTTPBearer()
+client = create_client(
+    supabase_url = os.environ["SUPABASE_URL"],
+    supabase_key = os.environ["SUPABASE_KEY"]
+)
+
+@router.post("/createDataBlend")
+async def createDataBlend(blendDetails: CreateDataBlend, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
+    try:
+        if verifyToken(token = credentials.credentials):
+            joinConfig = {
+                "tables": blendDetails.tables,
+                "blendOn": blendDetails.blendOn,
+                "joinTypes": blendDetails.joinTypes
+            }
+            project = client.table("Projects").select("projectId", "projectName", "dataTables").eq("projectId", blendDetails.projectId).execute().data[0]
+            if "blendConfig.json" in [x.get("name") for x in client.storage.from_("AnalyticsHub").list(path = blendDetails.projectId)]:
+                fileUrl = os.environ["FILE_URL"].format(projectId = blendDetails.projectId, fileName = "blendConfig.json").replace(".parquet", "")
+                blendConfig = json.loads(urlopen(fileUrl).read())
+                blendConfig[blendDetails.blendName] = joinConfig
+            else:
+                blendConfig = {blendDetails.blendName: joinConfig}
+            with io.BytesIO() as buffer:
+                buffer.write(json.dumps(blendConfig, indent=4).encode("utf-8"))
+                buffer.seek(0)
+                _ = client.storage.from_("AnalyticsHub").upload(path = f"{blendDetails.projectId}/blendConfig.json", file = buffer.getvalue(), file_options = {"upsert": "true"})
+            if blendDetails.blendName in project["dataTables"].split(", "):
+                pass
+            elif project["dataTables"]:
+                projectData = project["dataTables"] + f", {blendDetails.blendName}"
+                _ = client.table("Projects").update({"dataTables": projectData}).eq("projectId", blendDetails.projectId).execute()
+            else:
+                projectData = blendDetails.blendName
+                _ = client.table("Projects").update({"dataTables": projectData}).eq("projectId", blendDetails.projectId).execute()
+            return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "message": "Blend created successfully."})
+        else:
+            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
+    except Exception as e:
+        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")
+
+@router.get("/getDataSources")
+async def getDataSources(projectId: str, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
+    try:
+        if verifyToken(token = credentials.credentials):
+            if "blendConfig.json" in [x.get("name") for x in client.storage.from_("AnalyticsHub").list(path = projectId)]:
+                blendConfigUrl = os.environ["FILE_URL"].format(projectId = projectId, fileName = "blendConfig.json").replace(".parquet", "")
+                blendConfig = json.loads(urlopen(blendConfigUrl).read())
+                blendedTables = list(blendConfig.keys())
+                blends = [
+                    {"blendName": x, "tables": blendConfig[x].get("tables"), "joinTypes": blendConfig[x].get("joinTypes"), "blendOn": blendConfig[x].get("blendOn")} for x in blendedTables
+                ]
+            else:
+                blends, blendedTables = list(), list()
+            metadataUrl = os.environ["FILE_URL"].format(projectId = projectId, fileName = "metadata.json").replace(".parquet", "")
+            metadata = json.loads(urlopen(metadataUrl).read())
+            rawTables = list(metadata.keys())
+            dataSources = {
+                "blends": blends,
+                "rawTables": rawTables,
+                "blendedTables": blendedTables
+            }
+            return JSONResponse(status_code = 200, content = dataSources)
+        else:
+            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
+    except Exception as e:
+        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")
+
+@router.post("/getFieldsFromSources")
+async def getFieldsFromSources(details: GetFieldsFromSources, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
+    try:
+        if verifyToken(token = credentials.credentials):
+            blendConfigUrl = os.environ["FILE_URL"].format(projectId = details.projectId, fileName = "blendConfig.json").replace(".parquet", "")
+            metadataUrl = os.environ["FILE_URL"].format(projectId = details.projectId, fileName = "metadata.json").replace(".parquet", "")
+            blendConfig = json.loads(urlopen(blendConfigUrl).read())
+            metadata = json.loads(urlopen(metadataUrl).read())
+            blendedTables = list(blendConfig.keys())
+            allFields = list()
+            if details.tableName in blendedTables:
+                tablesUsed = blendConfig[details.tableName].get("tables")
+                for table in tablesUsed:
+                    allFields.extend(metadata[table]["columns"])
+            else:
+                allFields = metadata[details.tableName]["columns"]
+            numericals = ["int64", "float64", "float32", "int32"]
+            categoricals = ["bool", "category", "object", "string"]
+            datetimeTypes = ["datetime64[ns]", "datetime64[ns, tz]"]
+            numericalColumns, categoricalColumns, datetimeColumns = list(), list(), list()
+            for column in allFields:
+                if column.get("type") in categoricals:
+                    categoricalColumns.append(column["name"])
+                elif column["type"] in numericals:
+                    numericalColumns.append(column["name"])
+                elif column["type"] in datetimeTypes:
+                    datetimeColumns.append(column["name"])
+            response = {
+                "numericalColumns": numericalColumns,
+                "categoricalColumns": categoricalColumns,
+                "datetimeColumns": datetimeColumns
+            }
+            return JSONResponse(status_code = 200, content = response)
+        else:
+            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
+    except Exception as e:
+        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")
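
End to end, creating a blend is a single authenticated POST. A sketch with `requests`; the base URL, route prefix, and token are placeholders, since app.py is not shown here:

    import requests

    resp = requests.post(
        "http://localhost:8000/createDataBlend",  # hypothetical base URL / prefix
        headers = {"Authorization": "Bearer <accessToken>"},
        json = {
            "projectId": "p-123",
            "blendName": "salesWithRegions",
            "tables": ["sales", "regions"],
            "joinTypes": ["inner"],
            "blendOn": ["regionId"],
        },
    )
    print(resp.status_code, resp.json())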
analyticsHub/routers/dashboard.py
ADDED
@@ -0,0 +1,104 @@
+from ..models.requestModels import CreatePage, ExportToDashboard
+from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
+from fastapi.exceptions import HTTPException
+from fastapi.responses import JSONResponse
+from ..utils.functions import verifyToken
+from fastapi import APIRouter, Depends
+from supabase import create_client
+from urllib.request import urlopen
+from typing import Annotated
+import uuid
+import json
+import os
+import io
+
+router = APIRouter()
+security = HTTPBearer()
+client = create_client(
+    supabase_url = os.environ["SUPABASE_URL"],
+    supabase_key = os.environ["SUPABASE_KEY"]
+)
+
+@router.post("/createPage")
+async def createPage(details: CreatePage, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
+    try:
+        if verifyToken(token = credentials.credentials):
+            pageId = str(uuid.uuid4())
+            if "dashboardConfig.json" in [x.get("name") for x in client.storage.from_("AnalyticsHub").list(path = details.projectId)]:
+                fileUrl = os.environ["FILE_URL"].format(projectId = details.projectId, fileName = "dashboardConfig.json").replace(".parquet", "")
+                dashboardConfig = json.loads(urlopen(fileUrl).read())
+                dashboardConfig[pageId] = {"name": details.pageName, "widgets": []}
+            else:
+                dashboardConfig = {pageId: {"name": details.pageName, "widgets": []}}
+            with io.BytesIO() as buffer:
+                buffer.write(json.dumps(dashboardConfig, indent=4).encode("utf-8"))
+                buffer.seek(0)
+                _ = client.storage.from_("AnalyticsHub").upload(path = f"{details.projectId}/dashboardConfig.json", file = buffer.getvalue(), file_options = {"upsert": "true"})
+            return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "pageId": pageId})
+        else:
+            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
+    except Exception as e:
+        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")
+
+@router.get("/getAllPages")
+async def getAllPages(projectId: str, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
+    try:
+        if verifyToken(token = credentials.credentials):
+            if "dashboardConfig.json" in [x.get("name") for x in client.storage.from_("AnalyticsHub").list(path = projectId)]:
+                fileUrl = os.environ["FILE_URL"].format(projectId = projectId, fileName = "dashboardConfig.json").replace(".parquet", "")
+                dashboardConfig = json.loads(urlopen(fileUrl).read())
+                pages = [dashboardConfig[x]["name"] for x in dashboardConfig.keys()]
+            else:
+                pages = list()
+            return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "pages": pages})
+        else:
+            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
+    except Exception as e:
+        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")
+
+@router.post("/exportToDashboard")
+async def exportToDashboard(details: ExportToDashboard, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
+    try:
+        if verifyToken(token = credentials.credentials):
+            fileUrl = os.environ["FILE_URL"].format(projectId = details.projectId, fileName = "dashboardConfig.json").replace(".parquet", "")
+            dashboardConfig = json.loads(urlopen(fileUrl).read())
+            # Locate the page by display name and remember its id, so the update
+            # is written back under the page's key rather than its name.
+            for pageId, pageDict in dashboardConfig.items():
+                if pageDict.get("name") == details.page:
+                    break
+            widgetId = str(uuid.uuid4())
+            newWidget = {
+                "id": widgetId,
+                "chartType": details.chartType,
+                "title": details.title,
+                "xLabels": details.xLabels,
+                "yLabels": details.yLabels,
+                "data": details.data,
+                "layout": details.layout
+            }
+            pageDict["widgets"].append(newWidget)
+            dashboardConfig[pageId] = pageDict
+            with io.BytesIO() as buffer:
+                buffer.write(json.dumps(dashboardConfig, indent=4).encode("utf-8"))
+                buffer.seek(0)
+                _ = client.storage.from_("AnalyticsHub").upload(path = f"{details.projectId}/dashboardConfig.json", file = buffer.getvalue(), file_options = {"upsert": "true"})
+            return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "widgetId": widgetId})
+        else:
+            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
+    except Exception as e:
+        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")
+
+@router.get("/getData")
+async def getData(projectId: str, page: str, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
+    try:
+        if verifyToken(token = credentials.credentials):
+            fileUrl = os.environ["FILE_URL"].format(projectId = projectId, fileName = "dashboardConfig.json").replace(".parquet", "")
+            dashboardConfig = json.loads(urlopen(fileUrl).read())
+            pageInfo = dashboardConfig.get(page)
+            pageInfo["id"] = page
+            return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "pageData": pageInfo})
+        else:
+            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
+    except Exception as e:
+        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")
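
The dashboard config is a single JSON object keyed by page id, with each page holding a `widgets` list, so the createPage -> exportToDashboard -> getData flow round-trips through that one file. An illustrative sketch of the stored shape (ids shortened, values invented):

    # Illustrative dashboardConfig.json contents after one export.
    dashboardConfig = {
        "3f2b...-pageId": {
            "name": "Revenue",
            "widgets": [
                {
                    "id": "9ac1...-widgetId",
                    "chartType": "bar",
                    "title": "Sales by month",
                    "xLabels": ["Jan", "Feb"],
                    "yLabels": ["sales"],
                    "data": {"sales": [120, 140]},
                    "layout": {"x": 0, "y": 0, "w": 6, "h": 4},
                }
            ],
        }
    }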
analyticsHub/routers/dataLoader.py
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from ..models.requestModels import DeleteTable, LoadMySQLorPostgreSQL, LoadMongoDB
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from fastapi import APIRouter, Depends, UploadFile, File, Form
from fastapi.exceptions import HTTPException
from pymongo.mongo_client import MongoClient
from fastapi.responses import JSONResponse
from ..utils.functions import verifyToken
from pymongo.server_api import ServerApi
from sqlalchemy import create_engine
from urllib.request import urlopen
from supabase import create_client
import fireducks.pandas as pd
from typing import Annotated
import tempfile
import json
import io
import os

router = APIRouter()
security = HTTPBearer()
client = create_client(
    supabase_url = os.environ["SUPABASE_URL"],
    supabase_key = os.environ["SUPABASE_KEY"]
)

@router.post("/loadCsvData")
async def loadCsvData(projectId: Annotated[str, Form()], file: Annotated[UploadFile, File()], credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
    try:
        if verifyToken(token = credentials.credentials):
            project = client.table("Projects").select("projectId", "projectName", "dataTables").eq("projectId", projectId).execute().data[0]
            with tempfile.NamedTemporaryFile(delete = True, suffix = ".parquet") as temp:
                pd.read_csv(io.BytesIO(await file.read())).to_parquet(temp.name, compression = "snappy")
                _ = client.storage.from_("AnalyticsHub").upload(
                    file = temp.name,
                    path = f"{projectId}/{os.path.splitext(file.filename)[0] + '.parquet'}"
                )
            if project["dataTables"]:
                projectData = project["dataTables"] + f", {os.path.splitext(file.filename)[0]}"
            else:
                projectData = os.path.splitext(file.filename)[0]
            _ = client.table("Projects").update({"dataTables": projectData}).eq("projectId", projectId).execute()
            return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "message": "Data loaded successfully"})
        else:
            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
    except Exception as e:
        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")

@router.post("/loadExcelData")
async def loadExcelData(projectId: Annotated[str, Form()], file: Annotated[UploadFile, File()], credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)], sheetName: Annotated[str | None, Form()] = None):
    try:
        if verifyToken(token = credentials.credentials):
            project = client.table("Projects").select("projectId", "projectName", "dataTables").eq("projectId", projectId).execute().data[0]
            with tempfile.NamedTemporaryFile(delete = True, suffix = ".parquet") as temp:
                # sheet_name = None would load every sheet as a dict, so fall back to the first sheet
                pd.read_excel(io.BytesIO(await file.read()), sheet_name = sheetName if sheetName is not None else 0).to_parquet(temp.name, compression = "snappy")
                if sheetName is None:
                    tableName = os.path.splitext(file.filename)[0]
                else:
                    tableName = os.path.splitext(file.filename)[0] + '_' + sheetName
                fileName = tableName + '.parquet'
                _ = client.storage.from_("AnalyticsHub").upload(
                    file = temp.name,
                    path = f"{projectId}/{fileName}"
                )
            # store the bare table name (no extension) so deleteTable can match it later
            if project["dataTables"]:
                projectData = project["dataTables"] + f", {tableName}"
            else:
                projectData = tableName
            _ = client.table("Projects").update({"dataTables": projectData}).eq("projectId", projectId).execute()
            return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "message": "Data loaded successfully"})
        else:
            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
    except Exception as e:
        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")

@router.post("/loadMySql")
async def loadMySql(connection: LoadMySQLorPostgreSQL, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
    try:
        if verifyToken(token = credentials.credentials):
            project = client.table("Projects").select("projectId", "projectName", "dataTables").eq("projectId", connection.projectId).execute().data[0]
            with tempfile.NamedTemporaryFile(delete = True, suffix = ".parquet") as temp:
                connStr = f"mysql+pymysql://{connection.user}:{connection.password}@{connection.host}:{connection.port}/{connection.db}"
                engine = create_engine(connStr)
                pd.read_sql(f"SELECT * FROM {connection.table}", engine).to_parquet(temp.name, compression = "snappy")
                _ = client.storage.from_("AnalyticsHub").upload(
                    file = temp.name,
                    path = f"{connection.projectId}/{connection.table + '.parquet'}"
                )
            # store the bare table name (no extension) so deleteTable can match it later
            if project["dataTables"]:
                projectData = project["dataTables"] + f", {connection.table}"
            else:
                projectData = connection.table
            _ = client.table("Projects").update({"dataTables": projectData}).eq("projectId", connection.projectId).execute()
            return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "message": "Data loaded successfully"})
        else:
            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
    except Exception as e:
        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")

@router.post("/loadPostgreSQL")
async def loadPostgreSQL(connection: LoadMySQLorPostgreSQL, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
    try:
        if verifyToken(token = credentials.credentials):
            project = client.table("Projects").select("projectId", "projectName", "dataTables").eq("projectId", connection.projectId).execute().data[0]
            with tempfile.NamedTemporaryFile(delete = True, suffix = ".parquet") as temp:
                connStr = f"postgresql+psycopg2://{connection.user}:{connection.password}@{connection.host}:{connection.port}/{connection.db}"
                engine = create_engine(connStr)
                pd.read_sql(f"SELECT * FROM {connection.table}", engine).to_parquet(temp.name, compression = "snappy")
                _ = client.storage.from_("AnalyticsHub").upload(
                    file = temp.name,
                    path = f"{connection.projectId}/{connection.table + '.parquet'}"
                )
            # store the bare table name (no extension) so deleteTable can match it later
            if project["dataTables"]:
                projectData = project["dataTables"] + f", {connection.table}"
            else:
                projectData = connection.table
            _ = client.table("Projects").update({"dataTables": projectData}).eq("projectId", connection.projectId).execute()
            return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "message": "Data loaded successfully"})
        else:
            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
    except Exception as e:
        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")

@router.post("/loadMongoDB")
async def loadMongoDB(connection: LoadMongoDB, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
    try:
        if verifyToken(token = credentials.credentials):
            project = client.table("Projects").select("projectId", "projectName", "dataTables").eq("projectId", connection.projectId).execute().data[0]
            with tempfile.NamedTemporaryFile(delete = True, suffix = ".parquet") as temp:
                mongoClient = MongoClient(connection.connectionString, server_api = ServerApi('1'))
                records = list(mongoClient[connection.db][connection.collection].find())
                for record in records: record.pop("_id")
                pd.DataFrame(records).to_parquet(temp.name, compression = "snappy")
                _ = client.storage.from_("AnalyticsHub").upload(
                    file = temp.name,
                    path = f"{connection.projectId}/{connection.collection + '.parquet'}"
                )
            # store the bare collection name (no extension) so deleteTable can match it later
            if project["dataTables"]:
                projectData = project["dataTables"] + f", {connection.collection}"
            else:
                projectData = connection.collection
            _ = client.table("Projects").update({"dataTables": projectData}).eq("projectId", connection.projectId).execute()
            return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "message": "Data loaded successfully"})
        else:
            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
    except Exception as e:
        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")

@router.delete("/deleteTable")
async def deleteTable(tableDetails: DeleteTable, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
    try:
        if verifyToken(token = credentials.credentials):
            _ = client.storage.from_("AnalyticsHub").remove(f"{tableDetails.projectId}/{tableDetails.tableName}" + ".parquet")
            projectTables = client.table("Projects").select("dataTables").eq("projectId", tableDetails.projectId).execute().data[0]["dataTables"]
            projectTables = projectTables.split(", ")
            projectTables.remove(tableDetails.tableName)
            projectTables = ", ".join(projectTables)
            _ = client.table("Projects").update({"dataTables": projectTables}).eq("projectId", tableDetails.projectId).execute()
            fileUrl = os.environ["FILE_URL"].format(projectId = tableDetails.projectId, fileName = "metadata.json").replace(".parquet", "")
            jsonData = json.loads(urlopen(fileUrl).read())
            jsonData.pop(tableDetails.tableName, None)  # tolerate tables that never got metadata
            with io.BytesIO() as buffer:
                buffer.write(json.dumps(jsonData, indent=4).encode("utf-8"))
                buffer.seek(0)
                client.storage.from_("AnalyticsHub").upload(path = f"{tableDetails.projectId}/metadata.json", file = buffer.getvalue(), file_options = {"upsert": "true"})
            return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "message": "Table deleted successfully"})
        else:
            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
    except Exception as e:
        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")
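
A sketch of how a client might drive the loader endpoints above, assuming a running instance and a valid bearer token (both placeholders); the `/loaders` prefix comes from app.py.

```python
import requests

BASE_URL = "http://localhost:8000/api/latest"  # assumed deployment URL
headers = {"Authorization": "Bearer <token>"}  # hypothetical bearer token

# Upload a CSV; it is converted to parquet server-side and registered under its file stem
with open("sales.csv", "rb") as f:
    r = requests.post(
        f"{BASE_URL}/loaders/loadCsvData",
        data = {"projectId": "<project uuid>"},
        files = {"file": ("sales.csv", f, "text/csv")},
        headers = headers,
    )
print(r.json())

# Remove the table again (tableName is the bare name, without ".parquet")
r = requests.delete(
    f"{BASE_URL}/loaders/deleteTable",
    json = {"projectId": "<project uuid>", "tableName": "sales"},
    headers = headers,
)
print(r.json())
```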

analyticsHub/routers/projectManager.py
ADDED
@@ -0,0 +1,200 @@
from ..models.requestModels import UpdateProjectState, CreateProject, EditMetadata
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from langchain_experimental.utilities import PythonREPL
from ..utils.functions import verifyToken, readYaml
from fastapi.exceptions import HTTPException
from fastapi.responses import JSONResponse
from ..components import replManager
from fastapi import APIRouter, Depends
from supabase import create_client
from urllib.request import urlopen
from typing import Annotated
from . import pipeline
from jose import jwt
import pandas as pd
import uuid
import json
import os
import io

router = APIRouter()
security = HTTPBearer()
client = create_client(
    supabase_url = os.environ["SUPABASE_URL"],
    supabase_key = os.environ["SUPABASE_KEY"]
)

@router.post("/createProject")
async def createProject(projectDetails: CreateProject, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
    try:
        if verifyToken(token = credentials.credentials):
            projectId = str(uuid.uuid4())
            replManager.manager[projectId] = PythonREPL()
            _ = replManager.manager[projectId].run(readYaml("params.yaml")["redisFunctionCode"])
            _ = replManager.manager[projectId].run(readYaml("params.yaml")["jsonSerializer"])
            _ = replManager.manager[projectId].run("globals()['__name__'] = '__main__'")
            _ = replManager.manager[projectId].run("globals().update(locals())")
            decodedToken = jwt.decode(
                credentials.credentials,
                os.environ["SECRET_KEY"],
                algorithms = ["HS256"]
            )
            _ = client.table("Projects").insert({
                "projectId": projectId,
                "projectName": projectDetails.projectName,
                "projectDescription": projectDetails.projectDescription,
                "ownerUserId": decodedToken["userId"],
                "ownerUserMail": decodedToken["email"]
            }).execute()
            return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "projectId": projectId, "message": "Project created successfully"})
        else:
            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
    except Exception as e:
        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")

@router.get("/listProjects")
async def listProjects(credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
    try:
        if verifyToken(token = credentials.credentials):
            decodedToken = jwt.decode(
                credentials.credentials,
                os.environ["SECRET_KEY"],
                algorithms = ["HS256"]
            )
            data = pd.DataFrame(client.table("Projects").select("*").execute().data)
            data = data[data["ownerUserId"] == decodedToken["userId"]]
            return JSONResponse(status_code = 200, content = {"projects": data.to_dict(orient = "records")})
        else:
            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
    except Exception as e:
        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")

@router.patch("/updateBookmark")
async def updateBookmark(updateBookmarkDetails: UpdateProjectState, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
    try:
        if verifyToken(token = credentials.credentials):
            if updateBookmarkDetails.action == "add":
                _ = client.table("Projects").update({"isBookmarked": 1}).eq("projectId", updateBookmarkDetails.projectId).execute()
            else:
                _ = client.table("Projects").update({"isBookmarked": 0}).eq("projectId", updateBookmarkDetails.projectId).execute()
            return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "message": "Project bookmark status updated successfully"})
        else:
            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
    except Exception as e:
        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")

@router.patch("/updateArchive")
async def updateArchive(updateArchiveDetails: UpdateProjectState, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
    try:
        if verifyToken(token = credentials.credentials):
            if updateArchiveDetails.action == "add":
                _ = client.table("Projects").update({"isArchived": 1}).eq("projectId", updateArchiveDetails.projectId).execute()
            else:
                _ = client.table("Projects").update({"isArchived": 0}).eq("projectId", updateArchiveDetails.projectId).execute()
            return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "message": "Project archive status updated successfully"})
        else:
            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
    except Exception as e:
        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")

@router.patch("/updateTrash")
async def updateTrash(updateTrashDetails: UpdateProjectState, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
    try:
        if verifyToken(token = credentials.credentials):
            if updateTrashDetails.action == "add":
                _ = client.table("Projects").update({"isTrash": 1}).eq("projectId", updateTrashDetails.projectId).execute()
            else:
                _ = client.table("Projects").update({"isTrash": 0}).eq("projectId", updateTrashDetails.projectId).execute()
            return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "message": "Project trash status updated successfully"})
        else:
            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
    except Exception as e:
        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")

@router.post("/generateMetadata/{projectId}")
async def generateMetadata(projectId: str, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
    try:
        if verifyToken(token = credentials.credentials):
            filenames = [x.get("name") for x in client.storage.from_("AnalyticsHub").list(projectId)]
            if "metadata.json" in filenames:
                fileUrl = os.environ["FILE_URL"].format(projectId = projectId, fileName = "metadata.json").replace(".parquet", "")
                jsonData = json.loads(urlopen(fileUrl).read())
                jsonDataTables = set(jsonData.keys())
                newMetadata = pipeline.generateMetadata(projectId = projectId)
                newMetadataTables = set(newMetadata.keys())
                newKeys = newMetadataTables.difference(jsonDataTables)
                for key in newKeys: jsonData[key] = newMetadata[key]
            else:
                jsonData = pipeline.generateMetadata(projectId = projectId)
            _ = replManager.manager[projectId].run(f'metadata = {jsonData}')
            with io.BytesIO() as buffer:
                buffer.write(json.dumps(jsonData, indent=4).encode("utf-8"))
                buffer.seek(0)
                client.storage.from_("AnalyticsHub").upload(path = f"{projectId}/metadata.json", file = buffer.getvalue(), file_options = {"upsert": "true"})
            return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "metadata": jsonData})
        else:
            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
    except Exception as e:
        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")

@router.get("/getMetadata/{projectId}")
async def getMetadata(projectId: str, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
    try:
        if verifyToken(token = credentials.credentials):
            fileUrl = os.environ["FILE_URL"].format(projectId = projectId, fileName = "metadata.json").replace(".parquet", "")
            jsonData = json.loads(urlopen(fileUrl).read())
            newJson = {"tables": []}
            for key in jsonData:
                tableJson = {
                    "tableName": key,
                    "tableDesc": jsonData.get(key).get("description"),
                    "shape": jsonData.get(key).get("shape"),
                    "columns": jsonData.get(key).get("columns")
                }
                newJson.get("tables").append(tableJson)
            return JSONResponse(status_code = 200, content = newJson)
        else:
            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
    except Exception as e:
        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")

@router.put("/editMetadata")
async def editMetadata(modifiedMetadata: EditMetadata, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
    try:
        if verifyToken(token = credentials.credentials):
            fileUrl = os.environ["FILE_URL"].format(projectId = modifiedMetadata.projectId, fileName = "metadata.json").replace(".parquet", "")
            jsonData = json.loads(urlopen(fileUrl).read())
            if modifiedMetadata.tableDescription and not (modifiedMetadata.columnName or modifiedMetadata.columnDescription):
                jsonData[modifiedMetadata.tableName]["description"] = modifiedMetadata.tableDescription
            elif (modifiedMetadata.columnName and modifiedMetadata.columnDescription) and not modifiedMetadata.tableDescription:
                columns = jsonData[modifiedMetadata.tableName]["columns"]
                for column in columns:
                    if column["name"] == modifiedMetadata.columnName:
                        idx = columns.index(column)
                        columns[idx]["description"] = modifiedMetadata.columnDescription
                jsonData[modifiedMetadata.tableName]["columns"] = columns
            else:
                raise AttributeError("Invalid combination of parameters provided")
            with io.BytesIO() as buffer:
                buffer.write(json.dumps(jsonData, indent=4).encode("utf-8"))
                buffer.seek(0)
                client.storage.from_("AnalyticsHub").upload(path = f"{modifiedMetadata.projectId}/metadata.json", file = buffer.getvalue(), file_options = {"upsert": "true"})
            return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "metadata": jsonData})
        else:
            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
    except Exception as e:
        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")

@router.delete("/deleteProject")
async def deleteProject(projectId: str, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
    try:
        if verifyToken(token = credentials.credentials):
            _ = client.table("Projects").delete().eq("projectId", projectId).execute()
            allFiles = client.storage.from_("AnalyticsHub").list(projectId)
            fileNames = [os.path.join(projectId, x.get("name")) for x in allFiles]
            _ = client.storage.from_("AnalyticsHub").remove(fileNames)
            return JSONResponse(status_code = 200, content = {"status": "SUCCESS", "message": "Project deleted successfully"})
        else:
            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
    except Exception as e:
        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")
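
A sketch of the typical project lifecycle against the router above; URL, token, and payload values are placeholders. The `/projects` prefix and the request fields come from app.py and the handlers.

```python
import requests

BASE_URL = "http://localhost:8000/api/latest"  # assumed deployment URL
headers = {"Authorization": "Bearer <token>"}  # hypothetical bearer token

# 1) Create a project (this also provisions its Python REPL sandbox)
r = requests.post(f"{BASE_URL}/projects/createProject",
                  json = {"projectName": "Demo", "projectDescription": "Scratch project"},
                  headers = headers)
projectId = r.json()["projectId"]

# 2) After loading some tables, generate (or extend) metadata.json for them
r = requests.post(f"{BASE_URL}/projects/generateMetadata/{projectId}", headers = headers)

# 3) Read the metadata back in the flattened {"tables": [...]} shape
r = requests.get(f"{BASE_URL}/projects/getMetadata/{projectId}", headers = headers)
print(r.json())
```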

analyticsHub/routers/reportingTool.py
ADDED
@@ -0,0 +1,48 @@
from ..models.requestModels import GenerateChartInput, PanelChartDetails
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from fastapi.exceptions import HTTPException
from fastapi.responses import JSONResponse
from ..utils.functions import verifyToken
from fastapi import APIRouter, Depends
from urllib.request import urlopen
from typing import Annotated
from . import pipeline
import json
import os

router = APIRouter()
security = HTTPBearer()

@router.post("/generateChart")
async def generateChart(chartDetails: GenerateChartInput, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
    try:
        if verifyToken(token = credentials.credentials):
            fileUrl = os.environ["FILE_URL"].format(projectId = chartDetails.projectId, fileName = "metadata.json").replace(".parquet", "")
            response = pipeline.generateChart(
                inputQuery = chartDetails.inputQuery,
                projectId = chartDetails.projectId,
                metadata = json.loads(urlopen(fileUrl).read())
            )
            return JSONResponse(status_code = 200, content = response)
        else:
            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
    except Exception as e:
        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")

@router.post("/generatePanelChart")
async def generatePanelChart(panelChartDetails: PanelChartDetails, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
    try:
        if verifyToken(token = credentials.credentials):
            response = pipeline.generateChartFromPanel(
                projectId = panelChartDetails.projectId,
                chartType = panelChartDetails.chartType,
                xAxis = panelChartDetails.xAxis,
                yAxis = panelChartDetails.yAxis,
                aggregationMetric = panelChartDetails.aggregationMetric,
                dataSource = panelChartDetails.dataSource
            )
            return JSONResponse(status_code = 200, content = response)
        else:
            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
    except Exception as e:
        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")
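
A minimal sketch of calling the natural-language chart endpoint, assuming a running instance and a valid token (placeholders). The server rephrases the query, generates code, runs it in the project's sandbox, and returns chart-ready JSON.

```python
import requests

BASE_URL = "http://localhost:8000/api/latest"  # assumed deployment URL
headers = {"Authorization": "Bearer <token>"}  # hypothetical bearer token

r = requests.post(f"{BASE_URL}/reportingTool/generateChart",
                  json = {"projectId": "<project uuid>",
                          "inputQuery": "Show average order value by customer segment"},
                  headers = headers)
print(r.json())
```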

analyticsHub/routers/utilities.py
ADDED
@@ -0,0 +1,28 @@
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from ..models.requestModels import SpeechToTextModel
from fastapi.exceptions import HTTPException
from fastapi.responses import JSONResponse
from ..utils.functions import verifyToken
from fastapi import APIRouter, Depends
from supabase import create_client
from typing import Annotated
from . import pipeline
import os

router = APIRouter()
security = HTTPBearer()
client = create_client(
    supabase_url = os.environ["SUPABASE_URL"],
    supabase_key = os.environ["SUPABASE_KEY"]
)

@router.post("/getSpeechTranscript")
async def getSpeechTranscript(speechToText: SpeechToTextModel, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]):
    try:
        if verifyToken(token = credentials.credentials):
            transcriptText = pipeline.speechToText(b64String = speechToText.b64String)
            return JSONResponse(status_code = 200, content = {"transcriptionText": transcriptText})
        else:
            return JSONResponse(status_code = 498, content = {"status": "ERROR", "errorDetail": "Invalid Token"})
    except Exception as e:
        raise HTTPException(status_code = 500, detail = f"Endpoint says: {e}")
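
A sketch of posting a recording to the transcript endpoint; file name, URL, and token are placeholders, while the `b64String` field name comes from SpeechToTextModel as used above.

```python
import base64
import requests

BASE_URL = "http://localhost:8000/api/latest"  # assumed deployment URL
headers = {"Authorization": "Bearer <token>"}  # hypothetical bearer token

# The endpoint expects the audio as a base64-encoded string
with open("question.wav", "rb") as f:
    b64String = base64.b64encode(f.read()).decode("utf-8")

r = requests.post(f"{BASE_URL}/utils/getSpeechTranscript",
                  json = {"b64String": b64String},
                  headers = headers)
print(r.json()["transcriptionText"])
```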

analyticsHub/utils/__init__.py
ADDED
File without changes

analyticsHub/utils/exceptions.py
ADDED
@@ -0,0 +1,33 @@
import sys

class CustomException(Exception):
    def __init__(self, errorMessage):
        """
        Initialize a CustomException with a detailed error message.

        Args:
            errorMessage (str): The error message to be logged.
        """
        super().__init__(errorMessage)
        self.errorMessage = self.errorMessageDetail(errorMessage)

    @staticmethod
    def errorMessageDetail(error):
        """
        Generate a detailed error message.

        Args:
            error: The error object.

        Returns:
            str: A formatted error message including line number and filename.
        """
        _, _, tb = sys.exc_info()  # traceback of the exception currently being handled
        filename = tb.tb_frame.f_code.co_filename
        lineno = tb.tb_lineno
        errorMessage = "Error encountered in line no [{}], filename : [{}], saying [{}]".format(lineno, filename, error)
        return errorMessage

    def __str__(self) -> str:
        """Return the detailed error message."""
        return self.errorMessage

analyticsHub/utils/functions.py
ADDED
@@ -0,0 +1,27 @@
from supabase import create_client
import configparser
import datetime
import yaml
import os

client = create_client(
    supabase_url = os.environ["SUPABASE_URL"],
    supabase_key = os.environ["SUPABASE_KEY"]
)

def verifyToken(token: str):
    allTokens = [x["accessToken"] for x in client.table("Sessions").select("accessToken").execute().data]
    if token in allTokens:
        _ = client.table("Sessions").update({"lastActivity": str(datetime.datetime.utcnow())}).eq("accessToken", token).execute()
        return True
    else:
        return False

def readYaml(filePath: str) -> dict:
    with open(filePath, "r") as f:
        content = yaml.safe_load(f)
    return content

def getConfig(path: str) -> configparser.ConfigParser:
    config = configparser.ConfigParser()
    config.read(path)
    return config

analyticsHub/utils/logger.py
ADDED
@@ -0,0 +1,28 @@
import logging
import os

# Create a logger instance
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# Define the directory for log files
LOG_DIR = os.path.join(os.getcwd(), "logs")
os.makedirs(LOG_DIR, exist_ok=True)
LOG_FILE = os.path.join(LOG_DIR, "runningLogs.log")

# Initialize a stream handler (console output) and a file handler (log file)
streamHandler = logging.StreamHandler()
fileHandler = logging.FileHandler(LOG_FILE)

# Set the logging level for each handler
streamHandler.setLevel(logging.INFO)
fileHandler.setLevel(logging.DEBUG)

# Configure the logging format for both handlers
logFormatter = logging.Formatter("[%(asctime)s: %(levelname)s: %(module)s: %(message)s]")
streamHandler.setFormatter(logFormatter)
fileHandler.setFormatter(logFormatter)

# Add the configured handlers to the logger
logger.addHandler(streamHandler)
logger.addHandler(fileHandler)
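
Importing the module wires up both handlers; usage is then a one-liner anywhere in the package:

```python
from analyticsHub.utils.logger import logger

logger.info("visible on the console and in logs/runningLogs.log")
logger.debug("written to the log file only (the stream handler is INFO-level)")
```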

analyticsHub/workflows/__init__.py
ADDED
File without changes

analyticsHub/workflows/reportingWorkflow.py
ADDED
@@ -0,0 +1,100 @@
from ..components.failsafeAgent import FailsafeCodeGenerator
from ..components.queryRephraserAgent import QueryRephaser
from ..components.codeGeneratorAgent import CodeGenerator
from langgraph.graph import StateGraph, START, END
from typing_extensions import TypedDict
from ..components import replManager
import json

failsafeCodeGeneratorChain = FailsafeCodeGenerator().getFailsafeCodeGeneratorChain()
queryRephraseChain = QueryRephaser().getQueryRephraserChain()
codeGeneratorChain = CodeGenerator().getCodeGeneratorChain()

class State(TypedDict):
    projectId: str
    inputQuery: str
    metadata: str
    rephrasedQuery: dict  # {"rephrasedOutput": ..., "doubt": ...} as returned by the rephraser chain
    generatedCode: str
    codeOutput: str
    finalOutput: dict

class ReportingToolWorkflow:
    def __init__(self):
        pass

    def rephraseQuery(self, state: State):
        response = queryRephraseChain.invoke({
            "query": state["inputQuery"],
            "metadata": state["metadata"]
        })
        return {
            "rephrasedQuery": response
        }

    def generateCode(self, state: State):
        response = codeGeneratorChain.invoke({
            "query": state["rephrasedQuery"],
            "metadata": state["metadata"]
        })
        return {
            "generatedCode": f'fetch_data("{state["projectId"]}", '.join(response.split("fetch_data(")).replace("import pandas", "import fireducks.pandas").replace('indent=4', 'default=serializer')
        }

    def runInPythonSandbox(self, state: State):
        code = "\n".join(state["generatedCode"].split("```")[-2].split("\n")[1:])
        response = replManager.manager.get(state["projectId"]).run(code)
        return {
            "codeOutput": response
        }

    def outputEvaluationRouter(self, state: State):
        try:
            _ = json.loads(state["codeOutput"])
            return "pass"
        except json.JSONDecodeError:
            return "fail"

    def failsafe(self, state: State):
        response = failsafeCodeGeneratorChain.invoke({
            "user_query": state["rephrasedQuery"],
            "metadata_context": state["metadata"],
            "code_with_errors": state["generatedCode"],
            "error_message": state["codeOutput"]
        })
        return {
            "generatedCode": response
        }

    def formatJsonResponse(self, state: State):
        if "codeOutput" in state.keys():
            try:
                response = json.loads(state["codeOutput"])
            except Exception as e:
                response = {"error": f"Endpoint says: {e}"}
            return {
                "finalOutput": response
            }
        else:
            return {
                "finalOutput": {"response": state["rephrasedQuery"]["doubt"]}
            }

    def router(self, state: State):
        if state["rephrasedQuery"]["doubt"] is None:
            return "continue"
        else:
            return "interrupt"

    def createWorkflow(self):
        workflow = StateGraph(State)
        workflow.add_node("rephraseQuery", self.rephraseQuery)
        workflow.add_node("generateCode", self.generateCode)
        workflow.add_node("runInPythonSandbox", self.runInPythonSandbox)
        workflow.add_node("failsafe", self.failsafe)
        workflow.add_node("failsafePythonSandbox", self.runInPythonSandbox)
        workflow.add_node("formatJsonResponse", self.formatJsonResponse)
        workflow.add_edge(START, "rephraseQuery")
        workflow.add_conditional_edges("rephraseQuery", self.router, {"continue": "generateCode", "interrupt": "formatJsonResponse"})
        workflow.add_edge("generateCode", "runInPythonSandbox")
        workflow.add_conditional_edges("runInPythonSandbox", self.outputEvaluationRouter, {"pass": "formatJsonResponse", "fail": "failsafe"})
        workflow.add_edge("failsafe", "failsafePythonSandbox")
        workflow.add_edge("failsafePythonSandbox", "formatJsonResponse")
        workflow.add_edge("formatJsonResponse", END)
        workflow = workflow.compile()
        return workflow

graph = ReportingToolWorkflow()
reportingToolWorkflow = graph.createWorkflow()
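
A hypothetical direct invocation of the compiled graph, assuming the project already has a REPL registered in `replManager` and that `metadata` is the project's metadata.json content; the input values are placeholders.

```python
from analyticsHub.workflows.reportingWorkflow import reportingToolWorkflow

result = reportingToolWorkflow.invoke({
    "projectId": "<project uuid>",
    "inputQuery": "Show average order value by customer segment",
    "metadata": "<metadata.json contents>",
})
print(result["finalOutput"])  # parsed chart JSON, or {"response": <doubt>} when clarification is needed
```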

app.py
ADDED
@@ -0,0 +1,53 @@
from analyticsHub.routers import authentication, projectManager, dataLoader, reportingTool, utilities, blends, dashboard
from langchain_experimental.utilities import PythonREPL
from fastapi.middleware.cors import CORSMiddleware
from analyticsHub.utils.functions import readYaml
from analyticsHub.components import replManager
from supabase import create_client
from fastapi import FastAPI
import uvicorn
import os

client = create_client(
    supabase_url = os.environ["SUPABASE_URL"],
    supabase_key = os.environ["SUPABASE_KEY"]
)

app = FastAPI(
    title = "AnalyticsHub",
    summary = "API Endpoints for AnalyticsHub.",
    version = "1.0",
    root_path = "/api/latest",
    docs_url = "/documentation/docs",
    redoc_url = "/documentation/redoc"
)

app.add_middleware(
    CORSMiddleware,
    allow_origins = ["*"],
    allow_credentials = True,
    allow_methods = ["*"],
    allow_headers = ["*"],
)

app.include_router(authentication.router, prefix = "/auth", tags = ["Authentication"])
app.include_router(projectManager.router, prefix = "/projects", tags = ["Project Management"])
app.include_router(dataLoader.router, prefix = "/loaders", tags = ["Data Loader"])
app.include_router(blends.router, prefix = "/blends", tags = ["Blends"])
app.include_router(reportingTool.router, prefix = "/reportingTool", tags = ["Reporting Tool"])
app.include_router(dashboard.router, prefix = "/dashboard", tags = ["Dashboard"])
app.include_router(utilities.router, prefix = "/utils", tags = ["Utilities"])

@app.on_event("startup")
async def startupEvent():
    params = readYaml("params.yaml")  # read once instead of once per project
    projectIds = [x["projectId"] for x in client.table("Projects").select("projectId").execute().data]
    for projectId in projectIds:
        replManager.manager[projectId] = PythonREPL()
        _ = replManager.manager[projectId].run(params["redisFunctionCode"])
        _ = replManager.manager[projectId].run(params["jsonSerializer"])
        _ = replManager.manager[projectId].run(params["panelChartDataCode"])
        _ = replManager.manager[projectId].run("globals()['__name__'] = '__main__'")
        _ = replManager.manager[projectId].run("globals().update(locals())")

if __name__ == "__main__":
    uvicorn.run("app:app", host = "0.0.0.0", port = 8000, reload = True)
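
A minimal smoke-test sketch of the assembled app; importing `app` assumes the `SUPABASE_*` and related environment variables are set, since the routers create their clients at import time.

```python
from fastapi.testclient import TestClient
from app import app

client = TestClient(app)
response = client.get("/documentation/docs")  # served under root_path /api/latest behind a proxy
print(response.status_code)
```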

config.ini
ADDED
@@ -0,0 +1,23 @@
[QUERYREPHRASER]
model = llama-3.3-70b
temperature = 1
maxTokens = 512

[METADATAGENERATOR]
model = llama-3.3-70b
temperature = 1

[CODEGENERATOR]
model = llama-3.3-70b
temperature = 1

[FAILSAFECODEGENERATOR]
model = llama-3.3-70b
temperature = 1

[SPEECHTOTEXT]
model = whisper-large-v3-turbo

[APPLICATION]
host = 0.0.0.0
port = 8000

params.yaml
ADDED
@@ -0,0 +1,834 @@
| 1 |
+
metadataGeneratorPrompt: |
|
| 2 |
+
I have a dataset consisting of several dataframes with associated attribute information provided below.
|
| 3 |
+
|
| 4 |
+
{metadata}
|
| 5 |
+
|
| 6 |
+
Generate a `metadata.json` file that strictly adheres to the structure outlined here. The output should be a JSON block only—no additional text, explanations, or comments. Each entry in the JSON should include the dataframe's name, a description, a detailed breakdown of its columns (including their names, data types, and descriptions), and a sample row showcasing representative values.
|
| 7 |
+
|
| 8 |
+
### Notes:
|
| 9 |
+
- The terms `dataframe1`, `dataframe2`, `column1`, `column2`, etc., are placeholders and do not represent the actual names, column labels, or values from the dataset. Replace them with the real dataframe and column names provided in the dataset's metadata.
|
| 10 |
+
- Ensure all descriptions and examples provided in the output JSON are consistent with the given dataset's structure and attributes.
|
| 11 |
+
- Ensure that all the dataframes and columns are mentioned in the expected format in the output metadata.
|
| 12 |
+
|
| 13 |
+
### Input Example:
|
| 14 |
+
For each dataframe:
|
| 15 |
+
|
| 16 |
+
DATAFRAME NAME: `<dataframe1>`
|
| 17 |
+
- `column1` (dtype: `<column1 dtype>`)
|
| 18 |
+
- `column2` (dtype: `<column2 dtype>`)
|
| 19 |
+
- `column3` (dtype: `<column3 dtype>`)
|
| 20 |
+
...
|
| 21 |
+
Shape: (number of rows, number of columns)
|
| 22 |
+
Sample row:
|
| 23 |
+
| column1 | column2 | column3 |
|
| 24 |
+
|-----------|-----------|-----------|
|
| 25 |
+
| value1 | value2 | value3 |
|
| 26 |
+
|
| 27 |
+
DATAFRAME NAME: `<dataframe2>`
|
| 28 |
+
- `column1` (dtype: `<column1 dtype>`)
|
| 29 |
+
- `column2` (dtype: `<column2 dtype>`)
|
| 30 |
+
- `column3` (dtype: `<column3 dtype>`)
|
| 31 |
+
...
|
| 32 |
+
Shape: (number of rows, number of columns)
|
| 33 |
+
Sample row:
|
| 34 |
+
| column1 | column2 | column3 |
|
| 35 |
+
|-----------|-----------|-----------|
|
| 36 |
+
| value1 | value2 | value3 |
|
| 37 |
+
|
| 38 |
+
### Expected Output Format (JSON only):
|
| 39 |
+
```json
|
| 40 |
+
{{
|
| 41 |
+
"<dataframe1>": {{
|
| 42 |
+
"description": "<Description of the dataframe>",
|
| 43 |
+
"shape": "<list of type [nunmber of rows, number of columns]>",
|
| 44 |
+
"columns": [
|
| 45 |
+
{{"name": "<column1>", "type": "<column1 datatype>", "description": "<column1 description>"}},
|
| 46 |
+
{{"name": "<column2>", "type": "<column2 datatype>", "description": "<column2 description>"}},
|
| 47 |
+
...
|
| 48 |
+
],
|
| 49 |
+
"sample_row": {{
|
| 50 |
+
"<column1>": "<value1>",
|
| 51 |
+
"<column2>": "<value2>",
|
| 52 |
+
...
|
| 53 |
+
}}
|
| 54 |
+
}},
|
| 55 |
+
"<dataframe2>": {{
|
| 56 |
+
...
|
| 57 |
+
}},
|
| 58 |
+
...
|
| 59 |
+
}}
|
| 60 |
+
```
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
attributeInfoCode: |
|
| 64 |
+
import fireducks.pandas as pd
|
| 65 |
+
import os
|
| 66 |
+
{dataframeName} = pd.read_parquet(os.environ["FILE_URL"].format(projectId = "{projectId}", fileName = "{dataframeName}"))
|
| 67 |
+
attributeInfo = 'DATAFRAME NAME: {dataframeName}\\n'
|
| 68 |
+
|
| 69 |
+
for column in {dataframeName}.columns:
|
| 70 |
+
attributeInfo += '- ' + str(column) + ' (' + {dataframeName}.get(column).dtype.name + ')\\n'
|
| 71 |
+
|
| 72 |
+
attributeInfo += 'SHAPE: ' + str({dataframeName}.shape) + '\\n'
|
| 73 |
+
attributeInfo += 'SAMPLE ROW:\\n' + str({dataframeName}.loc[{dataframeName}.index[:1]].to_string()) + '\\n'
|
| 74 |
+
print(attributeInfo)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
jsonSerializer: |
|
| 78 |
+
import numpy as np
|
| 79 |
+
import fireducks.pandas as pd
|
| 80 |
+
import datetime
|
| 81 |
+
import math
|
| 82 |
+
import json
|
| 83 |
+
def serializer(obj):
|
| 84 |
+
# Handle NumPy types
|
| 85 |
+
if isinstance(obj, (np.integer)):
|
| 86 |
+
return obj.item() # Convert to native Python int
|
| 87 |
+
elif isinstance(obj, (np.floating)):
|
| 88 |
+
if np.isnan(obj) or np.isinf(obj):
|
| 89 |
+
return None # Replace NaN/Infinity with JSON-compliant null
|
| 90 |
+
return obj.item() # Convert to native Python float
|
| 91 |
+
elif isinstance(obj, np.ndarray):
|
| 92 |
+
return obj.tolist() # Convert NumPy array to list
|
| 93 |
+
elif isinstance(obj, np.datetime64):
|
| 94 |
+
return str(obj) # Convert to ISO 8601 string
|
| 95 |
+
# Handle Pandas DataFrames and Series
|
| 96 |
+
elif isinstance(obj, pd.DataFrame):
|
| 97 |
+
return obj.to_dict(orient="records") # Convert to list of dicts
|
| 98 |
+
elif isinstance(obj, pd.Series):
|
| 99 |
+
return obj.tolist() # Convert Series to list
|
| 100 |
+
# Handle datetime types
|
| 101 |
+
elif isinstance(obj, (datetime.datetime, datetime.date)):
|
| 102 |
+
return obj.isoformat() # Convert to ISO 8601 string
|
| 103 |
+
# Handle sets and tuples
|
| 104 |
+
elif isinstance(obj, (set, tuple)):
|
| 105 |
+
return list(obj)
|
| 106 |
+
# Handle complex numbers
|
| 107 |
+
elif isinstance(obj, complex):
|
| 108 |
+
return {"real": obj.real, "imag": obj.imag} # Convert to dict
|
| 109 |
+
|
| 110 |
+
redisFunctionCode: |
|
| 111 |
+
def fetch_data(projectId: str, tableName: str):
|
| 112 |
+
import fireducks.pandas as pd
|
| 113 |
+
import redis
|
| 114 |
+
import os
|
| 115 |
+
import io
|
| 116 |
+
r = redis.Redis(host=os.environ["REDIS_HOST"], port=int(os.environ["REDIS_PORT"]), password=os.environ["REDIS_PASSWORD"])
|
| 117 |
+
key = f"{projectId}::{tableName}"
|
| 118 |
+
df = r.get(key)
|
| 119 |
+
if df is None:
|
| 120 |
+
buffer = io.BytesIO()
|
| 121 |
+
df = pd.read_parquet(os.environ["FILE_URL"].format(projectId = projectId, fileName = tableName))
|
| 122 |
+
df.to_parquet(buffer, compression = "snappy")
|
| 123 |
+
r.set(name = key, value = buffer.getvalue(), ex = 60)
|
| 124 |
+
else:
|
| 125 |
+
df = pd.read_parquet(io.BytesIO(df))
|
| 126 |
+
return df

queryRephraserAgentPrompt: |
  You are a **Query Rewriter AI Agent**, ensuring user queries are **clear, valid, and executable** based on dataset metadata.

  ### **1. Understand the Query**
  - Analyze the query within the dataset context.
  - Verify feasibility:
    - Ensure required columns exist.
    - Validate joins/merges via common columns.
    - Check data type compatibility.
    - Confirm transformations are practical.
    - Verify the final transformed data can be stored in a DataFrame named `final_df`.

  ### **2. Validate the Query**
  - Return a **simple, non-technical doubt message** if the query:
    - Is unclear, logically impossible, or requires infeasible transformations.
    - Involves joins/merges without clear relationships.
    - Operates on non-existent or incompatible columns.
  - If valid, proceed to rephrasing.

  ### **3. Rephrase the Query**
  - Convert it into a **standalone, precise version** including:
    - **Objective:** Core analysis or visualization goal.
    - **Transformations:**
      - **MUST END with creating `final_df` containing the prepared data**
      - Always specify:
        1) Fetch required data using the `fetch_data` function
        2) Join/merge operations if needed
        3) Grouping/aggregation logic
        4) Column selection/renaming
        5) Final dataframe naming as `final_df`
    - **Chart Type:**
      - **MUST:** **Always analyze query intent and explicitly specify the optimal chart type** from: `line`, `scatter`, `bar`, `radar`, `bubble`, `polarArea`, `pie`, `doughnut`, `card`
      - If no chart type is specified, **carefully infer the most suitable one based on the data and visualization needs out of: `line`, `scatter`, `bar`, `radar`, `bubble`, `polarArea`, `pie`, `doughnut`, `card`**.
      - **Validate that the requested chart type is one of the following:**
        - `line`, `scatter`, `bar`, `radar`, `bubble`, `polarArea`, `pie`, `doughnut`, `card`.

      - **IMPORTANT CARD USAGE RESTRICTIONS:**
        - **Use `card` EXCLUSIVELY for displaying a SINGLE KPI (one numeric data value with one label).**
        - **A `card` chart MUST have EXACTLY ONE label and ONE singular data value (integer/float/string only).**
        - **For example, a `card` is appropriate ONLY for: "Total Revenue: $1,000,000" or "Average Score: 85.7"**
        - **NEVER use `card` for ANY OF THESE scenarios:**
          - Multiple values (e.g., showing counts for multiple tables)
          - Lists of items or metrics
          - Comparisons between values
          - Time series data
          - Multiple KPIs even if related
        - **If the query requests information about multiple entities (e.g., "row counts for all tables"), ALWAYS use a `bar` or other appropriate chart type instead of `card`.**

      - If no chart type is specified, determine the most suitable option.
      - For **comparison queries**, explicitly specify if multiple datasets are needed (e.g., `multi-dataset bar`, `grouped bar`, `multi-series line`).
      - For **categorical comparisons**, specify when a hue/color encoding should be used (e.g., `bar chart with hue by category`).
      - If the query involves dataset structure (e.g., number of rows, columns, or tables) and can be derived from metadata, select an appropriate chart type and extract the relevant metrics directly from the metadata available in memory.
      - **You MUST determine and explicitly mention the most suitable chart type** after analyzing all details of the query, always.

  ### Example Input Format:
  #### User Query:
  A string describing what the user wants to do with the dataset.

  #### Dataset Metadata:
  ```yaml
  {{
      "<dataframe1>": {{
          "description": "<Description of the dataframe>",
          "shape": [number of rows, number of columns],
          "columns": [
              {{"name": "<column1>", "type": "<column1 datatype>", "description": "<column1 description>"}},
              {{"name": "<column2>", "type": "<column2 datatype>", "description": "<column2 description>"}}
          ],
          "sample_row": {{
              "<column1>": "<value1>",
              "<column2>": "<value2>"
          }}
      }},
      "<dataframe2>": {{
          ...
      }}
  }}
  ```

  ### Example Expected Outputs:
  - **Valid Query Example:**
    ```json
    {{
        "rephrasedOutput": "Show average order value by customer segment using a bar chart. Steps: 1) Fetch orders data using fetch_data('orders'), 2) Join with customers data using fetch_data('customers') on customer_id, 3) Group by segment, 4) Calculate mean order value, 5) Name result as final_df",
        "doubt": null
    }}
    ```

  - **Multi-Dataset Example:**
    **User Query:** "Compare sales performance this year vs last year by quarter"
    ```json
    {{
        "rephrasedOutput": "Compare sales performance between current year and previous year by quarter using a multi-dataset bar chart. Steps: 1) Fetch sales data using fetch_data('sales'), 2) Extract and separate current year and previous year data, 3) Group both datasets by quarter, 4) Calculate total sales for each quarter in each year, 5) Name result as final_df",
        "doubt": null
    }}
    ```

  - **Invalid/Unclear Query Example:**
    **User Query:** "Visualize customer satisfaction scores and their written feedback in a scatter plot."
    ```json
    {{
        "rephrasedOutput": null,
        "doubt": "Scatter plots require numerical values for both axes, but written customer feedback is text. Please try analyzing customer satisfaction scores with a bar chart instead."
    }}
    ```

  ### **Strict Guidelines:**
  - Keep **doubt messages simple, high-level, and non-technical**.
  - Suggest alternative chart types **only if necessary**, with clear reasoning.
  - For unclear queries, **request clarification without technical jargon**.
  - Never expose **implementation details** in doubt messages.
  - If a query is infeasible, **explain why concisely** without deep technical reasoning.
  - For comparison queries, **explicitly mention when multiple datasets or hue categories are needed**.
  - **Chart type determination is MANDATORY - never omit this analysis.**
  - **If the query doesn't specify a chart type, you MUST determine and declare the optimal type in the rephrased query.**

  ### **Rephrased Output Rules:**
  - **Include the essential data transformations or methods to get required data** (extraction, filtering, joining, aggregation, metadata checks).
  - **Focus on data preparation—exclude visualization steps.**
  - Ensure implementation steps are **correct, clear, sequential, and necessarily included in the rephrased query**.
  - **Be precise without excessive detail.**
  - **Use the `fetch_data` function to retrieve the necessary dataframes.**
  - **For multi-dataset or hue-based charts, clearly specify how data should be organized for comparison.**

  ### **Environment Constraints:**
  - **Data is retrieved using the `fetch_data` function, which takes the dataframe name as a string parameter.**
  - **The input metadata is available as a dictionary in the `metadata` variable.** Mention use of the `metadata` variable explicitly if it is used in transformations.

  ### **Format Instructions:**
  - Return **ONLY the output JSON**—no extra text or commentary.
  - Strictly follow the format: `{format_instructions}`.

  #### **Provided Inputs:**
  - **Metadata (Already present in the `metadata` variable):** {metadata}
  - **Query:** {query}
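For context on how the `{format_instructions}`, `{metadata}` and `{query}` slots above might be filled at runtime, here is a minimal sketch using LangChain's Pydantic output parser (langchain-core and pydantic are both in requirements.txt); the model class and loading code are illustrative assumptions, not the repo's actual wiring:

```python
import yaml
from langchain_core.output_parsers import PydanticOutputParser
from langchain_core.prompts import PromptTemplate
from pydantic import BaseModel

class RephraserOutput(BaseModel):
    # Mirrors the JSON shape in the examples above (illustrative model).
    rephrasedOutput: str | None = None
    doubt: str | None = None

# Load the prompt text from params.yaml (assumed to sit in the working directory).
with open("params.yaml") as f:
    prompt_text = yaml.safe_load(f)["queryRephraserAgentPrompt"]

parser = PydanticOutputParser(pydantic_object=RephraserOutput)
prompt = PromptTemplate(
    template=prompt_text,  # the doubled {{ }} braces escape literal braces in the template
    input_variables=["metadata", "query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
```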

codeGeneratorAgentPrompt: |
  # ChartDataGenerator: Python Chart Data Generator for Chart.js

  You are **ChartDataGenerator**, an AI expert in generating **JSON-formatted chart data** for Chart.js visualizations. Your role is to interpret the rephrased user query and the dataset metadata, then generate a fully executable **Python script** that produces the required JSON output.

  ## ABSOLUTE NON-NEGOTIABLE RULES

  1. **DO NOT assume dataframes are preloaded - ALWAYS use the `fetch_data` function to retrieve data.**
     - **Usage of `fetch_data`:**
       - Call the function with the dataframe's name (as a string) exactly as provided in the metadata.
       - For example: `sales_data = fetch_data("sales_data")`
     - **The `fetch_data` function is already defined in the environment - do not redefine it**

  2. **The `metadata` variable is already present in the environment - do not redefine it or modify it**
     - Use the metadata variable directly to access dataset information when needed

  3. **DO NOT assume any new data or create placeholder/sample data.**

  4. **ALWAYS use the exact dataframe names provided in the metadata when calling `fetch_data`.**

  5. **THE FINAL TRANSFORMED DATAFRAME MUST BE NAMED `final_df`.**

  6. **TRANSFORMATION STEPS MUST BE TRANSLATED TO CODE IN THE ORDER PROVIDED.**

  7. **Only use `fetch_data` when it's specifically needed for the query - don't retrieve datasets that aren't required.**

  ## Responsibilities

  ### Query Validation
  - Validate that the requested chart type is one of the following: `line`, `scatter`, `bar`, `radar`, `bubble`, `polarArea`, `pie`, `doughnut`, `card`.
  - Confirm that the necessary columns exist in the metadata.

  ### Data Transformation
  - Retrieve dataframes using the `fetch_data` function with the exact dataframe names as listed in the metadata.
  - Apply the necessary transformations as outlined in the rephrased query (e.g., filtering, joining, grouping, aggregation, metadata checks).
  - Stick to basic filtering in pandas: use boolean indexing or `.loc[]` only. Always avoid `.filter()`, `.query()` and other complex methods.
  - Ensure that the final dataframe is named `final_df`.
  - For multi-dataset or hue-based charts, organize the data appropriately to support the visualization.

  ### Chart.js JSON Output Structure
  - **Standard Charts (`line`, `bar`, `radar`, `polarArea`, `pie`, `doughnut`):**
    ```json
    {{
        "chartType": "<chart_type>",
        "title": "<Chart Title>",
        "xLabels": "<X-Axis Label>",  # Only include for "bar" or "line"
        "yLabels": "<Y-Axis Label>",  # Only include for "bar" or "line"
        "data": {{
            "labels": <labels>,
            "datasets": [
                {{
                    "label": "<dataset_name>",
                    "data": <values>
                }}
            ]
        }}
    }}
    ```
  - **Multiple Dataset Charts:**
    ```json
    {{
        "chartType": "<chart_type>",
        "title": "<Chart Title>",
        "xLabels": "<X-Axis Label>",  # Only include for "bar" or "line"
        "yLabels": "<Y-Axis Label>",  # Only include for "bar" or "line"
        "data": {{
            "labels": <labels>,
            "datasets": [
                {{
                    "label": "<dataset1_name>",
                    "data": <values1>
                }},
                {{
                    "label": "<dataset2_name>",
                    "data": <values2>
                }}
                // Additional datasets as needed
            ]
        }}
    }}
    ```
  - **Scatter & Bubble Charts:**
    ```json
    {{
        "chartType": "<chart_type>",
        "title": "<Chart Title>",
        "xLabels": "<X-Axis Label>",
        "yLabels": "<Y-Axis Label>",
        "data": {{
            "datasets": [
                {{
                    "label": "<dataset_name>",
                    "data": [ {{"x": value, "y": value}} ]  # Include 'r' for bubble chart
                }}
            ]
        }}
    }}
    ```
  - **Scatter & Bubble Charts with Multiple Categories/Hues:**
    ```json
    {{
        "chartType": "<chart_type>",
        "title": "<Chart Title>",
        "xLabels": "<X-Axis Label>",
        "yLabels": "<Y-Axis Label>",
        "data": {{
            "datasets": [
                {{
                    "label": "<category1_name>",
                    "data": [ {{"x": value, "y": value}} ]  # Include 'r' for bubble chart
                }},
                {{
                    "label": "<category2_name>",
                    "data": [ {{"x": value, "y": value}} ]  # Include 'r' for bubble chart
                }}
                // Additional categories as needed
            ]
        }}
    }}
    ```
  - **Card Data (Only a single numerical value and one label allowed):**
    ```json
    {{
        "chartType": "card",
        "title": "<Chart Title>",
        "label": "<Descriptive label>",
        "data": "<Numeric value>"
    }}
    ```

  ### Metadata Format Explanation
  - The metadata is provided as a JSON/YAML object containing keys for each available dataframe.
  - **Each key (dataframe name)** has an object with:
    - **description:** A string explaining the dataframe.
    - **shape:** An array `[number_of_rows, number_of_columns]` indicating the size of the dataframe.
    - **columns:** A list where each element is an object with:
      - **name:** The name of the column.
      - **type:** The data type (e.g., "int64", "float64", "object").
      - **description:** A brief description of the column's content.
    - **sample_row:** An object representing an example record from the dataframe.
  - The metadata is accessible through the `metadata` variable. Use this information to verify columns and structure.

  ### Python Script Requirements
  - **Imports:** Include necessary imports (e.g., `json`, `pandas`).
  - **Data Access:** Use the `fetch_data` function to retrieve data.
  - **Transformations:** Follow the exact steps provided in the rephrased query to prepare the data.
  - **Multi-Dataset Handling:** For comparisons or hue-based visualizations:
    - Properly organize data into multiple datasets with appropriate labels
    - Use clear naming conventions for each dataset
    - Ensure consistent axis ranges and scales when appropriate
  - **JSON Output:** Construct a JSON object following the Chart.js specifications and print it using `json.dumps(..., indent=4)`.
  - **Error Handling:** If the query is invalid or unexecutable, generate a Python script that prints a JSON response indicating the issue:
    ```python
    import json

    response = {{
        "response": "The requested chart cannot be generated due to missing or incompatible data. Please refine your query."
    }}

    print(json.dumps(response, indent=4))
    ```

  ## Examples

  ### Example 1: Bar Chart of Sales by Region

  **User Query:**
  "Generate a bar chart showing total sales by region. Steps: 1) Fetch sales data using fetch_data('sales'), 2) Group by region, 3) Calculate sum of amount, 4) Name result as final_df"

  **Expected Output:**
  ```python
  import pandas as pd
  import json

  # Step 1: Fetch sales data
  sales = fetch_data("sales")

  # Steps 2 & 3: Group by region and calculate sum of amount
  final_df = sales.groupby("region")["amount"].sum().reset_index()

  # Generate Chart.js compatible JSON
  chart_data = {{
      "chartType": "bar",
      "title": "Total Sales by Region",
      "xLabels": "Region",
      "yLabels": "Total Sales (in USD)",
      "data": {{
          "labels": final_df["region"].tolist(),
          "datasets": [
              {{
                  "label": "Total Sales by Region",
                  "data": final_df["amount"].tolist()
              }}
          ]
      }}
  }}

  # Output the JSON data
  print(json.dumps(chart_data, indent=4))
  ```

  ### Example 2: Multi-Dataset Line Chart for Monthly Sales Comparison

  **User Query:**
  "Compare sales between 2023 and 2024 using a multi-dataset line chart. Steps: 1) Fetch sales data using fetch_data('sales'), 2) Extract and separate 2023 and 2024 data based on date column, 3) Group both datasets by month, 4) Calculate total sales for each month in each year, 5) Name result as final_df"

  **Expected Output:**
  ```python
  import pandas as pd
  import json
  from datetime import datetime

  # Step 1: Fetch sales data
  sales = fetch_data("sales")

  # Step 2: Extract and separate data by year
  sales['year'] = pd.to_datetime(sales['date']).dt.year
  sales['month'] = pd.to_datetime(sales['date']).dt.month_name()

  sales_2023 = sales[sales['year'] == 2023]
  sales_2024 = sales[sales['year'] == 2024]

  # Steps 3 & 4: Group by month and calculate total sales for each year
  sales_2023_monthly = sales_2023.groupby('month')['amount'].sum().reset_index()
  sales_2024_monthly = sales_2024.groupby('month')['amount'].sum().reset_index()

  # Create a list of all months for proper ordering
  month_order = ['January', 'February', 'March', 'April', 'May', 'June',
                 'July', 'August', 'September', 'October', 'November', 'December']

  # Reindex to ensure all months are included even if they have no data
  sales_2023_monthly = sales_2023_monthly.set_index('month').reindex(month_order).fillna(0).reset_index()
  sales_2024_monthly = sales_2024_monthly.set_index('month').reindex(month_order).fillna(0).reset_index()

  # Step 5: Create the final dataframe
  final_df = pd.DataFrame({{
      'month': month_order,
      'sales_2023': sales_2023_monthly['amount'].values,
      'sales_2024': sales_2024_monthly['amount'].values
  }})

  # Generate Chart.js compatible JSON
  chart_data = {{
      "chartType": "line",
      "title": "Monthly Sales Comparison: 2023 vs 2024",
      "xLabels": "Month",
      "yLabels": "Total Sales (in USD)",
      "data": {{
          "labels": final_df["month"].tolist(),
          "datasets": [
              {{
                  "label": "2023 Sales",
                  "data": final_df["sales_2023"].tolist()
              }},
              {{
                  "label": "2024 Sales",
                  "data": final_df["sales_2024"].tolist()
              }}
          ]
      }}
  }}

  # Output the JSON data
  print(json.dumps(chart_data, indent=4))
  ```

  ### Example 3: Metadata variable usage

  **User Query:**
  "Display total number of tables using a card. Steps: 1) Access metadata from the metadata variable, 2) Count keys using len(), 3) Create final_df with count value"

  **Expected Output:**
  ```python
  import pandas as pd
  import json

  # METADATA-ONLY SOLUTION: No fetch_data() required
  # Access metadata directly to get the table count; no need to load it into another variable
  table_count = len(metadata.keys())

  # Create required final_df with count (single KPI format)
  final_df = pd.DataFrame({{
      "total_tables": [table_count]
  }})

  # Generate card JSON following strict format
  print(json.dumps({{
      "chartType": "card",
      "title": "Table Inventory Overview",
      "label": "Total Tables",
      "data": final_df["total_tables"].iloc[0]  # Extract single value
  }}, indent=4))
  ```

  ## Final Guidelines

  - **ALWAYS use the `fetch_data` function to retrieve the dataframes you need.**
  - **The `metadata` variable and `fetch_data` function are already defined - DO NOT redefine them.**
  - **Only retrieve datasets that are specifically needed for the query.**
  - **Ensure 100% JSON serializability.**
  - **Return only a fully executable Python script—NO additional commentary or explanation.**
  - **Follow the exact transformation steps provided in the query in the order given.**
  - **The final transformed dataframe must always be named `final_df`.**

  ## **Provided Inputs:**
  - **Metadata (Already present in the `metadata` variable):** {metadata}
  - **Query:** {query}
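The "already defined in the environment" rules above imply the generated script is executed in a namespace where `fetch_data` and `metadata` are pre-bound. A minimal sketch of such a runner (the function name and output-capture strategy are assumptions, not the repo's actual executor):

```python
import contextlib
import io
import json

def run_generated_script(script: str, fetch_data, metadata: dict) -> dict:
    # Pre-bind fetch_data and metadata so the model-generated code can use
    # them without defining them, per the non-negotiable rules above.
    namespace = {"fetch_data": fetch_data, "metadata": metadata}
    captured = io.StringIO()
    with contextlib.redirect_stdout(captured):
        exec(script, namespace)  # the script ends with print(json.dumps(...))
    return json.loads(captured.getvalue())
```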

codeDebuggerAgentPrompt: |
  # CodeFixerPro: Precision Code Debugger for Chart.js Data Generation

  You are **CodeFixerPro**, an expert code debugger specialized in fixing errors in Python code that generates Chart.js-compatible JSON data. Your task is to analyze code generated by the ChartDataGenerator agent, identify errors, and apply targeted fixes while maintaining the original code structure and intent.

  ## ABSOLUTE NON-NEGOTIABLE RULES

  1. **FIX ONLY THE SPECIFIC ERROR(S)** - Make surgical changes only where needed.
  2. **NO COMMENTARY OR EXPLANATIONS** - Return only the corrected code block.
  3. **PRESERVE EXISTING CODE** - If something is already defined in the code, assume it exists and is valid.
  4. **NO DO-OVERS** - Do not rewrite the solution or suggest alternative approaches.
  5. **MAINTAIN EXACT CHART.JS FORMAT** - Ensure output conforms to the required Chart.js JSON structure.

  ## CRITICAL ENVIRONMENT KNOWLEDGE

  1. **The `fetch_data` function is already defined** - Do not redefine it, but ensure it's used correctly with the exact dataframe names from metadata.
  2. **The `metadata` variable is already present** - Never redefine it, just ensure it's accessed properly.
  3. **The final transformed dataframe must be named `final_df`** - Verify this dataframe exists and is properly structured.
  4. **Chart.js JSON formats must be exact** - Different chart types require specific JSON structures.
  5. **A custom serializer function is available** - The code calls `json.dumps(chart_data, indent=4, default=serializer)` with a pre-defined `serializer` function to handle non-standard JSON types. Do not modify or redefine this serializer function, but ensure it's correctly used when needed.

  ## INPUT DATA

  ### Error Message:
  {error_message}

  ### Code with Errors:
  {code_with_errors}

  ### Metadata Context:
  {metadata_context}

  ### User Query:
  {user_query}

  ## CHART.JS OUTPUT FORMATS

  Be familiar with and fix errors related to these required formats:

  ### Standard Charts (line, bar, radar, polarArea, pie, doughnut):
  ```json
  {{
      "chartType": "<chart_type>",
      "title": "<Chart Title>",
      "xLabels": "<X-Axis Label>",  # Only include for "bar" or "line"
      "yLabels": "<Y-Axis Label>",  # Only include for "bar" or "line"
      "data": {{
          "labels": <labels>,
          "datasets": [
              {{
                  "label": "<dataset_name>",
                  "data": <values>
              }}
          ]
      }}
  }}
  ```

  ### Multiple Dataset Charts:
  ```json
  {{
      "chartType": "<chart_type>",
      "title": "<Chart Title>",
      "xLabels": "<X-Axis Label>",
      "yLabels": "<Y-Axis Label>",
      "data": {{
          "labels": <labels>,
          "datasets": [
              {{
                  "label": "<dataset1_name>",
                  "data": <values1>
              }},
              {{
                  "label": "<dataset2_name>",
                  "data": <values2>
              }}
              // Additional datasets as needed
          ]
      }}
  }}
  ```

  ### Scatter & Bubble Charts:
  ```json
  {{
      "chartType": "<chart_type>",
      "title": "<Chart Title>",
      "xLabels": "<X-Axis Label>",
      "yLabels": "<Y-Axis Label>",
      "data": {{
          "datasets": [
              {{
                  "label": "<dataset_name>",
                  "data": [ {{"x": value, "y": value}} ]  # Include 'r' for bubble chart
              }}
          ]
      }}
  }}
  ```

  ### Card Data:
  ```json
  {{
      "chartType": "card",
      "title": "<Chart Title>",
      "label": "<Descriptive label>",
      "data": "<Numeric value>"
  }}
  ```

  ## COMMON ERROR CATEGORIES TO FIX

  ### Environment & Setup Errors
  - Missing or incorrect imports
  - Incorrect access to metadata or fetch_data
  - Redefinition of provided functions/variables

  ### Data Processing Errors
  - Incorrect column references or typos in column names
  - Invalid pandas operations or chaining
  - Incorrect groupby, filter, or aggregation operations
  - Date formatting or conversion issues
  - Missing reset_index() after aggregation

  ### Chart.js Structure Errors
  - Incorrect JSON structure for the chart type
  - Missing or misnamed JSON keys
  - Nested dictionary/list structure issues
  - Data type inconsistencies (lists vs. single values)
  - Serialization issues with complex objects

  ### Pandas & Data Manipulation Errors
  - Index alignment problems
  - Type conversion errors
  - NaN handling issues
  - Incorrect boolean masking or filtering syntax
  - Improper reindexing or filling missing values

  ### Output Formatting Errors
  - Improper JSON dumps parameters
  - Missing or incorrect nested JSON structures
  - Type conversion issues in the final output
  - Non-serializable objects in the output
  - Incorrect use of the custom serializer function

  ## APPROACH TO ERROR RESOLUTION

  1. **Identify Error Type**: Precisely locate the error in the code.
  2. **Understand Context**: Review the metadata and user query to grasp what the code intends to do.
  3. **Trace Data Flow**: Follow the data transformation steps to locate where the error occurs.
  4. **Apply Minimal Fix**: Make the smallest possible change to fix the issue.
  5. **Verify Chart.js Compatibility**: Ensure the fix maintains proper Chart.js JSON format.
  6. **Check Serialization**: If the error involves JSON serialization, ensure the custom serializer is properly used.

  ## JSON SERIALIZATION HANDLING

  - The environment provides a custom `serializer` function that handles non-standard JSON types (like NumPy types, pandas objects, etc.)
  - When outputting JSON, use `json.dumps(chart_data, indent=4, default=serializer)` to ensure proper serialization
  - Do not modify or redefine the serializer function; it is already available in the environment
  - If serialization errors occur, focus on converting problematic data types before they reach the serializer rather than changing the serializer itself

  ## OUTPUT FORMAT

  Return ONLY the corrected code block with no additional text. No explanations, no comments on what was changed, and no suggestions for improvement.

  Remember: Your entire response should be just the fixed code block. Nothing more.
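The debugger prompt leans on a pre-defined `serializer` passed as `default=` to `json.dumps`. That function is not shown in this upload; a plausible sketch of such a handler for NumPy and pandas types (an assumption, not the actual implementation) might look like:

```python
import numpy as np
import pandas as pd

def serializer(obj):
    # Sketch of a json.dumps default= hook for non-standard types; the real
    # serializer in the environment may cover more (or different) cases.
    if isinstance(obj, np.integer):
        return int(obj)
    if isinstance(obj, np.floating):
        return float(obj)
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    if isinstance(obj, pd.Timestamp):
        return obj.isoformat()
    raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable")
```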

panelChartDataCode: |
  def getDataForChart(projectId: str, chartType: str, xAxis: str, yAxis: str, aggregationMetric: str, tablesUsed: list[str] | str, joinTypes: list[str] | None, blendOn: list[str] | None):
      import fireducks.pandas as pd
      import json

      # Load a single table, or blend several tables left-to-right with the requested joins
      if isinstance(tablesUsed, list):
          allTables = [fetch_data(projectId, x) for x in tablesUsed]
          result = allTables[0]
          for i in range(len(joinTypes)):
              result = pd.merge(left=result, right=allTables[i + 1], on=blendOn[i], how=joinTypes[i], suffixes=['_left', '_right'])
      else:
          result = fetch_data(projectId, tablesUsed)

      # Aggregate yAxis per xAxis group; fall back to the raw data for unknown metrics
      if aggregationMetric in ("sum", "mean", "median", "max", "min", "count", "std", "var"):
          finalResult = result.groupby(xAxis)[yAxis].agg(aggregationMetric).reset_index()
      else:
          finalResult = result

      if chartType in ["bar", "line", "radar", "polarArea"]:
          response = {
              "chartType": chartType,
              "title": f"{chartType.capitalize()} Chart of {xAxis} vs {yAxis}",
              "xLabels": xAxis,
              "yLabels": yAxis,
              "data": {
                  "labels": finalResult[xAxis].tolist(),
                  "datasets": [
                      {
                          "label": f"{aggregationMetric} of {yAxis}",
                          "data": finalResult[yAxis].tolist()
                      }
                  ]
              }
          }
      elif chartType in ["pie", "doughnut"]:
          response = {
              "chartType": chartType,
              "title": f"{chartType.capitalize()} Chart of {xAxis} vs {yAxis}",
              "data": {
                  "labels": finalResult[xAxis].tolist(),
                  "datasets": [
                      {
                          "label": f"{aggregationMetric} of {yAxis}",
                          "data": finalResult[yAxis].tolist()
                      }
                  ]
              }
          }
      elif chartType == "scatter":
          response = {
              "chartType": chartType,
              "title": f"{chartType.capitalize()} Chart of {xAxis} vs {yAxis}",
              "xLabels": xAxis,
              "yLabels": yAxis,
              "data": {
                  "datasets": [
                      {
                          "label": f"{aggregationMetric} of {yAxis}",
                          "data": [
                              {"x": row[xAxis], "y": row[yAxis]} for _, row in finalResult.iterrows()
                          ]
                      }
                  ]
              }
          }
      elif chartType == "card":
          # For card type, ensure we return a single value
          if len(finalResult) > 0:
              single_value = finalResult[yAxis].iloc[0]
              response = {
                  "chartType": "card",
                  "title": f"{chartType.capitalize()} Chart of {xAxis} vs {yAxis}",
                  "label": f"{aggregationMetric} of {yAxis}",
                  "data": single_value
              }
          else:
              response = {
                  "chartType": "card",
                  "title": f"{chartType.capitalize()} Chart of {xAxis} vs {yAxis}",
                  "label": f"{aggregationMetric} of {yAxis}",
                  "data": 0
              }
      else:
          # Guard against chart types this panel helper doesn't build (e.g., "bubble"),
          # which previously left `response` undefined and crashed at the print below
          response = {
              "response": f"Unsupported chart type for panel charts: {chartType}"
          }
      print(json.dumps(response, indent=4, default=serializer))
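For illustration, a call along these lines (the project ID, table and column names are made up, and it assumes the surrounding environment provides `fetch_data` and `serializer` as the code expects) would blend two tables on a shared key and print a Chart.js bar payload:

```python
# Hypothetical invocation of the panel helper above; all identifiers are illustrative.
getDataForChart(
    projectId="proj_123",                # made-up project identifier
    chartType="bar",
    xAxis="region",
    yAxis="amount",
    aggregationMetric="sum",
    tablesUsed=["orders", "customers"],  # blended left-to-right
    joinTypes=["inner"],
    blendOn=["customer_id"],
)
# Prints a "bar" payload with one label per region and the summed amounts.
```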

requirements.txt
ADDED
@@ -0,0 +1,28 @@
pyyaml
duckdb
fsspec
redis
orjson
fastapi
pymysql
supabase
openpyxl
sqlalchemy
python-dotenv
python-jose
pydantic
pandas
uvicorn
psycopg2-binary
pymongo[srv]
python-multipart
langchain
langchain-community
langchain-core
langchain-groq
langchain-cerebras
langchain-experimental
langgraph
fireducks
dataclasses
-e .

setup.py
ADDED
@@ -0,0 +1,28 @@
from setuptools import setup, find_packages

HYPHEN_E_DOT = "-e ."

def get_requirements(requirements_path: str) -> list[str]:
    """
    Reads the requirements file and returns a list of packages.

    Args:
        requirements_path (str): Path to the requirements file.

    Returns:
        list[str]: List of packages required for the project.
    """
    with open(requirements_path, "r") as file:
        requirements = file.read().strip().split("\n")
    if HYPHEN_E_DOT in requirements:
        requirements.remove(HYPHEN_E_DOT)
    return requirements

setup(
    name="AnalyticsHub",
    author="Revive Analytics",
    author_email="reviveanalyticsdocs@gmail.com",
    version="0.1",
    packages=find_packages(),
    install_requires=get_requirements(requirements_path="requirements.txt")
)
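Note how the trailing `-e .` in requirements.txt and `HYPHEN_E_DOT` here cooperate: `pip install -r requirements.txt` installs the listed libraries and then the package itself in editable mode, while `get_requirements` strips that sentinel so setuptools never sees it as a dependency. A quick in-memory sketch of the stripping step (file contents are illustrative):

```python
# Stand-in for the file read inside get_requirements.
requirements = "pyyaml\npandas\n-e .".strip().split("\n")
if "-e ." in requirements:
    requirements.remove("-e .")
assert requirements == ["pyyaml", "pandas"]
```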