Spaces:
Paused
Paused
Upload folder using huggingface_hub
#1
by imseldrith - opened
- .dockerignore +7 -0
- .github/dependabot.yml +12 -0
- .github/workflows/python-app.yml +45 -0
- .gitignore +47 -0
- Dockerfile +42 -0
- LICENSE +3 -0
- app.py +104 -0
- exceptions/GPTException.py +7 -0
- exceptions/__init__.py +0 -0
- models/__init__.py +0 -0
- models/query_model.py +5 -0
- requirements.txt +72 -0
- run.py +4 -0
- templates/__init__.py +0 -0
- templates/index.html +70 -0
- templates/index.js +28 -0
- tests/__init__.py +0 -0
- tests/conftest.py +26 -0
- tests/test_app.py +82 -0
.dockerignore
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
env
|
| 2 |
+
.dockerignore
|
| 3 |
+
Dockerfile
|
| 4 |
+
Dockerfile.prod
|
| 5 |
+
venv
|
| 6 |
+
.pytest_cache
|
| 7 |
+
.coverage
|
.github/dependabot.yml
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# To get started with Dependabot version updates, you'll need to specify which
|
| 2 |
+
# package ecosystems to update and where the package manifests are located.
|
| 3 |
+
# Please see the documentation for all configuration options:
|
| 4 |
+
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
|
| 5 |
+
|
| 6 |
+
version: 2
|
| 7 |
+
updates:
|
| 8 |
+
- package-ecosystem: "" # See documentation for possible values
|
| 9 |
+
directory: "/" # Location of package manifests
|
| 10 |
+
schedule:
|
| 11 |
+
interval: "weekly"
|
| 12 |
+
|
.github/workflows/python-app.yml
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: Python application Setup - Chat GPT
|
| 2 |
+
|
| 3 |
+
on:
|
| 4 |
+
push:
|
| 5 |
+
branches: [ "master" ]
|
| 6 |
+
pull_request:
|
| 7 |
+
branches: [ "master" ]
|
| 8 |
+
|
| 9 |
+
permissions:
|
| 10 |
+
contents: read
|
| 11 |
+
|
| 12 |
+
jobs:
|
| 13 |
+
build:
|
| 14 |
+
|
| 15 |
+
runs-on: ubuntu-latest
|
| 16 |
+
env:
|
| 17 |
+
OPEN_AI_KEY: ${{ secrets.OPEN_AI_KEY }}
|
| 18 |
+
|
| 19 |
+
steps:
|
| 20 |
+
- uses: actions/checkout@v3
|
| 21 |
+
- name: Set up Python 3.11
|
| 22 |
+
uses: actions/setup-python@v3
|
| 23 |
+
with:
|
| 24 |
+
python-version: "3.11"
|
| 25 |
+
- name: Install dependencies
|
| 26 |
+
run: |
|
| 27 |
+
python -m pip install --upgrade pip
|
| 28 |
+
python -m pip install -r requirements.txt
|
| 29 |
+
python -m pip install setuptools --upgrade
|
| 30 |
+
- name: Lint with flake8
|
| 31 |
+
run: |
|
| 32 |
+
# stop the build if there are Python syntax errors or undefined names
|
| 33 |
+
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
|
| 34 |
+
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
|
| 35 |
+
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
|
| 36 |
+
- name: Test with pytest
|
| 37 |
+
run: |
|
| 38 |
+
python -m pytest
|
| 39 |
+
- name: Test with pytest Code Coverage
|
| 40 |
+
run: |
|
| 41 |
+
python -m pytest --cov=OpenAI
|
| 42 |
+
##- name : Pip audit check
|
| 43 |
+
##run: |
|
| 44 |
+
##pip-audit
|
| 45 |
+
|
.gitignore
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# JetBrains IDE files
|
| 2 |
+
.idea/
|
| 3 |
+
*.iml
|
| 4 |
+
|
| 5 |
+
# Compiled files
|
| 6 |
+
__pycache__/
|
| 7 |
+
*.pyc
|
| 8 |
+
*.pyo
|
| 9 |
+
*.pyd
|
| 10 |
+
*.so
|
| 11 |
+
*.dll
|
| 12 |
+
|
| 13 |
+
# Cache
|
| 14 |
+
.cache/
|
| 15 |
+
|
| 16 |
+
# Distribution / packaging
|
| 17 |
+
*.egg-info/
|
| 18 |
+
dist/
|
| 19 |
+
build/
|
| 20 |
+
*.egg
|
| 21 |
+
|
| 22 |
+
# Logs
|
| 23 |
+
logs/
|
| 24 |
+
*.log
|
| 25 |
+
|
| 26 |
+
# OS generated files
|
| 27 |
+
.DS_Store
|
| 28 |
+
.DS_Store?
|
| 29 |
+
._*
|
| 30 |
+
.Spotlight-V100
|
| 31 |
+
.Trashes
|
| 32 |
+
ehthumbs.db
|
| 33 |
+
Thumbs.db
|
| 34 |
+
|
| 35 |
+
# Other
|
| 36 |
+
*.swp
|
| 37 |
+
*.swo
|
| 38 |
+
*.swn
|
| 39 |
+
*.bak
|
| 40 |
+
*.tmp
|
| 41 |
+
*.pyc
|
| 42 |
+
*.pyo
|
| 43 |
+
*.orig
|
| 44 |
+
*.rej
|
| 45 |
+
|
| 46 |
+
# pytest
|
| 47 |
+
.pytest_cache
|
Dockerfile
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Use an official Python runtime as a parent image
|
| 2 |
+
FROM python:3.11-slim
|
| 3 |
+
|
| 4 |
+
# Checks all images
|
| 5 |
+
ENV DOCKER_CONTENT_TRUST=1
|
| 6 |
+
|
| 7 |
+
# Labels
|
| 8 |
+
LABEL version="1.0.0"
|
| 9 |
+
|
| 10 |
+
# Set the working directory
|
| 11 |
+
WORKDIR /OpenAI
|
| 12 |
+
|
| 13 |
+
# Create volume directory
|
| 14 |
+
RUN mkdir -p /docker/data
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
# Python should not write bytecode files (.pyc files) to disk.
|
| 18 |
+
ENV PYTHONDONTWRITEBYTECODE=1
|
| 19 |
+
# Disables buffering, allowing the output to be immediately displayed as it is generated.
|
| 20 |
+
ENV PYTHONUNBUFFERED=1
|
| 21 |
+
|
| 22 |
+
# Installs Nano and Curl
|
| 23 |
+
RUN apt-get update \
|
| 24 |
+
&& apt-get install -y curl \
|
| 25 |
+
&& apt-get -y install nano \
|
| 26 |
+
&& apt-get clean
|
| 27 |
+
|
| 28 |
+
# Copy the requirements file into the container
|
| 29 |
+
COPY requirements.txt .
|
| 30 |
+
|
| 31 |
+
# Install any needed packages specified in requirements.txt
|
| 32 |
+
RUN pip3 install --trusted-host pypi.python.org -r requirements.txt
|
| 33 |
+
|
| 34 |
+
# Set environment variables
|
| 35 |
+
ARG OPEN_AI_KEY
|
| 36 |
+
ENV OPEN_AI_KEY=${OPEN_AI_KEY}
|
| 37 |
+
|
| 38 |
+
# Copy the rest of the application code
|
| 39 |
+
COPY . .
|
| 40 |
+
|
| 41 |
+
# Run the command to start the app
|
| 42 |
+
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "80"]
|
LICENSE
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License
|
| 2 |
+
|
| 3 |
+
https://creativecommons.org/licenses/by-nc-nd/4.0/
|
app.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import openai
|
| 3 |
+
import os
|
| 4 |
+
|
| 5 |
+
from exceptions.GPTException import GPTException
|
| 6 |
+
from models.query_model import QueryModel
|
| 7 |
+
from typing import Optional
|
| 8 |
+
from fastapi import FastAPI, Request, HTTPException
|
| 9 |
+
from fastapi.responses import JSONResponse
|
| 10 |
+
from fastapi.templating import Jinja2Templates
|
| 11 |
+
from fastapi.staticfiles import StaticFiles
|
| 12 |
+
from pathlib import Path
|
| 13 |
+
import logging
|
| 14 |
+
|
| 15 |
+
app = FastAPI()
|
| 16 |
+
|
| 17 |
+
# logging
|
| 18 |
+
log = logging.getLogger("uvicorn")
|
| 19 |
+
openai.api_key = os.getenv("OPEN_AI_KEY")
|
| 20 |
+
openai.api_base = os.getenv("BASE_URL") #"https://api.waveai.link/v1"
|
| 21 |
+
|
| 22 |
+
templates_directory = Path(__file__).parent / "templates"
|
| 23 |
+
templates = Jinja2Templates(directory=templates_directory)
|
| 24 |
+
app.mount("/templates", StaticFiles(directory=templates_directory), name="templates")
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@app.get("/")
async def index(request: Request):
    """
    Serve the main HTML page.

    :param request: incoming request object (Jinja2Templates requires it
        inside the template context)
    :return: rendered ``index.html`` template response
    """
    try:
        return templates.TemplateResponse("index.html", {"request": request})
    except HTTPException as exc:
        log.exception("An HTTPException occurred: %s", exc.detail)
        # BUG FIX: http_exception_handler is a coroutine function; without
        # ``await`` this returned an un-awaited coroutine object to the
        # client instead of the error payload.
        return await http_exception_handler(exc)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@app.post("/ask_gpt4/")
async def ask_gpt4(query_params: QueryModel, model: Optional[str] = "text-davinci-003") -> dict:
    """
    Receive a user query and return the completion produced by the OpenAI API.

    Despite the route name, this uses the legacy Completion API
    (``openai.Completion.create``); using an actual GPT-4 model would
    require ``openai.ChatCompletion.create`` instead.

    :param query_params: validated request body holding ``user_input``
    :param model: OpenAI engine name (query parameter, per OpenAI API specs)
    :return: ``{"response": <answer>}`` on success
    :raises GPTException: when the API response is unusable or the call fails
    """
    try:
        response = openai.Completion.create(
            engine=model,
            prompt=query_params.user_input,
            max_tokens=1000,
            n=1,
            stop=None,
            temperature=0.5,
        )

        if len(response.choices) > 0 and hasattr(response.choices[0], "text"):
            answer = response.choices[0].text.strip()
            return {"response": answer}
        else:
            error_msg = "ChatGPT response does not contain text attribute."
            log.error(error_msg)
            raise GPTException(error_msg)

    except GPTException:
        # Already carries a status code; bare ``raise`` preserves the
        # original traceback (``raise e`` would truncate it).
        raise
    except Exception as e:
        log.error(f"Exception occurred: {str(e)}")
        # Chain with ``from e`` so the root cause stays attached to the
        # GPTException that the registered handler turns into JSON.
        if not query_params.user_input:  # Empty user_input case
            raise GPTException("Empty user_input", status_code=400) from e
        else:
            raise GPTException(str(e)) from e
|
| 81 |
+
|
| 82 |
+
# Exception handling
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
@app.exception_handler(HTTPException)
async def http_exception_handler(exc: HTTPException) -> dict:
    """
    Translate an HTTPException into a plain dict payload.

    NOTE(review): FastAPI invokes registered exception handlers as
    ``handler(request, exc)`` (two positional arguments), but this
    signature accepts only ``exc`` — dispatch of a real HTTPException by
    the framework would likely fail. The signature is deliberately left
    unchanged here because ``index`` and the test suite both call it
    directly with a single argument; confirm and align all call sites
    before changing it.

    :param exc: the raised exception
    :return: dict pairing the error detail with the HTTP status code
    """
    log.debug("Calling http_exception_handler")
    return {"detail": exc.detail, "status_code": exc.status_code}
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
@app.exception_handler(GPTException)
async def gpt_exception_handler(request: Request, exc: GPTException):
    """
    Convert a GPTException into a JSON error response.

    :param request: incoming request (part of FastAPI's handler signature)
    :param exc: the raised GPTException, which carries the status code
    :return: JSONResponse of the form ``{"error": "<message>"}``
    """
    return JSONResponse(
        status_code=exc.status_code,
        content={"error": str(exc)},
    )
|
| 103 |
+
|
| 104 |
+
|
exceptions/GPTException.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class GPTException(Exception):
    """
    Application-level error raised when the OpenAI call fails or returns
    an unusable response.

    Carries an HTTP status code so the FastAPI exception handler can
    translate the error into a JSON response.
    """

    def __init__(self, message, status_code=400):
        """
        :param message: human-readable error description
        :param status_code: HTTP status to report (default 400 Bad Request)
        """
        super().__init__(message)
        # Read by gpt_exception_handler when building the JSONResponse.
        self.status_code = status_code
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
|
exceptions/__init__.py
ADDED
|
File without changes
|
models/__init__.py
ADDED
|
File without changes
|
models/query_model.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel, Field
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class QueryModel(BaseModel):
    """Request body for ``/ask_gpt4/``: the user's question text."""

    # Must be non-empty; capped at 1000 characters to bound prompt size.
    user_input: str = Field(min_length=1, max_length=1000)
|
requirements.txt
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
aiohttp
|
| 2 |
+
aiosignal
|
| 3 |
+
anyio
|
| 4 |
+
async-timeout
|
| 5 |
+
attrs
|
| 6 |
+
CacheControl
|
| 7 |
+
certifi
|
| 8 |
+
charset-normalizer
|
| 9 |
+
click
|
| 10 |
+
colorama
|
| 11 |
+
coverage
|
| 12 |
+
cyclonedx-python-lib
|
| 13 |
+
dataclasses-json
|
| 14 |
+
defusedxml
|
| 15 |
+
fastapi
|
| 16 |
+
filelock
|
| 17 |
+
flake8
|
| 18 |
+
frozenlist
|
| 19 |
+
greenlet
|
| 20 |
+
h11
|
| 21 |
+
html5lib
|
| 22 |
+
httpcore
|
| 23 |
+
httpx
|
| 24 |
+
idna
|
| 25 |
+
iniconfig
|
| 26 |
+
Jinja2
|
| 27 |
+
langchain
|
| 28 |
+
langchainplus-sdk
|
| 29 |
+
markdown-it-py
|
| 30 |
+
MarkupSafe
|
| 31 |
+
marshmallow
|
| 32 |
+
marshmallow-enum
|
| 33 |
+
mccabe
|
| 34 |
+
mdurl
|
| 35 |
+
msgpack
|
| 36 |
+
multidict
|
| 37 |
+
mypy-extensions
|
| 38 |
+
numexpr
|
| 39 |
+
numpy
|
| 40 |
+
openai
|
| 41 |
+
openapi-schema-pydantic
|
| 42 |
+
packageurl-python
|
| 43 |
+
packaging
|
| 44 |
+
pip-api
|
| 45 |
+
pip-requirements-parser
|
| 46 |
+
pip_audit
|
| 47 |
+
pluggy
|
| 48 |
+
py-serializable
|
| 49 |
+
pycodestyle
|
| 50 |
+
pydantic
|
| 51 |
+
pyflakes
|
| 52 |
+
Pygments
|
| 53 |
+
pyparsing
|
| 54 |
+
pytest
|
| 55 |
+
pytest-cov
|
| 56 |
+
PyYAML
|
| 57 |
+
requests
|
| 58 |
+
rich
|
| 59 |
+
six
|
| 60 |
+
sniffio
|
| 61 |
+
sortedcontainers
|
| 62 |
+
SQLAlchemy
|
| 63 |
+
starlette
|
| 64 |
+
tenacity
|
| 65 |
+
toml
|
| 66 |
+
tqdm
|
| 67 |
+
typing-inspect
|
| 68 |
+
typing_extensions
|
| 69 |
+
urllib3
|
| 70 |
+
uvicorn
|
| 71 |
+
webencodings
|
| 72 |
+
yarl
|
run.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import uvicorn

# Local development entry point with auto-reload; in the container the
# app is served by the CMD in the Dockerfile instead.
if __name__ == "__main__":
    uvicorn.run("app:app", host="127.0.0.1", port=8000, reload=True)
|
templates/__init__.py
ADDED
|
File without changes
|
templates/index.html
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html>
|
| 3 |
+
<head>
|
| 4 |
+
<meta charset="UTF-8">
|
| 5 |
+
<title>Simple Web App to query Chat GPT any question you like</title>
|
| 6 |
+
<!-- Load Bootstrap CSS -->
|
| 7 |
+
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.0/dist/css/bootstrap.min.css">
|
| 8 |
+
</head>
|
| 9 |
+
<style>
|
| 10 |
+
body {
|
| 11 |
+
display: flex;
|
| 12 |
+
flex-direction: column;
|
| 13 |
+
align-items: center;
|
| 14 |
+
justify-content: center;
|
| 15 |
+
min-height: 100vh;
|
| 16 |
+
margin: 0;
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
.container {
|
| 20 |
+
max-height: 90vh;
|
| 21 |
+
overflow-y: auto;
|
| 22 |
+
padding: 1rem;
|
| 23 |
+
}
|
| 24 |
+
</style>
|
| 25 |
+
<body>
|
| 26 |
+
<div class="container">
|
| 27 |
+
<h1>Simple Web App to query Chat GPT</h1>
|
| 28 |
+
<form>
|
| 29 |
+
<label for="question">Enter your question:</label><br>
|
| 30 |
+
<input type="text" id="question" name="user_input"><br>
|
| 31 |
+
<br>
|
| 32 |
+
<button type="button" onclick="sendRequest()">Submit</button>
|
| 33 |
+
</form>
|
| 34 |
+
|
| 35 |
+
<br>
|
| 36 |
+
<div id="answer">Answer will be shown here</div>
|
| 37 |
+
</div>
|
| 38 |
+
</body>
|
| 39 |
+
<script>
|
| 40 |
+
async function sendRequest() {
|
| 41 |
+
const question = document.getElementsByName("user_input")[0].value;
|
| 42 |
+
const response = await fetch('/ask_gpt4/', {
|
| 43 |
+
method: 'POST',
|
| 44 |
+
headers: { 'Content-Type': 'application/json' },
|
| 45 |
+
body: JSON.stringify({ user_input: question })
|
| 46 |
+
});
|
| 47 |
+
const data = await response.json();
|
| 48 |
+
|
| 49 |
+
if (response.status === 200) {
|
| 50 |
+
if ("response" in data) {
|
| 51 |
+
const answerBox = document.createElement("code");
|
| 52 |
+
answerBox.style.display = "block";
|
| 53 |
+
answerBox.style.backgroundColor = "lightgreen";
|
| 54 |
+
answerBox.style.padding = "10px";
|
| 55 |
+
answerBox.innerHTML = data.response.replace(/\n/g, "<br>");
|
| 56 |
+
const answerElement = document.getElementById("answer");
|
| 57 |
+
while (answerElement.firstChild) {
|
| 58 |
+
answerElement.removeChild(answerElement.firstChild);
|
| 59 |
+
}
|
| 60 |
+
answerElement.appendChild(answerBox);
|
| 61 |
+
} else if ("error" in data) {
|
| 62 |
+
document.getElementById("answer").innerHTML = `Error: ${data.error}`;
|
| 63 |
+
} else {
|
| 64 |
+
document.getElementById("answer").innerHTML = "Unknown error.";
|
| 65 |
+
}
|
| 66 |
+
} else {
|
| 67 |
+
document.getElementById("answer").innerHTML = "Server error.";
|
| 68 |
+
}
|
| 69 |
+
}</script>
|
| 70 |
+
</html>
|
templates/index.js
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
async function sendRequest() {
|
| 2 |
+
const question = document.getElementsByName("user_input")[0].value;
|
| 3 |
+
const response = await fetch('/ask_gpt4/', {
|
| 4 |
+
method: 'POST',
|
| 5 |
+
headers: { 'Content-Type': 'application/json' },
|
| 6 |
+
body: JSON.stringify({ user_input: question })
|
| 7 |
+
});
|
| 8 |
+
const data = await response.json();
|
| 9 |
+
|
| 10 |
+
if (response.status === 200) {
|
| 11 |
+
if ("response" in data) {
|
| 12 |
+
const answerBox = document.createElement("code");
|
| 13 |
+
answerBox.classList.add("d-block", "p-3", "bg-success", "text-white");
|
| 14 |
+
answerBox.innerHTML = data.response.replace(/\n/g, "<br>");
|
| 15 |
+
const answerElement = document.getElementById("answer");
|
| 16 |
+
while (answerElement.firstChild) {
|
| 17 |
+
answerElement.removeChild(answerElement.firstChild);
|
| 18 |
+
}
|
| 19 |
+
answerElement.appendChild(answerBox);
|
| 20 |
+
} else if ("error" in data) {
|
| 21 |
+
document.getElementById("answer").innerHTML = `Error: ${data.error}`;
|
| 22 |
+
} else {
|
| 23 |
+
document.getElementById("answer").innerHTML = "Unknown error.";
|
| 24 |
+
}
|
| 25 |
+
} else {
|
| 26 |
+
document.getElementById("answer").innerHTML = "Server error.";
|
| 27 |
+
}
|
| 28 |
+
}
|
tests/__init__.py
ADDED
|
File without changes
|
tests/conftest.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# conftest.py
|
| 2 |
+
import os
|
| 3 |
+
import sys
|
| 4 |
+
from fastapi.testclient import TestClient
|
| 5 |
+
import pytest
|
| 6 |
+
from functools import lru_cache
|
| 7 |
+
from fastapi import FastAPI
|
| 8 |
+
import httpx
|
| 9 |
+
|
| 10 |
+
# Add the path to your source code directory to the system path
|
| 11 |
+
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
| 12 |
+
|
| 13 |
+
# Create an instance of FASTAPI to use
|
| 14 |
+
# BUG FIX: use the real application under test rather than a bare
# FastAPI() instance, which has no routes and would 404 every request
# made through the fixture.
from app import app


@pytest.fixture(scope="module")
def test_client() -> TestClient:
    """
    Create a test client bound to the application.

    Module scope shares one client across all tests in a module; the
    ``with`` block ensures app startup/shutdown events run.

    NOTE: the previous ``@lru_cache()`` wrapper was removed — caching a
    generator-based fixture would hand back an exhausted generator on
    reuse, and pytest already controls fixture lifetime via ``scope``.

    :return: a TestClient object (yielded so teardown runs).
    """
    with TestClient(app) as client:
        yield client
|
tests/test_app.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
from fastapi.testclient import TestClient
|
| 3 |
+
from fastapi import HTTPException
|
| 4 |
+
from typing import Union, Any
|
| 5 |
+
from app import ask_gpt4, app, http_exception_handler
|
| 6 |
+
from models.query_model import QueryModel
|
| 7 |
+
|
| 8 |
+
client = TestClient(app)
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@pytest.mark.parametrize("query_params, model, expected_output", [
    (
        {
            "user_input": "What is the capital of France?Please answer with one word only and dont add dot at the end"},
        "text-davinci-003",
        "Paris"
    ),
    (
        {"user_input": "Which is the capital of UK? Please answer with one word only and dont add dot at the end"},
        "text-davinci-003",
        "London"
    ),
    # Add more test cases here
])
def test_ask_gpt4(query_params, model, expected_output):
    """
    Exercise the /ask_gpt4/ route end-to-end through the test client.

    Defined as a plain (sync) test: TestClient is synchronous and nothing
    here is awaited, so the previous ``@pytest.mark.asyncio`` marker added
    a dependency on pytest-asyncio (absent from requirements.txt) for no
    benefit.
    """
    # BUG FIX: ``model`` is a query parameter on the route, not part of
    # the request body; previously it was sent inside the JSON payload,
    # where the endpoint silently ignored it.
    response = client.post(
        "/ask_gpt4/",
        params={"model": model},
        json={"user_input": query_params["user_input"]},
    )

    assert response.status_code == 200
    json_response = response.json()

    # Check if the response contains a valid answer
    assert "response" in json_response or "error" in json_response

    # If there's an error, check if it's a known error
    if "error" in json_response:
        assert json_response["error"] in [
            "ChatGPT response does not contain text attribute.",
            # Add other known errors here
        ]
    else:
        assert json_response["response"] == expected_output
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
@pytest.mark.parametrize("status_code, detail, expected_result", [
    (404, "Not Found", {"detail": "Not Found", "status_code": 404}),
    (500, "Internal Server Error", {"detail": "Internal Server Error", "status_code": 500}),
    (401, "Unauthorized", {"detail": "Unauthorized", "status_code": 401}),
])
def test_http_exception_handler(status_code: int, detail: Union[str, dict], expected_result: Any) -> None:
    """
    Function to test http exception handler.

    :param status_code: int, status code e.g. 400, 404 etc.
    :param detail: str or dict, detail message
    :param expected_result: dict expected back from the handler
    :return: None
    """
    import asyncio

    exc = HTTPException(status_code=status_code, detail=detail)
    # Drive the coroutine with the stdlib event loop instead of relying
    # on pytest-asyncio, which is not listed in requirements.txt (the
    # async test would otherwise be skipped or error out).
    result = asyncio.run(http_exception_handler(exc))
    assert result == expected_result
|
| 65 |
+
|
| 66 |
+
# @pytest.mark.parametrize(
|
| 67 |
+
# "user_input,expected_status_code",
|
| 68 |
+
# [
|
| 69 |
+
# ("What is the capital of France?", 200),
|
| 70 |
+
# ("", 400), # Invalid query
|
| 71 |
+
# ],
|
| 72 |
+
# )
|
| 73 |
+
# def test_ask_gpt4_route(user_input: str, expected_status_code: int):
|
| 74 |
+
# query = QueryModel(user_input=user_input)
|
| 75 |
+
# response = client.post("/ask_gpt4/", json=query.dict())
|
| 76 |
+
#
|
| 77 |
+
# assert response.status_code == expected_status_code
|
| 78 |
+
#
|
| 79 |
+
# if expected_status_code == 200:
|
| 80 |
+
# assert "response" in response.json()
|
| 81 |
+
# else:
|
| 82 |
+
# assert "error" in response.json()
|