ayushsinghal1510 commited on
Commit
9b05cbc
·
1 Parent(s): 496994e

Init commit

Browse files
Files changed (5) hide show
  1. .gitignore +4 -0
  2. Dockerfile +13 -0
  3. app.py +59 -0
  4. config.yaml +7 -0
  5. requirements.txt +5 -0
.gitignore ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ *.env
2
+ *.pyc
3
+
4
+ __pycache__/
Dockerfile ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.9
2
+
3
+ RUN useradd -m -u 1000 user
4
+ USER user
5
+ ENV PATH="/home/user/.local/bin:$PATH"
6
+
7
+ WORKDIR /app
8
+
9
+ COPY --chown=user ./requirements.txt requirements.txt
10
+ RUN pip install --no-cache-dir --upgrade -r requirements.txt
11
+
12
+ COPY --chown=user . /app
13
+ CMD ["python" , "app.py"]
app.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Chloe Chat backend: a FastAPI service proxying the Groq chat-completions API."""
import os

import uvicorn
import yaml
from dotenv import load_dotenv
from fastapi import FastAPI , Request , HTTPException
from groq import Groq

# Pull GROQ_API_KEY (and any other secrets) from a local .env file if present.
load_dotenv()

app : FastAPI = FastAPI()
groq_client : Groq = Groq(api_key = os.getenv('GROQ_API_KEY'))

# Model and sampling settings live in config.yaml alongside this file.
with open('config.yaml') as fh :
    config : dict = yaml.safe_load(fh)
12
+
13
@app.get('/')
async def root() -> dict :
    """Health-check endpoint: confirms the backend is up and serving."""
    return {'message' : 'Chloe Chat Backend is running!'}
16
+
17
@app.post('/ask')
async def ask(request : Request) -> str :
    """Answer a user query via Groq chat completion.

    Expects a JSON body with keys ``query`` (the user message) and
    ``system-prompt`` (the system message). Model and sampling parameters
    come from the ``groq`` section of config.yaml.

    Returns the assistant's reply as a plain string.
    Raises HTTPException(400) when a required field is missing.
    """
    # FIX: return annotation was `-> None` but the function returns a string.
    data : dict = await request.json()

    if (
        'query' not in data or
        'system-prompt' not in data
    ) : raise HTTPException(
        status_code = 400 ,
        detail = 'Missing required fields: query, system-prompt'
    )

    query : str = data['query']
    system_prompt : str = data['system-prompt']

    # NOTE(review): config['groq']['stream'] is False in config.yaml; if it
    # were True the response would be an iterator and `.choices` would fail.
    response = groq_client.chat.completions.create(
        model = config['groq']['model'] ,
        messages = [
            {
                'role' : 'system' ,
                'content' : system_prompt
            } ,
            {
                'role' : 'user' ,
                'content' : query
            }
        ] ,
        temperature = config['groq']['temperature'] ,
        max_completion_tokens = config['groq']['max-completion-tokens'] ,
        top_p = config['groq']['top-p'] ,
        stream = config['groq']['stream'] ,
        stop = config['groq']['stop']
    )

    # Coerce to str so a None content still yields a string response.
    return str(response.choices[0].message.content)
53
+
54
+
55
if __name__ == '__main__' :
    # Serve on all interfaces at port 7860 (the port HF Spaces exposes).
    uvicorn.run('app:app' , host = '0.0.0.0' , port = 7860)
config.yaml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ groq :
2
+ model : llama-3.3-70b-versatile
3
+ temperature : 1
4
+ max-completion-tokens : 1024
5
+ top-p : 1
6
+ stream : False
7
+ stop :
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ groq
2
+ fastapi
3
+ uvicorn
4
+ PyYAML
5
+ python-dotenv