Aziz3 committed on
Commit
fb05861
·
1 Parent(s): 9581bec

Fixing Prompt error

Browse files
Files changed (1) hide show
  1. app.py +30 -56
app.py CHANGED
@@ -1,70 +1,44 @@
1
- import os
2
- from openai import AsyncOpenAI # importing openai for API usage
3
- import chainlit as cl # importing chainlit for our app
4
- from chainlit.prompt import Prompt, PromptMessage # importing prompt tools
5
- from chainlit.playground.providers import ChatOpenAI # importing ChatOpenAI tools
6
- from dotenv import load_dotenv
7
 
8
- load_dotenv()
9
 
10
- system_template = """You are a helpful assistant who always speaks in a pleasant tone!"""
11
- user_templere = """Think through your response step by step"""
12
 
13
- @cl.on_chat_start
14
- async def start_chat():
15
- settings = {
16
- "model": "gpt-3.5-turo",
17
- "temperature":0,
18
- "max_tokens":500,
19
- "top_p":1,
20
- "frequency_penalty":0,
21
- "presence_penalty":0,
22
- }
23
- cl.user_session.set("settings",settings)
24
-
25
-
26
- @cl.on_message # marks a function that should be run each time the chatbot receives a message from a user
27
- async def main(message: cl.Message):
28
- settings = cl.user_session.get("settings")
29
 
30
- client = AsyncOpenAI()
31
 
32
- print(message.content)
33
-
34
- prompt = Prompt(
35
  provider=ChatOpenAI.id,
 
 
36
  messages=[
37
- PromptMessage(
38
- role="system",
39
- template=system_template,
40
- formatted=system_template,
41
- ),
42
- PromptMessage(
43
- role="user",
44
- template=user_template,
45
- formatted=user_template.format(input=message.content),
46
- ),
47
  ],
48
- inputs={"input": message.content},
49
- settings=settings,
50
  )
51
 
52
- print([m.to_openai() for m in prompt.messages])
 
 
 
 
 
53
 
54
- msg = cl.Message(content="")
55
 
56
- # Call OpenAI
57
- async for stream_resp in await client.chat.completions.create(
58
- messages=[m.to_openai() for m in prompt.messages], stream=True, **settings
59
- ):
60
- token = stream_resp.choices[0].delta.content
61
- if not token:
62
- token = ""
63
- await msg.stream_token(token)
64
 
65
- # Update the prompt object with the completion
66
- prompt.completion = msg.content
67
- msg.prompt = prompt
68
 
69
- # Send and close the message stream
70
- await msg.send()
 
 
1
from openai import AsyncOpenAI
import chainlit as cl
from chainlit.playground.providers import ChatOpenAI


# Single shared async OpenAI client, created once at module import.
client = AsyncOpenAI()

# Prompt template plus the inputs used to fill it in.
template = "Hello, {name}!"
inputs = {"name": "John"}

# Generation settings, passed straight through to the chat-completions call.
settings = {
    "model": "gpt-3.5-turbo",
    "temperature": 0,
    # ... more settings
}
 
 
 
 
 
 
 
 
 
 
 
15
 
 
16
 
17
@cl.step(name="gpt-3.5", type="llm")
async def call_openai():
    """Run one chat completion and attach the generation to the current step.

    Builds a ``cl.ChatGeneration`` from the module-level template/inputs/
    settings, sends the formatted message to OpenAI, records the completion
    on the generation, and returns the completion text.
    """
    gen = cl.ChatGeneration(
        provider=ChatOpenAI.id,
        inputs=inputs,
        settings=settings,
        messages=[
            cl.GenerationMessage(
                template=template,
                formatted=template.format(**inputs),
                role="assistant",
            )
        ],
    )

    # Make the call to OpenAI (non-streaming).
    response = await client.chat.completions.create(
        messages=[m.to_openai() for m in gen.messages], **settings
    )

    # Record the model's reply on the generation so the step UI can show it.
    gen.completion = response.choices[0].message.content
    cl.context.current_step.generation = gen

    return gen.completion
 
 
 
 
 
 
 
40
 
 
 
 
41
 
42
@cl.on_chat_start
async def start():
    """Fire a single OpenAI call as soon as a chat session opens."""
    await call_openai()