"""Gradio chat UI that streams OpenAI completions for job-posting extraction."""

import logging

import gradio as gr
from openai import OpenAI

# Basic logging; keep httpx (used internally by the OpenAI client) at
# WARNING so every HTTP request isn't logged.
logging.basicConfig(level=logging.INFO)
logging.getLogger("gradio").setLevel(logging.INFO)
logging.getLogger("httpx").setLevel(logging.WARNING)

# The client reads the OPENAI_API_KEY environment variable by default.
client = OpenAI()


def generate_completion(message, history):
    """Stream an assistant reply to `message`, given the prior chat `history`.

    Gradio's ChatInterface supplies `history` as a list of
    (user_message, assistant_message) pairs from earlier turns.
    """
    messages = [
        {
            "role": "system",
            "content": "You are a world-class extractor of information from messy job postings.",
        }
    ]

    # Replay earlier turns so the model sees the whole conversation.
    if history:
        for entry in history:
            if len(entry) == 2:
                user_msg, assistant_msg = entry
                messages.append({"role": "user", "content": user_msg})
                messages.append({"role": "assistant", "content": assistant_msg})

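    # Note: newer Gradio releases can also supply history in "messages"
    # format (a list of {"role": ..., "content": ...} dicts); the pair
    # check above assumes the classic tuple format.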

    # Append the new user message as the final turn.
    messages.append({"role": "user", "content": message})

    # temperature=0 keeps the extraction as deterministic as possible;
    # stream=True returns an iterator of incremental chunks rather than
    # one complete response.
    response = client.chat.completions.create(
        model="gpt-3.5-turbo-0125",
        messages=messages,
        stream=True,
        temperature=0,
        max_tokens=4000,
    )

    # Accumulate the streamed deltas and yield the running answer so the
    # Gradio UI updates as tokens arrive.
    answer_str: str = ""
    for chunk in response:
        delta = chunk.choices[0].delta.content
        if delta is not None:
            answer_str += delta
        yield answer_str
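

# A minimal sketch of driving the generator directly, without the UI
# (assumes OPENAI_API_KEY is set; the posting text is a made-up example):
#
#     final = ""
#     for final in generate_completion("Sr Pythn Dev @ Acme!! remote ok, pay DOE", []):
#         pass
#     print(final)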

if __name__ == "__main__":
    demo = gr.ChatInterface(fn=generate_completion)
    # queue() turns on request queueing, which generator (streaming)
    # responses require in older Gradio releases.
    demo.queue()
    demo.launch()
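
# If the Anthropic SDK is preferred, a drop-in replacement for the OpenAI
# call inside generate_completion might look like this (the Claude model
# name here is an assumption):
#
#     import anthropic
#
#     aclient = anthropic.Anthropic()  # reads ANTHROPIC_API_KEY
#     answer = ""
#     with aclient.messages.stream(
#         model="claude-3-5-sonnet-latest",
#         max_tokens=4000,
#         system=messages[0]["content"],
#         messages=messages[1:],
#     ) as stream:
#         for text in stream.text_stream:
#             answer += text
#             yield answer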