from langchain_openai.chat_models import ChatOpenAI
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
from langchain_core.tools import tool
from langgraph.graph import START, StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition
import gradio as gr
import spaces
import base64
from PIL import Image
from collections import Counter
from typing import Annotated, TypedDict
import time, sys, os
sys.path.append('code')
from modrag_molecule_functions import *
from modrag_property_functions import *
from modrag_protein_functions import *
# Read the OpenAI key from the environment (set as a Space secret, never hard-coded).
openai_key = os.getenv("OPENAI_API_KEY")
# All MoDrAg tool nodes pulled in via the star imports above: molecule lookup,
# property prediction, protein/target utilities, and docking.
tools = [name_node, smiles_node, related_node, structure_node,
substitution_node, lipinski_node, pharmfeature_node,
uniprot_node, listbioactives_node, getbioactives_node,
predict_node, gpt_node, pdb_node, find_node, docking_node,
target_node]
# Chat model with the tool schemas bound so it can emit tool calls.
# NOTE(review): "gpt-5.2" must match an available OpenAI model name — confirm.
model = ChatOpenAI(model_name="gpt-5.2", api_key=openai_key).bind_tools(tools)
class State(TypedDict):
    """LangGraph state schema: one message list with append-merge semantics.

    ``add_messages`` makes LangGraph append/merge returned messages into the
    existing list instead of replacing it wholesale.
    """
    messages: Annotated[list, add_messages]
def model_node(state: State) -> State:
    """Invoke the tool-bound chat model on the current message history.

    The returned single message is merged into ``state['messages']`` by the
    ``add_messages`` reducer declared on ``State``.
    """
    response = model.invoke(state['messages'])
    return {'messages': response}
# Wire the agent loop: model -> (conditionally) tools -> model, until the
# model replies without tool calls.
builder = StateGraph(State)
builder.add_node('model', model_node)
builder.add_node('tools', ToolNode(tools))
builder.add_edge(START, 'model')
# tools_condition routes to 'tools' when the last AI message contains tool
# calls, otherwise to END.
builder.add_conditional_edges('model', tools_condition)
builder.add_edge('tools', 'model')
graph = builder.compile()
# System prompt that seeds every fresh conversation.
sys_message = SystemMessage(content="You are a helpful cat who says nyan and meow a lot.")
# NOTE(review): `global` at module level is a no-op; kept for byte-identity.
global messages
messages = [sys_message]
@spaces.GPU
def start_chat():
    """Reset the conversation to a fresh state.

    Re-seeds the global message history with the system prompt and empties
    both the Gradio chat transcript and the accumulated tool logs. Wired to
    the Clear button and called once at startup.
    """
    global chat_history, messages, reasoning
    messages = [sys_message]
    reasoning = []
    chat_history = []
@spaces.GPU
def chat_turn(prompt: str):
    '''
    Run one user turn through the agent graph.

    Appends the prompt to the global conversation, streams the compiled
    graph (alternating model and tool nodes), records assistant text and
    tool outputs, and picks up a 'current_image.png' if a tool wrote one
    within the last 30 seconds.

    Args:
        prompt: The user's query text from the Gradio textbox.

    Returns:
        tuple: ('' to clear the textbox, a PIL image or None, and the
        chat history in Gradio 'messages' format).
    '''
    global chat_history, messages, reasoning
    human_message = HumanMessage(content=prompt)
    messages.append(human_message)
    local_history = [prompt]
    # Fix: initialize img so an empty stream cannot leave it unbound, and so
    # an image found early in the stream is not discarded by later iterations.
    img = None
    # Renamed from `input` to avoid shadowing the builtin.
    graph_input = {
        'messages' : messages
    }
    for chunk in graph.stream(graph_input):
        # Model-node chunks carry the assistant message; other chunk shapes
        # raise KeyError/AttributeError here and are skipped.
        try:
            ai_mes = chunk['model']['messages'].content
            messages.append(AIMessage(ai_mes))
            if ai_mes != '':
                print(f'message is {ai_mes}')
                local_history.append(ai_mes)
        except (KeyError, AttributeError):
            pass
        # A tool may have just written an image to disk; only accept a file
        # modified within the last 30 seconds so stale images are ignored.
        try:
            if os.path.exists('current_image.png') and \
                    os.path.getmtime('current_image.png') > time.time() - 30:
                img = Image.open('current_image.png')
        except OSError:
            pass
        # Tool-node chunks contribute their raw output to the tool log.
        try:
            reasoning.append(chunk['tools']['messages'][0].content)
        except (KeyError, IndexError, AttributeError):
            pass
    # Guarantee an assistant entry even when the model produced no text.
    # (Was `!= 2`, which appended a dead 'no message' entry when the model
    # streamed several messages; the displayed reply is unchanged either way.)
    if len(local_history) < 2:
        local_history.append('no message')
    chat_history.append({'role': 'user', 'content': local_history[0]})
    chat_history.append({'role': 'assistant', 'content': local_history[1]})
    return '', img, chat_history
def send_reasoning():
    """Return the accumulated tool-log entries for the UI's log textbox."""
    # Read-only access to a module global needs no `global` declaration.
    return reasoning
# Initialize the global conversation state before the UI is built.
start_chat()
with gr.Blocks(fill_height=True) as OpenAIMoDrAg:
    top = gr.Markdown('''
# MoDrAg Chatbot using ChatGPT 5.2
- The *MOdular DRug design AGent*!
- This chatbot can answer questions about molecules, proteins, and their interactions.
It can also perform tasks such as predicting properties, finding similar molecules, and docking. Try it out!
- See the tool log box at the bottom for direct tool outputs.
''')
    chat = gr.Chatbot()
    with gr.Row(equal_height = True):
        msg = gr.Textbox(label = 'query', scale = 8)
        sub_button = gr.Button("Submit", scale = 2)
    clear = gr.ClearButton([msg, chat])
    img_box = gr.Image()
    reasoning_box = gr.Textbox(label="Tool logs", lines = 20)
    # Enter-key and Submit-button paths must behave identically: run the turn,
    # then refresh the tool-log box.
    msg.submit(chat_turn, [msg], [msg, img_box, chat]).then(send_reasoning, [], [reasoning_box])
    # Fix: the Submit button previously omitted the .then(send_reasoning, ...)
    # chain, so clicking it never updated the tool logs.
    sub_button.click(chat_turn, [msg], [msg, img_box, chat]).then(send_reasoning, [], [reasoning_box])
    clear.click(start_chat, [], [])
    @gr.render(inputs=top)
    def get_speech(args):
        # Embed a greeting MP3 as a base64 data URI so it autoplays on load.
        audio_file = 'MoDrAg_hello.mp3'
        with open(audio_file, 'rb') as audio_bytes:
            audio = base64.b64encode(audio_bytes.read()).decode("utf-8")
        audio_player = f'<audio src="data:audio/mpeg;base64,{audio}" controls autoplay></audio>'
        talk_ele = gr.HTML(audio_player)
# mcp_server=True also exposes the app's functions as MCP tools.
OpenAIMoDrAg.launch(mcp_server = True)