Skalalae committed on
Commit
c93641e
·
verified ·
1 Parent(s): 3ec0e09

Upload 5 files

Browse files
Files changed (5) hide show
  1. .gitattributes +35 -0
  2. app.py +135 -0
  3. documents.pkl +3 -0
  4. interface.py +45 -0
  5. requirements.txt +36 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from langchain_community.chat_models import ChatOpenAI
3
+ from langchain.memory import ConversationBufferMemory, SimpleMemory
4
+ from langchain.agents import initialize_agent, AgentType
5
+ from dotenv import load_dotenv
6
+ import os
7
+ import agent.planning_agent as planning_agent
8
+ import logging
9
+
10
+ from langchain_community.callbacks import ClearMLCallbackHandler
11
+ from langchain_core.callbacks import StdOutCallbackHandler
12
+ from clearml import Logger, Task
13
+
14
# ClearML callback handler shared with the rest of the module; populated
# by initialize_components().
clearml_callback = None

def initialize_components():
    """Set up the ClearML callback handler for LLM call tracing.

    Credentials (OpenAI / ClearML / SerpAPI) must come from the
    environment, never from source code.  NOTE(review): the original
    version hard-coded live API keys in this function — those keys are
    compromised by having been committed and must be rotated/revoked.

    Returns:
        list: callback handlers (stdout + ClearML) to pass to LangChain
        invocations.
    """
    # The original assigned to a local here, leaving the module-level
    # `clearml_callback` permanently None — declare the global explicitly.
    global clearml_callback

    clearml_callback = ClearMLCallbackHandler(
        task_type="inference",
        project_name="langchain_callback_demo",
        task_name="llm",
        tags=["test"],
        # Change the following parameters based on the amount of detail
        # you want tracked.
        visualize=True,
        complexity_metrics=True,
        stream_logs=True,
    )

    # Return the handlers instead of dropping them in a dead local.
    callbacks = [StdOutCallbackHandler(), clearml_callback]
    return callbacks
38
+ def main():
39
+
# NOTE(review): report_logs and report_debug_text are not defined anywhere
# in this file — calling this main() raises NameError.  This definition is
# also shadowed by a second `def main()` later in the file, so the
# __main__ guard never reaches it.  Confirm whether this is dead code that
# should be deleted.
40
+ a_task = Task.get_task(project_name='langchain_callback_demo', task_name='llm')
41
+ loer = a_task.get_logger()
42
+ report_logs(loer)
43
+ # report text as debug example
44
+ report_debug_text(loer)
45
+ loer.flush()
46
+
47
+
48
# Module-wide logging: INFO level, one named logger for this module.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Shared application state, filled in lazily by initialize_components().
llm = chat_memory = query_memory = None
56
+
57
def initialize_components():
    """Create the LLM, the conversation memories, and the planning agent.

    Loads a local .env file, reads the OpenAI API key from the OA_API
    environment variable, and stores the initialised objects in the
    module-level globals used by the Gradio callbacks.

    Raises:
        RuntimeError: if the OA_API environment variable is not set.
    """
    global llm, chat_memory, query_memory
    load_dotenv()
    api_key = os.environ.get('OA_API')
    if not api_key:
        # Fail fast with a clear message instead of an opaque KeyError.
        raise RuntimeError("OA_API environment variable is not set")
    os.environ['OPENAI_API_KEY'] = api_key

    llm = ChatOpenAI(
        model_name="gpt-3.5-turbo",
        temperature=0,
        api_key=api_key,
    )

    # Conversation history shared with the planning agent.
    chat_memory = ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True,
    )
    # Simple key/value scratch space (e.g. the original user query).
    query_memory = SimpleMemory()

    # Give the planning agent access to both memories.
    planning_agent.initialize_planning_agent(llm, chat_memory, query_memory)

    logger.info("Components initialized successfully")
80
+
81
def process_query(query, history):
    """Run a user query through the planning agent.

    Args:
        query: the user's message text.
        history: Gradio chat history as a list of (user, assistant) pairs.

    Returns:
        str: the agent's response, or an error message on failure (the
        error is also recorded in chat memory so the UI stays coherent).
    """
    try:
        # Rebuild chat memory from Gradio's history.  Clear it first:
        # chat_memory is a module-level object that persists across calls,
        # so replaying history without clearing duplicated every prior
        # message on each turn.
        if chat_memory and hasattr(chat_memory, 'chat_memory'):
            chat_memory.clear()
            for human_msg, ai_msg in history or []:
                chat_memory.chat_memory.add_user_message(human_msg)
                chat_memory.chat_memory.add_ai_message(ai_msg)

        # Make the raw query available to the agent's tools; guard None
        # like chat_memory (previously an unguarded AttributeError risk).
        if query_memory is not None:
            query_memory.memories['original_query'] = query

        # Execute query through planning agent
        response = planning_agent.execute(query)

        # Record the current turn.
        if chat_memory and hasattr(chat_memory, 'chat_memory'):
            chat_memory.chat_memory.add_user_message(query)
            chat_memory.chat_memory.add_ai_message(response)

        return response

    except Exception as e:
        error_msg = f"Error processing query: {str(e)}"
        logger.error(f"Error details: {str(e)}")

        # Keep the failed turn in memory so the conversation stays consistent.
        if chat_memory and hasattr(chat_memory, 'chat_memory'):
            chat_memory.chat_memory.add_user_message(query)
            chat_memory.chat_memory.add_ai_message(error_msg)

        return error_msg
112
+
113
def clear_context():
    """Reset agent, chat, and query state; return empty UI values.

    Returns:
        tuple: empty values for the [chatbot, msg] Gradio outputs.
    """
    planning_agent.clear_context()
    # Guard against being invoked before initialize_components() has run
    # (previously an unconditional AttributeError on None).
    if chat_memory is not None:
        chat_memory.clear()
    if query_memory is not None:
        query_memory.memories.clear()
    return [], []
118
+
119
def create_gradio_app():
    """Build the Gradio UI, wiring in the query and clear callbacks."""
    # Imported lazily so the UI module is only loaded when the app starts.
    from interface import create_interface
    demo = create_interface(process_query, clear_context)
    return demo
122
+
123
def main():
    """Main application entry point: initialise components and launch the UI.

    Raises:
        Exception: re-raises any startup failure after logging it.
    """
    try:
        initialize_components()
        app = create_gradio_app()
        app.queue()
        # Bind on all interfaces for container/Spaces deployment.
        app.launch(server_name="0.0.0.0", server_port=7860, share=True)
    except Exception as e:
        # logger.exception records the full traceback, not just the message.
        logger.exception(f"Error in main: {str(e)}")
        raise

if __name__ == "__main__":
    main()
documents.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0132d52582d53422098dfaec3a361e369e791db0a137d8b012ad920851f18b3f
3
+ size 2382882
interface.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
def create_interface(process_query, clear_context):
    """Assemble the chat UI.

    Args:
        process_query: callable(message, history) -> response text.
        clear_context: callable() -> fresh values for (chatbot, msg).

    Returns:
        gr.Blocks: the assembled Gradio app.
    """
    with gr.Blocks(title="AI Assistant") as demo:
        chatbot = gr.Chatbot(
            [],
            elem_id="chatbot",
            bubble_full_width=False,
            height=400,
        )

        with gr.Row():
            msg = gr.Textbox(
                label="Your Message",
                placeholder="Type your message here...",
                scale=8,
            )
            submit = gr.Button("Submit", scale=1)

        clear = gr.Button("Clear")

        def process_message(message, history):
            # Append the new (user, bot) turn and blank the input box.
            reply = process_query(message, history)
            history.append((message, reply))
            return "", history

        # Enter key and Submit button trigger the same round-trip.
        io_pair = [msg, chatbot]
        msg.submit(process_message, io_pair, io_pair)
        submit.click(process_message, io_pair, io_pair)

        # Clear resets backend state and both widgets.
        clear.click(clear_context, None, [chatbot, msg])

    return demo
requirements.txt ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # LangChain Ecosystem
2
+ langchain==0.1.3
3
+ langchain_openai==0.0.5
4
+ langchain-community
5
+ langchain-core
6
+
7
+ # Vector Databases & Similarity Search
8
+ chromadb==0.5.0
9
+ faiss-cpu
10
+
11
+ # OpenAI
12
+ openai==1.10
13
+ tiktoken>=0.5.2
14
+
15
+ # UI Framework
16
+ gradio==4.44.1
17
+ setuptools>=65.5.1
18
+
19
+ # Data Processing & Utils
20
+ numpy==1.26.2
21
+ pandas==2.1.3
22
+ python-dotenv==1.0.0
23
+
24
+ # Core Dependencies
25
+ #pydantic==2.5.2
26
+ typing-extensions==4.8.0
27
+ requests==2.31.0
28
+
29
+ # ClearML
30
+ clearml
31
+ spacy>=3.0.0,<4.0.0
32
+ en_core_web_sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.7.1/en_core_web_sm-3.7.1-py3-none-any.whl
33
+ textstat
34
+ serpapi==0.1.5
35
+ google-search-results
36
+ numexpr