Upload 17 files
Browse files- .env +2 -0
- .gitattributes +2 -35
- .gitignore +2 -0
- LICENSE +21 -0
- README.md +3 -13
- a.txt +1 -0
- b.txt +3 -0
- basic_chains.py +44 -0
- basic_knowledgebase.py +57 -0
- basic_llm.py +42 -0
- basic_memory.py +109 -0
- basic_tools.py +86 -0
- brainrent_part1.txt +5 -0
- brainrent_part2.pdf +0 -0
- fibonacci_list.py +20 -0
- range_h.md +7 -0
- requirements.txt +87 -0
.env
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
OPENAI_API_KEY=REDACTED  # SECURITY: a live key was committed here — revoke/rotate it immediately and keep .env untracked
|
| 2 |
+
DEEPSEEK_API_KEY=REDACTED  # SECURITY: a live key was committed here — revoke/rotate it immediately and keep .env untracked
|
.gitattributes
CHANGED
|
@@ -1,35 +1,2 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
-
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
-
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
-
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
-
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
-
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
-
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
-
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
-
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
-
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
-
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
-
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
-
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
-
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
-
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
-
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
-
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
-
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
-
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
-
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
-
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
-
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
-
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
-
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
-
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
-
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
-
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
-
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
-
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
-
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
-
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
-
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
| 1 |
+
# Auto detect text files and perform LF normalization
|
| 2 |
+
* text=auto
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.gitignore
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.env
|
| 2 |
+
venv/
|
LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2025 range-h
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
README.md
CHANGED
|
@@ -1,13 +1,3 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
colorFrom: purple
|
| 5 |
-
colorTo: purple
|
| 6 |
-
sdk: gradio
|
| 7 |
-
sdk_version: 5.35.0
|
| 8 |
-
app_file: app.py
|
| 9 |
-
pinned: false
|
| 10 |
-
license: apache-2.0
|
| 11 |
-
---
|
| 12 |
-
|
| 13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 1 |
+
# sample-python
|
| 2 |
+
|
| 3 |
+
### I am king
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
a.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
这是a.txt的内容
|
b.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
its a new file named as b.txt
|
| 2 |
+
x = 1
|
| 3 |
+
y = 1
|
basic_chains.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain.prompts import PromptTemplate
|
| 2 |
+
from langchain.chains import LLMChain
|
| 3 |
+
from langchain_community.chat_models import ChatOpenAI
|
| 4 |
+
from langchain_deepseek import ChatDeepSeek
|
| 5 |
+
from langchain_core.runnables import RunnableLambda, RunnableMap
|
| 6 |
+
|
| 7 |
+
from dotenv import load_dotenv
|
| 8 |
+
import os
|
| 9 |
+
|
| 10 |
+
load_dotenv()
|
| 11 |
+
|
| 12 |
+
def get_summary_prompt():
    """Prompt that asks the model to summarize a block of text."""
    template = "Summarize the following:\n\n{text}"
    return PromptTemplate.from_template(template)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def get_title_prompt():
    """Prompt that asks the model for a five-word title of a summary."""
    template = "Create a 5-word title for this:\n\n{summary}"
    return PromptTemplate.from_template(template)
|
| 18 |
+
|
| 19 |
+
def translate_prompt():
    """Prompt that asks the model to translate text into Chinese."""
    template = "Translate the following text to Chinese:\n\n{text}"
    return PromptTemplate.from_template(template)
|
| 21 |
+
|
| 22 |
+
def build_hyper_chain():
    """Build a runnable that summarizes text, titles the summary, and
    translates the summary to Chinese.

    Returns:
        A RunnableLambda. Invoke it with ``{"text": ...}``; it returns
        ``{"summary", "title", "chinese_summary"}`` where each value is the
        model's message object (text available via ``.content``).
    """
    llm = ChatDeepSeek(api_key=os.getenv("DEEPSEEK_API_KEY"), model="deepseek-chat", temperature=0.7)
    summarize_chain = get_summary_prompt() | llm
    title_chain = get_title_prompt() | llm
    translate_chain = translate_prompt() | llm

    def chain_fn(inputs):
        summary = summarize_chain.invoke({"text": inputs["text"]})
        # Bug fix: pass the summary TEXT, not the AIMessage object — the
        # title prompt was previously formatted with the message's repr.
        # (The translate step below already used .content correctly.)
        title = title_chain.invoke({"summary": summary.content})
        chinese_summary = translate_chain.invoke({"text": summary.content})
        return {"summary": summary, "title": title, "chinese_summary": chinese_summary}

    return RunnableLambda(chain_fn)
|
| 34 |
+
|
| 35 |
+
if __name__ == "__main__":
    paragraph = input("Paste your paragraph:\n\n")

    pipeline = build_hyper_chain()
    results = pipeline.invoke({"text": paragraph})

    # Show each stage's output.
    print(f"\n[Summary]: {results['summary'].content}")
    print(f"\n[Title]: {results['title'].content}")
    print(f"\n[Chinese Summary]: {results['chinese_summary'].content}")
|
basic_knowledgebase.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain.agents import initialize_agent, AgentType, Tool
|
| 2 |
+
from langchain.memory import ConversationBufferMemory
|
| 3 |
+
from langchain_deepseek import ChatDeepSeek
|
| 4 |
+
from langchain_openai import ChatOpenAI
|
| 5 |
+
from langchain.document_loaders import TextLoader
|
| 6 |
+
from langchain_community.document_loaders import PyPDFLoader # Updated import for PDF loading
|
| 7 |
+
|
| 8 |
+
from langchain.text_splitter import CharacterTextSplitter
|
| 9 |
+
from langchain.chains.question_answering import load_qa_chain
|
| 10 |
+
from dotenv import load_dotenv
|
| 11 |
+
import os
|
| 12 |
+
|
| 13 |
+
load_dotenv()
|
| 14 |
+
|
| 15 |
+
llm = ChatDeepSeek(api_key=os.getenv("DEEPSEEK_API_KEY"), model="deepseek-chat")
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
memory = ConversationBufferMemory(
|
| 19 |
+
memory_key="chat_history",
|
| 20 |
+
return_messages=True
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
def doc_tool(query: str) -> str:
    """Search the knowledge base for an answer to the question in Chinese.

    The source document is loaded and split only once; the chunks are cached
    on the function object so repeated agent calls do not re-read and
    re-split the file for every question.
    """
    chunks = getattr(doc_tool, "_chunks", None)
    if chunks is None:
        docs = TextLoader("brainrent_part1.txt", encoding="utf-8").load()
        splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
        chunks = splitter.split_documents(docs)
        doc_tool._chunks = chunks  # cache for subsequent queries
    qa_chain = load_qa_chain(llm=llm, chain_type="stuff")
    return qa_chain.run(input_documents=chunks, question=query)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
tools = [
|
| 33 |
+
Tool(
|
| 34 |
+
name="KnowledgeBase",
|
| 35 |
+
func=doc_tool,
|
| 36 |
+
description="Useful for answering questions about Brainrent."
|
| 37 |
+
),
|
| 38 |
+
]
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
# 4. Initialize the agent with tools and memory
|
| 42 |
+
agent = initialize_agent(
|
| 43 |
+
tools=tools,
|
| 44 |
+
llm=llm,
|
| 45 |
+
agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
|
| 46 |
+
memory=memory,
|
| 47 |
+
verbose=True
|
| 48 |
+
)
|
| 49 |
+
|
| 50 |
+
if __name__ == "__main__":
    # Simple REPL: forward each question to the agent until the user quits.
    while True:
        user_query = input("\nEnter your question (or 'exit' to quit): ")
        if user_query.lower() == 'exit':
            break
        answer = agent.invoke(user_query)
        print(f"Result: {answer['output']}")
|
| 57 |
+
|
basic_llm.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain.prompts import PromptTemplate
|
| 2 |
+
from langchain.chains import LLMChain
|
| 3 |
+
from langchain_community.chat_models import ChatOpenAI
|
| 4 |
+
from langchain_deepseek import ChatDeepSeek
|
| 5 |
+
|
| 6 |
+
from dotenv import load_dotenv
|
| 7 |
+
import os
|
| 8 |
+
|
| 9 |
+
load_dotenv()
|
| 10 |
+
|
| 11 |
+
def get_openai_chain():
    """Builds an LLMChain using OpenAI's model."""
    question_prompt = PromptTemplate.from_template("Answer this: {question}")
    model = ChatOpenAI(openai_api_key=os.getenv("OPENAI_API_KEY"), model="gpt-4o-mini")
    return LLMChain(llm=model, prompt=question_prompt)
|
| 16 |
+
|
| 17 |
+
def get_deepseek_chain():
    """Builds a runnable (prompt piped into the model) using DeepSeek's model."""
    question_prompt = PromptTemplate.from_template("Answer this: {question}")
    model = ChatDeepSeek(api_key=os.getenv("DEEPSEEK_API_KEY"), model="deepseek-chat")
    return question_prompt | model
|
| 23 |
+
|
| 24 |
+
def get_chain_by_model(model_name: str) -> LLMChain:
    """Chooses the chain based on model selection."""
    builders = {
        "openai": get_openai_chain,
        "deepseek": get_deepseek_chain,
    }
    builder = builders.get(model_name)
    if builder is None:
        raise ValueError("Unsupported model. Choose 'openai' or 'deepseek'.")
    return builder()
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
if __name__ == "__main__":
    question = input("Enter your question: ").strip()

    model = input("Choose model (openai / deepseek): ").strip().lower()

    chain = get_chain_by_model(model)
    response = chain.invoke({"question": question})
    # Bug fix: the two builders return different object kinds.
    # get_deepseek_chain() yields a message (text in .content), while
    # get_openai_chain() is an LLMChain whose invoke() yields a dict
    # (text under "text"). Reading .content unconditionally crashed on
    # the openai path.
    if hasattr(response, "content"):
        answer = response.content
    else:
        answer = response["text"]
    print(f"\n[Response]:\n{answer}")
|
basic_memory.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
from typing import Optional
|
| 3 |
+
from langchain.agents import initialize_agent,Tool, AgentType
|
| 4 |
+
from langchain.memory import ConversationBufferMemory
|
| 5 |
+
from langchain_deepseek import ChatDeepSeek
|
| 6 |
+
from dotenv import load_dotenv
|
| 7 |
+
import os
|
| 8 |
+
from datetime import datetime
|
| 9 |
+
|
| 10 |
+
load_dotenv()
|
| 11 |
+
# 1. Initialize DeepSeek LLM
|
| 12 |
+
# You may need to pass your DeepSeek API key or client config here.
|
| 13 |
+
llm = ChatDeepSeek(api_key=os.getenv("DEEPSEEK_API_KEY"), model="deepseek-chat")
|
| 14 |
+
|
| 15 |
+
def get_weather(location: str) -> str:
    """Return a canned weather report for *location*.

    Stub implementation — replace with a real weather API call.
    """
    forecast = (
        f"Weather forecast for {location}: Sunny with a high of 25°C "
        f"and low of 15°C. 10% chance of precipitation."
    )
    return forecast
|
| 18 |
+
|
| 19 |
+
def _extract_numbers(text: str) -> list:
    """Pull every numeric token (including negatives and decimals) out of *text*."""
    numbers = []
    for token in text.split():
        try:
            numbers.append(float(token))
        except ValueError:
            continue  # non-numeric word such as "add" or "and"
    return numbers


def calculator(operation_input: str) -> str:
    """Perform basic arithmetic operations from a text description.

    The operation is detected from keywords/symbols in the input (checked in
    order: add/+, subtract/-, multiply/*, divide//, power/^, sqrt) and the
    operands are parsed from its numeric tokens.

    Returns:
        A human-readable "x <op> y = z" string, or an "Error: ..." string
        when the operation or operands cannot be determined.
    """
    try:
        lowered = operation_input.lower()
        # Bug fix: the old filter (`s.replace('.', '', 1).isdigit()`) silently
        # dropped negative numbers, so e.g. "subtract 10 -3" failed. float()
        # parsing accepts signs and decimals; it also removes the six
        # copy-pasted parsing lines.
        numbers = _extract_numbers(operation_input)

        if "add" in lowered or "+" in operation_input:
            if len(numbers) >= 2:
                return f"{numbers[0]} + {numbers[1]} = {numbers[0] + numbers[1]}"
            return "Error: Could not identify two numbers for addition"

        if "subtract" in lowered or "-" in operation_input:
            if len(numbers) >= 2:
                return f"{numbers[0]} - {numbers[1]} = {numbers[0] - numbers[1]}"
            return "Error: Could not identify two numbers for subtraction"

        if "multiply" in lowered or "*" in operation_input:
            if len(numbers) >= 2:
                return f"{numbers[0]} * {numbers[1]} = {numbers[0] * numbers[1]}"
            return "Error: Could not identify two numbers for multiplication"

        if "divide" in lowered or "/" in operation_input:
            if len(numbers) >= 2:
                if numbers[1] == 0:
                    return "Error: Division by zero"
                return f"{numbers[0]} / {numbers[1]} = {numbers[0] / numbers[1]}"
            return "Error: Could not identify two numbers for division"

        if "power" in lowered or "^" in operation_input:
            if len(numbers) >= 2:
                return f"{numbers[0]}^{numbers[1]} = {numbers[0] ** numbers[1]}"
            return "Error: Could not identify two numbers for power operation"

        if "sqrt" in lowered:
            if numbers:
                if numbers[0] < 0:
                    return "Error: Cannot take square root of negative number"
                return f"sqrt({numbers[0]}) = {math.sqrt(numbers[0])}"
            return "Error: Could not identify a number for square root"

        return "Unknown operation. Please specify one of: add, subtract, multiply, divide, power, sqrt"
    except Exception as e:
        return f"Error performing calculation: {str(e)}"
|
| 76 |
+
|
| 77 |
+
tools = [
|
| 78 |
+
Tool(
|
| 79 |
+
name="Weather",
|
| 80 |
+
func=get_weather,
|
| 81 |
+
description="Get the current weather for a specified location."
|
| 82 |
+
),
|
| 83 |
+
Tool(
|
| 84 |
+
name="Calculator",
|
| 85 |
+
func=calculator,
|
| 86 |
+
description="Perform mathematical calculations. Available operations: add, subtract, multiply, divide, power, sqrt. For example: operation='add', a=2, b=3 will return '2 + 3 = 5'."
|
| 87 |
+
),
|
| 88 |
+
]
|
| 89 |
+
|
| 90 |
+
memory = ConversationBufferMemory(
|
| 91 |
+
memory_key="chat_history",
|
| 92 |
+
return_messages=True
|
| 93 |
+
)
|
| 94 |
+
|
| 95 |
+
agent_executor = initialize_agent(
|
| 96 |
+
tools=tools,
|
| 97 |
+
llm=llm,
|
| 98 |
+
memory=memory,
|
| 99 |
+
agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
|
| 100 |
+
verbose=True,
|
| 101 |
+
)
|
| 102 |
+
|
| 103 |
+
if __name__ == "__main__":
    # Interactive loop: hand each question to the agent until the user quits.
    while True:
        user_query = input("\nEnter your question (or 'exit' to quit): ")
        if user_query.lower() == 'exit':
            break
        response = agent_executor.invoke(user_query)
        print(f"Result: {response['output']}")
|
basic_tools.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from langchain.agents import AgentType, initialize_agent
|
| 3 |
+
from langchain.tools import StructuredTool
|
| 4 |
+
from langchain_deepseek import ChatDeepSeek
|
| 5 |
+
from pydantic import BaseModel, Field
|
| 6 |
+
from typing import Optional
|
| 7 |
+
import math
|
| 8 |
+
import requests
|
| 9 |
+
|
| 10 |
+
class CalculatorInput(BaseModel):
|
| 11 |
+
operation: str = Field(description="Mathematical operation: add, subtract, multiply, divide, power, sqrt")
|
| 12 |
+
a: float = Field(description="First number")
|
| 13 |
+
b: Optional[float] = Field(default=None, description="Second number (not required for sqrt)")
|
| 14 |
+
|
| 15 |
+
def calculator(operation: str, a: float, b: Optional[float] = None) -> str:
    """Perform basic arithmetic operations.

    Args:
        operation: One of add, subtract, multiply, divide, power, sqrt
            (case-insensitive).
        a: First operand.
        b: Second operand; required for every operation except sqrt.

    Returns:
        A human-readable "x <op> y = z" string, or an "Error: ..." /
        "Unknown operation: ..." string on failure.
    """
    op = operation.lower()

    # Bug fix: the binary operations previously raised an uncaught TypeError
    # when the agent omitted `b` (it defaults to None); report it as an
    # error string like every other failure path.
    if op in {"add", "subtract", "multiply", "divide", "power"} and b is None:
        return f"Error: operation '{op}' requires two numbers"

    if op == "add":
        return f"{a} + {b} = {a + b}"
    if op == "subtract":
        return f"{a} - {b} = {a - b}"
    if op == "multiply":
        return f"{a} * {b} = {a * b}"
    if op == "divide":
        if b == 0:
            return "Error: Division by zero"
        return f"{a} / {b} = {a / b}"
    if op == "power":
        return f"{a}^{b} = {a ** b}"
    if op == "sqrt":
        if a < 0:
            return "Error: Cannot take square root of negative number"
        return f"sqrt({a}) = {math.sqrt(a)}"
    return f"Unknown operation: {op}"
|
| 37 |
+
|
| 38 |
+
calculator_tool = StructuredTool.from_function(
|
| 39 |
+
func=calculator,
|
| 40 |
+
name="Calculator",
|
| 41 |
+
description="Useful for performing mathematical calculations. Input should include operation (add, subtract, multiply, divide, power, sqrt) and numbers.",
|
| 42 |
+
args_schema=CalculatorInput
|
| 43 |
+
)
|
| 44 |
+
|
| 45 |
+
class WeatherInput(BaseModel):
|
| 46 |
+
city: str = Field(description="Name of the city to get the weather for")
|
| 47 |
+
|
| 48 |
+
def get_weather(city: str) -> str:
    """Return mock weather info for *city* (no real API is called)."""
    known_conditions = {
        "北京": {"description": "晴天", "temp": 26},
        "上海": {"description": "小雨", "temp": 22},
        "广州": {"description": "多云", "temp": 30},
        "杭州": {"description": "雷阵雨", "temp": 24},
    }
    fallback = {"description": "未知", "temp": "N/A"}
    report = known_conditions.get(city, fallback)
    return f"{city} 当前天气:{report['description']},温度:{report['temp']}°C"
|
| 59 |
+
|
| 60 |
+
weather_tool = StructuredTool.from_function(
|
| 61 |
+
func=get_weather,
|
| 62 |
+
name="WeatherTool",
|
| 63 |
+
description="查询指定城市的天气信息(模拟数据)",
|
| 64 |
+
args_schema=WeatherInput
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
llm = ChatDeepSeek(api_key=os.getenv("DEEPSEEK_API_KEY"), model="deepseek-chat")
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
tools = [calculator_tool, weather_tool]
|
| 72 |
+
|
| 73 |
+
agent = initialize_agent(
|
| 74 |
+
tools,
|
| 75 |
+
llm,
|
| 76 |
+
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
|
| 77 |
+
verbose=True
|
| 78 |
+
)
|
| 79 |
+
|
| 80 |
+
if __name__ == "__main__":
|
| 81 |
+
while True:
|
| 82 |
+
query = input("\nEnter your question (or 'exit' to quit): ")
|
| 83 |
+
if query.lower() == 'exit':
|
| 84 |
+
break
|
| 85 |
+
result = agent.invoke(query)
|
| 86 |
+
print(f"Result: {result['output']}")
|
brainrent_part1.txt
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
脑租科技有限公司(BrainRent Tech Co., Ltd.)是一家前沿科技企业,专注于“思维即服务(TaaS,Thinking as a Service)”模式,为全球用户提供按需租用智能大脑资源的服务。公司总部位于多伦多,研发中心设在硅谷和青岛,致力于打造世界一流的云端智能思维平台。
|
| 2 |
+
|
| 3 |
+
BrainRent 提供的服务覆盖多个领域:学术研究支持、企业决策咨询、创新创意生成、复杂问题建模与推理等。用户可以通过移动应用或网页平台提交问题,系统会根据问题类型匹配最适合的“思维单元”(即高性能的AI大脑或真人思维专家),在约定时间内返回详尽的思考成果。
|
| 4 |
+
|
| 5 |
+
核心产品“云脑池(BrainCloud)”是一个整合了数千名训练有素的专家与数百个高智能AI Agent的分布式思维网络。用户可选择使用“AI智能思维”、“人类专家思维”或“混合思维模式”,满足不同的预算和精准度要求。
|
brainrent_part2.pdf
ADDED
|
Binary file (48.3 kB). View file
|
|
|
fibonacci_list.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# fibonacci_list.py
|
| 2 |
+
|
| 3 |
+
def print_fibonacci_list(n):
    """
    Print the first `n` numbers in the Fibonacci sequence as a list.
    """
    if n <= 0:
        print([])
        return

    sequence = []
    current, following = 0, 1
    for _ in range(n):
        sequence.append(current)
        current, following = following, current + following

    print(sequence)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# Example usage
|
| 19 |
+
if __name__ == "__main__":
|
| 20 |
+
print_fibonacci_list(10)
|
range_h.md
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
### 注意
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
* 下面是 range_h 的帅照
|
| 5 |
+
|
| 6 |
+
---
|
| 7 |
+

|
requirements.txt
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
aiohappyeyeballs==2.6.1
|
| 2 |
+
aiohttp==3.11.18
|
| 3 |
+
aiosignal==1.3.2
|
| 4 |
+
annotated-types==0.7.0
|
| 5 |
+
anyio==4.9.0
|
| 6 |
+
attrs==25.3.0
|
| 7 |
+
beautifulsoup4==4.12.3
|
| 8 |
+
Brotli==1.1.0
|
| 9 |
+
cachetools==5.5.2
|
| 10 |
+
certifi==2025.4.26
|
| 11 |
+
charset-normalizer==3.4.2
|
| 12 |
+
click==8.1.8
|
| 13 |
+
dataclasses-json==0.6.7
|
| 14 |
+
distro==1.9.0
|
| 15 |
+
fastapi==0.110.1
|
| 16 |
+
frozenlist==1.6.0
|
| 17 |
+
google-api-core==2.25.0rc1
|
| 18 |
+
google-api-python-client==2.125.0
|
| 19 |
+
google-auth==2.40.1
|
| 20 |
+
google-auth-httplib2==0.2.0
|
| 21 |
+
googleapis-common-protos==1.70.0
|
| 22 |
+
greenlet==3.2.2
|
| 23 |
+
h11==0.16.0
|
| 24 |
+
httpcore==1.0.9
|
| 25 |
+
httplib2==0.22.0
|
| 26 |
+
httpx==0.28.1
|
| 27 |
+
httpx-sse==0.4.0
|
| 28 |
+
idna==3.10
|
| 29 |
+
jiter==0.9.0
|
| 30 |
+
jsonpatch==1.33
|
| 31 |
+
jsonpointer==3.0.0
|
| 32 |
+
langchain==0.3.25
|
| 33 |
+
langchain-community==0.3.24
|
| 34 |
+
langchain-core==0.3.60
|
| 35 |
+
langchain-deepseek==0.1.3
|
| 36 |
+
langchain-openai==0.3.17
|
| 37 |
+
langchain-text-splitters==0.3.8
|
| 38 |
+
langgraph==0.4.5
|
| 39 |
+
langgraph-checkpoint==2.0.26
|
| 40 |
+
langgraph-prebuilt==0.1.8
|
| 41 |
+
langgraph-sdk==0.1.69
|
| 42 |
+
langsmith==0.1.147
|
| 43 |
+
marshmallow==3.26.1
|
| 44 |
+
multidict==6.4.3
|
| 45 |
+
mutagen==1.47.0
|
| 46 |
+
mypy_extensions==1.1.0
|
| 47 |
+
numpy==2.2.5
|
| 48 |
+
openai==1.79.0
|
| 49 |
+
orjson==3.10.18
|
| 50 |
+
ormsgpack==1.9.1
|
| 51 |
+
packaging==23.2
|
| 52 |
+
propcache==0.3.1
|
| 53 |
+
proto-plus==1.26.1
|
| 54 |
+
protobuf==6.30.2
|
| 55 |
+
pyasn1==0.6.1
|
| 56 |
+
pyasn1_modules==0.4.2
|
| 57 |
+
pycryptodomex==3.22.0
|
| 58 |
+
pydantic==2.11.4
|
| 59 |
+
pydantic-settings==2.9.1
|
| 60 |
+
pydantic_core==2.33.2
|
| 61 |
+
pyparsing==3.2.3
|
| 62 |
+
pypdf==4.2.0
|
| 63 |
+
python-dotenv==1.0.1
|
| 64 |
+
PyYAML==6.0.2
|
| 65 |
+
regex==2024.11.6
|
| 66 |
+
requests==2.32.3
|
| 67 |
+
requests-toolbelt==1.0.0
|
| 68 |
+
rsa==4.9.1
|
| 69 |
+
setuptools==80.4.0
|
| 70 |
+
sniffio==1.3.1
|
| 71 |
+
soupsieve==2.7
|
| 72 |
+
SQLAlchemy==2.0.40
|
| 73 |
+
starlette==0.37.2
|
| 74 |
+
tenacity==8.5.0
|
| 75 |
+
tiktoken==0.9.0
|
| 76 |
+
tqdm==4.67.1
|
| 77 |
+
typing-inspect==0.9.0
|
| 78 |
+
typing-inspection==0.4.0
|
| 79 |
+
typing_extensions==4.13.2
|
| 80 |
+
uritemplate==4.1.1
|
| 81 |
+
urllib3==2.4.0
|
| 82 |
+
uvicorn==0.29.0
|
| 83 |
+
websockets==15.0.1
|
| 84 |
+
wheel==0.45.1
|
| 85 |
+
xxhash==3.5.0
|
| 86 |
+
yarl==1.20.0
|
| 87 |
+
yt-dlp==2024.4.9
|