Update app.py
app.py CHANGED
@@ -4,6 +4,13 @@ import requests
 import pytz
 import yaml
 from tools.final_answer import FinalAnswerTool
+import nltk
+import networkx as nx
+from sklearn.feature_extraction.text import TfidfVectorizer
+from nltk.tokenize import sent_tokenize
+
+# Ensure necessary NLTK resources are downloaded
+nltk.download('punkt')
 
 from Gradio_UI import GradioUI
 
@@ -34,6 +41,45 @@ def get_current_time_in_timezone(timezone: str) -> str:
         return f"Error fetching time for timezone '{timezone}': {str(e)}"
 
 
+@tool
+def extract_sent(doc: str, top_n: int = 3) -> list:
+    """ Extracts key sentences from a document using TextRank.
+
+    Args:
+        doc (str): The document (e.g., abstract) to extract sentences from.
+        top_n (int): The number of top-ranked sentences to return.
+
+    Returns:
+        List[str]: The most pertinent sentences.
+    """
+    try:
+        # Step 1: Tokenize the document into sentences
+        sentences = sent_tokenize(doc)
+
+        # Step 2: Convert sentences to vector representations (TF-IDF)
+        vectorizer = TfidfVectorizer()
+        sentence_vectors = vectorizer.fit_transform(sentences)
+
+        # Step 3: Compute similarity matrix (cosine similarity)
+        similarity_matrix = (sentence_vectors * sentence_vectors.T).toarray()
+
+        # Step 4: Create a graph where nodes are sentences, and edges are similarities
+        sentence_graph = nx.from_numpy_array(similarity_matrix)
+
+        # Step 5: Apply PageRank algorithm to rank sentences
+        scores = nx.pagerank(sentence_graph)
+
+        # Step 6: Sort sentences by score and return top-N sentences
+        ranked_sentences = sorted(((scores[i], s) for i, s in enumerate(sentences)), reverse=True)
+        extracted_sentences = [s for _, s in ranked_sentences[:top_n]]
+
+        return extracted_sentences
+
+    except Exception as e:
+        print(f"Error in extract_sent: {e}")
+        return []
+
+
 final_answer = FinalAnswerTool()
 
 # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
@@ -55,7 +101,7 @@ with open("prompts.yaml", 'r') as stream:
 
 agent = CodeAgent(
     model=model,
-    tools=[final_answer], ## add your tools here (don't remove final answer)
+    tools=[get_current_time_in_timezone,image_generation_tool,extract_sent,final_answer], ## add your tools here (don't remove final answer)
     max_steps=6,
     verbosity_level=1,
     grammar=None,