username
stringlengths 1
118
| score
float64 0
100
| timestamp
stringdate 2025-04-24 16:18:04
2025-09-20 10:24:40
| code
stringlengths 10
42.3k
|
---|---|---|---|
ruilin808
| 0 |
2025-07-10T07:24:08.661304+00:00
|
https://huggingface.co/spaces/aarboleda/hf-agents-course-final-assignment/tree/main
|
evarodenas
| 25 |
2025-07-10T07:41:37.587330+00:00
|
https://huggingface.co/spaces/evarodenas/gaia-agent
|
mhamzaanjum380
| 100 |
2025-07-10T07:50:53.065490+00:00
|
https://huggingface.co/spaces/None/tree/main
|
gjergjik
| 40 |
2025-07-10T09:08:22.541104+00:00
|
https://huggingface.co/spaces/gjergjik/Final_Assignment_Template/tree/main
|
sakettiger
| 0 |
2025-07-10T09:35:52.542558+00:00
|
https://huggingface.co/spaces/sakettiger/Final_Assignment_Template/tree/main
|
ghanemfaouri
| 35 |
2025-07-10T09:48:01.612679+00:00
|
https://huggingface.co/spaces/ghanemfaouri/Final_Assignment_Template/tree/main
|
aatish09
| 0 |
2025-07-10T09:58:41.390898+00:00
|
https://huggingface.co/spaces/aatish09/aiagent_09/tree/main
|
Aishwaryachellaiah
| 35 |
2025-07-10T10:26:28.046761+00:00
|
https://huggingface.co/spaces/Aishwaryachellaiah/Final_Assignment_Template/tree/main
|
valavanca
| 80 |
2025-07-10T12:03:55.485570+00:00
|
https://huggingface.co/spaces/fisherman611/gaia-agent/tree/main
|
dchakour
| 0 |
2025-07-10T14:30:45.413904+00:00
|
https://huggingface.co/spaces/dchakour/Agents_Course_Assignment/tree/main
|
Manavraj
| 5 |
2025-07-10T15:15:18.375255+00:00
|
https://huggingface.co/spaces/Manavraj/Final_Assignment/tree/main
|
chen8160
| 40 |
2025-07-10T15:16:53.641995+00:00
|
https://huggingface.co/spaces/None/tree/main
|
KushCodes
| 5 |
2025-07-10T15:23:14.439169+00:00
|
https://huggingface.co/spaces/KushCodes/unit4
|
Vadymbo
| 10 |
2025-07-10T15:58:29.925672+00:00
|
https://huggingface.co/spaces/None/tree/main
|
GeorgeTheo7
| 0 |
2025-07-10T16:09:01.910484+00:00
|
https://huggingface.co/spaces/GeorgeTheo7/Final_Assignment_Template/tree/main
|
NoT-ToN
| 85 |
2025-07-10T17:35:30.737673+00:00
|
https://huggingface.co/spaces/None/tree/main
|
Niccia
| 0 |
2025-07-10T17:55:57.783083+00:00
|
https://huggingface.co/spaces/Niccia/Final_Assignment_Template/tree/main
|
vinhvo1988
| 45 |
2025-07-10T18:19:53.216588+00:00
|
https://huggingface.co/spaces/vinhvo1988/Final_Project_Agent_Course/tree/main
|
Iurmer
| 0 |
2025-07-10T18:21:53.399732+00:00
|
https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main
|
Freddolin
| 100 |
2025-07-10T19:52:39.468624+00:00
|
https://huggingface.co/spaces/Freddolin/Final_Assignment_Template/tree/main
|
rsant
| 35 |
2025-07-10T22:39:46.541989+00:00
|
https://huggingface.co/spaces/rsant/Final_Assignment_Template/tree/main
|
OwlAgent
| 0 |
2025-07-11T01:46:21.172719+00:00
|
OwlAgentstri
|
Austin006
| 25 |
2025-07-11T01:51:45.419357+00:00
|
https://huggingface.co/spaces/Austin006/Final_Assignment_2nd_Attempt/tree/main
|
krisha-n
| 0 |
2025-07-11T03:22:15.188694+00:00
|
https://huggingface.co/spaces/None/tree/main
|
kit086
| 50 |
2025-07-11T03:49:35.688844+00:00
|
https://huggingface.co/spaces/None/tree/main
|
pandaayi
| 10 |
2025-07-11T04:06:13.246717+00:00
|
https://huggingface.co/spaces/pandaayi/Final_Assignment_Template/tree/main
|
ariskin
| 30 |
2025-07-11T06:23:21.505676+00:00
|
https://huggingface.co/spaces/ariskin/GAIA_test
|
himanshushukla12
| 0 |
2025-07-11T07:05:44.338862+00:00
|
https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main
|
tangpei
| 0 |
2025-07-11T07:41:08.648130+00:00
|
https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main
|
mrtom17
| 35 |
2025-07-11T07:43:55.596783+00:00
|
https://huggingface.co/spaces/mrtom17/gaia-agent/tree/main
|
kskazuha
| 0 |
2025-07-11T09:45:49.883304+00:00
|
https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main
|
Prasanthkumar
| 100 |
2025-07-11T10:46:33.214746+00:00
|
https://huggingface.co/spaces/Prasanthkumar/Final_Assignment_Template/tree/main
|
rathore11
| 0 |
2025-07-11T12:39:07.939380+00:00
|
https://huggingface.co/spaces/rathore11/Agent_course_final_project/tree/main
|
BladeSzaSza
| 30 |
2025-07-11T12:52:03.421049+00:00
|
https://huggingface.co/spaces/BladeSzaSza/Grux2/tree/main
|
CapitainFlow
| 35 |
2025-07-11T13:26:28.389981+00:00
|
https://huggingface.co/spaces/None/tree/main
|
reidzansm
| 35 |
2025-07-11T13:48:57.813017+00:00
|
https://huggingface.co/spaces/None/tree/main
|
kappenvinc
| 0 |
2025-07-11T13:52:46.132164+00:00
|
https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main
|
ScpMHL
| 0 |
2025-07-11T13:56:33.953032+00:00
|
https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main
|
nagarajan
| 5 |
2025-07-11T14:16:08.750432+00:00
|
stringstri
|
cedricbidet
| 35 |
2025-07-11T15:21:15.693263+00:00
|
https://huggingface.co/spaces/cedricbidet/FirminBot/tree/main
|
ZTnlHVbMHMRnuqjnTRnrCZXCOVSqpuLMmF
| 30 |
2025-07-11T15:38:43.912279+00:00
|
import base64
import functools
import time
from io import BytesIO
from typing import Annotated, TypedDict
import dotenv
import pandas as pd
import requests
import whisper
import yt_dlp
# from ddgs import DDGS # Commented out as it's no longer the primary search
from langchain.chat_models import init_chat_model
from langchain_core.tools import tool
from langchain_tavily import TavilySearch
from langgraph.graph import START, StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition
from PIL import Image
from pydub import AudioSegment
from bs4 import BeautifulSoup
from pypdf import PdfReader
########## INIT ##########
dotenv.load_dotenv()
# llm = init_chat_model("google_genai:gemini-2.5-pro")
llm = init_chat_model("google_genai:gemini-2.5-flash")
# llm = init_chat_model("google_genai:gemini-2.5-flash-lite-preview-06-17")
# llm = init_chat_model("google_genai:gemini-2.0-flash")
# llm = init_chat_model("google_genai:gemini-2.0-flash-lite")
########## TOOLS ##########
def multiply(a: int, b: int) -> int:
    """Return the product of *a* and *b*."""
    product = a * b
    return product
# New Tavily Search Tool
# Note: You need to have 'tavily-python' installed (pip install tavily-python)
# and TAVILY_API_KEY set in your environment variables.
tavily_tool = TavilySearch(max_results=5)
def search(query: str) -> str:
    """Look up *query* on the web via the Tavily Search API.

    Designed for agent use: returns concise, up-to-date snippets for current
    events, facts, and general knowledge that are outside the model's internal
    knowledge base.

    Args:
        query (str): The search query string.

    Returns:
        str: The search results, typically a list of snippets.
    """
    results = tavily_tool.invoke(query)
    return results
# --- DuckDuckGo Search (Commented out for future reference) ---
# def search(query: str) -> str:
# """Perform a web search using the DDGS library and extract relevant information.
# This tool is designed for agents to quickly retrieve concise web search results.
# It fetches the top 5 results for the given query and concatenates their body text,
# truncating to 500 characters to ensure brevity and relevance.
# This tool doesn't provide the full results, but it's useful for quick information retrieval.
# Use the scrape tool for more in-depth analysis.
# Args:
# query (str): The search query string to look up on the web.
# Returns:
# str: A string containing the combined text from the top search results, limited to 500 characters.
# Note:
# - If results exceed 500 characters, only the beginning is returned—consider refining the query for more targeted info.
# """
# with DDGS() as ddgs:
# results = ddgs.text(query, max_results=5)
# # combined = " ".join([r["body"] for r in results])
# return results
def scrape_website(url: str) -> str:
    """Scrape and extract clean text content from a given website URL.

    Fetches the page with a browser-like User-Agent, strips <script>/<style>
    tags, and returns the remaining visible text. If the response is a PDF,
    directs the caller to the extract_pdf_text tool instead.

    Args:
        url (str): The URL of the website or page to scrape.

    Returns:
        str: Cleaned text content or an error message.
    """
    try:
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        }
        # FIX: the original call had no timeout, so one unresponsive host could
        # stall the whole agent run indefinitely. Timeout raises a
        # RequestException, which the handler below already reports.
        response = requests.get(url, headers=headers, timeout=30)
        response.raise_for_status()
        if "application/pdf" in response.headers.get("Content-Type", ""):
            return "This appears to be a PDF file. Use the extract_pdf_text tool for better extraction."
        # Parse HTML to clean text
        soup = BeautifulSoup(response.text, "html.parser")
        for tag in soup(["script", "style"]):  # Remove non-visible content
            tag.extract()
        return soup.get_text(separator=" ", strip=True)
    except requests.exceptions.RequestException as e:
        return f"Error scraping website: {e}. If blocked (e.g., CAPTCHA), try searching for summaries instead."
    except Exception as e:
        return f"Unexpected error: {e}"
def transcribe_audio(file_path: str) -> str:
    """Transcribe an audio file (e.g., MP3) to text.

    Args:
        file_path (str): Path to the audio file.

    Returns:
        str: The transcribed text, or an error message.
    """
    import os
    import tempfile

    try:
        # Load the audio and convert to WAV for Whisper.
        audio = AudioSegment.from_mp3(file_path)
        # FIX: the original wrote a fixed "temp.wav" into the CWD and never
        # deleted it — concurrent calls clobbered each other and the file
        # leaked. Use a unique temp file and remove it when done.
        tmp = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
        tmp.close()
        try:
            audio.export(tmp.name, format="wav")
            # Load Whisper model (use 'base' for speed, 'large' for accuracy)
            model = whisper.load_model("base")
            result = model.transcribe(tmp.name)
            return result["text"]
        finally:
            os.remove(tmp.name)
    except Exception as e:
        return f"Error transcribing audio: {e}"
def download_youtube_audio(url: str, output_path: str = "temp_audio") -> str:
    """Download the audio track of a YouTube video as an MP3 file.

    Args:
        url (str): The YouTube video URL.
        output_path (str): Destination path without extension (default: 'temp_audio').

    Returns:
        str: The path to the downloaded audio file or an error message.
    """
    # The FFmpeg postprocessor appends ".mp3" itself, so outtmpl stays
    # extension-free here.
    options = {
        "format": "bestaudio/best",
        "outtmpl": output_path,
        "postprocessors": [
            {
                "key": "FFmpegExtractAudio",
                "preferredcodec": "mp3",
                "preferredquality": "192",
            }
        ],
    }
    try:
        with yt_dlp.YoutubeDL(options) as downloader:
            downloader.download([url])
        # Report the real on-disk name, including the extension ffmpeg added.
        return f"{output_path}.mp3"
    except Exception as e:
        return f"Error downloading audio: {e}"
def read_excel(
    file_path: Annotated[str, "Path to the Excel file (e.g., 'sales_data.xlsx')"],
    sheet_name: Annotated[
        str, "Name of the sheet to read (optional, defaults to first sheet)"
    ] = None,
) -> str:
    """Read data from an Excel file and return it as a string representation for analysis.

    This tool extracts tabular data from the specified sheet, which can then be used
    for calculations like summing sales columns. If the file has multiple sheets,
    specify the sheet_name.

    Returns:
        str: A string representation of the Excel data (e.g., rows and columns).
    """
    try:
        # BUG FIX: pandas interprets sheet_name=None as "read ALL sheets" and
        # returns a dict of DataFrames, which breaks .to_string() below. Map
        # the documented default ("first sheet") to index 0 explicitly.
        sheet = 0 if sheet_name is None else sheet_name
        df = pd.read_excel(file_path, sheet_name=sheet, engine="openpyxl")
        return df.to_string(index=False)  # Clean string without row indices
    except Exception as e:
        return f"Error reading Excel file: {e}"
def read_text_file(file_path: str) -> str:
    """Return the full UTF-8 content of a text file (e.g., .py, .txt).

    Useful for inspecting code, documents, or other text-based files — for
    example, retrieving a Python script's source to reason about its output.

    Args:
        file_path (str): Path to the text file.

    Returns:
        str: The file's content, or an error message on failure.
    """
    try:
        with open(file_path, "r", encoding="utf-8") as handle:
            content = handle.read()
        return content
    except Exception as e:
        return f"Error reading text file: {e}"
def analyze_image(
    image_path: Annotated[str, "Path to the image file"],
    analysis_focus: Annotated[
        str,
        "Specific focus for analysis",
    ],
) -> str:
    """Describe an image using the multimodal LLM.

    Useful for extracting information from visual content, e.g. reading a chess
    position off a screenshot or interpreting a diagram.

    Args:
        image_path (str): Local path to the image file.
        analysis_focus (str): Prompt guiding what the model should look for.

    Returns:
        str: A textual description of the image content, or an error message.
    """
    try:
        # Re-encode as JPEG and base64 it for the data-URL payload.
        with Image.open(image_path) as img:
            buffer = BytesIO()
            img.save(buffer, format="JPEG")
        encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")
        message = {
            "role": "user",
            "content": [
                {"type": "text", "text": analysis_focus},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{encoded}"},
                },
            ],
        }
        # Reuse the module-level llm instance for the vision call.
        reply = llm.invoke([message])
        return reply.content
    except Exception as e:
        return f"Error analyzing image: {e}"
def extract_pdf_text(url: str, search_term: str = None) -> str:
    """Extract text from a PDF file at the given URL, optionally searching for a specific term.

    Ideal for scientific papers: downloads the PDF, extracts all text, and can
    filter for lines containing a term (e.g., 'NASA award').

    Args:
        url (str): The URL of the PDF file.
        search_term (str): Optional term to search for — returns matching lines.

    Returns:
        str: Extracted (possibly truncated) text or an error message.
    """
    try:
        # FIX: no timeout on the original download meant a dead host could
        # hang the agent indefinitely.
        response = requests.get(url, timeout=60)
        response.raise_for_status()
        pdf_reader = PdfReader(BytesIO(response.content))
        text = ""
        for page in pdf_reader.pages:
            # FIX: extract_text() may return None (e.g. image-only pages);
            # guard so concatenation doesn't raise TypeError.
            text += (page.extract_text() or "") + "\n"
        if search_term:
            # Simple search: keep only lines containing the term (case-insensitive).
            lines = text.split("\n")
            matches = [line for line in lines if search_term.lower() in line.lower()]
            return (
                f"Extracted text containing '{search_term}':\n"
                + "\n".join(matches)[:2000]
            )  # Limit length
        return text[:5000]  # Truncate full text
    except Exception as e:
        return f"Error extracting PDF text: {e}"
def create_logging_tool(tool_func):
    """Wrap *tool_func* as a LangChain tool that logs every invocation."""

    @functools.wraps(tool_func)
    def logged(*args, **kwargs):
        print()
        print(f"Calling tool: {tool_func.__name__} with args: {args}, kwargs: {kwargs}")
        time.sleep(1)  # Add a 1-second delay before each tool call
        outcome = tool_func(*args, **kwargs)
        # Truncate to 100 chars; str() safely handles non-string results.
        print(f"Tool {tool_func.__name__} returned: {str(outcome)[:100]}")
        return outcome

    return tool(logged)
# Wrap every tool with logging instrumentation, then expose the set both to
# the LLM (so it can emit tool calls) and to a ToolNode (so the graph can
# execute them).
tools = [
    create_logging_tool(search),
    create_logging_tool(multiply),
    create_logging_tool(scrape_website),
    create_logging_tool(transcribe_audio),
    create_logging_tool(download_youtube_audio),
    create_logging_tool(read_excel),
    create_logging_tool(analyze_image),
    create_logging_tool(read_text_file),
    create_logging_tool(extract_pdf_text),
]
llm_with_tools = llm.bind_tools(tools)
tool_node = ToolNode(tools=tools)
########## STATE ##########
class State(TypedDict):
    """Graph state: the running message history shared by all nodes."""

    # Messages have the type "list". The `add_messages` function
    # in the annotation defines how this state key should be updated
    # (in this case, it appends messages to the list, rather than overwriting them)
    messages: Annotated[list, add_messages]
########## GRAPH ##########
def chatbot(state: State):
    """LLM node: prepend the system prompt, invoke the tool-bound model,
    and return its reply as a one-message state update."""
    # system_prompt = "You are a precise agent. Output ONLY the final answer with no extra text, explanations, or punctuation."
    # NOTE: the prompt text below is part of runtime behavior — keep verbatim.
    system_prompt = """
You are an advanced AI agent designed to solve complex, multi-step questions from the GAIA benchmark. These questions often require reasoning, information gathering, verification, and synthesis across multiple steps. You have access to the Tavily search tool for retrieving up-to-date information from the web.
Your primary goal is to provide accurate, well-reasoned answers by breaking down problems into manageable steps. Always plan before acting, reflect on results, and iterate as needed. Do not guess or fabricate information—rely on search results and logical deduction.
- **Synthesis**:
- Your final answer should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
- Write the shortest possible answer.
"""
    messages = [{"role": "system", "content": system_prompt}] + state["messages"]
    response = llm_with_tools.invoke(messages)
    # Check if this is a tool call (preserve it)
    if response.tool_calls:  # Or check 'tool_calls' in response.additional_kwargs
        return {"messages": [response]}
    # Rebuild a bare message of the same class carrying only the text content.
    return {"messages": [response.__class__(content=response.content)]}
# Wire the LangGraph state machine: the chatbot node decides whether to call
# tools; tools_condition routes tool calls to the ToolNode, which loops back
# to the chatbot until a plain (non-tool-call) answer is produced.
graph_builder = StateGraph(State)
graph_builder.add_node("chatbot", chatbot)
graph_builder.add_node("tools", tool_node)
graph_builder.add_edge(START, "chatbot")
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_conditional_edges(
    "chatbot",
    tools_condition,
)
graph = graph_builder.compile()
|
ctrlMarcio
| 45 |
2025-07-11T16:25:35.794108+00:00
|
import base64
import functools
import time
from io import BytesIO
from typing import Annotated, TypedDict
import dotenv
import pandas as pd
import requests
import whisper
import yt_dlp
from bs4 import BeautifulSoup
# from ddgs import DDGS # Commented out as it's no longer the primary search
from langchain.chat_models import init_chat_model
from langchain_core.tools import tool
from langchain_tavily import TavilySearch
from langgraph.graph import START, StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition
from PIL import Image
from pydub import AudioSegment
from pypdf import PdfReader
########## INIT ##########
dotenv.load_dotenv()
# llm = init_chat_model("google_genai:gemini-2.5-pro")
llm = init_chat_model("google_genai:gemini-2.5-flash")
# llm = init_chat_model("google_genai:gemini-2.5-flash-lite-preview-06-17")
# llm = init_chat_model("google_genai:gemini-2.0-flash")
# llm = init_chat_model("google_genai:gemini-2.0-flash-lite")
########## TOOLS ##########
def multiply(a: int, b: int) -> int:
    """Return the product of *a* and *b*."""
    product = a * b
    return product
# New Tavily Search Tool
# Note: You need to have 'tavily-python' installed (pip install tavily-python)
# and TAVILY_API_KEY set in your environment variables.
tavily_tool = TavilySearch(max_results=5)
# In your ########## TOOLS ########## section
def search(query: str) -> str:
    """Look up *query* on the web via the Tavily Search API.

    Designed for agent use: returns concise, up-to-date snippets for current
    events, facts, and general knowledge that are outside the model's internal
    knowledge base.

    Args:
        query (str): The search query string.

    Returns:
        str: The search results, typically a list of snippets.
    """
    results = tavily_tool.invoke(query)
    return results
# --- DuckDuckGo Search (Commented out for future reference) ---
# def search(query: str) -> str:
# """Perform a web search using the DDGS library and extract relevant information.
# This tool is designed for agents to quickly retrieve concise web search results.
# It fetches the top 5 results for the given query and concatenates their body text,
# truncating to 500 characters to ensure brevity and relevance.
# This tool doesn't provide the full results, but it's useful for quick information retrieval.
# Use the scrape tool for more in-depth analysis.
# Args:
# query (str): The search query string to look up on the web.
# Returns:
# str: A string containing the combined text from the top search results, limited to 500 characters.
# Note:
# - If results exceed 500 characters, only the beginning is returned—consider refining the query for more targeted info.
# """
# with DDGS() as ddgs:
# results = ddgs.text(query, max_results=5)
# # combined = " ".join([r["body"] for r in results])
# return results
def scrape_website(url: str) -> str:
    """Scrape and extract clean text content from a given website URL.

    Fetches the page with a browser-like User-Agent, strips <script>/<style>
    tags, and returns the remaining visible text. If the response is a PDF,
    directs the caller to the extract_pdf_text tool instead.

    Args:
        url (str): The URL of the website or page to scrape.

    Returns:
        str: Cleaned text content or an error message.
    """
    try:
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        }
        # FIX: the original call had no timeout, so one unresponsive host could
        # stall the whole agent run indefinitely. Timeout raises a
        # RequestException, which the handler below already reports.
        response = requests.get(url, headers=headers, timeout=30)
        response.raise_for_status()
        if "application/pdf" in response.headers.get("Content-Type", ""):
            return "This appears to be a PDF file. Use the extract_pdf_text tool for better extraction."
        # Parse HTML to clean text
        soup = BeautifulSoup(response.text, "html.parser")
        for tag in soup(["script", "style"]):  # Remove non-visible content
            tag.extract()
        return soup.get_text(separator=" ", strip=True)
    except requests.exceptions.RequestException as e:
        return f"Error scraping website: {e}. If blocked (e.g., CAPTCHA), try searching for summaries instead."
    except Exception as e:
        return f"Unexpected error: {e}"
def transcribe_audio(file_path: str) -> str:
    """Transcribe an audio file (e.g., MP3) to text.

    Args:
        file_path (str): Path to the audio file.

    Returns:
        str: The transcribed text, or an error message.
    """
    import os
    import tempfile

    try:
        # Load the audio and convert to WAV for Whisper.
        audio = AudioSegment.from_mp3(file_path)
        # FIX: the original wrote a fixed "temp.wav" into the CWD and never
        # deleted it — concurrent calls clobbered each other and the file
        # leaked. Use a unique temp file and remove it when done.
        tmp = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
        tmp.close()
        try:
            audio.export(tmp.name, format="wav")
            # Load Whisper model (use 'base' for speed, 'large' for accuracy)
            model = whisper.load_model("base")
            result = model.transcribe(tmp.name)
            return result["text"]
        finally:
            os.remove(tmp.name)
    except Exception as e:
        return f"Error transcribing audio: {e}"
def download_youtube_audio(url: str, output_path: str = "temp_audio") -> str:
    """Download the audio track of a YouTube video as an MP3 file.

    Args:
        url (str): The YouTube video URL.
        output_path (str): Destination path without extension (default: 'temp_audio').

    Returns:
        str: The path to the downloaded audio file or an error message.
    """
    # The FFmpeg postprocessor appends ".mp3" itself, so outtmpl stays
    # extension-free here.
    options = {
        "format": "bestaudio/best",
        "outtmpl": output_path,
        "postprocessors": [
            {
                "key": "FFmpegExtractAudio",
                "preferredcodec": "mp3",
                "preferredquality": "192",
            }
        ],
    }
    try:
        with yt_dlp.YoutubeDL(options) as downloader:
            downloader.download([url])
        # Report the real on-disk name, including the extension ffmpeg added.
        return f"{output_path}.mp3"
    except Exception as e:
        return f"Error downloading audio: {e}"
def read_excel(
    file_path: Annotated[str, "Path to the Excel file (e.g., 'sales_data.xlsx')"],
    sheet_name: Annotated[
        str, "Name of the sheet to read (optional, defaults to first sheet)"
    ] = None,
) -> str:
    """Read data from an Excel file and return it as a string representation for analysis.

    This tool extracts tabular data from the specified sheet, which can then be used
    for calculations like summing sales columns. If the file has multiple sheets,
    specify the sheet_name.

    Returns:
        str: A string representation of the Excel data (e.g., rows and columns).
    """
    try:
        # BUG FIX: pandas interprets sheet_name=None as "read ALL sheets" and
        # returns a dict of DataFrames, which breaks .to_string() below. Map
        # the documented default ("first sheet") to index 0 explicitly.
        sheet = 0 if sheet_name is None else sheet_name
        df = pd.read_excel(file_path, sheet_name=sheet, engine="openpyxl")
        return df.to_string(index=False)  # Clean string without row indices
    except Exception as e:
        return f"Error reading Excel file: {e}"
def read_text_file(file_path: str) -> str:
    """Return the full UTF-8 content of a text file (e.g., .py, .txt).

    Useful for inspecting code, documents, or other text-based files — for
    example, retrieving a Python script's source to reason about its output.

    Args:
        file_path (str): Path to the text file.

    Returns:
        str: The file's content, or an error message on failure.
    """
    try:
        with open(file_path, "r", encoding="utf-8") as handle:
            content = handle.read()
        return content
    except Exception as e:
        return f"Error reading text file: {e}"
def analyze_image(
    image_path: Annotated[str, "Path to the image file"],
    analysis_focus: Annotated[
        str,
        "Specific focus for analysis",
    ],
) -> str:
    """Describe an image using the multimodal LLM.

    Useful for extracting information from visual content, e.g. reading a chess
    position off a screenshot or interpreting a diagram.

    Args:
        image_path (str): Local path to the image file.
        analysis_focus (str): Prompt guiding what the model should look for.

    Returns:
        str: A textual description of the image content, or an error message.
    """
    try:
        # Re-encode as JPEG and base64 it for the data-URL payload.
        with Image.open(image_path) as img:
            buffer = BytesIO()
            img.save(buffer, format="JPEG")
        encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")
        message = {
            "role": "user",
            "content": [
                {"type": "text", "text": analysis_focus},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{encoded}"},
                },
            ],
        }
        # Reuse the module-level llm instance for the vision call.
        reply = llm.invoke([message])
        return reply.content
    except Exception as e:
        return f"Error analyzing image: {e}"
def extract_pdf_text(url: str, search_term: str = None) -> str:
    """Extract text from a PDF file at the given URL, optionally searching for a specific term.

    Ideal for scientific papers: downloads the PDF, extracts all text, and can
    filter for lines containing a term (e.g., 'NASA award').

    Args:
        url (str): The URL of the PDF file.
        search_term (str): Optional term to search for — returns matching lines.

    Returns:
        str: Extracted (possibly truncated) text or an error message.
    """
    try:
        # FIX: no timeout on the original download meant a dead host could
        # hang the agent indefinitely.
        response = requests.get(url, timeout=60)
        response.raise_for_status()
        pdf_reader = PdfReader(BytesIO(response.content))
        text = ""
        for page in pdf_reader.pages:
            # FIX: extract_text() may return None (e.g. image-only pages);
            # guard so concatenation doesn't raise TypeError.
            text += (page.extract_text() or "") + "\n"
        if search_term:
            # Simple search: keep only lines containing the term (case-insensitive).
            lines = text.split("\n")
            matches = [line for line in lines if search_term.lower() in line.lower()]
            return (
                f"Extracted text containing '{search_term}':\n"
                + "\n".join(matches)[:2000]
            )  # Limit length
        return text[:5000]  # Truncate full text
    except Exception as e:
        return f"Error extracting PDF text: {e}"
def create_logging_tool(tool_func):
    """Wrap *tool_func* as a LangChain tool that logs every invocation."""

    @functools.wraps(tool_func)
    def logged(*args, **kwargs):
        print()
        print(f"Calling tool: {tool_func.__name__} with args: {args}, kwargs: {kwargs}")
        time.sleep(1)  # Add a 1-second delay before each tool call
        outcome = tool_func(*args, **kwargs)
        # Truncate to 100 chars; str() safely handles non-string results.
        print(f"Tool {tool_func.__name__} returned: {str(outcome)[:100]}")
        return outcome

    return tool(logged)
# Wrap every tool with logging instrumentation, then expose the set both to
# the LLM (so it can emit tool calls) and to a ToolNode (so the graph can
# execute them).
tools = [
    create_logging_tool(search),
    create_logging_tool(multiply),
    create_logging_tool(scrape_website),
    create_logging_tool(transcribe_audio),
    create_logging_tool(download_youtube_audio),
    create_logging_tool(read_excel),
    create_logging_tool(analyze_image),
    create_logging_tool(read_text_file),
    create_logging_tool(extract_pdf_text),
]
llm_with_tools = llm.bind_tools(tools)
tool_node = ToolNode(tools=tools)
########## STATE ##########
class State(TypedDict):
    """Graph state: the running message history shared by all nodes."""

    # Messages have the type "list". The `add_messages` function
    # in the annotation defines how this state key should be updated
    # (in this case, it appends messages to the list, rather than overwriting them)
    messages: Annotated[list, add_messages]
########## GRAPH ##########
def chatbot(state: State):
    """LLM node: prepend the system prompt, invoke the tool-bound model,
    and return its reply as a one-message state update."""
    # NOTE: the prompt text below is part of runtime behavior — keep verbatim.
    system_prompt = """
You are an advanced AI agent designed to solve complex, multi-step questions from the GAIA benchmark. Your goal is to provide accurate, well-reasoned answers by breaking down problems into manageable steps.
**Information Gathering Strategy:**
1. **Search First:** Start by using the `search` tool to find relevant web pages. This tool returns a list of URLs and snippets.
2. **Analyze and Scrape:** Review the search results. Identify the single most promising URL that likely contains the answer. Then, use the `scrape_website` tool to read the full content of that page.
3. **Synthesize:** Use the scraped content to formulate your final answer.
4. **Self-Correction:** Do NOT get stuck in a loop of calling the `search` tool repeatedly. If your initial search doesn't yield a good URL, try refining your search query *once*. If that still fails, reconsider your approach. The goal is to move from search to scrape efficiently.
**Final Answer Formatting:**
- Your final answer should be a number OR as few words as possible OR a comma separated list.
- If you are asked for a number, do not use commas (e.g., 1000, not 1,000) or units ($) unless specified.
- If you are asked for a string, do not use articles (a, an, the) or abbreviations.
- Write the shortest possible answer that directly addresses the question.
"""
    messages = [{"role": "system", "content": system_prompt}] + state["messages"]
    response = llm_with_tools.invoke(messages)
    # Check if this is a tool call (preserve it)
    if response.tool_calls:  # Or check 'tool_calls' in response.additional_kwargs
        return {"messages": [response]}
    # Rebuild a bare message of the same class carrying only the text content.
    return {"messages": [response.__class__(content=response.content)]}
# Wire the LangGraph state machine: the chatbot node decides whether to call
# tools; tools_condition routes tool calls to the ToolNode, which loops back
# to the chatbot until a plain (non-tool-call) answer is produced.
graph_builder = StateGraph(State)
graph_builder.add_node("chatbot", chatbot)
graph_builder.add_node("tools", tool_node)
graph_builder.add_edge(START, "chatbot")
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_conditional_edges(
    "chatbot",
    tools_condition,
)
graph = graph_builder.compile()
|
supratipb
| 35 |
2025-07-11T16:50:18.240895+00:00
|
https://huggingface.co/spaces/supratipb/agent2/tree/main
|
Joehauer17
| 0 |
2025-07-11T23:11:46.364563+00:00
|
https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main
|
IsraZ
| 0 |
2025-07-11T23:43:26.279051+00:00
|
https://huggingface.co/spaces/cjb97/Agent_Course_Final_Assignment/tree/main
|
IMosia
| 0 |
2025-07-12T00:47:14.694758+00:00
|
https://huggingface.co/spaces/IMosia/Final_Assignment/tree/main
|
gk2410
| 0 |
2025-07-12T09:10:43.907226+00:00
|
https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main
|
Merin75
| 0 |
2025-07-12T12:49:42.238899+00:00
|
https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main
|
geekdan
| 10 |
2025-07-12T13:08:15.474242+00:00
|
https://huggingface.co/spaces/geekdan/Agent_Course_Final_Assignment/tree/main
|
abhidgp1978
| 80 |
2025-07-12T13:17:50.796252+00:00
|
https://huggingface.co/spaces/fisherman611/gaia-agent/tree/main
|
AlaaWO
| 30 |
2025-07-12T14:02:41.973098+00:00
|
https://huggingface.co/spaces/None/tree/main
|
blagoje342
| 0 |
2025-07-12T14:13:43.453945+00:00
|
https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main
|
Johnatens
| 0 |
2025-07-12T15:19:47.328465+00:00
|
https://huggingface.co/spaces/Johnatens/Final_Assignment_Template/tree/main
|
nit-sparky
| 40 |
2025-07-12T15:21:42.905581+00:00
|
https://huggingface.co/spaces/nit-sparky/Final_Assignment_Template/tree/main
|
HaofanWen
| 85 |
2025-07-12T16:06:43.705677+00:00
|
https://huggingface.co/spaces/fisherman611/gaia-agent/tree/main
|
rotteveel
| 55 |
2025-07-12T17:17:45.785698+00:00
|
https://huggingface.co/spaces/rotteveel/Agent-Assignment/tree/main
|
PROAC
| 100 |
2025-07-12T18:41:24.234499+00:00
|
https://huggingface.co/spaces/PROAC/Final_Assignment_Agents_Course/tree/main
|
yassineameur
| 90 |
2025-07-12T19:31:45.761309+00:00
|
https://huggingface.co/spaces/fisherman611/gaia-agent/tree/main
|
rsingh87
| 0 |
2025-07-12T20:42:57.645821+00:00
|
https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main
|
peeya-i
| 100 |
2025-07-12T23:40:51.954407+00:00
|
https://huggingface.co/spaces/PROAC/Final_Assignment_Agents_Course/tree/main
|
Ybezz
| 30 |
2025-07-13T00:40:53.867833+00:00
|
https://huggingface.co/spaces/None/tree/main
|
vijaygopu
| 20 |
2025-07-13T01:02:15.087510+00:00
|
https://huggingface.co/spaces/vijaygopu/agents-space/tree/main
|
<your-username>
| 80 |
2025-07-13T01:29:10.101029+00:00
|
https://huggingface.co/spaces/HaofanWen/causal_debugging_agent/tree/main
|
santiagoahl
| 35 |
2025-07-13T02:27:56.540722+00:00
|
https://huggingface.co/spaces/None/tree/main
|
tomhflau
| 5 |
2025-07-13T06:39:43.865210+00:00
|
https://huggingface.co/spaces/tomhflau/Final_Assignment_Template/tree/main
|
harisgulzar1
| 15 |
2025-07-13T07:21:43.568295+00:00
|
class AgentState(TypedDict):
    """State schema for a message-passing agent graph.

    Holds the running conversation as a list of messages. NOTE(review):
    this fragment appears inline in a data dump; `TypedDict`, `Annotated`,
    `AnyMessage`, and `add_messages` are not imported in the visible chunk.
    """

    # `add_messages` is attached as `Annotated` metadata — presumably the
    # LangGraph reducer that merges new messages into the existing list
    # instead of overwriting it; confirm against the module's imports.
    messages: Annotated[list[AnyMessage], add_messages]
|
kamath93
| 80 |
2025-07-13T07:44:34.698784+00:00
|
https://huggingface.co/spaces/kamath93/Final_Assignment_Template/tree/main
|
dibgerges
| 60 |
2025-07-13T08:00:22.217584+00:00
|
https://huggingface.co/spaces/dibgerges/huggingface_agents_course
|
lppyo
| 100 |
2025-07-13T08:43:10.522024+00:00
|
https://huggingface.co/spaces/lppyo/Final_Assignment_Template/tree/main
|
AgileAndy
| 40 |
2025-07-13T09:06:33.303565+00:00
|
local_testing
|
wilzuv
| 0 |
2025-07-13T10:18:03.719726+00:00
|
https://huggingface.co/spaces/None/tree/main
|
EtienneAms00
| 0 |
2025-07-13T12:18:11.121863+00:00
|
https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main
|
JS550
| 80 |
2025-07-13T12:52:39.626034+00:00
|
https://huggingface.co/spaces/baixianger/RobotPai/tree/main
|
AISparking
| 30 |
2025-07-13T14:12:33.166435+00:00
|
https://huggingface.co/spaces/AISparking/HF_AgentsCourse_FinalAssignment/tree/main
|
Kati8
| 0 |
2025-07-13T14:57:20.734671+00:00
|
https://huggingface.co/spaces/Kati8/Final_Assignment_Template/tree/main
|
ArtemAvramenko
| 30 |
2025-07-13T15:41:28.666940+00:00
|
https://huggingface.co/spaces/ArtemAvramenko/Final_Assignment_Template/tree/main
|
floristafa
| 30 |
2025-07-13T16:17:10.337283+00:00
|
https://huggingface.co/spaces/None/tree/main
|
13gauravpandey
| 0 |
2025-07-13T17:06:34.861517+00:00
|
https://huggingface.co/spaces/13gauravpandey/Final_Assignment_Template/tree/main
|
AkhilPadala
| 0 |
2025-07-13T17:33:30.806161+00:00
|
https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main
|
SashaKrstev
| 85 |
2025-07-13T17:54:06.551129+00:00
|
https://huggingface.co/spaces/SashaKrstev/Final_Assignment_Template/tree/main
|
misarmat
| 30 |
2025-07-13T18:21:39.833222+00:00
|
https://huggingface.co/spaces/None/tree/main
|
BrenHu4
| 0 |
2025-07-13T18:40:04.740506+00:00
|
https://huggingface.co/spaces/BrenHu4/Final_Assignment_gaia/tree/main
|
enrigle
| 0 |
2025-07-13T19:54:01.327693+00:00
|
https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main
|
mattmurphy
| 10 |
2025-07-13T20:04:26.854299+00:00
|
https://huggingface.co/spaces/None/tree/main
|
multimodal_test_user
| 0 |
2025-07-13T21:11:06.760917+00:00
|
https://github.com/user/multimodal-agent
|
gurusarank
| 100 |
2025-07-13T21:15:21.926083+00:00
|
https://huggingface.co/spaces/gurusarank/Final_Assignment_Template/tree/main
|
scelying
| 45 |
2025-07-13T21:21:51.437077+00:00
|
https://huggingface.co/spaces/scelying/hf-agent/tree/main
|
amar4
| 10 |
2025-07-13T21:23:30.288569+00:00
|
https://huggingface.co/spaces/your-username/Final_Assignment_Template/tree/main
|
amar4ankatha
| 15 |
2025-07-13T22:27:25.677223+00:00
|
https://huggingface.co/spaces/amar4ankatha/Final_Assignment_Template/tree/main
|
jessicalopez
| 0 |
2025-07-13T23:26:53.735873+00:00
|
https://huggingface.co/spaces/jessicalopez/Agents_GAIA_dataset/tree/main
|
divyanshstat
| 30 |
2025-07-13T23:29:34.139803+00:00
|
https://huggingface.co/spaces/divyanshstat/Final_Assignment_V2/tree/main
|
antjiuli
| 0 |
2025-07-14T02:39:07.049675+00:00
|
https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main
|
ppxa
| 0 |
2025-07-14T02:54:45.611005+00:00
|
https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main
|
Aileenvl
| 0 |
2025-07-14T03:49:17.795675+00:00
|
https://huggingface.co/spaces/Aileenvl/gaia-agent-aileen/tree/main
|
Logout (dmfelder)
| 5 |
2025-07-14T06:26:40.836623+00:00
|
https://huggingface.co/spaces/dmfelder/DF_Final_Assignment_Template/tree/main
|
Shouryat32
| 100 |
2025-07-14T07:33:17.511744+00:00
|
https://huggingface.co/spaces/Shouryat32/Final_Assignment_Template/tree/main
|
rqm64
| 20 |
2025-07-14T08:54:24.731719+00:00
|
https://huggingface.co/spaces/divyanshstat/Final_Assignment_V2/tree/main
|
enesaydin
| 20 |
2025-07-14T09:36:51.249004+00:00
|
https://huggingface.co/spaces/None/tree/main
|
kamil1300
| 15 |
2025-07-14T10:40:24.714380+00:00
|
https://huggingface.co/spaces/your-space-id/tree/main
|
oraziorillo
| 40 |
2025-07-14T11:27:57.560382+00:00
|
https://huggingface.co/spaces/oraziorillo/Final_Assignment_Template/tree/main
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.