username | score | timestamp | code
---|---|---|---
DaquaviousDinglenut | 10 | 2025-09-10T02:02:14.436643+00:00 | https://huggingface.co/spaces/DaquaviousDinglenut/Final_Assignment_Template/tree/main |
JanGo1 | 35 | 2025-09-10T03:16:36.173019+00:00 | https://huggingface.co/spaces/JanGo1/Agents_Final_Assignment/tree/main |
ArturoNereu | 15 | 2025-09-10T03:23:48.047419+00:00 | https://huggingface.co/spaces/ArturoNereu/GAIA_Agent/tree/main |
cyborgmass | 0 | 2025-09-10T05:46:28.449067+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
khizarsait | 10 | 2025-09-10T06:27:37.235763+00:00 | https://huggingface.co/spaces/khizarsait/Final_Assignment_Template_KS/tree/main |
quablab | 30 | 2025-09-10T07:57:49.500179+00:00 | https://huggingface.co/spaces/quablab/Final_Assignment_Template/tree/main |
xuhai951753 | 10 | 2025-09-10T08:06:47.093148+00:00 | https://huggingface.co/spaces/None/tree/main |
mayarelsayed | 0 | 2025-09-10T08:37:22.714677+00:00 | https://huggingface.co/spaces/mayarelsayed/Final_Assignment_Template/tree/main |
taufftauff | 30 | 2025-09-10T10:57:13.884727+00:00 | https://huggingface.co/spaces/None/tree/main |
svl09 | 35 | 2025-09-10T12:04:13.836224+00:00 | https://huggingface.co/spaces/None/tree/main |
Taruuunn | 30 | 2025-09-10T16:40:13.231515+00:00 | https://huggingface.co/spaces/Taruuunn/Final_Assignment_Template/tree/main |
PawLew | 35 | 2025-09-10T16:53:47.709830+00:00 | https://huggingface.co/spaces/PawLew/Final_Assignment_Agents_Course/tree/main |
GiuSSE | 30 | 2025-09-10T19:14:13.391040+00:00 | https://huggingface.co/spaces/GiuSSE/Final_Assignment_Template/tree/main |
Blannikus | 0 | 2025-09-10T19:54:56.845334+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
Harshazpcg | 40 | 2025-09-10T21:29:51.544644+00:00 | local_development |
alexander-oleynik | 100 | 2025-09-10T21:53:26.463858+00:00 | https://huggingface.co/spaces/alexander-oleynik/agent-demo/tree/main |
gurugopinath | 30 | 2025-09-11T00:24:45.606030+00:00 | https://huggingface.co/spaces/None/tree/main |
Alex549 | 40 | 2025-09-11T05:46:14.639653+00:00 | (inline notebook submission; reproduced in the listing below) |

# Prerequisites: Having run `prepare_data.py` to set up the data
# %%
import dotenv
dotenv.load_dotenv()
# %%
# import os
# from huggingface_hub import login
# login(token=os.getenv("HF_TOKEN"))
# %%
import os
import base64
LANGFUSE_PUBLIC_KEY = os.getenv("LANGFUSE_PUBLIC_KEY")
LANGFUSE_SECRET_KEY = os.getenv("LANGFUSE_SECRET_KEY")
LANGFUSE_AUTH = base64.b64encode(f"{LANGFUSE_PUBLIC_KEY}:{LANGFUSE_SECRET_KEY}".encode()).decode()
os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://cloud.langfuse.com/api/public/otel" # EU data region
os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = f"Authorization=Basic {LANGFUSE_AUTH}"
from opentelemetry.sdk.trace import TracerProvider
from openinference.instrumentation.smolagents import SmolagentsInstrumentor
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
trace_provider = TracerProvider()
trace_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter()))
SmolagentsInstrumentor().instrument(tracer_provider=trace_provider)
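# Note (added): the block above wires smolagents tracing into Langfuse: the
# OpenInference SmolagentsInstrumentor emits OpenTelemetry spans, which the
# OTLP/HTTP exporter ships to the Langfuse cloud endpoint, authenticated with
# the base64-encoded "public_key:secret_key" pair set in OTEL_EXPORTER_OTLP_HEADERS.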
# %%
from smolagents import LiteLLMModel, InferenceClientModel, OpenAIServerModel
class InferenceClientModelWithUsage(InferenceClientModel):
    last_input_token_count = -1
    last_output_token_count = -1

class OpenAIServerModelWithUsage(OpenAIServerModel):
    last_input_token_count = -1
    last_output_token_count = -1
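# Note (added): a plausible reading is that these subclasses pre-seed the
# token-usage attributes with -1 so usage accounting does not fail when a
# provider response omits token counts; this is inferred from the names, not
# from documented smolagents behavior.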
# %%
from smolagents import (
    CodeAgent,
    DuckDuckGoSearchTool,
    InferenceClientModel,
    VisitWebpageTool,
    FinalAnswerTool,
    tool,
)
# %%
agent_llm = InferenceClientModelWithUsage(
    model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
    provider="together",
)
# %%
import base64
from openai import OpenAI
# 2. Image Comprehension Tool
@tool
def image_comprehension(
    image_path: str,
    question: str = "Describe this image in detail."
) -> str:
    """
    Analyze an image using GPT-4 Vision, given a specific question.
    Args:
        image_path (str): The path to the image file.
        question (str): The question to ask about the image.
    Returns:
        str: A response to the question about the image.
    """
    try:
        # Initialize OpenAI client
        client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        # Read and encode the image
        with open(image_path, "rb") as image_file:
            image_bytes = image_file.read()
        image_base64 = base64.b64encode(image_bytes).decode("utf-8")
        # Create the message payload directly for the OpenAI API
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": question
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{image_base64}"
                        }
                    }
                ]
            }
        ]
        # Make the API call directly to OpenAI
        response = client.chat.completions.create(
            model="gpt-4o",
            messages=messages,
            max_tokens=1000,
        )
        return response.choices[0].message.content
    except Exception as e:
        error_msg = f"Error processing image: {str(e)}"
        return error_msg
# # Test the image comprehension tool
# image_path = "HF_Agents_Course/u4.final_project/gaia/2023/validation/5b2a14e8-6e59-479c-80e3-4696e8980152.jpg"
# image_description = image_comprehension(image_path, question="Describe this image in detail.")
# print(f"Image Description:\n{image_description}")
# %%
# 3. Text Extraction Tool (from image)
@tool
def extract_text_from_image(image_path: str) -> str:
    """
    Extract text from an image file using a multimodal model.
    Args:
        image_path: A local image file path (string).
    Returns:
        str: A single string containing the text extracted from the image.
    """
    extracted_text = image_comprehension(
        image_path,
        question="Extract all the text from this image. Return only the extracted text, no explanations."
    )
    return extracted_text.strip()
# # Test the text extraction tool
# image_path = "HF_Agents_Course/u4.final_project/gaia/2023/validation/b7f857e4-d8aa-4387-af2a-0e844df5b9d8.png"  # Example image
# image_text = extract_text_from_image(image_path)
# print(f"Extracted Text:\n{image_text}")
# %%
# 4. Speech to Text Tool
@tool
def transcribe_audio(audio_path: str) -> str:
    """
    Transcribe audio using OpenAI's speech-to-text API.
    Args:
        audio_path (str): The path to the audio file.
    Returns:
        str: The transcribed text from the audio.
    """
    try:
        client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        with open(audio_path, "rb") as audio_file:
            transcript = client.audio.transcriptions.create(
                file=audio_file,
                model="gpt-4o-transcribe",
                response_format="text",
            )
        return transcript
    except Exception as e:
        error_msg = f"Error transcribing audio: {str(e)}"
        return error_msg
# # Test the transcribe audio tool
# audio_path = "HF_Agents_Course/u4.final_project/gaia/2023/validation/1f975693-876d-457b-a649-393859e79bf3.mp3"
# transcript = transcribe_audio(audio_path)
# print(f"Transcription for '{audio_path}':\n{transcript}")
# %%
# 5. TXT viewer
@tool
def read_txt(file_path: str) -> str:
    """
    Read and return the content of a TXT file.
    Args:
        file_path (str): The path to the TXT file.
    Returns:
        str: The content of the TXT file.
    """
    try:
        with open(file_path, "r", encoding="utf-8") as file:
            content = file.read()
        return content
    except Exception as e:
        error_msg = f"Error reading TXT file: {str(e)}"
        return error_msg
# # Test the read_txt tool
# txt_path = "HF_Agents_Course/u4.final_project/gaia/2023/validation/389793a7-ca17-4e82-81cb-2b3a2391b4b9.txt"
# txt_content = read_txt(txt_path)
# print(f"Content of '{txt_path}':\n{txt_content}")
# %%
# %pip install python-docx
# %%
# %pip install pdfplumber
# %%
# %pip install langchain-community
# %%
# %pip install tabulate
# %%
# 6. docx viewer
import pandas as pd
from docx import Document
from docx.oxml.ns import qn

@tool
def read_docx(path: str) -> str:
    """
    Read a DOCX file and return its content.
    Args:
        path (str): The path to the DOCX file.
    Returns:
        str: The content of the DOCX file with page headers and breaks.
    """
    doc = Document(path)
    out_parts: list[str] = []

    # ---------- helpers ----------
    def breaks_in_paragraph(p) -> int:
        """Count explicit page-break markers present in this paragraph."""
        n = 0
        for run in p.runs:
            n += len(run._element.xpath(".//w:br[@w:type='page']"))
            n += len(run._element.xpath(".//w:lastRenderedPageBreak"))
        if p._p.xpath(".//w:pPr/w:pageBreakBefore"):
            n += 1
        return n

    def table_to_markdown(tbl) -> str:
        rows = [[cell.text.strip() for cell in row.cells] for row in tbl.rows]
        max_len = max((len(r) for r in rows), default=0)
        norm = [r + [""] * (max_len - len(r)) for r in rows]
        return pd.DataFrame(norm).to_markdown(index=False)

    def iter_block_items(doc_):
        from docx.text.paragraph import Paragraph
        from docx.table import Table
        for child in doc_._element.body.iterchildren():
            if child.tag == qn("w:p"):
                yield Paragraph(child, doc_)
            elif child.tag == qn("w:tbl"):
                yield Table(child, doc_)

    # ---------- pagination state ----------
    page_num = 1
    have_started_this_page = False
    pending_page_break = False  # collapse consecutive break markers

    def ensure_page_header():
        nonlocal have_started_this_page
        if not have_started_this_page:
            out_parts.append(f"[DOCX Page {page_num}]")
            have_started_this_page = True

    # ---------- render ----------
    for block in iter_block_items(doc):
        # If this block is a paragraph, handle its breaks & text
        if hasattr(block, "text"):  # Paragraph
            text = block.text.strip()
            has_breaks = breaks_in_paragraph(block) > 0
            # If we see a break marker: schedule a single transition
            if has_breaks:
                if have_started_this_page:
                    out_parts.append("\n--- PAGE BREAK ---\n")
                if not pending_page_break:
                    page_num += 1
                pending_page_break = True
                have_started_this_page = False  # next content starts the new page
            # Emit any text in this paragraph (after handling the break)
            if text:
                # If a break was pending, this is the first content on the new page
                if pending_page_break:
                    pending_page_break = False
                ensure_page_header()
                out_parts.append(text)
        # Tables: just print on the current page (DOCX page breaks are paragraph-based)
        elif hasattr(block, "rows"):  # Table
            if pending_page_break:
                if have_started_this_page:
                    out_parts.append("\n--- PAGE BREAK ---\n")
                # move to the next page once
                have_started_this_page = False
                pending_page_break = False
                # header for the new page
                ensure_page_header()
            else:
                ensure_page_header()
            out_parts.append(table_to_markdown(block))
    return "\n".join(out_parts)
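# Note (added): read_docx paginates by scanning for explicit page-break markers
# (w:br with type "page", lastRenderedPageBreak hints, and pageBreakBefore
# paragraph properties); consecutive markers are collapsed via pending_page_break
# so an empty break-only paragraph cannot emit two page headers in a row.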
# # Test the read_docx tool
# docx_path = "HF_Agents_Course/u4.final_project/gaia/2023/validation/cffe0e32-c9a6-4c52-9877-78ceb4aaa9fb.docx"
# docx_content = read_docx(docx_path)
# print(f"Content of '{docx_path}':\n{docx_content}")
# %%
# 7. pdf viewer
import pdfplumber
import pandas as pd
from collections import defaultdict
from math import floor
from langchain_community.document_loaders import UnstructuredPDFLoader

def smart_pdf_reader(
    path: str,
    detect_text_tables: bool = True,
    x_bin: float = 6.0,
    min_rows: int = 3,
    min_cols: int = 3,
    min_lines_for_column: float = 0.5,
    table_mode: str = "auto"  # "auto", "lines", or "text"
) -> str:
    """
    PDF viewer:
    - distinguishes tables from text with alignment heuristics
    - renders tables with the same markdown converter used elsewhere
    """
    # Coerce numeric inputs
    def _as_float(x, default):
        try:
            return float(x[0] if isinstance(x, (list, tuple)) else x)
        except Exception:
            return float(default)

    def _as_int(x, default):
        try:
            return int(x[0] if isinstance(x, (list, tuple)) else x)
        except Exception:
            return int(default)

    xb = _as_float(x_bin, 6.0)
    mr = _as_int(min_rows, 3)
    mc = _as_int(min_cols, 3)

    def valid_table_rows(rows) -> bool:
        if not rows:
            return False
        clean = [[("" if c is None else str(c).strip()) for c in r] for r in rows]
        clean = [r for r in clean if any(r)]
        if len(clean) < mr:
            return False
        ncols = max(len(r) for r in clean)
        return mc <= ncols <= 30

    def tables_for_page(page):
        def strategies(mode):
            if mode == "auto":
                return [
                    dict(vertical_strategy="lines", horizontal_strategy="lines"),
                    dict(vertical_strategy="text", horizontal_strategy="text"),
                ]
            if mode == "lines":
                return [dict(vertical_strategy="lines", horizontal_strategy="lines")]
            return [dict(vertical_strategy="text", horizontal_strategy="text")]

        for strategy in strategies(table_mode):
            # Skip text-based detection unless the alignment check passes
            if strategy["vertical_strategy"] == "text":
                if not detect_text_tables or not is_probably_tabular(page):
                    continue
            tbls = page.find_tables(table_settings=strategy) or []
            results = [t for t in tbls if valid_table_rows(t.extract())]
            if results:
                return results
        return []

    def is_probably_tabular(page) -> bool:
        words = page.extract_words() or []
        if len(words) < 20:
            return False
        lines = defaultdict(list)
        for w in words:
            lines[round(w["top"], 1)].append(w)
        nonempty = sum(1 for ws in lines.values() if ws)
        if nonempty < mr:
            return False
        bins = defaultdict(int)
        for ws in lines.values():
            xs = sorted(w["x0"] for w in ws if "x0" in w)
            if not xs:
                continue
            gaps = [xs[i + 1] - xs[i] for i in range(len(xs) - 1)]
            med_gap = sorted(gaps)[len(gaps) // 2] if gaps else 0
            threshold = 1.5 * (med_gap or 1.0)
            chunks = [xs[0]] + [xs[i + 1] for i, g in enumerate(gaps) if g > threshold]
            used = set(floor(x / xb) * xb for x in chunks)
            for b in used:
                bins[b] += 1
        needed = max(1, int(nonempty * float(min_lines_for_column) + 0.5))
        dominant = [b for b, c in bins.items() if c >= needed]
        return len(dominant) >= mc

    def table_to_markdown(rows):
        clean = [[("" if c is None else str(c).strip()) for c in r] for r in rows]
        clean = [r for r in clean if any(r)]
        max_len = max(len(r) for r in clean)
        clean = [r + [""] * (max_len - len(r)) for r in clean]
        df = pd.DataFrame(clean)
        return df.to_markdown(index=False)

    parts = []
    with pdfplumber.open(path) as pdf:
        total = len(pdf.pages)
        for i, page in enumerate(pdf.pages, start=1):
            parts.append(f"[PDF Page {i}]")
            tbl_objs = tables_for_page(page)
            bboxes = [t.bbox for t in tbl_objs]
            filtered = page
            for bb in bboxes:
                filtered = filtered.outside_bbox(bb)
            text = (filtered.extract_text() or "").strip()
            if text:
                parts.append(text)
            if tbl_objs:
                tcount = 0
                for t in tbl_objs:
                    rows = t.extract()
                    md = table_to_markdown(rows)
                    if md:
                        tcount += 1
                        parts.append(f"**Table {i}.{tcount}**\n\n{md}")
            if i < total:
                parts.append("\n--- PAGE BREAK ---\n")
    return "\n".join(parts)
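# Note (added): smart_pdf_reader tries ruled-line table detection first and only
# falls back to text-alignment detection when is_probably_tabular sees enough
# lines sharing binned x-start positions (bin width x_bin); page text is then
# extracted outside the detected table bboxes so tables are not duplicated as prose.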
def basic_pdf_reader(path: str) -> str:
    """
    Return PDF text with a separator after each page.
    """
    loader = UnstructuredPDFLoader(path, mode="elements")
    elements = loader.load()
    out, last_page = [], None
    for d in elements:
        page = (d.metadata or {}).get("page_number")
        if page is not None and page != last_page:
            if last_page is not None:
                out.append("\n--- PAGE BREAK ---\n")
            out.append(f"[PDF Page {page}]")
            last_page = page
        txt = (d.page_content or "").strip()
        if txt:
            out.append(txt)
    return "\n".join(out)

@tool
def read_pdf(path: str) -> str:
    """
    Load and return the content of a PDF file.
    Args:
        path (str): The path to the PDF file.
    Returns:
        str: The content of the PDF file.
    """
    try:
        return smart_pdf_reader(path)
    except Exception as e:
        print(f"Smart reader failed: {e}")
        return basic_pdf_reader(path)
# # Test the read_pdf tool
# # pdf_path = "HF_Agents_Course/u4.final_project/gaia/2023/validation/e9a2c537-8232-4c3f-85b0-b52de6bcba99.pdf"
# # pdf_path = "HF_Agents_Course/u4.final_project/gaia/2023/test/8f697523-6988-4c4f-8d72-760a45681f68.pdf"
# # pdf_path = "HF_Agents_Course/u4.final_project/gaia/2023/test/32f386b9-73ee-4455-b412-ddad508aa979.pdf"
# # pdf_path = "HF_Agents_Course/u4.final_project/gaia/2023/test/021a5339-744f-42b7-bd9b-9368b3efda7a.pdf"
# # pdf_path = "HF_Agents_Course/u4.final_project/gaia/2023/test/634fca59-03b2-4cdf-9ce4-0205df22f256.pdf"
# pdf_path = "HF_Agents_Course/u4.final_project/gaia/2023/test/be353748-74eb-4904-8f17-f180ce087f1a.pdf"
# # pdf_content = read_pdf(pdf_path)
# pdf_content = read_pdf(pdf_path)
# print(f"Content of '{pdf_path}':\n{pdf_content}")
# %%
# %pip install unstructured
# %%
# %pip install python-pptx
# %%
# 8. pptx viewer
from langchain_community.document_loaders import UnstructuredPowerPointLoader

@tool
def read_pptx(path: str) -> str:
    """
    Load and return the content of a PPTX file.
    Args:
        path (str): The path to the PPTX file.
    Returns:
        str: The content of the PPTX file.
    """
    loader = UnstructuredPowerPointLoader(path, mode="elements")
    elements = loader.load()
    out, last_slide = [], None
    for d in elements:
        meta = d.metadata or {}
        slide_no = meta.get("page_number")  # Unstructured uses page_number for slides
        if slide_no is not None and slide_no != last_slide:
            if last_slide is not None:
                out.append("\n--- SLIDE BREAK ---\n")
            out.append(f"[Slide {slide_no}]")
            last_slide = slide_no
        txt = (d.page_content or "").strip()
        if txt:
            out.append(txt)
    return "\n".join(out)
# # Test the read_pptx tool
# pptx_path = "HF_Agents_Course/u4.final_project/gaia/2023/validation/a3fbeb63-0e8c-4a11-bff6-0e3b484c3e9c.pptx"
# pptx_content = read_pptx(pptx_path)
# print(f"Content of '{pptx_path}':\n{pptx_content}")
# %%
# %pip install openpyxl
# %%
# 9. xlsx viewer
import pandas as pd
from openpyxl import load_workbook
from openpyxl.utils import get_column_letter

@tool
def read_excel(
    path: str,
    max_rows_per_sheet: int = 10000
) -> str:
    """
    Load and return the content of an Excel (XLSX) file.
    Args:
        path (str): The path to the XLSX file.
        max_rows_per_sheet (int): Maximum number of rows to read per sheet.
    Returns:
        str: The content of the XLSX file, one markdown table per sheet.
    """
    wb = load_workbook(path, data_only=True)
    parts = []

    def format_cell(value, numfmt: str) -> str:
        if value is None:
            return ""
        if not isinstance(value, (int, float)):
            return str(value)
        nf = (numfmt or "").lower()
        is_currency = ("$" in nf) or ("[$" in nf) or ("accounting" in nf)
        if not is_currency:
            return f"{value}"
        symbol = "$"
        if "[$" in nf:
            try:
                sym = nf.split("[$", 1)[1].split("]", 1)[0]
                symbol = (sym.split("-", 1)[0] or "$").strip()
            except Exception:
                pass
        decimals = 2
        if "." in nf:
            after = nf.split(".", 1)[1]
            z = 0
            for ch in after:
                if ch == "0":
                    z += 1
                elif ch in "#,; ]":
                    continue
                else:
                    break
            if z > 0:
                decimals = z
        use_grouping = "," in nf.split(".", 1)[0]
        neg_paren = "(" in nf and ")" in nf and value < 0
        abs_val = abs(value)
        num_str = f"{abs_val:,.{decimals}f}" if use_grouping else f"{abs_val:.{decimals}f}"
        if neg_paren:
            return f"({symbol}{num_str})"
        return f"{'-' if value < 0 else ''}{symbol}{num_str}"

    for ws in wb.worksheets:
        rows = []
        max_row = min(ws.max_row, max_rows_per_sheet) if max_rows_per_sheet else ws.max_row
        max_col = ws.max_column
        for r in range(1, max_row + 1):
            row_vals = []
            for c in range(1, max_col + 1):
                cell = ws.cell(row=r, column=c)
                row_vals.append(format_cell(cell.value, cell.number_format))
            rows.append(row_vals)
        # Trim trailing empty rows/columns
        while rows and all(v == "" for v in rows[-1]):
            rows.pop()
        while rows and rows[0] and all(v == "" for v in (row[-1] for row in rows)):
            for row in rows:
                row.pop()
        if not rows:
            parts.append(f"### Sheet: {ws.title}\n\n*(empty)*\n\n--- SHEET BREAK ---\n")
            continue
        df = pd.DataFrame(rows).fillna("")
        ncols = df.shape[1]
        # Set Excel-style column headers
        df.columns = [get_column_letter(i) for i in range(1, ncols + 1)]
        df.insert(0, "", range(1, len(df) + 1))  # new first column for row numbers
        md = df.to_markdown(index=False)
        parts.append(f"### Sheet: {ws.title}\n\n{md}\n\n--- SHEET BREAK ---\n")
    return "\n".join(parts) if parts else "(No sheets found)"
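# Note (added): format_cell re-applies currency formats by parsing openpyxl's
# number-format string: the symbol comes from a "[$...]" section, the decimal
# precision from the count of zeros after the decimal point, and negatives are
# wrapped in parentheses when the format itself uses them.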
# # Test the view_xlsx_as_markdown_tables tool
# # xlsx_path = "HF_Agents_Course/u4.final_project/gaia/2023/validation/3da89939-209c-4086-8520-7eb734e6b4ef.xlsx"
# xlsx_path = "HF_Agents_Course/u4.final_project/gaia/2023/validation/4d0aa727-86b1-406b-9b33-f870dd14a4a5.xlsx"
# # xlsx_path = "HF_Agents_Course/u4.final_project/gaia/2023/validation/5cfb274c-0207-4aa7-9575-6ac0bd95d9b2.xlsx"
# # xlsx_path = "HF_Agents_Course/u4.final_project/gaia/2023/validation/7bd855d8-463d-4ed5-93ca-5fe35145f733.xlsx"
# # xlsx_path = "HF_Agents_Course/u4.final_project/gaia/2023/validation/65afbc8a-89ca-4ad5-8d62-355bb401f61d.xlsx"
# # xlsx_path = "HF_Agents_Course/u4.final_project/gaia/2023/validation/076c8171-9b3b-49b9-a477-244d2a532826.xlsx"
# # xlsx_path = "HF_Agents_Course/u4.final_project/gaia/2023/validation/32102e3e-d12a-4209-9163-7b3a104efe5d.xlsx"
# # xlsx_path = "HF_Agents_Course/u4.final_project/gaia/2023/validation/54612da3-fd56-4941-80f4-5eb82330de25.xlsx"
# # xlsx_path = "HF_Agents_Course/u4.final_project/gaia/2023/validation/c526d8d6-5987-4da9-b24c-83466fa172f3.xlsx"
# xlsx_content = read_excel(xlsx_path)
# print(f"Content of '{xlsx_path}':\n{xlsx_content}")
# %%
# 10. py executor
from __future__ import annotations
from typing import List, Optional, Dict, Any
import os, sys, subprocess

MAX_OUTPUT_CHARS = 60_000  # prevent enormous payloads in chat history

def _truncate(s: str, limit: int = MAX_OUTPUT_CHARS) -> str:
    if s is None:
        return ""
    if len(s) <= limit:
        return s
    tail = "\n...[truncated]"
    return s[: max(0, limit - len(tail))] + tail

def run_python_file_raw(
    path: str,
    args: Optional[List[str]] = None,
    timeout_sec: int = 30,
    env: Optional[Dict[str, str]] = None
) -> Dict[str, Any]:
    """
    Execute a local .py file in a subprocess (no shell).
    Returns a dict: {exit_code, stdout, stderr}.
    - args: optional CLI args passed to the script
    - timeout_sec: hard limit for process execution (default: 30s)
    - env: extra environment variables to add/override
    Notes:
    * Uses the same interpreter as the host (sys.executable).
    * Runs with CWD = script's directory.
    * Output is truncated to keep agent state small.
    """
    if not path.lower().endswith(".py"):
        return {"exit_code": -1, "stdout": "", "stderr": "Refusing to run non-.py file."}
    if not os.path.exists(path):
        return {"exit_code": -1, "stdout": "", "stderr": f"File not found: {path}"}
    args = list(args or [])
    cwd = os.path.dirname(os.path.abspath(path)) or None
    cmd = [sys.executable, "-u", path, *args]  # -u for unbuffered stdout/stderr
    # Compose env safely
    run_env = os.environ.copy()
    if env:
        for k, v in env.items():
            if isinstance(k, str) and isinstance(v, str):
                run_env[k] = v
    try:
        proc = subprocess.run(  # safe: no shell
            cmd,
            cwd=cwd,
            env=run_env,
            capture_output=True,  # capture both streams
            text=True,            # decode to str
            timeout=timeout_sec,  # hard stop
            check=False,          # don't raise on nonzero exit
        )
        return {
            "exit_code": proc.returncode,
            "stdout": _truncate(proc.stdout),
            "stderr": _truncate(proc.stderr),
        }
    except subprocess.TimeoutExpired as e:
        # e.stdout/e.stderr may be bytes or None depending on Python version; normalize
        out = e.stdout.decode() if isinstance(e.stdout, (bytes, bytearray)) else (e.stdout or "")
        err = e.stderr.decode() if isinstance(e.stderr, (bytes, bytearray)) else (e.stderr or "")
        return {
            "exit_code": -9,
            "stdout": _truncate(out),
            "stderr": _truncate((err or "") + f"\n[timeout after {timeout_sec}s]"),
        }
    except Exception as e:
        return {"exit_code": -1, "stdout": "", "stderr": f"Exception: {e.__class__.__name__}: {e}"}

@tool
def run_python_file(
    path: str,
    args: Optional[List[str]] = None,
    timeout_sec: int = 30,
    env: Optional[Dict[str, str]] = None
) -> str:
    """
    Execute a local .py file in a subprocess (no shell).
    Args:
        path (str): The path to the .py file
        args (Optional[List[str]]): optional CLI args passed to the script
        timeout_sec (int): hard limit for process execution (default: 30s)
        env (Optional[Dict[str, str]]): extra environment variables to add/override
    Returns:
        str: exit code, stdout and stderr from running the .py file
    """
    result = run_python_file_raw(
        path=path,
        args=args,
        timeout_sec=timeout_sec,
        env=env,
    )
    return (
        f"## Exit Code: {result['exit_code']}\n\n"
        f"## stdout:\n"
        f"{result['stdout']}\n\n"
        f"## stderr:\n"
        f"{result['stderr']}\n\n"
    )
# # Test the run_python_file tool
# py_path = "HF_Agents_Course/u4.final_project/gaia/2023/validation/f918266a-b3e0-4914-865d-4faa564f1aef.py"
# print(f"Running '{py_path}'")
# result = run_python_file(py_path, timeout_sec=100)
# print(result)
# %%
# 11. Calculator
@tool
def calculator(expression: str) -> float | str:
    """
    Evaluate a mathematical expression.
    Args:
        expression (str): The mathematical expression to evaluate.
    Returns:
        float|str: The result of the evaluation or an error message.
    """
    try:
        result = eval(expression)
        return result
    except Exception as e:
        return f"Error evaluating expression: {e}"
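# Note (added): eval() on model-generated input is risky outside a sandbox. A
# minimal hardening sketch (assuming math-only expressions; the names below are
# illustrative, not part of the original) would strip builtins before evaluating:
#
#   import math
#   safe_globals = {"__builtins__": {}, "sqrt": math.sqrt, "pi": math.pi}
#   result = eval(expression, safe_globals, {})
#
# so the expression cannot reach open(), __import__, or other builtins by name.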
# # Test the calculator function
# print(calculator("2 //xt 2"))
# print(calculator("10 / 0"))
# print(calculator("10 / 2"))
# %%
# %pip install statsmodels matplotlib seaborn
# %%
# %pip install wikipedia-api
# %%
from smolagents import WikipediaSearchTool

agent = CodeAgent(
    # model=InferenceClientModelWithUsage(
    #     model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
    #     # provider="together"
    #     # model_id="Qwen/Qwen3-30B-A3B",
    #     # provider="nebius",
    # ),
    model=OpenAIServerModelWithUsage(
        model_id="gpt-4o",
        max_tokens=16384,
        # api_key=os.getenv("OPENAI_API_KEY"),
        # max_input_tokens=8192,
        # max_output_tokens=1024,
        # temperature=0.0,
        # top_p=1.0,
        # frequency_penalty=0.0,
        # presence_penalty=0.0,
    ),
    tools=[
        DuckDuckGoSearchTool(),
        VisitWebpageTool(),
        WikipediaSearchTool(),
        image_comprehension,
        extract_text_from_image,
        transcribe_audio,
        read_txt,
        read_docx,
        read_pdf,
        read_pptx,
        read_excel,
        run_python_file,
        calculator,
        FinalAnswerTool(),
    ],
    additional_authorized_imports=[
        "pandas", "numpy",
        "matplotlib", "seaborn",
        "sklearn", "scipy", "statsmodels",
    ],
    planning_interval=5,
    verbosity_level=2,
    max_steps=20,
)
agent.visualize()
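# Note (added): planning_interval=5 makes the CodeAgent interleave a planning
# step every five action steps, and additional_authorized_imports whitelists
# the packages the agent's generated Python may import in the local executor.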
# %%
from tqdm import tqdm

def get_answer(
    question: str,
    file_path: str = None,
    # timeout_seconds: int = 60  # 1 minute default
):
    sys_msg = f"""
You are a helpful assistant with access to various tools.
You should answer the user's question using the tools at your disposal.
User Question: {question}
Currently the user has provided the following file(s) as input:
{file_path if file_path else 'No file provided.'}
Produce your responses using the following process:
1. Think about which tools you need to use, in particular whether there is a file to process.
2. Use one tool at a time.
3. Observe the result.
4. Decide if you need more information or can provide a final answer.
5. If you need more information, use another tool. If you still need more information, repeat the process until you can provide a final answer. Do not ask the user for more information.
6. Only provide the final answer, with no explanations and no enclosures. AGAIN, YOU ONLY NEED TO PROVIDE THE FINAL ANSWER AND NOTHING ELSE.
For example, for the question "What is the capital of France?", you should respond with just "Paris" (without quotes), not "The capital of France is Paris."
Be concise and direct in your responses."""
    try:
        response = agent.run(sys_msg)
        print((
            "Agent run completed.\n"
            "Response:\n"
            f"{response}\n"
        ))
    except Exception as e:
        response = f"Error during agent execution: {str(e)}"
        # print(response)
    return response
# %%
import pandas as pd
test_dataset_path = "final_test.csv"
test_df = pd.read_csv(test_dataset_path)
test_df["file_path"] = test_df["file_path"].apply(lambda x: x if isinstance(x, str) else "")
test_df
# %%
tqdm.pandas(desc="Processing rows ...")
test_df["predicted_answer"] = test_df.progress_apply(
lambda row: get_answer(
question=row["question"],
file_path=row["file_path"] if isinstance(row["file_path"], str) else None
), axis=1
)
test_df.to_csv("test_with_predictions.csv", index=False)
# %%
test_df.iloc[6]["question"]
# %%
test_df
# %%
tqdm.pandas(desc="Processing rows ...")
test_df["predicted_answer"] = test_df.progress_apply(
lambda row: get_answer(
question=row["question"],
file_path=row["file_path"] if isinstance(row["file_path"], str) else None
) if (isinstance(row["predicted_answer"], str) and "Error" in row["predicted_answer"]) else row["predicted_answer"],
axis=1
)
test_df.to_csv("test_with_predictions.csv", index=False)
# %%
task_11_answer = get_answer(
    question=test_df.iloc[11]["question"],
    file_path=test_df.iloc[11]["file_path"] if isinstance(test_df.iloc[11]["file_path"], str) else None
)
test_df.at[11, "predicted_answer"] = task_11_answer
test_df.to_csv("test_with_predictions.csv", index=False)
# %%
# val_split = pd.read_csv("val_split.csv")
# val_split["file_path"] = val_split["file_path"].apply(lambda x: x if isinstance(x, str) else "")
# # val_split = val_split.iloc[:10].copy(deep=True)
# # Randomly sample 10 rows
# val_split = val_split.sample(n=10, random_state=42)
# tqdm.pandas(desc="Processing rows ...")
# val_split["predicted_answer"] = val_split.progress_apply(
#     lambda row: get_answer(
#         question=row["question"],
#         file_path=row["file_path"] if isinstance(row["file_path"], str) else None
#     ), axis=1
# )
# %%
# val_split["predicted_answer"]
# %%
# for _, row in val_split.iterrows():
#     question = row["question"]
#     predicted_answer = row["predicted_answer"]
#     ground_truth = row["Final answer"]
#     print((
#         "Question:\n"
#         f"{question}\n\n"
#         "File:\n"
#         f"{row['file_path']}\n\n"
#         "Predicted Answer:\n"
#         f"{predicted_answer}\n\n"
#         "Ground Truth:\n"
#         f"{ground_truth}\n"
#         "========================\n"
#     ))

username | score | timestamp | code
---|---|---|---
Diaby02 | 30 | 2025-09-11T11:08:17.077568+00:00 | https://huggingface.co/spaces/Diaby02/Final_Assignment_Template/tree/main |
ankitdoiphode | 40 | 2025-09-11T11:28:59.558434+00:00 | https://huggingface.co/spaces/ankitdoiphode/Final_Assignment_Template/tree/main |
Hyeonseo | 100 | 2025-09-11T13:12:02.717085+00:00 | https://huggingface.co/spaces/Hyeonseo/Final_Assignment_Template/tree/main |
iwantmorebugs | 45 | 2025-09-11T16:42:22.995722+00:00 | iwantmorebugs |
lcfdiniz | 35 | 2025-09-11T18:35:35.725350+00:00 | https://huggingface.co/spaces/lcfdiniz/Agents_Course_Final_Assignment/tree/main |
sagidu | 0 | 2025-09-11T20:12:10.506822+00:00 | https://huggingface.co/spaces/None/tree/main |
jfr4nc0 | 100 | 2025-09-12T00:31:45.099517+00:00 | https://huggingface.co/spaces/jfr4nc0/Final_Assignment_Template/tree/main |
NishaSharma65 | 45 | 2025-09-12T03:56:01.172795+00:00 | https://huggingface.co/spaces/NishaSharma65/Final_Assignment_Template/tree/main |
OF-013 | 40 | 2025-09-12T07:42:12.094428+00:00 | https://huggingface.co/spaces/OF-013/Final_Assignment_Template/tree/main |
aksamota | 45 | 2025-09-12T12:27:48.551808+00:00 | https://huggingface.co/spaces/aksamota/unit-04_final-assignment/tree/main |
gabzer | 60 | 2025-09-12T16:14:50.425467+00:00 | https://huggingface.co/spaces/gabzer/GAIA_benchmark_agent/tree/main |
harveytuan | 40 | 2025-09-12T17:31:30.171677+00:00 | https://huggingface.co/spaces/harveytuan/Final_Assignment_Template911/tree/main |
tgarity | 40 | 2025-09-12T18:50:46.752555+00:00 | https://huggingface.co/spaces/None/tree/main |
annytran | 0 | 2025-09-12T19:25:20.635578+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
DevforMM | 30 | 2025-09-12T21:32:02.661628+00:00 | https://huggingface.co/spaces/None/tree/main |
dbb2 | 15 | 2025-09-13T01:51:17.598270+00:00 | https://huggingface.co/spaces/dbb2/Final_Assignment_Template/tree/main |
hhhhmmmm | 0 | 2025-09-13T02:20:51.400202+00:00 | https://huggingface.co/spaces/hhhhmmmm/Final_Assignment_Template/tree/main |
najmussaqib313 | 0 | 2025-09-13T05:39:05.056274+00:00 | https://huggingface.co/spaces/najmussaqib313/Final_Assignment_Template/tree/main |
jhcadfergu | 40 | 2025-09-13T08:21:39.885066+00:00 | https://huggingface.co/spaces/jhcadfergu/unit4_test/tree/main |
yuHuuH | 100 | 2025-09-13T08:57:10.721810+00:00 | https://huggingface.co/spaces/yuHuuH/Final/tree/main |
wojji | 30 | 2025-09-13T16:16:27.154514+00:00 | https://huggingface.co/spaces/wojji/Final_Assignment_Template/tree/main |
aashita-n | 0 | 2025-09-13T16:58:10.497784+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
carolinacon | 90 | 2025-09-13T20:00:39.891374+00:00 | https://huggingface.co/spaces/carolinacon/Final_Assignment_Template/tree/main |
AkramZennad | 30 | 2025-09-13T20:24:09.541111+00:00 | https://huggingface.co/spaces/None/tree/main |
martinjolif | 0 | 2025-09-13T21:29:57.536606+00:00 | https://huggingface.co/spaces/None/tree/main |
ffyang | 0 | 2025-09-13T23:56:00.393450+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
hardwired | 40 | 2025-09-14T02:28:41.744125+00:00 | https://huggingface.co/spaces/SwetaPati022/SP_agents-course-final/tree/main |
mykytazaginei | 0 | 2025-09-14T12:33:40.699605+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
matapanda | 10 | 2025-09-14T12:40:23.559004+00:00 | https://huggingface.co/spaces/None/tree/main |
MarcAVGL | 0 | 2025-09-14T12:42:33.921384+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
mouhamed-hussam | 35 | 2025-09-14T12:54:57.624736+00:00 | https://huggingface.co/spaces/hussam-mislmani/tree/main |
morgannedewitte | 0 | 2025-09-14T14:47:18.110410+00:00 | https://huggingface.co/spaces/morgannedewitte/Final_Assignment_Template/tree/main |
tuongtn | 0 | 2025-09-14T16:19:22.366164+00:00 | https://huggingface.co/spaces/tuongtn/Final_Assignment_Template/tree/main |
RiverWangHuggingFace | 10 | 2025-09-14T18:08:34.447582+00:00 | https://huggingface.co/spaces/RiverWangHuggingFace/Final_Assignment_Template/tree/main |
jedrzejbrzezicki | 0 | 2025-09-14T18:38:25.667895+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
tonyshumai | 5 | 2025-09-14T20:18:07.395401+00:00 | https://huggingface.co/spaces/tonyshumai/Final_Assignment_Template/tree/main |
SwetaPati022 | 35 | 2025-09-15T00:32:04.473237+00:00 | https://huggingface.co/spaces/SwetaPati022/SP_Final_Assignment_Template/tree/main |
RenxiuF | 5 | 2025-09-15T01:48:19.894369+00:00 | https://huggingface.co/spaces/RenxiuF/Final_Assignment/tree/main |
ribokle | 50 | 2025-09-15T05:25:28.188292+00:00 | https://huggingface.co/spaces/ribokle/Final_Assignment_Template_r/tree/main |
Khaitakate | 0 | 2025-09-15T07:03:17.372485+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
manuferr | 35 | 2025-09-15T08:19:58.461988+00:00 | https://huggingface.co/spaces/manuferr/Final_Assignment_Template/tree/main |
Annukul | 15 | 2025-09-15T08:36:01.675390+00:00 | https://huggingface.co/spaces/Annukul/Final_Assignment_Template_GAIA_Agent_2/tree/main |
Chenchloe2025 | 10 | 2025-09-15T08:38:58.215250+00:00 | https://huggingface.co/spaces/Chenchloe2025/Final_Assignment_Template/tree/main |
mert-colab | 10 | 2025-09-15T11:24:09.751293+00:00 | https://huggingface.co/spaces/None/tree/main |
lien1119 | 100 | 2025-09-15T12:32:31.412492+00:00 | https://huggingface.co/spaces/tannguyen20/Final_Assignment_Template/tree/main |
Alejandro2329 | 50 | 2025-09-15T13:02:16.801625+00:00 | https://huggingface.co/spaces/Alejandro2329/Final_Assignment_Template/tree/main |
plomitt | 50 | 2025-09-15T13:25:38.298282+00:00 | https://huggingface.co/spaces/None/tree/main |
apoorvsinghal | 0 | 2025-09-15T14:13:15.242046+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
coldfirex | 50 | 2025-09-15T16:19:49.617549+00:00 | https://huggingface.co/spaces/coldfirex/Final_Assignment_Template/tree/main |
theraykar | 5 | 2025-09-15T16:24:51.736918+00:00 | https://huggingface.co/spaces/None/tree/main |
trebull | 0 | 2025-09-15T16:50:50.794550+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
FaizRah | 0 | 2025-09-15T17:18:54.737349+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
Rama3017 | 0 | 2025-09-15T19:05:23.574537+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
YousefAshraf | 50 | 2025-09-15T19:30:29.414513+00:00 | https://huggingface.co/spaces/YousefAshraf/Final_Assignment_Template/tree/main |
JM1599 | 0 | 2025-09-15T20:37:07.442208+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
jsnmls | 0 | 2025-09-15T20:57:25.360734+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
keylazy | 0 | 2025-09-15T23:34:15.097506+00:00 | https://huggingface.co/spaces/keylazy/HF_Agent_Final_Assignment/tree/main |
groudas | 40 | 2025-09-16T00:15:35.871338+00:00 | https://huggingface.co/spaces/None/tree/main |
your-hf-username | 15 | 2025-09-16T04:52:24.917709+00:00 | https://huggingface.co/spaces/None/tree/main |
mt | 5 | 2025-09-16T06:10:18.267170+00:00 | https://huggingface.co |
Agogaga | 0 | 2025-09-16T06:34:57.552066+00:00 | https://huggingface.co/spaces/Agogaga/Agents_Course_Final/tree/main |
MajdT | 30 | 2025-09-16T06:41:15.350396+00:00 | https://local-run.example/this-is-a-dummy-agent-code-url-for-local-execution |
AGiorni | 35 | 2025-09-16T06:45:18.637116+00:00 | https://huggingface.co/spaces/AGiorni/Final_Assignment_Template/tree/main |
Vizagil | 0 | 2025-09-16T11:22:21.005440+00:00 | https://huggingface.co/spaces/Vizagil/Final_Assignment_Template/tree/main |
Shorina | 5 | 2025-09-16T12:47:28.882533+00:00 | https://huggingface.co/spaces/Shorina/Final_Assignment_Template/tree/main |
Tsantaris | 0 | 2025-09-16T14:24:55.641253+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
rounaqnayak | 100 | 2025-09-16T14:36:47.853683+00:00 | https://huggingface.co/spaces/clokoihue/hf_agents_course_gaia_agent/tree/main |
Mikrokot | 35 | 2025-09-16T15:04:29.625054+00:00 | https://huggingface.co/spaces/None/tree/main |
HemanthMarisetti | 0 | 2025-09-17T05:45:35.923691+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
haooozhe | 0 | 2025-09-17T07:33:41.228145+00:00 | https://huggingface.co/spaces/haooozhe/Final_Assignment_Template/tree/main |
jaedgo | 30 | 2025-09-17T07:50:05.265373+00:00 | https://huggingface.co/spaces/jaedgo/Final_Assignment_Template/tree/main |
Marvin12311 | 0 | 2025-09-17T08:05:01.506554+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
abdelrahmanwael2 | 0 | 2025-09-17T10:15:22.552195+00:00 | https://huggingface.co/spaces/abdelrahmanwael2/Final_Assignment/tree/main |
domin3 | 30 | 2025-09-17T11:52:11.336856+00:00 | https://huggingface.co/spaces/domin3/agent_course/tree/main |
Ali-Mones | 0 | 2025-09-17T13:18:15.420549+00:00 | https://huggingface.co/spaces/Ali-Mones/Final_Assignment_Template/tree/main |
PureZinc | 0 | 2025-09-17T13:43:05.673989+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
Petros1994 | 0 | 2025-09-17T14:38:09.600461+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
mohamed2003IX | 20 | 2025-09-17T15:56:11.470777+00:00 | https://huggingface.co/spaces/None/tree/main |
floridaman29 | 0 | 2025-09-17T20:35:04.578281+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
nuricankazaz | 0 | 2025-09-17T21:06:16.771500+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
lez82bell | 0 | 2025-09-17T21:08:56.596437+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |
Ajeyprabhu | 0 | 2025-09-18T06:03:15.701678+00:00 | https://huggingface.co/spaces/agents-course/Final_Assignment_Template/tree/main |