-
Notifications
You must be signed in to change notification settings - Fork 4.7k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[Question]: i want to build a multi-level tools or multi-level agent structure, but don’t know how to do it? #14589
Comments
To build a multi-level tools or multi-level agent structure, you can follow these steps:
Here is the relevant code snippet (Build Document Agent for each Document):
from llama_index.agent.openai import OpenAIAgent
from llama_index.core import load_index_from_storage, StorageContext, VectorStoreIndex, SummaryIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.tools import QueryEngineTool, ToolMetadata
import os
# --- Build one document agent per Wikipedia title ---
# NOTE(review): this snippet was scraped from a web page; the original
# Python indentation (loop/if bodies) has been flattened to column 0 and
# must be restored before the code will run.
# NOTE(review): `wiki_titles`, `city_docs`, `Settings`, and `OpenAI` are
# not defined in this excerpt — presumably defined/imported earlier.
node_parser = SentenceSplitter()
# Build agents dictionary
# One agent and one plain query engine per title, keyed by wiki title.
agents = {}
query_engines = {}
# this is for the baseline
# `all_nodes` accumulates every title's nodes for a baseline (single-index) comparison.
all_nodes = []
for idx, wiki_title in enumerate(wiki_titles):
# Split this title's documents into nodes.
nodes = node_parser.get_nodes_from_documents(city_docs[wiki_title])
all_nodes.extend(nodes)
if not os.path.exists(f"./data/{wiki_title}"):
# build vector index
# First run: build the vector index and persist it to ./data/<title>.
vector_index = VectorStoreIndex(nodes)
vector_index.storage_context.persist(
persist_dir=f"./data/{wiki_title}"
)
else:
# Later runs: reload the persisted vector index instead of rebuilding.
vector_index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=f"./data/{wiki_title}"),
)
# build summary index
# The summary index is rebuilt every run — only the vector index is persisted above.
summary_index = SummaryIndex(nodes)
# define query engines
vector_query_engine = vector_index.as_query_engine(llm=Settings.llm)
summary_query_engine = summary_index.as_query_engine(llm=Settings.llm)
# define tools
# Two tools per title: vector_tool for specific questions,
# summary_tool for holistic summaries of the whole article.
query_engine_tools = [
QueryEngineTool(
query_engine=vector_query_engine,
metadata=ToolMetadata(
name="vector_tool",
description=(
"Useful for questions related to specific aspects of"
f" {wiki_title} (e.g. the history, arts and culture,"
" sports, demographics, or more)."
),
),
),
QueryEngineTool(
query_engine=summary_query_engine,
metadata=ToolMetadata(
name="summary_tool",
description=(
"Useful for any requests that require a holistic summary"
f" of EVERYTHING about {wiki_title}. For questions about"
" more specific sections, please use the vector_tool."
),
),
),
]
# build agent
# Each document agent is an OpenAIAgent wrapping the two tools above;
# the system prompt forces tool use over the LLM's prior knowledge.
function_llm = OpenAI(model="gpt-4")
agent = OpenAIAgent.from_tools(
query_engine_tools,
llm=function_llm,
verbose=True,
system_prompt=f"""\
You are a specialized agent designed to answer queries about {wiki_title}.
You must ALWAYS use at least one of the tools provided when answering a question; do NOT rely on prior knowledge.\
""",
)
agents[wiki_title] = agent
# Also keep a plain (agent-free) query engine per title for the baseline.
query_engines[wiki_title] = vector_index.as_query_engine(
similarity_top_k=2
) Build Retriever-Enabled OpenAI Agent# define tool for each document agent
# --- Wrap each document agent as a tool and build the top-level agent ---
# NOTE(review): scraped text — some prose headings from the web page are
# fused onto code lines below, and indentation has been flattened.
all_tools = []
for wiki_title in wiki_titles:
# Description the retriever embeds so it can pick the right tool per query.
wiki_summary = (
f"This content contains Wikipedia articles about {wiki_title}. Use"
f" this tool if you want to answer any questions about {wiki_title}.\n"
)
# Each document agent is itself exposed as a QueryEngineTool — this is
# what makes the structure multi-level (agent over agents).
doc_tool = QueryEngineTool(
query_engine=agents[wiki_title],
metadata=ToolMetadata(
name=f"tool_{wiki_title}",
description=wiki_summary,
),
)
all_tools.append(doc_tool) # define an "object" index and retriever over these tools
from llama_index.core import VectorStoreIndex
from llama_index.core.objects import ObjectIndex
# Index the tool objects themselves so the top agent can retrieve only
# the most relevant document-agent tools for each query.
obj_index = ObjectIndex.from_objects(
all_tools,
index_cls=VectorStoreIndex,
) from llama_index.agent.openai import OpenAIAgent
# Top-level agent: instead of a fixed tool list, it retrieves up to 3
# candidate tools (document agents) per query via the object index.
top_agent = OpenAIAgent.from_tools(
tool_retriever=obj_index.as_retriever(similarity_top_k=3),
system_prompt=""" \
You are an agent designed to answer queries about a set of given cities.
Please always use the tools provided to answer a question. Do not rely on prior knowledge.\
""",
verbose=True,
) This setup allows the top-level agent to retrieve and use the appropriate document agent to answer queries effectively. Using MultiDocumentAgentsPackAlternatively, you can use the from llama_index.core.schema import Document
from llama_index.packs.multi_document_agents.base import MultiDocumentAgentsPack
# Initialize documents, titles, and descriptions
# NOTE(review): llama_index's Document constructor usually takes text=...,
# not content= — verify against the installed llama_index version.
docs = [Document(content="..."), ...]
doc_titles = ["Title1", "Title2", ...]
doc_descriptions = ["Description1", "Description2", ...]
# Create the multi-document agents pack
# The pack builds the per-document agents and the top-level agent for you.
multi_doc_agents_pack = MultiDocumentAgentsPack(docs, doc_titles, doc_descriptions)
# Access the top-level agent
top_agent = multi_doc_agents_pack.get_modules()["top_agent"]
# Use the top-level agent to answer queries
response = top_agent.query("Your query here")
print(response) This class handles the creation of document agents and the top-level agent, making it easier to manage and use multi-level agent structures [1][2][3]. |
Can this be done without using OpenAI? @dosu
What if I want more levels? @dosu |
To extend the setup to include more levels of agents without using OpenAI, you can follow these steps:
# NOTE(review): scraped text — the prose "Additionally, you can use the "
# is fused onto the `class` line below, and all indentation was flattened;
# restore both before running. `YourLLM`, `YourAgent`, and
# `FnRetrieverAgent` are placeholders to be replaced with a concrete
# (non-OpenAI) LLM/agent; `SimpleToolNodeMapping`, `ObjectIndex`, and the
# index classes come from llama_index imports not shown in this excerpt.
# NOTE(review): `ServiceContext` has been deprecated in newer llama_index
# releases in favor of `Settings` — verify against the installed version.
Additionally, you can use the class MultiDocumentAgentsPack(BaseLlamaPack):
"""Multi-document Agents pack.
Given a set of documents, build our multi-document agents architecture.
- setup a document agent over agent doc (capable of QA and summarization)
- setup a top-level agent over doc agents
"""
def __init__(self, docs: List[Document], doc_titles: List[str], doc_descriptions: List[str], **kwargs: Any) -> None:
"""Init params."""
# `docs`, `doc_titles`, and `doc_descriptions` are parallel lists indexed together below.
self.node_parser = SentenceSplitter()
self.llm = YourLLM(temperature=0) # Replace with your LLM
self.service_context = ServiceContext.from_defaults(llm=self.llm)
# Build agents dictionary
# One document agent per title, keyed by title.
self.agents = {}
# build agent for each document
for idx, doc in enumerate(docs):
doc_title = doc_titles[idx]
doc_description = doc_descriptions[idx]
nodes = self.node_parser.get_nodes_from_documents([doc])
# build vector index
vector_index = VectorStoreIndex(nodes, service_context=self.service_context)
# build summary index
summary_index = SummaryIndex(nodes, service_context=self.service_context)
# define query engines
vector_query_engine = vector_index.as_query_engine()
summary_query_engine = summary_index.as_query_engine()
# define tools
# vector_tool for specific questions, summary_tool for whole-document summaries.
query_engine_tools = [
QueryEngineTool(
query_engine=vector_query_engine,
metadata=ToolMetadata(
name="vector_tool",
description=f"Useful for questions related to specific aspects of {doc_title}.",
),
),
QueryEngineTool(
query_engine=summary_query_engine,
metadata=ToolMetadata(
name="summary_tool",
description=f"Useful for any requests that require a holistic summary of EVERYTHING about {doc_title}.",
),
),
]
# build agent
# The per-document agent is forced (via system prompt) to use its tools.
function_llm = YourLLM() # Replace with your LLM
agent = YourAgent.from_tools(
query_engine_tools,
llm=function_llm,
verbose=True,
system_prompt=f"""\
You are a specialized agent designed to answer queries about {doc_title}.
You must ALWAYS use at least one of the tools provided when answering a question; do NOT rely on prior knowledge.\
""",
)
self.agents[doc_title] = agent
# build top-level, retrieval-enabled Agent
# Each document agent becomes a tool; the top agent retrieves the
# relevant tools per query instead of holding them all at once.
all_tools = []
for idx, doc in enumerate(docs):
doc_title = doc_titles[idx]
doc_description = doc_descriptions[idx]
wiki_summary = f"Use this tool if you want to answer any questions about {doc_title}.\nDoc description: {doc_description}\n"
doc_tool = QueryEngineTool(
query_engine=self.agents[doc_title],
metadata=ToolMetadata(
name=f"tool_{doc_title}",
description=wiki_summary,
),
)
all_tools.append(doc_tool)
# Object index over the tool objects enables semantic tool retrieval.
tool_mapping = SimpleToolNodeMapping.from_objects(all_tools)
self.obj_index = ObjectIndex.from_objects(
all_tools,
tool_mapping,
VectorStoreIndex,
)
# Top-level agent retrieves up to 3 candidate document-agent tools per query.
self.top_agent = FnRetrieverAgent.from_retriever(
self.obj_index.as_retriever(similarity_top_k=3),
system_prompt=""" \
You are an agent designed to answer queries about a set of given cities.
Please always use the tools provided to answer a question. Do not rely on prior knowledge.\
""",
verbose=True,
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
# Expose the top agent, the tool object index, and the per-document agents.
return {
"top_agent": self.top_agent,
"obj_index": self.obj_index,
"doc_agents": self.agents,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
# Delegates straight to the top-level agent's query method.
return self.top_agent.query(*args, **kwargs) These examples and the |
Question Validation
Question
Is there any way to achieve something similar to the above?
The text was updated successfully, but these errors were encountered: