from google.adk.agents import LlmAgent, SequentialAgent
from google.adk.models.lite_llm import LiteLlm
from google.adk.tools.agent_tool import AgentTool
from .prompts import agent_instruction
import os
import sys
# Add the project root (one level above this package) to sys.path so the
# absolute `ecom_agent...` import below resolves when this module is loaded
# from inside the package. Must run BEFORE the import on the next line.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from ecom_agent.knowledge.agent import knowledge_agent
# Model identifier handed to LlmAgent. Swap in the commented-out LiteLlm
# wrapper to run against a local Ollama model instead of Gemini.
# NOTE(review): PEP 8 would name this constant MODEL; kept as-is in case
# other modules import it by this name.
Model = "gemini-2.5-pro" # LiteLlm(model="ollama_chat/qwen3:1.7b")
# Wrap the knowledge agent so the root agent can invoke it as a tool.
knowledge_agent_tool = AgentTool(knowledge_agent)
# Root agent: extracts the required data from input text, delegating
# knowledge lookups to the wrapped knowledge-agent tool.
root_agent = LlmAgent(
    model=Model,
    name="for_localLLM_test",
    description="An agent that extracts the necessary data from the text",
    instruction=agent_instruction,
    tools=[knowledge_agent_tool],
)
# --- Alternative agent configurations, kept commented out for reference ---
# Variant 1: knowledge agent wired as both a tool and a sub-agent.
# root_agent = LlmAgent(
# name="localLLM_test_agent",
# model=Model,
# description="An agent that extracts the necessary data from the text",
# instruction=agent_instruction,
# tools=[knowledge_agent_tool],
# sub_agents=[knowledge_agent_tool]
# )
# Variant 2: a plain extraction agent with no tools...
# test_agent = LlmAgent(
# name="localLLM_test_agent",
# model=Model,
# description="An agent that extracts the necessary data from the text",
# instruction=agent_instruction,
# tools=[],
# )
# ...orchestrated sequentially after the knowledge agent.
# root_agent = SequentialAgent(
# name="localLLM_test_root",
# description="An Orchestrating agent to extract the necessary data from the text",
# sub_agents=[knowledge_agent, test_agent],
# )