-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathagent.py
More file actions
177 lines (144 loc) · 5.72 KB
/
agent.py
File metadata and controls
177 lines (144 loc) · 5.72 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
# suppress the warning
# Silence third-party deprecation/user warnings before the heavier imports run.
import warnings
warnings.filterwarnings("ignore")
import os
from pathlib import Path
from dotenv import load_dotenv
from google.genai import types
from tiny_agent.agent.tiny_agent import TinyAgent
# NOTE(review): wildcard import hides which decorator names are used here —
# confirm and prefer explicit imports.
from tiny_agent.tools.decorator import *
from tiny_agent.tools.web.tools import google_search, tavily_search
from tiny_agent.utils.print_utils import format_text
# Load variables from a local .env file into os.environ before building the
# provider/model configuration below.
load_dotenv()
# Provider/auth settings shared by every model call.
# NOTE(review): the fallback project ID and API key below look like redacted
# placeholders — confirm no live credentials are ever committed here; prefer
# failing fast when the env vars are missing.
_PROVIDER_CONFIG = {
    # Accept the common truthy spellings rather than only the exact string
    # "True", which silently disabled Vertex AI for values such as "true"/"1".
    "vertexai": os.environ.get("GOOGLE_GENAI_USE_VERTEXAI", "True").strip().lower()
    in ("true", "1", "yes"),
    "vertexai_location": os.environ.get("GOOGLE_CLOUD_LOCATION", "europe-west4"),
    "vertexai_project": os.environ.get(
        "GOOGLE_CLOUD_PROJECT", "hg-hjghjg-ai-ft-exp-pr-hjjkhljhlhjkl"
    ),
    "google_ai_studio_api_key": os.environ.get(
        "GOOGLE_AI_STUDIO_API_KEY", "adfasdfasdfads"
    ),
}
# Main (orchestrating) research agent: sampling enabled, high thinking effort.
_MAIN_AGENT_MODEL = "gemini-3-flash-preview"
_MAIN_AGENT_MODEL_CONFIG = {
    "temperature": 1.0,
    "seed": 42,  # fixed seed for (best-effort) reproducible sampling
    "top_p": 1.0,
    "top_k": 60,
    "thinking_config": types.ThinkingConfig(
        thinking_level=types.ThinkingLevel.HIGH,
        include_thoughts=False,  # keep the model's thoughts out of responses
    ),
}
# For google search tool
_SEARCH_AGENT_MODEL = "gemini-2.5-flash-lite"
_SEARCH_AGENT_MODEL_CONFIG = {
    "temperature": 1.0,
    "seed": 42,
    "top_p": 1.0,
    "top_k": 60,
    "thinking_config": types.ThinkingConfig(
        # -1 presumably means a dynamic/model-chosen thinking budget — confirm
        # against the google.genai ThinkingConfig documentation.
        thinking_budget=-1,
        include_thoughts=False,
    ),
}
# For all web search tools
_SUMMARIZE_MODEL = "gemini-2.5-flash-lite"
_SUMMARIZE_MODEL_CONFIG = {
    "temperature": 0.0,  # deterministic output for summarization
    "seed": 42,
    "thinking_config": types.ThinkingConfig(
        thinking_budget=0,  # 0 budget: no thinking for cheap summarization
        include_thoughts=False,
    ),
}
# Wire the model/provider settings onto the tool functions as attributes; the
# tiny_agent tool implementations presumably read these at call time — confirm
# against tiny_agent.tools.web.tools.
tavily_search.summarize_model = _SUMMARIZE_MODEL
tavily_search.summarize_model_config = _SUMMARIZE_MODEL_CONFIG
tavily_search.provider_config = _PROVIDER_CONFIG
google_search.search_model = _SEARCH_AGENT_MODEL
google_search.summarize_model = _SUMMARIZE_MODEL
# google_search expects merged dicts: model sampling options + provider/auth.
google_search.search_options = {**_SEARCH_AGENT_MODEL_CONFIG, **_PROVIDER_CONFIG}
google_search.summarize_options = {**_SUMMARIZE_MODEL_CONFIG, **_PROVIDER_CONFIG}
def get_main_agent_goal(task: str, output_path: str) -> str:
    """Build the full instruction prompt for the main research agent.

    Args:
        task: The user research task(s), already concatenated into one string.
        output_path: File path where the agent must save the final report.

    Returns:
        The complete goal prompt handed to the agent as its contents.
    """
    # Prompt text cleaned up for grammar ("piece of research topics",
    # "Use the all possible", "response to user..") — wording changes only,
    # the instructions themselves are unchanged.
    return f"""
You are leading a research effort and will perform a research task.
-----
The task:
{task}
-----
Decompose the task into individual research topics and compile them into a list.
Use **all available internet or web search tools** to perform a web search for each topic **ONE BY ONE**. Search for **at most 3 results** for each topic.
**Note**: Avoid pursuing perfection excessively. Know when to stop and keep it concise. Citation URLs are important; please carry them together with the results.
Record the **full raw data of research results** into memory.
Analyze and research **within the range of the recorded results** to produce a final but concise report.
**Note**: Citation URLs are important; please include them in the report and reflect the research range (must be within the range of the recorded search results). This is critical.
Record the report into memory.
Compose markdown content based on the recorded report including the following sections:
- **Topic**: The name of each research topic
- **Key Findings**: A summary of the main findings for each topic
- **Description**: A brief description synthesizing insights from all researched topics
- **Citations including URLs**: All sources referenced, organized by topic
- (Optional) **Cross-Topic Insights**: Any patterns or connections observed across multiple topics
Output:
At the end of the report, please also add a datetime representing when the report was generated; place it in a separate section.
Save the report to a file with the path "{output_path}".
**Reflect** on whether the report file exists. If not, redo the save operation to save the report to the file. If the file exists, respond to the user.
Response:
Read out the final report file as the response to the user.
"""
# python ./agent.py --output ./agent-output --tasks tasks/
if __name__ == "__main__":
    print("Deep Research Single Agent")
    import argparse

    def _load_tasks(tasks_dir):
        """Join every .md/.txt file in *tasks_dir* into one numbered task string.

        Returns "" when the directory contains no task files (falsy, so the
        caller's "No tasks found" check still fires). Files are sorted so the
        task numbering is deterministic across runs; Path.read_text() closes
        each file handle, unlike bare open(...).read().
        """
        task_files = sorted(tasks_dir.glob("*.md")) + sorted(tasks_dir.glob("*.txt"))
        return "\n\n".join(
            f"## Task {i}: {file.read_text()}"
            for i, file in enumerate(task_files)
        )

    parser = argparse.ArgumentParser(allow_abbrev=False)
    parser.add_argument(
        "--output", type=str, required=True, help="The output of the application"
    )
    parser.add_argument(
        "--tasks",
        type=str,
        required=True,
        help="The directory of tasks, md or txt files",
    )
    args = parser.parse_args()

    if args.tasks:
        task = _load_tasks(Path(args.tasks))
    else:
        # Fallback: a "tasks" directory next to this file.
        # NOTE(review): --tasks is required=True, so this branch is reachable
        # only when an empty string is passed explicitly.
        tasks_dir = Path(__file__).parent / "tasks"
        task = _load_tasks(tasks_dir) if tasks_dir.exists() else None

    if not task:
        raise ValueError("No tasks found")

    # Create the agent and run the research end to end.
    agent = TinyAgent(
        name="main_agent",
        model=_MAIN_AGENT_MODEL,
        output_root=args.output,
        tools=[tavily_search, google_search],
        genai_stuff=_PROVIDER_CONFIG,
        **_MAIN_AGENT_MODEL_CONFIG,
    )
    full_task = get_main_agent_goal(
        task=task, output_path=f"{agent.output_location}/result.md"
    )
    format_text(task, "⚑ Deep Research (single agent)")
    result = agent(contents=full_task)
    format_text(result.text, "❀ Deep Research (single agent) result")