Skip to content

Commit 5f65373

Browse files
committed
feat: enhance recharts and report agents with new configurations and processors
1 parent 61e3940 commit 5f65373

4 files changed

Lines changed: 131 additions & 18 deletions

File tree

src/mastra/agents/recharts.ts

Lines changed: 54 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
11
import { Agent } from '@mastra/core/agent'
22
import { InternalSpans } from '@mastra/core/ai-tracing'
33
import { googleAI, googleAIFlashLite, pgMemory } from '../config'
4+
import { grokAI } from '../config/copilot';
5+
import { BatchPartsProcessor, UnicodeNormalizer } from '@mastra/core/processors/processors';
46

57
export const rechartsTypes = new Agent({
68
id: 'recharts-types',
@@ -18,18 +20,18 @@ export const rechartsTypes = new Agent({
1820
## Style Guide
1921
2022
`,
21-
providerOptions: {
22-
google: {
23-
thinkingConfig: {
24-
thinkingLevel: 'medium',
25-
includeThoughts: true,
26-
thinkingBudget: -1,
27-
}
28-
}
29-
}
23+
// providerOptions: {
24+
// google: {
25+
// thinkingConfig: {
26+
// thinkingLevel: 'medium',
27+
// includeThoughts: true,
28+
// thinkingBudget: -1,
29+
// }
30+
// }
31+
// }
3032
}
3133
},
32-
model: googleAIFlashLite,
34+
model: grokAI,
3335
memory: pgMemory,
3436
options: { tracingPolicy: { internal: InternalSpans.AGENT } },
3537
})
@@ -53,7 +55,11 @@ export const rechartsIssueLabeler = new Agent({
5355
thinkingLevel: 'medium',
5456
includeThoughts: true,
5557
thinkingBudget: -1,
56-
}
58+
},
59+
mediaResolution: 'MEDIA_RESOLUTION_MEDIUM',
60+
maxOutputTokens: 64000,
61+
temperature: 0.2,
62+
topP: 1.0,
5763
}
5864
}
5965
}
@@ -88,7 +94,11 @@ export const rechartsLinkChecker = new Agent({
8894
thinkingLevel: 'low',
8995
includeThoughts: true,
9096
thinkingBudget: -1,
91-
}
97+
},
98+
mediaResolution: 'MEDIA_RESOLUTION_MEDIUM',
99+
maxOutputTokens: 64000,
100+
temperature: 0.2,
101+
topP: 1.0
92102
}
93103
}
94104
}
@@ -124,7 +134,11 @@ export const chartdesigner = new Agent({
124134
thinkingLevel: 'medium',
125135
includeThoughts: true,
126136
thinkingBudget: -1,
127-
}
137+
},
138+
mediaResolution: 'MEDIA_RESOLUTION_MEDIUM',
139+
maxOutputTokens: 64000,
140+
temperature: 0.2,
141+
topP: 1.0
128142
}
129143
}
130144
}
@@ -161,7 +175,11 @@ export const rechartsMaster = new Agent({
161175
thinkingLevel: 'high',
162176
includeThoughts: true,
163177
thinkingBudget: -1,
164-
}
178+
},
179+
mediaResolution: 'MEDIA_RESOLUTION_MEDIUM',
180+
maxOutputTokens: 64000,
181+
temperature: 0.2,
182+
topP: 1.0
165183
}
166184
}
167185
}
@@ -172,4 +190,26 @@ export const rechartsMaster = new Agent({
172190

173191
},
174192
options: { tracingPolicy: { internal: InternalSpans.AGENT } },
193+
scorers: {
194+
// Add any custom scorers here
195+
},
196+
workflows: {
197+
// Add any custom workflows here
198+
},
199+
inputProcessors: [
200+
new UnicodeNormalizer({
201+
stripControlChars: false,
202+
collapseWhitespace: true,
203+
preserveEmojis: true,
204+
trim: true,
205+
}),
206+
],
207+
outputProcessors: [
208+
new BatchPartsProcessor({
209+
batchSize: 15,
210+
maxWaitTime: 50,
211+
emitOnNonText: true,
212+
}),
213+
],
214+
maxRetries: 5
175215
})

src/mastra/agents/reportAgent.ts

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,11 @@ export const reportAgent = new Agent({
9595
thinkingLevel: 'medium',
9696
includeThoughts: true,
9797
thinkingBudget: -1,
98-
}
98+
},
99+
mediaResolution: 'MEDIA_RESOLUTION_MEDIUM',
100+
maxOutputTokens: 64000,
101+
temperature: 0.2,
102+
topP: 1.0
99103
}
100104
}
101105
}

src/mastra/config/copilot.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ import { createGitHubCopilotOpenAICompatible } from '@opeoginni/github-copilot-o
33
const githubCopilot = createGitHubCopilotOpenAICompatible({
44
baseURL: 'https://api.githubcopilot.com',
55
name: 'githubcopilot',
6+
apiKey: process.env.COPILOT_TOKEN,
67
headers: {
78
Authorization: `Bearer ${process.env.COPILOT_TOKEN}`,
89
"Copilot-Integration-Id": "vscode-chat", // These configs must be provided

src/mastra/config/google.ts

Lines changed: 71 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -108,11 +108,23 @@ export const googleNanoBanana = google('gemini-3-pro-image-preview')
108108
// systemInstruction: 'This model has cache',
109109
// displayName: 'cacheContent',
110110
//});
111-
111+
/*
112+
* imageGen: Gemini Imagen 4.0 Generate model for standard image generation
113+
* When to use: This model is suitable for standard image generation tasks, offering a balance between quality and computational efficiency. It is ideal for applications ranging from simple illustrations to basic visualizations.
114+
* Why use: Choose this model when you need to generate images quickly and efficiently, balancing the need for decent quality with the desire to keep costs down.
115+
*/
112116
export const imageGen = google.image('imagen-4.0-generate-001');
113-
117+
/*
118+
* imageUltra: Gemini Imagen 4.0 Ultra model for high-resolution image generation
119+
* When to use: This model is ideal for applications that require ultra-high-resolution images with exceptional detail and quality, such as professional design work, marketing materials, and high-end visual content creation.
120+
* Why use: Choose this model when image quality is a top priority and you need the best possible resolution and detail for your visual assets, despite the higher computational costs involved.
121+
*/
114122
export const imageUltra = google.image('imagen-4.0-ultra-generate-001');
115-
123+
/*
124+
* imageFast: Gemini Imagen 4.0 Fast model for rapid image generation
125+
* When to use: This model is designed for scenarios where quick turnaround times are essential, such as real-time applications, rapid prototyping, or situations where speed is prioritized over ultra-high resolution.
126+
* Why use: Choose this model when you need to generate images swiftly without significantly compromising on quality, making it ideal for dynamic content creation and interactive applications.
127+
*/
116128
export const imageFast = google.image('imagen-4.0-fast-generate-001');
117129

118130
/*
@@ -132,4 +144,60 @@ export const googleWithMemory = withSupermemory(googleAI,"mastra", {
132144
verbose: true,
133145
mode: "full",
134146
addMemory: "always"
147+
});
148+
/*
149+
* googleWithMemoryLite: Google Generative AI Flash Lite model integrated with Supermemory for enhanced conversational capabilities
150+
* When to use: This configuration is ideal for budget-conscious applications that still require context-aware interactions, such as lightweight chatbots and virtual assistants. It leverages Supermemory to provide relevant historical context in conversations while minimizing costs.
151+
* Why use: Utilize this setup when you want to enhance user interactions by incorporating past conversations and relevant information while keeping costs low.
152+
* Example use cases:
153+
154+
* A budget-friendly customer support chatbot that remembers previous interactions with users to provide tailored assistance.
155+
* A lightweight virtual assistant that maintains context across multiple user requests for more coherent interactions.
156+
* An educational tutor bot that recalls past lessons and user progress to adapt its teaching approach on a budget.
157+
* A travel planning assistant that considers past queries about destinations and preferences to suggest personalized itineraries without incurring high costs.
158+
* A medical consultation tool that takes into account patient history and previous consultations to offer more informed advice while being cost-effective.
159+
* Configuration details: This setup uses the 'mastra' container tag for memory search, includes a conversation ID for grouping messages, and operates in 'full' mode to maximize context retrieval. It is configured to always add relevant memories to the prompts while utilizing the cost-effective Flash Lite model.
160+
*/
161+
export const googleWithMemoryLite = withSupermemory(googleAIFlashLite,"mastra", {
162+
conversationId: "mastra-conversation",
163+
verbose: true,
164+
mode: "full",
165+
addMemory: "always"
166+
});
167+
/*
168+
* superGoogle: Google Generative AI model integrated with Supermemory for enhanced conversational capabilities
169+
* When to use: This configuration is ideal for applications that require context-aware interactions, such as chatbots, virtual assistants, and customer support systems. It leverages Supermemory to provide relevant historical context in conversations.
170+
* Why use: Utilize this setup when you want to enhance user interactions by incorporating past conversations and relevant information, leading to more personalized and accurate responses.
171+
* Example use cases:
172+
* A customer support chatbot that remembers previous interactions with users to provide tailored assistance.
173+
* A virtual assistant that maintains context across multiple user requests for more coherent interactions.
174+
* An educational tutor bot that recalls past lessons and user progress to adapt its teaching approach.
175+
* A travel planning assistant that considers past queries about destinations and preferences to suggest personalized itineraries.
176+
* A medical consultation tool that takes into account patient history and previous consultations to offer more informed advice.
177+
* Configuration details: This setup uses the 'mastra' container tag for memory search, includes a conversation ID for grouping messages, and operates in 'full' mode to maximize context retrieval. It is configured to always add relevant memories to the prompts.
178+
*/
179+
export const superGoogle = withSupermemory(googleAI,"mastra", {
180+
conversationId: "mastra-conversation",
181+
verbose: true,
182+
mode: "full",
183+
addMemory: "always"
184+
});
185+
/*
186+
* superGoogleLite: Google Generative AI Flash Lite model integrated with Supermemory for enhanced conversational capabilities
187+
* When to use: This configuration is ideal for budget-conscious applications that still require context-aware interactions, such as lightweight chatbots and virtual assistants. It leverages Supermemory to provide relevant historical context in conversations while minimizing costs.
188+
* Why use: Utilize this setup when you want to enhance user interactions by incorporating past conversations and relevant information while keeping costs low.
189+
* Example use cases:
190+
191+
* A budget-friendly customer support chatbot that remembers previous interactions with users to provide tailored assistance.
192+
* A lightweight virtual assistant that maintains context across multiple user requests for more coherent interactions.
193+
* An educational tutor bot that recalls past lessons and user progress to adapt its teaching approach on a budget.
194+
* A travel planning assistant that considers past queries about destinations and preferences to suggest personalized itineraries without incurring high costs.
195+
* A medical consultation tool that takes into account patient history and previous consultations to offer more informed advice while being cost-effective.
196+
* Configuration details: This setup uses the 'mastra' container tag for memory search, includes a conversation ID for grouping messages, and operates in 'full' mode to maximize context retrieval. It is configured to always add relevant memories to the prompts while utilizing the cost-effective Flash Lite model.
197+
*/
198+
export const superGoogleLite = withSupermemory(googleAIFlashLite,"mastra", {
199+
conversationId: "mastra-conversation",
200+
verbose: true,
201+
mode: "full",
202+
addMemory: "always"
135203
});

0 commit comments

Comments
 (0)