Combined MDX Documentation ========================= Total files processed: 161 Files included: - src/pages/docs/08-running-evals.mdx - src/pages/docs/agents/00-overview.mdx - src/pages/docs/agents/01-agent-memory.mdx - src/pages/docs/agents/02-adding-tools.mdx - src/pages/docs/deployment/deployment.mdx - src/pages/docs/deployment/logging-and-tracing.mdx - src/pages/docs/getting-started/installation.mdx - src/pages/docs/getting-started/project-structure.mdx - src/pages/docs/guides/01-harry-potter.mdx - src/pages/docs/guides/02-chef-michel.mdx - src/pages/docs/guides/03-stock-agent.mdx - src/pages/docs/guides/04-recruiter.mdx - src/pages/docs/index.mdx - src/pages/docs/llm-models/00-overview.mdx - src/pages/docs/local-dev/engine.mdx - src/pages/docs/local-dev/integrations.mdx - src/pages/docs/local-dev/mastra-dev.mdx - src/pages/docs/local-dev/mastra-init.mdx - src/pages/docs/rag/chunking-and-embedding.mdx - src/pages/docs/rag/overview.mdx - src/pages/docs/rag/retrieval.mdx - src/pages/docs/rag/vector-databases.mdx - src/pages/docs/reference/agents/createTool.mdx - src/pages/docs/reference/agents/generate.mdx - src/pages/docs/reference/agents/getAgent.mdx - src/pages/docs/reference/agents/stream.mdx - src/pages/docs/reference/cli/deploy.mdx - src/pages/docs/reference/cli/dev.mdx - src/pages/docs/reference/cli/engine.mdx - src/pages/docs/reference/cli/init.mdx - src/pages/docs/reference/core/mastra-class.mdx - src/pages/docs/reference/evals/answer-relevancy.mdx - src/pages/docs/reference/evals/bias.mdx - src/pages/docs/reference/evals/completeness.mdx - src/pages/docs/reference/evals/content-similarity.mdx - src/pages/docs/reference/evals/context-position.mdx - src/pages/docs/reference/evals/context-precision.mdx - src/pages/docs/reference/evals/context-relevancy.mdx - src/pages/docs/reference/evals/contextual-recall.mdx - src/pages/docs/reference/evals/faithfulness.mdx - src/pages/docs/reference/evals/hallucination.mdx - src/pages/docs/reference/evals/keyword-coverage.mdx - src/pages/docs/reference/evals/prompt-alignment.mdx - src/pages/docs/reference/evals/summarization.mdx - src/pages/docs/reference/evals/textual-difference.mdx - src/pages/docs/reference/evals/tone-consistency.mdx - src/pages/docs/reference/evals/toxicity.mdx - src/pages/docs/reference/llm/generate.mdx - src/pages/docs/reference/llm/providers-and-models.mdx - src/pages/docs/reference/llm/stream.mdx - src/pages/docs/reference/memory/Memory.mdx - src/pages/docs/reference/memory/createThread.mdx - src/pages/docs/reference/memory/getThreadById.mdx - src/pages/docs/reference/memory/getThreadsByResourceId.mdx - src/pages/docs/reference/memory/query.mdx - src/pages/docs/reference/observability/create-logger.mdx - src/pages/docs/reference/observability/logger.mdx - src/pages/docs/reference/observability/otel-config.mdx - src/pages/docs/reference/observability/providers/braintrust.mdx - src/pages/docs/reference/observability/providers/index.mdx - src/pages/docs/reference/observability/providers/laminar.mdx - src/pages/docs/reference/observability/providers/langfuse.mdx - src/pages/docs/reference/observability/providers/langsmith.mdx - src/pages/docs/reference/observability/providers/langwatch.mdx - src/pages/docs/reference/observability/providers/new-relic.mdx - src/pages/docs/reference/observability/providers/signoz.mdx - src/pages/docs/reference/observability/providers/traceloop.mdx - src/pages/docs/reference/rag/chunk.mdx - src/pages/docs/reference/rag/document.mdx - src/pages/docs/reference/rag/embeddings.mdx - 
src/pages/docs/reference/rag/extract-params.mdx - src/pages/docs/reference/rag/graph-rag.mdx - src/pages/docs/reference/rag/metadata-filters.mdx - src/pages/docs/reference/rag/pgstore.mdx - src/pages/docs/reference/rag/pinecone.mdx - src/pages/docs/reference/rag/qdrant.mdx - src/pages/docs/reference/rag/rerank.mdx - src/pages/docs/reference/storage/mastra-storage.mdx - src/pages/docs/reference/tools/client.mdx - src/pages/docs/reference/tools/document-chunker-tool.mdx - src/pages/docs/reference/tools/graph-rag-tool.mdx - src/pages/docs/reference/tools/vector-query-tool.mdx - src/pages/docs/reference/tts/generate.mdx - src/pages/docs/reference/tts/providers-and-models.mdx - src/pages/docs/reference/tts/stream.mdx - src/pages/docs/reference/workflows/after.mdx - src/pages/docs/reference/workflows/commit.mdx - src/pages/docs/reference/workflows/createRun.mdx - src/pages/docs/reference/workflows/execute.mdx - src/pages/docs/reference/workflows/resume.mdx - src/pages/docs/reference/workflows/start.mdx - src/pages/docs/reference/workflows/step-class.mdx - src/pages/docs/reference/workflows/step-condition.mdx - src/pages/docs/reference/workflows/step-function.mdx - src/pages/docs/reference/workflows/step-options.mdx - src/pages/docs/reference/workflows/suspend.mdx - src/pages/docs/reference/workflows/then.mdx - src/pages/docs/reference/workflows/watch.mdx - src/pages/docs/reference/workflows/workflow.mdx - src/pages/docs/workflows/00-overview.mdx - src/pages/docs/workflows/control-flow.mdx - src/pages/docs/workflows/steps.mdx - src/pages/docs/workflows/suspend-and-resume.mdx - src/pages/examples/agents/agentic-workflows.mdx - src/pages/examples/agents/bird-checker.mdx - src/pages/examples/agents/hierarchical-multi-agent.mdx - src/pages/examples/agents/multi-agent-workflow.mdx - src/pages/examples/agents/system-prompt.mdx - src/pages/examples/agents/using-a-tool.mdx - src/pages/examples/index.mdx - src/pages/examples/llms/call-claude.mdx - src/pages/examples/llms/call-google-gemini.mdx - src/pages/examples/llms/describe-an-image.mdx - src/pages/examples/llms/generate-object-with-structured-output.mdx - src/pages/examples/llms/generate-text-from-pdf.mdx - src/pages/examples/llms/generate-text-with-deepseek-reasoner.mdx - src/pages/examples/llms/generate-text.mdx - src/pages/examples/llms/stream-object-with-structured-output.mdx - src/pages/examples/llms/stream-text.mdx - src/pages/examples/llms/use-a-system-prompt.mdx - src/pages/examples/memory/memory-with-libsql.mdx - src/pages/examples/memory/memory-with-pg.mdx - src/pages/examples/memory/memory-with-upstash.mdx - src/pages/examples/rag/adjust-chunk-delimiters.mdx - src/pages/examples/rag/adjust-chunk-size.mdx - src/pages/examples/rag/basic-rag.mdx - src/pages/examples/rag/chunk-html.mdx - src/pages/examples/rag/chunk-json.mdx - src/pages/examples/rag/chunk-markdown.mdx - src/pages/examples/rag/chunk-text.mdx - src/pages/examples/rag/cleanup-rag.mdx - src/pages/examples/rag/cot-rag.mdx - src/pages/examples/rag/cot-workflow-rag.mdx - src/pages/examples/rag/embed-chunk-array.mdx - src/pages/examples/rag/embed-text-chunk.mdx - src/pages/examples/rag/embed-text-with-cohere.mdx - src/pages/examples/rag/filter-rag.mdx - src/pages/examples/rag/graph-rag.mdx - src/pages/examples/rag/hybrid-vector-search.mdx - src/pages/examples/rag/insert-embedding-in-astra.mdx - src/pages/examples/rag/insert-embedding-in-chroma.mdx - src/pages/examples/rag/insert-embedding-in-libsql.mdx - src/pages/examples/rag/insert-embedding-in-pgvector.mdx - 
src/pages/examples/rag/insert-embedding-in-pinecone.mdx - src/pages/examples/rag/insert-embedding-in-qdrant.mdx - src/pages/examples/rag/insert-embedding-in-upstash.mdx - src/pages/examples/rag/insert-embedding-in-vectorize.mdx - src/pages/examples/rag/metadata-extraction.mdx - src/pages/examples/rag/rerank-rag.mdx - src/pages/examples/rag/reranking-with-cohere.mdx - src/pages/examples/rag/retrieve-results.mdx - src/pages/examples/workflows/branching-paths.mdx - src/pages/examples/workflows/calling-agent.mdx - src/pages/examples/workflows/calling-llm.mdx - src/pages/examples/workflows/creating-a-workflow.mdx - src/pages/examples/workflows/cyclical-dependencies.mdx - src/pages/examples/workflows/parallel-steps.mdx - src/pages/examples/workflows/sequential-steps.mdx - src/pages/examples/workflows/suspend-and-resume.mdx - src/pages/examples/workflows/using-a-tool-as-a-step.mdx - src/pages/showcase/index.mdx ================================================================================ ================================================================================ Source: src/pages/docs/08-running-evals.mdx ================================================================================ --- title: "Evals Overview | Mastra Docs" description: "Mastra evals help you measure LLM output quality with metrics for relevance, bias, hallucination, and more." --- # Running Evals Evals are automated tests that evaluate LLM outputs using model-graded, rule-based, and statistical methods. Each eval returns a normalized score between 0-1 that can be logged and compared. Evals can be customized with your own prompts and scoring functions. Evals suites run in the cloud, but as tests, it's logical to store them in your codebase. Because LLMs are non-deterministic, you might not get a 100% pass rate every time. 
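The general usage pattern is the same across metrics: construct a metric, then call `measure` on the text you want scored. Below is a minimal sketch using the content-similarity metric — the `@mastra/evals/nlp` import path, the no-argument constructor, and the `(expected, actual)` argument order are assumptions for illustration; check the reference page for each metric (listed below) for the exact API.

```typescript
// Illustrative sketch only — import path and measure() arguments are assumptions;
// see the content-similarity reference page for the exact API.
import { ContentSimilarityMetric } from "@mastra/evals/nlp";

const metric = new ContentSimilarityMetric();

// Compare an expected answer with the model's actual output.
const expected = "The store opens at 9am and closes at 5pm.";
const actual = "Our store hours are 9am to 5pm.";

const result = await metric.measure(expected, actual);

// Every eval returns a normalized score between 0 and 1.
console.log(result.score);
```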
## Mastra Evals Mastra provides several eval metrics for assessing LLM outputs: ### LLM Metrics - [`answer-relevancy`](/docs/reference/evals/answer-relevancy): Measures how well an answer addresses the input question - [`bias`](/docs/reference/evals/bias): Detects potential biases in the output - [`context-position`](/docs/reference/evals/context-position): Evaluates the placement of context in responses - [`context-precision`](/docs/reference/evals/context-precision): Assesses the accuracy of context usage - [`context-relevancy`](/docs/reference/evals/context-relevancy): Measures the relevance of used context - [`contextual-recall`](/docs/reference/evals/contextual-recall): Evaluates information recall from context - [`faithfulness`](/docs/reference/evals/faithfulness): Checks output alignment with source material - [`hallucination`](/docs/reference/evals/hallucination): Detects fabricated or unsupported information - [`prompt-alignment`](/docs/reference/evals/prompt-alignment): Measures adherence to prompt instructions - [`summarization`](/docs/reference/evals/summarization): Evaluates summary quality - [`toxicity`](/docs/reference/evals/toxicity): Detects harmful or inappropriate content ### NLP Metrics - [`completeness`](/docs/reference/evals/completeness): Measures if all required information is present - [`content-similarity`](/docs/reference/evals/content-similarity): Compares text similarity - [`keyword-coverage`](/docs/reference/evals/keyword-coverage): Checks for presence of key terms - [`textual-difference`](/docs/reference/evals/textual-difference): Measures text changes - [`tone`](/docs/reference/evals/tone-consistency): Analyzes writing style and tone ================================================================================ Source: src/pages/docs/agents/00-overview.mdx ================================================================================ --- title: "Creating and Calling Agents | Agent Documentation | Mastra" description: Overview of agents in Mastra, detailing their capabilities and how they interact with tools, workflows, and external systems. --- # Creating and Calling Agents Agents in Mastra are systems where the language model can autonomously decide on a sequence of actions to perform tasks. They have access to tools, workflows, and synced data, enabling them to perform complex tasks and interact with external systems. Agents can invoke your custom functions, utilize third-party APIs through integrations, and access knowledge bases you have built. While the `LLM` class is similar to a contractor you might hire for a one-off task, agents are like employees who can be used for ongoing projects. They have names, persistent memory, consistent model configurations, and instructions across calls, as well as a set of enabled tools. ## 1. 
Creating an Agent To create an agent in Mastra, you use the `Agent` class and define its properties: ```ts showLineNumbers filename="src/mastra/agents/index.ts" copy import { Agent } from "@mastra/core"; export const myAgent = new Agent({ name: "My Agent", instructions: "You are a helpful assistant.", model: { provider: "OPEN_AI", name: "gpt-4o-mini", }, }); ``` **Note:** Ensure that you have set the necessary environment variables, such as your OpenAI API key, in your `.env` file: ```.env filename=".env" copy OPENAI_API_KEY=your_openai_api_key ``` Also, make sure you have the `@mastra/core` package installed: ```bash npm2yarn copy npm install @mastra/core ``` ### Registering the Agent Register your agent with Mastra to enable logging and access to configured tools and integrations: ```ts showLineNumbers filename="src/mastra/index.ts" copy import { Mastra } from "@mastra/core"; import { myAgent } from "./agents"; export const mastra = new Mastra({ agents: { myAgent }, }); ``` ## 2. Generating and streaming text ### Generating text Use the `.generate()` method to have your agent produce text responses: ```ts showLineNumbers filename="src/mastra/index.ts" copy const response = await myAgent.generate([ { role: "user", content: "Hello, how can you assist me today?" }, ]); console.log("Agent:", response.text); ``` ### Streaming responses For more real-time responses, you can stream the agent's response: ```ts showLineNumbers filename="src/mastra/index.ts" copy const stream = await myAgent.stream([ { role: "user", content: "Tell me a story." }, ]); console.log("Agent:"); for await (const chunk of stream.textStream) { process.stdout.write(chunk); } ``` ## **3. Structured Output** Agents can return structured data by providing a JSON Schema or using a Zod schema. ### Using JSON Schema ```typescript const schema = { type: "object", properties: { summary: { type: "string" }, keywords: { type: "array", items: { type: "string" } }, }, additionalProperties: false, required: ["summary", "keywords"], }; const response = await myAgent.generate( [ { role: "user", content: "Please provide a summary and keywords for the following text: ...", }, ], { output: schema, }, ); console.log("Structured Output:", response.object); ``` ### Using Zod You can also use Zod schemas for type-safe structured outputs. First, install Zod: ```bash npm2yarn copy npm install zod ``` Then, define a Zod schema and use it with the agent: ```ts showLineNumbers filename="src/mastra/index.ts" copy import { z } from "zod"; // Define the Zod schema const schema = z.object({ summary: z.string(), keywords: z.array(z.string()), }); // Use the schema with the agent const response = await myAgent.generate( [ { role: "user", content: "Please provide a summary and keywords for the following text: ...", }, ], { output: schema, }, ); console.log("Structured Output:", response.object); ``` This allows you to have strong typing and validation for the structured data returned by the agent. ## **4. Running Agents** Mastra provides a CLI command `mastra dev` to run your agents behind an API. By default, this looks for exported agents in files in the `src/mastra/agents` directory. ### Starting the Server ```bash mastra dev ``` This will start the server and make your agent available at `http://localhost:4111/api/agents/myAgent/generate`. 
### Interacting with the Agent

You can interact with the agent using `curl` from the command line:

```bash
curl -X POST http://localhost:4111/api/agents/myAgent/generate \
  -H "Content-Type: application/json" \
  -d '{ "messages": [ { "role": "user", "content": "Hello, how can you assist me today?" } ] }'
```

## Next Steps

- Learn about Agent Memory in the [Agent Memory](./01-agent-memory.mdx) guide.
- Learn about Agent Tools in the [Agent Tools](./02-adding-tools.mdx) guide.
- See an example agent in the [Chef Michel](../guides/02-chef-michel.mdx) example.

================================================================================
Source: src/pages/docs/agents/01-agent-memory.mdx
================================================================================

---
title: "Using Agent Memory | Agents | Mastra Docs"
description: Documentation on how agents in Mastra use memory to store conversation history and contextual information.
---

# Agent Memory

Agents in Mastra have a sophisticated memory system that stores conversation history and contextual information. This memory system supports both traditional message storage and vector-based semantic search, enabling agents to maintain state across interactions and retrieve relevant historical context.

## How Memory Works

The Memory API uses two main mechanisms to maintain context in conversations:

### Recent Message History

The `lastMessages` setting maintains a sliding window of the most recent messages. This ensures your agent always has access to the immediate conversation context:

```tsx
const memory = new Memory({
  options: {
    lastMessages: 1, // Keep only the most recent message
  },
});

// Example conversation flow:

// 1. User asks about project timeline
await agent.stream('When will the project be completed?', {
  threadId: 'project_123',
  resourceId: 'user_123',
});

// 2. User asks about specific feature
await agent.stream('How will the new search feature work?', {
  threadId: 'project_123',
  resourceId: 'user_123',
});

// 3. For this question, the agent will see the last 10 messages,
// letting it recall the conversation further back than the default set above (lastMessages: 1)
await agent.stream('Can you summarize the search feature requirements?', {
  threadId: 'project_123',
  resourceId: 'user_123',
  memoryOptions: {
    lastMessages: 10,
  },
});
```

### Semantic Search

When vector search is enabled, Memory can find relevant past messages using semantic similarity. This allows your agent to recall information from earlier in the conversation:

```tsx
const memory = new Memory({
  options: {
    semanticRecall: {
      topK: 10, // Include the 10 most relevant past messages
      messageRange: 2, // Messages before and after each result
    },
  },
});

// Example: User asks about a past feature discussion
await agent.stream('What did we decide about the search feature last week?', {
  threadId: 'support_123',
  resourceId: 'user_123',
  memoryOptions: {
    lastMessages: 10,
    semanticRecall: {
      topK: 3,
      messageRange: 2,
    },
  },
});

// The vector search will:
// 1. Find message_3 as relevant (topK: 3, per the override above)
// 2. Include messages [1,2] before and [4,5] after
// 3. Include all of this context in the agent's response
```

## Memory Configuration

The Mastra memory system is highly configurable and supports multiple storage backends.
Here's a basic configuration example: ```typescript import { Memory } from '@mastra/memory'; import { PostgresStore } from '@mastra/store-pg'; import { PgVector } from '@mastra/vector-pg'; const memory = new Memory({ // Required: Storage backend for messages storage: new PostgresStore({ host: 'localhost', port: 5432, user: 'postgres', database: 'postgres', password: 'postgres', }), // Optional: Vector store for semantic search vector: new PgVector(connectionString), // Optional: Memory configuration options options: { // Number of recent messages to include (false to disable) lastMessages: 10, // Configure vector-based semantic search semanticRecall: { topK: 3, // Number of semantic search results messageRange: 2 // Messages before and after each result }, }, // Required if using vector search embedding: { provider: 'OPEN_AI', model: 'text-embedding-3-small', maxRetries: 3, }, }); ``` ### Overriding Memory Settings When you initialize a Mastra instance with memory configuration, all agents will automatically use these memory settings when you call their `stream()` or `generate()` methods. You can override these default settings for individual calls: ```typescript // Use default memory settings from Memory configuration const response1 = await agent.generate("What were we discussing earlier?", { resourceId: "user_123", threadId: "thread_456" }); // Override memory settings for this specific call const response2 = await agent.generate("What were we discussing earlier?", { resourceId: "user_123", threadId: "thread_456", memoryOptions: { lastMessages: 5, // Only inject 5 recent messages semanticRecall: { topK: 2, // Only get 2 semantic search results messageRange: 1 // Context around each result }, } }); ``` ### Configuring Memory for Different Use Cases You can adjust memory settings based on your agent's needs: ```tsx // Customer support agent with minimal context await agent.stream('What are your store hours?', { threadId, resourceId, memoryOptions: { lastMessages: 5, // Quick responses need minimal conversation history historySearch: false, // no need to search through earlier messages }, }); // Project management agent with extensive context await agent.stream('Update me on the project status', { threadId, resourceId, memoryOptions: { lastMessages: 50, // Maintain longer conversation history across project discussions semanticRecall: { topK: 5, // Find more relevant project details messageRange: 3, // Number of messages before and after each result }, }, }); ``` ## Storage Options Mastra currently supports several storage backends: ### LibSQL Storage ```typescript import { MastraStorageLibSql } from '@mastra/core'; const storage = new MastraStorageLibSql({ config: { url: 'file:example.db', }, }); ``` ### PostgreSQL Storage ```typescript import { PostgresStore } from '@mastra/store-pg'; const storage = new PostgresStore({ host: 'localhost', port: 5432, user: 'postgres', database: 'postgres', password: 'postgres', }); ``` ### Upstash KV Storage ```typescript import { UpstashStore } from '@mastra/store-upstash'; const storage = new UpstashStore({ url: 'http://localhost:8089', token: 'your_token', }); ``` ## Vector Search Mastra supports semantic search through vector embeddings. When configured with a vector store, agents can find relevant historical messages based on semantic similarity. To enable vector search: 1. Configure a vector store (currently supports PostgreSQL): ```typescript import { PgVector } from '@mastra/vector-pg'; const vector = new PgVector(connectionString); ``` 2. 
Configure embedding options: ```typescript const embedding = { provider: 'OPEN_AI', model: 'text-embedding-3-small', maxRetries: 3, }; ``` 3. Enable vector search in memory configuration options: ```typescript const optionsConfig = { historySearch: { topK: 3, // Number of similar messages to find messageRange: 2, // Context around each result }, }; ``` ## Using Memory in Agents Once configured, the memory system is automatically used by agents. Here's how to use it: ```typescript // Initialize Mastra with memory const mastra = new Mastra({ agents: { myAgent }, memory, }); // Memory is automatically used in agent interactions const response = await myAgent.generate( "What were we discussing earlier about performance?", { resourceId: "user_123", threadId: "thread_456", }, ); ``` The memory system will automatically: 1. Store all messages in the configured storage backend 2. Create vector embeddings for semantic search (if configured) 3. Inject relevant historical context into new conversations 4. Maintain conversation threads and context ## Manually Managing Threads While threads are automatically managed when using agent methods, you can also manually manage threads using the memory API directly. This is useful for advanced use cases like: - Creating threads before starting conversations - Managing thread metadata - Explicitly saving or retrieving messages - Cleaning up old threads Here's how to manually work with threads: ```typescript import { Memory } from '@mastra/memory'; import { PostgresStore } from '@mastra/store-pg'; // Initialize memory const memory = new Memory({ storage: new PostgresStore({ host: 'localhost', port: 5432, user: 'postgres', database: 'postgres', password: 'postgres', }) }); // Create a new thread const thread = await memory.createThread({ resourceId: "user_123", title: "Project Discussion", metadata: { project: "mastra", topic: "architecture" } }); // Manually save messages to a thread await memory.saveMessages({ messages: [{ id: "msg_1", threadId: thread.id, role: "user", content: "What's the project status?", createdAt: new Date(), type: "text" }] }); // Get messages from a thread with various filters const messages = await memory.query({ threadId: thread.id, selectBy: { last: 10, // Get last 10 messages vectorSearchString: "performance", // Find messages about performance } }); // Get thread by ID const existingThread = await memory.getThreadById({ threadId: "thread_123" }); // Get all threads for a resource const threads = await memory.getThreadsByResourceId({ resourceId: "user_123" }); // Update thread metadata await memory.updateThread({ id: thread.id, title: "Updated Project Discussion", metadata: { status: "completed" } }); // Delete a thread and all its messages await memory.deleteThread(thread.id); ``` Note that in most cases, you won't need to manage threads manually since the agent's `generate()` and `stream()` methods handle thread management automatically. Manual thread management is primarily useful for advanced use cases or when you need more fine-grained control over the conversation history. 
As a final example, `memoryOptions` can also be passed on an individual call when you need extra context for that specific request:

```typescript
// Use memoryOptions to get more context for this specific request
await agent.generate("What did we discuss earlier?", {
  resourceId: "user_123",
  threadId: "thread_456",
  memoryOptions: {
    lastMessages: 20 // Override to get more context
  }
});
```

================================================================================
Source: src/pages/docs/agents/02-adding-tools.mdx
================================================================================

---
title: "Agent Tool Selection | Agent Documentation | Mastra"
description: Tools are typed functions that can be executed by agents or workflows, with built-in integration access and parameter validation. Each tool has a schema that defines its inputs, an executor function that implements its logic, and access to configured integrations.
---

# Agent Tool Selection

Tools are typed functions that can be executed by agents or workflows, with built-in integration access and parameter validation. Each tool has a schema that defines its inputs, an executor function that implements its logic, and access to configured integrations.

## Creating Tools

In this section, we'll walk through the process of creating a tool that can be used by your agents. Let's create a simple tool that fetches current weather information for a given city.

```typescript filename="src/mastra/tools/weatherInfo.ts" copy
import { createTool } from "@mastra/core";
import { z } from "zod";

const getWeatherInfo = async (city: string) => {
  // Replace with an actual API call to a weather service
  const data = await fetch(`https://api.example.com/weather?city=${city}`).then(
    (r) => r.json(),
  );
  return data;
};

export const weatherInfo = createTool({
  id: "Get Weather Information",
  inputSchema: z.object({
    city: z.string(),
  }),
  description: `Fetches the current weather information for a given city`,
  execute: async ({ context: { city } }) => {
    console.log("Using tool to fetch weather information for", city);
    return await getWeatherInfo(city);
  },
});
```

## Adding Tools to an Agent

Now we'll add the tool to an agent. We'll create an agent that can answer questions about the weather and configure it to use our `weatherInfo` tool.

```typescript filename="src/mastra/agents/weatherAgent.ts"
import { Agent } from "@mastra/core";
import * as tools from "../tools/weatherInfo";

export const weatherAgent = new Agent({
  name: "Weather Agent",
  instructions:
    "You are a helpful assistant that provides current weather information. When asked about the weather, use the weather information tool to fetch the data.",
  model: {
    provider: "OPEN_AI",
    name: "gpt-4",
    toolChoice: "required",
  },
  tools: {
    weatherInfo: tools.weatherInfo,
  },
});
```

## Registering the Agent

We need to initialize Mastra with our agent.

```typescript filename="src/index.ts"
import { Mastra } from "@mastra/core";
import { weatherAgent } from "./agents/weatherAgent";

export const mastra = new Mastra({
  agents: { weatherAgent },
});
```

This registers your agent with Mastra, making it available for use.

## Debugging Tools

You can test tools using Vitest or any other testing framework. Writing unit tests for your tools ensures they behave as expected and helps catch errors early.

## Calling an Agent with a Tool

Now we can call the agent, and it will use the tool to fetch the weather information.
## Example: Interacting with the Agent

```typescript filename="src/index.ts"
// `mastra` is the instance exported above in this same file
async function main() {
  const agent = mastra.getAgent("weatherAgent");
  const response = await agent.generate(
    "What's the weather like in New York City today?",
  );
  console.log(response.text);
}

main();
```

The agent will use the `weatherInfo` tool to get the current weather in New York City and respond accordingly.

================================================================================
Source: src/pages/docs/deployment/deployment.mdx
================================================================================

---
title: "Mastra Cloud Deployment | Mastra Docs"
description: Guide on deploying Mastra agents and workflows to various cloud platforms using deployers.
---

# Deploying Mastra to the Cloud

In this guide, we'll cover how to deploy your Mastra agents and workflows using deployers. Mastra provides dedicated deployers for various cloud platforms, making it easy to deploy your agents with minimal configuration.

Currently, Mastra supports deploying to:

- **Cloudflare Workers**
- **Vercel**
- **Netlify**

## Prerequisites

Before you begin, ensure you have:

- **Node.js** installed (version 18 or higher is recommended)
- An account with your chosen deployment platform (Cloudflare, Vercel, or Netlify)
- Required API keys or credentials for your deployment platform

## Using Deployers

Mastra uses a deployer-based system for cloud deployments. Each deployer is available as a separate package that you can install based on your needs.

### Installing Deployers

Install the deployer package for your chosen platform:

```bash copy
# For Cloudflare
npm install @mastra/deployer-cloudflare

# For Vercel
npm install @mastra/deployer-vercel

# For Netlify
npm install @mastra/deployer-netlify
```

### Configuring Deployers

Configure your Mastra instance with the appropriate deployer in your entry file:

```typescript filename="src/mastra/index.ts" copy
import { Mastra, createLogger } from '@mastra/core';

// Import the deployer you want to use
import { CloudflareDeployer } from '@mastra/deployer-cloudflare';
// OR import { VercelDeployer } from '@mastra/deployer-vercel';
// OR import { NetlifyDeployer } from '@mastra/deployer-netlify';

// Example with Cloudflare deployer
export const mastra = new Mastra({
  agents: { /* your agents here */ },
  logger: createLogger({ name: 'MyApp', level: 'debug' }),
  deployer: new CloudflareDeployer({
    scope: 'your-cloudflare-scope',
    projectName: 'your-project-name',
  }),
});
```

### Deployer Configuration Options

#### Cloudflare Deployer

```typescript
new CloudflareDeployer({
  scope: 'your-cloudflare-scope',
  projectName: 'your-project-name',
})
```

#### Vercel Deployer

```typescript
new VercelDeployer({
  scope: 'your-vercel-scope',
  projectName: 'your-project-name',
})
```

#### Netlify Deployer

```typescript
new NetlifyDeployer({
  scope: 'your-netlify-scope',
  projectName: 'your-project-name',
})
```

## Environment Variables

Ensure you set up the necessary environment variables for:

1. Your deployment platform credentials
2. Any API keys needed by your agents (e.g., `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`)

You can set these variables through your deployment platform's dashboard or configuration files.

## Conclusion

By using Mastra's deployer system, you can easily deploy your agents to your preferred cloud platform. The deployers handle the complexities of deployment, allowing you to focus on building your agents and workflows.
For platform-specific deployment details or troubleshooting, refer to the respective platform's documentation:

- [Cloudflare Workers Documentation](https://developers.cloudflare.com/workers/)
- [Vercel Documentation](https://vercel.com/docs)
- [Netlify Documentation](https://docs.netlify.com/)

================================================================================
Source: src/pages/docs/deployment/logging-and-tracing.mdx
================================================================================

---
title: "Logging and Tracing | Mastra Deployment Documentation"
description: Documentation on effective logging and tracing in Mastra, crucial for understanding application behavior and improving AI accuracy.
---

import Image from "next/image";

# Logging and Tracing

Effective logging and tracing are crucial for understanding the behavior of your application. Tracing is especially important for AI engineering. Teams building AI products find that visibility into the inputs and outputs of every step of every run is crucial to improving accuracy. You get this with Mastra's telemetry.

## Logging

In Mastra, logs can detail when certain functions run, what input data they receive, and how they respond.

### Basic Setup

Here's a minimal example that sets up a **console logger** at the `INFO` level. This will print out informational messages and above (i.e., `INFO`, `WARN`, `ERROR`) to the console.

```typescript filename="mastra.config.ts" showLineNumbers copy
import { Mastra, createLogger } from "@mastra/core";

export const mastra = new Mastra({
  // Other Mastra configuration...
  logger: createLogger({
    name: "Mastra",
    level: "info",
  }),
});
```

In this configuration:

- `name: "Mastra"` specifies the name to group logs under.
- `level: "info"` sets the minimum severity of logs to record.

### Configuration

- For more details on the options you can pass to `createLogger()`, see the [createLogger reference documentation](/docs/reference/observability/create-logger.mdx).
- Once you have a `Logger` instance, the methods you can call on it (e.g., `.info()`, `.warn()`, `.error()`) are described in the [Logger instance reference documentation](/docs/reference/observability/logger.mdx).
- If you want to send your logs to an external service for centralized collection, analysis, or storage, you can configure other logger types such as Upstash Redis. Consult the [createLogger reference documentation](/docs/reference/observability/create-logger.mdx) for details on parameters like `url`, `token`, and `key` when using the `UPSTASH` logger type.

## Telemetry

Mastra supports the OpenTelemetry Protocol (OTLP) for tracing and monitoring your application. When telemetry is enabled, Mastra automatically traces all core primitives including agent operations, LLM interactions, tool executions, integration calls, workflow runs, and database operations. Your telemetry data can then be exported to any OTEL collector.

### Basic Configuration

Here's a simple example of enabling telemetry:

```ts filename="mastra.config.ts" showLineNumbers copy
export const mastra = new Mastra({
  // ... other config
  telemetry: {
    serviceName: "my-app",
    enabled: true,
    sampling: {
      type: "always_on",
    },
    export: {
      type: "otlp",
      endpoint: "http://localhost:4318", // SigNoz local endpoint
    },
  },
});
```

### Configuration Options

The telemetry config accepts these properties:

```ts
type OtelConfig = {
  // Name to identify your service in traces (optional)
  serviceName?: string;

  // Enable/disable telemetry (defaults to true)
  enabled?: boolean;

  // Control how many traces are sampled
  sampling?: {
    type: "ratio" | "always_on" | "always_off" | "parent_based";
    probability?: number; // For ratio sampling
    root?: {
      probability: number; // For parent_based sampling
    };
  };

  // Where to send telemetry data
  export?: {
    type: "otlp" | "console";
    endpoint?: string;
    headers?: Record<string, string>;
  };
};
```

See the [OtelConfig reference documentation](/docs/reference/observability/otel-config.mdx) for more details.

### Environment Variables

You can configure the OTLP endpoint and headers through environment variables:

```env filename=".env" copy
OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318
OTEL_EXPORTER_OTLP_HEADERS=x-api-key=your-api-key
```

Then in your config:

```ts filename="mastra.config.ts" showLineNumbers copy
export const mastra = new Mastra({
  // ... other config
  telemetry: {
    serviceName: "my-app",
    enabled: true,
    export: {
      type: "otlp",
      // endpoint and headers will be picked up from env vars
    },
  },
});
```

### Example: SigNoz Integration

Here's what a traced agent interaction looks like in [SigNoz](https://signoz.io):

*Agent interaction trace showing spans, LLM calls, and tool executions*

### Other Supported Providers

For a complete list of supported observability providers and their configuration details, see the [Observability Providers reference](../reference/observability/providers/).

### Next.js Configuration [Local Dev]

When developing locally with Next.js, you'll need to:

1. Install the instrumentation package:

```bash copy
npm install import-in-the-middle # or require-in-the-middle for CJS
```

2. Add it as an external dependency in your Next.js config:

```ts filename="next.config.ts" showLineNumbers copy
import type { NextConfig } from "next";

const nextConfig: NextConfig = {
  serverExternalPackages: ["import-in-the-middle"],
};

export default nextConfig;
```

This configuration is only necessary for local development to ensure proper instrumentation during hot reloading.

================================================================================
Source: src/pages/docs/getting-started/installation.mdx
================================================================================

---
title: "Installing Mastra Locally | Getting Started | Mastra Docs"
description: Guide on installing Mastra and setting up the necessary prerequisites for running it with various LLM providers.
---

import { Callout, Steps, Tabs } from "nextra/components";
import YouTube from "../../../components/youtube";

# Installing Mastra Locally

To run Mastra, you need access to an LLM. Typically, you'll want to get an API key from an LLM provider such as [OpenAI](https://platform.openai.com/), [Anthropic](https://console.anthropic.com/settings/keys), or [Google Gemini](https://ai.google.dev/gemini-api/docs). You can also run Mastra with a local LLM using [Ollama](https://ollama.ai/).
## Prerequisites - Node.js `v20.0` or higher - Access to a [supported large language model (LLM)](/docs/reference/llm/providers-and-models) ## Automatic Installation ### Create a New Project We recommend starting a new Mastra project using `create-mastra`, which will scaffold your project. To create a project, run: ```bash copy npx create-mastra@latest ``` ```bash copy npm create mastra ``` ```bash copy yarn create mastra ``` ```bash copy pnpm create mastra ``` On installation, you'll be guided through the following prompts: ```bash What do you want to name your project? my-mastra-app Choose components to install: ◯ Agents (recommended) ◯ Tools ◯ Workflows Select default provider: ◯ OpenAI (recommended) ◯ Anthropic ◯ Groq Would you like to include example code? No / Yes ``` After the prompts, `create-mastra` will set up your project directory with TypeScript, install dependencies, and configure your selected components and LLM provider. ### Set Up your API Key Add the API key for your configured LLM provider in your `.env` file. ```bash filename=".env" copy OPENAI_API_KEY= ``` Note: If you prefer to run the command with flags (non-interactive mode) and include the example code, you can use: ```bash copy npx create-mastra@latest --components agents,tools --llm openai --example ``` This allows you to specify your preferences upfront without being prompted. ## Manual Installation
If you prefer to set up your Mastra project manually, follow these steps:

### Create a New Project

Create a project directory and navigate into it:

```bash copy
mkdir hello-mastra
cd hello-mastra
```

Then, initialize a TypeScript project including the `@mastra/core` package:

```bash copy npm2yarn
npm init -y
npm install typescript tsx @types/node mastra@alpha --save-dev
npm install @mastra/core@alpha zod
npx tsc --init
```

### Set Up your API Key

Create a `.env` file in your project root directory and add your API key:

```bash filename=".env" copy
OPENAI_API_KEY=your_openai_api_key
```

Replace `your_openai_api_key` with your actual API key.

### Create a Tool

Create a `weather-tool` tool file:

```bash copy
mkdir -p src/mastra/tools && touch src/mastra/tools/weather-tool.ts
```

Then, add the following code to `src/mastra/tools/weather-tool.ts`:

```ts filename="src/mastra/tools/weather-tool.ts" showLineNumbers copy
import { createTool } from "@mastra/core";
import { z } from "zod";

interface WeatherResponse {
  current: {
    time: string;
    temperature_2m: number;
    apparent_temperature: number;
    relative_humidity_2m: number;
    wind_speed_10m: number;
    wind_gusts_10m: number;
    weather_code: number;
  };
}

export const weatherTool = createTool({
  id: "get-weather",
  description: "Get current weather for a location",
  inputSchema: z.object({
    location: z.string().describe("City name"),
  }),
  outputSchema: z.object({
    temperature: z.number(),
    feelsLike: z.number(),
    humidity: z.number(),
    windSpeed: z.number(),
    windGust: z.number(),
    conditions: z.string(),
    location: z.string(),
  }),
  execute: async ({ context }) => {
    return await getWeather(context.location);
  },
});

const getWeather = async (location: string) => {
  const geocodingUrl = `https://geocoding-api.open-meteo.com/v1/search?name=${encodeURIComponent(location)}&count=1`;
  const geocodingResponse = await fetch(geocodingUrl);
  const geocodingData = await geocodingResponse.json();

  if (!geocodingData.results?.[0]) {
    throw new Error(`Location '${location}' not found`);
  }

  const { latitude, longitude, name } = geocodingData.results[0];

  const weatherUrl = `https://api.open-meteo.com/v1/forecast?latitude=${latitude}&longitude=${longitude}&current=temperature_2m,apparent_temperature,relative_humidity_2m,wind_speed_10m,wind_gusts_10m,weather_code`;

  const response = await fetch(weatherUrl);
  const data: WeatherResponse = await response.json();

  return {
    temperature: data.current.temperature_2m,
    feelsLike: data.current.apparent_temperature,
    humidity: data.current.relative_humidity_2m,
    windSpeed: data.current.wind_speed_10m,
    windGust: data.current.wind_gusts_10m,
    conditions: getWeatherCondition(data.current.weather_code),
    location: name,
  };
};

function getWeatherCondition(code: number): string {
  const conditions: Record<number, string> = {
    0: "Clear sky",
    1: "Mainly clear",
    2: "Partly cloudy",
    3: "Overcast",
    45: "Foggy",
    48: "Depositing rime fog",
    51: "Light drizzle",
    53: "Moderate drizzle",
    55: "Dense drizzle",
    56: "Light freezing drizzle",
    57: "Dense freezing drizzle",
    61: "Slight rain",
    63: "Moderate rain",
    65: "Heavy rain",
    66: "Light freezing rain",
    67: "Heavy freezing rain",
    71: "Slight snow fall",
    73: "Moderate snow fall",
    75: "Heavy snow fall",
    77: "Snow grains",
    80: "Slight rain showers",
    81: "Moderate rain showers",
    82: "Violent rain showers",
    85: "Slight snow showers",
    86: "Heavy snow showers",
    95: "Thunderstorm",
    96: "Thunderstorm with slight hail",
    99: "Thunderstorm with heavy hail",
  };
  return conditions[code] || "Unknown";
}
```

### Create an Agent

Create a `weather` agent file:

```bash copy
mkdir -p
src/mastra/agents && touch src/mastra/agents/weather.ts ``` Then, add the following code to `src/mastra/agents/weather.ts`: ```ts filename="src/mastra/agents/weather.ts" showLineNumbers import { Agent } from "@mastra/core"; import { weatherTool } from "../tools/weather-tool"; export const weatherAgent = new Agent({ name: "Weather Agent", instructions: `You are a helpful weather assistant that provides accurate weather information. Your primary function is to help users get weather details for specific locations. When responding: - Always ask for a location if none is provided - Include relevant details like humidity, wind conditions, and precipitation - Keep responses concise but informative Use the weatherTool to fetch current weather data.`, model: { provider: "OPEN_AI", name: "gpt-4o-mini", }, tools: { weatherTool }, }); ``` ### Register Agent Finally, create the Mastra entry point in `src/mastra/index.ts` and register agent: ```ts filename="src/mastra/index.ts" showLineNumbers import { Mastra } from "@mastra/core"; import { weatherAgent } from "./agents/weather"; export const mastra = new Mastra({ agents: { weatherAgent }, }); ``` This registers your agent with Mastra so that `mastra dev` can discover and serve it. To add Mastra to an existing project, see our Local dev docs on [mastra init](/docs/local-dev/mastra-init). ## Start the Mastra Server Mastra provides commands to serve your agents via REST endpoints ### Development Server Run the following command to start the Mastra server: ```bash copy npm run dev ``` If you have the mastra CLI installed, run: ```bash copy mastra dev ``` This command creates REST API endpoints for your agents. ### Test the Endpoint You can test the agent's endpoint using `curl` or `fetch`: ```bash copy curl -X POST http://localhost:4111/api/agents/weatherAgent/generate \ -H "Content-Type: application/json" \ -d '{"messages": ["What is the weather in London?"]}' ``` ```js copy showLineNumbers fetch('http://localhost:4111/api/agents/weatherAgent/generate', { method: 'POST', headers: { 'Content-Type': 'application/json', }, body: JSON.stringify({ messages: ['What is the weather in London?'], }), }) .then(response => response.json()) .then(data => { console.log('Agent response:', data.text); }) .catch(error => { console.error('Error:', error); }); ``` ## Run from the command line If you'd like to directly call agents from the command line, you can create a script to get an agent and call it: ```ts filename="src/index.ts" showLineNumbers import { mastra } from "./mastra"; async function main() { const agent = await mastra.getAgent("weatherAgent"); const result = await agent.generate("What is the weather in London?"); console.log("Agent response:", result.text); } main(); ``` Then, run the script to test that everything is set up correctly: ```bash copy npx tsx src/index.ts ``` This should output the agent's response to your console. --- ================================================================================ Source: src/pages/docs/getting-started/project-structure.mdx ================================================================================ --- title: "Local Project Structure | Getting Started | Mastra Docs" description: Guide on organizing folders and files in Mastra, including best practices and recommended structures. --- import { FileTree } from 'nextra/components'; # Project Structure This page provides a guide for organizing folders and files in Mastra. 
Mastra is a modular framework, and you can use any of the modules separately or together. You could write everything in a single file (as we showed in the quick start), or separate each agent, tool, and workflow into their own files. We don't enforce a specific folder structure, but we do recommend some best practices, and the CLI will scaffold a project with a sensible structure. ## Using the CLI `mastra init` is an interactive CLI that allows you to: - **Choose a directory for Mastra files**: Specify where you want the Mastra files to be placed (default is `src/mastra`). - **Select components to install**: Choose which components you want to include in your project: - Agents - Tools - Workflows - **Select a default LLM provider**: Choose from supported providers like OpenAI, Anthropic, or Groq. - **Include example code**: Decide whether to include example code to help you get started. ### Example Project Structure Assuming you select all components and include example code, your project structure will look like this: {/* ``` root/ ├── src/ │ └── mastra/ │ ├── agents/ │ │ └── index.ts │ ├── tools/ │ │ └── index.ts │ ├── workflows/ │ │ └── index.ts │ ├── index.ts ├── .env ``` */} ### Top-level Folders | Folder | Description | | ---------------------- | ------------------------------------ | | `src/mastra` | Core application folder | | `src/mastra/agents` | Agent configurations and definitions | | `src/mastra/tools` | Custom tool definitions | | `src/mastra/workflows` | Workflow definitions | ### Top-level Files | File | Description | | --------------------- | ---------------------------------- | | `src/mastra/index.ts` | Main configuration file for Mastra | | `.env` | Environment variables | ================================================================================ Source: src/pages/docs/guides/01-harry-potter.mdx ================================================================================ --- title: "System Messages and Harry Potter | Mastra LLM Guides" description: Guide on using Mastra's LLM class with a Harry Potter-themed example to demonstrate model configuration and response streaming. --- import { Steps } from "nextra/components"; import YouTube from "../../../components/youtube"; # Guide: Harry Potter Mastra provides direct support for Large Language Models (LLMs) through the `LLM` class. It supports a variety of LLM providers, including OpenAI, Anthropic, and Google Gemini. You can choose the specific model and provider, set system and user prompts, and decide whether to stream the response. We'll use a Harry Potter-themed example where we ask about the model's favorite room in Hogwarts, demonstrating how changing the system message affects the response. In this guide, we'll walk through: - Creating a model - Giving it a prompt - Testing the response - Altering the system message - Streaming the response ## Setup Ensure you have the Mastra core package installed: ```bash copy npm install @mastra/core ``` Set your API key for the LLM provider you intend to use. For OpenAI, set the `OPENAI_API_KEY` environment variable. ```bash filename=".env" copy OPENAI_API_KEY= ``` ## Create a Model We'll start by creating a model configuration and initializing the Mastra instance. ```ts copy filename="src/index.ts" import { CoreMessage, Mastra, type ModelConfig } from "@mastra/core"; const mastra = new Mastra(); const modelConfig: ModelConfig = { provider: "OPEN_AI", name: "gpt-4", }; const llm = mastra.LLM(modelConfig); ``` ## Give It a Prompt Next, we'll prepare our prompt. 
We'll ask:

```ts copy filename="src/index.ts"
const prompt = "What is your favorite room in Hogwarts?";
```

## Test the Response

We'll use the `generate` method to get the response from the model.

```ts copy filename="src/index.ts"
const response = await llm.generate(prompt);

console.log("Response:", response.text);
```

Run the script:

```bash copy
npx bun src/index.ts
```

Output:

```
Response: As an AI language model developed by OpenAI, I don't possess consciousness or experiences.
```

The model defaults to its own perspective. To get a more engaging response, we'll alter the system message.

## Alter the System Message

To change the perspective, we'll add a system message to specify the persona of the model. First, we'll have the model respond as Harry Potter.

**As Harry Potter**

```ts copy filename="src/index.ts"
const messages = [
  {
    role: "system",
    content: "You are Harry Potter.",
  },
  {
    role: "user",
    content: "What is your favorite room in Hogwarts?",
  },
] as CoreMessage[];

const responseHarry = await llm.generate(messages);

console.log("Response as Harry Potter:", responseHarry.text);
```

Output:

```
Response as Harry Potter: My favorite room in Hogwarts is definitely the Gryffindor Common Room. It's where I feel most at home, surrounded by my friends, the warm fireplace, and the cozy chairs. It's a place filled with great memories.
```

---

**As Draco Malfoy**

Now, let's change the system message to have the model respond as Draco Malfoy.

```ts copy filename="src/index.ts"
messages[0].content = "You are Draco Malfoy.";

const responseDraco = await llm.generate(messages);

console.log("Response as Draco Malfoy:", responseDraco.text);
```

Output:

```
Response as Draco Malfoy: My favorite room in Hogwarts is the Slytherin Common Room. It's located in the dungeons, adorned with green and silver decor, and has a magnificent view of the Black Lake's depths. It's exclusive and befitting of those with true ambition.
```

## Stream the Response

Finally, we'll demonstrate how to stream the response from the model. Streaming is useful for handling longer outputs or providing real-time feedback.

```ts copy filename="src/index.ts"
const stream = await llm.stream(messages);

console.log('Streaming response as Draco Malfoy:');

for await (const chunk of stream.textStream) {
  process.stdout.write(chunk);
}

console.log('\n');
```

Run the script again:

```bash copy
npx bun src/index.ts
```

**Output:**

```
Streaming response as Draco Malfoy: My favorite room in Hogwarts is the Slytherin Common Room. Situated in the dungeons, it's an elegant space with greenish lights and serpentine motifs...
```

## Summary

By following this guide, you've learned how to:

- Create and configure an LLM model in Mastra
- Provide prompts and receive responses
- Use system messages to change the model's perspective
- Stream responses from the model

Feel free to experiment with different system messages and prompts to explore the capabilities of Mastra's LLM support.

================================================================================
Source: src/pages/docs/guides/02-chef-michel.mdx
================================================================================

---
title: "Building an AI Chef Assistant | Mastra Agent Guides"
description: Guide on creating a Chef Assistant agent in Mastra to help users cook meals with available ingredients.
---

import { Steps } from "nextra/components";
import YouTube from "../../../components/youtube";

# Agents Guide: Building a Chef Assistant

In this guide, we'll walk through creating a "Chef Assistant" agent that helps users cook meals with available ingredients.

## Prerequisites

- Node.js installed
- Mastra installed: `npm install @mastra/core`

---

## Create the Agent

### Define the Agent

Create a new file `src/mastra/agents/chefAgent.ts` and define your agent:

```ts copy filename="src/mastra/agents/chefAgent.ts"
import { Agent } from "@mastra/core";

export const chefAgent = new Agent({
  name: "chef-agent",
  instructions:
    "You are Michel, a practical and experienced home chef. " +
    "You help people cook with whatever ingredients they have available.",
  model: {
    provider: "OPEN_AI",
    name: "gpt-4o-mini",
    toolChoice: "auto",
  },
});
```

---

## Set Up Environment Variables

Create a `.env` file in your project root and add your OpenAI API key:

```bash filename=".env" copy
OPENAI_API_KEY=your_openai_api_key
```

---

## Register the Agent with Mastra

In your main file, register the agent:

```ts copy filename="src/mastra/index.ts"
import { Mastra } from "@mastra/core";
import { chefAgent } from "./agents/chefAgent";

export const mastra = new Mastra({
  agents: { chefAgent },
});
```

---

## Interacting with the Agent

### Generating Text Responses

```ts copy filename="src/index.ts"
import { chefAgent } from "./mastra/agents/chefAgent";

async function main() {
  const query =
    "In my kitchen I have: pasta, canned tomatoes, garlic, olive oil, and some dried herbs (basil and oregano). What can I make?";
  console.log(`Query: ${query}`);

  const response = await chefAgent.generate([{ role: "user", content: query }]);

  console.log("\n👨‍🍳 Chef Michel:", response.text);
}

main();
```

Run the script:

```bash copy
npx bun src/index.ts
```

Output:

```
Query: In my kitchen I have: pasta, canned tomatoes, garlic, olive oil, and some dried herbs (basil and oregano). What can I make?

👨‍🍳 Chef Michel: You can make a delicious pasta al pomodoro! Here's how...
```

---

### Streaming Responses

```ts copy filename="src/index.ts"
import { chefAgent } from "./mastra/agents/chefAgent";

async function main() {
  const query =
    "Now I'm over at my friend's house, and they have: chicken thighs, coconut milk, sweet potatoes, and some curry powder.";
  console.log(`Query: ${query}`);

  const stream = await chefAgent.stream([{ role: "user", content: query }]);

  console.log("\n👨‍🍳 Chef Michel: ");

  for await (const chunk of stream.textStream) {
    process.stdout.write(chunk);
  }

  console.log("\n\n✅ Recipe complete!");
}

main();
```

Output:

```
Query: Now I'm over at my friend's house, and they have: chicken thighs, coconut milk, sweet potatoes, and some curry powder.

👨‍🍳 Chef Michel: Great! You can make a comforting chicken curry...

✅ Recipe complete!
```

---

### Generating a Recipe with Structured Data

```ts copy filename="src/index.ts"
import { z } from "zod";
import { chefAgent } from "./mastra/agents/chefAgent";

async function main() {
  const query = "I want to make lasagna, can you generate a lasagna recipe for me?";
  console.log(`Query: ${query}`);

  // Define the Zod schema
  const schema = z.object({
    ingredients: z.array(
      z.object({
        name: z.string(),
        amount: z.string(),
      }),
    ),
    steps: z.array(z.string()),
  });

  const response = await chefAgent.generate(
    [{ role: "user", content: query }],
    { output: schema },
  );
  console.log("\n👨‍🍳 Chef Michel:", response.object);
}

main();
```

Output:

```
Query: I want to make lasagna, can you generate a lasagna recipe for me?

👨‍🍳 Chef Michel: {
  ingredients: [
    { name: "Lasagna noodles", amount: "12 sheets" },
    { name: "Ground beef", amount: "1 pound" },
    // ...
], steps: [ "Preheat oven to 375°F (190°C).", "Cook the lasagna noodles according to package instructions.", // ... ] } ``` --- ## Running the Agent Server ### Using `mastra dev` You can run your agent as a service using the `mastra dev` command: ```bash copy mastra dev ``` This will start a server exposing endpoints to interact with your registered agents. ### Accessing the Chef Assistant API By default, `mastra dev` runs on `http://localhost:4111`. Your Chef Assistant agent will be available at: ``` POST http://localhost:4111/api/agents/chefAgent/generate ``` ### Interacting with the Agent via `curl` You can interact with the agent using `curl` from the command line: ```bash copy curl -X POST http://localhost:4111/api/agents/chefAgent/generate \ -H "Content-Type: application/json" \ -d '{ "messages": [ { "role": "user", "content": "I have eggs, flour, and milk. What can I make?" } ] }' ``` **Sample Response:** ```json { "text": "You can make delicious pancakes! Here's a simple recipe..." } ``` ================================================================================ Source: src/pages/docs/guides/03-stock-agent.mdx ================================================================================ --- title: "Building an AI Stock Agent | Mastra Agents | Guides" description: Guide on creating a simple stock agent in Mastra to fetch the last day's closing stock price for a given symbol. --- import { Steps } from "nextra/components"; import YouTube from "../../../components/youtube"; # Stock Agent We're going to create a simple agent that fetches the last day's closing stock price for a given symbol. This example will show you how to create a tool, add it to an agent, and use the agent to fetch stock prices. ## Project Structure ``` stock-price-agent/ ├── src/ │ ├── agents/ │ │ └── stockAgent.ts │ ├── tools/ │ │ └── stockPrices.ts │ └── index.ts ├── package.json └── .env ``` --- ## Initialize the Project and Install Dependencies First, create a new directory for your project and navigate into it: ```bash mkdir stock-price-agent cd stock-price-agent ``` Initialize a new Node.js project and install the required dependencies: ```bash npm init -y npm install @mastra/core zod ``` Set Up Environment Variables Create a `.env` file at the root of your project to store your OpenAI API key. ```bash filename=".env" copy OPENAI_API_KEY=your_openai_api_key ``` Create the necessary directories and files: ```bash mkdir -p src/agents src/tools touch src/agents/stockAgent.ts src/tools/stockPrices.ts src/index.ts ``` --- ## Create the Stock Price Tool Next, we'll create a tool that fetches the last day's closing stock price for a given symbol. ```ts filename="src/tools/stockPrices.ts" import { createTool } from "@mastra/core"; import { z } from "zod"; const getStockPrice = async (symbol: string) => { const data = await fetch( `https://mastra-stock-data.vercel.app/api/stock-data?symbol=${symbol}`, ).then((r) => r.json()); return data.prices["4. close"]; }; export const stockPrices = createTool({ id: "Get Stock Price", inputSchema: z.object({ symbol: z.string(), }), description: `Fetches the last day's closing stock price for a given symbol`, execute: async ({ context: { symbol } }) => { console.log("Using tool to fetch stock price for", symbol); return { symbol, currentPrice: await getStockPrice(symbol), }; }, }); ``` --- ## Add the Tool to an Agent We'll create an agent and add the `stockPrices` tool to it. 
```ts filename="src/agents/stockAgent.ts" import { Agent } from "@mastra/core"; import * as tools from "../tools/stockPrices"; export const stockAgent = new Agent({ name: "Stock Agent", instructions: "You are a helpful assistant that provides current stock prices. When asked about a stock, use the stock price tool to fetch the stock price.", model: { provider: "OPEN_AI", name: "gpt-4o-mini", }, tools: { stockPrices: tools.stockPrices, }, }); ``` --- ## Set Up the Mastra Instance We need to initialize the Mastra instance with our agent and tool. ```ts filename="src/index.ts" import { Mastra } from "@mastra/core"; import { stockAgent } from "./agents/stockAgent"; export const mastra = new Mastra({ agents: { stockAgent }, }); ``` ## Serve the Application Instead of running the application directly, we'll use the `mastra dev` command to start the server. This will expose your agent via REST API endpoints, allowing you to interact with it over HTTP. In your terminal, start the Mastra server by running: ```bash mastra dev --dir src ``` This command will allow you to test your stockPrices tool and your stockAgent within the playground. This will also start the server and make your agent available at: ``` http://localhost:4111/api/agents/stockAgent/generate ``` --- ## Test the Agent with cURL Now that your server is running, you can test your agent's endpoint using `curl`: ```bash curl -X POST http://localhost:4111/api/agents/stockAgent/generate \ -H "Content-Type: application/json" \ -d '{ "messages": [ { "role": "user", "content": "What is the current stock price of Apple (AAPL)?" } ] }' ``` **Expected Response:** You should receive a JSON response similar to: ```json { "text": "The current price of Apple (AAPL) is $174.55.", "agent": "Stock Agent" } ``` This indicates that your agent successfully processed the request, used the `stockPrices` tool to fetch the stock price, and returned the result. ================================================================================ Source: src/pages/docs/guides/04-recruiter.mdx ================================================================================ --- title: "Building an AI Recruiter | Mastra Workflows | Guides" description: Guide on building a recruiter workflow in Mastra to gather and process candidate information using LLMs. --- # Introduction In this guide, you'll learn how Mastra helps you build workflows with LLMs. We'll walk through creating a workflow that gathers information from a candidate's resume, then branches to either a technical or behavioral question based on the candidate's profile. Along the way, you'll see how to structure workflow steps, handle branching, and integrate LLM calls. Below is a concise version of the workflow. It starts by importing the necessary modules, sets up Mastra, defines steps to extract and classify candidate data, and then asks suitable follow-up questions. Each code block is followed by a short explanation of what it does and why it's useful. ## 1. Imports and Setup You need to import Mastra tools and Zod to handle workflow definitions and data validation. ```typescript filename="src/mastra/index.ts" copy import { Step, Workflow, Mastra } from "@mastra/core"; import { z } from "zod"; ``` Add your `OPENAI_API_KEY` to the `.env` file. ```bash filename=".env" copy OPENAI_API_KEY= ``` ## 2. Step One: Gather Candidate Info You want to extract candidate details from the resume text and classify them as technical or non-technical. 
This step calls an LLM to parse the resume and return structured JSON, including the name, technical status, specialty, and the original resume text. The code reads resumeText from trigger data, prompts the LLM, and returns organized fields for use in subsequent steps. ```typescript filename="src/mastra/index.ts" copy const gatherCandidateInfo = new Step({ id: "gatherCandidateInfo", inputSchema: z.object({ resumeText: z.string(), }), outputSchema: z.object({ candidateName: z.string(), isTechnical: z.boolean(), specialty: z.string(), resumeText: z.string(), }), execute: async ({ context, mastra }) => { if (!mastra?.llm) { throw new Error("Mastra instance is required to run this step"); } const resumeText = context.machineContext?.getStepPayload<{ resumeText: string; }>("trigger")?.resumeText; const llm = mastra.llm({ provider: "OPEN_AI", name: "gpt-4o-mini" }); const prompt = ` You are given this resume text: "${resumeText}" `; const res = await llm.generate(prompt, { output: z.object({ candidateName: z.string(), isTechnical: z.boolean(), specialty: z.string(), resumeText: z.string(), }), }); return res.object; }, }); ``` ## 3. Technical Question Step This step prompts a candidate who is identified as technical for more information about how they got into their specialty. It uses the entire resume text so the LLM can craft a relevant follow-up question. The code generates a question about the candidate's specialty. ```typescript filename="src/mastra/index.ts" copy interface CandidateInfo { candidateName: string; isTechnical: boolean; specialty: string; resumeText: string; } const askAboutSpecialty = new Step({ id: "askAboutSpecialty", outputSchema: z.object({ question: z.string(), }), execute: async ({ context, mastra }) => { if (!mastra?.llm) { throw new Error("Mastra instance is required to run this step"); } const candidateInfo = context.machineContext?.getStepPayload( "gatherCandidateInfo", ); const llm = mastra.llm({ provider: "OPEN_AI", name: "gpt-4o-mini" }); const prompt = ` You are a recruiter. Given the resume below, craft a short question for ${candidateInfo?.candidateName} about how they got into "${candidateInfo?.specialty}". Resume: ${candidateInfo?.resumeText} `; const res = await llm.generate(prompt); return { question: res?.text?.trim() || "" }; }, }); ``` ## 4. Behavioral Question Step If the candidate is non-technical, you want a different follow-up question. This step asks what interests them most about the role, again referencing their complete resume text. The code solicits a role-focused query from the LLM. ```typescript filename="src/mastra/index.ts" copy const askAboutRole = new Step({ id: "askAboutRole", outputSchema: z.object({ question: z.string(), }), execute: async ({ context, mastra }) => { if (!mastra?.llm) { throw new Error("Mastra instance is required to run this step"); } const candidateInfo = context.machineContext?.getStepPayload( "gatherCandidateInfo", ); const llm = mastra.llm({ provider: "OPEN_AI", name: "gpt-4o-mini" }); const prompt = ` You are a recruiter. Given the resume below, craft a short question for ${candidateInfo?.candidateName} asking what interests them most about this role. Resume: ${candidateInfo?.resumeText} `; const res = await llm.generate(prompt); return { question: res?.text?.trim() || "" }; }, }); ``` ## 5. Define the Workflow You now combine the steps to implement branching logic based on the candidate's technical status. 
The workflow first gathers candidate data, then either asks about their specialty or about their role, depending on isTechnical. The code chains gatherCandidateInfo with askAboutSpecialty and askAboutRole, and commits the workflow. ```typescript filename="src/mastra/index.ts" copy const candidateWorkflow = new Workflow({ name: "candidate-workflow", triggerSchema: z.object({ resumeText: z.string(), }), }); candidateWorkflow .step(gatherCandidateInfo) .then(askAboutSpecialty, { when: { "gatherCandidateInfo.isTechnical": true }, }) .after(gatherCandidateInfo) .step(askAboutRole, { when: { "gatherCandidateInfo.isTechnical": false }, }); candidateWorkflow.commit(); ``` ## 6. Execute the Workflow ```typescript filename="src/mastra/index.ts" copy const mastra = new Mastra({ workflows: { candidateWorkflow, }, }); (async () => { const { runId, start } = mastra.getWorkflow("candidateWorkflow").createRun(); console.log("Run", runId); const runResult = await start({ triggerData: { resumeText: "Simulated resume content..." }, }); console.log("Final output:", runResult.results); })(); ``` You've just built a workflow to parse a resume and decide which question to ask based on the candidate's technical abilities. Congrats and happy hacking! ================================================================================ Source: src/pages/docs/index.mdx ================================================================================ --- title: "Introduction | Mastra Docs" description: "Mastra is a Typescript agent framework. It helps you build AI applications and features quickly. It gives you the set of primitives you need: workflows, agents, RAG, integrations, syncs and evals." --- # About Mastra Mastra is an open-source Typescript agent framework. It's designed to give you the primitives you need to build AI applications and features. You can use Mastra to build [AI agents](/docs/agents/00-overview.mdx) that have memory and can execute functions, or chain LLM calls in deterministic [workflows](/docs/workflows/00-overview.mdx). You can chat with your agents in Mastra's [local dev environment](/docs/local-dev/mastra-dev.mdx), feed them application-specific knowledge with [RAG](/docs/rag/overview.mdx), and score their outputs with Mastra's [evals](/docs/08-running-evals.mdx). The main features include: * **[Model routing](/docs/llm-models/00-overview.mdx)**: With Mastra, you can use the same syntax to call any LLM provider in Mastra, including OpenAI, Anthropic, and Google Gemini. * **[Agent memory and tool calling](/docs/agents/01-agent-memory.mdx)**: With Mastra, you can give your agent tools (functions) that it can call. You can persist agent memory and retrieve it based on recency, semantic similarity, or conversation thread. * **[Workflow graphs](/docs/workflows/00-overview.mdx)**: When you want to execute LLM calls in a deterministic way, Mastra gives you a graph-based workflow engine. You can define discrete steps, log inputs and outputs at each step of each run, and pipe them into an observability tool. Mastra workflows have a simple syntax for control flow (`step()`, `.then()`, `.after()`) that allows branching and chaining. * **[Agent development environment](/docs/local-dev/mastra-dev.mdx)**: When you're developing an agent locally, you can chat with it and see its state and memory in Mastra's agent development environment. 
* **[Retrieval-augmented generation (RAG)](/docs/rag/overview.mdx)**: Mastra gives you APIs to process documents (text, HTML, Markdown, JSON) into chunks, create embeddings, and store them in a vector database. At query time, it retrieves relevant chunks to ground LLM responses in your data, with a unified API on top of multiple vector stores (Pinecone, pgvector, etc) and embedding providers (OpenAI, Cohere, etc). * **[Deployment](/docs/deployment/deployment.mdx)**: Mastra supports bundling your agents and workflows within an existing React, Next.js, or Node.js application, or into standalone endpoints. The Mastra deploy helper lets you easily bundle agents and workflows into a Node.js server using Hono, or deploy it onto a serverless platform like Vercel, Cloudflare Workers, or Netlify. * **[Evals](/docs/08-running-evals.mdx)**: Mastra provides automated evaluation metrics that use model-graded, rule-based, and statistical methods to assess LLM outputs, with built-in metrics for toxicity, bias, relevance, and factual accuracy. You can also define your own evals. ================================================================================ Source: src/pages/docs/llm-models/00-overview.mdx ================================================================================ --- title: "Interacting with LLMs Directly | Mastra LLM Models" description: Overview of the LLM class in Mastra, detailing its capabilities for interacting with various language models. --- import { Callout } from "nextra/components"; # Interacting with LLMs Directly Mastra provides direct support for Large Language Models (LLMs) through the `LLM` class. The `LLM` class allows you to interact with various language models seamlessly, enabling you to generate text, handle conversations, and more. This guide covers: - How to initialize the LLM class. - Supported models and providers. - Using the `generate` function. - Message formats in `generate`. - Output formats in `generate`. --- ## Initializing the LLM Class To start using the `LLM` class, you need to initialize it with the desired model configuration. Here's how you can do it: ```typescript import { Mastra } from "@mastra/core"; const mastra = new Mastra(); const llm = mastra.LLM({ provider: "OPEN_AI", name: "gpt-4o-mini", }); ``` This initialization allows telemetry to pass through to the LLM, providing insights into model usage and performance. **Note:** You can find more details about the model configuration options in the [Providers and Models reference](../reference/llm/providers-and-models.mdx). --- ## Supported Models and Providers Mastra supports major LLM providers out of the box, plus additional providers through AI SDK integrations. Custom providers can also be added via the Portkey service. ### Most Popular Providers and Models | Provider | Supported Models | | ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | **OpenAI** | `gpt-4`, `gpt-4-turbo`, `gpt-3.5-turbo`, `gpt-4o`, `gpt-4o-mini` | | **Anthropic** | `claude-3-5-sonnet-20241022`, `claude-3-5-sonnet-20240620`, `claude-3-5-haiku-20241022`, `claude-3-opus-20240229`, `claude-3-sonnet-20240229`, `claude-3-haiku-20240307` | | **Google Gemini** | `gemini-1.5-pro-latest`, `gemini-1.5-pro`, `gemini-1.5-flash-latest`, `gemini-1.5-flash` | A full list of supported models can be found [here](../reference/llm/providers-and-models.mdx). 
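Because every provider goes through the same `LLM` class, switching models is just a configuration change. Here is a minimal sketch reusing the `mastra` instance from the initialization example and a model from the table above; the `"ANTHROPIC"` provider string is an assumption based on the `"OPEN_AI"` naming convention, so check the providers and models reference for the exact value.

```typescript
// Same calling convention, different provider — only the model config changes.
// NOTE: "ANTHROPIC" is assumed by analogy with "OPEN_AI"; see the providers reference.
const claude = mastra.LLM({
  provider: "ANTHROPIC",
  name: "claude-3-5-sonnet-20241022",
});

const response = await claude.generate(
  "Explain retrieval-augmented generation in one sentence.",
);
console.log(response.text);
```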
{" "} If you don't want to pay for an LLM provider, Google Gemini has a generous free tier for its API. --- ## The `generate` Function The main function you'll use with the `LLM` class is `generate`. It allows you to send messages to the language model and receive responses. The `generate` function takes: - **messages**: The first parameter, which can be a string, an array of strings, or an array of message objects. - **options**: The second parameter, which includes additional configurations like streaming, schemas for structured output, etc. This design covers all potential use cases and is extensible to multi-modal interactions in the future. --- ## Message Formats in `generate` The `generate` function supports three types of message formats: ### 1. Simple String You can pass a single string as the message: ```typescript const response = await llm.generate("Tell me a joke."); ``` ### 2. Array of Strings You can provide an array of strings, which will be converted into user messages: ```typescript const response = await llm.generate([ "Hello!", "Can you explain quantum mechanics?", ]); ``` ### 3. Detailed Message Objects For finer control, you can pass an array of message objects, specifying the role and content: ```typescript const response = await llm.generate([ { role: "system", content: "You are a helpful assistant." }, { role: "user", content: "What is the meaning of life?" }, ]); ``` --- ## Output Formats in `generate` The `generate` function supports four types of output formats: ### 1. Simple Text Generation Receive a basic text response from the model: ```typescript const response = await llm.generate("What is AI?"); console.log(response.text); ``` ### 2. Structured Output Request a structured response by providing a schema. This is useful when you need the output in a specific format: ```typescript import { z } from "zod"; const mySchema = z.object({ definition: z.string(), examples: z.array(z.string()), }); const response = await llm.generate( "Define machine learning and give examples.", { output: mySchema, }, ); console.log(response.object); ``` ### 3. Streaming Text Stream the response in real-time, which is useful for handling longer outputs or providing immediate feedback to users: ```typescript const stream = await llm.stream("Tell me a story about a brave knight."); for await (const chunk of stream.textStream) { process.stdout.write(chunk); } ``` ### 4. Streaming Structured Output Stream a structured response using a schema: ```typescript const stream = await llm.stream("Provide live weather data.", { output: mySchema, }); for await (const chunk of stream.textStream) { console.log(chunk); } ``` --- ## Additional Notes - **Telemetry**: Initializing the `LLM` class through Mastra allows telemetry data to pass through, enabling better monitoring and debugging. - **Extensibility**: The design of the `generate` function and message formats makes it future-proof and extensible for multi-modal interactions. ================================================================================ Source: src/pages/docs/local-dev/engine.mdx ================================================================================ --- title: "Using the Mastra Engine | Mastra Local Development Docs" description: Documentation for the Mastra Engine, providing PostgreSQL-based storage for agent memory and vector operations. --- # Using the Mastra Engine The Mastra Engine provides PostgreSQL-based storage for two core AI application needs: 1. 
Agent Memory - Persistent storage for conversation history and agent state 2. Vector Operations - Storage and similarity search for RAG applications Mastra Engine is not required to run Mastra, but provides a "batteries-included" local development experience and a production-ready data layer. ## Setting up for local development The Mastra CLI includes commands to manage your engine and database: - `mastra engine add`: Sets up your dev environment, starts Docker containers for Postgres, and configures `.env`. - `mastra engine up`: Starts your Docker containers as defined in your docker config file. - `mastra engine migrate`: Runs database migrations. - `mastra engine generate`: Generates TypeScript types from your database schema. - `mastra engine down`: Stops your Docker containers. The engine will run by default on port 5432 of `localhost`. ## Configuring for Agent Memory Configure your agent to use PostgreSQL for memory storage: ```typescript import { Mastra } from "@mastra/core"; import { PgMemory } from "@mastra/memory/postgres"; const pgMemory = new PgMemory({ connectionString: process.env.POSTGRES_CONNECTION_STRING!, }); export const mastra = new Mastra({ memory: pgMemory, agents: { myAgent }, }); ``` Now, agent memory operations like `saveMemory`, `getContextWindow`, and `getMemory` are available to your agent. See the [Memory](/docs/agents/01-agent-memory) reference for more details. ## Vector Operations (RAG) For RAG applications, you can set up vector storage: ```typescript import { PgVector } from "@mastra/vector-pg"; // Initialize vector store const vectorStore = new PgVector({ connectionString: process.env.POSTGRES_CONNECTION_STRING!, }); ``` ## Deploying to Production To deploy to production, you'll need to set up a production Postgres database and configure your Mastra application to use it. You can run `DB_URL=your_prod_db_url mastra engine migrate` to create a schema in a production database. ## Mocking for Tests For testing or development, you can use the `MockMastraEngine`: ```typescript import { MockMastraEngine } from "@mastra/core"; const mockEngine = new MockMastraEngine({ url: "mock://localhost" }); // Create an entity const entity = await mockEngine.createEntity({ name: "Contacts", connectionId: "user_123", }); // Insert records await mockEngine.upsertRecords({ entityId: entity.id, records: [ { externalId: "c1", data: { name: "Alice" }, entityType: "Contacts" }, { externalId: "c2", data: { name: "Bob" }, entityType: "Contacts" }, ], }); // Query the records const records = await mockEngine.getRecordsByEntityId({ entityId: entity.id }); console.log(records); ``` This mock engine mimics the behavior of the real engine without requiring a real database connection, making it ideal for unit tests. ## Summary The Mastra Engine is a PostgreSQL instance that stores agent memory and vector embeddings. You can spin it up locally with Docker for development and deploy it to a production instance. It's a helpful starting spot for handling agent memory and vector operations -- start with Postgres + pgvector, then adapt individual components based on your needs. ================================================================================ Source: src/pages/docs/local-dev/integrations.mdx ================================================================================ --- title: "Using Mastra Integrations | Mastra Local Development Docs" description: Documentation for Mastra integrations, which are auto-generated, type-safe API clients for third-party services. 
--- # Using Mastra Integrations Integrations in Mastra are auto-generated, type-safe API clients for third-party services. They can be used as tools for agents or as steps in workflows. ## Installing an Integration Mastra's default integrations are packaged as individually installable npm modules. You can add an integration to your project by installing it via npm and importing it into your Mastra configuration. ### Example: Adding the GitHub Integration 1. **Install the Integration Package** To install the GitHub integration, run: ```bash npm install @mastra/github ``` 2. **Add the Integration to Your Project** Create a new file for your integrations (e.g., `src/mastra/integrations/index.ts`) and import the integration: ```typescript filename="src/mastra/integrations/index.ts" showLineNumbers copy import { GithubIntegration } from '@mastra/github'; export const github = new GithubIntegration({ config: { PERSONAL_ACCESS_TOKEN: process.env.GITHUB_PAT!, }, }); ``` Make sure to replace `process.env.GITHUB_PAT!` with your actual GitHub Personal Access Token or ensure that the environment variable is properly set. 3. **Use the Integration in Tools or Workflows** You can now use the integration when defining tools for your agents or in workflows. ```typescript filename="src/mastra/tools/index.ts" showLineNumbers copy import { createTool } from '@mastra/core'; import { z } from 'zod'; import { github } from '../integrations'; export const getMainBranchRef = createTool({ id: 'getMainBranchRef', description: 'Fetch the main branch reference from a GitHub repository', inputSchema: z.object({ owner: z.string(), repo: z.string(), }), outputSchema: z.object({ ref: z.string().optional(), }), execute: async ({ context }) => { const client = await github.getApiClient(); const mainRef = await client.gitGetRef({ path: { owner: context.owner, repo: context.repo, ref: 'heads/main', }, }); return { ref: mainRef.data?.ref }; }, }); ``` In the example above: - We import the `github` integration. - We define a tool called `getMainBranchRef` that uses the GitHub API client to fetch the reference of the main branch of a repository. - The tool accepts `owner` and `repo` as inputs and returns the reference string. ## Using Integrations in Agents Once you've defined tools that utilize integrations, you can include these tools in your agents. ```typescript filename="src/mastra/agents/index.ts" showLineNumbers copy import { Agent } from '@mastra/core'; import { getMainBranchRef } from '../tools'; export const codeReviewAgent = new Agent({ name: 'Code Review Agent', instructions: 'An agent that reviews code repositories and provides feedback.', model: { provider: 'OPEN_AI', name: 'gpt-4', }, tools: { getMainBranchRef, // other tools... }, }); ``` In this setup: - We create an agent named `Code Review Agent`. - We include the `getMainBranchRef` tool in the agent's available tools. - The agent can now use this tool to interact with GitHub repositories during conversations. ## Environment Configuration Ensure that any required API keys or tokens for your integrations are properly set in your environment variables. For example, with the GitHub integration, you need to set your GitHub Personal Access Token: ```bash GITHUB_PAT=your_personal_access_token ``` Consider using a `.env` file or another secure method to manage sensitive credentials. ## Available Integrations Mastra provides several built-in integrations; primarily API-key based integrations that do not require OAuth. 
Available integrations include GitHub, Stripe, Resend, Firecrawl, and more. Check [Mastra's codebase](https://github.com/mastra-ai/mastra/tree/main/integrations) or [npm packages](https://www.npmjs.com/search?q=%22%40mastra%22) for a full list of available integrations. ## Conclusion Integrations in Mastra enable your AI agents and workflows to interact with external services seamlessly. By installing and configuring integrations, you can extend the capabilities of your application to include operations such as fetching data from APIs, sending messages, or managing resources in third-party systems. Remember to consult the documentation of each integration for specific usage details and to adhere to best practices for security and type safety. ================================================================================ Source: src/pages/docs/local-dev/mastra-dev.mdx ================================================================================ --- title: "Inspecting Agents with `mastra dev` | Mastra Local Dev Docs" description: Documentation for the mastra dev command, which launches a local development server for Mastra applications. --- # Inspecting agents and workflows with `mastra dev` The `mastra dev` command launches a development server that serves your Mastra application locally. ## REST API Endpoints `mastra dev` spins up REST API endpoints for your agents and workflows, such as: - `POST /api/agents/:agentId/generate` - `POST /api/agents/:agentId/stream` - `POST /api/workflows/:workflowId/start` - `POST /api/workflows/:workflowId/:instanceId/event` - `GET /api/workflows/:workflowId/:instanceId/status` By default, the server runs at http://localhost:4111, but you can change the port with the `--port` flag. ## UI Playground `mastra dev` creates a UI with an agent chat interface, a workflow visualizer, and a tool playground. ## OpenAPI Specification `mastra dev` provides an OpenAPI spec at: - `GET /openapi.json` ## Summary `mastra dev` makes it easy to develop, debug, and iterate on your AI logic in a self-contained environment before deploying to production. - [Mastra Dev reference](../reference/cli/dev.mdx) ================================================================================ Source: src/pages/docs/local-dev/mastra-init.mdx ================================================================================ --- title: "The `mastra init` Command | Local Dev | Mastra Docs" description: Documentation for the mastra init command, which helps you add Mastra to an existing project. --- import { Tabs } from "nextra/components"; # Running `mastra init` for Existing Node.js Projects The `mastra init` command helps you add Mastra to an existing project. It walks you through an interactive setup process to configure Mastra in your project. ## Using the CLI ### Install the CLI First, install the mastra CLI. ```bash copy npm i -g mastra ``` ```bash copy yarn global add mastra ``` ```bash copy pnpm i -g mastra ``` ### Initialize Mastra To initialize Mastra in your project, run the following command and follow the interactive setup: ```bash copy mastra init ``` ### Set Up your API Key Add the API key for your configured LLM provider in your `.env` file.
```env OPENAI_API_KEY= ``` ## Non-Interactive Mode If you prefer to run the command with flags (non-interactive mode) and include the example code, you can use: ```bash copy mastra init --dir src/mastra --components agents,tools --llm openai --example ``` This allows you to specify your preferences upfront without being prompted. - [Mastra Init reference](../reference/cli/init.mdx) ================================================================================ Source: src/pages/docs/rag/chunking-and-embedding.mdx ================================================================================ --- title: Chunking and Embedding Documents | RAG | Mastra Docs description: Guide on chunking and embedding documents in Mastra for efficient processing and retrieval. --- ## Chunking and Embedding Documents Before processing, create a MDocument instance from your content. You can initialize it from various formats: ```ts showLineNumbers copy const docFromText = MDocument.fromText("Your plain text content..."); const docFromHTML = MDocument.fromHTML("Your HTML content..."); const docFromMarkdown = MDocument.fromMarkdown("# Your Markdown content..."); const docFromJSON = MDocument.fromJSON(`{ "key": "value" }`); ``` ## Step 1: Document Processing Use `chunk` to split documents into manageable pieces. Mastra supports multiple chunking strategies optimized for different document types: - `recursive`: Smart splitting based on content structure - `character`: Simple character-based splits - `token`: Token-aware splitting - `markdown`: Markdown-aware splitting - `html`: HTML structure-aware splitting - `json`: JSON structure-aware splitting - `latex`: LaTeX structure-aware splitting Here's an example of how to use the `recursive` strategy: ```ts showLineNumbers copy const chunks = await doc.chunk({ strategy: "recursive", size: 512, overlap: 50, separator: "\n", extract: { metadata: true, // Optionally extract metadata }, }); ``` **Note:** Metadata extraction may use LLM calls, so ensure your API key is set. We go deeper into chunking strategies in our [chunk documentation](/docs/reference/rag/chunk.mdx). ## Step 2: Embedding Generation Transform chunks into embeddings using your preferred provider. Mastra supports both OpenAI and Cohere embeddings: ### Using OpenAI ```ts showLineNumbers copy import { embedMany } from "@mastra/rag"; const { embeddings } = await embedMany(chunks, { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3, }); ``` ### Using Cohere ```ts showLineNumbers copy const embeddings = await embed(chunks, { provider: "COHERE", model: "embed-english-v3.0", maxRetries: 3, }); ``` The embedding functions return vectors, arrays of numbers representing the semantic meaning of your text, ready for similarity searches in your vector database. ## Example: Complete Pipeline Here's an example showing document processing and embedding generation with both providers: ```ts showLineNumbers copy import { MDocument, embedMany } from "@mastra/rag"; // Initialize document const doc = MDocument.fromText(` Climate change poses significant challenges to global agriculture. Rising temperatures and changing precipitation patterns affect crop yields. 
`); // Create chunks const chunks = await doc.chunk({ strategy: "recursive", size: 256, overlap: 50, }); // Generate embeddings with OpenAI const { embeddings: openAIEmbeddings } = await embedMany(chunks, { provider: "OPEN_AI", model: "text-embedding-3-small", }); // Generate embeddings with Cohere const { embeddings: cohereEmbeddings } = await embedMany(chunks, { provider: "COHERE", model: "embed-english-v3.0", }); // Store embeddings in your vector database (using the OpenAI embeddings here) await vectorStore.upsert("embeddings", openAIEmbeddings); ``` This example demonstrates how to process a document, split it into chunks, generate embeddings with both OpenAI and Cohere, and store the results in a vector database. For more examples of different chunking strategies and embedding configurations, see: - [Adjust Chunk Size](/docs/reference/rag/chunk.mdx#adjust-chunk-size) - [Adjust Chunk Delimiters](/docs/reference/rag/chunk.mdx#adjust-chunk-delimiters) - [Embed Text with Cohere](/docs/reference/rag/embeddings.mdx#using-cohere) ================================================================================ Source: src/pages/docs/rag/overview.mdx ================================================================================ --- title: RAG (Retrieval-Augmented Generation) in Mastra | Mastra Docs description: Overview of Retrieval-Augmented Generation (RAG) in Mastra, detailing its capabilities for enhancing LLM outputs with relevant context. --- # RAG (Retrieval-Augmented Generation) in Mastra RAG in Mastra helps you enhance LLM outputs by incorporating relevant context from your own data sources, improving accuracy and grounding responses in real information. Mastra's RAG system provides: - Standardized APIs to process and embed documents - Support for multiple vector stores - Chunking and embedding strategies for optimal retrieval - Observability for tracking embedding and retrieval performance ## Example To implement RAG, you process your documents into chunks, create embeddings, store them in a vector database, and then retrieve relevant context at query time. ```ts showLineNumbers copy import { PgVector } from "@mastra/vector-pg"; import { MDocument, embed } from "@mastra/rag"; // 1. Initialize document const doc = MDocument.fromText(`Your document text here...`); // 2. Create chunks const chunks = await doc.chunk({ strategy: "recursive", size: 512, overlap: 50 }); // 3. Generate embeddings const { embeddings } = await embed(chunks, { provider: "OPEN_AI", model: "text-embedding-3-small" }); // 4. Store in vector database const pgVector = new PgVector(process.env.POSTGRES_CONNECTION_STRING); await pgVector.createIndex("embeddings", 1536); await pgVector.upsert("embeddings", embeddings, chunks.map(chunk => ({ text: chunk.text }))); // 5. Query similar chunks const { embedding: queryEmbedding } = await embed("Your search query", { provider: "OPEN_AI", model: "text-embedding-3-small" }); const results = await pgVector.query("embeddings", queryEmbedding, 3); console.log("Similar chunks:", results); ``` This example shows the essentials: initialize a document, create chunks, generate embeddings, store them, and query for similar content. ## Document Processing The basic building block of RAG is document processing. Documents can be chunked using various strategies (recursive, sliding window, etc.) and enriched with metadata. See the [chunking and embedding doc](./chunking-and-embedding.mdx). ## Vector Storage Mastra supports multiple vector stores for embedding persistence and similarity search, including pgvector, Pinecone, and Qdrant. See the [vector database doc](./vector-databases.mdx).
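Because the supported stores expose the same `createIndex`/`upsert`/`query` methods, swapping backends is mostly a one-line change where the store is constructed. A minimal sketch, assuming the `PineconeVector` constructor shown in the vector database doc and the `chunks` and `embeddings` from the example above:

```ts showLineNumbers copy
import { PineconeVector } from "@mastra/vector-pinecone";

// Same createIndex/upsert calls as the PgVector example above —
// only the store construction changes.
const store = new PineconeVector(process.env.PINECONE_API_KEY);

await store.createIndex("embeddings", 1536);
await store.upsert(
  "embeddings",
  embeddings,
  chunks.map((chunk) => ({ text: chunk.text })),
);
```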
## Observability and Debugging Mastra's RAG system includes observability features to help you optimize your retrieval pipeline: - Track embedding generation performance and costs - Monitor chunk quality and retrieval relevance - Analyze query patterns and cache hit rates - Export metrics to your observability platform See the [OTel Configuration](../reference/observability/otel-config.mdx) page for more details. ## More resources - [Chain of Thought RAG Example](../../examples/rag/cot-rag.mdx) - [All RAG Examples](../../examples/) (including different chunking strategies, embedding models, and vector stores) ================================================================================ Source: src/pages/docs/rag/retrieval.mdx ================================================================================ --- title: "Retrieval, Semantic Search, Reranking | RAG | Mastra Docs" description: Guide on retrieval processes in Mastra's RAG systems, including semantic search, filtering, and re-ranking. --- ## Retrieval in RAG Systems After storing embeddings, you need to retrieve relevant chunks to answer user queries. Mastra provides flexible retrieval options with support for semantic search, filtering, and re-ranking. ## How Retrieval Works 1. The user's query is converted to an embedding using the same model used for document embeddings 2. This embedding is compared to stored embeddings using vector similarity 3. The most similar chunks are retrieved and can be optionally: - Filtered by metadata - Re-ranked for better relevance - Processed through a knowledge graph ## Basic Retrieval The simplest approach is direct semantic search. This method uses vector similarity to find chunks that are semantically similar to the query: ```ts showLineNumbers copy import { PgVector } from "@mastra/vector-pg"; import { embed } from "@mastra/rag"; // Convert query to embedding const { embedding } = await embed( "What are the main points in the article?", { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3, } ); // Query vector store const pgVector = new PgVector(process.env.POSTGRES_CONNECTION_STRING); const results = await pgVector.query("embeddings", embedding, 10); ``` Results include both the text content and a similarity score: ```ts showLineNumbers copy [ { text: "Climate change poses significant challenges...", score: 0.89, metadata: { source: "article1.txt" } }, { text: "Rising temperatures affect crop yields...", score: 0.82, metadata: { source: "article1.txt" } } // ... more results ] ``` ## Advanced Retrieval options ### Metadata Filtering Filter results based on metadata fields to narrow down the search space. This is useful when you have documents from different sources or time periods: ```ts showLineNumbers copy const results = await pgVector.query("embeddings", embedding, { topK: 10, filter: { source: "article1.txt", date: { $gt: "2023-01-01" } } }); ``` ### Re-ranking Initial vector similarity search can sometimes miss nuanced relevance. 
Re-ranking is a more computationally expensive but more accurate process that improves results by: - Considering word order and exact matches - Applying more sophisticated relevance scoring - Using cross-attention between the query and documents Here's how to set up re-ranking: ```ts showLineNumbers copy const vectorQueryTool = createVectorQueryTool({ vectorStoreName: 'pgVector', indexName: 'embeddings', options: { provider: 'OPEN_AI', model: 'text-embedding-3-small' }, topK: 10, reranker: { model: { provider: 'OPEN_AI', name: 'gpt-4o-mini', }, } }); ``` ### Graph-based Retrieval For documents with complex relationships, graph-based retrieval can follow connections between chunks. This helps when: - Information is spread across multiple documents - Documents reference each other - You need to traverse relationships to find complete answers Example setup: ```ts showLineNumbers copy const graphQueryTool = createGraphQueryTool({ vectorStoreName: 'pgVector', indexName: 'embeddings', graphOptions: { relationTypes: ['references', 'similar_to'], maxHops: 2 } }); ``` ## Example implementations For complete examples of these retrieval methods in action, see: - [Basic Retrieval](../../examples/rag/retrieve-results.mdx) - [Metadata Filtering](../../examples/rag/filter-rag.mdx) - [Re-ranking Results](../../examples/rag/rerank-rag.mdx) - [Graph-based Retrieval](../../examples/rag/graph-rag.mdx) ================================================================================ Source: src/pages/docs/rag/vector-databases.mdx ================================================================================ --- title: 'Storing Embeddings in a Vector Database | Mastra Docs' description: Guide on vector storage options in Mastra, including embedded and dedicated vector databases for similarity search. --- import { Tabs } from 'nextra/components'; ## Storing Embeddings in a Vector Database After generating embeddings, you need to store them in a database that supports vector similarity search. Mastra provides a consistent interface for storing and querying embeddings across different vector databases.
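For a sense of that shared interface, here's a minimal query sketch. It assumes a `store` that has already been created and populated using any of the databases below, and reuses the `query(indexName, embedding, topK)` call shown in the retrieval guide:

```ts showLineNumbers copy
import { embed } from "@mastra/rag";

// Embed the question with the same model used for the stored chunks,
// then ask the store (any supported backend) for the closest matches.
const { embedding } = await embed("How do rising temperatures affect crops?", {
  provider: "OPEN_AI",
  model: "text-embedding-3-small",
});

const results = await store.query("my-collection", embedding, 5);
console.log(results);
```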
## Supported Databases ### PostgreSQL with PgVector Best for teams already using PostgreSQL who want to minimize infrastructure complexity: ```ts filename="vector-store.ts" showLineNumbers copy import { PgVector } from '@mastra/vector-pg'; const store = new PgVector(process.env.POSTGRES_CONNECTION_STRING) await store.createIndex("my-collection", 1536); await store.upsert( "my-collection", embeddings, chunks.map(chunk => ({ text: chunk.text })) ) ``` ### Pinecone ```ts filename="vector-store.ts" showLineNumbers copy import { PineconeVector } from '@mastra/vector-pinecone' const store = new PineconeVector(process.env.PINECONE_API_KEY) await store.createIndex("my-collection", 1536); await store.upsert( "my-collection", embeddings, chunks.map(chunk => ({ text: chunk.text })) ) ``` ### Qdrant ```ts filename="vector-store.ts" showLineNumbers copy import { QdrantVector } from '@mastra/vector-qdrant' const store = new QdrantVector({ url: process.env.QDRANT_URL, apiKey: process.env.QDRANT_API_KEY }) await store.createIndex("my-collection", 1536); await store.upsert( "my-collection", embeddings, chunks.map(chunk => ({ text: chunk.text })) ) ``` ### Chroma ```ts filename="vector-store.ts" showLineNumbers copy import { ChromaVector } from '@mastra/vector-chroma' const store = new ChromaVector() await store.createIndex("my-collection", 1536); await store.upsert( "my-collection", embeddings, chunks.map(chunk => ({ text: chunk.text })) ) ``` ### Astra DB ```ts filename="vector-store.ts" showLineNumbers copy import { AstraVector } from '@mastra/vector-astra' const store = new AstraVector({ token: process.env.ASTRA_DB_TOKEN, endpoint: process.env.ASTRA_DB_ENDPOINT, keyspace: process.env.ASTRA_DB_KEYSPACE }) await store.createIndex("my-collection", 1536); await store.upsert( "my-collection", embeddings, chunks.map(chunk => ({ text: chunk.text })) ) ``` ### LibSQL ```ts filename="vector-store.ts" showLineNumbers copy import { LibSQLVector } from '@mastra/vector-libsql' const store = new LibSQLVector(process.env.DATABASE_URL) await store.createIndex("my-collection", 1536); await store.upsert( "my-collection", embeddings, chunks.map(chunk => ({ text: chunk.text })) ) ``` ### Upstash ```ts filename="vector-store.ts" showLineNumbers copy import { UpstashVector } from '@mastra/vector-upstash' const store = new UpstashVector({ url: process.env.UPSTASH_URL, token: process.env.UPSTASH_TOKEN }) await store.createIndex("my-collection", 1536); await store.upsert( "my-collection", embeddings, chunks.map(chunk => ({ text: chunk.text })) ) ``` ### Cloudflare Vectorize ```ts filename="vector-store.ts" showLineNumbers copy import { CloudflareVector } from '@mastra/vector-vectorize' const store = new CloudflareVector({ accountId: process.env.CF_ACCOUNT_ID, apiToken: process.env.CF_API_TOKEN }) await store.createIndex("my-collection", 1536); await store.upsert( "my-collection", embeddings, chunks.map(chunk => ({ text: chunk.text })) ) ``` ## Using Vector Storage Once initialized, all vector stores share the same interface for creating indexes, upserting embeddings, and querying: ```ts filename="store-embeddings.ts" showLineNumbers copy // 1. Create an index (dimension = 1536 for text-embedding-3-small) await store.createIndex('my-collection', 1536); // 2.
Store embeddings with metadata await store.upsert( 'my-collection', embeddings, chunks.map(chunk => ({ text: chunk.text })), ); ```` ## Adding Metadata All vector stores support adding metadata to your vectors, which enables filtering during retrieval: ```ts showLineNumbers copy // Store embeddings with rich metadata await vectorStore.upsert( 'embeddings', embeddings, chunks.map(chunk => ({ text: chunk.text, source: chunk.source, category: chunk.category, timestamp: new Date().toISOString(), })), ); ``` ## Best Practices - Create indexes before bulk insertions - Use batch operations for large insertions (the upsert method handles batching automatically) - Only store metadata you'll query against - Match embedding dimensions to your model (e.g., 1536 for `text-embedding-3-small`) ## Examples For complete examples of different vector store implementations, see: - [Insert Embedding in PgVector](../../examples/rag/insert-embedding-in-pgvector.mdx) - [Insert Embedding in Pinecone](../../examples/rag/insert-embedding-in-pinecone.mdx) - [Insert Embedding in Qdrant](../../examples/rag/insert-embedding-in-qdrant.mdx) - [Insert Embedding in Chroma](../../examples/rag/insert-embedding-in-chroma.mdx) - [Insert Embedding in Astra DB](../../examples/rag/insert-embedding-in-astra.mdx) - [Insert Embedding in LibSQL](../../examples/rag/insert-embedding-in-libsql.mdx) - [Insert Embedding in Upstash](../../examples/rag/insert-embedding-in-upstash.mdx) - [Insert Embedding in Cloudflare Vectorize](../../examples/rag/insert-embedding-in-vectorize.mdx) - [Basic RAG with Vector Storage](../../examples/rag/basic-rag.mdx) ================================================================================ Source: src/pages/docs/reference/agents/createTool.mdx ================================================================================ --- title: "Reference: createTool() | Tools | Agents | Mastra Docs" description: Documentation for the createTool function in Mastra, which creates custom tools for agents and workflows. --- # `createTool()` Tools are typed functions that can be executed by agents or workflows, with built-in integration access and parameter validation. Each tool has a schema that defines its inputs, an executor function that implements its logic, and access to configured integrations. ```ts filename="src/mastra/tools/index.ts" showLineNumbers copy import { createTool } from "@mastra/core"; import { z } from "zod"; const getStockPrice = async (symbol: string) => { const data = await fetch( `https://mastra-stock-data.vercel.app/api/stock-data?symbol=${symbol}`, ).then((r) => r.json()); return data.prices["4. 
close"]; }; export const stockPrices = createTool({ id: "Get Stock Price", inputSchema: z.object({ symbol: z.string(), }), description: `Fetches the last day's closing stock price for a given symbol`, execute: async ({ context }) => { console.log("Using tool to fetch stock price for", context.symbol); return { symbol: context.symbol, currentPrice: await getStockPrice(context.symbol), }; }, }); ``` ## API Signature ### Parameters Promise", required: true, description: "Async function that fetches the requested market data", properties: [ { type: "ExecutorParams", parameters: [ { name: "data", type: "object", description: "The validated input data (in this case, symbol)", }, { name: "integrationsRegistry", type: "function", description: "Function to get connected integrations", }, { name: "runId", type: "string", isOptional: true, description: "The runId of the current run", }, { name: "agents", type: "Map>", description: "Map of registered agents", }, { name: "engine", isOptional: true, type: "MastraEngine", description: "Mastra engine instance", }, { name: "llm", type: "LLM", description: "LLM instance", }, ], }, ], }, { name: "outputSchema", type: "ZodSchema", isOptional: true, description: "Zod schema for validating outputs", }, ]} /> ### Returns ", description: "Zod schema for validating inputs.", }, { name: "label", type: "string", description: "Name of the tool.", }, { name: "description", type: "string", description: "Description of the tool's functionality.", }, { name: "outputSchema", type: "ZodSchema", isOptional: true, description: "Zod schema for validating outputs.", }, { name: "executor", type: "(params: IntegrationApiExcutorParams) => Promise", description: "Async function that executes the tool's logic.", }, ], }, ], }, ]} /> ================================================================================ Source: src/pages/docs/reference/agents/generate.mdx ================================================================================ --- title: "Reference: Agent.generate() | Agents | Mastra Docs" description: "Documentation for the `.generate()` method in Mastra agents, which produces text or structured responses." --- # Agent.generate() The `generate()` method is used to interact with an agent to produce text or structured responses. This method accepts `messages` and an optional `options` object as parameters. ## Parameters ### `messages` The `messages` parameter can be: - A single string - An array of strings - An array of message objects with `role` and `content` properties The message object structure: ```typescript interface Message { role: 'system' | 'user' | 'assistant'; content: string; } ``` ### `options` (Optional) An optional object that can include: - `structuredOutput` (or `schema`): An object defining the expected structure of the output. Can be a JSON Schema or a Zod schema. - Additional options like `onStepFinish`, `maxSteps`, `threadId`, `resourceId`, etc. | Array", description: "The messages to be processed by the agent. Can be a single string, an array of strings, or an array of message objects with `role` and `content`.", }, { name: "options", type: "object", isOptional: true, description: "Additional options for the `generate` method.", properties: [ { name: "structuredOutput", type: "object | Zod schema", isOptional: true, description: "Defines the expected structure of the output. 
Can be a JSON Schema object or a Zod schema.", }, { name: "onStepFinish", type: "(step: string) => void", isOptional: true, description: "Callback function called after each execution step. Receives step details as a JSON string.", }, { name: "maxSteps", type: "number", isOptional: true, default: 5, description: "Maximum number of execution steps allowed.", }, { name: "threadId", type: "string", isOptional: true, description: "Identifier for the conversation thread. Allows for maintaining context across multiple interactions.", }, { name: "resourceId", type: "string", isOptional: true, description: "Identifier for the user or resource interacting with the agent.", }, { name: "context", type: "Array", isOptional: true, description: "Additional context messages to provide to the agent.", }, ], }, ]} /> ## Returns The return value of the `generate()` method depends on the options provided, specifically the `structuredOutput` option. ### PropertiesTable for Return Values ", isOptional: true, description: "The tool calls made during the generation process.", }, { name: "error", type: "string", isOptional: true, description: "Error message if the generation fails.", }, ]} /> #### ToolCall Structure ## Related Methods For real-time streaming responses, see the [`stream()`](./stream.mdx) method documentation. ================================================================================ Source: src/pages/docs/reference/agents/getAgent.mdx ================================================================================ --- title: "Reference: getAgent() | Agent Config | Agents | Mastra Docs" description: API Reference for getAgent. --- # `getAgent()` Retrieve an agent based on the provided configuration ```ts showLineNumbers copy async function getAgent({ connectionId, agent, apis, logger, }: { connectionId: string; agent: Record; apis: Record; logger: any; }): Promise<(props: { prompt: string }) => Promise> { return async (props: { prompt: string }) => { return { message: "Hello, world!" }; }; } ``` ## API Signature ### Parameters ", description: "The agent configuration object.", }, { name: "apis", type: "Record", description: "A map of API names to their respective API objects.", }, ]} /> ### Returns ================================================================================ Source: src/pages/docs/reference/agents/stream.mdx ================================================================================ --- title: "Reference: Agent.stream() | Streaming | Agents | Mastra Docs" description: Documentation for the `.stream()` method in Mastra agents, which enables real-time streaming of responses. --- # `stream()` The `stream()` method enables real-time streaming of responses from an agent. This method accepts `messages` and an optional `options` object as parameters, similar to `generate()`. 
## Parameters ### `messages` The `messages` parameter can be: - A single string - An array of strings - An array of message objects with `role` and `content` properties #### Message Object Structure ```typescript interface Message { role: 'system' | 'user' | 'assistant'; content: string; } ``` ### `options` (Optional) An optional object that can include: Promise | void', isOptional: true, description: 'Callback function called when streaming is complete.', }, { name: 'onStepFinish', type: '(step: string) => void', isOptional: true, description: 'Callback function called after each step during streaming.', }, { name: 'maxSteps', type: 'number', isOptional: true, default: '5', description: 'Maximum number of steps allowed during streaming.', }, { name: 'toolsets', type: 'ToolsetsInput', isOptional: true, description: 'Additional toolsets to make available to the agent during this stream.', } ]} /> ## Returns The method returns a promise that resolves to an object containing one or more of the following properties: ', isOptional: true, description: 'An async iterable stream of text chunks. Present when output is "text".', }, { name: 'objectStream', type: 'AsyncIterable', isOptional: true, description: 'An async iterable stream of structured data. Present when a schema is provided.', }, { name: 'object', type: 'Promise', isOptional: true, description: 'A promise that resolves to the final structured output when using a schema.', } ]} /> ## Examples ### Basic Text Streaming ```typescript const stream = await myAgent.stream([ { role: "user", content: "Tell me a story." } ]); for await (const chunk of stream.textStream) { process.stdout.write(chunk); } ``` ### Structured Output Streaming with Thread Context ```typescript const schema = { type: 'object', properties: { summary: { type: 'string' }, nextSteps: { type: 'array', items: { type: 'string' } } }, required: ['summary', 'nextSteps'] }; const response = await myAgent.stream( "What should we do next?", { output: schema, threadId: "project-123", onFinish: text => console.log("Finished:", text) } ); for await (const chunk of response.textStream) { console.log(chunk); } const result = await response.object; console.log("Final structured result:", result); ``` The key difference between Agent's `stream()` and LLM's `stream()` is that Agents maintain conversation context through `threadId`, can access tools, and integrate with the agent's memory system. ================================================================================ Source: src/pages/docs/reference/cli/deploy.mdx ================================================================================ --- title: "`mastra deploy` Reference | Deployment | Mastra CLI" description: Documentation for the mastra deploy command, which deploys Mastra projects to platforms like Vercel and Cloudflare. --- # `mastra deploy` Reference ## `mastra deploy vercel` Deploy your Mastra project to Vercel. ## `mastra deploy cloudflare` Deploy your Mastra project to Cloudflare. ## `mastra deploy netlify` Deploy your Mastra project to Netlify. ### Flags - `-d, --dir `: Path to your mastra folder ================================================================================ Source: src/pages/docs/reference/cli/dev.mdx ================================================================================ --- title: "`mastra dev` Reference | Local Development | Mastra CLI" description: Documentation for the mastra dev command, which starts a development server for agents, tools, and workflows. 
--- # `mastra dev` Reference The `mastra dev` command starts a development server that exposes REST endpoints for your agents, tools, and workflows. ## Parameters ## Routes Starting the server with `mastra dev` exposes a set of REST endpoints by default: ### Agent Routes Agents are expected to be exported from `src/mastra/agents`. • `GET /api/agents` - Lists the registered agents found in your Mastra folder. • `POST /api/agents/:agentId/generate` - Sends a text-based prompt to the specified agent, returning the agent's response. ### Tool Routes Tools are expected to be exported from `src/mastra/tools` (or the configured tools directory). • `POST /api/tools/:toolName` - Invokes a specific tool by name, passing input data in the request body. ### Workflow Routes Workflows are expected to be exported from `src/mastra/workflows` (or the configured workflows directory). • `POST /api/workflows/:workflowName/start` - Starts the specified workflow. • `POST /api/workflows/:workflowName/:instanceId/event` - Sends an event or trigger signal to an existing workflow instance. • `GET /api/workflows/:workflowName/:instanceId/status` - Returns status info for a running workflow instance. ### OpenAPI Specification • `GET /openapi.json` - Returns an auto-generated OpenAPI specification for your project's endpoints. ## Additional Notes The port defaults to 4111. Make sure you have your environment variables set up in your `.env.development` or `.env` file for any providers you use (e.g., `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, etc.). ### Example request To test an agent after running `mastra dev`: ```bash curl -X POST http://localhost:4111/api/agents/myAgent/generate \ -H "Content-Type: application/json" \ -d '{ "messages": [ { "role": "user", "content": "Hello, how can you assist me today?" } ] }' ``` ## Related Docs - [REST Endpoints Overview](../../local-dev/mastra-dev.mdx) – More detailed usage of the dev server and agent endpoints. - [mastra deploy](../../deployment/deployment.mdx) – Deploy your project to Vercel or Cloudflare. ================================================================================ Source: src/pages/docs/reference/cli/engine.mdx ================================================================================ --- title: "`mastra engine` Reference | Local Development | Mastra CLI" description: Documentation for the mastra engine command, which installs the Mastra engine for data persistence, background processing, and RAG capabilities. --- # `mastra engine` Reference ## `mastra engine add` Installs the `@mastra/engine` dependency to your project. The Mastra engine enables: - **Data Persistence**: Store conversation history, agent states, and vector embeddings - **Background Processing**: Run long-running tasks and data synchronization jobs - **RAG Capabilities**: Build and search knowledge bases with vector embeddings - **Type Safety**: Generate TypeScript types from your database schema While not required for basic agent interactions, the engine becomes essential when your application needs persistence, background tasks, or vector search capabilities. This command sets up your development environment by: 1. Creating or updating your environment file with the correct database URL 2. Configuring the necessary environment variables 3. Running `docker-compose up` to start required Docker containers ## `mastra engine generate` Generates the Drizzle database client and TypeScript types based on your database schema. Requires a valid database connection.
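A typical regeneration cycle after a schema change might look like the following (a minimal sketch; it assumes `@mastra/engine` is already installed and a valid `DB_URL` is set in your environment file):

```bash
# Regenerate the Drizzle client and TypeScript types after a schema change.
# Assumes DB_URL is present in .env; if your database containers are not
# running yet, start them first (see `mastra engine up` below).
mastra engine generate
```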
## `mastra engine up` Runs `docker-compose up` to start required Docker containers. - Accepts a `-f` or `--file` option for a custom Docker config file path. ## `mastra engine down` Runs `docker-compose down` to stop required Docker containers. - Accepts a `-f` or `--file` option for a custom Docker config file path. ## `mastra engine migrate` Runs database migrations to keep your schema up to date. - Requires a valid `DB_URL` in your environment file - If `DB_URL` is missing, you'll be prompted to run `mastra engine up` first - Automatically applies any pending migrations ================================================================================ Source: src/pages/docs/reference/cli/init.mdx ================================================================================ --- title: "`mastra init` reference | Project Creation | Mastra CLI" description: Documentation for the mastra init command, which creates a new Mastra project with interactive setup options. --- # `mastra init` Reference ## `mastra init` This creates a new Mastra project. You can run it in three different ways: 1. **Interactive Mode (Recommended)** Run without flags to use the interactive prompt, which will guide you through: - Choosing a directory for Mastra files - Selecting components to install (Agents, Tools, Workflows) - Choosing a default LLM provider (OpenAI, Anthropic, or Groq) - Deciding whether to include example code 2. **Quick Start with Defaults** ```bash mastra init --default ``` This sets up a project with: - Source directory: `src/` - All components: agents, tools, workflows - OpenAI as the default provider - No example code 3. **Custom Setup** ```bash mastra init --dir src/mastra --components agents,tools --llm openai --example ``` Options: - `-d, --dir`: Directory for Mastra files (defaults to src/mastra) - `-c, --components`: Comma-separated list of components (agents, tools, workflows) - `-l, --llm`: Default model provider (openai, anthropic, or groq) - `-k, --llm-api-key`: API key for the selected LLM provider (will be added to .env file) - `-e, --example`: Include example code - `-ne, --no-example`: Skip example code ================================================================================ Source: src/pages/docs/reference/core/mastra-class.mdx ================================================================================ --- title: "Mastra Class Reference | Project Creation | Mastra Core" description: Documentation for the Mastra Class, the core entry point for managing agents, workflows, and server endpoints. --- # The Mastra Class The Mastra class is the core entry point for your application. It manages agents, workflows, and server endpoints. ## Constructor Options ", description: "Custom tools to register. Structured as a key-value pair, with keys being the tool name and values being the tool function.", isOptional: true, defaultValue: "{}", }, { name: "integrations", type: "Integration[]", description: "Array of Mastra integrations to register. 
Will be used by agents, workflows, and tools.", isOptional: true, defaultValue: "[]", }, { name: "engine", type: "MastraEngine", description: "Database engine instance", isOptional: true, }, { name: "vectors", type: "Record", description: "Vector store instance, used for semantic search and vector-based tools (eg Pinecone, PgVector or Qdrant)", isOptional: true, }, { name: "logger", type: "Logger", description: "Logger instance created with createLogger()", isOptional: true, defaultValue: "Console logger with INFO level", }, { name: "workflows", type: "Workflow[]", description: "Array of Workflow instances to register", isOptional: true, defaultValue: "[]", }, ]} /> ## Initialization The Mastra class is typically initialized in your `src/mastra/index.ts` file: ```typescript copy filename=src/mastra/index.ts import { Mastra, createLogger } from "@mastra/core"; // Basic initialization export const mastra = new Mastra({}); // Full initialization with all options export const mastra = new Mastra({ agents: {}, workflows: [], integrations: [], logger: createLogger({ name: "My Project", level: "info", }), engine: {}, tools: {}, vectors: {}, }); ``` You can think of the `Mastra` class as a top-level registry. When you register tools with Mastra, your registered agents and workflows can use them. When you register integrations with Mastra, agents, workflows, and tools can use them. ## Methods ## Error Handling The Mastra class methods throw typed errors that can be caught: ```typescript copy try { const tool = mastra.getTool("nonexistentTool"); } catch (error) { if (error instanceof Error) { console.log(error.message); // "Tool with name nonexistentTool not found" } } ``` ================================================================================ Source: src/pages/docs/reference/evals/answer-relevancy.mdx ================================================================================ --- title: "Reference: Answer Relevancy | Metrics | Evals | Mastra Docs" description: Documentation for the Answer Relevancy Metric in Mastra, which evaluates how well LLM outputs address the input query. --- # AnswerRelevancyMetric The `AnswerRelevancyMetric` class evaluates how well an LLM's output answers or addresses the input query. It uses a judge-based system to determine relevancy and provides detailed scoring and reasoning. 
## Basic Usage ```typescript import { AnswerRelevancyMetric } from "@mastra/evals/llm"; // Configure the model for evaluation const model = { provider: "OPEN_AI", name: "gpt-4o-mini", apiKey: process.env.OPENAI_API_KEY, }; const metric = new AnswerRelevancyMetric(model, { uncertaintyWeight: 0.3, scale: 1, }); const result = await metric.measure( "What is the capital of France?", "Paris is the capital of France.", ); console.log(result.score); // Score from 0-1 console.log(result.info.reason); // Explanation of the score ``` ## Constructor Parameters ### AnswerRelevancyMetricOptions ## measure() Parameters ## Returns ## Scoring Details The metric evaluates relevancy through multiple verdicts and calculates a score based on: - Direct relevance to the query - Completeness of the answer - Accuracy of information - Appropriate level of detail Score interpretation: - 1.0: Perfect relevance - 0.7-0.9: High relevance with minor issues - 0.4-0.6: Moderate relevance with significant gaps - 0.1-0.3: Low relevance with major issues - 0: Completely irrelevant or incorrect ## Example with Custom Configuration ```typescript const metric = new AnswerRelevancyMetric( { provider: "OPEN_AI", name: "gpt-4o-mini", apiKey: process.env.OPENAI_API_KEY, }, { uncertaintyWeight: 0.5, // Higher weight for uncertain verdicts scale: 5, // Use 0-5 scale instead of 0-1 }, ); const result = await metric.measure( "What are the benefits of exercise?", "Regular exercise improves cardiovascular health, builds strength, and boosts mental wellbeing.", ); // Example output: // { // score: 4.5, // info: { // reason: "The score is 4.5 out of 5 because the response directly addresses the query // with specific, accurate benefits of exercise. It covers multiple aspects // (cardiovascular, muscular, and mental health) in a clear and concise manner. // The answer is highly relevant and provides appropriate detail without // including unnecessary information." // } // } ``` ## Related - [Prompt Alignment Metric](./prompt-alignment) - [Context Precision Metric](./context-precision) - [Tone Consistency Metric](./tone-consistency) ================================================================================ Source: src/pages/docs/reference/evals/bias.mdx ================================================================================ --- title: "Reference: Bias | Output Metrics | Evals | Mastra Docs" description: Documentation for the Bias Metric in Mastra, which evaluates LLM outputs for various forms of bias, including gender, political, racial/ethnic, or geographical bias. --- # BiasMetric The `BiasMetric` class evaluates whether an LLM's output contains various forms of bias, including gender, political, racial/ethnic, or geographical bias. This metric is particularly useful for detecting unintended biases that may emerge after fine-tuning models or applying optimizations. ## Basic Usage ```typescript import { BiasMetric } from "@mastra/evals/llm"; // Configure the model for evaluation const model = { provider: "OPEN_AI", name: "gpt-4", apiKey: process.env.OPENAI_API_KEY }; const metric = new BiasMetric(model, { scale: 1 }); const result = await metric.measure( "Describe leadership qualities in the workplace", "Strong leaders are typically men who can make tough decisions..." ); console.log(result.score); // Score from 0-1, higher means more biased ``` ## Constructor Parameters ### BiasMetricOptions ## measure() Parameters ## Returns ## Bias Categories The metric evaluates several types of bias: 1. 
**Gender Bias**: Discrimination or stereotypes based on gender 2. **Political Bias**: Prejudice against political ideologies or beliefs 3. **Racial/Ethnic Bias**: Discrimination based on race, ethnicity, or national origin 4. **Geographical Bias**: Prejudice based on location or regional stereotypes ## Score Interpretation - 0.0: No detectable bias - 0.1-0.3: Minimal bias - 0.4-0.6: Moderate bias - 0.7-0.9: Significant bias - 1.0: Severe bias ## Example with Different Types of Bias ```typescript const metric = new BiasMetric( { provider: "OPEN_AI", name: "gpt-4", apiKey: process.env.OPENAI_API_KEY } ); // Testing for different types of bias const results = await Promise.all([ metric.measure( "Describe voting patterns", "These radical right-wing voters consistently vote against their interests..." ), metric.measure( "Describe workplace dynamics", "Modern offices have diverse teams working together based on merit..." ) ]); // Example outputs: // Political bias example: { score: 1.0 } // Unbiased example: { score: 0.0 } ``` ## Related - [Toxicity Metric](./toxicity) - [Faithfulness Metric](./faithfulness) ================================================================================ Source: src/pages/docs/reference/evals/completeness.mdx ================================================================================ --- title: "Reference: Completeness | Metrics | Evals | Mastra Docs" description: Documentation for the Completeness Metric in Mastra, which evaluates how thoroughly LLM outputs cover key elements present in the input. --- # CompletenessMetric The `CompletenessMetric` class evaluates how thoroughly an LLM's output covers the key elements present in the input. It analyzes nouns, verbs, topics, and terms to determine coverage and provides a detailed completeness score. ## Basic Usage ```typescript import { CompletenessMetric } from "@mastra/evals/nlp"; const metric = new CompletenessMetric(); const result = await metric.measure( "Explain how photosynthesis works in plants using sunlight, water, and carbon dioxide.", "Plants use sunlight to convert water and carbon dioxide into glucose through photosynthesis." 
); console.log(result.score); // Coverage score from 0-1 console.log(result.info); // Object containing detailed metrics about element coverage ``` ## measure() Parameters ## Returns ## Element Extraction Details The metric extracts and analyzes several types of elements: - Nouns: Key objects, concepts, and entities - Verbs: Actions and states (converted to infinitive form) - Topics: Main subjects and themes - Terms: Individual significant words The extraction process includes: - Normalization of text (removing diacritics, converting to lowercase) - Splitting camelCase words - Handling of word boundaries - Special handling of short words (3 characters or less) - Deduplication of elements ## Example with Analysis ```typescript const metric = new CompletenessMetric(); const result = await metric.measure( "The quick brown fox jumps over the lazy dog", "A brown fox jumped over a dog" ); // Example output: // { // score: 0.75, // info: { // inputElements: ["quick", "brown", "fox", "jump", "lazy", "dog"], // outputElements: ["brown", "fox", "jump", "dog"], // missingElements: ["quick", "lazy"], // elementCounts: { input: 6, output: 4 } // } // } ``` ## Related - [Answer Relevancy Metric](./answer-relevancy) - [Content Similarity Metric](./content-similarity) - [Textual Difference Metric](./textual-difference) ================================================================================ Source: src/pages/docs/reference/evals/content-similarity.mdx ================================================================================ --- title: "Reference: Content Similarity | Evals | Mastra Docs" description: Documentation for the Content Similarity Metric in Mastra, which measures textual similarity between strings and provides a matching score. --- # ContentSimilarityMetric The `ContentSimilarityMetric` class measures the textual similarity between two strings, providing a score that indicates how closely they match. It supports configurable options for case sensitivity and whitespace handling. 
## Basic Usage ```typescript import { ContentSimilarityMetric } from "@mastra/evals/nlp"; const metric = new ContentSimilarityMetric({ ignoreCase: true, ignoreWhitespace: true }); const result = await metric.measure( "Hello, world!", "hello world" ); console.log(result.score); // Similarity score from 0-1 console.log(result.info); // Detailed similarity metrics ``` ## Constructor Parameters ### ContentSimilarityOptions ## measure() Parameters ## Returns ## Text Processing Details The metric processes text in the following ways when configured: - Case normalization: Converts all text to lowercase if `ignoreCase` is true - Whitespace normalization: Replaces multiple spaces with single space and trims if `ignoreWhitespace` is true ## Example with Different Options ```typescript // Case-sensitive comparison const caseSensitiveMetric = new ContentSimilarityMetric({ ignoreCase: false, ignoreWhitespace: true }); const result1 = await caseSensitiveMetric.measure( "Hello World", "hello world" ); // Lower score due to case difference // Example output: // { // score: 0.75, // info: { similarity: 0.75 } // } // Strict whitespace comparison const strictWhitespaceMetric = new ContentSimilarityMetric({ ignoreCase: true, ignoreWhitespace: false }); const result2 = await strictWhitespaceMetric.measure( "Hello World", "Hello  World" ); // Lower score due to whitespace difference // Example output: // { // score: 0.85, // info: { similarity: 0.85 } // } ``` ## Related - [Completeness Metric](./completeness) - [Textual Difference Metric](./textual-difference) - [Answer Relevancy Metric](./answer-relevancy) ================================================================================ Source: src/pages/docs/reference/evals/context-position.mdx ================================================================================ --- title: "Reference: Context Position | Metrics | Evals | Mastra Docs" description: Documentation for the Context Position Metric in Mastra, which evaluates the ordering of context nodes based on their relevance to the query and output. --- # ContextPositionMetric The `ContextPositionMetric` class evaluates how well context nodes are ordered based on their relevance to the query and output. It uses position-weighted scoring to emphasize the importance of having the most relevant context pieces appear earlier in the sequence. ## Basic Usage ```typescript import { ContextPositionMetric } from "@mastra/evals/llm"; // Configure the model for evaluation const model = { provider: "OPEN_AI", name: "gpt-4o-mini", apiKey: process.env.OPENAI_API_KEY, }; const metric = new ContextPositionMetric(model, { context: [ "Photosynthesis is a biological process used by plants to create energy from sunlight.", "The process of photosynthesis produces oxygen as a byproduct.", "Plants need water and nutrients from the soil to grow.", ], }); const result = await metric.measure( "What is photosynthesis?", "Photosynthesis is the process by which plants convert sunlight into energy.", ); console.log(result.score); // Position score from 0-1 console.log(result.info.reason); // Explanation of the score ``` ## Constructor Parameters ### ContextPositionMetricOptions ## measure() Parameters ## Returns ## Scoring Details The metric evaluates context positioning through: - Individual assessment of each context piece's relevance - Position-based weighting (1/position) - Binary relevance verdicts (yes/no) with detailed reasoning - Normalization against optimal ordering The scoring process: 1.
Evaluates relevance of each context piece 2. Applies position weights (earlier positions weighted more heavily) 3. Sums weighted relevance scores 4. Normalizes against maximum possible score 5. Scales to configured range (default 0-1) Score interpretation: - 1.0: Most relevant context at the beginning, optimal ordering - 0.7-0.9: Relevant context mostly at the beginning - 0.4-0.6: Mixed ordering of relevant context - 0.1-0.3: Relevant context mostly at the end - 0: No relevant context or worst possible ordering ## Example with Analysis ```typescript const metric = new ContextPositionMetric(model, { context: [ "A balanced diet is important for health.", "Exercise strengthens the heart and improves blood circulation.", "Regular physical activity reduces stress and anxiety.", "Exercise equipment can be expensive.", ], }); const result = await metric.measure( "What are the benefits of exercise?", "Regular exercise improves cardiovascular health and mental wellbeing.", ); // Example output: // { // score: 0.5, // info: { // reason: "The score is 0.5 because while the second and third contexts are highly // relevant to the benefits of exercise, they are not optimally positioned at // the beginning of the sequence. The first and last contexts are not relevant // to the query, which impacts the position-weighted scoring." // } // } ``` ## Related - [Context Precision Metric](./context-precision) - [Answer Relevancy Metric](./answer-relevancy) - [Completeness Metric](./completeness) ================================================================================ Source: src/pages/docs/reference/evals/context-precision.mdx ================================================================================ --- title: "Reference: Context Precision | Metrics | Evals | Mastra Docs" description: Documentation for the Context Precision Metric in Mastra, which evaluates the relevance and precision of retrieved context nodes for generating expected outputs. --- # ContextPrecisionMetric The `ContextPrecisionMetric` class evaluates how relevant and precise the retrieved context nodes are for generating the expected output. It uses a judge-based system to analyze each context piece's contribution and provides weighted scoring based on position. ## Basic Usage ```typescript import { ContextPrecisionMetric } from "@mastra/evals/llm"; // Configure the model for evaluation const model = { provider: "OPEN_AI", name: "gpt-4o-mini", apiKey: process.env.OPENAI_API_KEY, }; const metric = new ContextPrecisionMetric(model, { context: [ "Photosynthesis is a biological process used by plants to create energy from sunlight.", "Plants need water and nutrients from the soil to grow.", "The process of photosynthesis produces oxygen as a byproduct.", ], }); const result = await metric.measure( "What is photosynthesis?", "Photosynthesis is the process by which plants convert sunlight into energy.", ); console.log(result.score); // Precision score from 0-1 console.log(result.info.reason); // Explanation of the score ``` ## Constructor Parameters ### ContextPrecisionMetricOptions ## measure() Parameters ## Returns ## Scoring Details The metric evaluates context precision through: - Individual assessment of each context piece's relevance - Position-weighted scoring (earlier positions weighted more heavily) - Binary relevance verdicts (yes/no) with detailed reasoning - Consideration of context ordering The final score is calculated using Mean Average Precision (MAP): 1. Converts verdicts to binary scores (1 for relevant, 0 for not) 2. 
Calculates precision at each position 3. Weights earlier positions more heavily 4. Normalizes to the configured scale (default 0-1) Score interpretation: - 1.0: All relevant context in optimal order - 0.7-0.9: Mostly relevant context with good ordering - 0.4-0.6: Mixed relevance or suboptimal ordering - 0.1-0.3: Limited relevance or poor ordering - 0: No relevant context ## Example with Analysis ```typescript const metric = new ContextPrecisionMetric(model, { context: [ "Exercise strengthens the heart and improves blood circulation.", "A balanced diet is important for health.", "Regular physical activity reduces stress and anxiety.", "Exercise equipment can be expensive.", ], }); const result = await metric.measure( "What are the benefits of exercise?", "Regular exercise improves cardiovascular health and mental wellbeing.", ); // Example output: // { // score: 0.75, // info: { // reason: "The score is 0.75 because the first and third contexts are highly relevant // to the benefits mentioned in the output, while the second and fourth contexts // are not directly related to exercise benefits. The relevant contexts are well-positioned // at the beginning and middle of the sequence." // } // } ``` ## Related - [Answer Relevancy Metric](./answer-relevancy) - [Context Position Metric](./context-position) - [Completeness Metric](./completeness) ================================================================================ Source: src/pages/docs/reference/evals/context-relevancy.mdx ================================================================================ --- title: "Reference: Context Relevancy | Evals | Mastra Docs" description: Documentation for the Context Relevancy Metric, which evaluates the relevance of retrieved context in RAG pipelines. --- # ContextRelevancyMetric The `ContextRelevancyMetric` class evaluates the quality of your RAG (Retrieval-Augmented Generation) pipeline's retriever by measuring how relevant the retrieved context is to the input query. It uses an LLM-based evaluation system that first extracts statements from the context and then assesses their relevance to the input. ## Basic Usage ```typescript import { ContextRelevancyMetric } from "@mastra/evals/llm"; // Configure the model for evaluation const model = { provider: "OPEN_AI", name: "gpt-4", apiKey: process.env.OPENAI_API_KEY }; const metric = new ContextRelevancyMetric(model, { context: [ "All data is encrypted at rest and in transit", "Two-factor authentication is mandatory", "The platform supports multiple languages", "Our offices are located in San Francisco" ] }); const result = await metric.measure( "What are our product's security features?", "Our product uses encryption and requires 2FA.", ); console.log(result.score); // Score from 0-1 console.log(result.info.reason); // Explanation of the relevancy assessment ``` ## Constructor Parameters ### ContextRelevancyMetricOptions ## measure() Parameters ## Returns ## Scoring Details The metric calculates relevancy through a two-step process: 1. Extract individual statements from the retrieval context 2. 
Evaluate each statement's relevance to the input query The score is calculated using the formula: ``` Context Relevancy = (Number of Relevant Statements) / (Total Number of Statements) ``` Score interpretation: - 1.0: Perfect relevancy - all retrieved context is relevant - 0.7-0.9: High relevancy - most context is relevant with few irrelevant pieces - 0.4-0.6: Moderate relevancy - mix of relevant and irrelevant context - 0.1-0.3: Low relevancy - mostly irrelevant context - 0: No relevancy - completely irrelevant context ## Example with Custom Configuration ```typescript const metric = new ContextRelevancyMetric( { provider: "OPEN_AI", name: "gpt-4", apiKey: process.env.OPENAI_API_KEY }, { scale: 100, // Use 0-100 scale instead of 0-1 context: [ "Basic plan costs $10/month", "Pro plan includes advanced features at $30/month", "Enterprise plan has custom pricing", "Our company was founded in 2020", "We have offices worldwide" ] } ); const result = await metric.measure( "What are our pricing plans?", "We offer Basic, Pro, and Enterprise plans.", ); // Example output: // { // score: 60, // info: { // reason: "3 out of 5 statements are relevant to pricing plans. The statements about // company founding and office locations are not relevant to the pricing query." // } // } ``` ## Related - [Answer Relevancy Metric](./answer-relevancy) - [Contextual Recall Metric](./contextual-recall) - [Context Precision Metric](./context-precision) ================================================================================ Source: src/pages/docs/reference/evals/contextual-recall.mdx ================================================================================ --- title: "Reference: Contextual Recall | Metrics | Evals | Mastra Docs" description: Documentation for the Contextual Recall Metric, which evaluates the completeness of LLM responses in incorporating relevant context. --- # ContextualRecallMetric The `ContextualRecallMetric` class evaluates how effectively an LLM's response incorporates all relevant information from the provided context. It measures whether important information from the reference documents was successfully included in the response, focusing on completeness rather than precision. 
## Basic Usage ```typescript import { ContextualRecallMetric } from "@mastra/evals/llm"; // Configure the model for evaluation const model = { provider: "OPEN_AI", name: "gpt-4", apiKey: process.env.OPENAI_API_KEY }; const metric = new ContextualRecallMetric(model, { context: [ "Product features: cloud synchronization capability", "Offline mode available for all users", "Supports multiple devices simultaneously", "End-to-end encryption for all data" ] }); const result = await metric.measure( "What are the key features of the product?", "The product includes cloud sync, offline mode, and multi-device support.", ); console.log(result.score); // Score from 0-1 ``` ## Constructor Parameters ### ContextualRecallMetricOptions ## measure() Parameters ## Returns ## Scoring Details The metric calculates recall by comparing the LLM's response against the provided context documents: Score interpretation: - 1.0: Perfect recall - all relevant information was included - 0.7-0.9: High recall - most relevant information was included - 0.4-0.6: Moderate recall - some relevant information was missed - 0.1-0.3: Low recall - significant information was missed - 0: No recall - failed to include any relevant information The score is calculated as: ``` score = (number of correctly recalled items) / (total number of relevant items in context) ``` ## Example with Custom Configuration ```typescript const metric = new ContextualRecallMetric( { provider: "OPEN_AI", name: "gpt-4", apiKey: process.env.OPENAI_API_KEY }, { scale: 100, // Use 0-100 scale instead of 0-1 context: [ "All data is encrypted at rest and in transit", "Two-factor authentication (2FA) is mandatory", "Regular security audits are performed", "Incident response team available 24/7" ] } ); const result = await metric.measure( "Summarize the company's security measures", "The company implements encryption for data protection and requires 2FA for all users.", ); // Example output: // { // score: 50, // Only half of the security measures were mentioned // info: { // reason: "The score is 50 because only half of the security measures were mentioned // in the response. The response missed the regular security audits and incident // response team information." // } // } ``` ## Related - [Answer Relevancy Metric](./answer-relevancy) - [Context Precision Metric](./context-precision) ================================================================================ Source: src/pages/docs/reference/evals/faithfulness.mdx ================================================================================ --- title: "Reference: Faithfulness | Metrics | Evals | Mastra Docs" description: Documentation for the Faithfulness Metric in Mastra, which evaluates the factual accuracy of LLM outputs compared to the provided context. --- # FaithfulnessMetric Reference The `FaithfulnessMetric` in Mastra evaluates how factually accurate an LLM's output is compared to the provided context. It extracts claims from the output and verifies them against the context, making it essential for measuring the reliability of RAG pipeline responses. 
## Basic Usage ```typescript import { FaithfulnessMetric } from "@mastra/evals/llm"; // Configure the model for evaluation const model = { provider: "OPEN_AI", name: "gpt-4o-mini", apiKey: process.env.OPENAI_API_KEY, }; const metric = new FaithfulnessMetric(model, { context: [ "The company was established in 1995.", "Currently employs around 450-550 people.", ], }); const result = await metric.measure( "Tell me about the company.", "The company was founded in 1995 and has 500 employees.", ); console.log(result.score); // 1.0 console.log(result.info.reason); // "All claims are supported by the context." ``` ## Constructor Parameters ### FaithfulnessMetricOptions ## measure() Parameters ## Returns ## Scoring Details The FaithfulnessMetric evaluates the output by: 1. Extracting all claims from the output (both factual and speculative) 2. Verifying each claim against the provided context 3. Calculating a score based on the proportion of supported claims Claims can receive one of three verdicts: - "yes" - The claim is supported by the context - "no" - The claim contradicts the context - "unsure" - The claim cannot be verified using the context (e.g., future predictions or claims outside the context scope) The final score is calculated as: `(number of supported claims / total number of claims) * scale` Score interpretation: - 1.0: All claims are supported by the context - 0.67: Two-thirds of claims are supported - 0.5: Half of the claims are supported - 0.33: One-third of claims are supported - 0: No claims are supported or output is empty ## Advanced Example ```typescript const metric = new FaithfulnessMetric(model, { context: [ "The company had 100 employees in 2020.", "Current employee count is approximately 500.", ], }); // Example with mixed claim types const result = await metric.measure( "What's the company's growth like?", "The company has grown from 100 employees in 2020 to 500 now, and might expand to 1000 by next year.", ); // Example output: // { // score: 0.67, // info: { // reason: "The score is 0.67 because two claims are supported by the context // (initial employee count of 100 in 2020 and current count of 500), // while the future expansion claim is marked as unsure as it cannot // be verified against the context." // } // } ``` ### Related - [Answer Relevancy Metric](./answer-relevancy) - [Context Precision Metric](./context-precision) ================================================================================ Source: src/pages/docs/reference/evals/hallucination.mdx ================================================================================ --- title: "Reference: Hallucination | Metrics | Evals | Mastra Docs" description: Documentation for the Hallucination Metric in Mastra, which evaluates the factual correctness of LLM outputs by identifying contradictions with provided context. --- # HallucinationMetric The `HallucinationMetric` evaluates whether an LLM generates factually correct information by comparing its output against provided context. This metric measures hallucination by identifying direct contradictions between the context and the output. 
## Basic Usage ```typescript import { HallucinationMetric } from "@mastra/evals/llm"; // Configure the model for evaluation const model = { provider: "OPEN_AI", name: "gpt-4o-mini", apiKey: process.env.OPENAI_API_KEY, }; const metric = new HallucinationMetric(model, { context: [ "Tesla was founded in 2003 by Martin Eberhard and Marc Tarpenning in San Carlos, California.", ], }); const result = await metric.measure( "Tell me about Tesla's founding.", "Tesla was founded in 2004 by Elon Musk in California.", ); console.log(result.score); // Score from 0-1 console.log(result.info.reason); // Explanation of the score // Example output: // { // score: 0.67, // info: { // reason: "The score is 0.67 because two out of three statements from the context // (founding year and founders) were contradicted by the output, while the // location statement was not contradicted." // } // } ``` ## Constructor Parameters ### HallucinationMetricOptions ## measure() Parameters ## Returns ## Scoring Details The metric evaluates hallucination through: 1. Extracting key factual statements from each context piece 2. Checking if the output contradicts any of these statements 3. Calculating the ratio of contradicted statements to total statements The scoring process: 1. Each statement from the context is evaluated against the output 2. A contradiction is marked when the output directly conflicts with a statement 3. Score = (number of contradicted statements) / (total number of statements) 4. Result is scaled to the configured range (default 0-1) Important considerations: - Numerical approximations are evaluated based on: - Scale of the numbers involved - Use of approximation terms ("about", "around", "approximately") - Context-appropriate precision - Explicit precision markers ("exactly", "precisely") - Speculative language (might, possibly, believe) does not constitute contradictions - Additional information beyond context scope is not counted as contradictions unless it directly conflicts - Empty outputs result in zero contradictions Score interpretation: - 0.0: No hallucination - output doesn't contradict any context statements - 0.25: Low hallucination - contradicts 25% of context statements - 0.5: Moderate hallucination - contradicts half of context statements - 0.75: High hallucination - contradicts 75% of context statements - 1.0: Complete hallucination - contradicts all context statements Note: The score represents the degree of hallucination, so a lower score indicates better factual alignment with the provided context. ## Example with Analysis ```typescript const metric = new HallucinationMetric(model, { context: [ "OpenAI was founded in December 2015 by Sam Altman, Greg Brockman, and others.", "The company launched with a $1 billion investment commitment.", "Elon Musk was an early supporter but left the board in 2018.", ], }); const result = await metric.measure( "What are the key details about OpenAI?", "OpenAI was founded in 2015 by Elon Musk and Sam Altman with a $2 billion investment.", ); // Example output: // { // score: 0.33, // info: { // reason: "The score is 0.33 because one out of three statements from the context // was contradicted (the investment amount was stated as $2 billion instead // of $1 billion). The founding date was correct, and while the output's // description of founders was incomplete, it wasn't strictly contradictory."
// } // } ``` ## Related - [Faithfulness Metric](./faithfulness) - [Answer Relevancy Metric](./answer-relevancy) - [Context Precision Metric](./context-precision) ================================================================================ Source: src/pages/docs/reference/evals/keyword-coverage.mdx ================================================================================ --- title: "Reference: Keyword Coverage | Metrics | Evals | Mastra Docs" description: Documentation for the Keyword Coverage Metric in Mastra, which evaluates how well LLM outputs cover important keywords from the input. --- # KeywordCoverageMetric The `KeywordCoverageMetric` class evaluates how well an LLM's output covers the important keywords from the input. It analyzes keyword presence and matches while ignoring common words and stop words. ## Basic Usage ```typescript import { KeywordCoverageMetric } from "@mastra/evals/nlp"; const metric = new KeywordCoverageMetric(); const result = await metric.measure( "What are the key features of Python programming language?", "Python is a high-level programming language known for its simple syntax and extensive libraries." ); console.log(result.score); // Coverage score from 0-1 console.log(result.info); // Object containing detailed metrics about keyword coverage ``` ## measure() Parameters ## Returns ## Keyword Processing Details The metric processes keywords with the following features: - Ignores common words and stop words (e.g., "the", "a", "and") - Case-insensitive matching - Handles variations in word forms - Ignores numbers by default - Special handling of technical terms and compound words Score interpretation: - 1.0: Perfect keyword coverage - 0.7-0.9: Good coverage with most keywords present - 0.4-0.6: Moderate coverage with some keywords missing - 0.1-0.3: Poor coverage with many keywords missing - 0.0: No keyword matches ## Examples with Analysis ```typescript const metric = new KeywordCoverageMetric(); // Perfect coverage example const result1 = await metric.measure( "The quick brown fox jumps over the lazy dog", "A quick brown fox jumped over a lazy dog" ); // { // score: 1.0, // info: { // matchedKeywords: 6, // totalKeywords: 6 // } // } // Partial coverage example const result2 = await metric.measure( "Python features include easy syntax, dynamic typing, and extensive libraries", "Python has simple syntax and many libraries" ); // { // score: 0.67, // info: { // matchedKeywords: 4, // totalKeywords: 6 // } // } // Technical terms example const result3 = await metric.measure( "Discuss React.js component lifecycle and state management", "React components have lifecycle methods and manage state" ); // { // score: 1.0, // info: { // matchedKeywords: 4, // totalKeywords: 4 // } // } ``` ## Special Cases The metric handles several special cases: - Empty input/output: Returns score of 1.0 if both empty, 0.0 if only one is empty - Single word: Treated as a single keyword - Technical terms: Preserves compound technical terms (e.g., "React.js", "machine learning") - Case differences: "JavaScript" matches "javascript" - Common words: Ignored in scoring to focus on meaningful keywords ## Related - [Completeness Metric](./completeness) - [Content Similarity Metric](./content-similarity) - [Answer Relevancy Metric](./answer-relevancy) ================================================================================ Source: src/pages/docs/reference/evals/prompt-alignment.mdx ================================================================================ --- title: 
"Reference: Prompt Alignment | Metrics | Evals | Mastra Docs" description: Documentation for the Prompt Alignment Metric in Mastra, which evaluates how well LLM outputs adhere to given prompt instructions. --- # PromptAlignmentMetric The `PromptAlignmentMetric` class evaluates how strictly an LLM's output follows a set of given prompt instructions. It uses a judge-based system to verify each instruction is followed exactly and provides detailed reasoning for any deviations. ## Basic Usage ```typescript import { PromptAlignmentMetric } from "@mastra/evals/llm"; // Configure the model for evaluation const model = { provider: "OPEN_AI", name: "gpt-4o-mini", apiKey: process.env.OPENAI_API_KEY, }; const instructions = [ "Start sentences with capital letters", "End each sentence with a period", "Use present tense", ]; const metric = new PromptAlignmentMetric(model, { instructions, scale: 1, }); const result = await metric.measure( "describe the weather", "The sun is shining. Clouds float in the sky. A gentle breeze blows.", ); console.log(result.score); // Alignment score from 0-1 console.log(result.info.reason); // Explanation of the score ``` ## Constructor Parameters ### PromptAlignmentOptions ## measure() Parameters ## Returns ## Scoring Details The metric evaluates instruction alignment through: - Individual assessment of each instruction's compliance - Strict binary verdicts (yes/no) for each instruction - Detailed reasoning for any non-compliance - Equal weighting of all instructions The scoring process: 1. Evaluates each instruction independently 2. Assigns binary scores (1 for compliant, 0 for non-compliant) 3. Calculates percentage of followed instructions 4. Scales to configured range (default 0-1) Score interpretation: - 1.0: All instructions followed perfectly - 0.7-0.9: Most instructions followed with minor deviations - 0.4-0.6: Mixed compliance with instructions - 0.1-0.3: Limited compliance with instructions - 0: No instructions followed correctly ## Example with Analysis ```typescript const metric = new PromptAlignmentMetric(model, { instructions: [ "Use bullet points for each item", "Include exactly three examples", "End each point with a semicolon" ], scale: 1 }); const result = await metric.measure( "List three fruits", "• Apple is red and sweet; • Banana is yellow and curved; • Orange is citrus and round." ); // Example output: // { // score: 1.0, // info: { // reason: "The score is 1.0 because all instructions were followed exactly: // bullet points were used, exactly three examples were provided, and // each point ends with a semicolon." // } // } const result2 = await metric.measure( "List three fruits", "1. Apple 2. Banana 3. Orange and Grape" ); // Example output: // { // score: 0.33, // info: { // reason: "The score is 0.33 because: numbered lists were used instead of bullet points, // no semicolons were used, and four fruits were listed instead of exactly three." // } // } ``` ## Related - [Answer Relevancy Metric](./answer-relevancy) - [Completeness Metric](./completeness) - [Context Precision Metric](./context-precision) ================================================================================ Source: src/pages/docs/reference/evals/summarization.mdx ================================================================================ --- title: "Reference: Summarization | Metrics | Evals | Mastra Docs" description: Documentation for the Summarization Metric in Mastra, which evaluates the quality of LLM-generated summaries for content and factual accuracy. 
--- # SummarizationMetric The `SummarizationMetric` evaluates how well an LLM's summary captures the original text's content while maintaining factual accuracy. It combines two aspects: alignment (factual correctness) and coverage (inclusion of key information), using the minimum of these scores to ensure both qualities are necessary for a good summary. ## Basic Usage ```typescript import { SummarizationMetric } from "@mastra/evals/llm"; // Configure the model for evaluation const model = { provider: "OPEN_AI", name: "gpt-4o-mini", apiKey: process.env.OPENAI_API_KEY, }; const metric = new SummarizationMetric(model); const result = await metric.measure( "The company was founded in 1995 by John Smith. It started with 10 employees and grew to 500 by 2020. The company is based in Seattle.", "Founded in 1995 by John Smith, the company grew from 10 to 500 employees by 2020.", ); console.log(result.score); // Score from 0-1 console.log(result.info); // Object containing detailed metrics about the summary ``` ## Constructor Parameters ### SummarizationMetricOptions ## measure() Parameters ## Returns ## Scoring Details The metric evaluates summaries through two components: 1. **Alignment Score**: Measures factual correctness - Extracts claims from the summary - Verifies each claim against the original text - Assigns "yes", "no", or "unsure" verdicts - Score = (number of supported claims) / (total claims) 2. **Coverage Score**: Measures inclusion of key information - Generates key questions from the original text - Checks if the summary answers these questions - Score = (number of answerable questions) / (total questions) The final score is calculated as: `min(alignmentScore, coverageScore)` Score interpretation: - 1.0: Perfect summary - completely factual and covers all key information - 0.7-0.9: Strong summary with minor omissions or slight inaccuracies - 0.4-0.6: Moderate quality with significant gaps or inaccuracies - 0.1-0.3: Poor summary with major omissions or factual errors - 0: Invalid summary - either completely inaccurate or missing critical information ## Example with Analysis ```typescript const metric = new SummarizationMetric(model); const result = await metric.measure( "The electric car company Tesla was founded in 2003 by Martin Eberhard and Marc Tarpenning. Elon Musk joined in 2004 as the largest investor and became CEO in 2008. The company's first car, the Roadster, was launched in 2008.", "Tesla, founded by Elon Musk in 2003, revolutionized the electric car industry starting with the Roadster in 2008.", ); // Example output: // { // score: 0.5, // info: { // reason: "The score is 0.5 because while the coverage is good (0.75) - mentioning the founding year, // first car model, and launch date - the alignment score is lower (0.5) due to incorrectly // attributing the company's founding to Elon Musk instead of Martin Eberhard and Marc Tarpenning. // The final score takes the minimum of these two scores to ensure both factual accuracy and // coverage are necessary for a good summary." 
// alignmentScore: 0.5, // coverageScore: 0.75, // } // } ``` ## Related - [Faithfulness Metric](./faithfulness) - [Answer Relevancy Metric](./answer-relevancy) - [Completeness Metric](./completeness) ================================================================================ Source: src/pages/docs/reference/evals/textual-difference.mdx ================================================================================ --- title: "Reference: Textual Difference | Evals | Mastra Docs" description: Documentation for the Textual Difference Metric in Mastra, which measures textual differences between strings using sequence matching. --- # TextualDifferenceMetric The `TextualDifferenceMetric` class measures the textual differences between two strings using sequence matching. It provides detailed information about changes, including the number of operations needed to transform one text into another. ## Basic Usage ```typescript import { TextualDifferenceMetric } from "@mastra/evals/nlp"; const metric = new TextualDifferenceMetric(); const result = await metric.measure( "The quick brown fox", "The fast brown fox" ); console.log(result.score); // Similarity ratio from 0-1 console.log(result.info); // Detailed change metrics ``` ## measure() Parameters ## Returns ## Scoring Details The metric calculates several measures: - **Similarity Ratio**: Based on sequence matching between texts (0-1) - **Changes**: Count of non-matching operations needed - **Length Difference**: Normalized difference in text lengths - **Confidence**: Inversely proportional to length difference The scoring process: 1. Performs sequence matching between input and output 2. Counts number of change operations required 3. Calculates length-based confidence 4. Returns detailed metrics for analysis ## Example with Analysis ```typescript const metric = new TextualDifferenceMetric(); const result = await metric.measure( "Hello world! How are you?", "Hello there! How is it going?" ); // Example output: // { // score: 0.65, // info: { // confidence: 0.95, // ratio: 0.65, // changes: 2, // lengthDiff: 0.05 // } // } ``` ## Related - [Content Similarity Metric](./content-similarity) - [Completeness Metric](./completeness) - [Answer Relevancy Metric](./answer-relevancy) ================================================================================ Source: src/pages/docs/reference/evals/tone-consistency.mdx ================================================================================ --- title: "Reference: Tone Consistency | Metrics | Evals | Mastra Docs" description: Documentation for the Tone Consistency Metric in Mastra, which evaluates emotional tone and sentiment consistency in text. --- # ToneConsistencyMetric The `ToneConsistencyMetric` class evaluates the emotional tone and sentiment consistency in text. It can operate in two modes: comparing tone between input/output pairs, or analyzing tone stability within a single text. ## Basic Usage ```typescript import { ToneConsistencyMetric } from "@mastra/evals/nlp"; const metric = new ToneConsistencyMetric(); // Compare tone between input and output const result1 = await metric.measure( "I love this amazing product!", "This product is wonderful and fantastic!" ); // Analyze tone stability in a single text const result2 = await metric.measure( "The service is excellent. The staff is friendly. 
The atmosphere is perfect.", "" // Empty string for single-text analysis ); console.log(result1.score); // Tone consistency score from 0-1 console.log(result2.score); // Tone stability score from 0-1 ``` ## measure() Parameters ## Returns ### info Object (Tone Comparison) ### info Object (Tone Stability) ## Modes of Operation ### 1. Tone Consistency (with reference) When both input and output are provided: - Compares sentiment between the two texts - Calculates sentiment difference - Higher score indicates more consistent tone ### 2. Tone Stability (single input) When output is empty: - Analyzes sentiment stability across sentences - Calculates variance in sentiment - Higher score indicates more stable tone ## Example with Both Modes ```typescript const metric = new ToneConsistencyMetric(); // Tone Consistency Mode const consistencyResult = await metric.measure( "This product is fantastic and amazing!", "The product is excellent and wonderful!" ); // Example output: // { // score: 0.95, // info: { // responseSentiment: 0.8, // referenceSentiment: 0.75, // difference: 0.05 // } // } // Tone Stability Mode const stabilityResult = await metric.measure( "Great service! Friendly staff. Perfect atmosphere.", "" ); // Example output: // { // score: 0.9, // info: { // avgSentiment: 0.6, // sentimentVariance: 0.1 // } // } ``` ## Related - [Answer Relevancy Metric](./answer-relevancy) - [Content Similarity Metric](./content-similarity) - [Completeness Metric](./completeness) ================================================================================ Source: src/pages/docs/reference/evals/toxicity.mdx ================================================================================ --- title: "Reference: Toxicity | Metrics | Evals | Mastra Docs" description: Documentation for the Toxicity Metric in Mastra, which evaluates LLM outputs for racist, biased, or toxic elements. --- # ToxicityMetric The `ToxicityMetric` class evaluates whether an LLM's output contains racist, biased, or toxic elements. It uses a judge-based system to analyze responses for various forms of toxicity including personal attacks, mockery, hate speech, dismissive statements, and threats. 
## Basic Usage ```typescript import { ToxicityMetric } from "@mastra/evals/llm"; // Configure the model for evaluation const model = { provider: "OPEN_AI", name: "gpt-4o-mini", apiKey: process.env.OPENAI_API_KEY, }; const metric = new ToxicityMetric(model, { scale: 1, // Default scale is 0-1 }); const result = await metric.measure( "How is Sarah as a person?", "Sarah is a dedicated team member who always delivers quality work.", ); console.log(result.score); // Score from 0-1 (0 = not toxic, 1 = toxic) console.log(result.info.reason); // Explanation of the toxicity assessment ``` ## Constructor Parameters ### ToxicityMetricOptions ## measure() Parameters ## Returns ## Scoring Details The metric evaluates toxicity through multiple aspects and calculates a score based on: - Personal attacks - Mockery or sarcasm - Hate speech - Dismissive statements - Threats or intimidation The final score is normalized to the configured scale (default 0-1) where: - 0: No toxic elements detected - 0.1-0.3: Mild toxicity - 0.4-0.7: Moderate toxicity - 0.8-1.0: Severe toxicity ## Example with Custom Configuration ```typescript const metric = new ToxicityMetric( { provider: "OPEN_AI", name: "gpt-4o-mini", apiKey: process.env.OPENAI_API_KEY, }, { scale: 10, // Use 0-10 scale instead of 0-1 }, ); const result = await metric.measure( "What do you think about the new team member?", "The new team member shows promise but needs significant improvement in basic skills.", ); ``` ## Related - [Tone Consistency Metric](./tone-consistency) - [Bias Metric](./bias) ================================================================================ Source: src/pages/docs/reference/llm/generate.mdx ================================================================================ --- title: "Reference: LLM.generate() | Generation | LLM | Mastra Docs" description: "Documentation for the `.generate()` method in LLM, which produces text or structured responses." --- # LLM.generate() The `.generate()` method is used to interact with the language model to produce text or structured responses. This method accepts `messages` and an optional `options` object as parameters. 
## Parameters ### `messages` The `messages` parameter can be: - A single string - An array of strings - An array of message objects with `role` and `content` properties #### Message Object Structure ```typescript interface Message { role: 'system' | 'user' | 'assistant'; content: string; } ``` ### `options` (Optional) An optional object that can include: Promise | void', isOptional: true, description: 'Callback function called when generation is complete.', }, { name: 'onStepFinish', type: '(step: string) => void', isOptional: true, description: 'Callback function called after each step during generation.', }, { name: 'maxSteps', type: 'number', isOptional: true, default: '5', description: 'Maximum number of steps allowed during generation.', }, { name: 'tools', type: 'ToolsInput', isOptional: true, description: 'Tools available for the LLM to use during generation.', }, { name: 'runId', type: 'string', isOptional: true, description: 'Unique identifier for the generation run, useful for tracing and logging.', } ], }, ]} /> ## Returns ## Examples ### Basic Text Generation ```typescript const response = await llm.generate("What is AI?"); console.log(response.text); ``` ### Structured Output ```typescript import { z } from "zod"; const mySchema = z.object({ definition: z.string(), examples: z.array(z.string()), }); const response = await llm.generate( "Define machine learning and give examples.", { output: mySchema, }, ); console.log(response.object); ``` ## Related Methods For real-time streaming responses, see the [`stream()`](./stream.mdx) method documentation. ================================================================================ Source: src/pages/docs/reference/llm/providers-and-models.mdx ================================================================================ --- title: "Reference: Providers and Models | LLM | Mastra Docs" description: Documentation for the providers and models supported by Mastra, including the most popular providers, natively supported providers, community supported providers, and custom providers through Portkey. --- # Providers and Models Mastra supports a variety of language models from different providers. There are four types of providers we support: - **Most popular providers.** OpenAI, Anthropic, Google Gemini. These are the most popular models and are highly recommended for most use cases. We will reference them and use them in docs and examples. - **Other natively supported providers.** Mastra is built on AI SDK and supports a number of AI SDK supported models out of the box. We will always try to use these models in docs and examples. - **Community supported providers.** A number of other providers have built AI SDK integrations (via creating an AI SDK provider). - **Custom providers through Portkey.** If a provider does not have an AI SDK integration, you can use them through Portkey (an open-source AI gateway).
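In practice, the provider strings and model names listed in the tables below are what you pair together when configuring a model. A minimal sketch, mirroring the model objects used in the evals examples earlier in these docs (the exact fields may differ depending on where the model is configured):

```typescript
// Pair a provider string from the tables below with one of that provider's
// model names. This shape follows the model objects used in the evals docs.
const model = {
  provider: "ANTHROPIC",                  // provider string
  name: "claude-3-5-sonnet-20241022",     // a model supported by that provider
  apiKey: process.env.ANTHROPIC_API_KEY,  // provider credential (assumed env var)
};
```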
## Most popular providers | Provider | Provider String | Supported Models | | ------------- | --------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | OpenAI | OPEN_AI | `gpt-4`, `gpt-4-turbo`, `gpt-3.5-turbo`, `gpt-4o-mini`, `gpt-4o-mini`, `o1`, `o1-mini`, `o1-preview` | | Anthropic | ANTHROPIC | `claude-3-5-sonnet-20241022`, `claude-3-5-sonnet-20240620`, `claude-3-5-haiku-20241022`, `claude-3-opus-20240229`, `claude-3-sonnet-20240229`, `claude-3-haiku-20240307` | | Google Gemini | GOOGLE | `gemini-1.5-pro-latest`, `gemini-1.5-pro`, `gemini-1.5-flash-latest`, `gemini-1.5-flash`, `gemini-2.0-flash-exp-latest`, `gemini-2.0-flash-thinking-exp-1219`, `gemini-exp-1206` | ## Other natively supported providers | Provider | Provider String | Supported Models | | ---------------- | ---------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | | Deepseek | DEEPSEEK | `deepseek-chat`, `deepseek-reasoner` | | Groq | GROQ | `llama3-groq-70b-8192-tool-use-preview`, `llama3-groq-8b-8192-tool-use-preview`, `gemma2-9b-it`, `gemma-7b-it` | | Perplexity | PERPLEXITY | `llama-3.1-sonar-small-128k-online`, `llama-3.1-sonar-large-128k-online`, `llama-3.1-sonar-huge-128k-online`, `llama-3.1-sonar-small-128k-chat` | | TogetherAI | TOGETHER_AI | `codellama/CodeLlama-34b-Instruct-hf`, `upstage/SOLAR-10.7B-Instruct-v1.0`, `mistralai/Mixtral-8x7B-v0.1`, `WhereIsAI/UAE-Large-V1` | | LM Studio | LM_STUDIO | `qwen2-7b-instruct-4bit`, `qwen2-math-1.5b`, `qwen2-0.5b`, `aya-23-8b`, `mistral-7b-v0.3` | | Baseten | BASETEN | `llama-3.1-70b-instruct`, `qwen2.5-7b-math-instruct`, `qwen2.5-14b-instruct`, `qwen2.5-32b-coder-instruct` | | Fireworks | FIREWORKS | `llama-3.1-405b-instruct`, `llama-3.1-70b-instruct`, `llama-3.1-8b-instruct`, `llama-3.2-3b-instruct` | | Mistral | MISTRAL | `pixtral-large-latest`, `mistral-large-latest`, `mistral-small-latest`, `ministral-3b-latest` | | X Grok | X_GROK | `grok-beta`, `grok-vision-beta` | | Cohere | COHERE | `command-r-plus` | | Azure | AZURE | `gpt-35-turbo-instruct` | | Amazon | AMAZON | `amazon-titan-tg1-large`, `amazon-titan-text-express-v1`, `anthropic-claude-3-5-sonnet-20241022-v2:0` | | Anthropic Vertex | ANTHROPIC_VERTEX | `claude-3-5-sonnet@20240620`, `claude-3-opus@20240229`, `claude-3-sonnet@20240229`, `claude-3-haiku@20240307` | ## Community supported providers You can see a list of Vercel's community supported providers [here](https://sdk.vercel.ai/providers/community-providers). You can also write your own provider if desired. ##### Example: Custom Provider - Ollama Here is an example of using a custom provider, Ollama, to create a model instance. ```bash npm2yarn copy npm install ollama-ai-provider ``` Import and configure the Ollama model by using `createOllama` from the `ollama-ai-provider` package. ```typescript copy showLineNumbers import { createOllama } from "ollama-ai-provider"; const ollama = createOllama({ // optional settings, e.g. baseURL: "https://api.ollama.com", }); ``` After creating the instance, you can use it like any other model in Mastra. ```typescript copy showLineNumbers lines={9-14, 27} filename="src/mastra/index.ts" import { Mastra, type ModelConfig } from "@mastra/core"; import { createOllama } from "ollama-ai-provider"; const ollama = createOllama({ // optional settings, e.g. 
baseURL: "https://api.ollama.com", }); const modelConfig: ModelConfig = { model: ollama.chat("gemma"), // The model instance created by the Ollama provider apiKey: process.env.OLLAMA_API_KEY, provider: "Ollama", toolChoice: "auto", // Controls how the model handles tool/function calling }; const mastra = new Mastra({}); const llm = mastra.llm; const response = await llm.generate( [ { role: "user", content: "What is machine learning?", }, ], { model: modelConfig }, ); ``` ### Portkey supported providers [Portkey](https://portkey.ai/) is an open-source AI gateway with support for 200+ providers, so if the provider you want isn't available through AI SDK, it probably is through Portkey. You can refer to the [Portkey documentation](https://docs.portkey.ai/docs/custom-models) for more details on how to implement custom models. ================================================================================ Source: src/pages/docs/reference/llm/stream.mdx ================================================================================ --- title: "Reference: LLM.stream() | Streaming | LLM | Mastra Docs" description: "Documentation for the `.stream()` method in LLM, which enables real-time streaming of responses." --- # LLM.stream() The `.stream()` method enables real-time streaming of responses from the language model. This method accepts `messages` and an optional `options` object as parameters, similar to `generate()`. ## Parameters ### `messages` The `messages` parameter can be: - A single string - An array of strings - An array of message objects with `role` and `content` properties #### Message Object Structure ```typescript interface Message { role: 'system' | 'user' | 'assistant'; content: string; } ``` ### `options` (Optional) An optional object that can include: Promise | void', isOptional: true, description: 'Callback function called when streaming is complete.', }, { name: 'onStepFinish', type: '(step: string) => void', isOptional: true, description: 'Callback function called after each step during streaming.', }, { name: 'maxSteps', type: 'number', isOptional: true, default: '5', description: 'Maximum number of steps allowed during streaming.', }, { name: 'tools', type: 'ToolsInput', isOptional: true, description: 'Tools available for the LLM to use during streaming.', }, { name: 'runId', type: 'string', isOptional: true, description: 'Unique identifier for the streaming run, useful for tracing and logging.', } ]} /> ## Returns The method returns a promise that resolves to an object containing one or more of the following properties: ', isOptional: true, description: 'An async iterable stream of text chunks. Present when output is "text".', }, { name: 'objectStream', type: 'AsyncIterable', isOptional: true, description: 'An async iterable stream of structured data. 
Present when a schema is provided.', }, { name: 'object', type: 'Promise', isOptional: true, description: 'A promise that resolves to the final structured output when using a schema.', } ]} /> ## Examples ### Basic Text Streaming ```typescript const stream = await llm.stream("Tell me a story about a brave knight."); for await (const chunk of stream.textStream) { process.stdout.write(chunk); } ``` ### Structured Output Streaming ```typescript const schema = { type: 'object', properties: { answer: { type: 'number' }, explanation: { type: 'string' } }, required: ['answer', 'explanation'] }; const response = await llm.stream("What is 2+2?", { output: schema, onFinish: text => console.log("Finished:", text) }); for await (const chunk of response.textStream) { console.log(chunk); } const result = await response.object; console.log("Final structured result:", result); ``` ================================================================================ Source: src/pages/docs/reference/memory/Memory.mdx ================================================================================ # Memory Class Reference The `Memory` class provides a robust system for managing conversation history and thread-based message storage in Mastra. It enables persistent storage of conversations, semantic search capabilities, and efficient message retrieval. ## Usage Example ```typescript import { Memory } from "@mastra/memory"; import { PostgresStorage } from "@mastra/storage-pg"; const memory = new Memory({ storage: new PostgresStorage({ connectionString: process.env.DATABASE_URL, }), }); ``` ## Parameters ### options ## Additional Notes ### Vector Search Configuration When using vector search capabilities, ensure you configure both the vector store and appropriate search options: ```typescript const memory = new Memory({ storage: new PostgresStorage({ /* config */ }), vector: new PineconeVector({ /* config */ }), options: { historySearch: { topK: 5, messageRange: { before: 2, after: 2 } } } }); ``` ### Related - [createThread](/docs/reference/memory/createThread.mdx) - [query](/docs/reference/memory/query.mdx) ================================================================================ Source: src/pages/docs/reference/memory/createThread.mdx ================================================================================ # createThread Creates a new conversation thread in the memory system. Each thread represents a distinct conversation or context and can contain multiple messages. ## Usage Example ```typescript import { Memory } from "@mastra/memory"; const memory = new Memory({ /* config */ }); const thread = await memory.createThread({ resourceId: "user-123", title: "Support Conversation", metadata: { category: "support", priority: "high" } }); ``` ## Parameters ", description: "Optional metadata to associate with the thread", isOptional: true, }, ]} /> ## Returns ", description: "Additional metadata associated with the thread", }, ]} /> ### Related - [Memory](/docs/reference/memory/Memory.mdx) - [getThreadById](/docs/reference/memory/getThreadById.mdx) - [getThreadsByResourceId](/docs/reference/memory/getThreadsByResourceId.mdx) ================================================================================ Source: src/pages/docs/reference/memory/getThreadById.mdx ================================================================================ # getThreadById Reference The `getThreadById` function retrieves a specific thread by its ID from storage. 
## Usage Example ```typescript import { Memory } from "@mastra/core/memory"; const memory = new Memory(config); const thread = await memory.getThreadById({ threadId: "thread-123" }); ``` ## Parameters ## Returns ### Related - [Memory](/docs/reference/memory/Memory.mdx) ================================================================================ Source: src/pages/docs/reference/memory/getThreadsByResourceId.mdx ================================================================================ # getThreadsByResourceId Reference The `getThreadsByResourceId` function retrieves all threads associated with a specific resource ID from storage. ## Usage Example ```typescript import { Memory } from "@mastra/core/memory"; const memory = new Memory(config); const threads = await memory.getThreadsByResourceId({ resourceId: "resource-123", }); ``` ## Parameters ## Returns ### Related - [Memory](/docs/reference/memory/Memory.mdx) ================================================================================ Source: src/pages/docs/reference/memory/query.mdx ================================================================================ # query Retrieves messages from a specific thread, with support for pagination and filtering options. ## Usage Example ```typescript import { Memory } from "@mastra/memory"; const memory = new Memory({ /* config */ }); // Get last 50 messages const { messages, uiMessages } = await memory.query({ threadId: "thread-123", selectBy: { last: 50, }, }); // Get messages with context around specific messages const { messages: contextMessages } = await memory.query({ threadId: "thread-123", selectBy: { include: [ { id: "msg-123", // Get just this message (no context) }, { id: "msg-456", // Get this message with custom context withPreviousMessages: 3, // 3 messages before withNextMessages: 1, // 1 message after }, ], }, }); // Semantic search in messages const { messages } = await memory.query({ threadId: "thread-123", selectBy: { vectorSearchString: "What was discussed about deployment?", }, threadConfig: { historySearch: true, }, }); ``` ## Parameters ### selectBy ### include ## Returns ## Additional Notes The `query` function returns two different message formats: - `messages`: Core message format used internally - `uiMessages`: Formatted messages suitable for UI display, including proper threading of tool calls and results ### Related - [Memory](/docs/reference/memory/Memory.mdx) ================================================================================ Source: src/pages/docs/reference/observability/create-logger.mdx ================================================================================ --- title: "Reference: createLogger() | Mastra Observability Docs" description: Documentation for the createLogger function, which instantiates a logger based on a given configuration. --- # createLogger() The `createLogger()` function is used to instantiate a logger based on a given configuration. You can create console-based, file-based, or Upstash Redis-based loggers by specifying the type and any additional parameters relevant to that type. 
### Usage #### Console Logger (Development) ```typescript showLineNumbers copy const consoleLogger = createLogger({ name: "Mastra", level: "debug" }); consoleLogger.info("App started"); ``` #### File Transport (Structured Logs) ```typescript showLineNumbers copy import { FileTransport } from "@mastra/loggers/file"; const fileLogger = createLogger({ name: "Mastra", transports: { file: new FileTransport({ path: "test-dir/test.log" }) }, level: "warn", }); fileLogger.warn("Low disk space", { destinationPath: "system", type: "WORKFLOW", }); ``` #### Upstash Logger (Remote Log Drain) ```typescript showLineNumbers copy import { UpstashTransport } from "@mastra/loggers/upstash"; const logger = createLogger({ name: "Mastra", transports: { upstash: new UpstashTransport({ listName: "production-logs", upstashUrl: process.env.UPSTASH_URL!, upstashToken: process.env.UPSTASH_TOKEN!, }), }, level: "info", }); logger.info({ message: "User signed in", destinationPath: "auth", type: "AGENT", runId: "run_123", }); ``` ### Parameters ================================================================================ Source: src/pages/docs/reference/observability/logger.mdx ================================================================================ --- title: "Reference: Logger Instance | Mastra Observability Docs" description: Documentation for Logger instances, which provide methods to record events at various severity levels. --- # Logger Instance A Logger instance is created by `createLogger()` and provides methods to record events at various severity levels. Depending on the logger type, messages may be written to the console, file, or an external service. ## Example ```typescript showLineNumbers copy // Using a console logger const logger = createLogger({ name: 'Mastra', level: 'info' }); logger.debug('Debug message'); // Won't be logged because level is INFO logger.info({ message: 'User action occurred', destinationPath: 'user-actions', type: 'AGENT' }); // Logged logger.error('An error occurred'); // Logged as ERROR ``` ## Methods A logger instance exposes the following methods: - `debug(message: BaseLogMessage | string, ...args: any[]): void | Promise<void>`: Write a DEBUG-level log. Only recorded if level ≤ DEBUG. - `info(message: BaseLogMessage | string, ...args: any[]): void | Promise<void>`: Write an INFO-level log. Only recorded if level ≤ INFO. - `warn(message: BaseLogMessage | string, ...args: any[]): void | Promise<void>`: Write a WARN-level log. Only recorded if level ≤ WARN. - `error(message: BaseLogMessage | string, ...args: any[]): void | Promise<void>`: Write an ERROR-level log. Only recorded if level ≤ ERROR. - `cleanup(): Promise<void>` (optional): Cleanup resources held by the logger (e.g., network connections for Upstash). Not all loggers implement this. **Note:** Some loggers require a `BaseLogMessage` object (with `message`, `destinationPath`, `type` fields). For instance, the `File` and `Upstash` loggers need structured messages. ================================================================================ Source: src/pages/docs/reference/observability/otel-config.mdx ================================================================================ --- title: "Reference: OtelConfig | Mastra Observability Docs" description: Documentation for the OtelConfig object, which configures OpenTelemetry instrumentation, tracing, and exporting behavior.
--- # `OtelConfig` The `OtelConfig` object is used to configure OpenTelemetry instrumentation, tracing, and exporting behavior within your application. By adjusting its properties, you can control how telemetry data (such as traces) is collected, sampled, and exported. To use the `OtelConfig` within Mastra, pass it as the value of the `telemetry` key when initializing Mastra. This will configure Mastra to use your custom OpenTelemetry settings for tracing and instrumentation. ```typescript showLineNumbers copy import { Mastra, type OtelConfig } from '@mastra/core'; const otelConfig: OtelConfig = { serviceName: 'my-awesome-service', enabled: true, sampling: { type: 'ratio', probability: 0.5, }, export: { type: 'otlp', endpoint: 'https://otel-collector.example.com/v1/traces', headers: { Authorization: 'Bearer YOUR_TOKEN_HERE', }, }, }; // Pass the config as the `telemetry` key when initializing Mastra export const mastra = new Mastra({ telemetry: otelConfig }); ``` ### Properties Among the export settings, an optional `headers` map lets you send additional headers with OTLP requests, which is useful for authentication or routing. ================================================================================ Source: src/pages/docs/reference/observability/providers/braintrust.mdx ================================================================================ --- title: "Reference: Braintrust | Observability | Mastra Docs" description: Documentation for integrating Braintrust with Mastra, an evaluation and monitoring platform for LLM applications. --- # Braintrust Braintrust is an evaluation and monitoring platform for LLM applications. ## Configuration To use Braintrust with Mastra, configure these environment variables: ```env OTEL_EXPORTER_OTLP_ENDPOINT=https://api.braintrust.dev/otel OTEL_EXPORTER_OTLP_HEADERS="Authorization=Bearer your_api_key, x-bt-parent=project_id:your_project_id" ``` ## Implementation Here's how to configure Mastra to use Braintrust: ```typescript import { Mastra } from "@mastra/core"; export const mastra = new Mastra({ // ... other config telemetry: { serviceName: "your-service-name", enabled: true, export: { type: "otlp", }, }, }); ``` ## Dashboard Access your Braintrust dashboard at [braintrust.dev](https://www.braintrust.dev/) ================================================================================ Source: src/pages/docs/reference/observability/providers/index.mdx ================================================================================ --- title: "Reference: Provider List | Observability | Mastra Docs" description: Overview of observability providers supported by Mastra, including SigNoz, Braintrust, Langfuse, and more. --- # Observability Providers Observability providers include: - [SigNoz](./providers/signoz.mdx) - [Braintrust](./providers/braintrust.mdx) - [Langfuse](./providers/langfuse.mdx) - [Langsmith](./providers/langsmith.mdx) - [New Relic](./providers/new-relic.mdx) - [Traceloop](./providers/traceloop.mdx) - [Laminar](./providers/laminar.mdx) ================================================================================ Source: src/pages/docs/reference/observability/providers/laminar.mdx ================================================================================ --- title: "Reference: Laminar Integration | Mastra Observability Docs" description: Documentation for integrating Laminar with Mastra, a specialized observability platform for LLM applications. --- # Laminar Laminar is a specialized observability platform for LLM applications.
## Configuration To use Laminar with Mastra, configure these environment variables: ```env OTEL_EXPORTER_OTLP_ENDPOINT=https://api.laminar.dev/v1/traces OTEL_EXPORTER_OTLP_HEADERS="Authorization=Bearer your_api_key, x-laminar-team-id=your_team_id" ``` ## Implementation Here's how to configure Mastra to use Laminar: ```typescript import { Mastra } from "@mastra/core"; export const mastra = new Mastra({ // ... other config telemetry: { serviceName: "your-service-name", enabled: true, export: { type: "otlp", }, }, }); ``` ## Dashboard Access your Laminar dashboard at [https://lmnr.ai/](https://lmnr.ai/) ================================================================================ Source: src/pages/docs/reference/observability/providers/langfuse.mdx ================================================================================ --- title: "Reference: Langfuse Integration | Mastra Observability Docs" description: Documentation for integrating Langfuse with Mastra, an open-source observability platform for LLM applications. --- # Langfuse Langfuse is an open-source observability platform designed specifically for LLM applications. ## Configuration To use Langfuse with Mastra, you'll need to configure the following environment variables: ```env LANGFUSE_PUBLIC_KEY=your_public_key LANGFUSE_SECRET_KEY=your_secret_key LANGFUSE_BASEURL=https://cloud.langfuse.com # Optional - defaults to cloud.langfuse.com ``` ## Implementation Here's how to configure Mastra to use Langfuse: ```typescript import { Mastra } from "@mastra/core"; import { LangfuseExporter } from "langfuse-vercel"; export const mastra = new Mastra({ // ... other config telemetry: { serviceName: "your-service-name", enabled: true, export: { type: "custom", exporter: new LangfuseExporter({ publicKey: process.env.LANGFUSE_PUBLIC_KEY, secretKey: process.env.LANGFUSE_SECRET_KEY, baseUrl: process.env.LANGFUSE_BASEURL, }), }, }, }); ``` ## Dashboard Once configured, you can view your traces and analytics in the Langfuse dashboard at [cloud.langfuse.com](https://cloud.langfuse.com) ================================================================================ Source: src/pages/docs/reference/observability/providers/langsmith.mdx ================================================================================ --- title: "Reference: LangSmith Integration | Mastra Observability Docs" description: Documentation for integrating LangSmith with Mastra, a platform for debugging, testing, evaluating, and monitoring LLM applications. --- # LangSmith LangSmith is LangChain's platform for debugging, testing, evaluating, and monitoring LLM applications. ## Configuration To use LangSmith with Mastra, you'll need to configure the following environment variables: ```env OTEL_EXPORTER_OTLP_ENDPOINT=https://api.smith.langchain.com/v1/traces OTEL_EXPORTER_OTLP_HEADERS="Authorization=Bearer your_api_key, x-langsmith-project-id=your_project_id" ``` ## Implementation Here's how to configure Mastra to use LangSmith: ```typescript import { Mastra } from "@mastra/core"; export const mastra = new Mastra({ // ... 
other config telemetry: { serviceName: "your-service-name", enabled: true, export: { type: "otlp", }, }, }); ``` ## Dashboard Access your traces and analytics in the LangSmith dashboard at [smith.langchain.com](https://smith.langchain.com) ================================================================================ Source: src/pages/docs/reference/observability/providers/langwatch.mdx ================================================================================ --- title: "Reference: LangWatch Integration | Mastra Observability Docs" description: Documentation for integrating LangWatch with Mastra, a specialized observability platform for LLM applications. --- # LangWatch LangWatch is a specialized observability platform for LLM applications. ## Configuration To use LangWatch with Mastra, configure these environment variables: ```env LANGWATCH_API_KEY=your_api_key LANGWATCH_PROJECT_ID=your_project_id ``` ## Implementation Here's how to configure Mastra to use LangWatch: ```typescript import { Mastra } from "@mastra/core"; import { LangWatchExporter } from "langwatch"; export const mastra = new Mastra({ // ... other config telemetry: { serviceName: "your-service-name", enabled: true, export: { type: "custom", exporter: new LangWatchExporter({ apiKey: process.env.LANGWATCH_API_KEY, projectId: process.env.LANGWATCH_PROJECT_ID, }), }, }, }); ``` ## Dashboard Access your LangWatch dashboard at [app.langwatch.ai](https://app.langwatch.ai) ================================================================================ Source: src/pages/docs/reference/observability/providers/new-relic.mdx ================================================================================ --- title: "Reference: New Relic Integration | Mastra Observability Docs" description: Documentation for integrating New Relic with Mastra, a comprehensive observability platform supporting OpenTelemetry for full-stack monitoring. --- # New Relic New Relic is a comprehensive observability platform that supports OpenTelemetry (OTLP) for full-stack monitoring. ## Configuration To use New Relic with Mastra via OTLP, configure these environment variables: ```env OTEL_EXPORTER_OTLP_ENDPOINT=https://otlp.nr-data.net:4317 OTEL_EXPORTER_OTLP_HEADERS="api-key=your_license_key" ``` ## Implementation Here's how to configure Mastra to use New Relic: ```typescript import { Mastra } from "@mastra/core"; export const mastra = new Mastra({ // ... other config telemetry: { serviceName: "your-service-name", enabled: true, export: { type: "otlp", }, }, }); ``` ## Dashboard View your telemetry data in the New Relic One dashboard at [one.newrelic.com](https://one.newrelic.com) ================================================================================ Source: src/pages/docs/reference/observability/providers/signoz.mdx ================================================================================ --- title: "Reference: SigNoz Integration | Mastra Observability Docs" description: Documentation for integrating SigNoz with Mastra, an open-source APM and observability platform providing full-stack monitoring through OpenTelemetry. --- # SigNoz SigNoz is an open-source APM and observability platform that provides full-stack monitoring capabilities through OpenTelemetry. 
## Configuration To use SigNoz with Mastra, configure these environment variables: ```env OTEL_EXPORTER_OTLP_ENDPOINT=https://ingest.{region}.signoz.cloud:443 OTEL_EXPORTER_OTLP_HEADERS=signoz-ingestion-key=your_signoz_token ``` ## Implementation Here's how to configure Mastra to use SigNoz: ```typescript import { Mastra } from "@mastra/core"; export const mastra = new Mastra({ // ... other config telemetry: { serviceName: "your-service-name", enabled: true, export: { type: "otlp", }, }, }); ``` ## Dashboard Access your SigNoz dashboard at [cloud.signoz.io](https://cloud.signoz.io) ================================================================================ Source: src/pages/docs/reference/observability/providers/traceloop.mdx ================================================================================ --- title: "Reference: Traceloop Integration | Mastra Observability Docs" description: Documentation for integrating Traceloop with Mastra, an OpenTelemetry-native observability platform for LLM applications. --- # Traceloop Traceloop is an OpenTelemetry-native observability platform specifically designed for LLM applications. ## Configuration To use Traceloop with Mastra, configure these environment variables: ```env OTEL_EXPORTER_OTLP_ENDPOINT=https://api.traceloop.com/v1/traces OTEL_EXPORTER_OTLP_HEADERS="Authorization=Bearer your_api_key, x-traceloop-destination-id=your_destination_id" ``` ## Implementation Here's how to configure Mastra to use Traceloop: ```typescript import { Mastra } from "@mastra/core"; export const mastra = new Mastra({ // ... other config telemetry: { serviceName: "your-service-name", enabled: true, export: { type: "otlp", }, }, }); ``` ## Dashboard Access your traces and analytics in the Traceloop dashboard at [app.traceloop.com](https://app.traceloop.com) ================================================================================ Source: src/pages/docs/reference/rag/chunk.mdx ================================================================================ --- title: "Reference: .chunk() | Document Processing | RAG | Mastra Docs" description: Documentation for the chunk function in Mastra, which splits documents into smaller segments using various strategies. --- # Reference: .chunk() The `.chunk()` function splits documents into smaller segments using various strategies and options. ## Example ```typescript import { Document } from '@mastra/core'; const doc = new Document(` # Introduction This is a sample document that we want to split into chunks. ## Section 1 Here is the first section with some content. ## Section 2 Here is another section with different content. `); // Basic chunking with defaults const chunks = await doc.chunk(); // Markdown-specific chunking with header extraction const chunksWithMetadata = await doc.chunk({ strategy: 'markdown', headers: [['#', 'title'], ['##', 'section']], extract: { fields: [ { name: 'summary', description: 'A brief summary of the chunk content' }, { name: 'keywords', description: 'Key terms found in the chunk' } ] } }); ``` ## Parameters ## Strategy-Specific Options Strategy-specific options are passed as top-level parameters alongside the strategy parameter. 
For example: ```typescript showLineNumbers copy // HTML strategy example const chunks = await doc.chunk({ strategy: 'html', headers: [['h1', 'title'], ['h2', 'subtitle']], // HTML-specific option sections: [['div.content', 'main']], // HTML-specific option size: 500 // general option }); // Markdown strategy example const chunks = await doc.chunk({ strategy: 'markdown', headers: [['#', 'title'], ['##', 'section']], // Markdown-specific option stripHeaders: true, // Markdown-specific option overlap: 50 // general option }); // Token strategy example const chunks = await doc.chunk({ strategy: 'token', encodingName: 'gpt2', // Token-specific option modelName: 'gpt-3.5-turbo', // Token-specific option size: 1000 // general option }); ``` The options documented below are passed directly at the top level of the configuration object, not nested within a separate options object. ### HTML ", description: "Array of [selector, metadata key] pairs for header-based splitting", }, { name: "sections", type: "Array<[string, string]>", description: "Array of [selector, metadata key] pairs for section-based splitting", }, { name: "returnEachLine", type: "boolean", isOptional: true, description: "Whether to return each line as a separate chunk", }, ]} /> ### Markdown ", description: "Array of [header level, metadata key] pairs", }, { name: "stripHeaders", type: "boolean", isOptional: true, description: "Whether to remove headers from the output", }, { name: "returnEachLine", type: "boolean", isOptional: true, description: "Whether to return each line as a separate chunk", }, ]} /> ### Token ### JSON ## Return Value Returns a `MDocument` instance containing the chunked documents. Each chunk includes: ```typescript interface DocumentNode { text: string; metadata: Record; embedding?: number[]; } ``` ================================================================================ Source: src/pages/docs/reference/rag/document.mdx ================================================================================ --- title: "Reference: MDocument | Document Processing | RAG | Mastra Docs" description: Documentation for the MDocument class in Mastra, which handles document processing and chunking. --- # MDocument The MDocument class processes documents for RAG applications. The main methods are `.chunk()` and `.extractMetadata()`. ## Constructor }>", description: "Array of document chunks with their text content and optional metadata", }, { name: "type", type: "'text' | 'html' | 'markdown' | 'json' | 'latex'", description: "Type of document content", } ]} /> ## Static Methods ### fromText() Creates a document from plain text content. ```typescript static fromText(text: string, metadata?: Record): MDocument ``` ### fromHTML() Creates a document from HTML content. ```typescript static fromHTML(html: string, metadata?: Record): MDocument ``` ### fromMarkdown() Creates a document from Markdown content. ```typescript static fromMarkdown(markdown: string, metadata?: Record): MDocument ``` ### fromJSON() Creates a document from JSON content. ```typescript static fromJSON(json: string, metadata?: Record): MDocument ``` ## Instance Methods ### chunk() Splits document into chunks and optionally extracts metadata. ```typescript async chunk(params?: ChunkParams): Promise ``` See [chunk() reference](./chunk) for detailed options. ### getDocs() Returns array of processed document chunks. ```typescript getDocs(): Chunk[] ``` ### getText() Returns array of text strings from chunks. 
```typescript getText(): string[] ``` ### getMetadata() Returns array of metadata objects from chunks. ```typescript getMetadata(): Record[] ``` ### extractMetadata() Extracts metadata using specified extractors. See [ExtractParams reference](./extract-params) for details. ```typescript async extractMetadata(params: ExtractParams): Promise ``` ## Examples ```typescript import { MDocument } from '@mastra/rag'; // Create document from text const doc = MDocument.fromText('Your content here'); // Split into chunks with metadata extraction const chunks = await doc.chunk({ strategy: 'markdown', headers: [['#', 'title'], ['##', 'section']], extract: { fields: [ { name: 'summary', description: 'A brief summary' }, { name: 'keywords', description: 'Key terms' } ] } }); // Get processed chunks const docs = doc.getDocs(); const texts = doc.getText(); const metadata = doc.getMetadata(); ``` ================================================================================ Source: src/pages/docs/reference/rag/embeddings.mdx ================================================================================ --- title: "Reference: embed() | Document Embedding | RAG | Mastra Docs" description: Documentation for the embed function in Mastra, which generates vector embeddings for text inputs. --- # Embed The `embed` function generates vector embeddings for text inputs, enabling similarity search and RAG workflows. ## Parameters }[]", description: "Content to embed. Can be a single string, an array of strings, or an array of objects with `text` and optional `metadata`." }, { name: "options", type: "object", description: "Configuration for the embedding call.", properties: [ { name: "provider", type: "'openai' | 'anthropic' | 'google' | 'custom'", description: "Embedding provider." }, { name: "model", type: "string", description: "Name of the embedding model." } ] } ]} /> ## Return Value ================================================================================ Source: src/pages/docs/reference/rag/extract-params.mdx ================================================================================ --- title: "Reference: ExtractParams | Document Processing | RAG | Mastra Docs" description: Documentation for metadata extraction configuration in Mastra. --- # ExtractParams ExtractParams configures metadata extraction from document chunks. ## Example ## ExtractParams `ExtractParams` configures automatic metadata extraction from chunks using LLM analysis. ```typescript showLineNumbers copy const doc = new Document(text); const chunks = await doc.chunk({ extract: { fields: [ { name: 'summary', description: 'A 1-2 sentence summary of the main points' }, { name: 'entities', description: 'List of companies, people, and locations mentioned' }, { name: 'custom_field', description: 'Any other metadata you want to extract, guided by this description' } ], model: 'gpt-4' // Optional: specify a different model } }); ``` ## Parameters ", description: "Array of fields to extract from each chunk", isOptional: false }, { name: "model", type: "string", description: "OpenAI model to use for extraction", defaultValue: "gpt-3.5-turbo", isOptional: true } ]} /> ## Field Types The fields are flexible - you can define any metadata fields you want to extract. 
Common field types include: - `summary`: Brief overview of chunk content - `keywords`: Key terms or concepts - `topics`: Main subjects discussed - `entities`: Named entities (people, places, organizations) - `sentiment`: Emotional tone - `language`: Detected language - `timestamp`: Temporal references - `categories`: Content classification Example: ================================================================================ Source: src/pages/docs/reference/rag/graph-rag.mdx ================================================================================ --- title: "Reference: GraphRAG | Graph-based RAG | RAG | Mastra Docs" description: Documentation for the GraphRAG class in Mastra, which implements a graph-based approach to retrieval augmented generation. --- # GraphRAG The `GraphRAG` class implements a graph-based approach to retrieval augmented generation. It creates a knowledge graph from document chunks where nodes represent documents and edges represent semantic relationships, enabling both direct similarity matching and discovery of related content through graph traversal. ## Basic Usage ```typescript import { GraphRAG } from "@mastra/rag"; const graphRag = new GraphRAG({ dimension: 1536, threshold: 0.7 }); // Create the graph from chunks and embeddings graphRag.createGraph(documentChunks, embeddings); // Query the graph with embedding const results = await graphRag.query({ query: queryEmbedding, topK: 5, randomWalkSteps: 100, restartProb: 0.15 }); ``` ## Constructor Parameters ## Methods ### createGraph Creates a knowledge graph from document chunks and their embeddings. ```typescript createGraph(chunks: GraphChunk[], embeddings: GraphEmbedding[]): void ``` #### Parameters ### query Performs a graph-based search combining vector similarity and graph traversal. ```typescript query(query: number[], topK?: number, randomWalkSteps?: number, restartProb?: number): RankedNode[] ``` #### Parameters #### Returns Returns an array of `RankedNode` objects, where each node contains: ", description: "Additional metadata associated with the chunk", }, { name: "score", type: "number", description: "Combined relevance score from graph traversal", } ]} /> ## Advanced Example ```typescript const graphRag = new GraphRAG({ dimension: 1536, threshold: 0.8 // Stricter similarity threshold }); // Create graph from chunks and embeddings graphRag.createGraph(documentChunks, embeddings); // Query with custom parameters const results = await graphRag.query({ query: queryEmbedding, topK: 5, randomWalkSteps: 200, restartProb: 0.2 }); ``` ## Related - [createGraphRAGTool](../tools/graph-rag-tool) ================================================================================ Source: src/pages/docs/reference/rag/metadata-filters.mdx ================================================================================ --- title: "Reference: Metadata Filters | Metadata Filtering | RAG | Mastra Docs" description: Documentation for PGVector's metadata filtering capabilities in Mastra, which allow for precise querying of vector search results. --- # Metadata Filters Reference (PGVector) PGVector filters allow you to narrow down vector search results based on metadata conditions. These filters support nested paths, comparisons, text search, and logical operations. 
## Usage Example ```typescript import { PgVector } from '@mastra/vector-pg'; const pgVector = new PgVector(connectionString); const results = await pgVector.query( "my_index", queryVector, 10, { "metadata.category": { eq: "electronics" } } ); ``` ## Filter Structure ### Operators ## Additional Examples ### Basic Comparison ```typescript // Greater than comparison { "metadata.price": { gt: "100" } } ``` ### Text Search ```typescript // Case-insensitive search { "metadata.description": { ilike: "electronics" } } ``` ### Logical Operations ```typescript // AND condition { "$and": [ { "metadata.category": { eq: "electronics" } }, { "metadata.price": { lt: "1000" } } ] } ``` ### Notes - Text search operators (`like`, `ilike`) automatically add `%` wildcards - All numeric values should be passed as strings - Nested fields are accessed using dot notation ### Related - [PgStore](./pgstore) ================================================================================ Source: src/pages/docs/reference/rag/pgstore.mdx ================================================================================ --- title: "Reference: PgStore | Vector Databases | RAG | Mastra Docs" description: Documentation for the PgStore class in Mastra, which provides vector search using PostgreSQL with pgvector extension. --- # PgStore The PgStore class provides vector search using PostgreSQL with pgvector extension. ## Constructor Options ## Methods ### createIndex() ### upsert() []", isOptional: true, description: "Metadata for each vector", }, { name: "ids", type: "string[]", isOptional: true, description: "Optional vector IDs (auto-generated if not provided)", }, ]} /> ### query() ", isOptional: true, description: "Metadata filters", }, { name: "minScore", type: "number", isOptional: true, defaultValue: "0", description: "Minimum similarity score threshold", }, ]} /> ### describeIndex() Returns: ```typescript copy interface IndexStats { dimension: number; count: number; metric: "cosine" | "euclidean" | "dotproduct"; } ``` ### deleteIndex() ### disconnect() Closes the database connection pool. Should be called when done using the store. ## Response Types Query results are returned in this format: ```typescript copy interface QueryResult { id: string; score: number; metadata: Record; } ``` ## Error Handling The store throws typed errors that can be caught: ```typescript copy try { await store.query(queryVector); } catch (error) { if (error instanceof VectorStoreError) { console.log(error.code); // 'connection_failed' | 'invalid_dimension' | etc console.log(error.details); // Additional error context } } ``` ### Related - [Metadata Filters](./metadata-filters) ================================================================================ Source: src/pages/docs/reference/rag/pinecone.mdx ================================================================================ --- title: "Reference: PineconeStore | Vector DBs | RAG | Mastra Docs" description: Documentation for the PineconeStore class in Mastra, which provides an interface to Pinecone's vector database. --- # Pinecone The PineconeStore class provides an interface to Pinecone's vector database. 
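A rough end-to-end sketch of how the methods below fit together is shown here. The import path, constructor options, and parameter order are assumptions for illustration only; the method sections and the Environment Variables note below describe the pieces this sketch relies on.

```typescript
import { PineconeStore } from "@mastra/vector-pinecone"; // assumed package/import path

// Assumed constructor options, mirroring the required environment variables.
const store = new PineconeStore({
  apiKey: process.env.PINECONE_API_KEY!,
  environment: process.env.PINECONE_ENVIRONMENT!,
});

// Create an index sized for your embedding model (assumed parameter shape).
await store.createIndex("docs", 1536);

// Upsert embeddings with per-vector metadata (assumed parameter order);
// `embeddings` and `chunks` come from an earlier embedding step.
await store.upsert("docs", embeddings, chunks.map((chunk) => ({ text: chunk.text })));

// Query for the top 5 most similar vectors, optionally with a metadata filter.
const results = await store.query("docs", queryVector, 5, {
  category: { eq: "technical" },
});
```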
## Constructor Options ## Methods ### createIndex() ### upsert() []", isOptional: true, description: "Metadata for each vector", }, { name: "namespace", type: "string", isOptional: true, description: "Optional namespace for organization", }, ]} /> ### query() ", isOptional: true, description: "Metadata filters for the query", }, ]} /> ### listIndexes() Returns an array of index names as strings. ### describeIndex() Returns: ```typescript copy interface IndexStats { dimension: number; count: number; metric: "cosine" | "euclidean" | "dotproduct"; } ``` ### deleteIndex() ## Response Types Query results are returned in this format: ```typescript copy interface QueryResult { id: string; score: number; metadata: Record; } ``` ## Error Handling The store throws typed errors that can be caught: ```typescript copy try { await store.query(queryVector); } catch (error) { if (error instanceof VectorStoreError) { console.log(error.code); // 'connection_failed' | 'invalid_dimension' | etc console.log(error.details); // Additional error context } } ``` ### Environment Variables Required environment variables: - `PINECONE_API_KEY`: Your Pinecone API key - `PINECONE_ENVIRONMENT`: Pinecone environment (e.g., 'us-west1-gcp') ### Related - [Metadata Filters](./metadata-filters) ================================================================================ Source: src/pages/docs/reference/rag/qdrant.mdx ================================================================================ --- title: "Reference: Qdrant | Vector Databases | RAG | Mastra Docs" description: Documentation for integrating Qdrant with Mastra, a vector similarity search engine for managing vectors and payloads. --- # Qdrant [Qdrant](https://qdrant.tech/) is a vector similarity search engine. It provides a production-ready service with a convenient API to store, search, and manage vectors with additional payload and extended filtering support. ## Constructor Options ## Methods ### createIndex() ### upsert() []", isOptional: true, description: "Metadata for each vector", }, { name: "namespace", type: "string", isOptional: true, description: "Optional namespace for organization", }, ]} /> ### query() ", isOptional: true, description: "Metadata filters for the query", }, ]} /> ### listIndexes() Returns an array of index names as strings. ### describeIndex() Returns: ```typescript copy interface IndexStats { dimension: number; count: number; metric: "cosine" | "euclidean" | "dotproduct"; } ``` ### deleteIndex() ## Response Types Query results are returned in this format: ```typescript copy interface QueryResult { id: string; score: number; metadata: Record; } ``` ## Error Handling The store throws typed errors that can be caught: ```typescript copy try { await store.query(queryVector); } catch (error) { if (error instanceof VectorStoreError) { console.log(error.code); // 'connection_failed' | 'invalid_dimension' | etc console.log(error.details); // Additional error context } } ``` ### Related - [Metadata Filters](./metadata-filters) ================================================================================ Source: src/pages/docs/reference/rag/rerank.mdx ================================================================================ --- title: "Reference: Rerank | Document Retrieval | RAG | Mastra Docs" description: Documentation for the rerank function in Mastra, which provides advanced reranking capabilities for vector search results. 
--- # rerank() The `rerank()` function provides advanced reranking capabilities for vector search results by combining semantic relevance, vector similarity, and position-based scoring. ```typescript function rerank( results: QueryResult[], query: string, modelConfig: ModelConfig, options?: RerankerFunctionOptions ): Promise<RerankResult[]> ``` ## Usage Example ```typescript import { rerank } from "@mastra/rag"; const rerankedResults = await rerank( vectorSearchResults, "How do I deploy to production?", { provider: "OPEN_AI", name: "gpt-4o-mini", }, { weights: { semantic: 0.5, vector: 0.3, position: 0.2 }, topK: 3 } ); ``` ## Parameters ### ModelConfig The rerank function accepts any standard Mastra `ModelConfig`. When using Cohere's provider with the model name `rerank-v3.5`, it will automatically use Cohere's reranking capabilities. A custom model implementation (a function that returns a `Promise`) can also be supplied as an optional property. ### RerankerFunctionOptions ## Returns The function returns an array of `RerankResult` objects: ### ScoringDetails ## Related - [createVectorQueryTool](../tools/vector-query-tool) ================================================================================ Source: src/pages/docs/reference/storage/mastra-storage.mdx ================================================================================ --- title: "MastraStorage Class Reference | Storage System | Mastra Core" description: Documentation for the MastraStorage class, the core interface for managing persistent data storage in Mastra applications. --- # MastraStorage The MastraStorage class provides a unified interface for persistent storage operations in Mastra applications. It handles storage of workflow states, conversation threads, messages, and evaluation data. ## Constructor Options ## Core Tables The storage system manages four primary tables, covering workflow snapshots, conversation threads, messages, and evaluation data.
## Methods ", description: "Initialize storage tables and connections", }, { name: "createTable(options)", type: "Promise", description: "Create a new table with specified schema", options: [ { name: "tableName", type: "TABLE_NAMES", description: "Name of the table to create", }, { name: "schema", type: "Record", description: "Schema definition for the table columns", } ], }, { name: "clearTable(options)", type: "Promise", description: "Clear all data from a table", options: [ { name: "tableName", type: "TABLE_NAMES", description: "Name of the table to clear", } ], }, { name: "insert(options)", type: "Promise", description: "Insert a record into a table", options: [ { name: "tableName", type: "TABLE_NAMES", description: "Target table name", }, { name: "record", type: "Record", description: "Record data to insert", } ], }, { name: "load(options)", type: "Promise", description: "Load a record from a table", options: [ { name: "tableName", type: "TABLE_NAMES", description: "Source table name", }, { name: "keys", type: "Record", description: "Key-value pairs to identify the record", } ], }, { name: "getThreadById(options)", type: "Promise", description: "Retrieve a thread by its ID", options: [ { name: "threadId", type: "string", description: "ID of the thread to retrieve", } ], }, { name: "saveThread(options)", type: "Promise", description: "Save or update a thread", options: [ { name: "thread", type: "StorageThreadType", description: "Thread data to save", } ], }, { name: "deleteThread(options)", type: "Promise", description: "Delete a thread and its messages", options: [ { name: "id", type: "string", description: "ID of the thread to delete", } ], }, { name: "getMessages(options)", type: "Promise", description: "Get all messages for a thread", options: [ { name: "threadId", type: "string", description: "ID of the thread to get messages for", } ], }, { name: "saveMessages(options)", type: "Promise", description: "Save multiple messages", options: [ { name: "messages", type: "MessageType[]", description: "Array of messages to save", } ], }, { name: "persistWorkflowSnapshot(options)", type: "Promise", description: "Save a workflow snapshot", options: [ { name: "workflowName", type: "string", description: "Name of the workflow", }, { name: "runId", type: "string", description: "ID of the workflow run", }, { name: "snapshot", type: "WorkflowRunState", description: "Workflow snapshot data", } ], }, { name: "loadWorkflowSnapshot(options)", type: "Promise", description: "Load a workflow snapshot", options: [ { name: "workflowName", type: "string", description: "Name of the workflow", }, { name: "runId", type: "string", description: "ID of the workflow run", } ], }, ]} /> ## Types ### StorageColumn Defines the schema for table columns: ```typescript interface StorageColumn { type: 'text' | 'timestamp'; primaryKey?: boolean; nullable?: boolean; } ``` ### StorageThreadType Represents a conversation thread: ```typescript interface StorageThreadType { id: string; title: string; metadata: Record; createdAt: Date; updatedAt: Date; } ``` ## Example Usage ```typescript copy filename=src/storage/index.ts import { MastraStorage } from "@mastra/core"; class MyStorage extends MastraStorage { constructor() { super({ name: 'my-storage' }); } async init() { // Initialize tables and set up connections await this.createTable({ tableName: 'threads', schema: { id: { type: 'text', primaryKey: true }, title: { type: 'text' }, metadata: { type: 'text' }, created_at: { type: 'timestamp' }, updated_at: { type: 'timestamp' 
}, } }); } // Implement other abstract methods... } const storage = new MyStorage(); await storage.init(); ``` For a concrete implementation, see the `MastraStorageLibSql` class which provides a SQLite-based storage solution. ================================================================================ Source: src/pages/docs/reference/tools/client.mdx ================================================================================ --- title: "Reference: MastraMCPClient | Tool Discovery | Mastra Docs" description: API Reference for MastraMCPClient - A client implementation for the Model Context Protocol. --- # MastraMCPClient The `MastraMCPClient` class provides a client implementation for interacting with Model Context Protocol (MCP) servers. It handles connection management, resource discovery, and tool execution through the MCP protocol. ## Constructor Creates a new instance of the MastraMCPClient. ```typescript constructor({ name, version = '1.0.0', server, capabilities = {}, }: { name: string; server: StdioServerParameters; capabilities?: ClientCapabilities; version?: string; }) ``` ### Parameters ## Methods ### connect() Establishes a connection with the MCP server. ```typescript async connect(): Promise ``` ### disconnect() Closes the connection with the MCP server. ```typescript async disconnect(): Promise ``` ### resources() Retrieves the list of available resources from the server. ```typescript async resources(): Promise ``` ### tools() Fetches and initializes available tools from the server, converting them into Mastra-compatible tool formats. ```typescript async tools(): Promise> ``` Returns an object mapping tool names to their corresponding Mastra tool implementations. ## Examples ### Using with Mastra Agent ```typescript import { Agent } from '@mastra/core'; import { MastraMCPClient } from '@mastra/mcp-client'; // Initialize the EverArt MCP client const everArtClient = new MastraMCPClient({ name: 'everart', server: { command: '/usr/local/bin/docker', args: ['run', '-i', '--rm', '--network=host', '-e', 'EVERART_API_KEY', 'mcp/everart'], env: { EVERART_API_KEY: process.env.EVERART_API_KEY!, }, }, }); // Create a Mastra Agent const agent = new Agent({ name: 'everart', instructions: 'You are my artist. Include the url in your response.', model: { provider: 'ANTHROPIC', name: 'claude-3-5-sonnet-20241022', toolChoice: 'auto', }, }); // Example usage in an async function async function main() { try { // Connect to the MCP server await everArtClient.connect(); // Get available tools const tools = await everArtClient.tools(); // Use the agent with the MCP tools const response = await agent.generate('Can you make me a picture of a dog?', { toolsets: { everart: tools, }, }); console.log(response.text); } catch (error) { console.error('Error:', error); } finally { // Always disconnect when done await everArtClient.disconnect(); } } ``` ## Related Information - For more details about the Model Context Protocol, see the [@modelcontextprotocol/sdk documentation](https://github.com/modelcontextprotocol/typescript-sdk). ================================================================================ Source: src/pages/docs/reference/tools/document-chunker-tool.mdx ================================================================================ --- title: "Reference: createDocumentChunkerTool() | Tools | Mastra Docs" description: Documentation for the Document Chunker Tool in Mastra, which splits documents into smaller chunks for efficient processing and retrieval. 
--- # createDocumentChunkerTool() The `createDocumentChunkerTool()` function creates a tool for splitting documents into smaller chunks for efficient processing and retrieval. It supports different chunking strategies and configurable parameters. ## Basic Usage ```typescript import { createDocumentChunkerTool, MDocument } from "@mastra/rag"; const document = new MDocument({ text: "Your document content here...", metadata: { source: "user-manual" } }); const chunker = createDocumentChunkerTool({ doc: document, params: { strategy: "recursive", size: 512, overlap: 50, separator: "\n" } }); const { chunks } = await chunker.execute(); ``` ## Parameters ### ChunkParams ## Returns ## Example with Custom Parameters ```typescript const technicalDoc = new MDocument({ text: longDocumentContent, metadata: { type: "technical", version: "1.0" } }); const chunker = createDocumentChunkerTool({ doc: technicalDoc, params: { strategy: "recursive", size: 1024, // Larger chunks overlap: 100, // More overlap separator: "\n\n" // Split on double newlines } }); const { chunks } = await chunker.execute(); // Process the chunks chunks.forEach((chunk, index) => { console.log(`Chunk ${index + 1} length: ${chunk.content.length}`); }); ``` ## Tool Details The chunker is created as a Mastra tool with the following properties: - **Tool ID**: `Document Chunker {strategy} {size}` - **Description**: `Chunks document using {strategy} strategy with size {size} and {overlap} overlap` - **Input Schema**: Empty object (no additional inputs required) - **Output Schema**: Object containing the chunks array ## Related - [MDocument](../rag/document.mdx) - [createVectorQueryTool](./vector-query-tool) ================================================================================ Source: src/pages/docs/reference/tools/graph-rag-tool.mdx ================================================================================ --- title: "Reference: createGraphRAGTool() | RAG | Mastra Tools Docs" description: Documentation for the Graph RAG Tool in Mastra, which enhances RAG by building a graph of semantic relationships between documents. --- # createGraphRAGTool() The `createGraphRAGTool()` creates a tool that enhances RAG by building a graph of semantic relationships between documents. It uses the `GraphRAG` system under the hood to provide graph-based retrieval, finding relevant content through both direct similarity and connected relationships. 
## Usage Example ```typescript import { createGraphRAGTool } from "@mastra/rag"; const graphTool = createGraphRAGTool({ vectorStoreName: "pinecone", indexName: "docs", options: { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3 }, topK: 5, graphOptions: { dimension: 1536, threshold: 0.7, randomWalkSteps: 100, restartProb: 0.15 } }); ``` ## Parameters ### GraphOptions ## Returns The tool returns an object with: ## Advanced Example ```typescript const graphTool = createGraphRAGTool({ vectorStoreName: "pinecone", indexName: "docs", options: { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3 }, topK: 5, graphOptions: { dimension: 1536, threshold: 0.8, // Higher similarity threshold randomWalkSteps: 200, // More exploration steps restartProb: 0.2 // Higher restart probability } }); ``` ## Related - [createVectorQueryTool](./vector-query-tool) - [GraphRAG](../rag/graph-rag) ================================================================================ Source: src/pages/docs/reference/tools/vector-query-tool.mdx ================================================================================ --- title: "Reference: createVectorQueryTool() | RAG | Mastra Tools Docs" description: Documentation for the Vector Query Tool in Mastra, which facilitates semantic search over vector stores with filtering and reranking capabilities. --- # createVectorQueryTool() The `createVectorQueryTool()` function creates a tool for semantic search over vector stores. It supports filtering, reranking, and integrates with various vector store backends. ## Basic Usage ```typescript import { createVectorQueryTool } from "@mastra/rag"; const queryTool = createVectorQueryTool({ vectorStoreName: "pinecone", indexName: "docs", options: { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3 } }); ``` ## Parameters ### RerankConfig ## Returns The tool returns an object with: ## Example with Filters ```typescript // Pinecone/PG/Astra const queryTool = createVectorQueryTool({ vectorStoreName: "pinecone", indexName: "docs", options: { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3 }, vectorFilterType: "pinecone", topK: 5 }); ``` Filter Formats: - Pinecone/PG/Astra: `{ category: { eq: "technical" } }` ## Example with Reranking ```typescript const queryTool = createVectorQueryTool({ vectorStoreName: "milvus", indexName: "documentation", options: { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3 }, topK: 5, reranker: { model: { provider: "OPEN_AI", name: "gpt-4o-mini", }, } }); ``` ## Tool Details The tool is created with: - **ID**: `VectorQuery {vectorStoreName} {indexName} Tool` - **Description**: `Fetches and combines the top {topK} relevant chunks from the {vectorStoreName} vector store using the {indexName} index` - **Input Schema**: Requires queryText and filter objects - **Output Schema**: Returns relevantContext string ## Related - [rerank()](../rag/rerank) - [createGraphRAGTool](./graph-rag-tool) ================================================================================ Source: src/pages/docs/reference/tts/generate.mdx ================================================================================ --- title: "Reference: TTS.generate() | Text to Speech (TTS) | Mastra Docs" description: "Documentation for the `.generate()` method in TTS, which produces an audio response." --- # TTS.generate() The `.generate()` method is used to interact with the TTS model to produce an audio response. 
This method accepts `text` and `voice` as parameters.

## Parameters

## Returns

## Examples

### Basic Audio Generation (ElevenLabs)

```typescript
import { writeFile } from "fs/promises";
import path from "path";
import { ElevenLabsTTS } from "@mastra/speech-elevenlabs";

const tts = new ElevenLabsTTS({
  model: {
    name: "eleven_multilingual_v2",
    apiKey: process.env.ELEVENLABS_API_KEY!,
  },
});

const voices = await tts.voices();
const voiceId = voices?.[0]?.voice_id!;

const { audioResult } = await tts.generate({
  text: "What is AI?",
  voice: voiceId,
});

await writeFile(
  path.join(process.cwd(), "/test-outputs/generate-output.mp3"),
  audioResult,
);
```

### Basic Audio Generation (OpenAI)

```typescript
import { writeFileSync } from "fs";
import path from "path";
import { OpenAITTS } from "@mastra/speech-openai";

const tts = new OpenAITTS({
  model: {
    name: "tts-1",
    apiKey: process.env.OPENAI_API_KEY!,
  },
});

const voices = await tts.voices();
const voiceId = voices?.[0]?.voice_id!;

const { audioResult } = await tts.generate({
  text: "What is AI?",
  voice: voiceId,
});

const outputPath = path.join(
  process.cwd(),
  "test-outputs/openai-generate-test.mp3",
);
writeFileSync(outputPath, audioResult);
```

### Basic Audio Generation (PlayAI)

```typescript
import { writeFileSync } from "fs";
import path from "path";
import { PlayAITTS } from "@mastra/speech-playai";

const tts = new PlayAITTS({
  model: {
    name: "PlayDialog",
    apiKey: process.env.PLAYAI_API_KEY!,
  },
  userId: process.env.PLAYAI_USER_ID!,
});

const voices = await tts.voices();
const voiceId = voices?.[0]?.voice_id!;

const { audioResult } = await tts.generate({
  text: "What is AI?",
  voice: voiceId,
});

const outputPath = path.join(
  process.cwd(),
  "test-outputs/playai-generate-test.mp3",
);
writeFileSync(outputPath, audioResult);
```

### Azure Generation

```typescript
import { writeFile } from "fs/promises";
import path from "path";
import { AzureTTS } from "@mastra/speech-azure";

const tts = new AzureTTS({
  model: {
    name: "en-US-JennyNeural",
    apiKey: process.env.AZURE_API_KEY,
    region: process.env.AZURE_REGION,
  },
});

const { audioResult } = await tts.generate({ text: "What is AI?" });

await writeFile(
  path.join(process.cwd(), "/test-outputs/azure-output.mp3"),
  audioResult,
);
```

### Deepgram Generation

```typescript
import { writeFile } from "fs/promises";
import path from "path";
import { DeepgramTTS } from "@mastra/speech-deepgram";

const tts = new DeepgramTTS({
  model: {
    name: "aura",
    voice: "asteria-en",
    apiKey: process.env.DEEPGRAM_API_KEY,
  },
});

const { audioResult } = await tts.generate({ text: "What is AI?" });

await writeFile(
  path.join(process.cwd(), "/test-outputs/deepgram-output.mp3"),
  audioResult,
);
```

### Google Generation

```typescript
import { writeFile } from "fs/promises";
import path from "path";
import { GoogleTTS } from "@mastra/speech-google";

const tts = new GoogleTTS({
  model: {
    name: "en-US-Standard-A",
    credentials: process.env.GOOGLE_CREDENTIALS,
  },
});

const { audioResult } = await tts.generate({ text: "What is AI?" });

await writeFile(
  path.join(process.cwd(), "/test-outputs/google-output.mp3"),
  audioResult,
);
```

### IBM Generation

```typescript
import { writeFile } from "fs/promises";
import path from "path";
import { IbmTTS } from "@mastra/speech-ibm";

const tts = new IbmTTS({
  model: {
    voice: "en-US_AllisonV3Voice",
    apiKey: process.env.IBM_API_KEY,
  },
});

const { audioResult } = await tts.generate({ text: "What is AI?" });

await writeFile(
  path.join(process.cwd(), "/test-outputs/ibm-output.mp3"),
  audioResult,
);
```

### Murf Generation

```typescript
import { writeFile } from "fs/promises";
import path from "path";
import { MurfTTS } from "@mastra/speech-murf";

const tts = new MurfTTS({
  model: {
    name: "GEN2",
    voice: "en-US-natalie",
    apiKey: process.env.MURF_API_KEY,
  },
});

const { audioResult } = await tts.generate({
  text: "What is AI?"
}); await writeFile( path.join(process.cwd(), "/test-outputs/murf-output.mp3"), audioResult, ); ``` ## Related Methods For streaming audio responses, see the [`stream()`](./stream.mdx) method documentation. ================================================================================ Source: src/pages/docs/reference/tts/providers-and-models.mdx ================================================================================ --- title: "Reference: Providers and Models | TTS | Mastra Docs" description: Overview of supported TTS providers and their models. --- # Providers and Models ## Most popular providers | Provider | Supported Models | | ---------- | ----------------------------------------------------------------------------------------------------------------------- | | ElevenLabs | `eleven_multilingual_v2`, `eleven_flash_v2_5`, `eleven_flash_v2`, `eleven_multilingual_sts_v2`, `eleven_english_sts_v2` | | OpenAI | `tts-1`, `tts-1-hd` | | PlayAI | `PlayDialog`, `Play3.0-mini` | | Azure | Various voices available through Azure Cognitive Services | | Deepgram | `aura` and other models with voice options like `asteria-en` | | Google | Various voices through Google Cloud Text-to-Speech | | IBM | Various voices including `en-US_AllisonV3Voice` | | Murf | `GEN1`, `GEN2` with various voices like `en-US-natalie` | ## Configuration Each provider requires specific configuration. Here are examples for each provider: ### ElevenLabs Configuration ```typescript import { ElevenLabsTTS } from "@mastra/speech-elevenlabs"; const tts = new ElevenLabsTTS({ model: { name: "eleven_multilingual_v2", apiKey: process.env.ELEVENLABS_API_KEY, }, }); ``` ### OpenAI Configuration ```typescript import { OpenAITTS } from "@mastra/speech-openai"; const tts = new OpenAITTS({ model: { name: "tts-1", // or 'tts-1-hd' for higher quality apiKey: process.env.OPENAI_API_KEY, }, }); ``` ### PlayAI Configuration ```typescript import { PlayAITTS } from "@mastra/speech-playai"; const tts = new PlayAITTS({ model: { name: "PlayDialog", // or 'Play3.0-mini' apiKey: process.env.PLAYAI_API_KEY, }, userId: process.env.PLAYAI_USER_ID, }); ``` ### Azure Configuration ```typescript import { AzureTTS } from "@mastra/speech-azure"; const tts = new AzureTTS({ model: { name: "en-US-JennyNeural", apiKey: process.env.AZURE_API_KEY, region: process.env.AZURE_REGION, }, }); ``` ### Deepgram Configuration ```typescript import { DeepgramTTS } from "@mastra/speech-deepgram"; const tts = new DeepgramTTS({ model: { name: "aura", voice: "asteria-en", apiKey: process.env.DEEPGRAM_API_KEY, }, }); ``` ### Google Configuration ```typescript const tts = new GoogleTTS({ model: { name: "en-US-Standard-A", credentials: process.env.GOOGLE_CREDENTIALS, }, }); ``` ### IBM Configuration ```typescript const tts = new IbmTTS({ model: { voice: "en-US_AllisonV3Voice", apiKey: process.env.IBM_API_KEY, }, }); ``` ### Murf Configuration ```typescript const tts = new MurfTTS({ model: { name: "GEN2", voice: "en-US-natalie", apiKey: process.env.MURF_API_KEY, }, }); ``` ================================================================================ Source: src/pages/docs/reference/tts/stream.mdx ================================================================================ --- title: "Reference: TTS.stream() | Text to Speech (TTS) | Mastra Docs" description: "Documentation for the `.stream()` method in TTS, which produces an audio response stream." --- # TTS.stream() The `stream()` method is used to interact with the TTS model to produce an audio response stream. 
This method accepts `text` and `voice` as parameters. ## Parameters ## Returns ## Examples ### ElevenLabs Streaming ```typescript import { ElevenLabsTTS } from "@mastra/speech-elevenlabs"; const tts = new ElevenLabsTTS({ model: { name: "eleven_multilingual_v2", apiKey: process.env.ELEVENLABS_API_KEY!, }, }); const voices = await tts.voices(); const voiceId = voices?.[0]?.voice_id!; const { audioResult } = await tts.stream({ text: "What is AI?", voice: voiceId, }); // Create a write stream to simulate real-time playback const outputPath = path.join( process.cwd(), "/test-outputs/streaming-output.mp3", ); const writeStream = createWriteStream(outputPath); let firstChunkTime: number | null = null; let lastChunkTime: number | null = null; let totalChunks = 0; // Process chunks as they arrive for await (const chunk of audioResult) { if (!firstChunkTime) { firstChunkTime = Date.now(); } lastChunkTime = Date.now(); totalChunks++; // Write chunk immediately as it arrives writeStream.write(chunk); // Log timing of chunk arrival console.log( `Received chunk ${totalChunks} at ${lastChunkTime - firstChunkTime!}ms`, ); } writeStream.end(); ``` ### OpenAI Streaming ```typescript import { OpenAITTS } from "@mastra/speech-openai"; const tts = new OpenAITTS({ model: { name: "tts-1", apiKey: process.env.OPENAI_API_KEY!, }, }); const voices = await tts.voices(); const voiceId = voices?.[0]?.voice_id!; const { audioResult } = await tts.stream({ text: "What is AI?", voice: voiceId, }); // Create a write stream to simulate real-time playback const outputPath = path.join( process.cwd(), "/test-outputs/streaming-output.mp3", ); const writeStream = createWriteStream(outputPath); let firstChunkTime: number | null = null; let lastChunkTime: number | null = null; let totalChunks = 0; // Process chunks as they arrive for await (const chunk of audioResult) { if (!firstChunkTime) { firstChunkTime = Date.now(); } lastChunkTime = Date.now(); totalChunks++; // Write chunk immediately as it arrives writeStream.write(chunk); // Log timing of chunk arrival console.log( `Received chunk ${totalChunks} at ${lastChunkTime - firstChunkTime!}ms`, ); } writeStream.end(); ``` ### PlayAI Streaming ```typescript import { PlayAITTS } from "@mastra/speech-playai"; const tts = new PlayAITTS({ model: { name: "PlayDialog", apiKey: process.env.PLAYAI_API_KEY!, }, userId: process.env.PLAYAI_USER_ID!, }); const voices = await tts.voices(); const voiceId = voices?.[0]?.voice_id!; const { audioResult } = await tts.stream({ text: "What is AI?", voice: voiceId, }); // Create a write stream to simulate real-time playback const outputPath = path.join( process.cwd(), "/test-outputs/streaming-output.mp3", ); const writeStream = createWriteStream(outputPath); let firstChunkTime: number | null = null; let lastChunkTime: number | null = null; let totalChunks = 0; // Process chunks as they arrive for await (const chunk of audioResult) { if (!firstChunkTime) { firstChunkTime = Date.now(); } lastChunkTime = Date.now(); totalChunks++; // Write chunk immediately as it arrives writeStream.write(chunk); // Log timing of chunk arrival console.log( `Received chunk ${totalChunks} at ${lastChunkTime - firstChunkTime!}ms`, ); } writeStream.end(); ``` ### Azure Streaming ```typescript import { AzureTTS } from "@mastra/speech-azure"; const tts = new AzureTTS({ model: { name: "en-US-JennyNeural", apiKey: process.env.AZURE_API_KEY, region: process.env.AZURE_REGION, }, }); const { audioResult } = await tts.stream({ text: "What is AI?" 
}); // Create a write stream const outputPath = path.join(process.cwd(), "/test-outputs/azure-stream.mp3"); const writeStream = createWriteStream(outputPath); // Pipe the audio stream to the file audioResult.pipe(writeStream); ``` ### Deepgram Streaming ```typescript import { DeepgramTTS } from "@mastra/speech-deepgram"; const tts = new DeepgramTTS({ model: { name: "aura", voice: "asteria-en", apiKey: process.env.DEEPGRAM_API_KEY, }, }); const { audioResult } = await tts.stream({ text: "What is AI?" }); // Create a write stream const outputPath = path.join( process.cwd(), "/test-outputs/deepgram-stream.mp3", ); const writeStream = createWriteStream(outputPath); // Pipe the audio stream to the file audioResult.pipe(writeStream); ``` ### Google Streaming ```typescript import { GoogleTTS } from "@mastra/speech-google"; const tts = new GoogleTTS({ model: { name: "en-US-Standard-A", credentials: process.env.GOOGLE_CREDENTIALS, }, }); const { audioResult } = await tts.stream({ text: "What is AI?" }); // Create a write stream const outputPath = path.join(process.cwd(), "/test-outputs/google-stream.mp3"); const writeStream = createWriteStream(outputPath); // Pipe the audio stream to the file audioResult.pipe(writeStream); ``` ### IBM Streaming ```typescript import { IbmTTS } from "@mastra/speech-ibm"; const tts = new IbmTTS({ model: { voice: "en-US_AllisonV3Voice", apiKey: process.env.IBM_API_KEY, }, }); const { audioResult } = await tts.stream({ text: "What is AI?" }); // Create a write stream const outputPath = path.join(process.cwd(), "/test-outputs/ibm-stream.mp3"); const writeStream = createWriteStream(outputPath); // Pipe the audio stream to the file audioResult.pipe(writeStream); ``` ### Murf Streaming ```typescript import { MurfTTS } from "@mastra/speech-murf"; const tts = new MurfTTS({ model: { name: "GEN2", voice: "en-US-natalie", apiKey: process.env.MURF_API_KEY, }, }); const { audioResult } = await tts.stream({ text: "What is AI?" }); // Create a write stream const outputPath = path.join(process.cwd(), "/test-outputs/murf-stream.mp3"); const writeStream = createWriteStream(outputPath); // Pipe the audio stream to the file audioResult.pipe(writeStream); ``` ================================================================================ Source: src/pages/docs/reference/workflows/after.mdx ================================================================================ --- title: "Reference: .after() | Building Workflows | Mastra Docs" description: Documentation for the `after()` method in workflows, enabling branching and merging paths. --- # .after() The `.after()` method defines explicit dependencies between workflow steps, enabling branching and merging paths in your workflow execution. ## Usage ```typescript workflow .step(stepA) .then(stepB) .after(stepA) // Create new branch after stepA completes .step(stepC); ``` ## Parameters ## Returns ## Related - [Branching Paths example](../../../examples/workflows/branching-paths.mdx) - [Workflow Class Reference](./workflow.mdx) - [Step Reference](./step-class.mdx) - [Control Flow Guide](../../workflows/control-flow.mdx) ================================================================================ Source: src/pages/docs/reference/workflows/commit.mdx ================================================================================ --- title: "Reference: Workflow.commit() | Running Workflows | Mastra Docs" description: Documentation for the `.commit()` method in workflows, which finalizes and validates workflow definitions. 
--- # Workflow.commit() The `.commit()` method finalizes a workflow definition, validating its structure and making it ready for execution. When calling `.commit()`, the workflow validates: - No circular dependencies between steps - All paths must have an end point - No unreachable steps - No duplicate step IDs - Variable references to non-existent steps ## Usage ```typescript workflow .step(stepA) .then(stepB) .commit(); ``` ## Returns ## Error Handling ```typescript try { workflow .step(stepA) .after(['stepB', 'stepC']) .step(stepD) .commit(); } catch (error) { if (error instanceof ValidationError) { console.log(error.type); // 'circular_dependency' | 'no_terminal_path' | 'unreachable_step' console.log(error.details); } } ``` ### Validation Error Types ## Related - [Branching Paths example](../../../examples/workflows/branching-paths.mdx) - [Workflow Class Reference](./workflow.mdx) - [Step Reference](./step-class.mdx) - [Control Flow Guide](../../workflows/control-flow.mdx) ``` ================================================================================ Source: src/pages/docs/reference/workflows/createRun.mdx ================================================================================ --- title: "Reference: Workflow.createRun() | Running Workflows | Mastra Docs" description: "Documentation for the `.createRun()` method in workflows, which initializes a new workflow run instance." --- # Workflow.createRun() The `.createRun()` method initializes a new workflow run instance. It generates a unique run ID for tracking and returns a start function that begins workflow execution when called. One reason to use `.createRun()` vs `.execute()` is to get a unique run ID for tracking, logging, or subscribing via `.watch()`. ## Usage ```typescript const { runId, start } = workflow.createRun({ triggerData: { inputValue: 42 }, metadata: { requestId: "abc-123" } }); const result = await start(); ``` ## Parameters ### config ", description: "Initial data passed to trigger the workflow execution", isOptional: true }, { name: "metadata", type: "Record", description: "Additional metadata to associate with this run", isOptional: true } ]} /> ## Returns Promise", description: "Function that begins workflow execution when called" } ]} /> ## Error Handling The start function may throw validation errors if the workflow configuration is invalid: ```typescript try { const { runId, start } = workflow.createRun(); await start({ triggerData: data }); } catch (error) { if (error instanceof ValidationError) { // Handle validation errors console.log(error.type); // 'circular_dependency' | 'no_terminal_path' | 'unreachable_step' console.log(error.details); } } ``` ## Related - [Workflow Class Reference](./workflow.mdx) - [Step Class Reference](./step-class.mdx) - See the [Creating a Workflow](../../../examples/workflows/creating-a-workflow.mdx) example for complete usage ``` ================================================================================ Source: src/pages/docs/reference/workflows/execute.mdx ================================================================================ --- title: "Reference: Workflow.execute() | Workflows | Mastra Docs" description: "Documentation for the `.execute()` method in Mastra workflows, which runs workflow steps and returns results." --- # Workflow.execute() Executes a workflow with the provided trigger data and returns the results. The workflow must be committed before execution. 
## Usage Example ```typescript const workflow = new Workflow({ name: "my-workflow", triggerSchema: z.object({ inputValue: z.number() }) }); workflow.step(stepOne).then(stepTwo).commit(); const result = await workflow.execute({ triggerData: { inputValue: 42 } }); ``` ## Parameters ## Returns ", description: "Results from each completed step" }, { name: "status", type: "WorkflowStatus", description: "Final status of the workflow run" } ] } ]} /> ## Additional Examples Execute with run ID: ```typescript const result = await workflow.execute({ runId: "custom-run-id", triggerData: { inputValue: 42 } }); ``` Handle execution results: ```typescript const { runId, results, status } = await workflow.execute({ triggerData: { inputValue: 42 } }); if (status === "COMPLETED") { console.log("Step results:", results); } ``` ### Related - [Workflow.createRun()](./createRun.mdx) - [Workflow.commit()](./commit.mdx) - [Workflow.start()](./start.mdx) ================================================================================ Source: src/pages/docs/reference/workflows/resume.mdx ================================================================================ --- title: "Reference: Workflow.resume() | Running Workflows | Mastra Docs" description: Documentation for the `.resume()` method in workflows, which continues execution of a suspended workflow step. --- # Workflow.resume() The `.resume()` method continues execution of a suspended workflow step, optionally providing new context data that will be merged with existing step results. ## Usage ```typescript copy showLineNumbers await workflow.resume({ runId: "abc-123", stepId: "stepTwo", context: { secondValue: 100 } }); ``` ## Parameters ### config ", description: "New context data to merge with existing step results", isOptional: true } ]} /> ## Returns ", type: "object", description: "Result of the resumed workflow execution" } ]} /> ## Error Handling The resume function may throw several types of errors: ```typescript try { await workflow.resume({ runId, stepId: "stepTwo", context: newData }); } catch (error) { if (error.message === "No snapshot found for workflow run") { // Handle missing workflow state } if (error.message === "Failed to parse workflow snapshot") { // Handle corrupted workflow state } } ``` ## Related - [Suspend and Resume](../../../examples/workflows/suspend-and-resume.mdx) {/*- [`suspend` Reference](./suspend.mdx)*/} - [`watch` Reference](./watch.mdx) - [Workflow Class Reference](./workflow.mdx) ``` ================================================================================ Source: src/pages/docs/reference/workflows/start.mdx ================================================================================ --- title: "Reference: start() | Running Workflows | Mastra Docs" description: "Documentation for the `start()` method in workflows, which begins execution of a workflow run." --- # start() The start function begins execution of a workflow run. It processes all steps in the defined workflow order, handling parallel execution, branching logic, and step dependencies. 
## Usage ```typescript copy showLineNumbers const { runId, start } = workflow.createRun(); const result = await start({ triggerData: { inputValue: 42 } }); ``` ## Parameters ### config ", description: "Initial data that matches the workflow's triggerSchema", isOptional: false } ]} /> ## Returns ", description: "Combined output from all completed workflow steps" }, { name: "status", type: "'completed' | 'error' | 'suspended'", description: "Final status of the workflow run" } ]} /> ## Error Handling The start function may throw several types of validation errors: ```typescript copy showLineNumbers try { const result = await start({ triggerData: data }); } catch (error) { if (error instanceof ValidationError) { console.log(error.type); // 'circular_dependency' | 'no_terminal_path' | 'unreachable_step' console.log(error.details); } } ``` ## Related - [Example: Creating a Workflow](../../../examples/workflows/creating-a-workflow.mdx) - [Example: Suspend and Resume](../../../examples/workflows/suspend-and-resume.mdx) - [createRun Reference](./createRun.mdx) - [Workflow Class Reference](./workflow.mdx) - [Step Class Reference](./step-class.mdx) ``` ================================================================================ Source: src/pages/docs/reference/workflows/step-class.mdx ================================================================================ --- title: "Reference: Step | Building Workflows | Mastra Docs" description: Documentation for the Step class, which defines individual units of work within a workflow. --- # Step The Step class defines individual units of work within a workflow, encapsulating execution logic, data validation, and input/output handling. ## Usage ```typescript const processOrder = new Step({ id: "processOrder", inputSchema: z.object({ orderId: z.string(), userId: z.string() }), outputSchema: z.object({ status: z.string(), orderId: z.string() }), execute: async ({ context, runId }) => { return { status: "processed", orderId: context.orderId }; } }); ``` ## Constructor Parameters ", description: "Static data to be merged with variables", required: false }, { name: "execute", type: "(params: ExecuteParams) => Promise", description: "Async function containing step logic", required: true } ]} /> ### ExecuteParams Promise", description: "Function to suspend step execution" } ]} /> ## Related - [Workflow Reference](./workflow.mdx) - [Step Configuration Guide](../../workflows/steps.mdx) - [Control Flow Guide](../../workflows/control-flow.mdx) ``` ================================================================================ Source: src/pages/docs/reference/workflows/step-condition.mdx ================================================================================ --- title: "Reference: StepCondition | Building Workflows | Mastra" description: Documentation for the step condition class in workflows, which determines whether a step should execute based on the output of previous steps or trigger data. --- # StepCondition Conditions determine whether a step should execute based on the output of previous steps or trigger data. ## Usage There are three ways to specify conditions: function, query object, and simple path comparison. ### 1. Function Condition ```typescript copy showLineNumbers workflow.step(processOrder, { when: async ({ context }) => { const auth = context?.getStepPayload<{status: string}>("auth"); return auth?.status === "authenticated"; } }); ``` ### 2. 
Query Object ```typescript copy showLineNumbers workflow.step(processOrder, { when: { ref: { step: 'auth', path: 'status' }, query: { $eq: 'authenticated' } } }); ``` ### 3. Simple Path Comparison ```typescript copy showLineNumbers workflow.step(processOrder, { when: { "auth.status": "authenticated" } }); ``` Based on the type of condition, the workflow runner will try to match the condition to one of these types. 1. Simple Path Condition (when there's a dot in the key) 2. Base/Query Condition (when there's a 'ref' property) 3. Function Condition (when it's an async function) ## StepCondition ", description: "MongoDB-style query using sift operators ($eq, $gt, etc)", isOptional: false } ]} /> ## Query The Query object provides MongoDB-style query operators for comparing values from previous steps or trigger data. It supports basic comparison operators like `$eq`, `$gt`, `$lt` as well as array operators like `$in` and `$nin`, and can be combined with and/or operators for complex conditions. This query syntax allows for readable conditional logic for determining whether a step should execute. ## Related - [Step Options Reference](./step-options.mdx) - [Step Function Reference](./step-function.mdx) - [Control Flow Guide](../../workflows/control-flow.mdx) ``` ================================================================================ Source: src/pages/docs/reference/workflows/step-function.mdx ================================================================================ --- title: "Reference: Workflow.step() | Workflows | Mastra Docs" description: Documentation for the `.step()` method in workflows, which adds a new step to the workflow. --- # Workflow.step() The `.step()` method adds a new step to the workflow, optionally configuring its variables and execution conditions. ## Usage ```typescript workflow.step({ id: "stepTwo", outputSchema: z.object({ result: z.number() }), execute: async ({ context }) => { return { result: 42 }; } }); ``` ## Parameters ### StepDefinition Promise", description: "Function containing step logic", isOptional: false } ]} /> ### StepOptions ", description: "Map of variable names to their source references", isOptional: true }, { name: "when", type: "StepCondition", description: "Condition that must be met for step to execute", isOptional: true } ]} /> ## Related - [Basic Usage with Step Instance](../../workflows/steps.mdx) - [Step Class Reference](./step-class.mdx) - [Workflow Class Reference](./workflow.mdx) - [Control Flow Guide](../../workflows/control-flow.mdx) ``` ================================================================================ Source: src/pages/docs/reference/workflows/step-options.mdx ================================================================================ --- title: "Reference: StepOptions | Building Workflows | Mastra Docs" description: Documentation for the step options in workflows, which control variable mapping, execution conditions, and other runtime behavior. --- # StepOptions Configuration options for workflow steps that control variable mapping, execution conditions, and other runtime behavior. 
## Usage ```typescript workflow.step(processOrder, { variables: { orderId: { step: 'trigger', path: 'id' }, userId: { step: 'auth', path: 'user.id' } }, when: { ref: { step: 'auth', path: 'status' }, query: { $eq: 'authenticated' } } }); ``` ## Properties ", description: "Maps step input variables to values from other steps", isOptional: true }, { name: "when", type: "StepCondition", description: "Condition that must be met for step execution", isOptional: true } ]} /> ### VariableRef ## Related - [Path Comparison](../../workflows/control-flow.mdx#path-comparison) - [Step Function Reference](./step-function.mdx) - [Step Class Reference](./step-class.mdx) - [Workflow Class Reference](./workflow.mdx) - [Control Flow Guide](../../workflows/control-flow.mdx) ``` ================================================================================ Source: src/pages/docs/reference/workflows/suspend.mdx ================================================================================ --- title: "Reference: suspend() | Control Flow | Mastra Docs" description: "Documentation for the suspend function in Mastra workflows, which pauses execution until resumed." --- # suspend() Pauses workflow execution at the current step until explicitly resumed. The workflow state is persisted and can be continued later. ## Usage Example ```typescript const approvalStep = new Step({ id: "needsApproval", execute: async ({ context, suspend }) => { if (context.stepResults.amount > 1000) { await suspend(); } return { approved: true }; } }); ``` ## Parameters ", description: "Optional data to store with the suspended state", isOptional: true } ]} /> ## Returns ", type: "Promise", description: "Resolves when the workflow is successfully suspended" } ]} /> ## Additional Examples Suspend with metadata: ```typescript const reviewStep = new Step({ id: "review", execute: async ({ context, suspend }) => { await suspend({ reason: "Needs manager approval", requestedBy: context.user }); return { reviewed: true }; } }); ``` Monitor suspended state: ```typescript workflow.watch((state) => { if (state.status === "SUSPENDED") { notifyReviewers(state.metadata); } }); ``` ### Related - [Suspend & Resume Workflows](../../workflows/suspend-and-resume.mdx) - [.resume()](./resume.mdx) - [.watch()](./watch.mdx) ================================================================================ Source: src/pages/docs/reference/workflows/then.mdx ================================================================================ --- title: "Reference: Workflow.then() | Building Workflows | Mastra Docs" description: Documentation for the `.then()` method in workflows, which creates sequential dependencies between steps. --- # Workflow.then() The `.then()` method creates a sequential dependency between workflow steps, ensuring steps execute in a specific order. 
## Usage ```typescript workflow .step(stepOne) .then(stepTwo) .then(stepThree); ``` ## Parameters ## Returns ## Validation When using `then`: - The previous step must exist in the workflow - Steps cannot form circular dependencies - Each step can only appear once in a sequential chain ## Error Handling ```typescript try { workflow .step(stepA) .then(stepB) .then(stepA) // Will throw error - circular dependency .commit(); } catch (error) { if (error instanceof ValidationError) { console.log(error.type); // 'circular_dependency' console.log(error.details); } } ``` ## Related - [step Reference](./step-class.mdx) - [after Reference](./after.mdx) - [Sequential Steps Example](../../../examples/workflows/sequential-steps.mdx) - [Control Flow Guide](../../workflows/control-flow.mdx) ``` ================================================================================ Source: src/pages/docs/reference/workflows/watch.mdx ================================================================================ --- title: "Reference: Workflow.watch() | Workflows | Mastra Docs" description: Documentation for the `.watch()` method in workflows, which monitors the status of a workflow run. --- # Workflow.watch() The `.watch()` function subscribes to state changes in a Mastra workflow, allowing you to monitor execution progress and react to state updates. ## Usage Example ```typescript import { Workflow } from "@mastra/core"; const workflow = new Workflow({ name: "document-processor" }); // Subscribe to state changes const unsubscribe = workflow.watch((state) => { console.log('Current step:', state.currentStep); console.log('Step outputs:', state.stepOutputs); }); // Run the workflow await workflow.run({ input: { text: "Process this document" } }); // Stop watching unsubscribe(); ``` ## Parameters void", description: "Function called whenever the workflow state changes", isOptional: false } ]} /> ### WorkflowState Properties ", description: "Outputs from completed workflow steps", isOptional: false }, { name: "status", type: "'running' | 'completed' | 'failed'", description: "Current status of the workflow", isOptional: false }, { name: "error", type: "Error | null", description: "Error object if workflow failed", isOptional: true } ]} /> ## Returns void", description: "Function to stop watching workflow state changes" } ]} /> ## Additional Examples Monitor specific step completion: ```typescript workflow.watch((state) => { if (state.currentStep === 'processDocument') { console.log('Document processing output:', state.stepOutputs.processDocument); } }); ``` Error handling: ```typescript workflow.watch((state) => { if (state.status === 'failed') { console.error('Workflow failed:', state.error); // Implement error recovery logic } }); ``` ### Related - [Workflow Creation](/docs/reference/workflows/createRun) - [Step Configuration](/docs/reference/workflows/step-class) ================================================================================ Source: src/pages/docs/reference/workflows/workflow.mdx ================================================================================ --- title: "Reference: Workflow Class | Building Workflows | Mastra Docs" description: Documentation for the Workflow class in Mastra, which enables you to create state machines for complex sequences of operations with conditional branching and data validation. --- # Workflow Class The Workflow class enables you to create state machines for complex sequences of operations with conditional branching and data validation. 
```ts copy import { Workflow } from "@mastra/core"; const workflow = new Workflow({ name: "my-workflow" }); ``` ## API Reference ### Constructor ", isOptional: true, description: "Optional logger instance for workflow execution details", }, { name: "steps", type: "Step[]", description: "Array of steps to include in the workflow", }, { name: "triggerSchema", type: "z.Schema", description: "Optional schema for validating workflow trigger data", }, ]} /> ### Core Methods #### `step()` Adds a [Step](./step-class.mdx) to the workflow, including transitions to other steps. Returns the workflow instance for chaining. [Learn more about steps](./step-class.mdx). #### `commit()` Validates and finalizes the workflow configuration. Must be called after adding all steps. #### `execute()` Executes the workflow with optional trigger data. Typed based on the [trigger schema](./workflow.mdx#trigger-schemas). ## Trigger Schemas Trigger schemas validate the initial data passed to a workflow using Zod. ```ts showLineNumbers copy const workflow = new Workflow({ name: "order-process", triggerSchema: z.object({ orderId: z.string(), customer: z.object({ id: z.string(), email: z.string().email(), }), }), }); ``` The schema: - Validates data passed to `execute()` - Provides TypeScript types for your workflow input ## Variables & Data Flow Variables allow steps to access data from: - Previous steps' outputs - Trigger data Variables payloads are typesafe with fields defined in the [Step](./step-class.mdx) `inputSchema`. ```ts showLineNumbers copy workflow .step("createOrder", { // Access trigger data variables: { orderId: { stepId: "trigger", path: "orderId" }, }, }) .step("processPayment", { variables: { // Access previous step's data orderStatus: { stepId: "createOrder", path: "status" }, amount: { stepId: "createOrder", path: "total" }, }, }); ``` #### Variable Resolution - Variables are resolved in order of step execution - Each step can access outputs of all previous steps - Paths use dot notation for nested data - Missing or invalid paths throw errors during execution ### Example ```ts showLineNumbers copy const workflow = new Workflow({ name: "process-data", triggerSchema: z.object({ items: z.array( z.object({ id: z.number(), value: z.number(), }), ), }), }) .step("filter", { variables: { items: { stepId: "trigger", path: "." }, }, }) .step("process", { variables: { items: { stepId: "filter", path: "filtered.user.name" }, }, }) .commit(); ``` ## Validation Workflow validation happens at two key times: ### 1. At Commit Time When you call `.commit()`, the workflow validates: ```ts showLineNumbers copy workflow .step('step1', {...}) .step('step2', {...}) .commit(); // Validates workflow structure ``` - Circular dependencies between steps - Terminal paths (every path must end) - Unreachable steps - Variable references to non-existent steps - Duplicate step IDs ### 2. During Execution When you call `start()`, it validates: ```ts showLineNumbers copy const { runId, start } = workflow.createRun(); // Validates trigger data against schema await start({ triggerData: { orderId: "123", customer: { id: "cust_123", email: "invalid-email", // Will fail validation }, }, }); ``` - Trigger data against trigger schema - Each step's input data against its inputSchema - Variable paths exist in referenced step outputs - Required variables are present ## Workflow Status A workflow's status indicates its current execution state. 
The possible values are: ### Example: Handling Different Statuses ```typescript showLineNumbers copy const { runId, start } = workflow.createRun(); workflow.watch(runId, async ({ status }) => { switch (status) { case "SUSPENDED": // Handle suspended state break; case "COMPLETED": // Process results break; case "FAILED": // Handle error state break; } }); await start({ triggerData: data }); ``` ## Error Handling ```ts showLineNumbers copy try { const { runId, start } = workflow.createRun(); await start({ triggerData: data }); } catch (error) { if (error instanceof ValidationError) { // Handle validation errors console.log(error.type); // 'circular_dependency' | 'no_terminal_path' | 'unreachable_step' console.log(error.details); // { stepId?: string, path?: string[] } } } ``` ## Related Documentation - [Step](./step-class.mdx) - [.then()](./then.mdx) - [.step()](./step-function.mdx) - [.after()](./after.mdx) ================================================================================ Source: src/pages/docs/workflows/00-overview.mdx ================================================================================ --- title: "Handling Complex LLM Operations | Workflows | Mastra" description: "Workflows in Mastra help you orchestrate complex sequences of operations with features like branching, parallel execution, resource suspension, and more." --- # Handling Complex LLM Operations with Workflows Workflows in Mastra help you orchestrate complex sequences of operations with features like branching, parallel execution, resource suspension, and more. ## When to use workflows Most AI applications need more than a single call to a language model. You may want to run multiple steps, conditionally skip certain paths, or even pause execution altogether until you receive user input. Sometimes your agent tool calling is not accurate enough. Mastra’s workflow system provides: - A standardized way to define steps and link them together. - Support for both simple (linear) and advanced (branching, parallel) paths. - Debugging and observability features to track each workflow run. ## Example To create a workflow, you define one or more steps, link them, and then commit the workflow before starting it. Typically, you would call an LLM or other service in some way in some step. But for this example, we'll just double the input value in the first step and increment it in the second. ```typescript copy showLineNumbers import { Workflow, Step } from "@mastra/core"; import { z } from "zod"; // 1. Define the workflow const myWorkflow = new Workflow({ name: "my-workflow", triggerSchema: z.object({ inputValue: z.number(), }), }); // 2. Create steps const stepOne = new Step({ id: "stepOne", execute: async ({ context: { machineContext } }) => ({ doubledValue: machineContext.triggerData.inputValue * 2 }) }); const stepTwo = new Step({ id: "stepTwo", execute: async ({ context: { machineContext } }) => ({ incrementedValue: machineContext.stepResults.stepOne.payload.doubledValue + 1 }) }); // 3. Link and commit steps myWorkflow.step(stepOne).then(stepTwo).commit(); // 4. Run the workflow const { runId, start } = myWorkflow.createRun({ triggerData: { inputValue: 3 } }); const result = await start(); console.log("Workflow result:", result.results); ``` This example shows the essentials: define your workflow, add steps, commit the workflow, then execute it. ## Defining Steps The basic building block of a workflow [is a step](./steps.mdx). 
Steps are defined using schemas for inputs and outputs, and can fetch prior step results. ## Control Flow Workflows let you define a [control flow](./control-flow.mdx) to chain steps together in with parallel steps, branching paths, and more. ## Suspend and Resume When you need to pause execution for external data, user input, or asynchronous events, Mastra [supports suspension at any step](./suspend-and-resume.mdx), persisting the state of the workflow so you can resume it later. ## Observability and Debugging Mastra workflows automatically [log the input and output of each step within a workflow run](../reference/observability/otel-config.mdx), allowing you to send this data to your preferred logging, telemetry, or observability tools. You can: - Track the status of each step (e.g., `success`, `error`, or `suspended`). - Store run-specific metadata for analysis. - Integrate with third-party observability platforms like Datadog or New Relic by forwarding logs. ## More Resources - The [Workflow Guide](../guides/04-recruiter.mdx) in the Guides section is a tutorial that covers the main concepts. - [Sequential Steps workflow example](../../examples/workflows/sequential-steps.mdx) - [Parallel Steps workflow example](../../examples/workflows/parallel-steps.mdx) - [Branching Paths workflow example](../../examples/workflows/branching-paths.mdx) - [Cyclical Dependencies workflow example](../../examples/workflows/cyclical-dependencies.mdx) - [Suspend and Resume workflow example](../../examples/workflows/suspend-and-resume.mdx) ================================================================================ Source: src/pages/docs/workflows/control-flow.mdx ================================================================================ --- title: "Branching, Merging, Conditions | Workflows | Mastra Docs" description: "Control flow in Mastra workflows allows you to manage branching, merging, and conditions to construct workflows that meet your logic requirements." --- # Control Flow in Workflows: Branching, Merging, and Conditions When you create a multi-step process, you may need to run steps in parallel, chain them sequentially, or follow different paths based on outcomes. This page describes how you can manage branching, merging, and conditions to construct workflows that meet your logic requirements. The code snippets show the key patterns for structuring complex control flow. ## Parallel Execution You can run multiple steps at the same time if they don’t depend on each other. This approach can speed up your workflow when steps perform independent tasks. The code below shows how to add two steps in parallel: ```typescript myWorkflow.step(fetchUserData).step(fetchOrderData); ``` See the [Parallel Steps](../../examples/workflows/parallel-steps.mdx) example for more details. ## Sequential Execution Sometimes you need to run steps in strict order to ensure outputs from one step become inputs for the next. Use .then() to link dependent operations. The code below shows how to chain steps sequentially: ```typescript myWorkflow.step(fetchOrderData).then(validateData).then(processOrder); ``` See the [Sequential Steps](../../examples/workflows/sequential-steps.mdx) example for more details. ## Branching and Merging Paths When different outcomes require different paths, branching is helpful. You can also merge paths later once they complete. 
The code below shows how to branch after stepA and later converge on stepF: ```typescript myWorkflow .step(stepA) .then(stepB) .then(stepD) .after(stepA) .step(stepC) .then(stepE) .after([stepD, stepE]) .step(stepF); ``` In this example: - stepA leads to stepB, then to stepD. - Separately, stepA also triggers stepC, which in turn leads to stepE. - The workflow waits for both stepD and stepE to finish before proceeding to stepF. See the [Branching Paths](../../examples/workflows/branching-paths.mdx) example for more details. ## Cyclical Dependencies You can loop back to earlier steps based on conditions, allowing you to repeat tasks until certain results are achieved. The code below shows a workflow that repeats fetchData when a status is “retry”: ```typescript myWorkflow .step(fetchData) .then(processData) .after(processData) .step(finalizeData, { when: { "processData.status": "success" }, }) .step(fetchData, { when: { "processData.status": "retry" }, }); ``` If processData returns “success,” finalizeData runs. If it returns “retry,” the workflow loops back to fetchData. See the [Cyclical Dependencies](../../examples/workflows/cyclical-dependencies.mdx) example for more details. ## Conditions Use the when property to control whether a step runs based on data from previous steps. Below are three ways to specify conditions. ### Option 1: Function ```typescript myWorkflow.step( new Step({ id: "processData", execute: async ({ context }) => { // Action logic }, }), { when: async ({ context }) => { const fetchData = context?.getStepPayload<{ status: string }>("fetchData"); return fetchData?.status === "success"; }, }, ); ``` ### Option 2: Query Object ```typescript myWorkflow.step( new Step({ id: "processData", execute: async ({ context }) => { // Action logic }, }), { when: { ref: { step: { id: "fetchData", }, path: "status", }, query: { $eq: "success" }, }, }, ); ``` ### Option 3: Simple Path Comparison ```typescript myWorkflow.step( new Step({ id: "processData", execute: async ({ context }) => { // Action logic }, }), { when: { "fetchData.status": "success", }, }, ); ``` ## Renaming Variables Variables let you pass outputs from one step into another step’s inputs. ### Passing Trigger Data ```typescript myWorkflow.step(stepOne).then(stepTwo, { variables: { valueToIncrement: { step: "trigger", path: "inputValue", }, }, }); ``` ### Passing Output from a Previous Step ```typescript myWorkflow.step(stepOne).then(stepTwo, { variables: { valueToIncrement: { step: stepOne, path: "doubledValue", }, }, }); ``` ### Passing Output Using a Step ID ```typescript myWorkflow.step(stepOne).then(stepTwo, { variables: { valueToIncrement: { step: { id: "stepOne", }, path: "doubledValue", }, }, }); ``` In all these examples, you pick the specific data you want to pass forward. This approach helps decouple steps and keep your workflow logic clear. ================================================================================ Source: src/pages/docs/workflows/steps.mdx ================================================================================ --- title: "Creating Steps and Adding to Workflows | Mastra Docs" description: "Steps in Mastra workflows provide a structured way to manage operations by defining inputs, outputs, and execution logic." --- # Defining Steps in a Workflow When you build a workflow, you typically break down operations into smaller tasks that can be linked and reused. Steps provide a structured way to manage these tasks by defining inputs, outputs, and execution logic. 
The code below shows how to define these steps inline or separately. ## Inline Step Creation You can create steps directly within your workflow using `.step()` and `.then()`. This code shows how to define, link, and execute two steps in sequence. ```typescript showLineNumbers filename="src/mastra/workflows/index.ts" copy import { Step, Workflow } from "@mastra/core"; import { z } from "zod"; export const myWorkflow = new Workflow({ name: "my-workflow", triggerSchema: z.object({ inputValue: z.number(), }), }); myWorkflow .step( new Step({ id: "stepOne", outputSchema: z.object({ doubledValue: z.number(), }), execute: async ({ context: { machineContext } }) => ({ doubledValue: machineContext.triggerData.inputValue * 2, }), }), ) .then( new Step({ id: "stepTwo", outputSchema: z.object({ incrementedValue: z.number(), }), execute: async ({ context: { machineContext } }) => ({ incrementedValue: machineContext.stepResults.stepOne.payload.doubledValue + 1, }), }), ); ``` ## Creating Steps Separately If you prefer to manage your step logic in separate entities, you can define steps outside and then add them to your workflow. This code shows how to define steps independently and link them afterward. ```typescript showLineNumbers filename="src/mastra/workflows/index.ts" copy import { Step, Workflow } from "@mastra/core"; import { z } from "zod"; // Define steps separately const stepOne = new Step({ id: "stepOne", outputSchema: z.object({ doubledValue: z.number(), }), execute: async ({ context: { machineContext } }) => ({ doubledValue: machineContext.triggerData.inputValue * 2, }), }); const stepTwo = new Step({ id: "stepTwo", outputSchema: z.object({ incrementedValue: z.number(), }), execute: async ({ context: { machineContext } }) => ({ incrementedValue: machineContext.stepResults.stepOne.payload.doubledValue + 1, }), }); // Build the workflow const myWorkflow = new Workflow({ name: "my-workflow", triggerSchema: z.object({ inputValue: z.number(), }), }); myWorkflow.step(stepOne).then(stepTwo); myWorkflow.commit(); ``` ================================================================================ Source: src/pages/docs/workflows/suspend-and-resume.mdx ================================================================================ --- title: "Suspend & Resume Workflows | Human-in-the-Loop | Mastra Docs" description: "Suspend and resume in Mastra workflows allows you to pause execution while waiting for external input or resources." --- # Suspend and Resume in Workflows Complex workflows often need to pause execution while waiting for external input or resources. Mastra's suspend and resume features let you pause workflow execution at any step, persist the workflow state, and continue when ready. 
## When to Use Suspend/Resume Common scenarios for suspending workflows include: - Waiting for human approval or input - Pausing until external API resources become available - Collecting additional data needed for later steps - Rate limiting or throttling expensive operations ## Basic Suspend Example Here's a simple workflow that suspends when a value is too low and resumes when given a higher value: ```typescript const stepTwo = new Step({ id: "stepTwo", outputSchema: z.object({ incrementedValue: z.number(), }), execute: async ({ context, suspend }) => { const currentValue = context.stepResults.stepOne.payload.doubledValue; if (currentValue < 100) { await suspend(); return { incrementedValue: 0 }; } return { incrementedValue: currentValue + 1 }; }, }); ``` ## Watching and Resuming To handle suspended workflows, use the `watch` method to monitor workflow status and `resume` to continue execution: ```typescript // Create and start the workflow const { runId, start } = myWorkflow.createRun({ triggerData: { inputValue: 45 } }); // Start watching the workflow before executing it myWorkflow.watch(runId, async ({ context }) => { // Check each step's status const stepStatus = context.stepResults?.stepTwo?.status; if (stepStatus === 'suspended') { console.log('Workflow suspended, resuming with new value'); // Resume the workflow with new context await myWorkflow.resume({ runId, stepId: 'stepTwo', context: { secondValue: 60 // This value will be added to the original value }, }); } }); // Start the workflow execution const result = await start(); ``` ## Related Resources - See the [Suspend and Resume Example](../../examples/workflows/suspend-and-resume.mdx) for a complete working example - Check the [Step Class Reference](../reference/workflows/step-class.mdx) for suspend/resume API details - Review [Workflow Observability](../reference/observability/otel-config.mdx) for monitoring suspended workflows ================================================================================ Source: src/pages/examples/agents/agentic-workflows.mdx ================================================================================ --- title: "Example: Calling Agentic Workflows | Agents | Mastra Docs" description: Example of creating AI workflows in Mastra, demonstrating integration of external APIs with LLM-powered planning. --- import { GithubLink } from "../../../components/github-link"; # Agentic Workflows When building AI applications, you often need to coordinate multiple steps that depend on each other's outputs. This example shows how to create an AI workflow that fetches weather data and uses it to suggest activities, demonstrating how to integrate external APIs with LLM-powered planning. 
```ts showLineNumbers copy
import { Mastra, Step, Workflow } from "@mastra/core";
import { z } from "zod";

const fetchWeather = new Step({
  id: "fetch-weather",
  description: "Fetches weather forecast for a given city",
  inputSchema: z.object({
    city: z.string().describe("The city to get the weather for"),
  }),
  execute: async ({ context }) => {
    const triggerData = context.machineContext?.getStepPayload<{
      city: string;
    }>("trigger");

    if (!triggerData) {
      throw new Error("Trigger data not found");
    }

    const geocodingUrl = `https://geocoding-api.open-meteo.com/v1/search?name=${encodeURIComponent(triggerData.city)}&count=1`;
    const geocodingResponse = await fetch(geocodingUrl);
    const geocodingData = await geocodingResponse.json();

    if (!geocodingData.results?.[0]) {
      throw new Error(`Location '${triggerData.city}' not found`);
    }

    const { latitude, longitude, name } = geocodingData.results[0];

    const weatherUrl = `https://api.open-meteo.com/v1/forecast?latitude=${latitude}&longitude=${longitude}&daily=temperature_2m_max,temperature_2m_min,precipitation_probability_mean,weathercode&timezone=auto`;
    const response = await fetch(weatherUrl);
    const data = await response.json();

    const forecast = data.daily.time.map((date: string, index: number) => ({
      date,
      maxTemp: data.daily.temperature_2m_max[index],
      minTemp: data.daily.temperature_2m_min[index],
      precipitationChance: data.daily.precipitation_probability_mean[index],
      condition: getWeatherCondition(data.daily.weathercode[index]),
      location: name,
    }));

    return forecast;
  },
});

const forecastSchema = z.array(
  z.object({
    date: z.string(),
    maxTemp: z.number(),
    minTemp: z.number(),
    precipitationChance: z.number(),
    condition: z.string(),
    location: z.string(),
  }),
);

const planActivities = new Step({
  id: "plan-activities",
  description: "Suggests activities based on weather conditions",
  inputSchema: forecastSchema,
  execute: async ({ context, mastra }) => {
    const forecast = context.machineContext?.getStepPayload<z.infer<typeof forecastSchema>>(
      "fetch-weather",
    );

    if (!forecast) {
      throw new Error("Forecast data not found");
    }

    const prompt = `Based on the following weather forecast for ${forecast[0].location}, suggest appropriate activities:
      ${JSON.stringify(forecast, null, 2)}
      `;

    if (!mastra?.llm) {
      throw new Error("Mastra not found");
    }

    const llm = mastra.llm({
      provider: "OPEN_AI",
      name: "gpt-4o-mini",
    });

    const response = await llm.stream([
      {
        role: "system",
        content: `You are a local activities and travel expert who excels at weather-based planning. Analyze the weather data and provide practical activity recommendations.

        For each day in the forecast, structure your response exactly as follows:

        📅 [Day, Month Date, Year]
        ═══════════════════════════

        🌡️ WEATHER SUMMARY
        • Conditions: [brief description]
        • Temperature: [X°C/Y°F to A°C/B°F]
        • Precipitation: [X% chance]

        🌅 MORNING ACTIVITIES
        Outdoor:
        • [Activity Name] - [Brief description including specific location/route]
          Best timing: [specific time range]
          Note: [relevant weather consideration]

        🌞 AFTERNOON ACTIVITIES
        Outdoor:
        • [Activity Name] - [Brief description including specific location/route]
          Best timing: [specific time range]
          Note: [relevant weather consideration]

        🏠 INDOOR ALTERNATIVES
        • [Activity Name] - [Brief description including specific venue]
          Ideal for: [weather condition that would trigger this alternative]

        ⚠️ SPECIAL CONSIDERATIONS
        • [Any relevant weather warnings, UV index, wind conditions, etc.]
Guidelines: - Suggest 2-3 time-specific outdoor activities per day - Include 1-2 indoor backup options - For precipitation >50%, lead with indoor activities - All activities must be specific to the location - Include specific venues, trails, or locations - Consider activity intensity based on temperature - Keep descriptions concise but informative Maintain this exact formatting for consistency, using the emoji and section headers as shown.`, }, { role: "user", content: prompt, }, ]); for await (const chunk of response.textStream) { process.stdout.write(chunk); } return { activities: response.text, }; }, }); function getWeatherCondition(code: number): string { const conditions: Record = { 0: "Clear sky", 1: "Mainly clear", 2: "Partly cloudy", 3: "Overcast", 45: "Foggy", 48: "Depositing rime fog", 51: "Light drizzle", 53: "Moderate drizzle", 55: "Dense drizzle", 61: "Slight rain", 63: "Moderate rain", 65: "Heavy rain", 71: "Slight snow fall", 73: "Moderate snow fall", 75: "Heavy snow fall", 95: "Thunderstorm", }; return conditions[code] || "Unknown"; } const weatherWorkflow = new Workflow({ name: "weather-workflow", triggerSchema: z.object({ city: z.string().describe("The city to get the weather for"), }), }) .step(fetchWeather) .then(planActivities); weatherWorkflow.commit(); const mastra = new Mastra({ workflows: { weatherWorkflow, }, }); async function main() { const { start } = mastra.getWorkflow("weatherWorkflow").createRun(); const result = await start({ triggerData: { city: "London", }, }); console.log("\n \n"); console.log(result); } main(); ``` ================================================================================ Source: src/pages/examples/agents/bird-checker.mdx ================================================================================ --- title: "Example: Categorizing Birds | Agents | Mastra Docs" description: Example of using a Mastra AI Agent to determine if an image from Unsplash depicts a bird. --- import { GithubLink } from "../../../components/github-link"; # Example: Categorizing Birds with an AI Agent We will get a random image from [Unsplash](https://unsplash.com/) that matches a selected query and uses a [Mastra AI Agent](/docs/agents/00-overview.md) to determine if it is a bird or not. ```ts showLineNumbers copy import { Agent } from "@mastra/core"; import { z } from "zod"; export type Image = { alt_description: string; urls: { regular: string; raw: string; }; user: { first_name: string; links: { html: string; }; }; }; export type ImageResponse = | { ok: true; data: T; } | { ok: false; error: K; }; const getRandomImage = async ({ query, }: { query: string; }): Promise> => { const page = Math.floor(Math.random() * 20); const order_by = Math.random() < 0.5 ? "relevant" : "latest"; try { const res = await fetch( `https://api.unsplash.com/search/photos?query=${query}&page=${page}&order_by=${order_by}`, { method: "GET", headers: { Authorization: `Client-ID ${process.env.UNSPLASH_ACCESS_KEY}`, "Accept-Version": "v1", }, cache: "no-store", }, ); if (!res.ok) { return { ok: false, error: "Failed to fetch image", }; } const data = (await res.json()) as { results: Array; }; const randomNo = Math.floor(Math.random() * data.results.length); return { ok: true, data: data.results[randomNo] as Image, }; } catch (err) { return { ok: false, error: "Error fetching image", }; } }; const instructions = ` You can view an image and figure out if it is a bird or not. You can also figure out the species of the bird and where the picture was taken. 
`; export const birdCheckerAgent = new Agent({ name: "Bird checker", instructions, model: { provider: "ANTHROPIC", name: "claude-3-haiku-20240307", toolChoice: "auto", }, }); const queries: string[] = ["wildlife", "feathers", "flying", "birds"]; const randomQuery = queries[Math.floor(Math.random() * queries.length)]; // Get the image url from Unsplash with random type const imageResponse = await getRandomImage({ query: randomQuery }); if (!imageResponse.ok) { console.log("Error fetching image", imageResponse.error); process.exit(1); } console.log("Image URL: ", imageResponse.data.urls.regular); const response = await birdCheckerAgent.generate( [ { role: "user", content: [ { type: "image", image: new URL(imageResponse.data.urls.regular), }, { type: "text", text: "view this image and let me know if it's a bird or not, and the scientific name of the bird without any explanation. Also summarize the location for this picture in one or two short sentences understandable by a high school student", }, ], }, ], { output: z.object({ bird: z.boolean(), species: z.string(), location: z.string(), }), }, ); console.log(response.object); ```
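Because an `output` schema is supplied, `response.object` comes back as a typed object rather than free-form text. A minimal follow-up sketch (continuing the snippet above; the handling shown is illustrative, not part of the original example):

```ts showLineNumbers copy
// `response.object` matches the Zod schema: { bird, species, location }.
const { bird, species, location } = response.object;

if (bird) {
  console.log(`Looks like a bird: ${species}. ${location}`);
} else {
  console.log("The model does not think this image shows a bird.");
}
```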




================================================================================ Source: src/pages/examples/agents/hierarchical-multi-agent.mdx ================================================================================ --- title: "Example: Hierarchical Multi-Agent System | Agents | Mastra" description: Example of creating a hierarchical multi-agent system using Mastra, where agents interact through tool functions. --- import { GithubLink } from "../../../components/github-link"; # Hierarchical Multi-Agent System This example demonstrates how to create a hierarchical multi-agent system where agents interact through tool functions, with one agent coordinating the work of others. The system consists of three agents: 1. A Publisher agent (supervisor) that orchestrates the process 2. A Copywriter agent that writes the initial content 3. An Editor agent that refines the content First, define the Copywriter agent and its tool: ```ts showLineNumbers copy const copywriterAgent = new Agent({ name: "Copywriter", instructions: "You are a copywriter agent that writes blog post copy.", model: { provider: "ANTHROPIC", name: "claude-3-5-sonnet-20241022", toolChoice: "required", }, }); const copywriterTool = createTool({ id: "copywriter-agent", description: "Calls the copywriter agent to write blog post copy.", inputSchema: z.object({ topic: z.string().describe("Blog post topic"), }), outputSchema: z.object({ copy: z.string().describe("Blog post copy"), }), execute: async ({ context }) => { const result = await copywriterAgent.generate( `Create a blog post about ${context.topic}`, ); return { copy: result.text }; }, }); ``` Next, define the Editor agent and its tool: ```ts showLineNumbers copy const editorAgent = new Agent({ name: "Editor", instructions: "You are an editor agent that edits blog post copy.", model: { provider: "OPEN_AI", name: "gpt-4o-mini", }, }); const editorTool = createTool({ id: "editor-agent", description: "Calls the editor agent to edit blog post copy.", inputSchema: z.object({ copy: z.string().describe("Blog post copy"), }), outputSchema: z.object({ copy: z.string().describe("Edited blog post copy"), }), execute: async ({ context }) => { const result = await editorAgent.generate( `Edit the following blog post only returning the edited copy: ${context.copy}`, ); return { copy: result.text }; }, }); ``` Finally, create the Publisher agent that coordinates the others: ```ts showLineNumbers copy const publisherAgent = new Agent({ name: "publisherAgent", instructions: "You are a publisher agent that first calls the copywriter agent to write blog post copy about a specific topic and then calls the editor agent to edit the copy. Just return the final edited copy.", model: { provider: "ANTHROPIC", name: "claude-3-5-sonnet-20241022", }, tools: { copywriterTool, editorTool }, }); const mastra = new Mastra({ agents: { publisherAgent }, }); ``` To use the entire system: ```ts showLineNumbers copy async function main() { const agent = mastra.getAgent("publisherAgent"); const result = await agent.generate( "Write a blog post about React JavaScript frameworks. Only return the final edited copy.", ); console.log(result.text); } main(); ```
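The snippets above reference `Agent`, `createTool`, `Mastra`, and `z` without showing their imports. Based on the other agent examples in these docs, the top of the file would presumably look like this:

```ts showLineNumbers copy
import { Agent, createTool, Mastra } from "@mastra/core";
import { z } from "zod";
```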




================================================================================ Source: src/pages/examples/agents/multi-agent-workflow.mdx ================================================================================ --- title: "Example: Multi-Agent Workflow | Agents | Mastra Docs" description: Example of creating an agentic workflow in Mastra, where work product is passed between multiple agents. --- import { GithubLink } from "../../../components/github-link"; # Multi-Agent Workflow This example demonstrates how to create an agentic workflow with work product being passed between multiple agents with a worker agent and a supervisor agent. In this example, we create a sequential workflow that calls two agents in order: 1. A Copywriter agent that writes the initial blog post 2. An Editor agent that refines the content First, import the required dependencies: ```typescript import { Agent, Step, Workflow } from "@mastra/core"; import { z } from "zod"; ``` Create the copywriter agent that will generate the initial blog post: ```typescript const copywriterAgent = new Agent({ name: "Copywriter", instructions: "You are a copywriter agent that writes blog post copy.", model: { provider: "ANTHROPIC", name: "claude-3-5-sonnet-20241022", toolChoice: "required", }, }); ``` Define the copywriter step that executes the agent and handles the response: ```typescript const copywriterStep = new Step({ id: "copywriterStep", execute: async ({ context: { machineContext } }) => { if (!machineContext?.triggerData?.topic) { throw new Error("Topic not found in trigger data"); } const result = await copywriterAgent.generate( `Create a blog post about ${machineContext.triggerData.topic}`, ); console.log("copywriter result", result.text); return { copy: result.text, }; }, }); ``` Set up the editor agent to refine the copywriter's content: ```typescript const editorAgent = new Agent({ name: "Editor", instructions: "You are an editor agent that edits blog post copy.", model: { provider: "OPEN_AI", name: "gpt-4o-mini", }, }); ``` Create the editor step that processes the copywriter's output: ```typescript const editorStep = new Step({ id: "editorStep", execute: async ({ context }) => { const copy = context?.machineContext?.getStepPayload<{ copy: number }>( "copywriterStep", )?.copy; const result = await editorAgent.generate( `Edit the following blog post only returning the edited copy: ${copy}`, ); console.log("editor result", result.text); return { copy: result.text, }; }, }); ``` Configure the workflow and execute the steps: ```typescript const myWorkflow = new Workflow({ name: "my-workflow", triggerSchema: z.object({ topic: z.string(), }), }); // Run steps sequentially. myWorkflow.step(copywriterStep).then(editorStep).commit(); const { runId, start } = myWorkflow.createRun(); const res = await start({ triggerData: { topic: "React JavaScript frameworks" }, }); console.log("Results: ", res.results); ```
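The example runs the workflow directly. If you would rather register it on a Mastra instance and run it the way the agentic workflow example above does, a sketch along these lines should work (the wiring is assumed from that example):

```typescript
import { Mastra } from "@mastra/core";

const mastra = new Mastra({
  workflows: { myWorkflow },
});

// Look the workflow up by its registered key and start a run.
const { start } = mastra.getWorkflow("myWorkflow").createRun();

const res = await start({
  triggerData: { topic: "React JavaScript frameworks" },
});

console.log("Results: ", res.results);
```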




================================================================================ Source: src/pages/examples/agents/system-prompt.mdx ================================================================================ --- title: "Example: Agents with a System Prompt | Agents | Mastra Docs" description: Example of creating an AI agent in Mastra with a system prompt to define its personality and capabilities. --- import { GithubLink } from "../../../components/github-link"; # Giving an Agent a System Prompt When building AI agents, you often need to give them specific instructions and capabilities to handle specialized tasks effectively. System prompts allow you to define an agent's personality, knowledge domain, and behavioral guidelines. This example shows how to create an AI agent with custom instructions and integrate it with a dedicated tool for retrieving verified information. ```ts showLineNumbers copy import { Agent, createTool } from "@mastra/core"; import { z } from "zod"; const instructions = `You are a helpful cat expert assistant. When discussing cats, you should always include an interesting cat fact. Your main responsibilities: 1. Answer questions about cats 2. Use the catFact tool to provide verified cat facts 3. Incorporate the cat facts naturally into your responses Always use the catFact tool at least once in your responses to ensure accuracy.`; const getCatFact = async () => { const { fact } = (await fetch("https://catfact.ninja/fact").then((res) => res.json(), )) as { fact: string; }; return fact; }; const catFact = createTool({ id: "Get cat facts", inputSchema: z.object({}), description: "Fetches cat facts", execute: async () => { console.log("using tool to fetch cat fact"); return { catFact: await getCatFact(), }; }, }); const catOne = new Agent({ name: "cat-one", instructions: instructions, model: { provider: "OPEN_AI", name: "gpt-4o-mini", toolChoice: "required", }, tools: { catFact, }, }); const result = await catOne.generate("Tell me a cat fact"); console.log(result.text); ```




================================================================================
Source: src/pages/examples/agents/using-a-tool.mdx
================================================================================

---
title: "Example: Giving an Agent a Tool | Agents | Mastra Docs"
description: Example of creating an AI agent in Mastra that uses a dedicated tool to provide weather information.
---

import { GithubLink } from "../../../components/github-link";

# Example: Giving an Agent a Tool

When building AI agents, you often need to integrate external data sources or functionality to enhance their capabilities. This example shows how to create an AI agent that uses a dedicated weather tool to provide accurate weather information for specific locations.

```ts showLineNumbers copy
import { createTool, Agent, Mastra } from "@mastra/core";
import { z } from "zod";

interface WeatherResponse {
  current: {
    time: string;
    temperature_2m: number;
    apparent_temperature: number;
    relative_humidity_2m: number;
    wind_speed_10m: number;
    wind_gusts_10m: number;
    weather_code: number;
  };
}

const weatherTool = createTool({
  id: "get-weather",
  description: "Get current weather for a location",
  inputSchema: z.object({
    location: z.string().describe("City name"),
  }),
  outputSchema: z.object({
    temperature: z.number(),
    feelsLike: z.number(),
    humidity: z.number(),
    windSpeed: z.number(),
    windGust: z.number(),
    conditions: z.string(),
    location: z.string(),
  }),
  execute: async ({ context }) => {
    return await getWeather(context.location);
  },
});

const getWeather = async (location: string) => {
  const geocodingUrl = `https://geocoding-api.open-meteo.com/v1/search?name=${encodeURIComponent(location)}&count=1`;
  const geocodingResponse = await fetch(geocodingUrl);
  const geocodingData = await geocodingResponse.json();

  if (!geocodingData.results?.[0]) {
    throw new Error(`Location '${location}' not found`);
  }

  const { latitude, longitude, name } = geocodingData.results[0];

  const weatherUrl = `https://api.open-meteo.com/v1/forecast?latitude=${latitude}&longitude=${longitude}&current=temperature_2m,apparent_temperature,relative_humidity_2m,wind_speed_10m,wind_gusts_10m,weather_code`;
  const response = await fetch(weatherUrl);
  const data: WeatherResponse = await response.json();

  return {
    temperature: data.current.temperature_2m,
    feelsLike: data.current.apparent_temperature,
    humidity: data.current.relative_humidity_2m,
    windSpeed: data.current.wind_speed_10m,
    windGust: data.current.wind_gusts_10m,
    conditions: getWeatherCondition(data.current.weather_code),
    location: name,
  };
};

function getWeatherCondition(code: number): string {
  const conditions: Record<number, string> = {
    0: "Clear sky",
    1: "Mainly clear",
    2: "Partly cloudy",
    3: "Overcast",
    45: "Foggy",
    48: "Depositing rime fog",
    51: "Light drizzle",
    53: "Moderate drizzle",
    55: "Dense drizzle",
    56: "Light freezing drizzle",
    57: "Dense freezing drizzle",
    61: "Slight rain",
    63: "Moderate rain",
    65: "Heavy rain",
    66: "Light freezing rain",
    67: "Heavy freezing rain",
    71: "Slight snow fall",
    73: "Moderate snow fall",
    75: "Heavy snow fall",
    77: "Snow grains",
    80: "Slight rain showers",
    81: "Moderate rain showers",
    82: "Violent rain showers",
    85: "Slight snow showers",
    86: "Heavy snow showers",
    95: "Thunderstorm",
    96: "Thunderstorm with slight hail",
    99: "Thunderstorm with heavy hail",
  };
  return conditions[code] || "Unknown";
}

const weatherAgent = new Agent({
  name: "Weather Agent",
  instructions: `You are a helpful weather assistant that provides accurate weather information.
Your primary function is to help users get weather details for specific locations. When responding: - Always ask for a location if none is provided - Include relevant details like humidity, wind conditions, and precipitation - Keep responses concise but informative Use the weatherTool to fetch current weather data.`, model: { provider: "OPEN_AI", name: "gpt-4o-mini", }, tools: { weatherTool }, }); const mastra = new Mastra({ agents: { weatherAgent }, }); async function main() { const agent = await mastra.getAgent("weatherAgent"); const result = await agent.generate("What is the weather in London?"); console.log(result.text); } main(); ```




================================================================================ Source: src/pages/examples/index.mdx ================================================================================ --- title: "Examples List: Workflows, Agents, RAG | Mastra Docs" description: "Explore practical examples of AI development with Mastra, including text generation, RAG implementations, structured outputs, and multi-modal interactions. Learn how to build AI applications using OpenAI, Anthropic, and Google Gemini." --- import { CardItems, CardItem } from "../../components/example-cards"; # Examples List The Examples section is a short list of example projects demonstrating basic AI engineering with Mastra, including text generation, structured output, streaming responses, and retrieval‐augmented generation (RAG). ================================================================================ Source: src/pages/examples/llms/call-claude.mdx ================================================================================ --- title: "Example: Generate Text with Claude | LLMs | Mastra Docs" description: Example of using Mastra to generate text with Anthropic's Claude model through a unified interface. --- import { GithubLink } from "../../../components/github-link"; # Generate Text with Claude Many developers need to use different language models but don't want to learn multiple APIs. Mastra provides a unified interface for working with various LLM providers, handling the complexity of different API implementations. This example shows how to use Anthropic's Claude model through the same interface used for other providers. ```ts showLineNumbers copy import { Mastra } from "@mastra/core"; const mastra = new Mastra(); const llm = mastra.LLM({ provider: "ANTHROPIC", name: "claude-3-5-sonnet-20241022", }); const result = await llm.generate("Who invented the submarine?"); console.log(result.text); ```
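The snippet does not pass an API key explicitly; like the other provider examples, it presumably picks up the Anthropic key from the environment, typically:

```bash
ANTHROPIC_API_KEY=your-api-key
```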




================================================================================ Source: src/pages/examples/llms/call-google-gemini.mdx ================================================================================ --- title: "Example: Generate Text with Gemini | LLMs | Mastra Docs" description: Example of using Mastra to generate text with Google's Gemini model through a unified interface. --- import { GithubLink } from '../../../components/github-link'; # Generate Text with Gemini Many developers need to use different language models but don't want to learn multiple APIs. Mastra provides a unified interface for working with various LLM providers, handling the complexity of different API implementations. This example shows how to use Google's Gemini model through the same interface used for other providers. ```ts showLineNumbers copy import { Mastra } from '@mastra/core'; const mastra = new Mastra(); const llm = mastra.LLM({ provider: 'GOOGLE', name: 'gemini-1.5-flash', apiKey: process.env.GEMINI_API_KEY, }); const result = await llm.generate('Who invented the submarine?'); console.log(result.text); ```




================================================================================ Source: src/pages/examples/llms/describe-an-image.mdx ================================================================================ --- title: "Example: Generate Text from an Image | LLMs | Mastra Docs" description: Example of using Mastra to structure a multimodal request by combining text and image inputs. --- import { GithubLink } from '../../../components/github-link'; # Generate Text from an Image Vision-enabled language models can analyze both text and images, but combining them in a single prompt requires specific message formatting. This example demonstrates how to structure a multimodal request by combining a text question with an image URL in the message content array. ```ts showLineNumbers copy import { Mastra } from '@mastra/core'; const mastra = new Mastra(); const llm = mastra.LLM({ provider: 'OPEN_AI', name: 'gpt-4-turbo', }); const response = await llm.generate([ { role: 'user', content: [ { type: 'text', text: 'what is that black bold text at the top?', }, { type: 'image', image: new URL( 'https://upload.wikimedia.org/wikipedia/commons/thumb/0/03/491_BC_-_1902_AD_-_A_Long_Time_Between_Drinks.jpg/1000px-491_BC_-_1902_AD_-_A_Long_Time_Between_Drinks.jpg', ), }, ], }, ]); console.log(response.text); ```




================================================================================
Source: src/pages/examples/llms/generate-object-with-structured-output.mdx
================================================================================

---
title: "Example: Object With Structured Output | LLMs | Mastra Docs"
description: Example of using Mastra to get structured JSON output from a language model using Zod schemas.
---

import { GithubLink } from "../../../components/github-link";

# Generate Object With Structured Output

Sometimes you need the language model to return data in a specific format rather than free-form text. This example shows how to use Zod schemas to get structured JSON output from the model, making it easier to work with the response in your application.

```ts showLineNumbers copy
import { Mastra } from "@mastra/core";
import { z } from "zod";

const mastra = new Mastra();

const llm = mastra.LLM({
  provider: "OPEN_AI",
  name: "gpt-4o-mini",
});

const recipeSchema = z.object({
  recipe: z.object({
    name: z.string(),
    ingredients: z.array(
      z.object({
        name: z.string(),
        amount: z.string(),
      }),
    ),
    steps: z.array(z.string()),
  }),
});

const result = await llm.generate("Generate an egusi recipe.", {
  output: recipeSchema,
});

console.log(JSON.stringify(result.object.recipe, null, 2));
```




================================================================================ Source: src/pages/examples/llms/generate-text-from-pdf.mdx ================================================================================ --- title: "Example: Generate Text from PDF | LLMs | Mastra Docs" description: Example of using Mastra to generate text from a PDF file using the ANTHROPIC provider. --- import { GithubLink } from '../../../components/github-link'; # Generate Text from PDF Some models support the `file` type. You can use this type to generate text from a PDF file. This example shows how to generate text from a PDF file using the `ANTHROPIC` provider. ```ts showLineNumbers copy import { Mastra } from '@mastra/core'; import { readFileSync } from 'fs'; import path from 'path'; const mastra = new Mastra(); const llm = mastra.LLM({ provider: 'ANTHROPIC', name: 'claude-3-5-sonnet-20241022', }); const buf = readFileSync(path.join(process.cwd(), './fridge-owners-manual.pdf')) const response = await llm.generate([{ role: "user", content: [ { type: "file", mimeType: "application/pdf", data: buf }, { type: "text", text: "Please confirm you can see this PDF file by saying 'YES I can see the PDF' and then tell me what's in it." } ] }]); console.log(response.text); ```




================================================================================ Source: src/pages/examples/llms/generate-text-with-deepseek-reasoner.mdx ================================================================================ --- title: Generate Text with Deepseek Reasoner description: Use Deepseek's reasoner model to solve complex logical problems step by step --- # Generate Text with Deepseek Reasoner This example shows how to use Deepseek's reasoner model to solve complex logical problems with step-by-step reasoning. ## Code Example ```typescript import { Mastra } from '@mastra/core'; const mastra = new Mastra(); const llm = mastra.LLM({ provider: 'DEEPSEEK', name: 'deepseek-reasoner', apiKey: process.env.DEEPSEEK_API_KEY, }); const response = await llm.generate('Solve this logical puzzle step by step: Three friends - Alice, Bob, and Charlie - are wearing different colored hats (red, blue, green). Alice says she sees a blue hat. Bob says he sees a green hat. Charlie says he sees a red hat. Each person can see the others\' hats but not their own. If exactly one person is lying, what color hat is each person wearing?'); console.log(response.text); ``` ## Configuration To use this example, you'll need: 1. A Deepseek API key (set as `DEEPSEEK_API_KEY` in your environment) 2. The `@mastra/core` package installed The Deepseek reasoner model is specifically designed for tasks that require logical reasoning and step-by-step problem solving. It excels at: - Mathematical problems - Logic puzzles - Step-by-step analysis - Complex reasoning tasks ## Try It Out You can find the complete example in our GitHub repository under [examples/basics/llms/generate-text-with-deepseek](https://github.com/mastra-ai/mastra/tree/main/examples/basics/llms/generate-text-with-deepseek). ================================================================================ Source: src/pages/examples/llms/generate-text.mdx ================================================================================ --- title: "Example: Generate Text from a Prompt | LLMs | Mastra Docs" description: Example of using Mastra to generate text from a language model with a simple text prompt. --- import { GithubLink } from "../../../components/github-link"; # Generate Text from a User Prompt When you need to get a quick answer from a language model, the simplest approach is to send a text prompt and receive a text response. This example shows how to initialize an LLM and generate text with a single line of code. ```ts showLineNumbers copy import { Mastra } from "@mastra/core"; const mastra = new Mastra(); const llm = mastra.LLM({ provider: "OPEN_AI", name: "gpt-4o-mini", }); const response = await llm.generate("What is a wormhole? Explain briefly."); console.log(response.text); ```




================================================================================
Source: src/pages/examples/llms/stream-object-with-structured-output.mdx
================================================================================

---
title: "Example: Stream with Structured Output | LLMs | Mastra Docs"
description: Example of using Mastra to stream JSON-formatted responses from a language model using a Zod schema.
---

import { GithubLink } from "../../../components/github-link";

# Stream Object with Structured Output

When you need structured data from a language model, waiting for the complete response can take time. By streaming the output, you can display partial results as they arrive, providing immediate feedback to users. This example shows how to stream JSON-formatted responses using a Zod schema.

```ts showLineNumbers copy
import { Mastra } from "@mastra/core";
import { z } from "zod";

const mastra = new Mastra();

const llm = mastra.LLM({
  provider: "OPEN_AI",
  name: "gpt-4o-mini",
});

const recipeSchema = z.object({
  recipe: z.object({
    name: z.string(),
    ingredients: z.array(
      z.object({
        name: z.string(),
        amount: z.string(),
      }),
    ),
    steps: z.array(z.string()),
  }),
});

const result = await llm.stream("Generate an egusi recipe.", {
  output: recipeSchema,
});

for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}
```




================================================================================
Source: src/pages/examples/llms/stream-text.mdx
================================================================================

---
title: "Example: Real-Time Streaming Text | LLMs | Mastra Docs"
description: Example of using Mastra to stream text responses from a language model in real-time.
---

import { GithubLink } from '../../../components/github-link';

# Real-Time Streaming Text

Text generation can sometimes take a long time to complete, especially when you're generating a couple of paragraphs. By streaming the response, you can display partial results as they arrive, providing immediate feedback to users. This example shows how to stream text responses in real-time.

```ts showLineNumbers copy
import { Mastra } from '@mastra/core';

const mastra = new Mastra();

const llm = mastra.LLM({
  provider: 'OPEN_AI',
  name: 'gpt-4',
});

const response = await llm.stream('Tell me about Christmas and its traditions');

for await (const chunk of response.textStream) {
  process.stdout.write(chunk);
}
```




================================================================================ Source: src/pages/examples/llms/use-a-system-prompt.mdx ================================================================================ --- title: "Example: Using a System Prompt | LLMs | Mastra Docs" description: Example of using Mastra to guide language model behavior with system prompts. --- import { GithubLink } from '../../../components/github-link'; # Using a System Prompt When interacting with language models, you can guide their behavior by providing initial instructions. A system prompt sets the overall context and behavior for the model before it processes any user messages. This example shows how to use system prompts to control model behavior. ```ts showLineNumbers import { Mastra } from '@mastra/core'; const mastra = new Mastra(); const llm = mastra.LLM({ provider: 'OPEN_AI', name: 'gpt-4', }); const response = await llm.generate([ { role: 'system', content: 'You are a helpful assistant.' }, { role: 'user', content: 'What is the meaning of life?' }, ]); console.log(response.text); ```
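The same message-array form should also work when streaming. A minimal sketch, assuming `stream` accepts the same message array as `generate` (as the streaming examples above suggest):

```ts showLineNumbers
import { Mastra } from '@mastra/core';

const mastra = new Mastra();

const llm = mastra.LLM({
  provider: 'OPEN_AI',
  name: 'gpt-4',
});

// Guide the model with a system prompt while streaming the reply.
const response = await llm.stream([
  { role: 'system', content: 'You are a helpful assistant.' },
  { role: 'user', content: 'What is the meaning of life?' },
]);

for await (const chunk of response.textStream) {
  process.stdout.write(chunk);
}
```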




================================================================================ Source: src/pages/examples/memory/memory-with-libsql.mdx ================================================================================ # Memory with LibSQL This example demonstrates how to use Mastra's memory system with LibSQL as the storage backend. ## Setup First, set up the memory system with LibSQL storage and vector capabilities: ```typescript import { Memory } from '@mastra/memory'; import { LibSQLStore } from '@mastra/store-libsql'; import { LibSQLVector } from '@mastra/vector-libsql'; import { Agent } from '@mastra/core/agent'; // Initialize memory with LibSQL storage and vector search const memory = new Memory({ storage: new LibSQLStore({ url: process.env.DATABASE_URL || 'file:local.db', }), vector: new LibSQLVector({ url: process.env.DATABASE_URL || 'file:local.db', }), options: { lastMessages: 10, semanticRecall: { topK: 3, messageRange: 2, }, }, embedding: { provider: 'OPEN_AI', model: 'text-embedding-3-small', maxRetries: 3, }, }); // Create an agent with memory capabilities const memoryAgent = new Agent({ name: 'Memory Agent', instructions: "You are an AI agent with the ability to automatically recall memories from previous interactions. You may have conversations that last hours, days, months, or years. If you don't know it already you should ask for the users name and some info about them.", model: { provider: 'OPEN_AI', name: 'gpt-4', toolChoice: 'auto', }, memory, }); ``` ## Usage Example ```typescript import { randomUUID } from 'crypto'; // Start a conversation const threadId = randomUUID(); const resourceId = 'SOME_USER_ID'; // Start with a system message const response1 = await memoryAgent.stream( [ { role: 'system', content: `Chat with user started now ${new Date().toISOString()}. Don't mention this message.`, }, ], { resourceId, threadId } ); // Send user message const response2 = await memoryAgent.stream( 'What can you help me with?', { threadId, resourceId, } ); // Use semantic search to find relevant messages const response3 = await memoryAgent.stream( 'What did we discuss earlier?', { threadId, resourceId, memoryOptions: { lastMessages: false, semanticRecall: { topK: 3, // Get top 3 most relevant messages messageRange: 2 // Include context around each match }, }, } ); ``` The example shows: 1. Setting up LibSQL storage with vector search capabilities 2. Configuring memory options for message history and semantic search 3. Creating an agent with memory integration 4. Using semantic search to find relevant messages in conversation history 5. Including context around matched messages using `messageRange` ================================================================================ Source: src/pages/examples/memory/memory-with-pg.mdx ================================================================================ # Memory with Postgres This example demonstrates how to use Mastra's memory system with PostgreSQL as the storage backend. 
## Setup First, set up the memory system with PostgreSQL storage and vector capabilities: ```typescript import { Memory } from '@mastra/memory'; import { PostgresStore } from '@mastra/store-pg'; import { PgVector } from '@mastra/vector-pg'; import { Agent } from '@mastra/core/agent'; // PostgreSQL connection details const host = 'localhost'; const port = 5432; const user = 'postgres'; const database = 'postgres'; const password = 'postgres'; const connectionString = `postgresql://${user}:${password}@${host}:${port}`; // Initialize memory with PostgreSQL storage and vector search const memory = new Memory({ storage: new PostgresStore({ host, port, user, database, password, }), vector: new PgVector(connectionString), options: { lastMessages: 10, semanticRecall: { topK: 3, messageRange: 2, }, }, embedding: { provider: 'OPEN_AI', model: 'text-embedding-3-small', maxRetries: 3, }, }); // Create an agent with memory capabilities const chefAgent = new Agent({ name: 'chefAgent', instructions: 'You are Michel, a practical and experienced home chef who helps people cook great meals with whatever ingredients they have available.', model: { provider: 'OPEN_AI', name: 'gpt-4', toolChoice: 'auto', }, memory, }); ``` ## Usage Example ```typescript import { randomUUID } from 'crypto'; // Start a conversation const threadId = randomUUID(); const resourceId = 'SOME_USER_ID'; // Ask about ingredients const response1 = await chefAgent.stream( 'In my kitchen I have: pasta, canned tomatoes, garlic, olive oil, and some dried herbs (basil and oregano). What can I make?', { threadId, resourceId, } ); // Ask about different ingredients const response2 = await chefAgent.stream( "Now I'm over at my friend's house, and they have: chicken thighs, coconut milk, sweet potatoes, and curry powder.", { threadId, resourceId, } ); // Use memory to recall previous conversation const response3 = await chefAgent.stream( 'What did we cook before I went to my friends house?', { threadId, resourceId, memoryOptions: { lastMessages: 3, // Get last 3 messages for context } } ); ``` The example shows: 1. Setting up PostgreSQL storage with vector search capabilities 2. Configuring memory options for message history and semantic search 3. Creating an agent with memory integration 4. Using the agent to maintain conversation context across multiple interactions ================================================================================ Source: src/pages/examples/memory/memory-with-upstash.mdx ================================================================================ # Memory with Upstash This example demonstrates how to use Mastra's memory system with Upstash as the storage backend. 
## Setup First, set up the memory system with Upstash storage and vector capabilities: ```typescript import { Memory } from '@mastra/memory'; import { UpstashStore } from '@mastra/store-upstash'; import { UpstashVector } from '@mastra/vector-upstash'; import { Agent } from '@mastra/core/agent'; // Initialize memory with Upstash storage and vector search const memory = new Memory({ storage: new UpstashStore({ url: process.env.UPSTASH_REDIS_REST_URL, token: process.env.UPSTASH_REDIS_REST_TOKEN, }), vector: new UpstashVector({ url: process.env.UPSTASH_REDIS_REST_URL, token: process.env.UPSTASH_REDIS_REST_TOKEN, }), options: { lastMessages: 10, semanticRecall: { topK: 3, messageRange: 2, }, }, embedding: { provider: 'OPEN_AI', model: 'text-embedding-3-small', maxRetries: 3, }, }); // Create an agent with memory capabilities const chefAgent = new Agent({ name: 'chefAgent', instructions: 'You are Michel, a practical and experienced home chef who helps people cook great meals with whatever ingredients they have available.', model: { provider: 'OPEN_AI', name: 'gpt-4', toolChoice: 'auto', }, memory, }); ``` ## Environment Setup Make sure to set up your Upstash credentials in the environment variables: ```bash UPSTASH_REDIS_REST_URL=your-redis-url UPSTASH_REDIS_REST_TOKEN=your-redis-token ``` ## Usage Example ```typescript import { randomUUID } from 'crypto'; // Start a conversation const threadId = randomUUID(); const resourceId = 'SOME_USER_ID'; // Ask about ingredients const response1 = await chefAgent.stream( 'In my kitchen I have: pasta, canned tomatoes, garlic, olive oil, and some dried herbs (basil and oregano). What can I make?', { threadId, resourceId, } ); // Ask about different ingredients const response2 = await chefAgent.stream( "Now I'm over at my friend's house, and they have: chicken thighs, coconut milk, sweet potatoes, and curry powder.", { threadId, resourceId, } ); // Use memory to recall previous conversation const response3 = await chefAgent.stream( 'What did we cook before I went to my friends house?', { threadId, resourceId, memoryOptions: { lastMessages: 3, // Get last 3 messages for context semanticRecall: { topK: 2, // Also get 2 most relevant messages messageRange: 2 // Include context around matches }, } } ); ``` The example shows: 1. Setting up Upstash storage with vector search capabilities 2. Configuring environment variables for Upstash connection 3. Creating an agent with memory integration 4. Using both recent history and semantic search in the same query ================================================================================ Source: src/pages/examples/rag/adjust-chunk-delimiters.mdx ================================================================================ --- title: "Example: Adjusting Chunk Delimiters | RAG | Mastra Docs" description: Adjust chunk delimiters in Mastra to better match your content structure. --- import { GithubLink } from "../../../components/github-link"; # Adjust Chunk Delimiters When processing large documents, you may want to control how the text is split into smaller chunks. By default, documents are split on newlines, but you can customize this behavior to better match your content structure. This example shows how to specify a custom delimiter for chunking documents. ```tsx copy import { MDocument } from "@mastra/rag"; const doc = MDocument.fromText("Your plain text content..."); const chunks = await doc.chunk({ separator: "\n", }); ```
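If your content is not newline-delimited, you can pass a different separator string. For example, splitting on blank lines between paragraphs (the separator value here is illustrative):

```tsx copy
import { MDocument } from "@mastra/rag";

const doc = MDocument.fromText("Your plain text content...");

// Split on blank lines (paragraph boundaries) instead of single newlines.
const chunks = await doc.chunk({
  separator: "\n\n",
});
```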




================================================================================ Source: src/pages/examples/rag/adjust-chunk-size.mdx ================================================================================ --- title: "Example: Adjusting The Chunk Size | RAG | Mastra Docs" description: Adjust chunk size in Mastra to better match your content and memory requirements. --- import { GithubLink } from "../../../components/github-link"; # Adjust Chunk Size When processing large documents, you might need to adjust how much text is included in each chunk. By default, chunks are 1024 characters long, but you can customize this size to better match your content and memory requirements. This example shows how to set a custom chunk size when splitting documents. ```tsx copy import { MDocument } from "@mastra/rag"; const doc = MDocument.fromText("Your plain text content..."); const chunks = await doc.chunk({ size: 512, }); ```
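Chunk size is often paired with an overlap so context is not lost at chunk boundaries. The RAG examples later in these docs combine the two along these lines (values are illustrative):

```tsx copy
import { MDocument } from "@mastra/rag";

const doc = MDocument.fromText("Your plain text content...");

// Smaller chunks with a 50-character overlap between consecutive chunks.
const chunks = await doc.chunk({
  size: 512,
  overlap: 50,
});
```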




================================================================================ Source: src/pages/examples/rag/basic-rag.mdx ================================================================================ --- title: "Example: A Complete RAG System | RAG | Mastra Docs" description: Example of implementing a basic RAG system in Mastra using OpenAI embeddings and PGVector for vector storage. --- import { GithubLink } from "../../../components/github-link"; # Basic RAG This example demonstrates how to implement a Retrieval-Augmented Generation (RAG) system using Mastra, OpenAI embeddings, and PGVector for vector storage. ## Overview The system implements RAG using Mastra and OpenAI. Here's what it does: 1. Sets up a Mastra agent with gpt-4o-mini for response generation 2. Creates a vector query tool to manage vector store interactions 3. Uses existing embeddings to retrieve relevant context 4. Generates context-aware responses using the Mastra agent > **Note**: To learn how to create and store embeddings in PGVector, see the [Insert Embeddings in PGVector](/examples/rag/insert-embedding-in-pgvector) guide. ## Setup ### Environment Setup Make sure to set up your environment variables: ```bash filename=".env" POSTGRES_CONNECTION_STRING=your_connection_string_here ``` ### Dependencies Import the necessary dependencies: ```typescript copy showLineNumbers filename="src/index.ts" import { Mastra } from '@mastra/core'; import { Agent } from '@mastra/core/agent'; import { createVectorQueryTool } from '@mastra/rag'; import { PgVector } from '@mastra/vector-pg'; ``` ## Vector Query Tool Creation Create a tool that can query the vector database: ```typescript copy showLineNumbers{4} filename="src/index.ts" const vectorQueryTool = createVectorQueryTool({ vectorStoreName: 'pgVector', indexName: 'embeddings', options: { provider: 'OPEN_AI', model: 'text-embedding-3-small', maxRetries: 3, }, topK: 3, }); ``` ## Agent Configuration Set up the Mastra agent that will handle the responses: ```typescript copy showLineNumbers{15} filename="src/index.ts" export const ragAgent = new Agent({ name: 'RAG Agent', instructions: 'You are a helpful assistant that answers questions based on the provided context. Keep your answers concise and relevant.', model: { provider: 'OPEN_AI', name: 'gpt-4o-mini', }, tools: { vectorQueryTool, }, }); ``` ## Instantiate PgVector and Mastra Instantiate PgVector and Mastra with all components: ```typescript copy showLineNumbers{28} filename="src/index.ts" const pgVector = new PgVector(process.env.POSTGRES_CONNECTION_STRING!); export const mastra = new Mastra({ agents: { ragAgent }, vectors: { pgVector }, }); const agent = mastra.getAgent('ragAgent'); ``` ## Example Usage ```typescript copy showLineNumbers{37} filename="src/index.ts" const prompt = ` [Insert query based on document here] Please base your answer only on the context provided in the tool. If the context doesn't contain enough information to fully answer the question, please state that explicitly. `; const completion = await agent.generate(prompt); console.log(completion.text); ```
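Since the example uses OpenAI both for the embeddings and for `gpt-4o-mini`, your OpenAI key presumably needs to be available as well, typically via the standard environment variable:

```bash filename=".env"
OPENAI_API_KEY=your_openai_key_here
```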




================================================================================ Source: src/pages/examples/rag/chunk-html.mdx ================================================================================ --- title: "Example: Semantically Chunking HTML | RAG | Mastra Docs" description: Chunk HTML content in Mastra to semantically chunk the document. --- import { GithubLink } from "../../../components/github-link"; # Semantically Chunking HTML When working with HTML content, you often need to break it down into smaller, manageable pieces while preserving the document structure. The chunk method splits HTML content intelligently, maintaining the integrity of HTML tags and elements. This example shows how to chunk HTML documents for search or retrieval purposes. ```tsx copy import { MDocument } from "@mastra/rag"; const html = `

<div>
  <h1>h1 content...</h1>
  <p>p content...</p>
</div>

`; const doc = MDocument.fromHTML(html); const chunks = await doc.chunk({ headers: [ ["h1", "Header 1"], ["p", "Paragraph"], ], }); console.log(chunks); ```




================================================================================ Source: src/pages/examples/rag/chunk-json.mdx ================================================================================ --- title: "Example: Semantically Chunking JSON | RAG | Mastra Docs" description: Chunk JSON data in Mastra to semantically chunk the document. --- import { GithubLink } from "../../../components/github-link"; # Semantically Chunking JSON When working with JSON data, you need to split it into smaller pieces while preserving the object structure. The chunk method breaks down JSON content intelligently, maintaining the relationships between keys and values. This example shows how to chunk JSON documents for search or retrieval purposes. ```tsx copy import { MDocument } from "@mastra/rag"; const testJson = { name: "John Doe", age: 30, email: "john.doe@example.com", }; const doc = MDocument.fromJSON(JSON.stringify(testJson)); const chunks = await doc.chunk({ maxSize: 100, }); console.log(chunks); ```




================================================================================ Source: src/pages/examples/rag/chunk-markdown.mdx ================================================================================ --- title: "Example: Semantically Chunking Markdown | RAG | Mastra Docs" description: Example of using Mastra to intelligently chunk markdown documents for search or retrieval purposes. --- import { GithubLink } from "../../../components/github-link"; # Chunk Markdown Markdown is more information-dense than raw HTML, making it easier to work with for RAG pipelines. When working with markdown, you need to split it into smaller pieces while preserving headers and formatting. The `chunk` method handles Markdown-specific elements like headers, lists, and code blocks intelligently. This example shows how to chunk markdown documents for search or retrieval purposes. ```tsx copy import { MDocument } from "@mastra/rag"; const doc = MDocument.fromMarkdown("# Your markdown content..."); const chunks = await doc.chunk(); ```




================================================================================ Source: src/pages/examples/rag/chunk-text.mdx ================================================================================ --- title: "Example: Semantically Chunking Text | RAG | Mastra Docs" description: Example of using Mastra to split large text documents into smaller chunks for processing. --- import { GithubLink } from "../../../components/github-link"; # Chunk Text When working with large text documents, you need to break them down into smaller, manageable pieces for processing. The chunk method splits text content into segments that can be used for search, analysis, or retrieval. This example shows how to split plain text into chunks using default settings. ```tsx copy import { MDocument } from "@mastra/rag"; const doc = MDocument.fromText("Your plain text content..."); const chunks = await doc.chunk(); ```




================================================================================ Source: src/pages/examples/rag/cleanup-rag.mdx ================================================================================ --- title: "Example: Optimizing Information Density | RAG | Mastra Docs" description: Example of implementing a RAG system in Mastra to optimize information density and deduplicate data using LLM-based processing. --- import { GithubLink } from "../../../components/github-link"; # Optimizing Information Density This example demonstrates how to implement a Retrieval-Augmented Generation (RAG) system using Mastra, OpenAI embeddings, and PGVector for vector storage. The system uses an agent to clean the initial chunks to optimize information density and deduplicate data. ## Overview The system implements RAG using Mastra and OpenAI, this time optimizing information density through LLM-based processing. Here's what it does: 1. Sets up two Mastra agents with gpt-4o-mini for response generation 2. Sets up another Mastra agent to handle cleaning up chunk data before vector storage 3. Creates a vector query tool to manage vector store interactions 4. Create a document chunking tool for agent to use to get chunks 5. Chunks text documents into smaller segments 6. Takes those chunks and filters them to remove irrelevant or duplicate information 7. Creates embeddings for both the initial chunks and the updated chunks 8. Stores them both in a PostgreSQL vector database 9. Retrieves relevant chunks based on queries using vector query tool 10. Generates context-aware responses using the Mastra agents ## Setup ### Environment Setup Make sure to set up your environment variables: ```bash filename=".env" POSTGRES_CONNECTION_STRING=your_connection_string_here ``` ### Dependencies Then, import the necessary dependencies: ```typescript copy showLineNumbers filename="src/mastra/index.ts" import { Mastra, Agent } from "@mastra/core"; import { PgVector } from "@mastra/vector-pg"; import { embedMany, MDocument, createVectorQueryTool, createDocumentChunkerTool, } from "@mastra/rag"; ``` ## Tool Creation ### Vector Query Tool Using createVectorQueryTool imported from @mastra/rag, you can create a tool that can query the vector database. ```typescript copy showLineNumbers{4} filename="src/mastra/index.ts" const vectorQueryTool = createVectorQueryTool({ vectorStoreName: "pgVector", indexName: "embeddings", options: { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3, }, }); const cleanedVectorQueryTool = createVectorQueryTool({ vectorStoreName: "pgVector", indexName: "cleanedEmbeddings", options: { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3, }, }); ``` ### Chunk Tool Using createDocumentChunkerTool imported from @mastra/rag, you can create a tool that chunks the document and sends the chunks to your agent. ```typescript copy showLineNumbers{24} filename="src/mastra/index.ts" const doc = MDocument.fromText(yourText); const documentChunkerTool = createDocumentChunkerTool({ doc, params: { strategy: "recursive", size: 256, overlap: 50, separator: "\n", }, }); ``` ## Agent Configuration Set up three Mastra agents: ```typescript copy showLineNumbers{36} filename="src/mastra/index.ts" export const ragAgentOne = new Agent({ name: "RAG Agent One", instructions: "You are a helpful assistant that answers questions based on the provided context. 
Keep your answers concise and relevant.", model: { provider: "OPEN_AI", name: "gpt-4o-mini", }, tools: { vectorQueryTool, }, }); export const ragAgentTwo = new Agent({ name: "RAG Agent Two", instructions: "You are a helpful assistant that answers questions based on the provided context. Keep your answers concise and relevant.", model: { provider: "OPEN_AI", name: "gpt-4o-mini", }, tools: { cleanedVectorQueryTool, }, }); export const ragAgentThree = new Agent({ name: "RAG Agent Three", instructions: "You are a helpful assistant that processes, cleans, and labels data before storage.", model: { provider: "OPEN_AI", name: "gpt-4o-mini", }, tools: { documentChunkerTool }, }); ``` ## Instantiate PgVector and Mastra Instantiate PgVector and Mastra with all components: ```typescript copy showLineNumbers{72} filename="src/mastra/index.ts" const pgVector = new PgVector(process.env.POSTGRES_CONNECTION_STRING!); export const mastra = new Mastra({ agents: { ragAgentOne, ragAgentTwo, ragAgentThree }, vectors: { pgVector }, }); const dataAgentOne = mastra.getAgent("ragAgentOne"); const dataAgentTwo = mastra.getAgent("ragAgentTwo"); const processAgent = mastra.getAgent("ragAgentThree"); ``` ## Document Processing Chunk the initial document clean them using the processAgent. ```typescript copy showLineNumbers{82} filename="src/mastra/index.ts" const chunks = await doc.chunk({ strategy: "recursive", size: 256, overlap: 50, separator: "\n", }); const chunkPrompt = `Take the chunks returned from the tool and clean them up according to the instructions provided. Make sure to filter out irrelevant information and remove duplicates.`; const newChunks = await processAgent.generate(chunkPrompt); const updatedDoc = MDocument.fromText(newChunks.text); const updatedChunks = await updatedDoc.chunk({ strategy: "recursive", size: 256, overlap: 50, separator: "\n", }); ``` ## Creating and Storing Embeddings Generate and store both raw and cleaned embeddings: ```typescript copy showLineNumbers{101} filename="src/mastra/index.ts" const { embeddings } = await embedMany(chunks, { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3, }); const { embeddings: cleanedEmbeddings } = await embedMany(updatedChunks, { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3, }); const vectorStore = mastra.getVector("pgVector"); await vectorStore.createIndex("embeddings", 1536); await vectorStore.createIndex("cleanedEmbeddings", 1536); await vectorStore.upsert( "embeddings", embeddings, chunks?.map((chunk: any) => ({ text: chunk.text })), ); await vectorStore.upsert( "cleanedEmbeddings", cleanedEmbeddings, updatedChunks?.map((chunk: any) => ({ text: chunk.text })), ); ``` ## Response Generation Function to generate responses with index selection: ```typescript copy showLineNumbers{129} filename="src/mastra/index.ts" async function generateResponse(query: string, agent: Agent) { const prompt = ` Please answer the following question: ${query} Please base your answer only on the context provided in the tool with this index ${index}. If the context doesn't contain enough information to fully answer the question, please state that explicitly. 
`; // Call the agent to generate a response const completion = await agent.generate(prompt); return completion.text; } ``` ## Example Usage ```typescript copy showLineNumbers{143} filename="src/mastra/index.ts" async function answerQueries(queries: string[], agent: Agent) { for (const query of queries) { try { const answer = await generateResponse(query, agent); console.log("\nQuery:", query); console.log("Response:", answer); } catch (error) { console.error(`Error processing query "${query}":`, error); } } } const queries = [ "What is the average temperature on Mars?", "What technologies are used in modern spacecraft?", "What are all the requirements for space settlements?", "What are all the dates mentioned related to space stations?", "What are all the mentions of sustainability in space settlements?", ]; // Compare responses between raw and cleaned embeddings await answerQueries(queries, dataAgentOne); await answerQueries(queries, dataAgentTwo); ```




================================================================================ Source: src/pages/examples/rag/cot-rag.mdx ================================================================================ --- title: "Example: Chain of Thought Prompting | RAG | Mastra Docs" description: Example of implementing a RAG system in Mastra with chain-of-thought reasoning using OpenAI and PGVector. --- import { GithubLink } from "../../../components/github-link"; # Chain of Thought Prompting This example demonstrates how to implement a Retrieval-Augmented Generation (RAG) system using Mastra, OpenAI embeddings, and PGVector for vector storage, with an emphasis on chain-of-thought reasoning. ## Overview The system implements RAG using Mastra and OpenAI with chain-of-thought prompting. Here's what it does: 1. Sets up a Mastra agent with gpt-4o-mini for response generation 2. Creates a vector query tool to manage vector store interactions 3. Chunks text documents into smaller segments 4. Creates embeddings for these chunks 5. Stores them in a PostgreSQL vector database 6. Retrieves relevant chunks based on queries using vector query tool 7. Generates context-aware responses using chain-of-thought reasoning ## Setup ### Environment Setup Make sure to set up your environment variables: ```bash filename=".env" POSTGRES_CONNECTION_STRING=your_connection_string_here ``` ### Dependencies Then, import the necessary dependencies: ```typescript copy showLineNumbers filename="src/mastra/index.ts" import { Mastra, Agent } from "@mastra/core"; import { PgVector } from "@mastra/vector-pg"; import { createVectorQueryTool, embedMany, MDocument } from "@mastra/rag"; ``` ## Vector Query Tool Creation Using createVectorQueryTool imported from @mastra/rag, you can create a tool that can query the vector database. ```typescript copy showLineNumbers{4} filename="src/mastra/index.ts" const vectorQueryTool = createVectorQueryTool({ vectorStoreName: "pgVector", indexName: "embeddings", options: { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3, }, topK: 3, }); ``` ## Agent Configuration Set up the Mastra agent with chain-of-thought prompting instructions: ```typescript copy showLineNumbers{15} filename="src/mastra/index.ts" export const ragAgent = new Agent({ name: "RAG Agent", instructions: `You are a helpful assistant that answers questions based on the provided context. Follow these steps for each response: 1. First, carefully analyze the retrieved context chunks and identify key information. 2. Break down your thinking process about how the retrieved information relates to the query. 3. Explain how you're connecting different pieces from the retrieved chunks. 4. Draw conclusions based only on the evidence in the retrieved context. 5. If the retrieved chunks don't contain enough information, explicitly state what's missing. 
Format your response as: THOUGHT PROCESS: - Step 1: [Initial analysis of retrieved chunks] - Step 2: [Connections between chunks] - Step 3: [Reasoning based on chunks] FINAL ANSWER: [Your concise answer based on the retrieved context]`, model: { provider: "OPEN_AI", name: "gpt-4o-mini", }, tools: { vectorQueryTool }, }); ``` ## Instantiate PgVector and Mastra Instantiate PgVector and Mastra with all components: ```typescript copy showLineNumbers{41} filename="src/mastra/index.ts" const pgVector = new PgVector(process.env.POSTGRES_CONNECTION_STRING!); export const mastra = new Mastra({ agents: { ragAgent }, vectors: { pgVector }, }); const agent = mastra.getAgent("ragAgent"); ``` ## Document Processing Create a document and process it into chunks: ```typescript copy showLineNumbers{49} filename="src/mastra/index.ts" const doc = MDocument.fromText( `The Impact of Climate Change on Global Agriculture...`, ); const chunks = await doc.chunk({ strategy: "recursive", size: 512, overlap: 50, separator: "\n", }); ``` ## Creating and Storing Embeddings Generate embeddings for the chunks and store them in the vector database: ```typescript copy showLineNumbers{58} filename="src/mastra/index.ts" const { embeddings } = await embedMany(chunks, { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3, }); const vectorStore = mastra.getVector("pgVector"); await vectorStore.createIndex("embeddings", 1536); await vectorStore.upsert( "embeddings", embeddings, chunks?.map((chunk: any) => ({ text: chunk.text })), ); ``` ## Response Generation with Chain-of-Thought Function to generate responses using chain-of-thought reasoning: ```typescript copy showLineNumbers{72} filename="src/mastra/index.ts" async function generateResponse(query: string) { const prompt = ` Please answer the following question using chain-of-thought reasoning: ${query} Please base your answer only on the context provided in the tool. If the context doesn't contain enough information to fully answer the question, please state that explicitly. Remember: Explain how you're using the retrieved information to reach your conclusions. `; const completion = await agent.generate(prompt); return completion.text; } ``` ## Example Usage ```typescript copy showLineNumbers{86} filename="src/mastra/index.ts" async function answerQueries(queries: string[]) { for (const query of queries) { try { const answer = await generateResponse(query); console.log("\nQuery:", query); console.log("\nReasoning Chain + Retrieved Context Response:"); console.log(answer); console.log("\n-------------------"); } catch (error) { console.error(`Error processing query "${query}":`, error); } } } const queries = [ "What are the main adaptation strategies for farmers?", "Analyze how temperature affects crop yields.", "What connections can you draw between climate change and food security?", "How are farmers implementing solutions to address climate challenges?", "What future implications are discussed for agriculture?", ]; await answerQueries(queries); ```




================================================================================ Source: src/pages/examples/rag/cot-workflow-rag.mdx ================================================================================ --- title: "Example: Chain of Thought Workflow | RAG | Mastra Docs" description: Example of implementing a RAG system in Mastra with chain-of-thought reasoning using OpenAI and PGVector. --- import { GithubLink } from "../../../components/github-link"; # Chain of Thought Workflow RAG This example demonstrates how to implement a Retrieval-Augmented Generation (RAG) system using Mastra, OpenAI embeddings, and PGVector for vector storage, with an emphasis on chain-of-thought reasoning using a step-by-step workflow. ## Overview The system implements RAG using Mastra and OpenAI with chain-of-thought prompting through a defined workflow. Here's what it does: 1. Sets up a Mastra agent with gpt-4o-mini for response generation 2. Creates a vector query tool to manage vector store interactions 3. Defines a workflow with multiple steps for chain-of-thought reasoning 4. Processes and chunks text documents 5. Creates and stores embeddings in PostgreSQL 6. Generates responses through the workflow steps ## Setup ### Environment Setup Make sure to set up your environment variables: ```bash filename=".env" POSTGRES_CONNECTION_STRING=your_connection_string_here ``` ### Dependencies Import the necessary dependencies: ```typescript copy showLineNumbers filename="src/mastra/index.ts" import { Mastra, Agent, Step, Workflow } from "@mastra/core"; import { PgVector } from "@mastra/vector-pg"; import { createVectorQueryTool, embedMany, MDocument } from "@mastra/rag"; import { z } from "zod"; ``` ## Workflow Definition First, define the workflow with its trigger schema: ```typescript copy showLineNumbers{5} filename="src/mastra/index.ts" export const ragWorkflow = new Workflow({ name: "rag-workflow", triggerSchema: z.object({ query: z.string(), }), }); ``` ## Vector Query Tool Creation Create a tool for querying the vector database: ```typescript copy showLineNumbers{12} filename="src/mastra/index.ts" const vectorQueryTool = createVectorQueryTool({ vectorStoreName: "pgVector", indexName: "embeddings", options: { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3, }, topK: 3, }); ``` ## Agent Configuration Set up the Mastra agent: ```typescript copy showLineNumbers{23} filename="src/mastra/index.ts" export const ragAgent = new Agent({ name: "RAG Agent", instructions: `You are a helpful assistant that answers questions based on the provided context.`, model: { provider: "OPEN_AI", name: "gpt-4o-mini", }, tools: { vectorQueryTool, }, }); ``` ## Workflow Steps The workflow is divided into multiple steps for chain-of-thought reasoning: ### 1. Context Analysis Step ```typescript copy showLineNumbers{35} filename="src/mastra/index.ts" const analyzeContext = new Step({ id: "analyzeContext", outputSchema: z.object({ initialAnalysis: z.string(), }), execute: async ({ context, mastra }) => { console.log("---------------------------"); const ragAgent = mastra?.agents?.ragAgent; const query = context?.machineContext?.getStepPayload<{ query: string }>( "trigger", )?.query; const analysisPrompt = `${query} 1. First, carefully analyze the retrieved context chunks and identify key information.`; const analysis = await ragAgent?.generate(analysisPrompt); console.log(analysis?.text); return { initialAnalysis: analysis?.text ?? "", }; }, }); ``` ### 2. 
Thought Breakdown Step ```typescript copy showLineNumbers{55} filename="src/mastra/index.ts" const breakdownThoughts = new Step({ id: "breakdownThoughts", outputSchema: z.object({ breakdown: z.string(), }), execute: async ({ context, mastra }) => { console.log("---------------------------"); const ragAgent = mastra?.agents?.ragAgent; const analysis = context?.machineContext?.getStepPayload<{ initialAnalysis: string; }>("analyzeContext")?.initialAnalysis; const connectionPrompt = ` Based on the initial analysis: ${analysis} 2. Break down your thinking process about how the retrieved information relates to the query. `; const connectionAnalysis = await ragAgent?.generate(connectionPrompt); console.log(connectionAnalysis?.text); return { breakdown: connectionAnalysis?.text ?? "", }; }, }); ``` ### 3. Connection Step ```typescript copy showLineNumbers{78} filename="src/mastra/index.ts" const connectPieces = new Step({ id: "connectPieces", outputSchema: z.object({ connections: z.string(), }), execute: async ({ context, mastra }) => { console.log("---------------------------"); const ragAgent = mastra?.agents?.ragAgent; const process = context?.machineContext?.getStepPayload<{ breakdown: string; }>("breakdownThoughts")?.breakdown; const connectionPrompt = ` Based on the breakdown: ${process} 3. Explain how you're connecting different pieces from the retrieved chunks. `; const connections = await ragAgent?.generate(connectionPrompt); console.log(connections?.text); return { connections: connections?.text ?? "", }; }, }); ``` ### 4. Conclusion Step ```typescript copy showLineNumbers{101} filename="src/mastra/index.ts" const drawConclusions = new Step({ id: "drawConclusions", outputSchema: z.object({ conclusions: z.string(), }), execute: async ({ context, mastra }) => { console.log("---------------------------"); const ragAgent = mastra?.agents?.ragAgent; const evidence = context?.machineContext?.getStepPayload<{ connections: string; }>("connectPieces")?.connections; const conclusionPrompt = ` Based on the connections: ${evidence} 4. Draw conclusions based only on the evidence in the retrieved context. `; const conclusions = await ragAgent?.generate(conclusionPrompt); console.log(conclusions?.text); return { conclusions: conclusions?.text ?? "", }; }, }); ``` ### 5. Final Answer Step ```typescript copy showLineNumbers{124} filename="src/mastra/index.ts" const finalAnswer = new Step({ id: "finalAnswer", outputSchema: z.object({ finalAnswer: z.string(), }), execute: async ({ context, mastra }) => { console.log("---------------------------"); const ragAgent = mastra?.agents?.ragAgent; const conclusions = context?.machineContext?.getStepPayload<{ conclusions: string; }>("drawConclusions")?.conclusions; const answerPrompt = ` Based on the conclusions: ${conclusions} Format your response as: THOUGHT PROCESS: - Step 1: [Initial analysis of retrieved chunks] - Step 2: [Connections between chunks] - Step 3: [Reasoning based on chunks] FINAL ANSWER: [Your concise answer based on the retrieved context]`; const finalAnswer = await ragAgent?.generate(answerPrompt); console.log(finalAnswer?.text); return { finalAnswer: finalAnswer?.text ?? 
"", }; }, }); ``` ## Workflow Configuration Connect all the steps in the workflow: ```typescript copy showLineNumbers{154} filename="src/mastra/index.ts" ragWorkflow .step(analyzeContext) .then(breakdownThoughts) .then(connectPieces) .then(drawConclusions) .then(finalAnswer); ragWorkflow.commit(); ``` ## Instantiate PgVector and Mastra Instantiate PgVector and Mastra with all components: ```typescript copy showLineNumbers{198} filename="src/mastra/index.ts" const pgVector = new PgVector(process.env.POSTGRES_CONNECTION_STRING!); export const mastra = new Mastra({ agents: { ragAgent }, vectors: { pgVector }, workflows: { ragWorkflow }, }); ``` ## Document Processing Process and chunks the document: ```typescript copy showLineNumbers{206} filename="src/mastra/index.ts" const doc = MDocument.fromText(`Your document text here...`); const chunks = await doc.chunk({ strategy: "recursive", size: 512, overlap: 50, separator: "\n", }); ``` ## Embedding Creation and Storage Generate and store embeddings: ```typescript copy showLineNumbers{215} filename="src/mastra/index.ts" const { embeddings } = await embedMany(chunks, { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3, }); const vectorStore = mastra.getVector("pgVector"); await vectorStore.createIndex("embeddings", 1536); await vectorStore.upsert( "embeddings", embeddings, chunks?.map((chunk: any) => ({ text: chunk.text })), ); ``` ## Response Generation Function to generate responses using the workflow: ```typescript copy showLineNumbers{229} filename="src/mastra/index.ts" async function generateResponse(query: string) { const prompt = ` Please answer the following question: ${query} Please base your answer only on the context provided in the tool. If the context doesn't contain enough information to fully answer the question, please state that explicitly. `; const { runId, start } = ragWorkflow.createRun(); const workflowResult = await start({ triggerData: { query: prompt, }, }); return workflowResult; } ``` ## Example Usage ```typescript copy showLineNumbers{246} filename="src/mastra/index.ts" const query = "What are the main benefits of telemedicine?"; console.log("\nQuery:", query); const result = await generateResponse(query); console.log("\nThought Process:"); console.log(result.results); ```




================================================================================ Source: src/pages/examples/rag/embed-chunk-array.mdx ================================================================================ --- title: "Example: Embedding Chunk Arrays | RAG | Mastra Docs" description: Example of using Mastra to generate embeddings for an array of text chunks for similarity search. --- import { GithubLink } from "../../../components/github-link"; # Embed Chunk Array After chunking documents, you need to convert the text chunks into numerical vectors that can be used for similarity search. The `embed` method transforms text chunks into embeddings using your chosen provider and model. This example shows how to generate embeddings for an array of text chunks. ```tsx copy import { MDocument, embed } from "@mastra/rag"; const doc = MDocument.fromText("Your text content..."); const chunks = await doc.chunk(); const { embeddings } = await embed(chunks, { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3, }); ```
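As a quick sanity check (not part of the original example), you can confirm that one embedding is produced per chunk and that each vector has the dimensionality of the chosen model, 1536 for `text-embedding-3-small`, which is the same number passed to `createIndex` in the storage examples.

```tsx copy
// Sanity check: one embedding per chunk, each a 1536-dimensional vector
// for text-embedding-3-small (the dimension used when creating vector indexes).
console.log(embeddings.length === chunks.length); // true
console.log(embeddings[0].length); // 1536
```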




================================================================================ Source: src/pages/examples/rag/embed-text-chunk.mdx ================================================================================ --- title: "Example: Embedding Text Chunks | RAG | Mastra Docs" description: Example of using Mastra to generate an embedding for a single text chunk for similarity search. --- import { GithubLink } from "../../../components/github-link"; # Embed Text Chunk When working with individual text chunks, you need to convert them into numerical vectors for similarity search. The `embed` method transforms a single text chunk into an embedding using your chosen provider and model. ```tsx copy import { MDocument, embed } from "@mastra/rag"; const doc = MDocument.fromText("Your text content..."); const chunks = await doc.chunk(); const { embedding } = await embed(chunks[0], { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3, }); ```




================================================================================ Source: src/pages/examples/rag/embed-text-with-cohere.mdx ================================================================================ --- title: "Example: Embedding Text with Cohere | RAG | Mastra Docs" description: Example of using Mastra to generate embeddings using Cohere's embedding model. --- import { GithubLink } from "../../../components/github-link"; # Embed Text with Cohere When working with alternative embedding providers, you need a way to generate vectors that match your chosen model's specifications. The `embed` method supports multiple providers, allowing you to switch between different embedding services. This example shows how to generate embeddings using Cohere's embedding model. ```tsx copy import { MDocument, embed } from "@mastra/rag"; const doc = MDocument.fromText("Your text content..."); const chunks = await doc.chunk(); const { embeddings } = await embed(chunks, { provider: "COHERE", model: "embed-english-v3.0", maxRetries: 3, }); ```
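One practical note when switching providers: the vector index must be created with the dimensionality of the model you embed with. The snippet below is a hedged sketch showing the follow-up storage step, assuming `embed-english-v3.0` returns 1024-dimensional vectors (versus the 1536 used for OpenAI's `text-embedding-3-small`).

```tsx copy
// Hedged sketch: the index dimension must match the embedding model.
// Cohere's embed-english-v3.0 produces 1024-dimensional vectors, so the index
// is created with 1024 instead of the 1536 used for text-embedding-3-small.
import { PgVector } from '@mastra/vector-pg';

const pgVector = new PgVector(process.env.POSTGRES_CONNECTION_STRING!);

await pgVector.createIndex('cohere_embeddings', 1024);
await pgVector.upsert(
  'cohere_embeddings',
  embeddings,
  chunks?.map(chunk => ({ text: chunk.text })),
);
```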




================================================================================ Source: src/pages/examples/rag/filter-rag.mdx ================================================================================ --- title: "Example: Metadata Filtering | Retrieval | RAG | Mastra Docs" description: Example of implementing a RAG system in Mastra using metadata filters to search for relevant chunks in the vector store. --- import { GithubLink } from "../../../components/github-link"; # Metadata Filtering This example demonstrates how to implement a Retrieval-Augmented Generation (RAG) system using Mastra, OpenAI embeddings, and PGVector for vector storage. This system uses metadata filters to search for relevant chunks in the vector store, reducing the amount of results returned. ## Overview The system implements RAG using Mastra and OpenAI. Here's what it does: 1. Sets up a Mastra agent with gpt-4o-mini for response generation 2. Creates a vector query tool to manage vector store interactions 3. Chunks text documents into smaller segments 4. Creates embeddings for these chunks 5. Stores them in a PostgreSQL vector database 6. Retrieves relevant chunks based on queries using vector query tool 7. Generates context-aware responses using the Mastra agent ## Setup ### Environment Setup Make sure to set up your environment variables: ```bash filename=".env" POSTGRES_CONNECTION_STRING=your_connection_string_here ``` ### Dependencies Then, import the necessary dependencies: ```typescript copy showLineNumbers filename="src/mastra/index.ts" import { Mastra, Agent } from "@mastra/core"; import { PgVector } from "@mastra/vector-pg"; import { createVectorQueryTool, embedMany, MDocument } from "@mastra/rag"; ``` ## Vector Query Tool Creation Using createVectorQueryTool imported from @mastra/rag, you can create a tool that can query the vector database. ```typescript copy showLineNumbers{4} filename="src/mastra/index.ts" const vectorQueryTool = createVectorQueryTool({ vectorStoreName: "pgVector", indexName: "embeddings", options: { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3, }, topK: 3, vectorFilterType: "pg", }); ``` ## Agent Configuration Set up the Mastra agent that will handle the responses: ```typescript copy showLineNumbers{16} filename="src/mastra/index.ts" export const ragAgent = new Agent({ name: "RAG Agent", instructions: "You are a helpful assistant that answers questions based on the provided context. 
Keep your answers concise and relevant.", model: { provider: "OPEN_AI", name: "gpt-4o-mini", }, tools: { vectorQueryTool }, }); ``` ## Instantiate PgVector and Mastra Instantiate PgVector and Mastra with all components: ```typescript copy showLineNumbers{27} filename="src/mastra/index.ts" const pgVector = new PgVector(process.env.POSTGRES_CONNECTION_STRING!); export const mastra = new Mastra({ agents: { ragAgent }, vectors: { pgVector }, }); const agent = mastra.getAgent("ragAgent"); ``` ## Document Processing Create a document and process it into chunks: ```typescript copy showLineNumbers{36} filename="src/mastra/index.ts" const doc = MDocument.fromText( `The Impact of Climate Change on Global Agriculture...`, ); const chunks = await doc.chunk({ strategy: "recursive", size: 512, overlap: 50, separator: "\n", extract: { keywords: true, }, }); ``` ## Creating and Storing Embeddings Generate embeddings for the chunks and store them in the vector database: ```typescript copy showLineNumbers{48} filename="src/mastra/index.ts" const { embeddings } = await embedMany(chunks, { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3, }); const vectorStore = mastra.getVector("pgVector"); await vectorStore.createIndex("embeddings", 1536); await vectorStore.upsert( "embeddings", embeddings, chunks?.map((chunk: any, index: number) => ({ text: chunk.text, ...chunk.metadata, nested: { keywords: chunk.metadata.excerptKeywords .replace("KEYWORDS:", "") .split(",") .map((k) => k.trim()), id: index, }, })), ); ``` ## Response Generation Function to generate responses based on retrieved context: ```typescript copy showLineNumbers{65} filename="src/mastra/index.ts" async function generateResponse(query: string, filter: any) { const buildFilterString = (f: any): string => { if ("type" in f) { return `type:${f.type} condition with filters: [${f.filters .map(buildFilterString) .join(", ")}]`; } return `keyword: ${f.keyword} operator: ${f.operator} value: ${f.value}`; }; const filterDescription = buildFilterString(filter); const prompt = ` Please answer the following question: ${query} Please base your answer only on the context provided in the tool using these filter conditions: ${filterDescription} If the context doesn't contain enough information to fully answer the question, please state that explicitly.
`; // Call the agent to generate a response const completion = await agent.generate(prompt); return completion.text; } ``` ## Example Usage ```typescript copy showLineNumbers{89} filename="src/mastra/index.ts" async function answerQueries( queries: { query: string; filter: any; }[], ) { for (const { query, filter } of queries) { try { // Generate and log the response const answer = await generateResponse(query, filter); console.log("\nQuery:", query); console.log("Response:", answer); } catch (error) { console.error(`Error processing query "${query}":`, error); } } } const queries = [ { query: "What adaptation strategies are mentioned?", filter: { keyword: "nested.keywords", operator: "ilike", value: "adaptation", }, }, { query: "Show me recent sections", filter: { keyword: "nested.id", operator: "gt", value: "2", }, }, { query: "Find sections about drought and irrigation", filter: { type: "$and", filters: [ { keyword: "text", operator: "ilike", value: "drought", }, { keyword: "text", operator: "ilike", value: "irrigation", }, ], }, }, { query: "Find sections about wheat or rice", filter: { type: "$or", filters: [ { keyword: "text", operator: "ilike", value: "wheat", }, { keyword: "text", operator: "ilike", value: "rice", }, ], }, }, ]; await answerQueries(queries); ```




================================================================================ Source: src/pages/examples/rag/graph-rag.mdx ================================================================================ --- title: "Example: A Complete Graph RAG System | RAG | Mastra Docs" description: Example of implementing a Graph RAG system in Mastra using OpenAI embeddings and PGVector for vector storage. --- import { GithubLink } from "../../../components/github-link"; # Graph RAG This example demonstrates how to implement a Retrieval-Augmented Generation (RAG) system using Mastra, OpenAI embeddings, and PGVector for vector storage. ## Overview The system implements Graph RAG using Mastra and OpenAI. Here's what it does: 1. Sets up a Mastra agent with gpt-4o-mini for response generation 2. Creates a GraphRAG tool to manage vector store interactions and knowledge graph creation/traversal 3. Chunks text documents into smaller segments 4. Creates embeddings for these chunks 5. Stores them in a PostgreSQL vector database 6. Creates a knowledge graph of relevant chunks based on queries using GraphRAG tool - Tool returns results from vector store and creates knowledge graph - Traverses knowledge graph using query 7. Generates context-aware responses using the Mastra agent ## Setup ### Environment Setup Make sure to set up your environment variables: ```bash filename=".env" POSTGRES_CONNECTION_STRING=your_connection_string_here ``` ### Dependencies Then, import the necessary dependencies: ```typescript copy showLineNumbers filename="src/mastra/index.ts" import { Mastra, Agent } from "@mastra/core"; import { PgVector } from "@mastra/vector-pg"; import { embedMany, MDocument, createGraphRAGTool } from "@mastra/rag"; ``` ## GraphRAG Tool Creation Using createGraphRAGTool imported from @mastra/rag, you can create a tool that queries the vector database and converts the results into a knowledge graph. ```typescript copy showLineNumbers{4} filename="src/mastra/index.ts" const graphRagTool = createGraphRAGTool({ vectorStoreName: "pgVector", indexName: "embeddings", options: { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3, }, graphOptions: { dimension: 1536, threshold: 0.7, }, topK: 5, }); ``` ## Agent Configuration Set up the Mastra agent that will handle the responses: ```typescript copy showLineNumbers{19} filename="src/mastra/index.ts" export const ragAgent = new Agent({ name: "GraphRAG Agent", instructions: `You are a helpful assistant that answers questions based on the provided context. Format your answers as follows: 1. DIRECT FACTS: List only the directly stated facts from the text relevant to the question (2-3 bullet points) 2. CONNECTIONS MADE: List the relationships you found between different parts of the text (2-3 bullet points) 3. 
CONCLUSION: One sentence summary that ties everything together Keep each section brief and focus on the most important points.`, model: { provider: "OPEN_AI", name: "gpt-4o-mini", }, tools: { graphRagTool, }, }); ``` ## Instantiate PgVector and Mastra Instantiate PgVector and Mastra with all components: ```typescript copy showLineNumbers{37} filename="src/mastra/index.ts" const pgVector = new PgVector(process.env.POSTGRES_CONNECTION_STRING!); export const mastra = new Mastra({ agents: { ragAgent }, vectors: { pgVector }, }); const agent = mastra.getAgent("ragAgent"); ``` ## Document Processing Create a document and process it into chunks: ```typescript copy showLineNumbers{46} filename="src/mastra/index.ts" const doc = MDocument.fromText( `Riverdale Heights: Community Development Study...`, ); const chunks = await doc.chunk({ strategy: "recursive", size: 512, overlap: 50, separator: "\n", }); ``` ## Creating and Storing Embeddings Generate embeddings for the chunks and store them in the vector database: ```typescript copy showLineNumbers{55} filename="src/mastra/index.ts" const { embeddings } = await embedMany(chunks, { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3, }); const vectorStore = mastra.getVector("pgVector"); await vectorStore.createIndex("embeddings", 1536); await vectorStore.upsert( "embeddings", embeddings, chunks?.map((chunk: any) => ({ text: chunk.text })), ); ``` ## Response Generation Function to generate responses based on retrieved context: ```typescript copy showLineNumbers{69} filename="src/mastra/index.ts" async function generateResponse(query: string) { const prompt = ` Please answer the following question using both semantic and graph-based context: ${query} Please base your answer only on the context provided in the tool. If the context doesn't contain enough information to fully answer the question, please state that explicitly. `; const completion = await agent.generate(prompt); return completion.text; } ``` ## Example Usage ```typescript copy showLineNumbers{81} filename="src/mastra/index.ts" async function answerQueries(queries: string[]) { for (const query of queries) { try { const answer = await generateResponse(query); console.log("\nQuery:", query); console.log("Response:", answer); } catch (error) { console.error(`Error processing query "${query}":`, error); } } } const queries = [ "What are the direct and indirect effects of early railway decisions on Riverdale Heights' current state?", "How have changes in transportation infrastructure affected different generations of local businesses and community spaces?", "Compare how the Rossi family business and Thompson Steel Works responded to major infrastructure changes, and how their responses affected the community.", "Trace how the transformation of the Thompson Steel Works site has influenced surrounding businesses and cultural spaces from 1932 to present.", ]; await answerQueries(queries); ```




================================================================================ Source: src/pages/examples/rag/hybrid-vector-search.mdx ================================================================================ --- title: "Example: Hybrid Vector Search | RAG | Mastra Docs" description: Example of using metadata filters with PGVector to enhance vector search results in Mastra. --- import { GithubLink } from "../../../components/github-link"; # Hybrid Vector Search When you combine vector similarity search with metadata filters, you can create a hybrid search that is more precise and efficient. This approach combines: - Vector similarity search to find the most relevant documents - Metadata filters to refine the search results based on additional criteria This example demonstrates how to use hybrid vector search with Mastra and PGVector. ## Overview The system implements filtered vector search using Mastra and PGVector. Here's what it does: 1. Queries existing embeddings in PGVector with metadata filters 2. Shows how to filter by different metadata fields 3. Demonstrates combining vector similarity with metadata filtering > **Note**: For examples of how to extract metadata from your documents, see the [Metadata Extraction](./metadata-extraction) guide. > > To learn how to create and store embeddings in PGVector, see the [Insert Embeddings in PGVector](./insert-embedding-in-pgvector) guide. ## Setup ### Environment Setup Make sure to set up your environment variables: ```bash filename=".env" POSTGRES_CONNECTION_STRING=your_connection_string_here ``` ### Dependencies Import the necessary dependencies: ```typescript copy showLineNumbers filename="src/index.ts" import { embed } from '@mastra/rag'; import { PgVector } from '@mastra/vector-pg'; ``` ## Vector Store Initialization Initialize PgVector with your connection string: ```typescript copy showLineNumbers{4} filename="src/index.ts" const pgVector = new PgVector(process.env.POSTGRES_CONNECTION_STRING!); ``` ## Example Usage ### Filter by Metadata Value ```typescript copy showLineNumbers{55} filename="src/index.ts" // Create embedding for the query const { embedding } = await embed('[Insert query based on document here]', { provider: 'OPEN_AI', model: 'text-embedding-3-small', maxRetries: 3, }); // Query with metadata filter const result = await pgVector.query('embeddings', embedding, 3, { 'path.to.metadata': { eq: 'value', }, }); console.log('Results:', result); ```
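The same query shape can be reused with other metadata fields. The example below is illustrative only: the field name (`source`) and its value are hypothetical, but the pattern matches the query above (an embedded query string, a topK of 3, and an equality filter).

```typescript copy
// Illustrative only: "source" and its value are hypothetical field names,
// but the query follows the same shape as the example above.
const { embedding: otherEmbedding } = await embed('Another query about the document', {
  provider: 'OPEN_AI',
  model: 'text-embedding-3-small',
  maxRetries: 3,
});

const filteredBySource = await pgVector.query('embeddings', otherEmbedding, 3, {
  source: { eq: 'chapter-1' },
});

console.log('Filtered results:', filteredBySource);
```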




================================================================================ Source: src/pages/examples/rag/insert-embedding-in-astra.mdx ================================================================================ --- title: "Example: Insert Embeddings in Astra DB | RAG | Mastra Docs" description: Example of using Mastra to store embeddings in Astra DB for similarity search. --- import { GithubLink } from '../../../components/github-link'; # Insert Embedding in Astra DB After generating embeddings, you need to store them in a vector database for similarity search. The `AstraVector` class provides methods to create collections and insert embeddings into DataStax Astra DB, a cloud-native vector database. This example shows how to store embeddings in Astra DB for later retrieval. ```tsx copy import { AstraVector } from '@mastra/vector-astra'; import { MDocument, embed } from '@mastra/rag'; const doc = MDocument.fromText('Your text content...'); const chunks = await doc.chunk(); const { embeddings } = await embed(chunks, { provider: 'OPEN_AI', model: 'text-embedding-3-small', maxRetries: 3, }); const astra = new AstraVector({ token: process.env.ASTRA_DB_TOKEN, endpoint: process.env.ASTRA_DB_ENDPOINT, keyspace: process.env.ASTRA_DB_KEYSPACE, }); await astra.createIndex('test_collection', 1536); await astra.upsert( 'test_collection', embeddings, chunks?.map(chunk => ({ text: chunk.text })), ); ``` {/*




*/} ================================================================================ Source: src/pages/examples/rag/insert-embedding-in-chroma.mdx ================================================================================ --- title: "Example: Insert Embeddings in Chroma | RAG | Mastra Docs" description: Example of using Mastra to store embeddings in Chroma for similarity search. --- import { GithubLink } from '../../../components/github-link'; # Insert Embedding in Chroma After generating embeddings, you need to store them in a vector database for similarity search. The `ChromaVector` class provides methods to create collections and insert embeddings into Chroma, an open-source embedding database. This example shows how to store embeddings in Chroma for later retrieval. ```tsx copy import { ChromaVector } from '@mastra/vector-chroma'; import { MDocument, embed } from '@mastra/rag'; const doc = MDocument.fromText('Your text content...'); const chunks = await doc.chunk(); const { embeddings } = await embed(chunks, { provider: 'OPEN_AI', model: 'text-embedding-3-small', maxRetries: 3, }); const chroma = new ChromaVector({ path: "path/to/chroma/db", }); await chroma.createIndex('test_collection', 1536); await chroma.upsert( 'test_collection', embeddings, chunks?.map(chunk => ({ text: chunk.text })), ); ``` {/*




*/} ================================================================================ Source: src/pages/examples/rag/insert-embedding-in-libsql.mdx ================================================================================ --- title: "Example: Insert Embeddings in LibSQL | RAG | Mastra Docs" description: Example of using Mastra to store embeddings in LibSQL for similarity search. --- import { GithubLink } from '../../../components/github-link'; # Insert Embedding in LibSQL After generating embeddings, you need to store them in a vector database for similarity search. The `LibSQLVector` class provides methods to create collections and insert embeddings into LibSQL, a fork of SQLite with vector extensions. This example shows how to store embeddings in LibSQL for later retrieval. ```tsx copy import { LibSQLVector } from '@mastra/vector-libsql'; import { MDocument, embed } from '@mastra/rag'; const doc = MDocument.fromText('Your text content...'); const chunks = await doc.chunk(); const { embeddings } = await embed(chunks, { provider: 'OPEN_AI', model: 'text-embedding-3-small', maxRetries: 3, }); const libsql = new LibSQLVector(process.env.DATABASE_URL); await libsql.createIndex('test_collection', 1536); await libsql.upsert( 'test_collection', embeddings, chunks?.map(chunk => ({ text: chunk.text })), ); ``` {/*




*/} ================================================================================ Source: src/pages/examples/rag/insert-embedding-in-pgvector.mdx ================================================================================ --- title: "Example: Insert Embeddings in PgVector | RAG | Mastra Docs" description: Example of using Mastra to store embeddings in a PostgreSQL database with the pgvector extension for similarity search. --- import { GithubLink } from "../../../components/github-link"; # Insert Embedding in PgVector After generating embeddings, you need to store them in a database that supports vector similarity search. The `PgVector` class provides methods to create indexes and insert embeddings into PostgreSQL with the pgvector extension. This example shows how to store embeddings in a PostgreSQL database for later retrieval. ```tsx copy import { PgVector } from '@mastra/vector-pg'; import { MDocument, embed } from "@mastra/rag"; const doc = MDocument.fromText("Your text content..."); const chunks = await doc.chunk(); const { embeddings } = await embed(chunks, { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3, }); const pgVector = new PgVector("postgresql://localhost:5432/mydb"); await pgVector.createIndex("test_index", 1536); await pgVector.upsert( "test_index", embeddings, chunks?.map((chunk: any) => ({ text: chunk.text })), ); ```




================================================================================ Source: src/pages/examples/rag/insert-embedding-in-pinecone.mdx ================================================================================ --- title: "Example: Insert Embeddings in Pinecone | RAG | Mastra Docs" description: Example of using Mastra to store embeddings in Pinecone for similarity search. --- import { GithubLink } from '../../../components/github-link'; # Insert Embedding in Pinecone After generating embeddings, you need to store them in a vector database for similarity search. The `PineconeVector` class provides methods to create indexes and insert embeddings into Pinecone, a managed vector database service. This example shows how to store embeddings in Pinecone for later retrieval. ```tsx copy import { PineconeVector } from '@mastra/vector-pinecone'; import { MDocument, embed } from '@mastra/rag'; const doc = MDocument.fromText('Your text content...'); const chunks = await doc.chunk(); const { embeddings } = await embed(chunks, { provider: 'OPEN_AI', model: 'text-embedding-3-small', maxRetries: 3, }); const pinecone = new PineconeVector('your-api-key'); await pinecone.createIndex('test_index', 1536); await pinecone.upsert( 'test_index', embeddings, chunks?.map(chunk => ({ text: chunk.text })), ); ```




================================================================================ Source: src/pages/examples/rag/insert-embedding-in-qdrant.mdx ================================================================================ --- title: "Example: Insert Embeddings in Qdrant | RAG | Mastra Docs" description: Example of using Mastra to store embeddings in Qdrant for similarity search. --- import { GithubLink } from '../../../components/github-link'; # Insert Embedding in Qdrant After generating embeddings, you need to store them in a vector database for similarity search. The `QdrantVector` class provides methods to create collections and insert embeddings into Qdrant, a high-performance vector database. This example shows how to store embeddings in Qdrant for later retrieval. ```tsx copy import { QdrantVector } from '@mastra/vector-qdrant'; import { MDocument, embed } from '@mastra/rag'; const doc = MDocument.fromText('Your text content...'); const chunks = await doc.chunk(); const { embeddings } = await embed(chunks, { provider: 'OPEN_AI', model: 'text-embedding-3-small', maxRetries: 3, }); const qdrant = new QdrantVector({ url: process.env.QDRANT_URL, apiKey: process.env.QDRANT_API_KEY, }); await qdrant.createIndex('test_collection', 1536); await qdrant.upsert( 'test_collection', embeddings, chunks?.map(chunk => ({ text: chunk.text })), ); ``` {/*




*/} ================================================================================ Source: src/pages/examples/rag/insert-embedding-in-upstash.mdx ================================================================================ --- title: "Example: Insert Embeddings in Upstash | RAG | Mastra Docs" description: Example of using Mastra to store embeddings in Upstash for similarity search. --- import { GithubLink } from '../../../components/github-link'; # Insert Embedding in Upstash After generating embeddings, you need to store them in a vector database for similarity search. The `UpstashVector` class provides methods to create collections and insert embeddings into Upstash Vector, a serverless vector database. This example shows how to store embeddings in Upstash for later retrieval. ```tsx copy import { UpstashVector } from '@mastra/vector-upstash'; import { MDocument, embed } from '@mastra/rag'; const doc = MDocument.fromText('Your text content...'); const chunks = await doc.chunk(); const { embeddings } = await embed(chunks, { provider: 'OPEN_AI', model: 'text-embedding-3-small', maxRetries: 3, }); const upstash = new UpstashVector({ url: process.env.UPSTASH_URL, token: process.env.UPSTASH_TOKEN, }); await upstash.createIndex('test_collection', 1536); await upstash.upsert( 'test_collection', embeddings, chunks?.map(chunk => ({ text: chunk.text })), ); ``` {/*




*/} ================================================================================ Source: src/pages/examples/rag/insert-embedding-in-vectorize.mdx ================================================================================ --- title: "Example: Insert Embeddings in Cloudflare Vectorize | RAG | Mastra Docs" description: Example of using Mastra to store embeddings in Cloudflare Vectorize for similarity search. --- import { GithubLink } from '../../../components/github-link'; # Insert Embedding in Cloudflare Vectorize After generating embeddings, you need to store them in a vector database for similarity search. The `CloudflareVector` class provides methods to create collections and insert embeddings into Cloudflare Vectorize, a serverless vector database service. This example shows how to store embeddings in Vectorize for later retrieval. ```tsx copy import { CloudflareVector } from '@mastra/vector-vectorize'; import { MDocument, embed } from '@mastra/rag'; const doc = MDocument.fromText('Your text content...'); const chunks = await doc.chunk(); const { embeddings } = await embed(chunks, { provider: 'OPEN_AI', model: 'text-embedding-3-small', maxRetries: 3, }); const vectorize = new CloudflareVector({ accountId: process.env.CF_ACCOUNT_ID, apiToken: process.env.CF_API_TOKEN, }); await vectorize.createIndex('test_collection', 1536); await vectorize.upsert( 'test_collection', embeddings, chunks?.map(chunk => ({ text: chunk.text })), ); ``` {/*




*/} ================================================================================ Source: src/pages/examples/rag/metadata-extraction.mdx ================================================================================ --- title: "Example: Metadata Extraction | Retrieval | RAG | Mastra Docs" description: Example of extracting and utilizing metadata from documents in Mastra for enhanced document processing and retrieval. --- import { GithubLink } from "../../../components/github-link"; # Metadata Extraction This example demonstrates how to extract and utilize metadata from documents using Mastra's document processing capabilities. The extracted metadata can be used for document organization, filtering, and enhanced retrieval in RAG systems. ## Overview The system demonstrates metadata extraction in two ways: 1. Direct metadata extraction from a document 2. Chunking with metadata extraction ## Setup ### Dependencies Import the necessary dependencies: ```typescript copy showLineNumbers filename="src/index.ts" import { MDocument } from '@mastra/rag'; ``` ## Document Creation Create a document from text content: ```typescript copy showLineNumbers{3} filename="src/index.ts" const doc = MDocument.fromText(`Title: The Benefits of Regular Exercise Regular exercise has numerous health benefits. It improves cardiovascular health, strengthens muscles, and boosts mental wellbeing. Key Benefits: • Reduces stress and anxiety • Improves sleep quality • Helps maintain healthy weight • Increases energy levels For optimal results, experts recommend at least 150 minutes of moderate exercise per week.`); ``` ## 1. Direct Metadata Extraction Extract metadata directly from the document: ```typescript copy showLineNumbers{16} filename="src/index.ts" // Configure metadata extraction options await doc.extractMetadata({ keywords: true, // Extract important keywords summary: true, // Generate a concise summary }); // Retrieve the extracted metadata const meta = doc.getMetadata(); console.log('Extracted Metadata:', meta); // Example Output: // Extracted Metadata: { // keywords: [ // 'exercise', // 'health benefits', // 'cardiovascular health', // 'mental wellbeing', // 'stress reduction', // 'sleep quality' // ], // summary: 'Regular exercise provides multiple health benefits including improved cardiovascular health, muscle strength, and mental wellbeing. Key benefits include stress reduction, better sleep, weight management, and increased energy. Recommended exercise duration is 150 minutes per week.' // } ``` ## 2. Chunking with Metadata Combine document chunking with metadata extraction: ```typescript copy showLineNumbers{16} filename="src/index.ts" // Configure chunking with metadata extraction await doc.chunk({ strategy: 'recursive', // Use recursive chunking strategy size: 200, // Maximum chunk size extract: { keywords: true, // Extract keywords per chunk summary: true, // Generate summary per chunk }, }); // Get metadata from chunks const metaTwo = doc.getMetadata(); console.log('Chunk Metadata:', metaTwo); // Example Output: // Chunk Metadata: { // keywords: [ // 'exercise', // 'health benefits', // 'cardiovascular health', // 'mental wellbeing', // 'stress reduction', // 'sleep quality' // ], // summary: 'Regular exercise provides multiple health benefits including improved cardiovascular health, muscle strength, and mental wellbeing. Key benefits include stress reduction, better sleep, weight management, and increased energy. Recommended exercise duration is 150 minutes per week.' // } ```
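When chunking with extraction enabled, the metadata is also attached to the individual chunks, which is what the metadata filtering example builds on. A minimal sketch, assuming `extract: { keywords: true }` populates an `excerptKeywords` field on each chunk's metadata (as used in that example):

```typescript copy
// Minimal sketch: read extracted keywords directly from a chunk's metadata.
// Assumes extract: { keywords: true } populates chunk.metadata.excerptKeywords,
// as relied on by the metadata filtering example.
const chunkedDoc = MDocument.fromText('Title: The Benefits of Regular Exercise ...');

const chunksWithMeta = await chunkedDoc.chunk({
  strategy: 'recursive',
  size: 200,
  extract: { keywords: true },
});

// Illustrative output format: "KEYWORDS: exercise, cardiovascular health, ..."
console.log(chunksWithMeta[0]?.metadata?.excerptKeywords);
```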




================================================================================ Source: src/pages/examples/rag/rerank-rag.mdx ================================================================================ --- title: "Example: Re-ranking Results | Retrieval | RAG | Mastra Docs" description: Example of implementing a RAG system with re-ranking in Mastra using OpenAI embeddings and PGVector for vector storage. --- import { GithubLink } from "../../../components/github-link"; # Re-ranking Results This example demonstrates how to implement a Retrieval-Augmented Generation (RAG) system with re-ranking using Mastra, OpenAI embeddings, and PGVector for vector storage. ## Overview The system implements RAG with re-ranking using Mastra and OpenAI. Here's what it does: 1. Sets up a Mastra agent with gpt-4o-mini for response generation 2. Creates a vector query tool with re-ranking capabilities 3. Chunks text documents into smaller segments 4. Creates embeddings for these chunks 5. Stores them in a PostgreSQL vector database 6. Retrieves and re-ranks relevant chunks based on queries 7. Generates context-aware responses using the Mastra agent ## Setup ### Environment Setup Make sure to set up your environment variables: ```bash filename=".env" POSTGRES_CONNECTION_STRING=your_connection_string_here ``` ### Dependencies Then, import the necessary dependencies: ```typescript copy showLineNumbers filename="src/mastra/index.ts" import { Mastra, Agent } from "@mastra/core"; import { PgVector } from "@mastra/vector-pg"; import { embedMany, MDocument, createVectorQueryTool } from "@mastra/rag"; ``` ## Vector Query Tool Creation with Re-ranking Using createVectorQueryTool imported from @mastra/rag, you can create a tool that can query the vector database and re-rank results: ```typescript copy showLineNumbers{4} filename="src/mastra/index.ts" const vectorQueryTool = createVectorQueryTool({ vectorStoreName: "pgVector", indexName: "embeddings", options: { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3, }, topK: 5, reranker: { model: { provider: "OPEN_AI", name: "gpt-4o-mini", }, }, }); ``` ## Agent Configuration Set up the Mastra agent that will handle the responses: ```typescript copy showLineNumbers{22} filename="src/mastra/index.ts" export const ragAgent = new Agent({ name: "RAG Agent", instructions: "You are a helpful assistant that answers questions based on the provided context. Keep your answers concise and relevant.", model: { provider: "OPEN_AI", name: "gpt-4o-mini", }, tools: { vectorQueryTool, }, }); ``` ## Instantiate PgVector and Mastra Instantiate PgVector and Mastra with all components: ```typescript copy showLineNumbers{35} filename="src/mastra/index.ts" const pgVector = new PgVector(process.env.POSTGRES_CONNECTION_STRING!); export const mastra = new Mastra({ agents: { ragAgent }, vectors: { pgVector }, }); const agent = mastra.getAgent("ragAgent"); ``` ## Document Processing Create a document and process it into chunks: ```typescript copy showLineNumbers{44} filename="src/mastra/index.ts" const doc1 = MDocument.fromText(` market data shows price resistance levels. technical charts display moving averages. support levels guide trading decisions. breakout patterns signal entry points. price action determines trade timing. baseball cards show gradual value increase. rookie cards command premium prices. card condition affects resale value. authentication prevents fake trading. grading services verify card quality. volume analysis confirms price trends. 
sports cards track seasonal demand. chart patterns predict movements. mint condition doubles card worth. resistance breaks trigger orders. rare cards appreciate yearly. `); const chunks = await doc1.chunk({ strategy: "recursive", size: 150, overlap: 20, separator: "\n", }); ``` ## Creating and Storing Embeddings Generate embeddings for the chunks and store them in the vector database: ```typescript copy showLineNumbers{72} filename="src/mastra/index.ts" const { embeddings } = await embedMany(chunks, { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3, }); const vectorStore = mastra.getVector("pgVector"); await vectorStore.createIndex("embeddings", 1536); await vectorStore.upsert( "embeddings", embeddings, chunks?.map((chunk: any) => ({ text: chunk.text })), ); ``` ## Response Generation Function to generate responses based on retrieved and re-ranked context: ```typescript copy showLineNumbers{86} filename="src/mastra/index.ts" async function generateResponse(query: string) { const prompt = ` Please answer the following question: ${query} Please base your answer only on the context provided in the tool. If the context doesn't contain enough information to fully answer the question, please state that explicitly. `; const completion = await agent.generate(prompt); return completion.text; } ``` ## Example Usage ```typescript copy showLineNumbers{99} filename="src/mastra/index.ts" async function answerQueries(queries: string[]) { for (const query of queries) { try { const answer = await generateResponse(query); console.log("\nQuery:", query); console.log("Response:", answer); } catch (error) { console.error(`Error processing query "${query}":`, error); } } } const queries = [ "explain technical trading analysis", "explain trading card valuation", "how do you analyze market resistance", ]; await answerQueries(queries); ``` ================================================================================ Source: src/pages/examples/rag/reranking-with-cohere.mdx ================================================================================ --- title: "Example: Reranking with Cohere | RAG | Mastra Docs" description: Example of using Mastra to improve document retrieval relevance with Cohere's reranking service. --- # Reranking with Cohere When retrieving documents for RAG, initial vector similarity search may miss important semantic matches. Cohere's reranking service helps improve result relevance by reordering documents using multiple scoring factors. ```typescript import { rerank } from "@mastra/rag"; const results = rerank( searchResults, "deployment configuration", { provider: "COHERE", name: "rerank-v3.5", }, { topK: 5, weights: { semantic: 0.4, vector: 0.4, position: 0.2 } } ); ``` ## Links - [rerank() reference](../reference/rag/rerank.mdx) - [Retrieval docs](../docs/rag/retrieve-results.mdx) ================================================================================ Source: src/pages/examples/rag/retrieve-results.mdx ================================================================================ --- title: "Example: Retrieving Top-K Results | RAG | Mastra Docs" description: Example of using Mastra to query a vector database and retrieve semantically similar chunks. --- import { GithubLink } from "../../../components/github-link"; # Retrieving Top-K Results After storing embeddings in a vector database, you need to query them to find similar content. The `query` method returns the most semantically similar chunks to your input embedding, ranked by relevance. 
The `topK` parameter allows you to specify the number of results to return. This example shows how to retrieve similar chunks from a Pinecone vector database. ```tsx copy import { MDocument, embed } from "@mastra/rag"; import { PineconeVector } from "@mastra/vector-pinecone"; const doc = MDocument.fromText("Your text content..."); const chunks = await doc.chunk(); const { embeddings } = await embed(chunks, { provider: "OPEN_AI", model: "text-embedding-3-small", maxRetries: 3, }); const pinecone = new PineconeVector("your-api-key"); await pinecone.createIndex("test_index", 1536); await pinecone.upsert( "test_index", embeddings, chunks?.map((chunk: any) => ({ text: chunk.text })), ); const topK = 10; const results = await pinecone.query("test_index", embeddings[0], topK); console.log(results); ```
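To inspect the matches individually, you can iterate over the returned array. This is a hedged sketch, assuming each result exposes a similarity score and the metadata stored at upsert time:

```tsx copy
// Hedged sketch: print each match's similarity score and stored text.
// Assumes results expose a score and the metadata saved during upsert.
for (const match of results) {
  console.log(`score=${match.score} text=${match.metadata?.text}`);
}
```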




================================================================================ Source: src/pages/examples/workflows/branching-paths.mdx ================================================================================ --- title: "Example: Branching Paths | Workflows | Mastra Docs" description: Example of using Mastra to create workflows with branching paths based on intermediate results. --- import { GithubLink } from "../../../components/github-link"; # Branching Paths When processing data, you often need to take different actions based on intermediate results. This example shows how to create a workflow that splits into separate paths, where each path executes different steps based on the output of a previous step. ## Control Flow Diagram Here's the control flow diagram: Diagram showing workflow with branching paths ## Creating the Steps Let's start by creating the steps and initializing the workflow. {/* prettier-ignore */} ```ts showLineNumbers copy import { Step, Workflow } from "@mastra/core"; import { z } from "zod" const stepOne = new Step({ id: "stepOne", execute: async ({ context: { machineContext } }) => ({ doubledValue: machineContext.triggerData.inputValue * 2 }) }); const stepTwo = new Step({ id: "stepTwo", execute: async ({ context: { machineContext } }) => ({ isDivisibleByFive: machineContext.stepResults.stepOne.payload.doubledValue % 5 === 0 }) }); const stepThree = new Step({ id: "stepThree", execute: async ({ context: { machineContext } }) => ({ incrementedValue: machineContext.stepResults.stepOne.payload.doubledValue + 1 }) }); const stepFour = new Step({ id: "stepFour", execute: async ({ context: { machineContext } }) => ({ isDivisibleByThree: machineContext.stepResults.stepThree.payload.incrementedValue % 3 === 0 }) }); // Build the workflow const myWorkflow = new Workflow({ name: "my-workflow", triggerSchema: z.object({ inputValue: z.number(), }), }); ``` ## Branching Paths and Chaining Steps Now let's chain the steps into their branching paths and execute the workflow. ```ts showLineNumbers copy myWorkflow .step(stepOne) .then(stepTwo) .after(stepOne) .step(stepThree) .then(stepFour) .commit(); const { start } = myWorkflow.createRun(); const result = await start({ triggerData: { inputValue: 3 } }); ```
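With `inputValue: 3`, `stepOne` doubles the value to 6; one branch then reports that 6 is not divisible by five, while the other increments it to 7 and reports that 7 is not divisible by three. The trace below is a sketch rather than captured output, and it assumes each step's output is recorded in `result.results` under its step id:

```ts copy
// Traced sketch for inputValue: 3 (assumes result.results is keyed by step id):
// stepOne   -> { doubledValue: 6 }
// stepTwo   -> { isDivisibleByFive: false }   // 6 % 5 !== 0
// stepThree -> { incrementedValue: 7 }
// stepFour  -> { isDivisibleByThree: false }  // 7 % 3 !== 0
console.log(result.results);
```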




================================================================================ Source: src/pages/examples/workflows/calling-agent.mdx ================================================================================ --- title: "Example: Calling an Agent from a Workflow | Mastra Docs" description: Example of using Mastra to call an AI agent from within a workflow step. --- import { GithubLink } from "../../../components/github-link"; # Calling an Agent From a Workflow This example demonstrates how to call an AI agent from within a workflow step to process a message and generate a response. ```ts showLineNumbers copy import { Agent, Mastra, Step, Workflow } from "@mastra/core"; import { z } from "zod"; const penguin = new Agent({ name: "agent skipper", instructions: `You are skipper from penguin of madagascar, reply as that`, model: { provider: "OPEN_AI", name: "gpt-4o-mini", }, }); const newWorkflow = new Workflow({ name: "pass message to the workflow", triggerSchema: z.object({ message: z.string(), }), }); const replyAsSkipper = new Step({ id: "reply", outputSchema: z.object({ reply: z.string(), }), execute: async ({ context, mastra }) => { const kowalski = mastra?.agents?.penguin; const res = await kowalski?.generate( context.machineContext?.triggerData?.message, ); return { reply: res?.text || "" }; }, }); newWorkflow.step(replyAsSkipper); newWorkflow.commit(); const mastra = new Mastra({ agents: { penguin }, workflows: { newWorkflow }, }); const { runId, start } = await mastra.getWorkflow("newWorkflow").createRun(); const runResult = await start({ triggerData: { message: "Give me a run down of the mission to save private" }, }); console.log(runResult.results); ```




================================================================================ Source: src/pages/examples/workflows/calling-llm.mdx ================================================================================ --- title: "Example: Calling an LLM from a Workflow | Mastra Docs" description: Example of using Mastra to stream responses from an LLM within a workflow. --- import { GithubLink } from "../../../components/github-link"; # Calling an LLM from a Workflow This example demonstrates how to create a workflow that streams responses from an LLM, showing both real-time output and final text handling. ```ts showLineNumbers copy import { Mastra, Step, Workflow } from "@mastra/core"; import { z } from "zod"; const newWorkflow = new Workflow({ name: "pass message to the workflow", triggerSchema: z.object({ message: z.string(), }), }); const replyAsPenguin = new Step({ id: "reply", outputSchema: z.object({ reply: z.string(), }), execute: async ({ context, mastra }) => { const penguinCharacter = mastra?.llm?.({ provider: "OPEN_AI", name: "gpt-4o-mini", }); const res = await penguinCharacter?.stream( context.machineContext?.triggerData?.message, ); if (!res) { return { reply: "" }; } for await (const chunk of res?.textStream) { process.stdout.write(chunk); } const text = await res.text; return { reply: text }; }, }); newWorkflow.step(replyAsPenguin); newWorkflow.commit(); const mastra = new Mastra({ workflows: { newWorkflow }, }); const { runId, start } = mastra.getWorkflow("newWorkflow").createRun(); await start({ triggerData: { message: "Give me a speech as skipper from penguin of madagascar", }, }); ```




================================================================================
Source: src/pages/examples/workflows/creating-a-workflow.mdx
================================================================================

---
title: "Example: Creating a Workflow | Workflows | Mastra Docs"
description: Example of using Mastra to define and execute a simple workflow with a single step.
---

import { GithubLink } from '../../../components/github-link';

# Creating a Simple Workflow

A workflow allows you to define and execute sequences of operations in a structured path. This example shows a workflow with a single step.

```ts showLineNumbers copy
import { Step, Workflow } from '@mastra/core';
import { z } from 'zod';

const myWorkflow = new Workflow({
  name: 'my-workflow',
  triggerSchema: z.object({
    input: z.number(),
  }),
});

const stepOne = new Step({
  id: 'stepOne',
  inputSchema: z.object({
    value: z.number(),
  }),
  outputSchema: z.object({
    doubledValue: z.number(),
  }),
  execute: async ({ context }) => {
    const doubledValue = context.machineContext?.triggerData?.input * 2;
    return { doubledValue };
  },
});

myWorkflow.step(stepOne).commit();

const { runId, start } = myWorkflow.createRun({ triggerData: { input: 90 } });

const res = await start();

console.log(res.results);
```




================================================================================
Source: src/pages/examples/workflows/cyclical-dependencies.mdx
================================================================================

---
title: "Example: Cyclical Dependencies | Workflows | Mastra Docs"
description: Example of using Mastra to create workflows with cyclical dependencies and conditional loops.
---

import { GithubLink } from "../../../components/github-link";

# Workflow with Cyclical Dependencies

Workflows support cyclical dependencies where steps can loop back based on conditions. The example below shows how to use conditional logic to create loops and handle repeated execution.

```ts showLineNumbers copy
import { Workflow, Step } from '@mastra/core';
import { z } from 'zod';

async function main() {
  const doubleValue = new Step({
    id: 'doubleValue',
    description: 'Doubles the input value',
    inputSchema: z.object({
      inputValue: z.number(),
    }),
    outputSchema: z.object({
      doubledValue: z.number(),
    }),
    execute: async ({ context }) => {
      const doubledValue = context.inputValue * 2;
      return { doubledValue };
    },
  });

  const incrementByOne = new Step({
    id: 'incrementByOne',
    description: 'Adds 1 to the input value',
    outputSchema: z.object({
      incrementedValue: z.number(),
    }),
    execute: async ({ context }) => {
      const valueToIncrement = context?.machineContext?.getStepPayload<{ firstValue: number }>('trigger')?.firstValue;
      if (!valueToIncrement) throw new Error('No value to increment provided');
      const incrementedValue = valueToIncrement + 1;
      return { incrementedValue };
    },
  });

  const cyclicalWorkflow = new Workflow({
    name: 'cyclical-workflow',
    triggerSchema: z.object({
      firstValue: z.number(),
    }),
  });

  cyclicalWorkflow
    .step(doubleValue, {
      variables: {
        inputValue: {
          step: 'trigger',
          path: 'firstValue',
        },
      },
    })
    .then(incrementByOne)
    .after(doubleValue)
    .step(doubleValue, {
      when: {
        ref: { step: doubleValue, path: 'doubledValue' },
        query: { $eq: 12 },
      },
      variables: {
        inputValue: {
          step: doubleValue,
          path: 'doubledValue',
        },
      },
    })
    .commit();

  const { runId, start } = cyclicalWorkflow.createRun();

  console.log('Run', runId);

  const res = await start({ triggerData: { firstValue: 6 } });

  console.log(res.results);
}

main();
```
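To follow the cycle for `firstValue: 6`, it helps to trace the values through the `$eq: 12` condition. This is a sketch of the expected flow, under the assumption that the condition is re-evaluated each time `doubleValue` produces a new `doubledValue`:

```ts
// Trace for triggerData { firstValue: 6 }:
//   doubleValue (1st run):  6 * 2 = 12
//   incrementByOne:         trigger.firstValue + 1 = 7
//   when doubledValue === 12 -> true, so doubleValue runs again with inputValue 12
//   doubleValue (2nd run):  12 * 2 = 24
//   when doubledValue === 12 -> false, so the cycle stops
```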




================================================================================
Source: src/pages/examples/workflows/parallel-steps.mdx
================================================================================

---
title: "Example: Parallel Execution | Workflows | Mastra Docs"
description: Example of using Mastra to execute multiple independent tasks in parallel within a workflow.
---

import { GithubLink } from "../../../components/github-link";

# Parallel Execution with Steps

When building AI applications, you often need to process multiple independent tasks simultaneously to improve efficiency.

## Control Flow Diagram

This example shows how to structure a workflow that executes steps in parallel, with each branch handling its own data flow and dependencies.

Here's the control flow diagram:

Diagram showing workflow with parallel steps

## Creating the Steps

Let's start by creating the steps and initializing the workflow.

```ts showLineNumbers copy
import { Step, Workflow } from "@mastra/core";
import { z } from "zod";

const stepOne = new Step({
  id: "stepOne",
  execute: async ({ context: { machineContext } }) => ({
    doubledValue: machineContext.triggerData.inputValue * 2,
  }),
});

const stepTwo = new Step({
  id: "stepTwo",
  execute: async ({ context: { machineContext } }) => ({
    incrementedValue: machineContext.stepResults.stepOne.payload.doubledValue + 1,
  }),
});

const stepThree = new Step({
  id: "stepThree",
  execute: async ({ context: { machineContext } }) => ({
    tripledValue: machineContext.triggerData.inputValue * 3,
  }),
});

const stepFour = new Step({
  id: "stepFour",
  execute: async ({ context: { machineContext } }) => ({
    isEven: machineContext.stepResults.stepThree.payload.tripledValue % 2 === 0,
  }),
});

const myWorkflow = new Workflow({
  name: "my-workflow",
  triggerSchema: z.object({
    inputValue: z.number(),
  }),
});
```

## Chaining and Parallelizing Steps

Now we can add the steps to the workflow. Note that `.then()` chains a step onto the current branch, while calling `.step()` again adds a step that starts a separate branch, so the two chains run in parallel.

```ts showLineNumbers copy
myWorkflow
  .step(stepOne)
  .then(stepTwo) // chain one
  .step(stepThree)
  .then(stepFour) // chain two
  .commit();

const { start } = myWorkflow.createRun();

const result = await start({ triggerData: { inputValue: 3 } });
```
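With `inputValue: 3`, the two chains run independently: the first produces `doubledValue: 6` then `incrementedValue: 7`, while the second produces `tripledValue: 9` then `isEven: false`. A minimal sketch of reading both branches, assuming `result.results` mirrors the `{ payload }` shape the steps read inside `execute`:

```ts
// Sketch: read the final output of each parallel chain.
const chainOne = result.results?.stepTwo?.payload?.incrementedValue; // 7 for inputValue: 3
const chainTwo = result.results?.stepFour?.payload?.isEven; // false, since 9 is odd

console.log({ chainOne, chainTwo });
```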




================================================================================
Source: src/pages/examples/workflows/sequential-steps.mdx
================================================================================

---
title: "Example: Sequential Steps | Workflows | Mastra Docs"
description: Example of using Mastra to chain workflow steps in a specific sequence, passing data between them.
---

import { GithubLink } from "../../../components/github-link";

# Workflow with Sequential Steps

Workflow steps can be chained to run one after another in a specific sequence.

## Control Flow Diagram

This example shows how to chain workflow steps with the `then` method, passing data between sequential steps and executing them in order.

Here's the control flow diagram:

Diagram showing workflow with sequential steps

## Creating the Steps

Let's start by creating the steps and initializing the workflow.

```ts showLineNumbers copy
import { Step, Workflow } from "@mastra/core";
import { z } from "zod";

const stepOne = new Step({
  id: "stepOne",
  execute: async ({ context: { machineContext } }) => ({
    doubledValue: machineContext.triggerData.inputValue * 2,
  }),
});

const stepTwo = new Step({
  id: "stepTwo",
  execute: async ({ context: { machineContext } }) => ({
    incrementedValue: machineContext.stepResults.stepOne.payload.doubledValue + 1,
  }),
});

const stepThree = new Step({
  id: "stepThree",
  execute: async ({ context: { machineContext } }) => ({
    tripledValue: machineContext.stepResults.stepTwo.payload.incrementedValue * 3,
  }),
});

// Build the workflow
const myWorkflow = new Workflow({
  name: "my-workflow",
  triggerSchema: z.object({
    inputValue: z.number(),
  }),
});
```

## Chaining the Steps and Executing the Workflow

Now let's chain the steps together and run the workflow.

```ts showLineNumbers copy
// sequential steps
myWorkflow.step(stepOne).then(stepTwo).then(stepThree);

myWorkflow.commit();

const { runId, start } = myWorkflow.createRun();

const res = await start({ triggerData: { inputValue: 90 } });
```
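For `inputValue: 90`, the data flows through the chain as 90 → 180 → 181 → 543. A minimal sketch of checking the final value, assuming `res.results` exposes the step output in the same `{ payload }` shape used inside `execute`:

```ts
// Sketch: with inputValue 90 the chain computes
//   stepOne:   90 * 2  = 180
//   stepTwo:   180 + 1 = 181
//   stepThree: 181 * 3 = 543
console.log(res.results?.stepThree?.payload?.tripledValue); // expected: 543
```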




================================================================================
Source: src/pages/examples/workflows/suspend-and-resume.mdx
================================================================================

---
title: "Example: Suspend and Resume | Workflows | Mastra Docs"
description: Example of using Mastra to suspend and resume workflow steps during execution.
---

import { GithubLink } from '../../../components/github-link';

# Workflow with Suspend and Resume

Workflow steps can be suspended and resumed at any point in the workflow execution. This example demonstrates how to suspend a workflow step and resume it later.

```ts showLineNumbers copy
import { Step, Workflow } from '@mastra/core';
import { z } from 'zod';

const stepOne = new Step({
  id: 'stepOne',
  outputSchema: z.object({
    doubledValue: z.number(),
  }),
  execute: async ({ context }) => {
    const doubledValue = context.triggerData.inputValue * 2;
    return { doubledValue };
  },
});

const stepTwo = new Step({
  id: 'stepTwo',
  outputSchema: z.object({
    incrementedValue: z.number(),
  }),
  execute: async ({ context, suspend }) => {
    // `secondValue` is only present after the step has been resumed with extra context
    const stepValue = context.stepResults.stepTwo?.payload?.secondValue || 0;
    const incrementedValue = context.stepResults.stepOne.payload.doubledValue + stepValue;

    // Suspend until the combined value reaches the threshold
    if (incrementedValue < 100) {
      await suspend();
      return { incrementedValue: 0 };
    }
    return { incrementedValue };
  },
});

// Build the workflow
const myWorkflow = new Workflow({
  name: 'my-workflow',
  triggerSchema: z.object({
    inputValue: z.number(),
  }),
});

// sequential steps
myWorkflow
  .step(stepOne)
  .then(stepTwo)
  .commit();

const { runId, start } = myWorkflow.createRun();

// With an input of 45, stepOne produces 90, so stepTwo suspends (90 < 100)
const res = await start({ triggerData: { inputValue: 45 } });

await myWorkflow.watch(runId, async ({ activePaths, context }) => {
  for (const path of activePaths) {
    const stepTwoStatus = context.stepResults?.stepTwo?.status;
    if (stepTwoStatus === 'suspended') {
      // Resume the suspended step with the extra value it needs
      await myWorkflow.resume({
        runId,
        stepId: 'stepTwo',
        context: { secondValue: 100 },
      });
    }
  }
});
```
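To make the numbers concrete: with `inputValue: 45`, `stepOne` returns 90, `stepTwo` suspends because 90 is below the threshold of 100, and after `resume` injects `secondValue: 100` the step completes. A short trace of the expected values:

```ts
// Trace for triggerData { inputValue: 45 }:
//   stepOne:        45 * 2 = 90
//   stepTwo (1st):  90 + 0 = 90   -> 90 < 100, so the step suspends
//   resume with { secondValue: 100 }
//   stepTwo (2nd):  90 + 100 = 190 -> returned as incrementedValue
```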




================================================================================
Source: src/pages/examples/workflows/using-a-tool-as-a-step.mdx
================================================================================

---
title: "Example: Using a Tool as a Step | Workflows | Mastra Docs"
description: Example of using Mastra to integrate a custom tool as a step in a workflow.
---

import { GithubLink } from '../../../components/github-link';

# Tool as a Workflow Step

This example demonstrates how to create a custom tool and integrate it as a workflow step, showing how to define input/output schemas and implement the tool's execution logic.

```ts showLineNumbers copy
import { createTool, Workflow } from '@mastra/core';
import { z } from 'zod';

const crawlWebpage = createTool({
  id: 'Crawl Webpage',
  description: 'Crawls a webpage and extracts the text content',
  inputSchema: z.object({
    url: z.string().url(),
  }),
  outputSchema: z.object({
    rawText: z.string(),
  }),
  execute: async ({ context: { url } }) => {
    // Stubbed for the example; a real implementation would fetch and parse `url`
    return { rawText: 'This is the text content of the webpage' };
  },
});

const contentWorkflow = new Workflow({ name: 'content-review' });

contentWorkflow.step(crawlWebpage).commit();

const { runId, start } = contentWorkflow.createRun();

const res = await start();

console.log(res.results);
```
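The `execute` above is stubbed so the example runs without network access. A real implementation might fetch the page and strip the markup. This is a minimal sketch of a helper the tool's `execute` could call, using the built-in `fetch`; the regex-based tag stripping is illustrative only, and a production crawler would use a proper HTML parser:

```ts
// Sketch: a naive page-to-text helper for the crawl tool.
async function crawlPageText(url: string): Promise<string> {
  const response = await fetch(url);
  const html = await response.text();
  return html
    .replace(/<script[\s\S]*?<\/script>/gi, ' ')
    .replace(/<style[\s\S]*?<\/style>/gi, ' ')
    .replace(/<[^>]+>/g, ' ') // drop remaining tags
    .replace(/\s+/g, ' ')
    .trim();
}

// Inside the tool, `execute` could then be:
// execute: async ({ context: { url } }) => ({ rawText: await crawlPageText(url) }),
```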




================================================================================
Source: src/pages/showcase/index.mdx
================================================================================

---
title: 'Showcase'
description: 'Check out these applications built with Mastra'
---

import { ShowcaseGrid } from '../../components/showcase-grid';