
Calling an LLM from a Workflow

This example demonstrates how to create a workflow that streams a response from an LLM, handling both the real-time chunks and the final assembled text.

import { Mastra, Step, Workflow } from '@mastra/core';
import { z } from 'zod';
 
// Workflow triggered with a message, validated by the trigger schema
const newWorkflow = new Workflow({
  name: 'pass message to the workflow',
  triggerSchema: z.object({
    message: z.string(),
  }),
});
 
// Step that replies to the incoming message in character
const replyAsPenguin = new Step({
  id: 'reply',
  outputSchema: z.object({
    reply: z.string(),
  }),
  execute: async ({ context, mastra }) => {
    // Get an LLM instance from the Mastra instance injected at runtime
    const penguinCharacter = mastra?.llm?.({
      provider: 'OPEN_AI',
      name: 'gpt-4o',
    });
 
    // Stream a response using the message from the workflow's trigger data
    const res = await penguinCharacter?.stream(context.machineContext?.triggerData?.message);

    if (!res) {
      return { reply: '' };
    }

    // Write each chunk to stdout as it arrives
    for await (const chunk of res.textStream) {
      process.stdout.write(chunk);
    }
 
    // Await the fully assembled text once the stream completes
    const text = await res.text;
    return { reply: text };
  },
});
 
// Register the step and finalize the workflow definition
newWorkflow.step(replyAsPenguin);
newWorkflow.commit();

// Register the workflow with a Mastra instance so it can be run
const mastra = new Mastra({
  workflows: { newWorkflow },
});
 
// createRun() returns the run's id and a start function for that run
const { runId, start } = mastra
  .getWorkflow('newWorkflow')
  .createRun();
 
await start({ triggerData: { message: 'Give me a speech as skipper from penguin of madagascar' } });
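Alternatively, capture the value start() resolves with if you also need the step's output after the run completes. The sketch below assumes the resolved object exposes per-step results keyed by step id, each with a status and output field; that shape can vary across @mastra/core versions, so verify it against yours.

// Hedged sketch: read the step's output from the resolved run results.
// The results shape (keyed by step id, with status/output) is an assumption
// about this @mastra/core version, not a guaranteed contract.
const result = await start({
  triggerData: { message: 'Give me a speech as skipper from penguin of madagascar' },
});

const replyStep = result?.results?.reply;
if (replyStep?.status === 'success') {
  console.log('\nFinal reply:', replyStep.output.reply);
}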

View Example on GitHub
