Set up environment variables

Create a .env file in your project root and add the following variables:

KEYWORDSAI_BASE_URL=https://api.keywordsai.co/api
KEYWORDSAI_API_KEY=YOUR_KEYWORDSAI_API_KEY
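
If your runtime doesn't load .env files automatically, load them before the telemetry client is constructed. A minimal sketch, assuming the dotenv package is installed (Node 20.6+ can instead be started with --env-file=.env):

// Populates process.env from .env at startup; import this before anything
// that reads the environment.
import "dotenv/config";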

Create a task with Vercel AI SDK

In this example, we create two tasks, getUserInput and getLLMResponse, and annotate them with Keywords AI tracing.

Vercel AI SDK
import { CoreMessage, streamText } from "ai";
import { createOpenAI } from "@ai-sdk/openai";
import { KeywordsAITelemetry } from "@keywordsai/tracing";

// Initialize Keywords AI telemetry. disableBatch flushes each span
// immediately, which is handy for short-lived scripts and local debugging.
const kTl = new KeywordsAITelemetry({
  apiKey: process.env.KEYWORDSAI_API_KEY || "",
  baseUrl: process.env.KEYWORDSAI_BASE_URL || "",
  disableBatch: true,
});

// OpenAI provider from the Vercel AI SDK; expects OPENAI_API_KEY in the environment.
const client = createOpenAI({
  compatibility: "strict",
});

// Builds the user message inside a traced task. Arguments passed after the
// callback (here, userName) are forwarded into it by withTask.
function getUserInput(userName: string) {
  return kTl.withTask(
    { name: "getUserInput" },
    async (userName: string): Promise<CoreMessage> => {
      return {
        role: "user",
        content: "Hello, my name is: " + userName,
      };
    },
    userName
  );
}

// Calls the model inside a traced task and returns the resulting text stream.
async function getLLMResponse(
  messages: CoreMessage[],
  model: string,
  temperature: number
) {
  const proxyTextStream = await kTl.withTask(
    { name: "getLLMResponse" },
    async (kwargs: { messages: CoreMessage[]; model: string; temperature: number }) => {
      // streamText returns immediately; tokens arrive incrementally on textStream.
      const { textStream: proxyTextStream } = streamText({
        model: client.chat(kwargs.model),
        messages: kwargs.messages,
        temperature: kwargs.temperature,
      });
      return proxyTextStream;
    },
    {
      messages: messages,
      model: model,
      temperature: temperature,
    }
  );
  return proxyTextStream;
}
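
To try the tasks directly, build a message with getUserInput, hand it to getLLMResponse, and iterate the returned stream. A minimal sketch; the user name and model are illustrative choices, not values the SDK requires:

async function main() {
  // Resolves to a CoreMessage via the traced getUserInput task.
  const message = await getUserInput("Alice");
  // "gpt-4o-mini" is a hypothetical model choice; use any model your key can access.
  const textStream = await getLLMResponse([message], "gpt-4o-mini", 0.7);
  for await (const chunk of textStream) {
    process.stdout.write(chunk);
  }
}

main();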

Combine tasks into a workflow

In this example, we create a workflow vercelWithTracing that combines the getUserInput and getLLMResponse tasks.
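
A minimal sketch of that workflow, assuming withWorkflow from @keywordsai/tracing has the same call shape as withTask (a config object, a callback, then arguments forwarded to the callback); the model name is again an illustrative choice:

async function vercelWithTracing(userName: string) {
  return kTl.withWorkflow(
    { name: "vercelWithTracing" },
    async (kwargs: { userName: string }) => {
      // Both tasks run inside the workflow callback, so their task spans
      // should nest under the vercelWithTracing workflow span.
      const message = await getUserInput(kwargs.userName);
      const textStream = await getLLMResponse([message], "gpt-4o-mini", 0.7);

      // Drain the stream so the workflow captures the full response text.
      let response = "";
      for await (const chunk of textStream) {
        response += chunk;
      }
      return response;
    },
    { userName }
  );
}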