
Chat.ts overview

The Chat module provides a stateful conversation interface for AI language models.

This module enables persistent chat sessions that maintain conversation history, support tool calling, and offer both streaming and non-streaming text generation. It integrates seamlessly with the Effect AI ecosystem, providing type-safe conversational AI capabilities.

Example

import { Chat, LanguageModel } from "@effect/ai"
import { Effect, Layer } from "effect"

// Create a new chat session
const program = Effect.gen(function* () {
  const chat = yield* Chat.empty

  // Send a message and get a response
  const response = yield* chat.generateText({
    prompt: "Hello! What can you help me with?"
  })

  console.log(response.content)

  return response
})

Example

import { Chat, LanguageModel } from "@effect/ai"
import { Effect, Stream } from "effect"

// Stream a response and log each part as it arrives
const streamingChat = Effect.gen(function* () {
  const chat = yield* Chat.empty

  yield* chat
    .streamText({
      prompt: "Generate a creative story"
    })
    .pipe(Stream.runForEach((part) => Effect.sync(() => console.log(part))))
})
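
Example

Both programs above require a LanguageModel.LanguageModel implementation in context before they can run. The sketch below is a minimal illustration of supplying that dependency; ProviderLayer is a hypothetical placeholder standing in for a real provider layer (for example one built from @effect/ai-openai or @effect/ai-anthropic).

import { Chat, LanguageModel } from "@effect/ai"
import { Effect, Layer } from "effect"

// Hypothetical placeholder: substitute a real layer that provides
// LanguageModel.LanguageModel, e.g. one constructed from a provider package
declare const ProviderLayer: Layer.Layer<LanguageModel.LanguageModel>

const chatProgram = Effect.gen(function* () {
  const chat = yield* Chat.empty

  const response = yield* chat.generateText({
    prompt: "Hello! What can you help me with?"
  })

  return response.content
})

// Providing the layer satisfies the LanguageModel requirement
const runnable = chatProgram.pipe(Effect.provide(ProviderLayer))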

Since v1.0.0


Exports Grouped by Category


Constructors

empty

Creates a new Chat service with empty conversation history.

This is the most common way to start a fresh chat session without any initial context or system prompts.

Example

import { Chat } from "@effect/ai"
import { Effect } from "effect"

const freshChat = Effect.gen(function* () {
  const chat = yield* Chat.empty

  const response = yield* chat.generateText({
    prompt: "Hello! Can you introduce yourself?"
  })

  console.log(response.content)

  return chat
})

Signature

declare const empty: Effect.Effect<Service, never, LanguageModel.LanguageModel>

Source

Since v1.0.0

fromExport

Creates a Chat service from previously exported chat data.

Restores a chat session from structured data that was previously exported using the export method. Useful for persisting and restoring conversation state.

Example

import { Chat } from "@effect/ai"
import { Effect } from "effect"

declare const loadFromDatabase: (sessionId: string) => Effect.Effect<unknown>

const restoreChat = Effect.gen(function* () {
  // Assume we have previously exported data
  const savedData = yield* loadFromDatabase("chat-session-123")

  const restoredChat = yield* Chat.fromExport(savedData)

  // Continue the conversation from where it left off
  const response = yield* restoredChat.generateText({
    prompt: "Let's continue our discussion"
  })

  return response
}).pipe(
  Effect.catchTag("ParseError", (error) => {
    console.log("Failed to restore chat:", error.message)
    return Effect.void
  })
)
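
Example

A minimal round-trip sketch, assuming the exported value is simply held in memory: the history is exported with the export method and later rehydrated with fromExport.

import { Chat } from "@effect/ai"
import { Effect } from "effect"

const roundTrip = Effect.gen(function* () {
  const chat = yield* Chat.empty
  yield* chat.generateText({ prompt: "Remember that my favorite color is blue." })

  // Export the structured history (e.g. to persist it elsewhere)
  const snapshot = yield* chat.export

  // Later: rebuild an equivalent chat session from the snapshot
  const restored = yield* Chat.fromExport(snapshot)

  return yield* restored.generateText({ prompt: "What is my favorite color?" })
})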

Signature

declare const fromExport: (data: unknown) => Effect.Effect<Service, ParseError, LanguageModel.LanguageModel>

Source

Since v1.0.0

fromJson

Creates a Chat service from previously exported JSON chat data.

Restores a chat session from a JSON string that was previously exported using the exportJson method. This is the most convenient way to persist and restore chat sessions to/from storage systems.

Example

import { Chat } from "@effect/ai"
import { Effect } from "effect"

const restoreFromJson = Effect.gen(function* () {
  // Load JSON from localStorage or file system
  const jsonData = localStorage.getItem("my-chat-backup")
  if (!jsonData) return yield* Chat.empty

  const restoredChat = yield* Chat.fromJson(jsonData)

  // Chat history is now restored
  const response = yield* restoredChat.generateText({
    prompt: "What were we talking about?"
  })
  console.log(response.content)

  return restoredChat
}).pipe(
  Effect.catchTag("ParseError", (error) => {
    console.log("Invalid JSON format:", error.message)
    return Chat.empty // Fallback to empty chat
  })
)
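
Example

A round-trip sketch pairing exportJson with fromJson, assuming browser localStorage as the storage system.

import { Chat } from "@effect/ai"
import { Effect } from "effect"

const backupAndRestore = Effect.gen(function* () {
  const chat = yield* Chat.empty
  yield* chat.generateText({ prompt: "Summarize the plan we discussed." })

  // Serialize the conversation to a JSON string and store it
  const json = yield* chat.exportJson
  yield* Effect.sync(() => localStorage.setItem("my-chat-backup", json))

  // Later: restore the session from the stored JSON
  const restored = yield* Chat.fromJson(json)
  return restored
})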

Signature

declare const fromJson: (data: string) => Effect.Effect<Service, ParseError, LanguageModel.LanguageModel>

Source

Since v1.0.0

fromPrompt

Creates a new Chat service from an initial prompt.

This is the primary constructor for creating chat instances. It initializes a new conversation with the provided prompt as the starting context.

Example

import { Chat, Prompt } from "@effect/ai"
import { Effect } from "effect"

const chatWithSystemPrompt = Effect.gen(function* () {
  const chat = yield* Chat.fromPrompt([
    {
      role: "system",
      content: "You are a helpful assistant specialized in mathematics."
    }
  ])

  const response = yield* chat.generateText({
    prompt: "What is 2+2?"
  })

  return response.content
})

Example

import { Chat, Prompt } from "@effect/ai"
import { Effect } from "effect"

// Initialize with conversation history
const existingChat = Effect.gen(function* () {
  const chat = yield* Chat.fromPrompt([
    { role: "user", content: [{ type: "text", text: "What's the weather like?" }] },
    { role: "assistant", content: [{ type: "text", text: "I don't have access to weather data." }] },
    { role: "user", content: [{ type: "text", text: "Can you help me with coding?" }] }
  ])

  const response = yield* chat.generateText({
    prompt: "I need help with TypeScript"
  })

  return response
})

Signature

declare const fromPrompt: (prompt: Prompt.RawInput) => Effect.Effect<Service, never, LanguageModel.LanguageModel>

Source

Since v1.0.0

Context

Chat (class)

The Chat service tag for dependency injection.

This tag provides access to chat functionality throughout your application, enabling persistent conversational AI interactions with full context management.

Example

import { Chat } from "@effect/ai"
import { Effect } from "effect"

const useChat = Effect.gen(function* () {
  const chat = yield* Chat
  const response = yield* chat.generateText({
    prompt: "Explain quantum computing in simple terms"
  })
  return response.content
})
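
Example

A minimal sketch of satisfying the Chat requirement of a program like the one above: build a session (here with Chat.empty) and provide it under the Chat tag. The LanguageModel requirement introduced by Chat.empty still has to be supplied separately by a provider layer.

import { Chat } from "@effect/ai"
import { Effect } from "effect"

const useChat = Effect.gen(function* () {
  const chat = yield* Chat
  const response = yield* chat.generateText({
    prompt: "Explain quantum computing in simple terms"
  })
  return response.content
})

// Provide a freshly created session for the Chat tag; Chat.empty itself
// still requires a LanguageModel implementation further out
const program = useChat.pipe(Effect.provideServiceEffect(Chat, Chat.empty))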

Signature

declare class Chat

Source

Since v1.0.0

Models

Service (interface)

Represents the interface that the Chat service provides.

Signature

export interface Service {
  /**
   * Reference to the chat history.
   *
   * Provides direct access to the conversation history for advanced use cases
   * like custom history manipulation or inspection.
   *
   * @example
   * ```ts
   * import { Chat } from "@effect/ai"
   * import { Effect, Ref } from "effect"
   *
   * const inspectHistory = Effect.gen(function* () {
   *   const chat = yield* Chat.empty
   *   const currentHistory = yield* Ref.get(chat.history)
   *   console.log("Current conversation:", currentHistory)
   *   return currentHistory
   * })
   * ```
   */
  readonly history: Ref.Ref<Prompt.Prompt>

  /**
   * Exports the chat history into a structured format.
   *
   * Returns the complete conversation history as a structured object
   * that can be stored, transmitted, or processed by other systems.
   *
   * @example
   * ```ts
   * import { Chat } from "@effect/ai"
   * import { Effect } from "effect"
   *
   * const saveChat = Effect.gen(function* () {
   *   const chat = yield* Chat.empty
   *   yield* chat.generateText({ prompt: "Hello!" })
   *
   *   const exportedData = yield* chat.export
   *
   *   // Save to database or file system
   *   return exportedData
   * })
   * ```
   */
  readonly export: Effect.Effect<unknown>

  /**
   * Exports the chat history as a JSON string.
   *
   * Provides a convenient way to serialize the entire conversation
   * for storage or transmission in JSON format.
   *
   * @example
   * ```ts
   * import { Chat } from "@effect/ai"
   * import { Effect } from "effect"
   *
   * const backupChat = Effect.gen(function* () {
   *   const chat = yield* Chat.empty
   *   yield* chat.generateText({ prompt: "Explain photosynthesis" })
   *
   *   const jsonBackup = yield* chat.exportJson
   *
   *   yield* Effect.sync(() =>
   *     localStorage.setItem("chat-backup", jsonBackup)
   *   )
   *
   *   return jsonBackup
   * })
   * ```
   */
  readonly exportJson: Effect.Effect<string>

  /**
   * Generate text using a language model for the specified prompt.
   *
   * If a toolkit is specified, the language model will have access to tools
   * for function calling and enhanced capabilities. Both input and output
   * messages are automatically added to the chat history.
   *
   * @example
   * ```ts
   * import { Chat } from "@effect/ai"
   * import { Effect } from "effect"
   *
   * const chatWithAI = Effect.gen(function* () {
   *   const chat = yield* Chat.empty
   *
   *   const response1 = yield* chat.generateText({
   *     prompt: "What is the capital of France?"
   *   })
   *
   *   const response2 = yield* chat.generateText({
   *     prompt: "What's the population of that city?",
   *   })
   *
   *   return [response1.content, response2.content]
   * })
   * ```
   */
  readonly generateText: <
    Options extends NoExcessProperties<LanguageModel.GenerateTextOptions<any>, Options>,
    Tools extends Record<string, Tool.Any> = {}
  >(
    options: Options & LanguageModel.GenerateTextOptions<Tools>
  ) => Effect.Effect<
    LanguageModel.GenerateTextResponse<Tools>,
    LanguageModel.ExtractError<Options>,
    LanguageModel.ExtractContext<Options>
  >

  /**
   * Generate text using a language model with streaming output.
   *
   * Returns a stream of response parts that are emitted as soon as they're
   * available from the model. Supports tool calling and maintains chat history.
   *
   * @example
   * ```ts
   * import { Chat } from "@effect/ai"
   * import { Effect, Stream, Console } from "effect"
   *
   * const streamingChat = Effect.gen(function* () {
   *   const chat = yield* Chat.empty
   *
   *   const stream = chat.streamText({
   *     prompt: "Write a short story about space exploration"
   *   })
   *
   *   yield* Stream.runForEach(stream, (part) =>
   *     part.type === "text-delta"
   *       ? Effect.sync(() => process.stdout.write(part.delta))
   *       : Effect.void
   *   )
   * })
   * ```
   */
  readonly streamText: <
    Options extends NoExcessProperties<LanguageModel.GenerateTextOptions<any>, Options>,
    Tools extends Record<string, Tool.Any> = {}
  >(
    options: Options & LanguageModel.GenerateTextOptions<Tools>
  ) => Stream.Stream<
    Response.StreamPart<Tools>,
    LanguageModel.ExtractError<Options>,
    LanguageModel.ExtractContext<Options>
  >

  /**
   * Generate a structured object using a language model and schema.
   *
   * Forces the model to return data that conforms to the specified schema,
   * enabling structured data extraction and type-safe responses. The
   * conversation history is maintained across calls.
   *
   * @example
   * ```ts
   * import { Chat } from "@effect/ai"
   * import { Effect, Schema } from "effect"
   *
   * const ContactSchema = Schema.Struct({
   *   name: Schema.String,
   *   email: Schema.String,
   *   phone: Schema.optional(Schema.String)
   * })
   *
   * const extractContact = Effect.gen(function* () {
   *   const chat = yield* Chat.empty
   *
   *   const contact = yield* chat.generateObject({
   *     prompt: "Extract contact info: John Doe, john@example.com, 555-1234",
   *     schema: ContactSchema
   *   })
   *
   *   console.log(contact.object)
   *   // { name: "John Doe", email: "john@example.com", phone: "555-1234" }
   *
   *   return contact.object
   * })
   * ```
   */
  readonly generateObject: <
    A,
    I extends Record<string, unknown>,
    R,
    Options extends NoExcessProperties<LanguageModel.GenerateObjectOptions<any, A, I, R>, Options>,
    Tools extends Record<string, Tool.Any> = {}
  >(
    options: Options & LanguageModel.GenerateObjectOptions<Tools, A, I, R>
  ) => Effect.Effect<
    LanguageModel.GenerateObjectResponse<Tools, A>,
    LanguageModel.ExtractError<Options>,
    LanguageModel.LanguageModel | R | LanguageModel.ExtractContext<Options>
  >
}

Source

Since v1.0.0