TypeScript SDK

The official TypeScript/JavaScript SDK for BlockRun.

Installation

npm install @blockrun/llm
# or
pnpm add @blockrun/llm
# or
yarn add @blockrun/llm

Quick Start

import { LLMClient } from '@blockrun/llm';

const client = new LLMClient({
  privateKey: process.env.BLOCKRUN_WALLET_KEY as `0x${string}`
});

const response = await client.chat('openai/gpt-4o', 'Hello!');
console.log(response);

Configuration

Options

import { LLMClient } from '@blockrun/llm';

const client = new LLMClient({
  privateKey: '0x...',              // Required: wallet private key
  apiUrl: 'https://api.blockrun.ai', // Optional: API endpoint
  timeout: 60000                     // Optional: timeout in ms
});
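
In practice the private key usually comes from the environment rather than a literal. A minimal sketch that validates the variable before constructing the client (the validation and error message are illustrative, not part of the SDK):

import { LLMClient } from '@blockrun/llm';

const key = process.env.BLOCKRUN_WALLET_KEY;
if (!key || !key.startsWith('0x')) {
  throw new Error('Set BLOCKRUN_WALLET_KEY to a 0x-prefixed private key');
}

const client = new LLMClient({
  privateKey: key as `0x${string}`
});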

Methods

chat(model, prompt, options?)

Simple one-line chat interface.

const response = await client.chat('openai/gpt-4o', 'Explain quantum computing', {
  system: 'You are a physics teacher.',  // Optional
  maxTokens: 500,                         // Optional
  temperature: 0.7                        // Optional
});

Returns: Promise<string> - The assistant's response text

chatCompletion(model, messages, options?)

Full OpenAI-compatible chat completion.

import { LLMClient, type ChatMessage } from '@blockrun/llm';

const messages: ChatMessage[] = [
  { role: 'system', content: 'You are helpful.' },
  { role: 'user', content: 'What is 2+2?' }
];

const result = await client.chatCompletion('openai/gpt-4o', messages, {
  maxTokens: 100,
  temperature: 0.7,
  topP: 0.9
});

console.log(result.choices[0].message.content);
console.log(`Tokens used: ${result.usage?.total_tokens}`);

Returns: Promise<ChatResponse>
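
Because chatCompletion takes the full message history, multi-turn conversations work by appending each assistant reply before the next call. A short sketch using the types above:

import { LLMClient, type ChatMessage } from '@blockrun/llm';

const client = new LLMClient({ privateKey: '0x...' });

const messages: ChatMessage[] = [
  { role: 'user', content: 'Pick a number between 1 and 10.' }
];

const first = await client.chatCompletion('openai/gpt-4o', messages);

// Feed the assistant's reply back in so the model sees the whole conversation
messages.push(first.choices[0].message);
messages.push({ role: 'user', content: 'Now double it.' });

const second = await client.chatCompletion('openai/gpt-4o', messages);
console.log(second.choices[0].message.content);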

listModels()

Get the available models with their per-million-token pricing.

const models = await client.listModels();
for (const model of models) {
  console.log(`${model.id}: $${model.inputPrice}/M`);
}
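
The fields on Model (see Types below) also make programmatic selection easy, e.g. picking the cheapest currently available model by input price:

const models = await client.listModels();
const cheapest = models
  .filter((m) => m.available)
  .sort((a, b) => a.inputPrice - b.inputPrice)[0];
console.log(`Cheapest available: ${cheapest?.id}`);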

getWalletAddress()

Get the wallet address derived from the configured private key.

const address = client.getWalletAddress();
console.log(`Paying from: ${address}`);

Error Handling

import { LLMClient, APIError, PaymentError } from '@blockrun/llm';

const client = new LLMClient({ privateKey: '0x...' });

try {
  const response = await client.chat('openai/gpt-4o', 'Hello!');
} catch (error) {
  if (error instanceof PaymentError) {
    console.error('Payment failed:', error.message);
    // Check your USDC balance
  } else if (error instanceof APIError) {
    console.error(`API error (${error.statusCode}):`, error.message);
  } else {
    throw error;
  }
}
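
Transient server-side failures can be retried using the same error classes. A minimal backoff sketch; the policy here (three attempts, retrying only 5xx APIErrors) is an assumption, not SDK behavior:

import { LLMClient, APIError } from '@blockrun/llm';

const client = new LLMClient({ privateKey: '0x...' });

async function chatWithRetry(model: string, prompt: string, attempts = 3): Promise<string> {
  for (let i = 0; i < attempts; i++) {
    try {
      return await client.chat(model, prompt);
    } catch (error) {
      // Assumption: only server-side (5xx) API errors are worth retrying
      const retryable = error instanceof APIError && (error.statusCode ?? 0) >= 500;
      if (!retryable || i === attempts - 1) throw error;
      await new Promise((r) => setTimeout(r, 1000 * 2 ** i)); // 1s, 2s, 4s...
    }
  }
  throw new Error('unreachable');
}

const answer = await chatWithRetry('openai/gpt-4o', 'Hello!');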

Types

interface ChatMessage {
  role: 'system' | 'user' | 'assistant';
  content: string;
}

interface ChatResponse {
  id: string;
  object: string;
  created: number;
  model: string;
  choices: ChatChoice[];
  usage?: ChatUsage;
}

interface ChatChoice {
  index: number;
  message: ChatMessage;
  finish_reason?: string;
}

interface ChatUsage {
  prompt_tokens: number;
  completion_tokens: number;
  total_tokens: number;
}

interface Model {
  id: string;
  name: string;
  provider: string;
  inputPrice: number;
  outputPrice: number;
  contextWindow: number;
  maxOutput: number;
  available: boolean;
}
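
ChatMessage is importable as shown earlier; assuming the other interfaces are exported the same way, they compose naturally with the client methods. For example, a small helper that pulls out the first reply text:

import type { ChatResponse } from '@blockrun/llm';

// Safely extract the first choice's text, falling back to an empty string
function firstText(response: ChatResponse): string {
  return response.choices[0]?.message.content ?? '';
}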

Examples

Concurrent Requests

import { LLMClient } from '@blockrun/llm';

const client = new LLMClient({ privateKey: '0x...' });

const [gpt, claude, gemini] = await Promise.all([
  client.chat('openai/gpt-4o', 'What is 2+2?'),
  client.chat('anthropic/claude-sonnet-4', 'What is 3+3?'),
  client.chat('google/gemini-2.5-flash', 'What is 4+4?')
]);

console.log('GPT:', gpt);
console.log('Claude:', claude);
console.log('Gemini:', gemini);
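
Note that Promise.all rejects as soon as any one request fails. If partial results are acceptable, Promise.allSettled (standard JavaScript, nothing SDK-specific) keeps the successes:

const results = await Promise.allSettled([
  client.chat('openai/gpt-4o', 'What is 2+2?'),
  client.chat('anthropic/claude-sonnet-4', 'What is 3+3?')
]);

for (const result of results) {
  if (result.status === 'fulfilled') {
    console.log('OK:', result.value);
  } else {
    console.error('Failed:', result.reason);
  }
}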

Streaming (Coming Soon)

// Streaming support is planned for a future release

Express.js Integration

import express from 'express';
import { LLMClient } from '@blockrun/llm';

const app = express();
app.use(express.json()); // Parse JSON bodies so req.body is populated

const client = new LLMClient({
  privateKey: process.env.BLOCKRUN_WALLET_KEY as `0x${string}`
});

app.post('/chat', async (req, res) => {
  try {
    const { message } = req.body;
    const response = await client.chat('openai/gpt-4o', message);
    res.json({ response });
  } catch (error) {
    const message = error instanceof Error ? error.message : 'Unknown error';
    res.status(500).json({ error: message });
  }
});

app.listen(3000);

Next.js API Route

// app/api/chat/route.ts
import { NextRequest, NextResponse } from 'next/server';
import { LLMClient } from '@blockrun/llm';

const client = new LLMClient({
  privateKey: process.env.BLOCKRUN_WALLET_KEY as `0x${string}`
});

export async function POST(request: NextRequest) {
  const { message } = await request.json();
  const response = await client.chat('openai/gpt-4o', message);
  return NextResponse.json({ response });
}
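
By default an uncaught error here surfaces as a generic 500 from Next.js. If you want a structured error response instead, a variant sketch (the status mapping is an assumption, not SDK behavior):

// app/api/chat/route.ts (with explicit error handling)
import { NextRequest, NextResponse } from 'next/server';
import { LLMClient, APIError } from '@blockrun/llm';

const client = new LLMClient({
  privateKey: process.env.BLOCKRUN_WALLET_KEY as `0x${string}`
});

export async function POST(request: NextRequest) {
  try {
    const { message } = await request.json();
    const response = await client.chat('openai/gpt-4o', message);
    return NextResponse.json({ response });
  } catch (error) {
    // Pass through the upstream status for API errors; everything else is a 500
    const status = error instanceof APIError ? (error.statusCode ?? 502) : 500;
    const message = error instanceof Error ? error.message : 'Unknown error';
    return NextResponse.json({ error: message }, { status });
  }
}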