Official ModelRiver client SDK for real-time AI response streaming via WebSockets.
npm install @modelriver/client
# or
yarn add @modelriver/client
# or
pnpm add @modelriver/client
<script src="https://cdn.modelriver.com/client/v1.1.3/modelriver.min.js"></script>
<!-- or latest -->
<script src="https://cdn.modelriver.com/client/latest/modelriver.min.js"></script>
Your backend calls the ModelRiver /api/v1/ai/async endpoint and receives connection details:
// Your backend endpoint proxies to ModelRiver
const response = await fetch('/api/ai/request', {
method: 'POST',
// Content-Type is required so the server parses the JSON body correctly
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ message: 'Hello AI' }),
});
// Fail fast on HTTP errors instead of destructuring an error payload
if (!response.ok) {
throw new Error(`AI request failed: ${response.status}`);
}
// Response from /api/v1/ai/async:
// {
// "message": "success",
// "status": "pending",
// "channel_id": "a1b2c3d4-...",
// "ws_token": "one-time-websocket-token",
// "websocket_url": "wss://api.modelriver.com/socket",
// "websocket_channel": "ai_response:PROJECT_ID:a1b2c3d4-..."
// }
const { channel_id, ws_token, websocket_url, websocket_channel } = await response.json();
import { ModelRiverClient } from '@modelriver/client';
// Create a client pointed at the ModelRiver WebSocket endpoint.
const client = new ModelRiverClient({
baseUrl: 'wss://api.modelriver.com/socket',
});
// Fired when the AI response arrives over the WebSocket.
client.on('response', (data) => {
console.log('AI Response:', data);
});
// Fired on connection or channel errors.
client.on('error', (error) => {
console.error('Error:', error);
});
// Connection details come from your backend's /api/v1/ai/async response
// (see the snippet above that destructures them).
client.connect({
channelId: channel_id,
wsToken: ws_token,
websocketUrl: websocket_url,
websocketChannel: websocket_channel,
});
import { useModelRiver } from '@modelriver/client/react';
// React hook example: connect to a ModelRiver channel and render
// workflow progress, the final response, and any error.
function ChatComponent() {
const {
connect,
disconnect,
response,
error,
isConnected,
steps
} = useModelRiver({
baseUrl: 'wss://api.modelriver.com/socket',
// persist: true stores the channel in localStorage for page-refresh recovery
persist: true,
});
const handleSend = async () => {
// NOTE(review): `yourBackendAPI` and `message` are placeholders your app provides.
const {
channel_id,
ws_token,
websocket_url,
websocket_channel,
} = await yourBackendAPI.createRequest(message); // calls /api/v1/ai/async
connect({
channelId: channel_id,
wsToken: ws_token,
websocketUrl: websocket_url,
websocketChannel: websocket_channel,
});
};
return (
<div>
<button onClick={handleSend} disabled={isConnected}>
Send
</button>
{/* Show workflow progress */}
{steps.map((step) => (
<div key={step.id} className={step.status}>
{step.name}
</div>
))}
{/* Show response */}
{response && (
<pre>{JSON.stringify(response.data, null, 2)}</pre>
)}
{/* Show error */}
{error && <p className="error">{error}</p>}
</div>
);
}
<script setup>
import { useModelRiver } from '@modelriver/client/vue';
// Vue composable: exposes connection controls plus reactive state
// (response, error, isConnected, steps).
const {
connect,
disconnect,
response,
error,
isConnected,
steps
} = useModelRiver({
baseUrl: 'wss://api.modelriver.com/socket',
});
async function handleSend() {
// NOTE(review): `yourBackendAPI` and `message` are placeholders your app provides.
const {
channel_id,
ws_token,
websocket_url,
websocket_channel,
} = await yourBackendAPI.createRequest(message); // calls /api/v1/ai/async
connect({
channelId: channel_id,
wsToken: ws_token,
websocketUrl: websocket_url,
websocketChannel: websocket_channel,
});
}
</script>
<template>
<div>
<button @click="handleSend" :disabled="isConnected">Send</button>
<!-- Workflow progress -->
<div v-for="step in steps" :key="step.id" :class="step.status">
{{ step.name }}
</div>
<!-- Final response and error states -->
<pre v-if="response">{{ response.data }}</pre>
<p v-if="error" class="error">{{ error }}</p>
</div>
</template>
import { Component, OnDestroy } from '@angular/core';
import { ModelRiverService } from '@modelriver/client/angular';
@Component({
selector: 'app-chat',
providers: [ModelRiverService],
template: `
<button (click)="send()" [disabled]="modelRiver.isConnected">
Send
</button>
<div *ngFor="let step of modelRiver.steps$ | async" [class]="step.status">
{{ step.name }}
</div>
<pre *ngIf="modelRiver.response$ | async as res">
{{ res.data | json }}
</pre>
<p *ngIf="modelRiver.error$ | async as err" class="error">
{{ err }}
</p>
`,
})
// Angular example: ModelRiverService is provided per component and
// cleaned up in ngOnDestroy.
export class ChatComponent implements OnDestroy {
constructor(
public modelRiver: ModelRiverService,
// FIX: backendService was used in send() but never injected.
// Replace this structural type with your app's own service that
// proxies to /api/v1/ai/async.
private backendService: {
createRequest: (message: string) => Promise<{
channel_id: string;
ws_token: string;
websocket_url: string;
websocket_channel: string;
}>;
},
) {
this.modelRiver.init({
baseUrl: 'wss://api.modelriver.com/socket'
});
}
async send() {
// NOTE(review): `message` is a placeholder your component supplies
// (e.g. bound from an input field).
const {
channel_id,
ws_token,
websocket_url,
websocket_channel,
} = await this.backendService.createRequest(message); // calls /api/v1/ai/async
this.modelRiver.connect({
channelId: channel_id,
wsToken: ws_token,
websocketUrl: websocket_url,
websocketChannel: websocket_channel,
});
}
ngOnDestroy() {
// Release sockets, timers, and listeners held by the service.
this.modelRiver.destroy();
}
}
<script>
import { createModelRiver } from '@modelriver/client/svelte';
import { onDestroy } from 'svelte';
// Svelte stores: response/error/isConnected/steps are readable stores
// (consumed below with the $ prefix).
const modelRiver = createModelRiver({
baseUrl: 'wss://api.modelriver.com/socket',
});
const { response, error, isConnected, steps, connect, disconnect } = modelRiver;
async function send() {
// NOTE(review): `backendAPI` and `message` are placeholders your app provides.
const {
channel_id,
ws_token,
websocket_url,
websocket_channel,
} = await backendAPI.createRequest(message); // calls /api/v1/ai/async
connect({
channelId: channel_id,
wsToken: ws_token,
websocketUrl: websocket_url,
websocketChannel: websocket_channel,
});
}
// Close the WebSocket when the component is torn down.
onDestroy(() => disconnect());
</script>
<button on:click={send} disabled={$isConnected}>Send</button>
{#each $steps as step}
<div class={step.status}>{step.name}</div>
{/each}
{#if $response}
<pre>{JSON.stringify($response.data, null, 2)}</pre>
{/if}
{#if $error}
<p class="error">{$error}</p>
{/if}
<!DOCTYPE html>
<html>
<head>
<!-- CDN bundle exposes the global `ModelRiver` namespace -->
<script src="https://cdn.modelriver.com/client/latest/modelriver.min.js"></script>
</head>
<body>
<button id="send">Send</button>
<pre id="response"></pre>
<script>
// Create the client from the global namespace (no bundler needed).
const client = new ModelRiver.ModelRiverClient({
baseUrl: 'wss://api.modelriver.com/socket',
});
// Render the AI response as pretty-printed JSON.
client.on('response', (data) => {
document.getElementById('response').textContent =
JSON.stringify(data, null, 2);
});
client.on('error', (error) => {
console.error('Error:', error);
});
document.getElementById('send').addEventListener('click', async () => {
// Get async connection info from your backend
const res = await fetch('/api/ai/request', { method: 'POST' });
const {
channel_id,
ws_token,
websocket_url,
websocket_channel,
} = await res.json(); // your backend calls /api/v1/ai/async
client.connect({
channelId: channel_id,
wsToken: ws_token,
websocketUrl: websocket_url,
websocketChannel: websocket_channel,
});
});
</script>
</body>
</html>
/** Options accepted by the ModelRiverClient constructor (all optional). */
interface ModelRiverClientOptions {
baseUrl?: string; // WebSocket URL (default: 'wss://api.modelriver.com/socket')
apiBaseUrl?: string; // Optional HTTP base URL for backend reconnect (/api/v1/ai/reconnect)
debug?: boolean; // Enable debug logging (default: false)
persist?: boolean; // Enable localStorage persistence (default: true)
storageKeyPrefix?: string; // Storage key prefix (default: 'modelriver_')
heartbeatInterval?: number; // Heartbeat interval in ms (default: 30000)
requestTimeout?: number; // Request timeout in ms (default: 300000)
}
| Method | Description |
|---|---|
connect({ channelId, wsToken, websocketUrl?, websocketChannel? }) |
Connect to WebSocket with channel ID |
disconnect() |
Disconnect from WebSocket |
reset() |
Reset state and clear stored data |
reconnect() |
Reconnect using stored channel ID |
reconnectWithBackend() |
Call your backend /api/v1/ai/reconnect to get a fresh ws_token and reconnect |
getState() |
Get current client state |
hasPendingRequest() |
Check if there's a pending request |
on(event, callback) |
Add event listener (returns unsubscribe function) |
off(event, callback) |
Remove event listener |
destroy() |
Clean up all resources |
| Event | Payload | Description |
|---|---|---|
connecting |
- | Connection attempt started |
connected |
- | Successfully connected |
disconnected |
reason?: string |
Disconnected from WebSocket |
response |
AIResponse |
AI response received |
error |
Error or string |
Error occurred |
step |
WorkflowStep |
Workflow step updated |
channel_joined |
- | Successfully joined channel |
channel_error |
reason: string |
Channel join failed |
// Response from the /api/v1/ai/async endpoint
interface AsyncResponse {
message: string; // "success"
status: 'pending'; // Always "pending" for async
channel_id: string; // Unique channel ID
ws_token: string; // One-time WebSocket token for authentication
websocket_url: string; // WebSocket URL to connect to
websocket_channel: string; // Full channel name (e.g., "ai_response:PROJECT_ID:uuid")
instructions?: {
websocket?: string;
webhook?: string;
};
test_mode?: boolean; // Present in test mode
}
// AI response received via WebSocket (emitted on the 'response' event)
interface AIResponse {
status: string; // "success" or "error"
channel_id?: string;
content?: string; // AI response text
model?: string; // Model used (e.g., "gpt-4")
data?: unknown; // Structured output data
meta?: {
workflow?: string;
status?: string;
duration_ms?: number;
usage?: { // Token accounting for the request
prompt_tokens?: number;
completion_tokens?: number;
total_tokens?: number;
};
};
error?: { // Present when status is "error"
message: string;
details?: unknown;
};
}
/** A single workflow step, delivered via the 'step' event. */
interface WorkflowStep {
id: string;
name: string;
status: 'pending' | 'loading' | 'success' | 'error';
duration?: number; // NOTE(review): presumably milliseconds — confirm against server docs
errorMessage?: string; // Set when status is 'error'
}
Your backend calls the /api/v1/ai/async endpoint and receives channel_id, ws_token, websocket_url, and websocket_channel. The SDK connects to the WebSocket using channel_id + ws_token. Use the persistence features (persist, hasPendingRequest, reconnect, reconnectWithBackend) together with your backend /api/v1/ai/reconnect endpoint.
┌──────────────┐ ┌──────────────┐ ┌──────────────┐
│ Frontend │ │ Your Backend │ │ ModelRiver │
└──────┬───────┘ └──────┬───────┘ └──────┬───────┘
│ │ │
│ 1. Request AI │ │
│─────────────────────>│ │
│ │ 2. Create request │
│ │─────────────────────>│
│ │ │
│ │ 3. Return channel_id│
│ │<─────────────────────│
│ 4. Return channel_id│ │
│<─────────────────────│ │
│ │ │
│ 5. Connect WebSocket (SDK) │
│─────────────────────────────────────────────>│
│ │ │
│ 6. Stream AI response │
│<─────────────────────────────────────────────│
│ │ │
The /api/v1/ai/async response contains:
channel_id - Unique identifier for this request
ws_token - Short-lived, one-time WebSocket token (per user + project)
websocket_url - WebSocket endpoint URL
websocket_channel - Channel name to join
The client SDK uses channel_id and ws_token to connect to the WebSocket.
The ws_token is short-lived, one-time use, and scoped to a single user + project.
For page refresh recovery:
The channel_id is stored in localStorage (when persist is enabled).
Call client.reconnect() to reuse the stored ws_token (if still valid).
Call client.reconnectWithBackend() to have your backend issue a fresh ws_token via /api/v1/ai/reconnect.
Important: Always obtain channel_id and ws_token from your backend.
Never expose your ModelRiver API key in frontend code. Your backend should be the only component that talks to ModelRiver's HTTP API (/api/v1/ai/async, /api/v1/ai/reconnect, etc.).
MIT