Integration Guides
Drop ziptoken into your existing AI stack. Every guide follows the same 3-line pattern: compress → call LLM → use response.
OpenAI (Node.js)
Wrap your messages with compress() before passing them to the Chat Completions API.
typescript
import OpenAI from 'openai'
const openai = new OpenAI()
/**
 * Compress a prompt via the ziptoken API before sending it to the LLM.
 *
 * @param text - The raw prompt text to compress.
 * @returns The compressed prompt string.
 * @throws If the API responds with a non-2xx status — without this check a
 *   failed request would silently yield `undefined` as the prompt.
 */
async function compress(text: string): Promise<string> {
  const res = await fetch('https://api.ziptoken.ai/api/v1/compress', {
    method: 'POST',
    headers: {
      Authorization: `Bearer ${process.env.ZIPTOKEN_API_KEY}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({ text, mode: 'balanced' }),
  })
  if (!res.ok) {
    throw new Error(`ziptoken compress failed: ${res.status} ${res.statusText}`)
  }
  const { compressed } = (await res.json()) as { compressed: string }
  return compressed
}
// Usage
const userMessage = 'Write a detailed analysis of renewable energy trends...'
const compressed = await compress(userMessage)
const completion = await openai.chat.completions.create({
model: 'gpt-4o',
messages: [{ role: 'user', content: compressed }],
})
Anthropic SDK (Node.js)
Works identically with the Anthropic SDK. Compress the user message before sending.
typescript
import Anthropic from '@anthropic-ai/sdk'
const client = new Anthropic()
/**
 * Compress a prompt via the ziptoken API, tuned for Claude models.
 *
 * @param text - The raw prompt text to compress.
 * @returns The compressed prompt string.
 * @throws If the API responds with a non-2xx status — without this check a
 *   failed request would silently yield `undefined` as the prompt.
 */
async function compressedMessage(text: string): Promise<string> {
  const res = await fetch('https://api.ziptoken.ai/api/v1/compress', {
    method: 'POST',
    headers: {
      Authorization: `Bearer ${process.env.ZIPTOKEN_API_KEY}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({ text, mode: 'balanced', model: 'claude-3-5' }),
  })
  if (!res.ok) {
    throw new Error(`ziptoken compress failed: ${res.status} ${res.statusText}`)
  }
  const { compressed } = (await res.json()) as { compressed: string }
  return compressed
}
const text = await compressedMessage('Explain quantum entanglement in simple terms with examples...')
const message = await client.messages.create({
model: 'claude-opus-4-5',
max_tokens: 1024,
messages: [{ role: 'user', content: text }],
})
LangChain (TypeScript)
Create a custom runnable that compresses text before passing it to any LangChain chain.
typescript
import { RunnableLambda } from '@langchain/core/runnables'
import { ChatOpenAI } from '@langchain/openai'
import { StringOutputParser } from '@langchain/core/output_parsers'
import { HumanMessage } from '@langchain/core/messages'
/**
 * Compress a prompt via the ziptoken API (default mode).
 *
 * @param text - The raw prompt text to compress.
 * @returns The compressed prompt string.
 * @throws If the API responds with a non-2xx status — without this check a
 *   failed request would silently feed `undefined` into the chain.
 */
async function zipCompress(text: string): Promise<string> {
  const res = await fetch('https://api.ziptoken.ai/api/v1/compress', {
    method: 'POST',
    headers: { Authorization: `Bearer ${process.env.ZIPTOKEN_API_KEY}`, 'Content-Type': 'application/json' },
    body: JSON.stringify({ text }),
  })
  if (!res.ok) {
    throw new Error(`ziptoken compress failed: ${res.status} ${res.statusText}`)
  }
  const { compressed } = (await res.json()) as { compressed: string }
  return compressed
}
const compressStep = RunnableLambda.from(zipCompress)
const llm = new ChatOpenAI({ model: 'gpt-4o' })
const parser = new StringOutputParser()
const chain = compressStep
.pipe((compressed: string) => [new HumanMessage(compressed)])
.pipe(llm)
.pipe(parser)
const result = await chain.invoke('Write a 1000-word essay on climate change...')
Vercel AI SDK
Compress prompts server-side in a Route Handler before streaming to the client.
typescript
// app/api/chat/route.ts
import { openai } from '@ai-sdk/openai'
import { streamText } from 'ai'
/**
 * Compress a prompt via the ziptoken API (server-side only — the key never
 * reaches the client).
 *
 * @param text - The raw prompt text to compress.
 * @returns The compressed prompt string.
 * @throws If the API responds with a non-2xx status — without this check a
 *   failed request would silently yield `undefined` as the prompt.
 */
async function compress(text: string): Promise<string> {
  const res = await fetch('https://api.ziptoken.ai/api/v1/compress', {
    method: 'POST',
    headers: {
      Authorization: `Bearer ${process.env.ZIPTOKEN_API_KEY}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({ text }),
  })
  if (!res.ok) {
    throw new Error(`ziptoken compress failed: ${res.status} ${res.statusText}`)
  }
  const { compressed } = (await res.json()) as { compressed: string }
  return compressed
}
/**
 * Route Handler: compress the latest user message server-side, then stream
 * the model response to the client.
 */
export async function POST(req: Request) {
  const { messages } = await req.json()
  // Compress only the latest user message; earlier turns pass through as-is.
  // Optional chaining guards against an empty messages array — the original
  // `last.role` would throw a TypeError on `undefined`.
  const last = messages[messages.length - 1]
  if (last?.role === 'user') {
    last.content = await compress(last.content)
  }
  const result = streamText({ model: openai('gpt-4o'), messages })
  return result.toDataStreamResponse()
}
Python
Use httpx or requests — no SDK required.
python
import httpx
import openai
ZIPTOKEN_KEY = "zt_live_xxxxxxxxxxxx"
def compress(text: str, mode: str = "balanced") -> str:
    """Send *text* to the ziptoken API and return the compressed prompt.

    Raises httpx.HTTPStatusError via raise_for_status() on a non-2xx reply.
    """
    response = httpx.post(
        "https://api.ziptoken.ai/api/v1/compress",
        headers={"Authorization": f"Bearer {ZIPTOKEN_KEY}"},
        json={"text": text, "mode": mode},
    )
    response.raise_for_status()
    payload = response.json()
    return payload["compressed"]
# Usage with OpenAI
client = openai.OpenAI()
prompt = compress("Write a detailed analysis of global supply chain risks...")
completion = client.chat.completions.create(
model="gpt-4o",
messages=[{"role": "user", "content": prompt}],
)