Quick Start
Set your API key:
export OPENAI_API_KEY="sk-..."
Rust
use openai_oxide::{OpenAI, types::responses::*};

/// Minimal Responses API example: send one prompt and print the text output.
#[tokio::main]
async fn main() -> Result<(), openai_oxide::OpenAIError> {
    // Reads OPENAI_API_KEY from the environment.
    let client = OpenAI::from_env()?;

    // Build the request first, then dispatch it via the Responses endpoint.
    let request = ResponseCreateRequest::new("gpt-5.4-mini")
        .input("Explain quantum computing in one sentence.")
        .max_output_tokens(100);
    let response = client.responses().create(request).await?;

    println!("{}", response.output_text());
    Ok(())
}
Node.js
const { Client } = require("openai-oxide");

// `await` is not valid at the top level of a CommonJS module (it is only
// allowed in ES modules), so the call must run inside an async function.
async function main() {
  // Client reads OPENAI_API_KEY from the environment.
  const client = new Client();
  const text = await client.createText("gpt-5.4-mini", "Hello from Node!");
  console.log(text);
}

main().catch(console.error);
Python
import asyncio, json
from openai_oxide import Client


async def main():
    # Client reads OPENAI_API_KEY from the environment.
    client = Client()
    # create() returns a JSON string; decode it before reading fields.
    raw = await client.create("gpt-5.4-mini", "Hello from Python!")
    payload = json.loads(raw)
    print(payload["text"])


asyncio.run(main())
Drop-in Migration
Switch from the official OpenAI SDK by changing one import line. The rest of your code stays the same.
Python
- from openai import AsyncOpenAI
+ from openai_oxide.compat import AsyncOpenAI
Full working example (mirrors official openai examples/parsing.py):
#!/usr/bin/env python3
"""
Drop-in replacement for official openai SDK parsing example.
Change: `from openai import AsyncOpenAI` → `from openai_oxide.compat import AsyncOpenAI`
"""
import asyncio
from typing import List
from pydantic import BaseModel
# ── One-line change from official SDK ──
# from openai import AsyncOpenAI
from openai_oxide.compat import AsyncOpenAI
class Step(BaseModel):
    """One intermediate step of the model's worked solution."""
    # Natural-language reasoning for this step.
    explanation: str
    # The resulting expression/value after applying this step.
    output: str
class MathResponse(BaseModel):
    """Structured-output schema the model is asked to fill in."""
    # Ordered list of solution steps.
    steps: List[Step]
    # The final answer, e.g. "x = -29/8".
    final_answer: str
async def main():
    client = AsyncOpenAI()

    # parse() validates the model output against the MathResponse schema.
    completion = await client.chat.completions.parse(
        model="gpt-5.4-mini",
        messages=[
            {"role": "system", "content": "You are a helpful math tutor."},
            {"role": "user", "content": "solve 8x + 31 = 2"},
        ],
        response_format=MathResponse,
    )

    message = completion.choices[0].message
    if not message.parsed:
        # The model declined to answer; show its refusal text instead.
        print("refusal:", message.refusal)
        return
    for step in message.parsed.steps:
        print(f" {step.explanation} → {step.output}")
    print("answer:", message.parsed.final_answer)


asyncio.run(main())
Node.js
- const OpenAI = require('openai');
+ const { OpenAI } = require('openai-oxide/compat');
Full working example (mirrors official openai SDK):
/**
 * Drop-in replacement for official openai SDK demo.
 * Change: `const OpenAI = require('openai')` → `const { OpenAI } = require('openai-oxide/compat')`
 */

// ── One-line change from official SDK ──
// const OpenAI = require('openai');
// NOTE: use the published package path, matching the migration line above
// (the previous relative `require('../compat')` only worked from inside the repo).
const { OpenAI } = require('openai-oxide/compat');

async function main() {
  const client = new OpenAI();

  // Non-streaming:
  console.log("----- standard request -----");
  const completion = await client.chat.completions.create({
    model: "gpt-5.4-mini",
    messages: [{ role: "user", content: "Say this is a test" }],
  });
  console.log(completion.choices[0].message.content);

  // Streaming: with `stream: true` the call resolves to an async iterable of chunks.
  console.log("----- streaming request -----");
  const stream = await client.chat.completions.create({
    model: "gpt-5.4-mini",
    messages: [{ role: "user", content: "How do I list files in a directory using Node.js?" }],
    stream: true,
  });
  for await (const chunk of stream) {
    const content = chunk.choices?.[0]?.delta?.content;
    if (content) process.stdout.write(content);
  }
  console.log();
}

// Surface failures instead of leaving an unhandled promise rejection.
main().catch((err) => {
  console.error(err);
  process.exit(1);
});