JavaScript/TypeScript Integration
Use JavaScript or TypeScript to connect to Enclava with the OpenAI SDK.
OpenAI SDK
Installation
npm install openai
# or
yarn add openai
Basic Configuration
import OpenAI from 'openai';

// Point the OpenAI SDK at the Enclava-compatible endpoint.
const openai = new OpenAI({
  baseURL: 'https://your-enclava-instance/api/v1',
  apiKey: 'YOUR_API_KEY'
});

// Send a single chat message and print the assistant's reply.
async function main() {
  const completion = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'Hello, Enclava!' }]
  });
  const reply = completion.choices[0].message.content;
  console.log(reply);
}

main();
Complete Example
import OpenAI from 'openai';

/**
 * Thin wrapper around the OpenAI SDK pointed at an Enclava endpoint.
 */
class EnclavaClient {
  /**
   * @param {string} apiKey - Enclava API key, sent as a Bearer token.
   * @param {string} [baseURL] - OpenAI-compatible base URL of the instance.
   */
  constructor(apiKey, baseURL = 'https://localhost/api/v1') {
    this.client = new OpenAI({
      baseURL,
      apiKey
    });
  }

  /**
   * Send a chat conversation and return the first choice's text.
   * @param {Array<{role: string, content: string}>} messages
   * @param {string} [model]
   * @param {object} [options] - Extra create() parameters (temperature, etc.).
   * @returns {Promise<string>}
   */
  async chat(messages, model = 'gpt-3.5-turbo', options = {}) {
    const response = await this.client.chat.completions.create({
      model,
      messages,
      ...options
    });
    return response.choices[0].message.content;
  }

  /**
   * Embed a single text string and return its vector.
   * @param {string} text
   * @param {string} [model] - Embedding model; defaults to ada-002 so the
   *   original single-argument call sites keep working unchanged.
   * @returns {Promise<number[]>}
   */
  async embed(text, model = 'text-embedding-ada-002') {
    const response = await this.client.embeddings.create({
      model,
      input: text
    });
    return response.data[0].embedding;
  }

  /**
   * List the models exposed by the Enclava instance.
   * @returns {Promise<object[]>}
   */
  async getModels() {
    const response = await this.client.models.list();
    return response.data;
  }
}
// Usage
async function main() {
  const client = new EnclavaClient('YOUR_API_KEY');

  // Chat: ask for a joke and print the reply.
  const joke = await client.chat([
    { role: 'user', content: 'Tell me a joke' }
  ]);
  console.log(joke);

  // Embed: vectorize a short string and report its dimensionality.
  const vector = await client.embed('Hello, world!');
  console.log(`Embedding dimensions: ${vector.length}`);

  // List models: print every model id the instance exposes.
  const models = await client.getModels();
  for (const model of models) {
    console.log(`Model: ${model.id}`);
  }
}

main();
Streaming Example
import OpenAI from 'openai';

const openai = new OpenAI({
  baseURL: 'https://your-enclava-instance/api/v1',
  apiKey: 'YOUR_API_KEY'
});

// Stream a completion and write each token to stdout as it arrives.
async function streamChat() {
  const stream = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'Write a story about AI' }],
    stream: true
  });
  for await (const chunk of stream) {
    const token = chunk.choices[0]?.delta?.content;
    if (token) {
      process.stdout.write(token);
    }
  }
}

streamChat();
Async/Await Patterns
import OpenAI from 'openai';

const openai = new OpenAI({
  baseURL: 'https://your-enclava-instance/api/v1',
  apiKey: 'YOUR_API_KEY'
});

// Fire one completion request per message and await them all in parallel.
async function processMultipleMessages(messages) {
  const completions = await Promise.all(
    messages.map(msg =>
      openai.chat.completions.create({
        model: 'gpt-3.5-turbo',
        messages: [msg]
      })
    )
  );
  return completions.map(c => c.choices[0].message.content);
}

async function main() {
  const messages = [
    { role: 'user', content: 'Hello' },
    { role: 'user', content: 'How are you?' },
    { role: 'user', content: 'What is AI?' }
  ];
  const answers = await processMultipleMessages(messages);
  answers.forEach((answer, i) => console.log(`Response ${i + 1}: ${answer}`));
}

main();
Error Handling
import OpenAI from 'openai';

/** OpenAI-SDK wrapper that retries chat requests on rate limiting (HTTP 429). */
class EnclavaClient {
  constructor(apiKey, baseURL = 'https://localhost/api/v1') {
    this.client = new OpenAI({ baseURL, apiKey });
  }

  /**
   * Send a chat request, retrying with exponential backoff on 429 responses.
   * Any non-429 error is rethrown immediately.
   * @param {Array<{role: string, content: string}>} messages
   * @param {number} [maxRetries] - Maximum number of attempts.
   * @returns {Promise<string>} The first choice's message content.
   * @throws {Error} 'Max retries exceeded' after maxRetries rate-limited attempts.
   */
  async chatWithRetry(messages, maxRetries = 3) {
    for (let attempt = 0; attempt < maxRetries; attempt++) {
      try {
        const response = await this.client.chat.completions.create({
          model: 'gpt-3.5-turbo',
          messages
        });
        return response.choices[0].message.content;
      } catch (error) {
        if (error.status !== 429) {
          // Only rate-limit errors are retried; everything else propagates.
          throw error;
        }
        console.log(`Rate limited. Attempt ${attempt + 1}/${maxRetries}`);
        // Back off exponentially (1s, 2s, 4s, ...). Skip the sleep after the
        // final attempt — the previous version wasted one last full wait
        // before throwing.
        if (attempt < maxRetries - 1) {
          const waitTime = 2 ** attempt;
          await new Promise(resolve => setTimeout(resolve, waitTime * 1000));
        }
      }
    }
    throw new Error('Max retries exceeded');
  }
}
// Drive the retrying client and surface any terminal failure on stderr.
async function main() {
  const client = new EnclavaClient('YOUR_API_KEY');
  try {
    const reply = await client.chatWithRetry([
      { role: 'user', content: 'Hello' }
    ]);
    console.log(reply);
  } catch (error) {
    console.error('Error:', error.message);
  }
}

main();
Fetch API
If you prefer using the native Fetch API:
/**
 * Minimal Enclava client built on the global Fetch API (Node 18+ / browsers).
 */
class EnclavaClient {
  // Declared so the instance shape is explicit (and valid under TypeScript).
  apiKey;
  baseURL;

  /**
   * @param {string} apiKey - Bearer token for the Enclava API.
   * @param {string} [baseURL] - OpenAI-compatible base URL.
   */
  constructor(apiKey, baseURL = 'https://localhost/api/v1') {
    this.apiKey = apiKey;
    this.baseURL = baseURL;
  }

  /**
   * POST a chat completion and return the first choice's text.
   * @param {Array<{role: string, content: string}>} messages
   * @param {object} [options] - Extra body fields; `options.model` overrides
   *   the default model. Streaming is not supported by this method.
   * @returns {Promise<string>}
   * @throws {Error} With the server's error message, or the HTTP status when
   *   the error body is missing or not JSON.
   */
  async chat(messages, options = {}) {
    // Pull `model` out so the spread below cannot duplicate the key.
    const { model, ...rest } = options;
    const response = await fetch(`${this.baseURL}/chat/completions`, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${this.apiKey}`,
        'Content-Type': 'application/json'
      },
      body: JSON.stringify({
        model: model || 'gpt-3.5-turbo',
        messages,
        ...rest
      })
    });
    if (!response.ok) {
      // Error bodies are not guaranteed to be JSON; don't let parsing throw.
      const error = await response.json().catch(() => ({}));
      throw new Error(error.error?.message || `Request failed (HTTP ${response.status})`);
    }
    const data = await response.json();
    return data.choices[0].message.content;
  }

  /**
   * Embed a single string and return its vector.
   * @param {string} text
   * @returns {Promise<number[]>}
   * @throws {Error} When the server responds with a non-2xx status.
   */
  async embed(text) {
    const response = await fetch(`${this.baseURL}/embeddings`, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${this.apiKey}`,
        'Content-Type': 'application/json'
      },
      body: JSON.stringify({
        model: 'text-embedding-ada-002',
        input: text
      })
    });
    if (!response.ok) {
      throw new Error('Embedding request failed');
    }
    const data = await response.json();
    return data.data[0].embedding;
  }
}
// Exercise both endpoints of the fetch-based client.
async function main() {
  const client = new EnclavaClient('YOUR_API_KEY');

  const reply = await client.chat([
    { role: 'user', content: 'Hello, world!' }
  ]);
  console.log(reply);

  const vector = await client.embed('Hello, world!');
  console.log(`Embedding: ${vector.length} dimensions`);
}

main();
Streaming with Fetch
/**
 * Stream a chat completion over server-sent events using the Fetch API.
 * Fixes vs. the naive version: carries partial lines across reads (an SSE
 * event can be split between two chunks) and decodes with { stream: true }
 * so multi-byte UTF-8 characters split across chunks are never corrupted.
 */
async function streamChat(messages) {
  const response = await fetch('https://your-enclava-instance/api/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Authorization': 'Bearer YOUR_API_KEY',
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({
      model: 'gpt-3.5-turbo',
      messages,
      stream: true
    })
  });
  if (!response.ok) {
    throw new Error('Stream request failed');
  }
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = ''; // Trailing partial line, kept until the next read completes it.
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    // { stream: true } holds incomplete multi-byte sequences inside the decoder.
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split('\n');
    buffer = lines.pop(); // The last element may be an unfinished line.
    for (const line of lines) {
      if (!line.startsWith('data: ')) continue;
      const data = line.slice(6);
      if (data === '[DONE]') continue;
      try {
        const parsed = JSON.parse(data);
        const content = parsed.choices?.[0]?.delta?.content;
        if (content) {
          process.stdout.write(content);
        }
      } catch (e) {
        // Skip invalid JSON
      }
    }
  }
}

streamChat([{ role: 'user', content: 'Tell me a story' }]);
TypeScript Example
import OpenAI from 'openai';

/** A single chat message in OpenAI format. */
interface Message {
  role: 'system' | 'user' | 'assistant';
  content: string;
}

/** Per-request options for EnclavaClient.chat(). */
interface ChatOptions {
  model?: string;
  temperature?: number;
  maxTokens?: number;
  /** Kept for backward compatibility; chat() always resolves with the full
   *  (non-streamed) reply, so this flag is ignored. */
  stream?: boolean;
}

class EnclavaClient {
  private client: OpenAI;

  constructor(apiKey: string, baseURL: string = 'https://localhost/api/v1') {
    this.client = new OpenAI({ baseURL, apiKey });
  }

  /**
   * Send a conversation and return the assistant's reply text.
   * Always issues a non-streaming request: forwarding `stream: true` would
   * make the SDK return an async iterable (no `.choices`), which this
   * Promise<string> signature cannot represent.
   */
  async chat(messages: Message[], options: ChatOptions = {}): Promise<string> {
    const response = await this.client.chat.completions.create({
      model: options.model || 'gpt-3.5-turbo',
      messages,
      temperature: options.temperature,
      max_tokens: options.maxTokens
    });
    // `content` is string | null in the SDK types; normalize to string so the
    // declared return type holds under strict null checks.
    return response.choices[0].message.content ?? '';
  }

  /** Embed one string and return its vector. */
  async embed(text: string): Promise<number[]> {
    const response = await this.client.embeddings.create({
      model: 'text-embedding-ada-002',
      input: text
    });
    return response.data[0].embedding;
  }
}
// Usage
async function main() {
  const client = new EnclavaClient('YOUR_API_KEY');
  const conversation: Message[] = [
    { role: 'user', content: 'Hello, TypeScript!' }
  ];
  const reply = await client.chat(conversation);
  console.log(reply);
}

main();
Best Practices
- Environment Variables: Store API keys in `.env` files
- Type Safety: Use TypeScript for better development experience
- Error Handling: Implement retry logic and error catching
- Streaming: Use streaming for long responses
- Async/Await: Use async patterns for concurrent requests
Next Steps
- Python Integration - Use with Python
- LangChain Integration - Integrate with LangChain
- Chat Completions - Full API details