Skip to main content

Python Integration

Complete Python examples for integrating with Enclava chatbots.

Create and Chat

import requests

# Base URL of the Enclava API; adjust host/port for your deployment.
BASE_URL = "http://localhost/api/v1"
# Placeholder — replace with a real API key before running these examples.
API_KEY = "YOUR_API_KEY"

class EnclavaChatbot:
    """Minimal synchronous client for the Enclava chatbot HTTP API."""

    def __init__(self, api_key, base_url="http://localhost/api/v1"):
        """Store credentials and build the shared auth/JSON headers.

        Args:
            api_key: Bearer token used on every request.
            base_url: API root; generalized from the module-level constant
                so one process can talk to several deployments.
        """
        self.api_key = api_key
        self.base_url = base_url
        self.headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        }

    def create_chatbot(self, name, model, system_prompt, use_rag=False, rag_collection=None):
        """Create a new chatbot and return the API's JSON description.

        Raises:
            requests.HTTPError: on a non-2xx response.
        """
        response = requests.post(
            f"{self.base_url}/chatbot/create",
            headers=self.headers,
            json={
                "name": name,
                "model": model,
                "system_prompt": system_prompt,
                "use_rag": use_rag,
                "rag_collection": rag_collection,
            },
        )
        response.raise_for_status()
        return response.json()

    def chat(self, chatbot_id, message, conversation_id=None):
        """Send one message; pass conversation_id to continue a prior thread.

        Returns the decoded JSON reply.
        """
        response = requests.post(
            f"{self.base_url}/chatbot/chat",
            headers=self.headers,
            json={
                "chatbot_id": chatbot_id,
                "message": message,
                "conversation_id": conversation_id,
            },
        )
        response.raise_for_status()
        return response.json()

    def chat_stream(self, chatbot_id, message, conversation_id=None):
        """Chat with streaming, printing tokens to stdout as they arrive.

        conversation_id is new (optional, default None) for consistency
        with chat(); omitting it preserves the old behavior.
        """
        response = requests.post(
            f"{self.base_url}/chatbot/chat",
            headers=self.headers,
            json={
                "chatbot_id": chatbot_id,
                "message": message,
                "conversation_id": conversation_id,
                "stream": True,
            },
            stream=True,
        )
        # Fix: fail fast on HTTP errors instead of iterating an error body
        # (the non-streaming methods already checked status; this one did not).
        response.raise_for_status()
        for line in response.iter_lines():
            if line:
                data = line.decode("utf-8")
                # NOTE(review): assumes stream lines look like "response: <token>";
                # confirm against the API's actual streaming wire format.
                if "response" in data:
                    print(data.replace("response:", "").strip(), end="", flush=True)

# Usage: create a RAG-enabled support bot, then ask it one question.
client = EnclavaChatbot(api_key=API_KEY)

bot_config = {
    "name": "Support Bot",
    "model": "gpt-3.5-turbo",
    "system_prompt": "You are a helpful support agent.",
    "use_rag": True,
    "rag_collection": "documentation",
}
chatbot = client.create_chatbot(**bot_config)

chatbot_id = chatbot["id"]
print(f"Created chatbot: {chatbot_id}")

# Single chat turn.
response = client.chat(chatbot_id=chatbot_id, message="How do I reset my password?")
print(f"Bot: {response['response']}")

# Show how many retrieved documents informed the answer, if RAG fired.
if response.get("rag_used"):
    print(f"RAG sources used: {len(response['rag_sources'])}")

Conversation Management

class EnclavaConversation:
    """Tracks a multi-turn conversation with one chatbot.

    Wraps an EnclavaChatbot-style client, threading the conversation_id
    returned by the server through successive calls and keeping a local
    transcript of user/assistant turns.
    """

    def __init__(self, client, chatbot_id):
        self.client = client
        self.chatbot_id = chatbot_id
        self.conversation_id = None
        self.history = []

    def send(self, message):
        """Send one user message, update local state, return the raw reply."""
        reply = self.client.chat(
            chatbot_id=self.chatbot_id,
            message=message,
            conversation_id=self.conversation_id,
        )

        # The server assigns/echoes the conversation id; remember it so the
        # next turn continues the same thread.
        self.conversation_id = reply.get("conversation_id")

        stamp = reply.get("timestamp")
        self.history.append({"role": "user", "content": message, "timestamp": stamp})
        if "response" in reply:
            self.history.append(
                {"role": "assistant", "content": reply["response"], "timestamp": stamp}
            )

        return reply

    def get_history(self):
        """Return the locally tracked transcript (list of role/content dicts)."""
        return self.history

# Usage: hold a multi-turn conversation so the bot can use earlier context.
client = EnclavaChatbot(api_key=API_KEY)
chatbot = client.create_chatbot(
    name="Conversation Bot",
    model="gpt-3.5-turbo",
    system_prompt="You are a helpful assistant. Remember our conversation.",
)

conv = EnclavaConversation(client, chatbot["id"])

# Multi-turn conversation: later questions rely on facts stated earlier.
conv.send("My name is Alice")
for reply in (
    conv.send("What's your name?"),
    conv.send("I live in New York"),
    conv.send("Where am I from?"),
):
    print(reply)

# Dump the locally tracked transcript.
for turn in conv.get_history():
    print(f"{turn['role']}: {turn['content']}")

RAG-Enhanced Chatbot

import requests

class RAGChatbot:
    """Client for creating and querying RAG-backed Enclava chatbots."""

    def __init__(self, api_key, base_url="http://localhost/api/v1"):
        """Store credentials; base_url is hoisted here (was previously
        duplicated as a hard-coded literal in every method)."""
        self.api_key = api_key
        self.base_url = base_url
        self.headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        }

    def create_rag_chatbot(self, name, rag_collection, system_prompt=None):
        """Create a chatbot with RAG enabled over `rag_collection`.

        A default grounding prompt is synthesized when none is supplied.

        Raises:
            requests.HTTPError: on a non-2xx response.
        """
        if not system_prompt:
            system_prompt = f"""You are a helpful assistant that answers questions based on the {rag_collection} knowledge base.
When you find relevant information, cite it naturally in your response.
If you don't find relevant information, say so honestly."""

        response = requests.post(
            f"{self.base_url}/chatbot/create",
            headers=self.headers,
            json={
                "name": name,
                "model": "gpt-3.5-turbo",
                "system_prompt": system_prompt,
                "use_rag": True,
                "rag_collection": rag_collection,
                "rag_top_k": 5,  # number of chunks retrieved per query
                "similarity_threshold": 0.7,  # minimum retrieval score to include a chunk
            },
        )
        response.raise_for_status()
        return response.json()

    def chat_with_context(self, chatbot_id, message):
        """Chat and print any RAG sources the server reports, then the answer.

        Returns the full JSON result.
        """
        response = requests.post(
            f"{self.base_url}/chatbot/chat",
            headers=self.headers,
            json={
                "chatbot_id": chatbot_id,
                "message": message,
            },
        )
        # Fix: fail on HTTP errors instead of trying to parse an error body
        # (create_rag_chatbot already checked status; this method did not).
        response.raise_for_status()

        result = response.json()
        answer = result["response"]

        # Display RAG context if available (single truthiness check replaces
        # the previous redundant double check of the same value).
        sources = result.get("rag_sources") or []
        if sources:
            print("\n**Relevant Documents:**")
            for i, source in enumerate(sources, 1):
                print(f"{i}. Document: {source['document_id']} (relevance: {source['score']:.2f})")
                if source.get("content"):
                    preview = source["content"][:100]
                    print(f" Preview: {preview}...")

        print(f"\nAnswer: {answer}")
        return result

# Usage: build a chatbot wired to the "product_docs" collection and query it.
client = RAGChatbot(api_key=API_KEY)

chatbot = client.create_rag_chatbot(
    name="Documentation Assistant",
    rag_collection="product_docs",
)
chatbot_id = chatbot["id"]

# Ask a question and show which documents informed the answer.
client.chat_with_context(chatbot_id=chatbot_id, message="How do I configure the API key?")

Async Implementation

import aiohttp
import asyncio

class AsyncEnclavaChatbot:
    """Async (aiohttp-based) client for the Enclava chatbot API.

    A fresh ClientSession is opened per request, which keeps each call
    self-contained at the cost of connection reuse.
    """

    def __init__(self, api_key, base_url="http://localhost/api/v1"):
        self.api_key = api_key
        self.base_url = base_url
        self.headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        }

    async def _post(self, path, payload):
        # Shared POST helper: open a session, send JSON, raise on HTTP error,
        # and return the decoded JSON body.
        async with aiohttp.ClientSession() as session:
            async with session.post(
                f"{self.base_url}{path}",
                headers=self.headers,
                json=payload,
            ) as response:
                response.raise_for_status()
                return await response.json()

    async def create_chatbot(self, **config):
        """POST the chatbot config and return the created chatbot's JSON."""
        return await self._post("/chatbot/create", config)

    async def chat(self, chatbot_id, message):
        """Send one message to an existing chatbot and return the reply JSON."""
        return await self._post("/chatbot/chat", {"chatbot_id": chatbot_id, "message": message})

async def main():
    """Create a chatbot, then send it a few messages in sequence."""
    client = AsyncEnclavaChatbot(api_key=API_KEY)

    chatbot = await client.create_chatbot(
        name="Async Bot",
        model="gpt-3.5-turbo",
        system_prompt="You are a fast assistant.",
    )
    chatbot_id = chatbot["id"]
    print(f"Created chatbot: {chatbot_id}")

    # Chat turns are awaited one at a time, not run concurrently.
    for msg in ["Hello!", "How are you?", "Tell me a joke"]:
        reply = await client.chat(chatbot_id, msg)
        print(f"You: {msg}")
        print(f"Bot: {reply['response']}")

asyncio.run(main())

Web Application Integration

from flask import Flask, request, jsonify
import requests

# Flask application object; the route handlers below attach to it.
app = Flask(__name__)

class ChatbotService:
    """Thin server-side proxy around the Enclava chatbot API for the Flask app."""

    def __init__(self, api_key, base_url="http://localhost/api/v1"):
        """Store credentials; base_url is now a parameter (generalized from a
        hard-coded literal) with the same default, so callers are unchanged."""
        self.api_key = api_key
        self.base_url = base_url

    def _headers(self):
        # Shared auth header; requests sets the JSON Content-Type itself
        # when the `json=` argument is used.
        return {"Authorization": f"Bearer {self.api_key}"}

    def create_chatbot(self, config):
        """Forward a chatbot-creation config; returns the API's JSON body.

        NOTE(review): HTTP errors are not raised here — an error body is
        returned as-is to the Flask endpoint. Confirm that is intentional.
        """
        response = requests.post(
            f"{self.base_url}/chatbot/create",
            headers=self._headers(),
            json=config,
        )
        return response.json()

    def chat(self, chatbot_id, message, conversation_id=None):
        """Forward one chat message; returns the API's JSON body unchecked."""
        response = requests.post(
            f"{self.base_url}/chatbot/chat",
            headers=self._headers(),
            json={
                "chatbot_id": chatbot_id,
                "message": message,
                "conversation_id": conversation_id,
            },
        )
        return response.json()

# Initialize the shared service the route handlers delegate to.
chatbot_service = ChatbotService(api_key=API_KEY)

@app.route("/api/chatbot", methods=["POST"])
def create_chatbot_endpoint():
    """Create a new chatbot from the JSON config in the request body."""
    return jsonify(chatbot_service.create_chatbot(request.json))

@app.route("/api/chatbot/<chatbot_id>/chat", methods=["POST"])
def chat_endpoint(chatbot_id):
    """Relay one chat message to the chatbot identified in the URL."""
    payload = request.json
    result = chatbot_service.chat(
        chatbot_id=chatbot_id,
        message=payload.get("message"),
        conversation_id=payload.get("conversation_id"),
    )
    return jsonify(result)

if __name__ == "__main__":
    # Development server only; use a production WSGI server for deployment.
    app.run(debug=True, port=5000)

Error Handling

from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

class RobustChatbotClient:
    """Chat client with a retrying HTTP session and a request timeout."""

    def __init__(self, api_key):
        self.api_key = api_key
        self.base_url = "http://localhost/api/v1"
        self.headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        }

        # Configure retry strategy for transient failures.
        # FIX: urllib3's default allowed_methods does NOT include POST, so
        # without this the status_forcelist would never trigger a retry for
        # these POST-only endpoints. (Retrying a POST can resend a message
        # the server already processed — acceptable for chat, but be aware.)
        retry_strategy = Retry(
            total=3,
            backoff_factor=1,  # exponential backoff between attempts
            status_forcelist=[429, 500, 502, 503, 504],
            allowed_methods=frozenset(["POST"]),  # requires urllib3 >= 1.26
        )
        adapter = HTTPAdapter(max_retries=retry_strategy)
        self.session = requests.Session()
        self.session.mount("http://", adapter)
        self.session.mount("https://", adapter)

    def chat_with_retry(self, chatbot_id, message):
        """Chat with automatic retry on transient failures.

        Raises:
            requests.exceptions.RequestException: after retries are
                exhausted (logged, then re-raised for the caller).
        """
        try:
            response = self.session.post(
                f"{self.base_url}/chatbot/chat",
                headers=self.headers,
                json={
                    "chatbot_id": chatbot_id,
                    "message": message,
                },
                timeout=30,  # seconds; avoids hanging forever on a dead server
            )
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            print(f"Request failed: {e}")
            raise

# Usage: one chat call that survives transient server errors.
client = RobustChatbotClient(api_key=API_KEY)

try:
    reply = client.chat_with_retry(chatbot_id="your-chatbot-id", message="Hello!")
    print(f"Response: {reply['response']}")
except Exception as e:
    # Reached once the session's retry budget is exhausted.
    print(f"Failed after retries: {e}")

Best Practices

  1. Use Conversation IDs - Maintain context across requests
  2. Handle Errors - Implement retry logic and proper error handling
  3. Check RAG Sources - Verify RAG is providing relevant context
  4. Use Async for High Volume - Handle multiple concurrent requests
  5. Rate Limiting - Be aware of API rate limits
  6. Monitor Usage - Track performance and costs

Next Steps