LangChain Integration

Enclava exposes an OpenAI-compatible API, so LangChain can use it as a drop-in chat model backend for chains, agents, memory-backed conversations, and RAG workflows.

Installation

pip install langchain langchain-openai

The FAISS-based RAG example further below also needs:

pip install langchain-community faiss-cpu

Basic Configuration

from langchain_openai import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage

# Configure with Enclava endpoint
llm = ChatOpenAI(
    openai_api_base="https://your-enclava-instance/api/v1",
    openai_api_key="YOUR_API_KEY",
    model="gpt-3.5-turbo"
)

# Simple completion
response = llm.predict("Hello, LangChain!")
print(response)

Complete Example

from langchain_openai import ChatOpenAI
from langchain.schema import (
    HumanMessage,
    SystemMessage,
    AIMessage
)

class EnclavaLangChain:
    def __init__(self, api_key, base_url="https://localhost/api/v1"):
        self.llm = ChatOpenAI(
            openai_api_base=base_url,
            openai_api_key=api_key,
            model="gpt-3.5-turbo"
        )

    def chat(self, message):
        """Send a single message and return the reply text"""
        return self.llm.predict(message)

    def chat_with_system(self, system_prompt, message):
        """Chat with a system prompt"""
        messages = [
            SystemMessage(content=system_prompt),
            HumanMessage(content=message)
        ]
        return self.llm.predict_messages(messages).content

    def chat_conversation(self, messages):
        """Multi-turn conversation from a list of role/content dicts"""
        langchain_messages = []
        for msg in messages:
            if msg["role"] == "system":
                langchain_messages.append(SystemMessage(content=msg["content"]))
            elif msg["role"] == "user":
                langchain_messages.append(HumanMessage(content=msg["content"]))
            elif msg["role"] == "assistant":
                langchain_messages.append(AIMessage(content=msg["content"]))

        return self.llm.predict_messages(langchain_messages).content

# Usage
client = EnclavaLangChain(api_key="YOUR_API_KEY")

# Simple chat
response = client.chat("Tell me a joke")
print(response)

# Chat with system prompt
response = client.chat_with_system(
    "You are a Python expert.",
    "How do I sort a list?"
)
print(response)

# Multi-turn conversation
conversation = [
    {"role": "system", "content": "You are helpful."},
    {"role": "user", "content": "What is 2+2?"},
    {"role": "assistant", "content": "2+2 equals 4."},
    {"role": "user", "content": "What about 5+3?"}
]
response = client.chat_conversation(conversation)
print(response)

Chains

Simple Chain

from langchain_openai import ChatOpenAI
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

llm = ChatOpenAI(
    openai_api_base="https://your-enclava-instance/api/v1",
    openai_api_key="YOUR_API_KEY",
    model="gpt-3.5-turbo"
)

# Create prompt template
template = PromptTemplate(
    input_variables=["product", "feature"],
    template="Tell me about {product}'s {feature} feature."
)

# Create chain
chain = LLMChain(llm=llm, prompt=template)

# Run chain
response = chain.run(product="Enclava", feature="RAG")
print(response)

Sequential Chain

from langchain_openai import ChatOpenAI
from langchain.chains import SequentialChain, LLMChain
from langchain.prompts import PromptTemplate

llm = ChatOpenAI(
    openai_api_base="https://your-enclava-instance/api/v1",
    openai_api_key="YOUR_API_KEY",
    model="gpt-3.5-turbo"
)

# First chain: Generate summary
summary_prompt = PromptTemplate(
    input_variables=["topic"],
    template="Summarize {topic} in one sentence."
)
# output_key="summary" feeds the {summary} input of the next chain
summary_chain = LLMChain(llm=llm, prompt=summary_prompt, output_key="summary")

# Second chain: Expand summary
expand_prompt = PromptTemplate(
    input_variables=["summary"],
    template="Expand this: {summary}"
)
expand_chain = LLMChain(llm=llm, prompt=expand_prompt)

# Combine chains
overall_chain = SequentialChain(
    chains=[summary_chain, expand_chain],
    input_variables=["topic"]
)

# Run
result = overall_chain.run(topic="confidential computing")
print(result)

Memory

Conversation Buffer Memory

from langchain_openai import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationChain

llm = ChatOpenAI(
    openai_api_base="https://your-enclava-instance/api/v1",
    openai_api_key="YOUR_API_KEY",
    model="gpt-3.5-turbo"
)

# Add memory
memory = ConversationBufferMemory()

# Create conversation chain
conversation = ConversationChain(
    llm=llm,
    memory=memory,
    verbose=True
)

# Chat with memory
response1 = conversation.predict(input="My name is Alice.")
print(f"AI: {response1}")

response2 = conversation.predict(input="What is my name?")
print(f"AI: {response2}") # Will remember Alice

Conversation Summary Memory

from langchain.memory import ConversationSummaryMemory

llm = ChatOpenAI(
    openai_api_base="https://your-enclava-instance/api/v1",
    openai_api_key="YOUR_API_KEY",
    model="gpt-3.5-turbo"
)

memory = ConversationSummaryMemory(llm=llm)

conversation = ConversationChain(
    llm=llm,
    memory=memory,
    verbose=True
)

# Long conversation gets summarized automatically
response = conversation.predict(input="Tell me about quantum computing")
print(response)

Tools and Agents

Simple Tool

from langchain_openai import ChatOpenAI
from langchain.tools import Tool
from langchain.agents import initialize_agent, AgentType

llm = ChatOpenAI(
    openai_api_base="https://your-enclava-instance/api/v1",
    openai_api_key="YOUR_API_KEY",
    model="gpt-3.5-turbo"
)

def calculator(expression: str) -> str:
    """Simple calculator (demo only: eval is not safe on untrusted input)"""
    try:
        result = eval(expression)
        return f"Result: {result}"
    except Exception:
        return "Error: Invalid expression"

# Define tool
calculator_tool = Tool(
    name="Calculator",
    func=calculator,
    description="Useful for math calculations. Input should be a mathematical expression."
)

# Initialize agent
agent = initialize_agent(
    tools=[calculator_tool],
    llm=llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True
)

# Run agent
response = agent.run("What is 2+2?")
print(response)

Custom Tool with API

import requests
from typing import Type
from langchain.tools import BaseTool
from pydantic import BaseModel, Field

class WeatherInput(BaseModel):
    location: str = Field(description="City name")

class WeatherTool(BaseTool):
    name: str = "Weather"
    description: str = "Get current weather for a location"
    args_schema: Type[BaseModel] = WeatherInput

    def _run(self, location: str) -> str:
        # Call an external weather API (placeholder URL and response fields)
        response = requests.get(
            f"https://api.weather.com/current?city={location}"
        )
        data = response.json()
        return f"Weather in {location}: {data['temperature']}°C, {data['conditions']}"

# Use in agent
llm = ChatOpenAI(
    openai_api_base="https://your-enclava-instance/api/v1",
    openai_api_key="YOUR_API_KEY",
    model="gpt-3.5-turbo"
)

agent = initialize_agent(
    tools=[WeatherTool()],
    llm=llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True
)

response = agent.run("What's the weather in Paris?")
print(response)

RAG Integration

Simple RAG with Enclava

from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Configure Enclava
llm = ChatOpenAI(
    openai_api_base="https://your-enclava-instance/api/v1",
    openai_api_key="YOUR_API_KEY",
    model="gpt-3.5-turbo"
)

embeddings = OpenAIEmbeddings(
    openai_api_base="https://your-enclava-instance/api/v1",
    openai_api_key="YOUR_API_KEY"
)

# Load documents
loader = TextLoader("document.txt")
documents = loader.load()

# Split documents
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
splits = text_splitter.split_documents(documents)

# Create vector store
vectorstore = FAISS.from_documents(splits, embeddings)

# Create RAG chain
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=vectorstore.as_retriever(search_kwargs={"k": 3})
)

# Query
query = "What is Enclava?"
response = qa_chain.run(query)
print(response)

RAG with Enclava RAG Endpoint

import requests
from langchain_openai import ChatOpenAI
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

llm = ChatOpenAI(
    openai_api_base="https://your-enclava-instance/api/v1",
    openai_api_key="YOUR_API_KEY",
    model="gpt-3.5-turbo"
)

def search_rag(query, collection="docs"):
    """Search Enclava RAG collection"""
    response = requests.post(
        "https://your-enclava-instance/api/v1/rag/search",
        headers={"Authorization": "Bearer YOUR_API_KEY"},
        json={
            "collection_name": collection,
            "query": query,
            "top_k": 5
        }
    )
    return "\n\n".join([r["content"] for r in response.json()["results"]])

# Create RAG chain
rag_prompt = PromptTemplate(
    input_variables=["context", "question"],
    template="Answer the question using this context:\n\n{context}\n\nQuestion: {question}"
)

rag_chain = LLMChain(llm=llm, prompt=rag_prompt)

# Use RAG
query = "How do I create an API key?"

# Step 1: Get context from RAG
context = search_rag(query)

# Step 2: Generate answer with context
response = rag_chain.run(context=context, question=query)
print(response)

Streaming

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    openai_api_base="https://your-enclava-instance/api/v1",
    openai_api_key="YOUR_API_KEY",
    model="gpt-3.5-turbo",
    streaming=True
)

# Stream response
for chunk in llm.stream("Tell me a story"):
    print(chunk.content, end="", flush=True)

Best Practices

  1. Environment Variables: Store API keys in environment variables, not in code (see the sketch after this list)
  2. Memory Management: Choose the memory class that fits your conversation length (buffer for short chats, summary for long ones)
  3. Error Handling: Wrap LLM calls in try/except blocks for resilience
  4. Streaming: Use streaming for long responses to improve perceived latency
  5. Chaining: Combine multi-step workflows using chains
  6. Tools: Create custom tools for domain-specific actions
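
A minimal sketch of practices 1 and 3, assuming the key is exported as an ENCLAVA_API_KEY environment variable (the variable names here are illustrative, not part of Enclava):

import os
from langchain_openai import ChatOpenAI

# Read credentials from the environment instead of hardcoding them
llm = ChatOpenAI(
    openai_api_base=os.environ.get("ENCLAVA_BASE_URL", "https://your-enclava-instance/api/v1"),
    openai_api_key=os.environ["ENCLAVA_API_KEY"],
    model="gpt-3.5-turbo"
)

# Wrap calls in try/except so a transient API error does not crash the app
try:
    print(llm.predict("Hello, Enclava!"))
except Exception as exc:
    print(f"LLM call failed: {exc}")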

Next Steps