🦜 LangChain + PostgreSQL
LangChain's VectorStore interface meets FraiseQL's PostgreSQL GraphQL API. Document ingestion, semantic search, and retrieval patterns for AI applications.
Start Building RAG →
LangChain document loaders work seamlessly with FraiseQL's GraphQL mutations for vector storage.
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
from fraiseql.integrations.langchain import FraiseQLVectorStore
# Load and split documents
loader = TextLoader("documents/my_file.txt")
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=200
)
docs = text_splitter.split_documents(documents)
# Initialize embeddings and vector store
embeddings = OpenAIEmbeddings()
vectorstore = FraiseQLVectorStore(
    graphql_url="http://localhost:8000/graphql",
    embeddings=embeddings,
    collection_name="my_documents"
)
# Ingest documents
vectorstore.add_documents(docs)
print(f"Added {len(docs)} document chunks to FraiseQL")
# Search for similar documents
query = "How do I implement authentication?"
results = vectorstore.similarity_search(query, k=5)
for doc in results:
    print(f"Content: {doc.page_content[:200]}...")
    print(f"Metadata: {doc.metadata}")
    print("---")
# Search with score and metadata filter
results_with_scores = vectorstore.similarity_search_with_score(
    query,
    k=10,
    filter={"category": "authentication", "version": "v2"}
)
for doc, score in results_with_scores:
    print(f"Score: {score:.3f}")
    print(f"Content: {doc.page_content[:200]}...")
    print(f"Source: {doc.metadata.get('source')}")
    print("---")
from langchain.chains import RetrievalQA
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from fraiseql.integrations.langchain import FraiseQLVectorStore
# Initialize components
llm = ChatOpenAI(temperature=0)
vectorstore = FraiseQLVectorStore(
    graphql_url="http://localhost:8000/graphql",
    embeddings=OpenAIEmbeddings(),
    collection_name="knowledge_base"
)
# Create RAG chain
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=vectorstore.as_retriever(
        search_kwargs={"k": 3, "filter": {"category": "technical"}}
    ),
    return_source_documents=True
)
# Ask questions
query = "How do I optimize database queries?"
result = qa_chain({"query": query})
print(result["result"])
print("\nSources:")
for doc in result["source_documents"]:
print(f"- {doc.metadata.get('title', 'Unknown')}")
Full VectorStore interface compatibility. Drop-in replacement for other vector stores.
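Taken at face value, that includes the standard class-method constructors; a sketch with from_documents, assuming FraiseQLVectorStore accepts its connection settings as keyword arguments the way other stores do:

# One-shot ingestion, the same pattern used with Chroma, FAISS, etc.
vectorstore = FraiseQLVectorStore.from_documents(
    docs,
    embedding=OpenAIEmbeddings(),
    graphql_url="http://localhost:8000/graphql",   # assumed to pass through as kwargs
    collection_name="my_documents"
)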
Combine semantic search with SQL metadata filtering for precise results.
HNSW indexing and Rust-fast queries. No external vector database needed.
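That presumably means pgvector's HNSW indexes inside your existing PostgreSQL instance. A sketch of what such an index looks like, issued from Python with psycopg; the table and column names are hypothetical, since FraiseQL manages the actual schema:

import psycopg

# Hypothetical table/column names; HNSW syntax requires pgvector >= 0.5.0.
with psycopg.connect("postgresql://localhost/mydb") as conn:
    conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_chunks_embedding "
        "ON document_chunks USING hnsw (embedding vector_cosine_ops) "
        "WITH (m = 16, ef_construction = 64)"
    )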
Works with any LangChain embeddings implementation:
- Hugging Face: sentence transformers, local embeddings
- OpenAI: text-embedding-ada-002, GPT integration
- Cohere: Embed API, multilingual support
- Local models: Ollama, private deployments (sketched below)
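To stay fully local, swap only the embeddings object; the vector store setup is unchanged. A sketch using Ollama embeddings from langchain_community; the model name is illustrative:

from langchain_community.embeddings import OllamaEmbeddings

# Any LangChain Embeddings implementation can back the store;
# "nomic-embed-text" is just an example Ollama embedding model.
local_embeddings = OllamaEmbeddings(model="nomic-embed-text")
vectorstore = FraiseQLVectorStore(
    graphql_url="http://localhost:8000/graphql",
    embeddings=local_embeddings,
    collection_name="my_documents"
)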
LangChain + FraiseQL = Production-ready AI applications with PostgreSQL
Start with LangChain →