🦙 LlamaIndex + PostgreSQL
Full LlamaIndex VectorStore interface with FraiseQL's PostgreSQL GraphQL API. Query engines, document indexing, and hybrid search for enterprise RAG.
Build with LlamaIndex →
LlamaIndex's document readers and node parsers work seamlessly with FraiseQL's vector storage.
from llama_index.core import SimpleDirectoryReader, StorageContext, VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.embeddings.openai import OpenAIEmbedding
from fraiseql.integrations.llamaindex import FraiseQLVectorStore

# Load documents with LlamaIndex
documents = SimpleDirectoryReader("data").load_data()

# Parse into nodes
node_parser = SentenceSplitter(chunk_size=1024, chunk_overlap=200)
nodes = node_parser.get_nodes_from_documents(documents)

# Initialize FraiseQL vector store
embed_model = OpenAIEmbedding()
vector_store = FraiseQLVectorStore(
    graphql_url="http://localhost:8000/graphql",
    embed_model=embed_model,
    collection_name="llamaindex_docs"
)

# Register the store with a storage context so the index writes through it
storage_context = StorageContext.from_defaults(vector_store=vector_store)

# Create the index; embeddings are pushed to FraiseQL as nodes are indexed
index = VectorStoreIndex(
    nodes=nodes,
    storage_context=storage_context,
    embed_model=embed_model,
    show_progress=True
)

# Persist local index metadata (the vectors already live in FraiseQL)
index.storage_context.persist()
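Later batches of documents can be added without rebuilding the index. A minimal sketch, reusing the node_parser and index from above; the "data/new" path is a placeholder, and incremental writes assume FraiseQLVectorStore implements the standard add() hook:

# Add new documents to the same FraiseQL collection incrementally
new_docs = SimpleDirectoryReader("data/new").load_data()
new_nodes = node_parser.get_nodes_from_documents(new_docs)
index.insert_nodes(new_nodes)  # embeds the nodes and writes them through the vector store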
from llama_index.core import VectorStoreIndex
from llama_index.embeddings.openai import OpenAIEmbedding
from fraiseql.integrations.llamaindex import FraiseQLVectorStore

# Reconnect to the existing collection
embed_model = OpenAIEmbedding()
vector_store = FraiseQLVectorStore(
    graphql_url="http://localhost:8000/graphql",
    embed_model=embed_model,
    collection_name="llamaindex_docs"
)
index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)

# Create query engine
query_engine = index.as_query_engine(similarity_top_k=5)

# Query
response = query_engine.query("How do I implement user authentication?")
print(response)
from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters

# Create query engine with metadata filtering
query_engine = index.as_query_engine(
    similarity_top_k=5,
    filters=MetadataFilters(filters=[
        ExactMatchFilter(key="category", value="security"),
        ExactMatchFilter(key="version", value="v2.0")
    ])
)

# Query with filtering (top_k is fixed when the engine is built, not per query)
response = query_engine.query("What are the security best practices?")
print(f"Response: {response}")
print(f"Source nodes: {len(response.source_nodes)}")
from llama_index.core import SimpleDirectoryReader, StorageContext, VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.response_synthesizers import CompactAndRefine
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from fraiseql.integrations.llamaindex import FraiseQLVectorStore

# Setup components
llm = OpenAI(model="gpt-4", temperature=0)
embed_model = OpenAIEmbedding()

# Initialize vector store
vector_store = FraiseQLVectorStore(
    graphql_url="http://localhost:8000/graphql",
    embed_model=embed_model,
    collection_name="knowledge_base"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)

# Load and index documents
documents = SimpleDirectoryReader("docs").load_data()
nodes = SentenceSplitter(chunk_size=512).get_nodes_from_documents(documents)
index = VectorStoreIndex(
    nodes=nodes,
    storage_context=storage_context,
    embed_model=embed_model
)

# Create a streaming RAG query engine with compact-and-refine synthesis
query_engine = index.as_query_engine(
    llm=llm,
    response_synthesizer=CompactAndRefine(llm=llm, streaming=True),
    similarity_top_k=3
)

# Ask questions and stream the answer as it is generated
response = query_engine.query("How do I optimize database performance?")
response.print_response_stream()
Complete LlamaIndex VectorStore interface. All query patterns supported.
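Because the store exposes LlamaIndex's vector store interface, it can also be queried below the index layer. A minimal sketch, reusing the vector_store and embed_model from the pipeline above and assuming FraiseQLVectorStore accepts the standard VectorStoreQuery:

from llama_index.core.vector_stores import VectorStoreQuery

# Embed the question yourself and query the store directly
query_embedding = embed_model.get_query_embedding("How do I rotate API keys?")
result = vector_store.query(
    VectorStoreQuery(query_embedding=query_embedding, similarity_top_k=3)
)
for node, score in zip(result.nodes, result.similarities):
    print(f"{score:.3f}  {node.get_content()[:80]}")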
Hybrid search, metadata filtering, and custom retrieval strategies.
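Hybrid retrieval is requested through LlamaIndex's standard query-mode flag. A sketch, assuming FraiseQLVectorStore advertises hybrid support; alpha weights dense versus keyword scores and is a LlamaIndex parameter, not FraiseQL-specific:

# Blend dense vector similarity with keyword search (alpha=1.0 means pure vector)
hybrid_engine = index.as_query_engine(
    vector_store_query_mode="hybrid",
    alpha=0.5,
    similarity_top_k=5
)
response = hybrid_engine.query("connection pooling timeout errors")
print(response)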
PostgreSQL reliability with GraphQL API. Production-grade RAG systems.
Bring your own models: local embeddings and custom models, GPT-4 and OpenAI embedding models, Claude and Titan integration, or Ollama for private deployments.
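Any LlamaIndex embedding or LLM backend can plug into the same pipeline. A sketch with a local HuggingFace embedding model and an Ollama-served LLM; the model names and the private_docs collection are placeholders, and passing a non-OpenAI embed_model to FraiseQLVectorStore is assumed to work since the store takes a standard embed_model argument:

from llama_index.core import VectorStoreIndex
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.ollama import Ollama
from fraiseql.integrations.llamaindex import FraiseQLVectorStore

# Local embeddings and a private LLM over the same FraiseQL-backed storage
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
llm = Ollama(model="llama3", request_timeout=120.0)

vector_store = FraiseQLVectorStore(
    graphql_url="http://localhost:8000/graphql",
    embed_model=embed_model,
    collection_name="private_docs"
)
index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)
response = index.as_query_engine(llm=llm).query("Summarize our deployment runbook.")
print(response)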
LlamaIndex + FraiseQL = Production-ready AI applications
Start with LlamaIndex →