from raglight.rag.simple_rag_api import RAGPipeline
from raglight.config.settings import Settings
from raglight.config.rag_config import RAGConfig
from raglight.config.vector_store_config import VectorStoreConfig
from raglight.models.data_source_model import FolderSource
def main() -> None:
    """Run a minimal end-to-end RAG workflow with raglight.

    Steps:
        1. Set up the library's logging.
        2. Configure embeddings + a persistent local ChromaDB vector store.
        3. Configure the RAG pipeline (Ollama-served LLM, top-k retrieval,
           knowledge base sourced from the local ``./data`` folder).
        4. Build the pipeline, which ingests the documents into the store.
        5. Ask a question and print the generated answer.
    """
    # 1. Initialize logging before anything else emits messages.
    Settings.setup_logging()

    # 2. Vector store: HuggingFace embedding model, persisted in ChromaDB.
    vector_store_config = VectorStoreConfig(
        embedding_model=Settings.DEFAULT_EMBEDDINGS_MODEL,
        provider=Settings.HUGGINGFACE,
        database=Settings.CHROMA,
        persist_directory="./defaultDb",  # on-disk ChromaDB location
        collection_name=Settings.DEFAULT_COLLECTION_NAME,
    )

    # 3. RAG pipeline: Ollama provider with the library's default LLM.
    config = RAGConfig(
        provider=Settings.OLLAMA,
        llm=Settings.DEFAULT_LLM,
        k=5,  # number of retrieved chunks passed to the LLM as context
        knowledge_base=[FolderSource(path="./data")],
    )

    # 4. Build the pipeline: ingests ./data and populates the vector store.
    pipeline = RAGPipeline(config, vector_store_config)
    pipeline.build()

    # 5. Query the pipeline and show the answer.
    response = pipeline.generate("What are the key takeaways from these documents?")
    print("\n🤖 Answer:", response)


# Entry guard: importing this module must not trigger ingestion or LLM calls.
if __name__ == "__main__":
    main()