import streamlit as st

st.set_page_config(page_title="Pricing for scalar and binary embeddings", page_icon=":floppy_disk:", layout="wide", menu_items={'Report a bug': "mailto:[email protected]"})

# Conversion factor: 1 GB = 1024**3 bytes.
bytes2gb = 1024**3

st.title("***Pricing model with scalar and binary embeddings***")
st.write("***Akim Mousterou*** (April 2024) *[LinkedIn](https://www.linkedin.com/in/akim-mousterou/), [HuggingFace](https://huggingface.co/Akimfromparis), and [GitHub](https://github.com/AkimParis)*")

st.write("*The real democratization of AI can only be achieved by a powerful open-source ecosystem and low prices for memory/GPU usage. Thanks to quantization, we can say goodbye to float32 and hello to binary! Compression-friendly embedding models stored in int8 and binary can cut memory, storage, and costs by up to 4x and 32x respectively. Binary quantization reaches roughly 32x compute efficiency while retaining ~96% of retrieval performance by binarizing embedding values to 0 or 1, computing the Hamming distance in as little as 2 CPU cycles, and applying the rescoring (rerank) step of [Yamada et al. (2021)](https://arxiv.org/abs/2106.00882). Scalar and binary embeddings deliver strong retrieval efficiency with only minimal degradation in quality, making them a great fit for NLP downstream tasks, semantic search, recommendation systems, and retrieval-augmented generation.*")

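# Illustrative sketch (not part of the app's UI): how the binary quantization and
# Hamming distance mentioned above typically work. Each dimension is collapsed to a
# single bit (here: 1 if the value is positive, else 0), and the distance between two
# codes is the number of differing bits, i.e. popcount(XOR). The threshold-at-zero
# rule and the pure-Python bit packing are simplifying assumptions for illustration,
# not the exact method of any particular embedding provider.
def binarize(vector):
    """Pack a list of floats into one Python int, one bit per dimension."""
    code = 0
    for value in vector:
        code = (code << 1) | (1 if value > 0 else 0)
    return code

def hamming_distance(code_a, code_b):
    """Number of differing bits between two binary codes (popcount of the XOR)."""
    return bin(code_a ^ code_b).count("1")

# Example: hamming_distance(binarize([0.3, -0.1, 0.7, 0.2]), binarize([0.5, 0.4, -0.2, 0.1])) == 2
# In practice, the top candidates found this way are then rescored (reranked) against
# the non-binary query embedding to recover most of the retrieval quality.
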
st.divider()

col1, col2 = st.columns([1, 1])
with col1:
    cloud_price = st.slider("Price of memory per GB per month: *From $0 to $20 (default $3.80/GB/month, estimated from AWS x2gd instances)*", 0.0, 20.00, 3.8)
with col2:
    docs = st.slider("Number of vector embeddings: *From 100 million to 1 billion (default 250 million)*", 100000000, 1000000000, 250000000, step=10000000)

st.divider()

col3, col4, col5, col6, col7, col8, col9, col10 = st.columns([1, 1, 1, 1, 1, 1, 1, 1])

with col3:
    st.write("***Embedding dimension***")
with col4:
    st.write("***Memory usage in GB***")
with col5:
    st.write("***Price on a monthly basis***")
with col6:
    st.write("***Price on a yearly basis***")
with col7:
    st.write("***Int8 memory*** (div. by 4)")
with col8:
    st.write("***Int8 price*** (div. by 4)")
with col9:
    st.write("***Binary memory*** (div. by 32)")
with col10:
    st.write("***Binary price*** (div. by 32)")

col11, col12, col13, col14, col15, col16, col17, col18 = st.columns([1, 1, 1, 1, 1, 1, 1, 1])
with col11:
    st.write("***384***")
    st.write("***512***")
    st.write("***768***")
    st.write("***1024***")
    st.write("***1536***")
    st.write("***2048***")
    st.write("***3072***")
    st.write("***4096***")

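# Worked example of the memory formula used in the columns below: a float32 embedding
# stores 4 bytes per dimension, so memory_gb = dims * 4 * num_vectors / 1024**3.
# For the default 250,000,000 vectors at 1,024 dimensions:
#   250_000_000 * 1024 * 4 / 1024**3 ≈ 953.67 GB
# int8 keeps 1 byte per dimension (4x smaller) and binary keeps 1 bit (32x smaller),
# which is where the "div. by 4" and "div. by 32" columns come from.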
with col12:
    dim_1 = ((384 * 4) * docs) / bytes2gb
    st.write(str(round(dim_1, 2)) + " GB")

    dim_2 = ((512 * 4) * docs) / bytes2gb
    st.write(str(round(dim_2, 2)) + " GB")

    dim_3 = ((768 * 4) * docs) / bytes2gb
    st.write(str(round(dim_3, 2)) + " GB")

    dim_4 = ((1024 * 4) * docs) / bytes2gb
    st.write(str(round(dim_4, 2)) + " GB")

    dim_5 = ((1536 * 4) * docs) / bytes2gb
    st.write(str(round(dim_5, 2)) + " GB")

    dim_6 = ((2048 * 4) * docs) / bytes2gb
    st.write(str(round(dim_6, 2)) + " GB")

    dim_7 = ((3072 * 4) * docs) / bytes2gb
    st.write(str(round(dim_7, 2)) + " GB")

    dim_8 = ((4096 * 4) * docs) / bytes2gb
    st.write(str(round(dim_8, 2)) + " GB")

with col13:
    price_month_1 = dim_1 * cloud_price
    st.write(str(round(price_month_1, 2)) + " $")

    price_month_2 = dim_2 * cloud_price
    st.write(str(round(price_month_2, 2)) + " $")

    price_month_3 = dim_3 * cloud_price
    st.write(str(round(price_month_3, 2)) + " $")

    price_month_4 = dim_4 * cloud_price
    st.write(str(round(price_month_4, 2)) + " $")

    price_month_5 = dim_5 * cloud_price
    st.write(str(round(price_month_5, 2)) + " $")

    price_month_6 = dim_6 * cloud_price
    st.write(str(round(price_month_6, 2)) + " $")

    price_month_7 = dim_7 * cloud_price
    st.write(str(round(price_month_7, 2)) + " $")

    price_month_8 = dim_8 * cloud_price
    st.write(str(round(price_month_8, 2)) + " $")

with col14:
    price_year_1 = price_month_1 * 12
    st.write(str(round(price_year_1, 2)) + " $")

    price_year_2 = price_month_2 * 12
    st.write(str(round(price_year_2, 2)) + " $")

    price_year_3 = price_month_3 * 12
    st.write(str(round(price_year_3, 2)) + " $")

    price_year_4 = price_month_4 * 12
    st.write(str(round(price_year_4, 2)) + " $")

    price_year_5 = price_month_5 * 12
    st.write(str(round(price_year_5, 2)) + " $")

    price_year_6 = price_month_6 * 12
    st.write(str(round(price_year_6, 2)) + " $")

    price_year_7 = price_month_7 * 12
    st.write(str(round(price_year_7, 2)) + " $")

    price_year_8 = price_month_8 * 12
    st.write(str(round(price_year_8, 2)) + " $")

with col15:
    int8_mem_1 = dim_1 / 4
    st.write(str(round(int8_mem_1, 2)) + " GB")

    int8_mem_2 = dim_2 / 4
    st.write(str(round(int8_mem_2, 2)) + " GB")

    int8_mem_3 = dim_3 / 4
    st.write(str(round(int8_mem_3, 2)) + " GB")

    int8_mem_4 = dim_4 / 4
    st.write(str(round(int8_mem_4, 2)) + " GB")

    int8_mem_5 = dim_5 / 4
    st.write(str(round(int8_mem_5, 2)) + " GB")

    int8_mem_6 = dim_6 / 4
    st.write(str(round(int8_mem_6, 2)) + " GB")

    int8_mem_7 = dim_7 / 4
    st.write(str(round(int8_mem_7, 2)) + " GB")

    int8_mem_8 = dim_8 / 4
    st.write(str(round(int8_mem_8, 2)) + " GB")

with col16:
    int8_price_1 = price_month_1 / 4
    st.write(str(round(int8_price_1, 2)) + " $")

    int8_price_2 = price_month_2 / 4
    st.write(str(round(int8_price_2, 2)) + " $")

    int8_price_3 = price_month_3 / 4
    st.write(str(round(int8_price_3, 2)) + " $")

    int8_price_4 = price_month_4 / 4
    st.write(str(round(int8_price_4, 2)) + " $")

    int8_price_5 = price_month_5 / 4
    st.write(str(round(int8_price_5, 2)) + " $")

    int8_price_6 = price_month_6 / 4
    st.write(str(round(int8_price_6, 2)) + " $")

    int8_price_7 = price_month_7 / 4
    st.write(str(round(int8_price_7, 2)) + " $")

    int8_price_8 = price_month_8 / 4
    st.write(str(round(int8_price_8, 2)) + " $")

with col17:
    binary_mem_1 = dim_1 / 32
    st.write(str(round(binary_mem_1, 2)) + " GB")

    binary_mem_2 = dim_2 / 32
    st.write(str(round(binary_mem_2, 2)) + " GB")

    binary_mem_3 = dim_3 / 32
    st.write(str(round(binary_mem_3, 2)) + " GB")

    binary_mem_4 = dim_4 / 32
    st.write(str(round(binary_mem_4, 2)) + " GB")

    binary_mem_5 = dim_5 / 32
    st.write(str(round(binary_mem_5, 2)) + " GB")

    binary_mem_6 = dim_6 / 32
    st.write(str(round(binary_mem_6, 2)) + " GB")

    binary_mem_7 = dim_7 / 32
    st.write(str(round(binary_mem_7, 2)) + " GB")

    binary_mem_8 = dim_8 / 32
    st.write(str(round(binary_mem_8, 2)) + " GB")

with col18:
    binary_price_1 = price_month_1 / 32
    st.write(str(round(binary_price_1, 2)) + " $")

    binary_price_2 = price_month_2 / 32
    st.write(str(round(binary_price_2, 2)) + " $")

    binary_price_3 = price_month_3 / 32
    st.write(str(round(binary_price_3, 2)) + " $")

    binary_price_4 = price_month_4 / 32
    st.write(str(round(binary_price_4, 2)) + " $")

    binary_price_5 = price_month_5 / 32
    st.write(str(round(binary_price_5, 2)) + " $")

    binary_price_6 = price_month_6 / 32
    st.write(str(round(binary_price_6, 2)) + " $")

    binary_price_7 = price_month_7 / 32
    st.write(str(round(binary_price_7, 2)) + " $")

    binary_price_8 = price_month_8 / 32
    st.write(str(round(binary_price_8, 2)) + " $")

st.write('***Disclaimer:*** *The financial projections above are based on ["Cohere int8 & binary Embeddings - Scale Your Vector Database to Large Datasets"](https://cohere.com/blog/int8-binary-embeddings) by Nils Reimers of [Cohere](https://cohere.com/). The cost of the index and the metadata may not be factored into the calculation.*')

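# Illustrative sketch (not part of the app's UI): one common way to produce the int8
# ("scalar") embeddings referenced above is per-dimension min/max calibration over a
# sample of vectors, then mapping each float to one of 256 buckets. The calibration
# scheme here is an assumption for illustration only, not the exact method used by
# Cohere or any other provider; it simply shows why int8 needs 1 byte per dimension
# instead of float32's 4 bytes.
def int8_quantize(vector, dim_min, dim_max):
    """Map floats to integers in [-128, 127] using per-dimension min/max bounds."""
    quantized = []
    for value, lo, hi in zip(vector, dim_min, dim_max):
        scale = (hi - lo) / 255 if hi > lo else 1.0
        bucket = round((value - lo) / scale) - 128  # 0..255 shifted to -128..127
        quantized.append(max(-128, min(127, bucket)))
    return quantized

# Example: int8_quantize([0.0, 1.0], dim_min=[-1.0, -1.0], dim_max=[1.0, 1.0]) == [0, 127]
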
st.divider()
st.write("***- Open-source vector databases for scalar and binary quantization:***")
col19, col20 = st.columns([1, 1])
with col19:
    st.write("- [FAISS](https://github.com/facebookresearch/faiss) from :flag-us:")
    st.write("- [VESPA AI](https://github.com/vespa-engine/vespa) from :flag-no:")
    st.write("- [Pgvector](https://github.com/pgvector/pgvector) from :flag-us:")
    st.write("- [Milvus](https://github.com/milvus-io/milvus) from :flag-cn:")
    st.write("- [Usearch](https://github.com/unum-cloud/usearch) from :flag-us:")
with col20:
    st.write("- [Qdrant](https://github.com/qdrant) from :flag-de:")
    st.write("- [pgvecto.rs](https://github.com/tensorchord/pgvecto.rs) from :flag-cn:")
    st.write("- [TencentVectorDB](https://github.com/Tencent/vectordatabase-sdk-python) from :flag-cn:")
    st.write("- [BinaryVectorDB](https://github.com/cohere-ai/BinaryVectorDB) from :flag-ca:")
    st.write("- [Weaviate](https://github.com/weaviate/weaviate) from :flag-de:")

st.divider()
st.write("***- For further reading:***")
st.write("- [Billion-scale similarity search with GPUs](https://arxiv.org/abs/1702.08734)")
st.write("- [Efficient Passage Retrieval with Hashing for Open-domain Question Answering](https://arxiv.org/abs/2106.00882)")
st.write("- [Matryoshka Representation Learning](https://arxiv.org/abs/2205.13147)")
st.write("- [Incorporating Relevance Feedback for Information-Seeking Retrieval using Few-Shot Document Re-Ranking](https://arxiv.org/abs/2210.10695)")
st.write("- [Binary Embedding-based Retrieval at Tencent](https://arxiv.org/abs/2302.08714)")

st.divider()
st.write("***Akim Mousterou*** (April 2024) *[LinkedIn](https://www.linkedin.com/in/akim-mousterou/), [HuggingFace](https://huggingface.co/Akimfromparis), and [GitHub](https://github.com/AkimParis)*")