File size: 989 Bytes
357b0b8
 
a78bf29
357b0b8
 
 
 
6d88167
 
 
357b0b8
 
fcc5cad
 
6d88167
fcc5cad
3c96867
4a841d0
 
 
 
fcc5cad
6d88167
357b0b8
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
import dashboard_text2image
import dashboard_image2image
import dashboard_featurefinder

import streamlit as st

# Registry mapping each sidebar label to the module that renders that page;
# every registered module is expected to expose an app() entry point.
PAGES = {
    "Retrieve Images given Text": dashboard_text2image,
    "Retrieve Images given Image": dashboard_image2image,
    "Find Feature in Image": dashboard_featurefinder,
}

# Shared sidebar chrome: project title, thumbnail image, and a short blurb
# describing the fine-tuned CLIP model and linking to the model card / repo.
st.sidebar.title("CLIP-RSICD")
st.sidebar.image("thumbnail.jpg")
st.sidebar.markdown("""
    We have fine-tuned the CLIP model (see [Model card](https://huggingface.co/flax-community/clip-rsicd-v2)) 
    using remote sensing images and captions from the [RSICD dataset](https://github.com/201528014227051/RSICD_optimal). 
    The CLIP model from OpenAI is trained in a self-supervised manner using contrastive learning to project images 
    and caption text onto a common embedding space.

    Please click here for [more information about our project](https://github.com/arampacha/CLIP-rsicd).

""")

# Let the user pick a page via a radio control, then hand off rendering to
# the chosen module's app() function.
choice = st.sidebar.radio("Go to", list(PAGES))
PAGES[choice].app()