raj999 committed on
Commit 51999bc
1 Parent(s): a7d1161

Update app.py

Files changed (1)
  1. app.py +202 -29
app.py CHANGED
@@ -1,39 +1,212 @@
- import gradio as gr
  from deepforest import main
  import matplotlib.pyplot as plt

- # Initialize the deepforest model and use the released version
  model = main.deepforest()
  model.use_release()

- def predict_and_visualize(image):
      """
-     Function to predict and visualize the image using deepforest model.
-
-     Args:
-     - image: An image array.
-
-     Returns:
-     - An image with predictions visualized.
      """
-     # Predict image and return plot. Since Gradio passes image as array, save it temporarily.
-     temp_path = "/tmp/uploaded_image.png"
-     plt.imsave(temp_path, image)
-     img = model.predict_image(path=temp_path, return_plot=True)

-     # Since the output is BGR and matplotlib (and hence Gradio) needs RGB, we convert the color scheme
-     img_rgb = img[:, :, ::-1]

-     # Return the RGB image
-     return img_rgb
-
- # Define the Gradio interface
- iface = gr.Interface(fn=predict_and_visualize,
-                      inputs=gr.Image(type="numpy", label="Upload Image"),
-                      outputs=gr.Image(label="Predicted Image"),
-                      title="DeepForest Tree Detection",
-                      examples=["./example.jpg"],
-                      description="Upload an image to detect trees using the DeepForest model.")
-
- # Launch the Gradio app
- iface.launch()
+ import streamlit as st
+ from PIL import Image
+ import os
  from deepforest import main
+ from deepforest import get_data
  import matplotlib.pyplot as plt
+ # from predict import extract_features, predict_similarity, compare_features, extract_features_cp
+ import os, re
+ import streamlit as st
+ import pandas as pd
+ from PIL import Image
+ import tempfile
+ from inference import split_image_from_dataframe
+ from datetime import datetime
+ from predict_vit import extract_features, predict_similarity, compare_features, extract_features_cp
+ from rag import generate_image, setup_client, setup_retriever
+ from predict_copy import extract_features_with_augmentation, extract_features_with_augmentation_cp
+
+
+ import rasterio
+ import geopandas as gpd

  model = main.deepforest()
  model.use_release()

+ # Set the page configuration
+ st.set_page_config(page_title="Wise-Vision", page_icon=":deciduous_tree:")
+
+ # Title and description
+ st.title("🌳 Wise-Vision")
+ st.subheader("AI + Environment Hackathon 2024")
+
+ # Sidebar information
+ st.sidebar.title("About")
+ st.sidebar.info(
      """
+     This app is designed for the AI + Environment Hackathon 2024.
+     Upload a panoramic image and specify a folder path to detect tree species in the image.
+     Upload a word file to integrate knowledge into the image.
+     Output will be a panoramic image with identified trees and knowledge symbols.
+     """
+ )
+
+ st.sidebar.title("Contact")
+ st.sidebar.info(
      """
+     For more information, contact us at:
+
+     """
+ )
+
+
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+
+ # Create a new folder within the script directory for storing cropped images
+ timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
+ output_folder_name = f"output_{timestamp}"
+ output_image_folder = os.path.join(script_dir, output_folder_name)
+ os.makedirs(output_image_folder, exist_ok=True)
+ output_image_folder = os.path.abspath(output_image_folder)
+ # Define paths for the image and Excel file within the new folder
+ cropped_image_path = os.path.join(output_image_folder, f"panoramic_{timestamp}.png")
+ excel_output_path = os.path.join(output_image_folder, f"results_{timestamp}.xlsx")
+
+ # Input: Upload panoramic image
+ uploaded_image = st.file_uploader("Upload a panoramic image", type=['png', 'jpeg', 'JPG'])
+
+ # Input: Folder path for tree species detection
+
+ def extract_treespecies_features(folder_path):
+     image_files = [os.path.join(folder_path, f) for f in os.listdir(folder_path) if f.endswith(('png', 'jpg', 'jpeg', '.JPG'))]
+
+     species_feature_list = [{"feature": extract_features_with_augmentation(file), "file_name": file} for file in image_files]
+     return species_feature_list
+
+
+ # print(species_feature_list[:2])
+ def perform_inference(cropped_images, species_feature_list, img_df):
+     st.success("Setting up OPENAI Client:")
+     client = setup_client()
+     st.success("Setting up knowledge database & BM25 retriever:")
+     retriever = setup_retriever()
+     st.success("Setting up BM25 Retriever:")
+     for img_idx, item in enumerate(cropped_images):
+         image = item["image"]
+         feature_cp = extract_features_with_augmentation_cp(image)
+         row_results = []
+         species_result = []
+         emoji = []
+         species_context = []
+         for idx, species in enumerate(species_feature_list):
+             # euclidean_dist, cos_sim = compare_features(feature_cp, species["feature"])
+             # print(f'Euclidean Distance: {euclidean_dist}')
+             # print(f'Cosine Similarity: {cos_sim}')
+
+             # Predict similarity
+             is_similar = predict_similarity(feature_cp, species["feature"], threshold=0.92)
+             # print(species)
+             # print(f'Are the images similar? {"Yes" if is_similar else "No"}')
+
+             result = "Yes" if is_similar else "No"
+
+             if result == "Yes":
+                 item[f"result_{idx}"] = result
+                 item[f"file_name_{idx}"] = species["file_name"]
+                 row_results.append(species["file_name"])
+                 # Regular expression to match the tree species name
+                 species_pattern = r'identified_species\\([^\\]+) -'
+
+                 # Search for the pattern in the file path
+                 match = re.search(species_pattern, species["file_name"])
+
+                 # Extract and print the tree species name if found
+                 if match:
+                     tree_species = match.group(1)
+                     species_info = retriever.invoke(f"Scientific name:{tree_species}")
+
+                     ans = generate_image(species_info, client)
+                     emoji.append(ans)
+                     text_context = [doc.page_content for doc in species_info]
+                     text_context = ", ".join(text_context)
+                     species_context.append(text_context)
+                     # print(ans)
+                     species_result.append(tree_species)
+
+                 else:
+                     print("Tree species name not found.")
+         img_df.at[img_idx, "species_identified"] = ", ".join(species_result) if species_result else "No similar species found"
+         img_df.at[img_idx, "result_file_path"] = ", ".join(row_results) if row_results else ""
+         img_df.at[img_idx, "emoji"] = ", ".join(emoji) if emoji else ""
+         img_df.at[img_idx, "retreived context"] = ", ".join(species_context) if species_context else ""
+
+
+     return cropped_images
+
+
+ # Function to simulate tree species detection
+
+ # Display uploaded image and detected tree species
+ if uploaded_image is not None:
+     with tempfile.NamedTemporaryFile(delete=False, suffix='.JPG') as temp_file:
+         temp_file.write(uploaded_image.read())
+         temp_file_path = temp_file.name
+     # Open and display the image
+     # image = Image.open(uploaded_image)
+     sample_image_path = get_data(temp_file_path)
+     boxes = model.predict_image(path=sample_image_path, return_plot=False)
+     img_actual = model.predict_image(path=sample_image_path, return_plot=True, color=(137, 0, 0), thickness=9)
+     st.image(img_actual, caption='Segmented Panoramic Image', channels ='RGB', use_column_width=True)
+     st.success("Sample Dataframe:")
+     st.dataframe(boxes.head())
+     plt.imshow(img_actual[:,:,::-1])
+     # plt.show(img[:,:,::-1])
+     plt.savefig(cropped_image_path)
+     # if st.button("Next Step"):
+
+     accuracy_threshold = st.slider("Accuracy threshold for cropping images:",min_value=0.1, max_value=1.0, value=0.4)
+     images_list = split_image_from_dataframe(boxes, temp_file_path, output_folder_name)
+     image_width = 200
+     st.success("Sample Images:")
+     # Display the images in a row
+     col1, col2, col3 = st.columns(3)
+
+     with col1:
+         st.image(images_list[3]["image"], caption="Sample 1", width=image_width)
+
+     with col2:
+         st.image(images_list[4]["image"], caption="Sample 2", width=image_width)
+
+     with col3:
+         st.image(images_list[5]["image"], caption="Sample 3", width=image_width)

+     folder_path = 'D:/Downloads/image/plant_images/plant_images/drone_igapo_flooded_forest/identified_species'
+
+     species_feature_list = extract_treespecies_features(folder_path)
+     final_result = perform_inference(images_list, species_feature_list, boxes)
+     st.success("Final Data:")
+     st.dataframe(boxes)
+     boxes.to_excel(excel_output_path)
+     for index, row in boxes.iterrows():
+         species_identified = row['species_identified']
+         if species_identified !="No similar species found":
+             cropped_image_path = row['cropped_image_path']
+             result_file_path = row['result_file_path']
+             if type(result_file_path) == list:
+                 result_file_path = result_file_path[0]
+
+
+             result_file_path = result_file_path.split(',')[0]
+             st.write(species_identified)
+             col1, col2 = st.columns(2)
+             with col1:
+                 st.image(cropped_image_path, caption='Cropped Image')
+             with col2:
+                 st.image(result_file_path, caption='Species Match')
+
+
+
+
+
+

+     # Detect tree species
+     # detected_species = detect_tree_species(image, folder_path)
+
+     # Display detected tree species
+     # st.write("### Detected Tree Species:")
+     # for species in detected_species:
+     #     st.write(f"- {species}")
+
+
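For orientation: the new app's species matching hinges on predict_similarity(feature_cp, species["feature"], threshold=0.92), imported from the local predict_vit module, which is not included in this commit. A minimal sketch of a function with that call shape, assuming the features are 1-D embedding vectors compared by cosine similarity (the actual module may work differently), could look like:

import numpy as np

def predict_similarity(feature_a, feature_b, threshold=0.92):
    # Hypothetical sketch only; the real predict_vit.predict_similarity is not part of this diff.
    # Assumes both features are 1-D numpy arrays (e.g. ViT embeddings).
    a = np.ravel(feature_a)
    b = np.ravel(feature_b)
    cos_sim = float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12))
    return cos_sim >= threshold

In app.py, that boolean return is what flips a crop's result to "Yes" and records the matching reference image shown next to the cropped crown.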
+