taesiri committed
Commit f96280e
1 Parent(s): defdeae
.gitattributes CHANGED
@@ -33,3 +33,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+examples/frame_000168.jpg filter=lfs diff=lfs merge=lfs -text
+examples/1727808849.jpg filter=lfs diff=lfs merge=lfs -text
+examples/1727809389.jpg filter=lfs diff=lfs merge=lfs -text
+examples/Birch[[:space:]]MWF014-0001.jpg filter=lfs diff=lfs merge=lfs -text
+examples/frame_000036.jpg filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
 title: FireNet LLama 3.2
-emoji: 😻
+emoji: 🔥
 colorFrom: gray
 colorTo: red
 sdk: gradio
app.py CHANGED
@@ -1,14 +1,16 @@
+import base64
+import io
+import json
 import os
+
 import gradio as gr
+import matplotlib.pyplot as plt
+import spaces
 import torch
-from PIL import Image
-from transformers import MllamaForConditionalGeneration, AutoProcessor
-from peft import PeftModel
 from huggingface_hub import login
-import json
-import matplotlib.pyplot as plt
-import io
-import base64
+from peft import PeftModel
+from PIL import Image
+from transformers import AutoProcessor, MllamaForConditionalGeneration


 def check_environment():
@@ -22,44 +24,19 @@ def check_environment():
         )


-# # Login to Hugging Face
-# check_environment()
-# login(token=os.environ["HF_TOKEN"], add_to_git_credential=True)
-
-# Load model and processor (do this outside the inference function to avoid reloading)
-# base_model_path = (
-#     "taesiri/BugsBunny-LLama-3.2-11B-Vision-BaseCaptioner-Medium-FullModel"
-# )
+# Login to Hugging Face
+check_environment()
+login(token=os.environ["HF_TOKEN"], add_to_git_credential=True)

-# processor = AutoProcessor.from_pretrained(base_model_path)
-# model = MllamaForConditionalGeneration.from_pretrained(
-#     base_model_path,
-#     torch_dtype=torch.bfloat16,
-#     device_map="cuda",
-#     cache_dir="./",
-# )
-# #
-# odel = PeftModel.from_pretrained(model, lora_weights_path)

-
-from transformers import MllamaForConditionalGeneration, AutoProcessor
 import torch
+from transformers import AutoProcessor, MllamaForConditionalGeneration

-
-local_model_path = "../merged-llama-3.2-dummy"
-
-# Load model and processor (do this outside the inference function to avoid reloading)
-base_model_path = (
-    local_model_path
-)
-# lora_weights_path = "taesiri/BugsBunny-LLama-3.2-11B-Vision-Base-Medium-LoRA"
+base_model_path = "taesiri/FireNet-LLama-3.2-11B-Vision-Base"

 processor = AutoProcessor.from_pretrained(base_model_path)
 model = MllamaForConditionalGeneration.from_pretrained(
-    base_model_path,
-    torch_dtype=torch.bfloat16,
-    device_map="cuda",
-    cache_dir="./"
+    base_model_path, torch_dtype=torch.bfloat16, device_map="cuda"
 )

 model.tie_weights()
@@ -94,6 +71,7 @@ def create_color_palette_image(colors):
     return None


+@spaces.GPU
 def inference(image):
     if image is None:
         return ["Please provide an image"] * 4
@@ -111,7 +89,10 @@ def inference(image):
             "role": "user",
             "content": [
                 {"type": "image"},
-                {"type": "text", "text": "Analyze this image for fire, smoke, haze, or other related conditions."},
+                {
+                    "type": "text",
+                    "text": "Analyze this image for fire, smoke, haze, or other related conditions.",
+                },
             ],
         }
     ]
@@ -139,12 +120,12 @@ def inference(image):
     try:
         json_str = result.strip().split("assistant\n")[1].strip()
         parsed_json = json.loads(json_str)
-
+
         # Create specific JSON subsets for each section
         fire_analysis = {
             "predictions": parsed_json.get("predictions", "N/A"),
             "description": parsed_json.get("description", "No description available"),
-            "confidence_scores": parsed_json.get("confidence_score", {})
+            "confidence_scores": parsed_json.get("confidence_score", {}),
         }

         environment_analysis = {
@@ -153,12 +134,14 @@ def inference(image):

         detection_analysis = {
             "detections": parsed_json.get("detections", []),
-            "detection_count": len(parsed_json.get("detections", []))
+            "detection_count": len(parsed_json.get("detections", [])),
         }

         report_analysis = {
             "uncertainty_factors": parsed_json.get("uncertainty_factors", []),
-            "false_positive_indicators": parsed_json.get("false_positive_indicators", [])
+            "false_positive_indicators": parsed_json.get(
+                "false_positive_indicators", []
+            ),
         }

         return (
@@ -169,7 +152,7 @@ def inference(image):
             json_str,
             "",
             "Analysis complete",
-            parsed_json
+            parsed_json,
         )
     except Exception as e:
         print("DEBUG: Error processing response:", e)
@@ -181,7 +164,7 @@ def inference(image):
             str(result),
             str(e),
             "Error",
-            {}
+            {},
         )


@@ -197,17 +180,19 @@ with gr.Blocks() as demo:
                 elem_id="large-image",
             )
             submit_btn = gr.Button("Analyze Image", variant="primary")
-
-            # Add examples here
+
+            # Updated examples
            gr.Examples(
                 examples=[
-                    "examples/Birch MWF014-0001.png",
-                    "examples/Birch MWF014-0006.png",
-                    "examples/Blackstone PB-0010.png",
+                    "examples/1727808849.jpg",
+                    "examples/1727809389.jpg",
+                    "examples/Birch MWF014-0001.jpg",
+                    "examples/frame_000036.jpg",
+                    "examples/frame_000168.jpg",
                 ],
                 inputs=image_input,
                 label="Example Images",
-                examples_per_page=4
+                examples_per_page=5,
             )

     with gr.Tabs() as tabs:
@@ -216,30 +201,26 @@ with gr.Blocks() as demo:
                 with gr.Column():
                     fire_output = gr.JSON(
                         label="Fire Details",
-                        lines=4,
                     )
                 with gr.Column():
                     environment_output = gr.JSON(
                         label="Environment Details",
-                        lines=4,
                     )
             with gr.Row():
                 with gr.Column():
                     detection_output = gr.JSON(
                         label="Detection Details",
-                        lines=4,
                     )
                 with gr.Column():
                     report_output = gr.JSON(
                         label="Report Details",
-                        lines=4,
                     )

         with gr.Tab("JSON Output", id=0):
             json_output = gr.JSON(
                 label="Detailed JSON Results",
             )
-
+
         with gr.Tab("Raw Output"):
             raw_output = gr.Textbox(
                 label="Raw JSON Response",
@@ -264,4 +245,4 @@ with gr.Blocks() as demo:
         ],
     )

-demo.launch(share=True)
+demo.launch(share=True)
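For context, a minimal sketch of how the pieces changed above fit together at inference time: the @spaces.GPU-decorated function feeds one image plus the fire-analysis prompt through the module-level processor and model, then parses the JSON that follows the final assistant turn. The message format, prompt text, and the split("assistant\n") parsing are taken from the diff; the analyze helper name, the apply_chat_template/generate call pattern, and max_new_tokens=512 are assumptions based on standard Llama 3.2 Vision usage, not necessarily the exact body of the Space's inference().

# Sketch only (not part of the commit). `processor` and `model` are the
# module-level objects created in app.py above.
import json

from PIL import Image


def analyze(image_path):
    image = Image.open(image_path).convert("RGB")
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image"},
                {
                    "type": "text",
                    "text": "Analyze this image for fire, smoke, haze, or other related conditions.",
                },
            ],
        }
    ]
    # Render the chat template to text, then batch text + image together.
    prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
    inputs = processor(image, prompt, return_tensors="pt").to(model.device)
    # max_new_tokens is an assumed value, not taken from the commit.
    output = model.generate(**inputs, max_new_tokens=512)
    result = processor.decode(output[0], skip_special_tokens=True)
    # The model is expected to emit JSON after the final "assistant" marker.
    json_str = result.strip().split("assistant\n")[1].strip()
    return json.loads(json_str)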
examples/1727808849.jpg ADDED
Git LFS Details
  • SHA256: 952b935f195b46f27bbc50ca7df7d4fa13fc1897d9c804fe1270aed4150a2f62
  • Pointer size: 131 Bytes
  • Size of remote file: 747 kB

examples/1727809389.jpg ADDED
Git LFS Details
  • SHA256: c77a402ef1a5da2c4cbeb60617df9e1901bf491af40b45586e34e48b49844d3d
  • Pointer size: 132 Bytes
  • Size of remote file: 1.76 MB

examples/Birch MWF014-0001.jpg ADDED
Git LFS Details
  • SHA256: 5ea60107c481362c8a340f4f0993937d4308c20cbd3e23d798ec4dabba5afdd1
  • Pointer size: 131 Bytes
  • Size of remote file: 447 kB

examples/frame_000036.jpg ADDED
Git LFS Details
  • SHA256: f2db5dc927577eda7e234fd063ab5c0d66b2d10e29a2bbcaa053ecbbbc00ec83
  • Pointer size: 131 Bytes
  • Size of remote file: 767 kB

examples/frame_000168.jpg ADDED
Git LFS Details
  • SHA256: bf2febe22a5efe9e82376585902380046790bbf3c8fa06445563225d4d5374a4
  • Pointer size: 131 Bytes
  • Size of remote file: 154 kB
requirements.txt ADDED
@@ -0,0 +1,8 @@
+torch
+torchvision
+datasets
+git+https://github.com/huggingface/transformers.git
+accelerate
+pillow
+gradio
+matplotlib
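A note on the pins above: transformers is installed from the GitHub main branch rather than PyPI, presumably because the Mllama (Llama 3.2 Vision) classes used in app.py only landed in recent transformers releases. peft and spaces, which app.py also imports, are not listed here and are assumed to come from the Space's base image and ZeroGPU runtime. A quick sanity-check sketch:

# Sketch: verify the installed transformers build exposes the Mllama classes
# app.py depends on (older releases predate Llama 3.2 Vision support).
import transformers

print("transformers version:", transformers.__version__)
try:
    from transformers import AutoProcessor, MllamaForConditionalGeneration  # noqa: F401
except ImportError as err:
    raise SystemExit(f"transformers build is too old for Mllama: {err}")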