xinghaochen committed
Commit e53817f
1 Parent(s): 48c1344

Update app.py

Files changed (1)
  1. app.py +36 -32
app.py CHANGED
@@ -39,46 +39,50 @@ description_p = """# Interactive Instance Segmentation
 </ol>
 - Github [link](https://github.com/xinghaochen/TinySAM)
 """
+css = "h1 { text-align: center } .about { text-align: justify; padding-left: 10%; padding-right: 10%; }"
 
 def infer(img):
-    if img is None:
-        gr.Error("Please upload an image and select a point.")
-    if img["background"] is None:
-        gr.Error("Please upload an image and select a point.")
-    # background (original image) layers[0] ( point prompt) composite (total image)
-    image = img["background"].convert("RGB")
-    point_prompt = img["layers"][0]
-    total_image = img["composite"]
-    predictor.set_image(np.array(image))
+    if img is None:
+        gr.Error("Please upload an image and select a point.")
+    if img["background"] is None:
+        gr.Error("Please upload an image and select a point.")
+    # background (original image) layers[0] ( point prompt) composite (total image)
+    image = img["background"].convert("RGB")
+    point_prompt = img["layers"][0]
+    total_image = img["composite"]
+    predictor.set_image(np.array(image))
+    print("point_prompt : ", point_prompt)
 
-    # get point prompt
-    img_arr = np.array(point_prompt)
-    if not np.any(img_arr):
-        gr.Error("Please select a point on top of the image.")
-    else:
-        nonzero_indices = np.nonzero(img_arr)
+    # get point prompt
     img_arr = np.array(point_prompt)
-        nonzero_indices = np.nonzero(img_arr)
-        center_x = int(np.mean(nonzero_indices[1]))
-        center_y = int(np.mean(nonzero_indices[0]))
-        input_point = np.array([[center_x, center_y]])
-        input_label = np.array([1])
-        masks, scores, logits = predictor.predict(
-            point_coords=input_point,
-            point_labels=input_label,
-        )
-        result_label = [(masks[scores.argmax(), :, :], "mask")]
-        return image, result_label
+    if not np.any(img_arr):
+        gr.Error("Please select a point on top of the image.")
+    else:
+        nonzero_indices = np.nonzero(img_arr)
+        img_arr = np.array(point_prompt)
+        nonzero_indices = np.nonzero(img_arr)
+        center_x = int(np.mean(nonzero_indices[1]))
+        center_y = int(np.mean(nonzero_indices[0]))
+        input_point = np.array([[center_x, center_y]])
+        input_label = np.array([1])
+        masks, scores, logits = predictor.predict(
+            point_coords=input_point,
+            point_labels=input_label,
+        )
+        result_label = [(masks[scores.argmax(), :, :], "mask")]
+        return image, result_label
 
 
-with gr.Blocks() as demo:
-    gr.Markdown("## TinySAM")
-    gr.Markdown("**[TinySAM](https://arxiv.org/abs/2312.13789) is a framework to distill Segment Anything Model.**")
-    gr.Markdown("**To try it out, simply upload an image, click the green tick, and then leave a point mark on what you would like to segment using the pencil on Image Editor.**")
+with gr.Blocks(css=css, title="TinySAM") as demo:
+    with gr.Row():
+        with gr.Column(scale=1):
+            # Title
+            gr.Markdown(title)
     with gr.Row():
         with gr.Column():
             im = gr.ImageEditor(
-                type="pil"
+                type="pil",
+                value=default_example[0]
            )
            output = gr.AnnotatedImage()
            with gr.Row():
@@ -87,7 +91,7 @@ with gr.Blocks() as demo:
            gr.Examples(
                examples=examples,
                inputs=[im],
-                examples_per_page=4,
+                examples_per_page=6,
            )
 
        with gr.Column():
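
For reference, the reworked infer() in this diff boils down to: take the layer the user drew on in the ImageEditor, reduce it to a single positive click at the centroid of its nonzero pixels, and keep the predictor's highest-scoring mask for that click. The sketch below restates that flow as two standalone helpers. The helper names are illustrative, and it assumes predictor is a SAM-style object exposing set_image/predict (as used in the diff) that app.py constructs elsewhere.

```python
from typing import Optional, Tuple

import numpy as np
from PIL import Image


def point_from_sketch(layer: Image.Image) -> Optional[Tuple[int, int]]:
    """Centroid (x, y) of the drawn pixels in the sketch layer, or None if nothing was drawn."""
    arr = np.array(layer)
    if not np.any(arr):
        return None
    nonzero = np.nonzero(arr)
    # nonzero[0] holds row (y) indices, nonzero[1] holds column (x) indices
    return int(np.mean(nonzero[1])), int(np.mean(nonzero[0]))


def best_mask(predictor, image: Image.Image, point_xy: Tuple[int, int]) -> np.ndarray:
    """Single positive click -> highest-scoring mask, mirroring the diff's infer()."""
    predictor.set_image(np.array(image.convert("RGB")))
    masks, scores, _ = predictor.predict(
        point_coords=np.array([point_xy]),  # one (x, y) point
        point_labels=np.array([1]),         # label 1 = foreground click
    )
    return masks[scores.argmax()]
```

With pt = point_from_sketch(img["layers"][0]), the callback's return value corresponds to (image, [(best_mask(predictor, image, pt), "mask")]), which is the (image, result_label) pair that gr.AnnotatedImage renders.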