divimund95 committed
Commit cc8944c
1 Parent(s): 6faa2d6

Add Big-LaMa model exported with CoreMLtools

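For context, a Core ML export of this kind is typically done by tracing the PyTorch Big-LaMa generator and converting it with coremltools. The sketch below shows the general shape of such a conversion; the DummyInpainter stand-in, the 800x800 input shape, and the conversion flags are assumptions rather than the actual export script (the input names "image"/"mask" and the output name "output" match what app.py below expects).

import torch
import coremltools as ct

# Placeholder standing in for the pretrained Big-LaMa generator; a real export
# would load the LaMa checkpoint here instead.
class DummyInpainter(torch.nn.Module):
    def forward(self, image, mask):
        return image * (1 - mask)

example_image = torch.rand(1, 3, 800, 800)  # RGB image (shape assumed from app.py)
example_mask = torch.rand(1, 1, 800, 800)   # single-channel mask

traced = torch.jit.trace(DummyInpainter(), (example_image, example_mask))

mlmodel = ct.convert(
    traced,
    inputs=[
        ct.ImageType(name="image", shape=example_image.shape, color_layout=ct.colorlayout.RGB),
        ct.ImageType(name="mask", shape=example_mask.shape, color_layout=ct.colorlayout.GRAYSCALE),
    ],
    outputs=[ct.ImageType(name="output", color_layout=ct.colorlayout.RGB)],
    convert_to="mlprogram",  # ML Program models are saved as .mlpackage directories
)
mlmodel.save("LaMa.mlpackage")

Saving an ML Program model is what produces the .mlpackage layout added in this commit: a model.mlmodel spec plus a weights folder, tied together by Manifest.json.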
LaMa.mlpackage/Data/com.apple.CoreML/model.mlmodel ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:289f2c611bd3e52805ee3e686e290981d96d3b9674db93fe6bf30962f7e60d87
+ size 1166404
LaMa.mlpackage/Data/com.apple.CoreML/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aae26da8deca02ead81120f1d683b6c38361cd593c5a685e543c4b84726500e1
+ size 204086656
LaMa.mlpackage/Manifest.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "fileFormatVersion": "1.0.0",
+   "itemInfoEntries": {
+     "058403EC-D454-47EC-9C08-D1149DC8311C": {
+       "author": "com.apple.CoreML",
+       "description": "CoreML Model Specification",
+       "name": "model.mlmodel",
+       "path": "com.apple.CoreML/model.mlmodel"
+     },
+     "BCCB46DC-D6B9-4B28-8D24-B59CF8160E49": {
+       "author": "com.apple.CoreML",
+       "description": "CoreML Model Weights",
+       "name": "weights",
+       "path": "com.apple.CoreML/weights"
+     }
+   },
+   "rootModelIdentifier": "058403EC-D454-47EC-9C08-D1149DC8311C"
+ }
app.py ADDED
@@ -0,0 +1,56 @@
+ import gradio as gr
+ import coremltools as ct
+ import numpy as np
+ from PIL import Image
+ import io
+
+ # Load the model
+ coreml_model_file_name = "LaMa.mlpackage"
+ loaded_model = ct.models.MLModel(coreml_model_file_name)
+
+ def inpaint(input_dict):
+     # Resize input image and mask to 800x800
+     input_image = input_dict["background"].convert("RGB").resize((800, 800), Image.LANCZOS)
+     input_mask = pil_to_binary_mask(input_dict['layers'][0].resize((800, 800), Image.NEAREST))
+
+     # Convert mask to grayscale
+     input_mask = input_mask.convert("L")
+
+     # Run inference
+     prediction = loaded_model.predict({"image": input_image, "mask": input_mask})
+
+     # Access the output
+     output_image = prediction["output"]
+
+     return output_image, input_mask
+
+ def pil_to_binary_mask(pil_image, threshold=0):
+     np_image = np.array(pil_image)
+     grayscale_image = Image.fromarray(np_image).convert("L")
+     binary_mask = np.array(grayscale_image) > threshold
+     mask = np.zeros(binary_mask.shape, dtype=np.uint8)
+     for i in range(binary_mask.shape[0]):
+         for j in range(binary_mask.shape[1]):
+             if binary_mask[i, j]:
+                 mask[i, j] = 1
+     mask = (mask * 255).astype(np.uint8)
+     output_mask = Image.fromarray(mask)
+     return output_mask
+
+ # Create Gradio interface
+ with gr.Blocks() as demo:
+     gr.Markdown("# Image Inpainting")
+     gr.Markdown("Upload an image and draw a mask to remove unwanted objects.")
+
+     with gr.Row():
+         input_image = gr.ImageEditor(type="pil", label='Input image & Mask', interactive=True)
+         output_image = gr.Image(type="pil", label="Output Image")
+         with gr.Column():
+             masked_image = gr.Image(label="Masked image", type="pil")
+
+     inpaint_button = gr.Button("Inpaint")
+     inpaint_button.click(fn=inpaint, inputs=[input_image], outputs=[output_image, masked_image])
+
+ # Launch the interface
+ if __name__ == "__main__":
+     demo.launch()
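As a usage note, the inpaint function above can also be driven without the Gradio UI by handing it the same dictionary the ImageEditor component produces: a "background" image plus a list of drawn "layers". The snippet below is only a sketch; the file paths are placeholders, and it assumes the code above is saved as app.py with LaMa.mlpackage next to it (coremltools prediction itself requires macOS).

from PIL import Image, ImageDraw

from app import inpaint  # importing app.py also loads LaMa.mlpackage

# Placeholder input: any RGB photo on disk
background = Image.open("example.jpg").convert("RGB")

# One editing "layer": transparent everywhere, opaque white over the region to erase
layer = Image.new("RGBA", background.size, (0, 0, 0, 0))
draw = ImageDraw.Draw(layer)
draw.rectangle([100, 100, 300, 300], fill=(255, 255, 255, 255))

result, mask = inpaint({"background": background, "layers": [layer]})
result.save("inpainted.png")
mask.save("mask.png")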
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ gradio
+ coremltools
+ numpy
+ pillow