File size: 2,211 Bytes
9dfa4de
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
import numpy as np
import torch
import cv2

def HWC3(x):
    """Convert a uint8 image to 3-channel HWC format.

    2-D or single-channel inputs are replicated across three channels;
    RGBA inputs are alpha-composited onto a white background.
    3-channel inputs are returned unchanged (same object, no copy).
    """
    assert x.dtype == np.uint8
    if x.ndim == 2:
        x = x[:, :, None]
    assert x.ndim == 3
    channels = x.shape[2]
    assert channels in (1, 3, 4)
    if channels == 3:
        return x
    if channels == 1:
        return np.repeat(x, 3, axis=2)
    # RGBA: blend the color channels over white using the alpha channel.
    rgb = x[:, :, :3].astype(np.float32)
    alpha = x[:, :, 3:].astype(np.float32) / 255.0
    blended = rgb * alpha + 255.0 * (1.0 - alpha)
    return blended.clip(0, 255).astype(np.uint8)


def resize_image(input_image, resolution):
    """Resize an HWC image so its shorter side is ~`resolution` pixels.

    Both output sides are snapped to the nearest multiple of 64 (a common
    model-stride requirement), and clamped to at least 64 so cv2.resize
    never receives a zero-sized dimension (which previously raised for
    resolutions below 32).

    Args:
        input_image: HWC array to resize.
        resolution: target length, in pixels, for the shorter side.

    Returns:
        The resized image.
    """
    H, W, _ = input_image.shape
    k = float(resolution) / min(H, W)
    # Snap each side to a multiple of 64, never below 64.
    new_H = max(64, int(np.round(H * k / 64.0)) * 64)
    new_W = max(64, int(np.round(W * k / 64.0)) * 64)
    # LANCZOS preserves detail when upsampling; AREA avoids aliasing when downsampling.
    interp = cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA
    return cv2.resize(input_image, (new_W, new_H), interpolation=interp)


# normalize
# normalize
def norm_normalize(norm_out):
    """Normalize a (3, H, W) normal map to unit vectors and derive a mask.

    Pixels whose original vector magnitude lies outside [0.5, 1.5] are
    treated as background: their output components are set to -1 and
    masked out.

    Returns:
        (unit_normals, fg_mask) where fg_mask is a (3, H, W) bool tensor.
    """
    nx, ny, nz = torch.split(norm_out, 1, dim=0)
    magnitude = torch.sqrt(nx ** 2.0 + ny ** 2.0 + nz ** 2.0) + 1e-10
    unit = torch.cat([nx / magnitude, ny / magnitude, nz / magnitude], dim=0)

    mag3 = magnitude.repeat(3, 1, 1)
    invalid = (mag3 < 0.5) | (mag3 > 1.5)

    fg_mask = torch.ones_like(magnitude).repeat(3, 1, 1)
    fg_mask[invalid] = 0.
    unit[invalid] = -1
    return unit, fg_mask.bool()


def center_crop(input_image):
    """Crop the largest centered square from an image.

    Accepts arrays shaped (H, W) or (H, W, C); trailing channels are kept.
    For odd dimensions the crop side is the even number just below the
    shorter side (half_length * 2).
    """
    h, w = input_image.shape[:2]
    half = min(h, w) // 2
    cy, cx = h // 2, w // 2
    return input_image[cy - half:cy + half, cx - half:cx + half]


def flip_x(normal):
    """Negate the x component of 3-vectors (mirror normals about the YZ plane).

    Accepts a numpy array or a torch tensor of row vectors; numpy input is
    returned as float32, torch input as a float tensor.
    """
    flip = [[-1, 0, 0], [0, 1, 0], [0, 0, 1]]
    if isinstance(normal, np.ndarray):
        return normal.dot(np.array(flip)).astype(np.float32)
    return normal @ torch.tensor(flip).float()