ACCC1380 committed on
Commit 283a26b
1 Parent(s): 267d457

Upload lora-scripts/sd-scripts/bitsandbytes_windows/main.py with huggingface_hub
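For reference, an upload like this is typically done with the huggingface_hub client. The sketch below is a hypothetical reconstruction, not taken from this commit: the repo_id is a placeholder and authentication is assumed to be set up already.

from huggingface_hub import HfApi

api = HfApi()  # assumes you are logged in, e.g. via `huggingface-cli login` or an HF token
api.upload_file(
    path_or_fileobj="lora-scripts/sd-scripts/bitsandbytes_windows/main.py",
    path_in_repo="lora-scripts/sd-scripts/bitsandbytes_windows/main.py",
    repo_id="ACCC1380/example-repo",  # placeholder: the actual target repo is not shown here
    commit_message="Upload lora-scripts/sd-scripts/bitsandbytes_windows/main.py with huggingface_hub",
)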

lora-scripts/sd-scripts/bitsandbytes_windows/main.py ADDED
@@ -0,0 +1,166 @@
+ """
+ extract factors the build is dependent on:
+ [X] compute capability
+     [ ] TODO: Q - What if we have multiple GPUs of different makes?
+ - CUDA version
+ - Software:
+     - CPU-only: only CPU quantization functions (no optimizer, no matrix multiplication)
+     - CuBLAS-LT: full-build 8-bit optimizer
+     - no CuBLAS-LT: no 8-bit matrix multiplication (`nomatmul`)
+
+ evaluation:
+     - if paths faulty, return meaningful error
+     - else:
+         - determine CUDA version
+         - determine capabilities
+         - based on that set the default path
+ """
+
+ import ctypes
+
+ from .paths import determine_cuda_runtime_lib_path
+
+
+ def check_cuda_result(cuda, result_val):
+     # 3. Check for CUDA errors
+     if result_val != 0:
+         error_str = ctypes.c_char_p()
+         cuda.cuGetErrorString(result_val, ctypes.byref(error_str))
+         print(f"CUDA exception! Error code: {error_str.value.decode()}")
+
+ def get_cuda_version(cuda, cudart_path):
+     # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION
+     try:
+         cudart = ctypes.CDLL(cudart_path)
+     except OSError:
+         # TODO: shouldn't we error or at least warn here?
+         print(f'ERROR: libcudart.so could not be read from path: {cudart_path}!')
+         return None
+
+     version = ctypes.c_int()
+     check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ctypes.byref(version)))
+     version = int(version.value)
+     major = version//1000
+     minor = (version-(major*1000))//10
+
+     if major < 11:
+         print('CUDA SETUP: CUDA versions lower than 11 are currently not supported for LLM.int8(). You will only be able to use 8-bit optimizers and quantization routines!!')
+
+     return f'{major}{minor}'
+
+
+ def get_cuda_lib_handle():
+     # 1. find libcuda.so library (GPU driver) (/usr/lib)
+     try:
+         cuda = ctypes.CDLL("libcuda.so")
+     except OSError:
+         # TODO: shouldn't we error or at least warn here?
+         print('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!')
+         return None
+     check_cuda_result(cuda, cuda.cuInit(0))
+
+     return cuda
+
+
+ def get_compute_capabilities(cuda):
+     """
+     1. find libcuda.so library (GPU driver) (/usr/lib)
+        init_device -> init variables -> call function by reference
+     2. call extern C function to determine CC
+        (https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__DEVICE__DEPRECATED.html)
+     3. Check for CUDA errors
+        https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
+     # bits taken from https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549
+     """
+
+
+     nGpus = ctypes.c_int()
+     cc_major = ctypes.c_int()
+     cc_minor = ctypes.c_int()
+
+     device = ctypes.c_int()
+
+     check_cuda_result(cuda, cuda.cuDeviceGetCount(ctypes.byref(nGpus)))
+     ccs = []
+     for i in range(nGpus.value):
+         check_cuda_result(cuda, cuda.cuDeviceGet(ctypes.byref(device), i))
+         ref_major = ctypes.byref(cc_major)
+         ref_minor = ctypes.byref(cc_minor)
+         # 2. call extern C function to determine CC
+         check_cuda_result(
+             cuda, cuda.cuDeviceComputeCapability(ref_major, ref_minor, device)
+         )
+         ccs.append(f"{cc_major.value}.{cc_minor.value}")
+
+     return ccs
96
+
97
+
98
+ # def get_compute_capability()-> Union[List[str, ...], None]: # FIXME: error
99
+ def get_compute_capability(cuda):
100
+ """
101
+ Extracts the highest compute capbility from all available GPUs, as compute
102
+ capabilities are downwards compatible. If no GPUs are detected, it returns
103
+ None.
104
+ """
105
+ ccs = get_compute_capabilities(cuda)
106
+ if ccs is not None:
107
+ # TODO: handle different compute capabilities; for now, take the max
108
+ return ccs[-1]
109
+ return None
+
+
+ def evaluate_cuda_setup():
+     print('')
+     print('='*35 + 'BUG REPORT' + '='*35)
+     print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues')
+     print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link')
+     print('='*80)
+     return "libbitsandbytes_cuda116.dll"  # $$$ Windows build: always return the prebuilt CUDA 11.6 DLL; the original selection logic below is unreachable
+
+     binary_name = "libbitsandbytes_cpu.so"
+     #if not torch.cuda.is_available():
+     #    print('No GPU detected. Loading CPU library...')
+     #    return binary_name
+
+     cudart_path = determine_cuda_runtime_lib_path()
+     if cudart_path is None:
+         print(
+             "WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!"
+         )
+         return binary_name
+
+     print(f"CUDA SETUP: CUDA runtime path found: {cudart_path}")
+     cuda = get_cuda_lib_handle()
+     cc = get_compute_capability(cuda)
+     print(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}")
+     cuda_version_string = get_cuda_version(cuda, cudart_path)
+
+
+     if cc is None:
+         print(
+             "WARNING: No GPU detected! Check your CUDA paths. Proceeding to load CPU-only library..."
+         )
+         return binary_name
+
+     # 7.5 is the minimum CC for cublaslt
+     has_cublaslt = cc in ["7.5", "8.0", "8.6"]
+
+     # TODO:
+     # (1) CUDA missing cases (no CUDA installed, but CUDA driver (nvidia-smi) accessible)
+     # (2) Multiple CUDA versions installed
+
+     # we use ls -l instead of nvcc to determine the cuda version
+     # since most installations will have the libcudart.so installed, but not the compiler
+     print(f'CUDA SETUP: Detected CUDA version {cuda_version_string}')
+
+     def get_binary_name():
+         "if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so"
+         bin_base_name = "libbitsandbytes_cuda"
+         if has_cublaslt:
+             return f"{bin_base_name}{cuda_version_string}.so"
+         else:
+             return f"{bin_base_name}{cuda_version_string}_nocublaslt.so"
+
+     binary_name = get_binary_name()
+
+     return binary_name
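
This file only selects a binary name; it does not load anything itself. A minimal sketch of how the returned name might be consumed, assuming a consumer module in the same package (this consumer is not part of the commit), could look like:

import ctypes as ct
from pathlib import Path

from .main import evaluate_cuda_setup  # assumes this file is importable as a sibling module

binary_name = evaluate_cuda_setup()            # e.g. "libbitsandbytes_cuda116.dll" on this Windows build
binary_path = Path(__file__).parent / binary_name
lib = ct.cdll.LoadLibrary(str(binary_path))    # raises OSError if the binary is missing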