Upload lora-scripts/sd-scripts/library/ipex/__init__.py with huggingface_hub
lora-scripts/sd-scripts/library/ipex/__init__.py
ADDED
@@ -0,0 +1,180 @@
import os
import sys
import contextlib
import torch
import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
from .hijacks import ipex_hijacks

# pylint: disable=protected-access, missing-function-docstring, line-too-long

def ipex_init(): # pylint: disable=too-many-statements
    try:
        if hasattr(torch, "cuda") and hasattr(torch.cuda, "is_xpu_hijacked") and torch.cuda.is_xpu_hijacked:
            return True, "Skipping IPEX hijack"
        else:
            # Replace cuda with xpu:
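            # Everything below aliases the torch.cuda namespace onto torch.xpu,
            # so code written against the CUDA API runs unmodified on Intel GPUs.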
            torch.cuda.current_device = torch.xpu.current_device
            torch.cuda.current_stream = torch.xpu.current_stream
            torch.cuda.device = torch.xpu.device
            torch.cuda.device_count = torch.xpu.device_count
            torch.cuda.device_of = torch.xpu.device_of
            torch.cuda.get_device_name = torch.xpu.get_device_name
            torch.cuda.get_device_properties = torch.xpu.get_device_properties
            torch.cuda.init = torch.xpu.init
            torch.cuda.is_available = torch.xpu.is_available
            torch.cuda.is_initialized = torch.xpu.is_initialized
            torch.cuda.is_current_stream_capturing = lambda: False
            torch.cuda.set_device = torch.xpu.set_device
            torch.cuda.stream = torch.xpu.stream
            torch.cuda.synchronize = torch.xpu.synchronize
            torch.cuda.Event = torch.xpu.Event
            torch.cuda.Stream = torch.xpu.Stream
            torch.cuda.FloatTensor = torch.xpu.FloatTensor
            torch.Tensor.cuda = torch.Tensor.xpu
            torch.Tensor.is_cuda = torch.Tensor.is_xpu
            torch.nn.Module.cuda = torch.nn.Module.xpu
            torch.UntypedStorage.cuda = torch.UntypedStorage.xpu
            torch.cuda._initialization_lock = torch.xpu.lazy_init._initialization_lock
            torch.cuda._initialized = torch.xpu.lazy_init._initialized
            torch.cuda._lazy_seed_tracker = torch.xpu.lazy_init._lazy_seed_tracker
            torch.cuda._queued_calls = torch.xpu.lazy_init._queued_calls
            torch.cuda._tls = torch.xpu.lazy_init._tls
            torch.cuda.threading = torch.xpu.lazy_init.threading
            torch.cuda.traceback = torch.xpu.lazy_init.traceback
            torch.cuda.Optional = torch.xpu.Optional
            torch.cuda.__cached__ = torch.xpu.__cached__
            torch.cuda.__loader__ = torch.xpu.__loader__
            torch.cuda.ComplexFloatStorage = torch.xpu.ComplexFloatStorage
            torch.cuda.Tuple = torch.xpu.Tuple
            torch.cuda.streams = torch.xpu.streams
            torch.cuda._lazy_new = torch.xpu._lazy_new
            torch.cuda.FloatStorage = torch.xpu.FloatStorage
            torch.cuda.Any = torch.xpu.Any
            torch.cuda.__doc__ = torch.xpu.__doc__
            torch.cuda.default_generators = torch.xpu.default_generators
            torch.cuda.HalfTensor = torch.xpu.HalfTensor
            torch.cuda._get_device_index = torch.xpu._get_device_index
            torch.cuda.__path__ = torch.xpu.__path__
            torch.cuda.Device = torch.xpu.Device
            torch.cuda.IntTensor = torch.xpu.IntTensor
            torch.cuda.ByteStorage = torch.xpu.ByteStorage
            torch.cuda.set_stream = torch.xpu.set_stream
            torch.cuda.BoolStorage = torch.xpu.BoolStorage
            torch.cuda.os = torch.xpu.os
            torch.cuda.torch = torch.xpu.torch
            torch.cuda.BFloat16Storage = torch.xpu.BFloat16Storage
            torch.cuda.Union = torch.xpu.Union
            torch.cuda.DoubleTensor = torch.xpu.DoubleTensor
            torch.cuda.ShortTensor = torch.xpu.ShortTensor
            torch.cuda.LongTensor = torch.xpu.LongTensor
            torch.cuda.IntStorage = torch.xpu.IntStorage
            torch.cuda.LongStorage = torch.xpu.LongStorage
            torch.cuda.__annotations__ = torch.xpu.__annotations__
            torch.cuda.__package__ = torch.xpu.__package__
            torch.cuda.__builtins__ = torch.xpu.__builtins__
            torch.cuda.CharTensor = torch.xpu.CharTensor
            torch.cuda.List = torch.xpu.List
            torch.cuda._lazy_init = torch.xpu._lazy_init
            torch.cuda.BFloat16Tensor = torch.xpu.BFloat16Tensor
            torch.cuda.DoubleStorage = torch.xpu.DoubleStorage
            torch.cuda.ByteTensor = torch.xpu.ByteTensor
            torch.cuda.StreamContext = torch.xpu.StreamContext
            torch.cuda.ComplexDoubleStorage = torch.xpu.ComplexDoubleStorage
            torch.cuda.ShortStorage = torch.xpu.ShortStorage
            torch.cuda._lazy_call = torch.xpu._lazy_call
            torch.cuda.HalfStorage = torch.xpu.HalfStorage
            torch.cuda.random = torch.xpu.random
            torch.cuda._device = torch.xpu._device
            torch.cuda.classproperty = torch.xpu.classproperty
            torch.cuda.__name__ = torch.xpu.__name__
            torch.cuda._device_t = torch.xpu._device_t
            torch.cuda.warnings = torch.xpu.warnings
            torch.cuda.__spec__ = torch.xpu.__spec__
            torch.cuda.BoolTensor = torch.xpu.BoolTensor
            torch.cuda.CharStorage = torch.xpu.CharStorage
            torch.cuda.__file__ = torch.xpu.__file__
            torch.cuda._is_in_bad_fork = torch.xpu.lazy_init._is_in_bad_fork
            # torch.cuda.is_current_stream_capturing = torch.xpu.is_current_stream_capturing

            # Memory:
            torch.cuda.memory = torch.xpu.memory
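            # empty_cache is stubbed out under WSL2, where it reportedly misbehaves: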
            if 'linux' in sys.platform and "WSL2" in os.popen("uname -a").read():
                torch.xpu.empty_cache = lambda: None
            torch.cuda.empty_cache = torch.xpu.empty_cache
            torch.cuda.memory_stats = torch.xpu.memory_stats
            torch.cuda.memory_summary = torch.xpu.memory_summary
            torch.cuda.memory_snapshot = torch.xpu.memory_snapshot
            torch.cuda.memory_allocated = torch.xpu.memory_allocated
            torch.cuda.max_memory_allocated = torch.xpu.max_memory_allocated
            torch.cuda.memory_reserved = torch.xpu.memory_reserved
            torch.cuda.memory_cached = torch.xpu.memory_reserved
            torch.cuda.max_memory_reserved = torch.xpu.max_memory_reserved
            torch.cuda.max_memory_cached = torch.xpu.max_memory_reserved
            torch.cuda.reset_peak_memory_stats = torch.xpu.reset_peak_memory_stats
            torch.cuda.reset_max_memory_cached = torch.xpu.reset_peak_memory_stats
            torch.cuda.reset_max_memory_allocated = torch.xpu.reset_peak_memory_stats
            torch.cuda.memory_stats_as_nested_dict = torch.xpu.memory_stats_as_nested_dict
            torch.cuda.reset_accumulated_memory_stats = torch.xpu.reset_accumulated_memory_stats

            # RNG:
            torch.cuda.get_rng_state = torch.xpu.get_rng_state
            torch.cuda.get_rng_state_all = torch.xpu.get_rng_state_all
            torch.cuda.set_rng_state = torch.xpu.set_rng_state
            torch.cuda.set_rng_state_all = torch.xpu.set_rng_state_all
            torch.cuda.manual_seed = torch.xpu.manual_seed
            torch.cuda.manual_seed_all = torch.xpu.manual_seed_all
            torch.cuda.seed = torch.xpu.seed
            torch.cuda.seed_all = torch.xpu.seed_all
            torch.cuda.initial_seed = torch.xpu.initial_seed

            # AMP:
            torch.cuda.amp = torch.xpu.amp
            torch.is_autocast_enabled = torch.xpu.is_autocast_xpu_enabled
            torch.get_autocast_gpu_dtype = torch.xpu.get_autocast_xpu_dtype

            if not hasattr(torch.cuda.amp, "common"):
                torch.cuda.amp.common = contextlib.nullcontext()
                torch.cuda.amp.common.amp_definitely_not_available = lambda: False

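            # Prefer the native torch.xpu.amp.GradScaler; fall back to the bundled
            # gradscaler_init() shim, and finally to IPEX's CPU GradScaler.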
            try:
                torch.cuda.amp.GradScaler = torch.xpu.amp.GradScaler
            except Exception: # pylint: disable=broad-exception-caught
                try:
                    from .gradscaler import gradscaler_init # pylint: disable=import-outside-toplevel, import-error
                    gradscaler_init()
                    torch.cuda.amp.GradScaler = torch.xpu.amp.GradScaler
                except Exception: # pylint: disable=broad-exception-caught
                    torch.cuda.amp.GradScaler = ipex.cpu.autocast._grad_scaler.GradScaler

            # C bindings:
            torch._C._cuda_getCurrentRawStream = ipex._C._getCurrentStream
            ipex._C._DeviceProperties.multi_processor_count = ipex._C._DeviceProperties.gpu_subslice_count
            ipex._C._DeviceProperties.major = 2024
            ipex._C._DeviceProperties.minor = 0

            # Fix functions with ipex:
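            # torch.cuda.mem_get_info returns (free, total); free memory is
            # approximated below as total_memory minus memory_reserved.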
            torch.cuda.mem_get_info = lambda device=None: [(torch.xpu.get_device_properties(device).total_memory - torch.xpu.memory_reserved(device)), torch.xpu.get_device_properties(device).total_memory]
            torch._utils._get_available_device_type = lambda: "xpu"
            torch.has_cuda = True
            torch.cuda.has_half = True
            torch.cuda.is_bf16_supported = lambda *args, **kwargs: True
            torch.cuda.is_fp16_supported = lambda *args, **kwargs: True
            torch.backends.cuda.is_built = lambda *args, **kwargs: True
            torch.version.cuda = "12.1"
            torch.cuda.get_device_capability = lambda *args, **kwargs: [12, 1]
            torch.cuda.get_device_properties.major = 12
            torch.cuda.get_device_properties.minor = 1
            torch.cuda.ipc_collect = lambda *args, **kwargs: None
            torch.cuda.utilization = lambda *args, **kwargs: 0

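            # Install the remaining function hijacks; devices without native FP64
            # support (or with IPEX_FORCE_ATTENTION_SLICE set) also get the
            # diffusers attention hijacks.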
            ipex_hijacks()
            if not torch.xpu.has_fp64_dtype() or os.environ.get('IPEX_FORCE_ATTENTION_SLICE', None) is not None:
                try:
                    from .diffusers import ipex_diffusers
                    ipex_diffusers()
                except Exception: # pylint: disable=broad-exception-caught
                    pass
            torch.cuda.is_xpu_hijacked = True
    except Exception as e:
        return False, e
    return True, None
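
For context, a minimal sketch of how a trainer entry point might invoke this module; the import path follows this repo's layout, but the guard logic is an assumption for illustration, not part of this commit:

# Hypothetical caller: apply the hijack once, before anything touches torch.cuda.
import torch

try:
    from library.ipex import ipex_init  # path assumed from lora-scripts/sd-scripts
except ImportError:
    ipex_init = None  # Intel GPU stack not installed; stay on CUDA/CPU

if ipex_init is not None and hasattr(torch, "xpu") and torch.xpu.is_available():
    ok, message = ipex_init()
    if not ok:
        print(f"IPEX initialization failed: {message}")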