PromptKing committed on
Commit
3513fce
1 Parent(s): 536b1bb

Update README.md

Files changed (1)
  1. README.md +371 -0
README.md CHANGED
@@ -1,3 +1,374 @@
---
license: gpl-3.0
pipeline_tag: graph-ml
tags:
- code
---

import contextlib
import os
from matplotlib import pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import requests
from torchvision import datasets, transforms
import psutil
import time
import subprocess
import onnxruntime as ort
import numexpr as ne

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Pretrained seq2seq model and tokenizer from janpase97/codeformer-pretrained
# (loaded here, but not used by the MNIST training loop below).
tokenizer = AutoTokenizer.from_pretrained("janpase97/codeformer-pretrained")
codeformer_model = AutoModelForSeq2SeqLM.from_pretrained("janpase97/codeformer-pretrained")

# Detect which graphics API the target application uses by inspecting the DLL
# modules reported by the Windows `tasklist /M` command (Windows only).
def check_graphics_api(target_app_name):
    graphics_api = None

    with contextlib.suppress(subprocess.CalledProcessError):
        output = subprocess.check_output(['tasklist', '/FI', f'imagename eq {target_app_name}', '/M']).decode('utf-8')
        if "opengl32.dll" in output:
            graphics_api = "OpenGL"
        elif "d3d11.dll" in output:
            graphics_api = "DirectX11"
        elif "d3d12.dll" in output:
            graphics_api = "DirectX12"
        elif "vulkan" in output:
            graphics_api = "VULKAN"
    return graphics_api

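# Added illustration (not part of the original script): a rough cross-platform
# variant of the same idea using psutil's memory_maps(), assuming the platform
# exposes loaded modules through it; a sketch, not a drop-in replacement.
def check_graphics_api_psutil(target_app_process):
    api_markers = {"opengl32": "OpenGL", "d3d11": "DirectX11", "d3d12": "DirectX12", "vulkan": "VULKAN"}
    with contextlib.suppress(psutil.Error, AttributeError):
        for mapping in target_app_process.memory_maps():
            for marker, api in api_markers.items():
                if marker in mapping.path.lower():
                    return api
    return None
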
# Get the target application's process object
def get_target_app_process(target_app_name):
    return next(
        (
            process
            for process in psutil.process_iter(['name'])
            if process.info['name'] == target_app_name
        ),
        None,
    )

# Attach the AI to the application's process by PID
def attach_ai_to_app_pid(target_app_process):
    if target_app_process is not None:
        print(f"AI is attached to the application's process with PID: {target_app_process.pid}")
        return True
    else:
        print("Could not find the target application's process to attach the AI.")
        return False

# Check if the targeted application is running
def is_target_app_running(target_app_name):
    return any(
        process.info['name'] == target_app_name
        for process in psutil.process_iter(['name'])
    )

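# Illustrative usage of the helpers above (added; names come from this script).
# Note that attach_ai_to_app_pid only reports the PID -- it does not hook into
# or modify the target process.
# proc = get_target_app_process("GTA5.exe")
# attach_ai_to_app_pid(proc)
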
# Create the directory if it doesn't exist
directory = r"G:\Epic Games\GTAV\GTA5_AI\trained_models"
if not os.path.exists(directory):
    os.makedirs(directory)

# Define the neural network model
class NanoCircuit(nn.Module):
    def __init__(self):
        super(NanoCircuit, self).__init__()
        self.fc1 = nn.Linear(784, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = x.view(-1, 784)  # Reshape the input from (batch_size, 28, 28) to (batch_size, 784)
        x = torch.relu(self.fc1(x))
        x = self.fc2(x)
        return x

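# Quick sanity check (added for illustration): a dummy batch of four 28x28 images
# should map to four 10-way logit vectors.
_probe = NanoCircuit()(torch.zeros(4, 1, 28, 28))
assert _probe.shape == (4, 10)
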
# Set the device to GPU if available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Load the MNIST dataset
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True)

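# Worked example (added): one float32 MNIST batch of 64 images occupies
# 64 * 1 * 28 * 28 * 4 bytes = 200,704 bytes, so a 1 GB data cap in the training
# loop below corresponds to roughly 1024**3 / 200704 ~ 5,350 batches.
_example_batch, _ = next(iter(train_loader))
print(f"Example batch: {tuple(_example_batch.shape)}, "
      f"{_example_batch.nelement() * _example_batch.element_size()} bytes")
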
# Initialize the model and move it to the GPU
model = NanoCircuit().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

# Train the model on the GPU with a data cap
def train_with_data_cap(model, data_loader, criterion, optimizer, device, data_cap_gb):
    data_processed = 0
    data_cap_bytes = data_cap_gb * (1024 ** 3)
    epoch = 0

    while data_processed < data_cap_bytes:
        running_loss = 0.0
        for i, data in enumerate(data_loader, 0):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)

            # Update the amount of data processed
            data_processed += inputs.nelement() * inputs.element_size()
            if data_processed >= data_cap_bytes:
                break

            optimizer.zero_grad()

            outputs = model(inputs.view(-1, 28 * 28))
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

        epoch += 1
        print(f"Epoch {epoch}, Loss: {running_loss / (i + 1)}")
        print(f"Data processed: {data_processed / (1024 ** 3):.2f} GB")

    return model

# Save the updated model as a .onnx file
def save_model(model, filepath):
    dummy_input = torch.randn(1, 1, 28, 28).to(device)
    torch.onnx.export(model, dummy_input, filepath, input_names=['input'], output_names=['output'], opset_version=11)

# Train the model with a 50 GB data cap
trained_model = train_with_data_cap(model, train_loader, criterion, optimizer, device, data_cap_gb=50)
save_model(trained_model, os.path.join(directory, 'GTA5_TRAINED.onnx'))

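# Added sketch: onnxruntime is imported above but never exercised, so this shows
# how the exported file could be loaded and run once it exists. The 'input' name
# matches the input_names argument passed to torch.onnx.export.
_session = ort.InferenceSession(os.path.join(directory, 'GTA5_TRAINED.onnx'), providers=["CPUExecutionProvider"])
_dummy = np.random.randn(1, 1, 28, 28).astype(np.float32)
_logits = _session.run(None, {'input': _dummy})[0]
print(f"ONNX output shape: {_logits.shape}")
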
target_app_name = "GTA5_TRAINED.exe"
save_interval_seconds = 5 * 60
application_was_running = False
while True:
    if is_target_app_running(target_app_name):
        print("Target application is running. Training and updating the model...")
        trained_model = train_with_data_cap(model, train_loader, criterion, optimizer, device, data_cap_gb=0.1)
        save_model(trained_model, os.path.join(directory, 'GTA5_TRAINED.onnx'))
        application_was_running = True
    elif application_was_running:
        print("Target application has exited. Saving the model...")
        save_model(trained_model, os.path.join(directory, 'GTA5_TRAINED.onnx'))
        print("Finished training and saved the model.")
        break
    else:
        print("Target application is not running. Waiting to start training and updating the model...")

    time.sleep(save_interval_seconds)

def train_with_data_cap(model, data_loader, criterion, optimizer, device, data_cap_gb):
    data_processed = 0
    data_cap_bytes = data_cap_gb * (1024 ** 3)
    epoch = 0

    while data_processed < data_cap_bytes:
        running_loss = 0.0
        ne_loss = 0.0
        for i, data in enumerate(data_loader, 0):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)

            # Update the amount of data processed
            data_processed += inputs.nelement() * inputs.element_size()
            if data_processed >= data_cap_bytes:
                break

            optimizer.zero_grad()

            # Forward pass; keep the torch tensors so autograd can backpropagate
            outputs = model(inputs.view(-1, 28 * 28))
            loss = criterion(outputs, labels)

            # Recompute the cross-entropy with numexpr on detached copies
            # (monitoring only -- the numexpr arrays carry no autograd graph)
            log_probs = torch.log_softmax(outputs, dim=1).detach().cpu().numpy()
            picked = log_probs[np.arange(log_probs.shape[0]), labels.cpu().numpy()]
            ne_loss = float(ne.evaluate("sum(-picked)")) / len(picked)

            # Backpropagate and update the model parameters
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

        epoch += 1
        print(f"Epoch {epoch}, Loss: {running_loss / (i + 1)}, numexpr check: {ne_loss:.4f}")
        print(f"Data processed: {data_processed / (1024 ** 3):.2f} GB")

    return model

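# Minimal numexpr illustration (added): ne.evaluate compiles the expression string
# and resolves array names from the caller's namespace, which is how the
# cross-entropy sum above is offloaded to it.
_a = np.arange(5.0)
assert float(ne.evaluate("sum(_a * 2)")) == 20.0
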
# Train the model with a 10 GB data cap
trained_model = train_with_data_cap(model, train_loader, criterion, optimizer, device, data_cap_gb=10)
save_model(trained_model, os.path.join(directory, 'GTA5_TRAINED.onnx'))

target_app_name = "GTA5.exe"
save_interval_seconds = 5 * 60
application_was_running = False
while True:
    if is_target_app_running(target_app_name):
        print("Target application is running. Training and updating the model...")
        trained_model = train_with_data_cap(model, train_loader, criterion, optimizer, device, data_cap_gb=10)
        save_model(trained_model, os.path.join(directory, 'GTA5_TRAINED.onnx'))
        application_was_running = True
    elif application_was_running:
        print("Target application has exited. Saving the model...")
        save_model(trained_model, os.path.join(directory, 'GTA5_TRAINED.onnx'))
        print("Finished training and saved the model.")
        break
    else:
        print("Target application is not running. Waiting to start training and updating the model...")

    time.sleep(save_interval_seconds)

target_app_name = "GTA5.exe"
save_interval_seconds = 1 * 60
application_was_running = False

while True:
    if is_target_app_running(target_app_name):
        print("Target application is running. Training and updating the model...")
        trained_model = train_with_data_cap(model, train_loader, criterion, optimizer, device, data_cap_gb=10)
        save_model(trained_model, os.path.join(directory, 'GTA5_TRAINED.onnx'))
        application_was_running = True
    elif application_was_running:
        print("Target application has exited. Saving the model...")
        save_model(trained_model, os.path.join(directory, 'GTA5_TRAINED.onnx'))
        print("Finished training and saved the model.")
        break
    else:
        start_time = time.time()
        print("Target application is not running. Waiting to detect the graphics API...")
        while (time.time() - start_time) < 5:
            if is_target_app_running(target_app_name):
                if graphics_api := check_graphics_api(target_app_name):
                    print(f"Detected {graphics_api} in the target application.")
                    break
                else:
                    print("Could not detect the graphics API used in the target application.")
            time.sleep(1)

        if not is_target_app_running(target_app_name):
            print("Target application not detected in 5 seconds. Shutting down the AI.")
            break

while True:
    if is_target_app_running(target_app_name):
        if graphics_api := check_graphics_api(target_app_name):
            print(f"Detected {graphics_api} in the target application.")
        else:
            print("Could not detect the graphics API used in the target application.")
        time.sleep(1)  # avoid a busy loop while the application keeps running
    else:
        start_time = time.time()
        print("Target application is not running. Waiting to start training and updating the model...")
        while (time.time() - start_time) < 5:
            if is_target_app_running(target_app_name):
                if graphics_api := check_graphics_api(target_app_name):
                    print(f"Detected {graphics_api} in the target application.")
                break
            time.sleep(1)

        if not is_target_app_running(target_app_name):
            print("Target application not detected in 5 seconds. Shutting down the AI.")
            break

# Generate some random data for the boxplots
np.random.seed(0)
original_data = np.random.normal(0, 1, 100)
trained_data = np.random.normal(0.5, 1, 100)

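# Note (added): these two arrays are synthetic placeholders drawn from normal
# distributions; they are not derived from the model or its training loss, so the
# boxplot below only illustrates the plotting step, e.g.:
# plt.boxplot([np.random.normal(0, 1, 100), np.random.normal(0.5, 1, 100)])
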
while True:
    if is_target_app_running(target_app_name):
        print("Target application is running. Training and updating the model...")
        trained_model = train_with_data_cap(model, train_loader, criterion, optimizer, device, data_cap_gb=10)
        save_model(trained_model, os.path.join(directory, 'GTA5_TRAINED.onnx'))

        # Create a box plot of the original and trained data
        plt.figure()
        plt.boxplot([original_data, trained_data], labels=["Original Data", "Trained Data"])
        plt.title("Boxplot of Original and Trained Data")
        plt.ylabel("Values")

        # Save the box plot as an image before showing it (saving after plt.show()
        # can write an empty figure)
        plt.savefig(r"G:\Epic Games\GTAV\GTA5_AI\Plot Box Comparison\boxplot_comparison.png")
        plt.show()

        application_was_running = True
    elif application_was_running:
        print("Target application has exited. Saving the model...")
        save_model(trained_model, os.path.join(directory, 'GTA5_TRAINED.onnx'))
        print("Finished training and saved the model.")
        break
    else:
        start_time = time.time()
        print("Target application is not running. Waiting to detect the graphics API...")
        while (time.time() - start_time) < 5:
            if is_target_app_running(target_app_name):
                if graphics_api := check_graphics_api(target_app_name):
                    print(f"Detected {graphics_api} in the target application.")
                    break
                else:
                    print("Could not detect the graphics API used in the target application.")
            time.sleep(1)

        if not is_target_app_running(target_app_name):
            print("Target application not detected in 5 seconds. Shutting down the AI.")
            break