Wakka2905 committed on
Commit
20c73bc
1 Parent(s): 84a1e08
Files changed (50)
  1. Assets/Bird/Bird1.png +0 -0
  2. Assets/Bird/Bird2.png +0 -0
  3. Assets/Cactus/LargeCactus1.png +0 -0
  4. Assets/Cactus/LargeCactus2.png +0 -0
  5. Assets/Cactus/LargeCactus3.png +0 -0
  6. Assets/Cactus/SmallCactus1.png +0 -0
  7. Assets/Cactus/SmallCactus2.png +0 -0
  8. Assets/Cactus/SmallCactus3.png +0 -0
  9. Assets/Dino/DinoDead.png +0 -0
  10. Assets/Dino/DinoDuck1.png +0 -0
  11. Assets/Dino/DinoDuck2.png +0 -0
  12. Assets/Dino/DinoJump.png +0 -0
  13. Assets/Dino/DinoRun1.png +0 -0
  14. Assets/Dino/DinoRun2.png +0 -0
  15. Assets/Dino/DinoStart.png +0 -0
  16. Assets/Other/Cloud.png +0 -0
  17. Assets/Other/GameOver.png +0 -0
  18. Assets/Other/Reset.png +0 -0
  19. Assets/Other/Track.png +0 -0
  20. ProyectoFinalChatbotsPyTorch.ipynb +1136 -0
  21. models/Episode_100_Points_68_2023-12-01_15-13-08_model.pth +3 -0
  22. models/Episode_100_Points_71_2023-12-01_15-43-17_model.pth +3 -0
  23. models/Episode_150_Points_203_2023-12-01_15-19-12_model.pth +3 -0
  24. models/Episode_150_Points_77_2023-12-01_15-47-31_model.pth +3 -0
  25. models/Episode_200_Points_225_2023-12-01_15-51-38_model.pth +3 -0
  26. models/Episode_200_Points_76_2023-12-01_15-26-34_model.pth +3 -0
  27. models/Episode_250_Points_192_2023-12-01_15-55-41_model.pth +3 -0
  28. models/Episode_250_Points_352_2023-12-01_15-33-57_model.pth +3 -0
  29. models/Episode_300_Points_65_2023-12-01_15-58-47_model.pth +3 -0
  30. models/Episode_350_Points_75_2023-12-01_16-02-41_model.pth +3 -0
  31. models/Episode_400_Points_82_2023-12-01_16-06-52_model.pth +3 -0
  32. models/Episode_50_Points_100_2023-12-01_15-39-38_model.pth +3 -0
  33. models/Episode_50_Points_102_2023-12-01_14-51-42_model.pth +3 -0
  34. models/Episode_50_Points_345_2023-12-01_15-07-35_model.pth +3 -0
  35. models/Episode_50_Points_73_2023-12-01_14-11-02_model.pth +3 -0
  36. models/episodes/106_Points,Episode_50_Date_2023-12-10_20-10-06_model.pth +3 -0
  37. models/episodes/115_Points,Episode_50_Date_2023-12-10_20-57-02_model.pth +3 -0
  38. models/episodes/116_Points,Episode_50_Date_2023-12-10_21-53-05_model.pth +3 -0
  39. models/episodes/1193_Points,Episode_100_Date_2023-12-02_00-08-52_model.pth +3 -0
  40. models/episodes/124_Points,Episode_100_Date_2023-12-10_20-47-01_model.pth +3 -0
  41. models/episodes/171_Points,Episode_50_Date_2023-12-10_17-50-12_model.pth +3 -0
  42. models/episodes/1825_Points,Episode_50_Date_2023-12-02_08-46-15_model.pth +3 -0
  43. models/episodes/187_Points,Episode_100_Date_2023-12-10_21-46-09_model.pth +3 -0
  44. models/episodes/190_Points,Episode_100_Date_2023-12-10_18-43-53_model.pth +3 -0
  45. models/episodes/2006_Points,Episode_50_Date_2023-12-10_22-20-30_model.pth +3 -0
  46. models/episodes/211_Points,Episode_50_Date_2023-12-10_19-41-01_model.pth +3 -0
  47. models/episodes/215_Points,Episode_50_Date_2023-12-10_20-30-31_model.pth +3 -0
  48. models/episodes/342_Points,Episode_100_Date_2023-12-10_19-53-01_model.pth +3 -0
  49. models/episodes/3608_Points,Episode_50_Date_2023-12-01_23-21-56_model.pth +3 -0
  50. models/episodes/3630_Points,Episode_100_Date_2023-12-10_21-17-25_model.pth +3 -0
Assets/Bird/Bird1.png ADDED
Assets/Bird/Bird2.png ADDED
Assets/Cactus/LargeCactus1.png ADDED
Assets/Cactus/LargeCactus2.png ADDED
Assets/Cactus/LargeCactus3.png ADDED
Assets/Cactus/SmallCactus1.png ADDED
Assets/Cactus/SmallCactus2.png ADDED
Assets/Cactus/SmallCactus3.png ADDED
Assets/Dino/DinoDead.png ADDED
Assets/Dino/DinoDuck1.png ADDED
Assets/Dino/DinoDuck2.png ADDED
Assets/Dino/DinoJump.png ADDED
Assets/Dino/DinoRun1.png ADDED
Assets/Dino/DinoRun2.png ADDED
Assets/Dino/DinoStart.png ADDED
Assets/Other/Cloud.png ADDED
Assets/Other/GameOver.png ADDED
Assets/Other/Reset.png ADDED
Assets/Other/Track.png ADDED
ProyectoFinalChatbotsPyTorch.ipynb ADDED
@@ -0,0 +1,1136 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Q-Learning in the Google Chrome Dinosaur Game"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We use the Pygame module and the Max Teaches Tech tutorial on YouTube to recreate the dinosaur game. The repository with the assets is here:\n",
+ "\n",
+ "https://github.com/maxontech/chrome-dinosaur/tree/master/Assets\n",
+ "\n",
+ "\n",
+ "For the first 300 points of the game only cacti appear; after that, more obstacles appear, and the speed keeps increasing as the game progresses."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Initial Configuration and Imports\n",
+ "Here we define the dimensions of the game screen and set initial parameters such as the game speed, memory sizes, and the hyperparameters for training the neural network. We also import the required libraries: Pygame for the graphical interface and PyTorch for the neural network implementation.\n",
+ "\n",
+ "### 1. Replay Memory Sizes\n",
+ "- **`INIT_REPLAY_MEM_SIZE = 5_000` and `REPLAY_MEMORY_SIZE = 45_000`:** A sufficiently large replay memory lets the agent learn from many past experiences and therefore generalize.\n",
+ "- **`MIN_REPLAY_MEMORY_SIZE = 1_000`:** The minimum replay memory size required before training starts. It guarantees the model has enough data for useful training.\n",
+ "\n",
+ "### 2. Minibatch Size and Discount Factor\n",
+ "- **`MINIBATCH_SIZE = 64`:** A minibatch size of 64 is a common balance between computational efficiency and training stability. It lets the network learn from a variety of experiences in each update.\n",
+ "- **`DISCOUNT = 0.95`:** A discount factor (gamma) of 0.95 reduces the present value of future rewards, which focuses the agent on short-term rewards without completely ignoring long-term consequences.\n",
+ "\n",
+ "### 3. Target Model Updates\n",
+ "- **`UPDATE_TARGET_THRESH = 5`:** Updating the target model's weights every 5 episodes helps keep learning stable. It ensures the target model does not change too quickly and provides consistent estimates of the target Q-values.\n",
+ "\n",
+ "### 4. Epsilon-Greedy Strategy\n",
+ "- **`EPSILON_INIT = 0.45`, `EPSILON_DECAY = 0.997`, `MIN_EPSILON = 0.05`:** These values control the agent's exploration/exploitation rate. An initial `epsilon` of 0.45 was chosen because of game-specific characteristics (e.g., how long the jump action lasts compared to running or ducking). The decay rate of 0.997 gradually reduces exploration as the agent learns, while the floor keeps it from shrinking to zero, so some probability of exploration always remains. (The configuration cell below lowers these to 0.25 and 0.75 to make the agent less exploratory.)\n",
+ "\n",
+ "### 5. Training Duration\n",
+ "- **`NUM_EPISODES = 2_000`:** The number of episodes we want the training to run. (The configuration cell below uses 100 for these runs.)\n",
+ "\n",
+ "### Conclusion\n",
+ "The choice of these hyperparameters reflects a balanced approach that takes into account the particulars of the dinosaur game, such as the difference in action durations and the need to stabilize learning. In addition, the learning strategy and replay memory help balance learning from recent experiences against not forgetting earlier lessons, enabling effective and robust learning. A small sketch of how quickly the two epsilon-decay schedules shrink follows below."
+ ]
+ },
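+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A quick sanity check of the decay schedules (illustrative only, not part of the training pipeline): how many episodes each multiplicative decay needs to bring epsilon from the modified `EPSILON_INIT = 0.25` down to the `MIN_EPSILON = 0.05` floor."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Illustrative sketch: episodes needed for epsilon to decay from 0.25 to the 0.05 floor.\n",
+ "for decay in (0.997, 0.75):\n",
+ "    epsilon = 0.25\n",
+ "    episodes = 0\n",
+ "    while epsilon > 0.05:\n",
+ "        epsilon *= decay\n",
+ "        episodes += 1\n",
+ "    print(f\"decay={decay}: floor reached after {episodes} episodes\")\n",
+ "# With 0.997 the floor is reached after ~536 episodes; with 0.75, after only 6,\n",
+ "# so the modified schedule makes the agent almost purely greedy very early."
+ ]
+ },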
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#%pip install sqlalchemy\n",
+ "#%pip install pygame\n",
+ "# To use the GPU with torch\n",
+ "# %pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "9e601655",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "SCREEN_HEIGHT = 600\n",
+ "SCREEN_WIDTH = 1100\n",
+ "\n",
+ "INIT_GAME_SPEED = 14\n",
+ "X_POS_BG_INIT = 0\n",
+ "Y_POS_BG = 380\n",
+ "\n",
+ "INIT_REPLAY_MEM_SIZE = 5_000\n",
+ "REPLAY_MEMORY_SIZE = 45_000\n",
+ "MODEL_NAME = \"DINO\"\n",
+ "MIN_REPLAY_MEMORY_SIZE = 1_000\n",
+ "MINIBATCH_SIZE = 64\n",
+ "DISCOUNT = 0.95\n",
+ "UPDATE_TARGET_THRESH = 5\n",
+ "# EPSILON_INIT = 0.45  # original initial epsilon\n",
+ "EPSILON_INIT = 0.25  # lowered to make the agent less exploratory (smaller epsilon = less exploration)\n",
+ "# EPSILON_DECAY = 0.997  # original decay rate\n",
+ "EPSILON_DECAY = 0.75  # lowered so exploration fades faster (less exploratory)\n",
+ "NUM_EPISODES = 100\n",
+ "MIN_EPSILON = 0.05"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "pygame 2.5.2 (SDL 2.28.3, Python 3.10.9)\n",
+ "Hello from the pygame community. https://www.pygame.org/contribute.html\n"
+ ]
+ }
+ ],
+ "source": [
+ "import pygame\n",
+ "import os\n",
+ "\n",
+ "import torch\n",
+ "import torch.nn as nn\n",
+ "import torch.optim as optim\n",
+ "\n",
+ "import pandas as pd\n",
+ "import numpy as np\n",
+ "from collections import deque\n",
+ "import random\n",
+ "from typing import List\n",
+ "\n",
+ "from argparse import Action\n",
+ "import sys\n",
+ "\n",
+ "from sqlalchemy import asc\n",
+ "import math\n",
+ "import time\n",
+ "from tqdm import tqdm\n",
+ "from datetime import datetime"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import threading\n",
+ "import queue\n",
+ "\n",
+ "# Queue for communication between threads\n",
+ "action_queue = queue.Queue()\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Loading Assets\n",
+ "This section loads the images the game needs, such as the dinosaur, cacti, and birds.\n",
+ "We use pygame.image.load() to load each image and store them in lists or variables for use in the game."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "41eebe05",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "RUNNING = [pygame.image.load(os.path.join(\"Assets/Dino\", \"DinoRun1.png\")),\n",
+ "           pygame.image.load(os.path.join(\"Assets/Dino\", \"DinoRun2.png\"))]\n",
+ "\n",
+ "DUCKING = [pygame.image.load(os.path.join(\"Assets/Dino\", \"DinoDuck1.png\")),\n",
+ "           pygame.image.load(os.path.join(\"Assets/Dino\", \"DinoDuck2.png\"))]\n",
+ "\n",
+ "\n",
+ "JUMPING = pygame.image.load(os.path.join(\"Assets/Dino\", \"DinoJump.png\"))\n",
+ "\n",
+ "SMALL_CACTUS = [pygame.image.load(os.path.join(\"Assets/Cactus\", \"SmallCactus1.png\")),\n",
+ "                pygame.image.load(os.path.join(\"Assets/Cactus\", \"SmallCactus2.png\")),\n",
+ "                pygame.image.load(os.path.join(\"Assets/Cactus\", \"SmallCactus3.png\"))]\n",
+ "\n",
+ "\n",
+ "LARGE_CACTUS = [pygame.image.load(os.path.join(\"Assets/Cactus\", \"LargeCactus1.png\")),\n",
+ "                pygame.image.load(os.path.join(\"Assets/Cactus\", \"LargeCactus2.png\")),\n",
+ "                pygame.image.load(os.path.join(\"Assets/Cactus\", \"LargeCactus3.png\"))]\n",
+ "\n",
+ "BIRD = [pygame.image.load(os.path.join(\"Assets/Bird\", \"Bird1.png\")), pygame.image.load(os.path.join(\"Assets/Bird\", \"Bird2.png\"))]\n",
+ "\n",
+ "CLOUD = pygame.image.load(os.path.join(\"Assets/Other\", \"Cloud.png\"))\n",
+ "\n",
+ "BACKGROUND = pygame.image.load(os.path.join(\"Assets/Other\", \"Track.png\"))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### A Brief Explanation of Q-Learning\n",
+ "\n",
+ "Q-Learning is a reinforcement-learning algorithm the agent (here, the dinosaur in the game) uses to learn which actions to take in different situations, or states, to maximize its total reward. It is an iterative process in which the agent gradually learns to make optimal decisions (actions) that maximize its total reward (the game score) by experimenting and adjusting its Q-value predictions through a neural network.\n",
+ "\n",
+ "1. **Q-Values (Quality):** Q-Learning is based on a Q-value function, which estimates the \"quality\", or total expected utility, of taking a specific action in a given state. Here, the Q-values indicate how good it is to jump, duck, or run at a given moment of the game.\n",
+ "\n",
+ "2. **Rewards and Decisions:** The agent's goal is to maximize its total reward. Every time the agent takes an action, it receives a reward (or penalty). Rewards are based on the dinosaur surviving without hitting obstacles.\n",
+ "\n",
+ "3. **Q-Value Updates:** The Q-values are updated as the agent experiences new situations (states and rewards) and learns from its mistakes and successes, following the target rule shown below.\n",
+ "\n",
+ "### Implementation in This Project\n",
+ "\n",
+ "1. **States and Actions:** At every step of the game, the agent observes the current game state (the dinosaur's position, the upcoming obstacles, etc.) and decides which action to take (jump, duck, run). The neural network predicts the Q-values for each possible action in that state.\n",
+ "\n",
+ "2. **Replay Memory:** Every experience (state, action, reward, new state) is stored in a replay memory. This lets the agent learn from past experiences.\n",
+ "\n",
+ "3. **Model Training:** Samples are drawn from the replay memory to train the neural network. During training, the network adjusts its weights to predict the Q-values more accurately, based on the rewards obtained and the target model's predictions.\n",
+ "\n",
+ "4. **Target Model:** A second model, the target model, is used to compute the target Q-values during training. This helps stabilize learning, since it provides a consistent estimate of the target Q-values.\n",
+ "\n",
+ "5. **Exploration vs. Exploitation:** At first, the agent is more likely to take random actions to explore the environment (a high exploration rate `epsilon`). As it learns, it relies more and more on the neural network's predictions (exploitation)."
+ ]
+ },
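+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For reference, the standard DQN target that the `train` method below implements (our summary of the usual formulation): for a sampled transition $(s, a, r, s')$,\n",
+ "\n",
+ "$$Q_{target}(s, a) = \\begin{cases} r + \\gamma \\max_{a'} Q_{\\hat{\\theta}}(s', a') & \\text{if $s'$ is not terminal} \\\\ r & \\text{if $s'$ is terminal} \\end{cases}$$\n",
+ "\n",
+ "where $\\gamma$ is `DISCOUNT` and $Q_{\\hat{\\theta}}$ is the target model. This matches the line `new_q = reward + DISCOUNT * max_future_q` in the training code."
+ ]
+ },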
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Q-Learning Implementation and Training\n",
+ "Here we implement the Q-Learning algorithm, a type of reinforcement learning. We define the game state, the possible actions, and the associated rewards. The neural network is continuously updated from the replay memory and the rewards obtained, adjusting the agent's decisions.\n",
+ "\n",
+ "\n",
+ "The inputs are:\n",
+ "- The dinosaur's distance to the obstacle\n",
+ "- The dinosaur's Y coordinate\n",
+ "- The obstacle's Y coordinate\n",
+ "- The obstacle's width\n",
+ "- The game speed\n",
+ "- The obstacle type (cactus or pterodactyl), encoded as two indicator features (cactus, bird), giving 7 inputs in total\n",
+ "\n",
+ "The outputs are run, jump, or duck.\n",
+ "\n",
+ "\n",
+ "### The NeuralNetwork Class\n",
+ "\n",
+ "We originally wanted to build the project with TensorFlow, but technical limitations restricted us to PyTorch; therefore this class defines a simple neural network for decision-making inside the game, something that was not originally needed. It is used to predict the Q-values, which represent the expected value of taking a given action in a given game state.\n",
+ "\n",
+ "- Constructor __init__(self)\n",
+ "\n",
+ "  Inherits from PyTorch's nn.Module, the base of all neural networks in PyTorch. It defines two linear layers (fc1 and fc2). The first layer (fc1) takes 7 input features and maps them to 4 features, and the second layer (fc2) takes those 4 features and produces 3 outputs. These outputs represent the Q-values for each possible action in the game.\n",
+ "\n",
+ "- forward(self, x)\n",
+ "\n",
+ "  Defines how the input is passed through the network. The relu function is applied to the output of the first layer to introduce non-linearity, and the result is passed to the second layer. The output is the set of predicted Q-values for the actions given the current game state."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class NeuralNetwork(nn.Module):\n",
+ "    def __init__(self):\n",
+ "        super(NeuralNetwork, self).__init__()\n",
+ "        self.fc1 = nn.Linear(7, 4)  # 7 input features, 4 output features\n",
+ "        self.fc2 = nn.Linear(4, 3)  # 4 input features, 3 output features\n",
+ "\n",
+ "    def forward(self, x):\n",
+ "        x = torch.relu(self.fc1(x))\n",
+ "        x = self.fc2(x)\n",
+ "        return x\n",
+ "\n"
+ ]
+ },
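+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A quick shape check (illustrative only, not used by training): a dummy 7-feature state pushed through an untrained network yields one Q-value per action (0 = jump, 1 = duck, 2 = run)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Illustrative check: one forward pass on a random state vector (on CPU).\n",
+ "sketch_net = NeuralNetwork()\n",
+ "dummy_state = torch.rand(7)  # 7 normalized state features\n",
+ "with torch.no_grad():\n",
+ "    print(sketch_net(dummy_state))  # tensor of 3 Q-values, one per action"
+ ]
+ },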
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### The DQNAgent Class\n",
+ "\n",
+ "\n",
+ "The `DQNAgent` uses this network to learn and choose actions with the Q-Learning method. The agent trains the network on past game experiences stored in the replay memory, adjusting its strategy to improve its in-game performance.\n",
+ "\n",
+ "#### Constructor `__init__(self)`\n",
+ "- We create two instances of `NeuralNetwork`: one as the main model and one as the target model. The target model is periodically updated with the main model's weights.\n",
+ "- We use the Adam optimizer to adjust the network weights and Mean Squared Error (MSE) as the loss function.\n",
+ "- Two replay memories (`init_replay_memory` and `late_replay_memory`) are initialized to store past experiences and learn from them.\n",
+ "\n",
+ "#### update_replay_memory(self, transition)\n",
+ "- Adds a new experience (transition) to the replay memory. The replay memory is used to train the neural network on past experiences.\n",
+ "\n",
+ "#### get_qs(self, state)\n",
+ "- Computes and returns the Q-values for a given state using the main model. This is used to decide which action to take in a particular game state.\n",
+ "\n",
+ "#### train(self, terminal_state, step)\n",
+ "- Trains the neural network on a minibatch of experiences from the replay memory. It uses both the main model and the target model to compute the target Q-value and adjust the main model's weights.\n",
+ "- Once a threshold is reached (set by `UPDATE_TARGET_THRESH`), the target model's weights are updated with the main model's."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "362f34ac",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")  # Use the GPU when available\n",
+ "\n",
+ "class DQNAgent:\n",
+ "    def __init__(self, learning_rate=0.001):\n",
+ "        self.model = NeuralNetwork().to(device)  # Move the model to the GPU if available\n",
+ "        self.target_model = NeuralNetwork().to(device)  # Move the model to the GPU if available\n",
+ "        self.target_model.load_state_dict(self.model.state_dict())\n",
+ "        self.optimizer = optim.Adam(self.model.parameters(), lr=learning_rate)\n",
+ "        self.loss_function = nn.MSELoss()\n",
+ "\n",
+ "        self.init_replay_memory = deque(maxlen=INIT_REPLAY_MEM_SIZE)\n",
+ "        self.late_replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)\n",
+ "        self.target_update_counter = 0\n",
+ "\n",
+ "    # Update the memory store: fill the initial memory first, then the late memory\n",
+ "    def update_replay_memory(self, transition):\n",
+ "        if len(self.init_replay_memory) < INIT_REPLAY_MEM_SIZE:\n",
+ "            self.init_replay_memory.append(transition)\n",
+ "        else:\n",
+ "            self.late_replay_memory.append(transition)\n",
+ "\n",
+ "    # Predict the Q-values for one state with the main model\n",
+ "    def get_qs(self, state):\n",
+ "        state_tensor = torch.Tensor(state).to(device)  # Move the tensor to the right device\n",
+ "        with torch.no_grad():\n",
+ "            return self.model(state_tensor).cpu().numpy()  # Move the result back to the CPU\n",
+ "\n",
+ "    def calculate_action(self, state_queue, action_queue):\n",
+ "        while True:\n",
+ "            state = state_queue.get()  # Wait for and fetch the game state\n",
+ "            if state is None:\n",
+ "                break  # A None state ends the thread\n",
+ "\n",
+ "            # Compute the action with the model\n",
+ "            q_values = self.get_qs(state)\n",
+ "            action = np.argmax(q_values)  # Pick the action with the highest Q-value\n",
+ "\n",
+ "            action_queue.put(action)  # Put the action on the queue\n",
+ "\n",
+ "    def train(self, terminal_state, step):\n",
+ "        if len(self.init_replay_memory) < MIN_REPLAY_MEMORY_SIZE:\n",
+ "            return\n",
+ "\n",
+ "        total_mem = list(self.init_replay_memory)\n",
+ "        total_mem.extend(self.late_replay_memory)\n",
+ "        minibatch = random.sample(total_mem, MINIBATCH_SIZE)\n",
+ "\n",
+ "        # Make sure the tensors live on the right device\n",
+ "        current_states = torch.Tensor([transition[0] for transition in minibatch]).to(device)\n",
+ "        current_qs_list = self.model(current_states)\n",
+ "        new_current_states = torch.Tensor([transition[3] for transition in minibatch]).to(device)\n",
+ "        future_qs_list = self.target_model(new_current_states)\n",
+ "\n",
+ "        X = []\n",
+ "        y = []\n",
+ "\n",
+ "        for index, (current_state, action, reward, new_current_state, done) in enumerate(minibatch):\n",
+ "            if not done:\n",
+ "                max_future_q = torch.max(future_qs_list[index])\n",
+ "                new_q = reward + DISCOUNT * max_future_q\n",
+ "            else:\n",
+ "                new_q = reward\n",
+ "\n",
+ "            current_qs = current_qs_list[index]\n",
+ "            current_qs[action] = new_q\n",
+ "\n",
+ "            X.append(current_state)\n",
+ "            y.append(current_qs)\n",
+ "\n",
+ "        X = torch.tensor(np.array(X, dtype=np.float32)).to(device)  # Move X to the GPU\n",
+ "        y = torch.tensor(np.array([y_item.detach().cpu().numpy() if isinstance(y_item, torch.Tensor) else y_item for y_item in y], dtype=np.float32)).to(device)  # Move y to the GPU\n",
+ "\n",
+ "        self.optimizer.zero_grad()\n",
+ "        output = self.model(X)  # X is already on the right device\n",
+ "        loss = self.loss_function(output, y)  # y is already on the right device\n",
+ "        loss.backward()\n",
+ "        self.optimizer.step()\n",
+ "\n",
+ "        if terminal_state:\n",
+ "            self.target_update_counter += 1\n",
+ "\n",
+ "        if self.target_update_counter > UPDATE_TARGET_THRESH:\n",
+ "            self.target_model.load_state_dict(self.model.state_dict())\n",
+ "            self.target_update_counter = 0"
+ ]
+ },
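+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For reference (illustrative values only), a stored transition has the shape `(state, action, reward, next_state, run_flag)`, where the state is the 7-feature vector described above and the fifth element mirrors the game's `self.run` flag. A minimal sketch:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Illustrative only: store one hand-made transition and read the memory size.\n",
+ "sketch_agent = DQNAgent()\n",
+ "state = [0.9, 0.4, 0.0, 0.58, 0.0, 1.0, 0.0]        # made-up 7-feature state\n",
+ "next_state = [0.9, 0.38, 0.0, 0.58, 0.0, 1.0, 0.0]  # made-up successor state\n",
+ "sketch_agent.update_replay_memory((state, 2, 3, next_state, True))  # action=run, reward=3\n",
+ "print(len(sketch_agent.init_replay_memory))  # -> 1"
+ ]
+ },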
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Game Class Definitions\n",
+ "In this part of the code we define several classes that handle different aspects of the game:\n",
+ " - The Obstacle class: represents the game's obstacles.\n",
+ " - The Dino class: controls the dinosaur character and its interaction with the game.\n",
+ " - The Game class: handles the main game logic, including obstacle creation and updating the interface."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class Obstacle:\n",
+ "    def __init__(self, image: List[pygame.Surface], type: int) -> None:\n",
+ "        self.image = image\n",
+ "        self.type = type\n",
+ "        self.rect = self.image[self.type].get_rect()\n",
+ "        self.rect.x = SCREEN_WIDTH\n",
+ "\n",
+ "    def update(self, obstacles: list, game_speed: int):\n",
+ "        self.rect.x -= game_speed\n",
+ "        if self.rect.x < -self.rect.width:\n",
+ "            obstacles.pop()\n",
+ "\n",
+ "    def draw(self, SCREEN: pygame.Surface):\n",
+ "        SCREEN.blit(self.image[self.type], self.rect)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "66589bde",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class Dino(DQNAgent):\n",
+ "    X_POS = 80\n",
+ "    Y_POS = 310\n",
+ "    Y_DUCK_POS = 340\n",
+ "    JUMP_VEL = 8.5\n",
+ "\n",
+ "    def __init__(self, learning_rate=0.001):\n",
+ "        super().__init__(learning_rate=learning_rate)\n",
+ "        self.duck_img = DUCKING\n",
+ "        self.run_img = RUNNING\n",
+ "        self.jump_img = JUMPING\n",
+ "\n",
+ "        # Initially the dino starts running\n",
+ "        self.dino_duck = False\n",
+ "        self.dino_run = True\n",
+ "        self.dino_jump = False\n",
+ "\n",
+ "        self.step_index = 0\n",
+ "        self.jump_vel = self.JUMP_VEL\n",
+ "        self.image = self.run_img[0]\n",
+ "        self.dino_rect = self.image.get_rect()\n",
+ "\n",
+ "        self.dino_rect.x = self.X_POS\n",
+ "        self.dino_rect.y = self.Y_POS\n",
+ "\n",
+ "        self.score = 0\n",
+ "\n",
+ "    # Update the Dino's state from keyboard input\n",
+ "    def update(self, move: pygame.key.ScancodeWrapper):\n",
+ "        if self.dino_duck:\n",
+ "            self.duck()\n",
+ "\n",
+ "        if self.dino_jump:\n",
+ "            self.jump()\n",
+ "\n",
+ "        if self.dino_run:\n",
+ "            self.run()\n",
+ "\n",
+ "        if self.step_index >= 20:\n",
+ "            self.step_index = 0\n",
+ "\n",
+ "        if move[pygame.K_UP] and not self.dino_jump:\n",
+ "            self.dino_jump = True\n",
+ "            self.dino_run = False\n",
+ "            self.dino_duck = False\n",
+ "\n",
+ "        elif move[pygame.K_DOWN] and not self.dino_jump:\n",
+ "            self.dino_duck = True\n",
+ "            self.dino_run = False\n",
+ "            self.dino_jump = False\n",
+ "\n",
+ "        elif not (self.dino_jump or move[pygame.K_DOWN]):\n",
+ "            self.dino_run = True\n",
+ "            self.dino_jump = False\n",
+ "            self.dino_duck = False\n",
+ "\n",
+ "    # Update the Dino's state from an agent action (0 = jump, 1 = duck, 2 = run)\n",
+ "    def update_auto(self, move):\n",
+ "        if self.dino_duck:\n",
+ "            self.duck()\n",
+ "\n",
+ "        if self.dino_jump:\n",
+ "            self.jump()\n",
+ "\n",
+ "        if self.dino_run:\n",
+ "            self.run()\n",
+ "\n",
+ "        if self.step_index >= 20:\n",
+ "            self.step_index = 0\n",
+ "\n",
+ "        if move == 0 and not self.dino_jump:\n",
+ "            self.dino_jump = True\n",
+ "            self.dino_run = False\n",
+ "            self.dino_duck = False\n",
+ "\n",
+ "        elif move == 1 and not self.dino_jump:\n",
+ "            self.dino_duck = True\n",
+ "            self.dino_run = False\n",
+ "            self.dino_jump = False\n",
+ "\n",
+ "        elif not (self.dino_jump or move == 1):\n",
+ "            self.dino_run = True\n",
+ "            self.dino_jump = False\n",
+ "            self.dino_duck = False\n",
+ "\n",
+ "    def duck(self) -> None:\n",
+ "        self.image = self.duck_img[self.step_index // 10]\n",
+ "        self.dino_rect = self.image.get_rect()\n",
+ "        self.dino_rect.x = self.X_POS\n",
+ "        self.dino_rect.y = self.Y_DUCK_POS\n",
+ "        self.step_index += 1\n",
+ "\n",
+ "    def run(self) -> None:\n",
+ "        self.image = self.run_img[self.step_index // 10]\n",
+ "        self.dino_rect = self.image.get_rect()\n",
+ "        self.dino_rect.x = self.X_POS\n",
+ "        self.dino_rect.y = self.Y_POS\n",
+ "        self.step_index += 1\n",
+ "\n",
+ "    def jump(self) -> None:\n",
+ "        self.image = self.jump_img\n",
+ "        if self.dino_jump:\n",
+ "            self.dino_rect.y -= self.jump_vel * 3\n",
+ "            self.jump_vel -= 0.6\n",
+ "\n",
+ "        if self.jump_vel < -self.JUMP_VEL:\n",
+ "            self.dino_jump = False\n",
+ "            self.dino_run = True\n",
+ "            self.jump_vel = self.JUMP_VEL\n",
+ "\n",
+ "    def draw(self, SCREEN: pygame.Surface):\n",
+ "        SCREEN.blit(self.image, (self.dino_rect.x, self.dino_rect.y))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "id": "a1bc8f20",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class LargeCactus(Obstacle):\n",
+ "    def __init__(self, image: List[pygame.Surface]) -> None:\n",
+ "        self.type = random.randint(0, 2)\n",
+ "        super().__init__(image, self.type)\n",
+ "        self.rect.y = 300\n",
+ "\n",
+ "\n",
+ "class SmallCactus(Obstacle):\n",
+ "    def __init__(self, image: List[pygame.Surface]) -> None:\n",
+ "        self.type = random.randint(0, 2)\n",
+ "        super().__init__(image, self.type)\n",
+ "        self.rect.y = 325\n",
+ "\n",
+ "class Bird(Obstacle):\n",
+ "    def __init__(self, image: List[pygame.Surface]) -> None:\n",
+ "        self.type = 0\n",
+ "        super().__init__(image, self.type)\n",
+ "        self.rect.y = SCREEN_HEIGHT - 340\n",
+ "        self.index = 0\n",
+ "\n",
+ "    def draw(self, SCREEN: pygame.Surface):\n",
+ "        if self.index >= 19:\n",
+ "            self.index = 0\n",
+ "\n",
+ "        SCREEN.blit(self.image[self.index // 10], self.rect)\n",
+ "        self.index += 1\n",
+ "\n",
+ "class Cloud:\n",
+ "    def __init__(self) -> None:\n",
+ "        self.x = SCREEN_WIDTH + random.randint(800, 1000)\n",
+ "        self.y = random.randint(50, 100)\n",
+ "        self.image = CLOUD\n",
+ "        self.width = self.image.get_width()\n",
+ "\n",
+ "    def update(self, game_speed: int):\n",
+ "        self.x -= game_speed\n",
+ "        if self.x < -self.width:\n",
+ "            self.x = SCREEN_WIDTH + random.randint(800, 1000)\n",
+ "            self.y = random.randint(50, 100)\n",
+ "\n",
+ "    def draw(self, SCREEN: pygame.Surface):\n",
+ "        SCREEN.blit(self.image, (self.x, self.y))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "fd31220d",
+ "metadata": {},
+ "source": [
+ "# The Game Class\n",
+ "\n",
+ "The Game class is where the dinosaur game runs. It is mostly the same as the original game class, with play_auto being the part where the agent is trained:\n",
+ "\n",
+ "\n",
+ "#### Game Initialization and Setup\n",
+ "- points_label is set up to track the points earned in each episode, and a loop runs over a predefined number of episodes (NUM_EPISODES).\n",
+ "- For each episode, episode_reward is initialized to 0 and the initial game state is obtained with get_state.\n",
+ "\n",
+ "#### Main Game Loop\n",
+ "\n",
+ "- Within each episode, a while loop runs as long as self.run is True. It represents continuous play until the dinosaur hits an obstacle.\n",
+ "- The game checks whether a new obstacle needs to be created and calls create_obstacle if so.\n",
+ "- The agent decides which action to take. If a random value is greater than epsilon (the exploration probability), the agent picks the action from the neural network's predictions (via get_qs). Otherwise, a random action is chosen.\n",
+ "\n",
+ "#### Updating and Learning\n",
+ "\n",
+ "- The game is updated with the chosen action by calling update_game, and the new game state is read.\n",
+ "- The reward is computed from the dinosaur's interaction with the obstacles (summarized in the table below), and the agent's replay memory is updated with the transition (current state, action, reward, new state).\n",
+ "- The agent's model is trained on the new experience (transition).\n",
+ "\n",
+ "#### End of Episode and Model Saving\n",
+ "\n",
+ "- If the dinosaur collides with an obstacle, the episode ends. episode_reward is updated with the reward obtained, and the game resets for the next episode.\n",
+ "- The episode's reward is appended to ep_rewards to track performance over time. epsilon is decayed to reduce exploration as the agent learns.\n",
+ "- The model is saved at regular intervals to preserve the learning state."
+ ]
+ },
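+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Reward shaping used in play_auto, summarized from the code below:\n",
+ "\n",
+ "| Event | Reward |\n",
+ "| --- | --- |\n",
+ "| Dinosaur fully passes an obstacle | +3 |\n",
+ "| Jumping while the obstacle is still beyond mid-screen | -1 |\n",
+ "| Collision with an obstacle (episode ends) | -10 |\n",
+ "| Anything else | 0 |"
+ ]
+ },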
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Code for saving the models\n",
+ "# Check whether the 'models/episodes' directory exists, and create it if not\n",
+ "if not os.path.exists('models/episodes'):\n",
+ "    os.makedirs('models/episodes')\n",
+ "\n",
+ "# Check whether the 'models/highscore' directory exists, and create it if not\n",
+ "if not os.path.exists('models/highscore'):\n",
+ "    os.makedirs('models/highscore')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "id": "3665702a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class Game:\n",
+ "    def __init__(self, epsilon, learning_rate, num_episodes, load_model=False, model_path=None):\n",
+ "        pygame.init()\n",
+ "        self.SCREEN = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n",
+ "\n",
+ "        self.obstacles = []\n",
+ "\n",
+ "        self.run = True\n",
+ "\n",
+ "        self.clock = pygame.time.Clock()\n",
+ "\n",
+ "        self.cloud = Cloud()\n",
+ "\n",
+ "        self.game_speed = INIT_GAME_SPEED\n",
+ "\n",
+ "        self.font = pygame.font.Font(\"freesansbold.ttf\", 20)\n",
+ "\n",
+ "        self.epsilon = epsilon\n",
+ "        self.num_episodes = num_episodes\n",
+ "        self.dino = Dino(learning_rate=learning_rate)  # Pass learning_rate here\n",
+ "\n",
+ "        # Load a saved model if requested\n",
+ "        if load_model and model_path:\n",
+ "            self.dino.model.load_state_dict(torch.load(model_path, map_location=device))\n",
+ "\n",
+ "        self.x_pos_bg = X_POS_BG_INIT\n",
+ "\n",
+ "        self.points = 0\n",
+ "\n",
+ "        self.ep_rewards = [-200]\n",
+ "\n",
+ "        self.high_score = 0  # Start the high score at 0 (or load an existing one from a file if preferred)\n",
+ "\n",
+ "        self.best_score = 0\n",
+ "\n",
+ "    def reset(self):\n",
+ "        self.game_speed = INIT_GAME_SPEED\n",
+ "        old_dino = self.dino\n",
+ "        self.dino = Dino()\n",
+ "        self.dino.init_replay_memory = old_dino.init_replay_memory\n",
+ "        self.dino.late_replay_memory = old_dino.late_replay_memory\n",
+ "        self.dino.target_update_counter = old_dino.target_update_counter\n",
+ "\n",
+ "        self.dino.model.load_state_dict(old_dino.model.state_dict())\n",
+ "        self.dino.target_model.load_state_dict(old_dino.target_model.state_dict())\n",
+ "\n",
+ "        self.x_pos_bg = X_POS_BG_INIT\n",
+ "        self.points = 0\n",
+ "        self.SCREEN = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n",
+ "        self.clock = pygame.time.Clock()\n",
+ "\n",
+ "    def get_dist(self, pos_a: tuple, pos_b: tuple):\n",
+ "        dx = pos_a[0] - pos_b[0]\n",
+ "        dy = pos_a[1] - pos_b[1]\n",
+ "\n",
+ "        return math.sqrt(dx**2 + dy**2)\n",
+ "\n",
+ "    def update_background(self):\n",
+ "        image_width = BACKGROUND.get_width()\n",
+ "\n",
+ "        self.SCREEN.blit(BACKGROUND, (self.x_pos_bg, Y_POS_BG))\n",
+ "        self.SCREEN.blit(BACKGROUND, (self.x_pos_bg + image_width, Y_POS_BG))\n",
+ "\n",
+ "        if self.x_pos_bg <= -image_width:\n",
+ "            self.SCREEN.blit(BACKGROUND, (self.x_pos_bg + image_width, Y_POS_BG))\n",
+ "            self.x_pos_bg = 0\n",
+ "\n",
+ "        self.x_pos_bg -= self.game_speed\n",
+ "        return self.x_pos_bg\n",
+ "\n",
+ "    def get_state(self):\n",
+ "        state = []\n",
+ "        state.append(self.dino.dino_rect.y / self.dino.Y_DUCK_POS + 10)\n",
+ "        pos_a = (self.dino.dino_rect.x, self.dino.dino_rect.y)\n",
+ "        bird = 0\n",
+ "        cactus = 0\n",
+ "        if len(self.obstacles) == 0:\n",
+ "            dist = self.get_dist(pos_a, tuple([SCREEN_WIDTH + 10, self.dino.Y_POS])) / math.sqrt(SCREEN_HEIGHT**2 + SCREEN_WIDTH**2)\n",
+ "            obs_height = 0\n",
+ "            obj_width = 0\n",
+ "        else:\n",
+ "            dist = self.get_dist(pos_a, (self.obstacles[0].rect.midtop)) / math.sqrt(SCREEN_HEIGHT**2 + SCREEN_WIDTH**2)\n",
+ "            obs_height = self.obstacles[0].rect.midtop[1] / self.dino.Y_DUCK_POS\n",
+ "            obj_width = self.obstacles[0].rect.width / SMALL_CACTUS[2].get_rect().width\n",
+ "            if self.obstacles[0].__class__ == SmallCactus(SMALL_CACTUS).__class__ or \\\n",
+ "               self.obstacles[0].__class__ == LargeCactus(LARGE_CACTUS).__class__:\n",
+ "                cactus = 1\n",
+ "            else:\n",
+ "                bird = 1\n",
+ "\n",
+ "        state.append(dist)\n",
+ "        state.append(obs_height)\n",
+ "        state.append(self.game_speed / 24)\n",
+ "        state.append(obj_width)\n",
+ "        state.append(cactus)\n",
+ "        state.append(bird)\n",
+ "\n",
+ "        return state\n",
+ "\n",
+ "    def update_score(self):\n",
+ "        self.points += 1\n",
+ "        if self.points % 200 == 0:\n",
+ "            self.game_speed += 1\n",
+ "\n",
+ "        if self.points > self.high_score:\n",
+ "            self.high_score = self.points\n",
+ "\n",
+ "        text = self.font.render(f\"Points: {self.points} Highscore: {self.high_score}\", True, (0, 0, 0))\n",
+ "        textRect = text.get_rect()\n",
+ "        textRect.center = (SCREEN_WIDTH - textRect.width // 2 - 10, 40)\n",
+ "        self.SCREEN.blit(text, textRect)\n",
+ "\n",
+ "    def create_obstacle(self):\n",
+ "        # bird_prob = random.randint(0, 15)\n",
+ "        # cactus_prob = random.randint(0, 10)\n",
+ "        # if bird_prob == 0:\n",
+ "        #     self.obstacles.append(Bird(BIRD))\n",
+ "        # elif cactus_prob == 0:\n",
+ "        #     self.obstacles.append(SmallCactus(SMALL_CACTUS))\n",
+ "        # elif cactus_prob == 1:\n",
+ "        #     self.obstacles.append(LargeCactus(LARGE_CACTUS))\n",
+ "\n",
+ "        obstacle_prob = random.randint(0, 50)\n",
+ "        if obstacle_prob == 0:\n",
+ "            self.obstacles.append(SmallCactus(SMALL_CACTUS))\n",
+ "        elif obstacle_prob == 1:\n",
+ "            self.obstacles.append(LargeCactus(LARGE_CACTUS))\n",
+ "        elif obstacle_prob == 2 and self.points > 300:\n",
+ "            self.obstacles.append(Bird(BIRD))\n",
+ "\n",
+ "    def update_game(self, moves, user_input=None):\n",
+ "        self.dino.draw(self.SCREEN)\n",
+ "        if user_input is not None:\n",
+ "            self.dino.update(user_input)\n",
+ "        else:\n",
+ "            self.dino.update_auto(moves)\n",
+ "\n",
+ "        self.update_background()\n",
+ "\n",
+ "        self.cloud.draw(self.SCREEN)\n",
+ "\n",
+ "        self.cloud.update(self.game_speed)\n",
+ "\n",
+ "        self.update_score()\n",
+ "\n",
+ "        self.clock.tick(30)\n",
+ "\n",
+ "    def play_manual(self):\n",
+ "\n",
+ "        while self.run:\n",
+ "            for event in pygame.event.get():\n",
+ "                if event.type == pygame.QUIT:\n",
+ "                    sys.exit()\n",
+ "\n",
+ "            self.SCREEN.fill((255, 255, 255))\n",
+ "            user_input = pygame.key.get_pressed()\n",
+ "\n",
+ "            if len(self.obstacles) == 0:\n",
+ "                self.create_obstacle()\n",
+ "\n",
+ "            for obstacle in self.obstacles:\n",
+ "                obstacle.draw(SCREEN=self.SCREEN)\n",
+ "                obstacle.update(self.obstacles, self.game_speed)\n",
+ "                if self.dino.dino_rect.colliderect(obstacle.rect):\n",
+ "                    self.dino.score = self.points\n",
+ "                    pygame.quit()\n",
+ "                    self.obstacles.pop()\n",
+ "                    print(\"Game over!\")\n",
+ "                    return\n",
+ "\n",
+ "            self.update_game(user_input=user_input, moves=2)\n",
+ "            pygame.display.update()\n",
+ "\n",
+ "    def play_auto(self):\n",
+ "        state_queue = queue.Queue()\n",
+ "        # Create and start the computation thread\n",
+ "        calculation_thread = threading.Thread(target=self.dino.calculate_action, args=(state_queue, action_queue))\n",
+ "        calculation_thread.start()\n",
+ "\n",
+ "        try:\n",
+ "            points_label = 0\n",
+ "            # Note: uses the global NUM_EPISODES, not self.num_episodes\n",
+ "            for episode in tqdm(range(1, NUM_EPISODES + 1), ascii=True, unit='episodes'):\n",
+ "                episode_reward = 0\n",
+ "                step = 1\n",
+ "                current_state = self.get_state()\n",
+ "                self.run = True\n",
+ "                while self.run:\n",
+ "\n",
+ "                    for event in pygame.event.get():\n",
+ "                        if event.type == pygame.QUIT:\n",
+ "                            sys.exit()\n",
+ "\n",
+ "                    self.SCREEN.fill((255, 255, 255))\n",
+ "\n",
+ "                    if len(self.obstacles) == 0:\n",
+ "                        self.create_obstacle()\n",
+ "\n",
+ "                    if np.random.random() > self.epsilon:\n",
+ "                        action = self.dino.get_qs(torch.Tensor(current_state))\n",
+ "                        action = np.argmax(action)\n",
+ "                    else:\n",
+ "                        # Random exploration: 10% jump, 30% duck, 60% run\n",
+ "                        num = np.random.randint(0, 10)\n",
+ "                        if num == 0:\n",
+ "                            action = num\n",
+ "                        elif num <= 3:\n",
+ "                            action = 1\n",
+ "                        else:\n",
+ "                            action = 2\n",
+ "\n",
+ "                    self.update_game(moves=action)\n",
+ "                    next_state = self.get_state()\n",
+ "                    reward = 0\n",
+ "\n",
+ "                    for obstacle in self.obstacles:\n",
+ "                        obstacle.draw(SCREEN=self.SCREEN)\n",
+ "                        obstacle.update(self.obstacles, self.game_speed)\n",
+ "                        next_state = self.get_state()\n",
+ "                        if self.dino.dino_rect.x > obstacle.rect.x + obstacle.rect.width:\n",
+ "                            reward = 3\n",
+ "\n",
+ "                        if action == 0 and obstacle.rect.x > SCREEN_WIDTH // 2:\n",
+ "                            reward = -1\n",
+ "\n",
+ "                        if self.dino.dino_rect.colliderect(obstacle.rect):\n",
+ "                            self.dino.score = self.points\n",
+ "                            self.obstacles.pop()\n",
+ "                            points_label = self.points\n",
+ "                            self.reset()\n",
+ "                            reward = -10\n",
+ "                            self.run = False\n",
+ "                            break\n",
+ "                    current_state = self.get_state()\n",
+ "                    state_queue.put(current_state)  # Send the current state to the computation thread\n",
+ "\n",
+ "                    if not action_queue.empty():\n",
+ "                        action = action_queue.get()  # Get the action back from the computation thread\n",
+ "                        self.update_game(moves=action)\n",
+ "\n",
+ "                    episode_reward += reward\n",
+ "\n",
+ "                    # The fifth element is self.run (True while the game continues); train() unpacks it as 'done'\n",
+ "                    self.dino.update_replay_memory(tuple([current_state, action, reward, next_state, self.run]))\n",
+ "\n",
+ "                    self.dino.train(not self.run, step=step)\n",
+ "\n",
+ "                    current_state = next_state\n",
+ "\n",
+ "                    step += 1\n",
+ "\n",
+ "                    # After each frame, check for a new best score\n",
+ "                    if self.points > self.best_score:\n",
+ "                        self.best_score = self.points\n",
+ "                        # This file is overwritten with the latest best model\n",
+ "                        self.best_model_filename = 'models/highscore/BestScore_model.pth'\n",
+ "                        torch.save(self.dino.model.state_dict(), self.best_model_filename)\n",
+ "\n",
+ "                    pygame.display.update()\n",
+ "\n",
+ "                self.ep_rewards.append(episode_reward)\n",
+ "\n",
+ "                # Get the current date and time\n",
+ "                current_time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n",
+ "\n",
+ "                # Save the model every 50 episodes\n",
+ "                if episode % 50 == 0:\n",
+ "                    filename = f'models/episodes/{points_label}_Points,Episode_{episode}_Date_{current_time}_model.pth'\n",
+ "                    torch.save(self.dino.model.state_dict(), filename)\n",
+ "\n",
+ "                if self.epsilon > MIN_EPSILON:\n",
+ "                    self.epsilon *= EPSILON_DECAY\n",
+ "                    if self.epsilon < MIN_EPSILON:\n",
+ "                        self.epsilon = 0\n",
+ "                else:\n",
+ "                    self.epsilon = max(MIN_EPSILON, self.epsilon)\n",
+ "            # On leaving the game loop, send None to stop the computation thread\n",
+ "            state_queue.put(None)\n",
+ "            calculation_thread.join()\n",
+ "        finally:\n",
+ "            # This block runs even if the game is interrupted.\n",
+ "            # Here we duplicate the file with the best score reached so far.\n",
+ "            if hasattr(self, 'best_model_filename'):\n",
+ "                current_time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n",
+ "                final_model_filename = f'models/highscore/{self.best_score}_BestScore_Final_{current_time}_model.pth'\n",
+ "                import shutil\n",
+ "                shutil.copy(self.best_model_filename, final_model_filename)\n",
+ "                print(f\"Duplicate model saved as: {final_model_filename}\")\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The code has two game modes:\n",
+ " - Manual mode: the user plays, controlling the dinosaur with the keyboard.\n",
+ " - Automatic mode: the agent controls the dinosaur based on the decisions made by the Q-Learning model.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "id": "2c04f7ce",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def run_game(epsilon, learning_rate, num_episodes):\n",
+ "    model_path = 'models/highscore/4245_BestScore_Final_2023-12-10_18-43-53_model.pth'\n",
+ "    game = Game(epsilon, learning_rate=learning_rate, num_episodes=num_episodes, load_model=True, model_path=model_path)\n",
+ "    game.play_auto()\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "  0%|          | 0/100 [00:00<?, ?episodes/s]"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|##########| 100/100 [22:16<00:00, 13.37s/episodes] \n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Duplicate model saved as: models/highscore/3836_BestScore_Final_2023-12-10_19-53-01_model.pth\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|##########| 100/100 [26:16<00:00, 15.77s/episodes]\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Duplicate model saved as: models/highscore/3813_BestScore_Final_2023-12-10_20-19-18_model.pth\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|##########| 100/100 [27:43<00:00, 16.64s/episodes]\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Duplicate model saved as: models/highscore/3683_BestScore_Final_2023-12-10_20-47-01_model.pth\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|##########| 100/100 [30:23<00:00, 18.23s/episodes]\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Duplicate model saved as: models/highscore/3629_BestScore_Final_2023-12-10_21-17-25_model.pth\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|##########| 100/100 [28:44<00:00, 17.24s/episodes] \n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Duplicate model saved as: models/highscore/3783_BestScore_Final_2023-12-10_21-46-09_model.pth\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|##########| 100/100 [21:19<00:00, 12.79s/episodes]\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Duplicate model saved as: models/highscore/4157_BestScore_Final_2023-12-10_22-07-29_model.pth\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ " 52%|#####2    | 52/100 [15:01<31:07, 38.90s/episodes] "
+ ]
+ }
+ ],
+ "source": [
+ "epsilons = [0.1, 0.2, 0.3]  # different epsilon values to try\n",
+ "learning_rates = [0.001, 0.005, 0.01]  # different learning rates\n",
+ "num_episodes_list = [100, 200, 300]  # different numbers of episodes\n",
+ "\n",
+ "for epsilon in epsilons:\n",
+ "    for lr in learning_rates:\n",
+ "        for num_episodes in num_episodes_list:\n",
+ "            run_game(epsilon, lr, num_episodes)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.9"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
models/Episode_100_Points_68_2023-12-01_15-13-08_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:daa17921df65261d3b627ba35687064f67ed4bef11b7f64a989e0f77eacd2a7b
+ size 2704
models/Episode_100_Points_71_2023-12-01_15-43-17_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17cdc1274df22f476c99579fc82fa5f9d34f29da45e0be27fec69b5f94d05069
+ size 2704
models/Episode_150_Points_203_2023-12-01_15-19-12_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4cb8993206afb2761fdaf7d308391c8b15199f7f435cf14d7495cb1904889ee8
+ size 2712
models/Episode_150_Points_77_2023-12-01_15-47-31_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f72c04bd89720eb92c4ceecdf74a4581654535d61130bc99ffba6f96fa0c65f
+ size 2704
models/Episode_200_Points_225_2023-12-01_15-51-38_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:108ad001f868673c47e445c1260b7b6c41dbeafeb18ccfeeb683b01e96800d87
+ size 2712
models/Episode_200_Points_76_2023-12-01_15-26-34_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:790d4e57e6508dd4654a46d9f222bba62633909321e7d500e478fb73230367b6
+ size 2704
models/Episode_250_Points_192_2023-12-01_15-55-41_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf951fbcb5ea2bd0d67469f336bec15505f4fa586d22ccb5a6ad3d0757d9186f
+ size 2712
models/Episode_250_Points_352_2023-12-01_15-33-57_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47729429ba5ada9a9004a4ad22cff65c504ad5d92366e06d2cf9cabfe744951d
+ size 2712
models/Episode_300_Points_65_2023-12-01_15-58-47_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc2178b63236047c1adce9ddb0ed759be09cd66ed825649575dfbba7e9989190
+ size 2704
models/Episode_350_Points_75_2023-12-01_16-02-41_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd86058ca02c5fe157616211a30481e1992bb24f130978dd6f93b110aba5d9be
+ size 2704
models/Episode_400_Points_82_2023-12-01_16-06-52_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7936520e16b9ee9a3faf0cac0e8cb1f2700de11eb8bdd9a07897dc2e7c655f65
+ size 2704
models/Episode_50_Points_100_2023-12-01_15-39-38_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2861356abb0deb518d3c2b4b8ccb60a422c9137b73ca48e0ce0e485d0e5201d0
+ size 2704
models/Episode_50_Points_102_2023-12-01_14-51-42_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:789b98b684cf13e2de1e871af7be660defcc339840182600b0fbd9d366b5a520
+ size 2704
models/Episode_50_Points_345_2023-12-01_15-07-35_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15529848641109a13f04e1f8dfa34a28ef6573736f502fc8ded8a93ed7cf5d0b
+ size 2704
models/Episode_50_Points_73_2023-12-01_14-11-02_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8637a1f3f81ff75e91bc086e56bca970abc4473420d3c52d96ae8696bf3b5af0
+ size 2696
models/episodes/106_Points,Episode_50_Date_2023-12-10_20-10-06_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3668f4936c856ff84290fc8fb94f0f8e533ce48a539d3f2bad01d52f63619753
+ size 2744
models/episodes/115_Points,Episode_50_Date_2023-12-10_20-57-02_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07b25f5d5809f9574ea3a1128ba6f80960c0751387c7f0f5fd83f4c0a59ec49b
+ size 2744
models/episodes/116_Points,Episode_50_Date_2023-12-10_21-53-05_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a123e193f3f84a703b7312eac5c3e1a59f6047b5baec2111bdc1acc8262bb728
+ size 2744
models/episodes/1193_Points,Episode_100_Date_2023-12-02_00-08-52_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab8046bf5a31628b55b54724d7ddafd33b195e545c52f6ce723d8e706b8f8abc
+ size 2824
models/episodes/124_Points,Episode_100_Date_2023-12-10_20-47-01_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d0be3e7279448ad9580edd67761317f5743dca78d6aec5a381a9060d24d6cf9
+ size 2752
models/episodes/171_Points,Episode_50_Date_2023-12-10_17-50-12_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6533b52c2142d5576070bf1b9a7384c5d7636ab2c595b1f7a5ebc7ce086d3d7
+ size 2744
models/episodes/1825_Points,Episode_50_Date_2023-12-02_08-46-15_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07949644ed79fa7ee2c2500dbc7ad2feed726139fdc02b69cb450860312cb81f
+ size 2752
models/episodes/187_Points,Episode_100_Date_2023-12-10_21-46-09_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2f6b08f51f288fab4d2fa538ec9ddd8e47dae2993df131dd3a5cfd5a4d5f764
+ size 2752
models/episodes/190_Points,Episode_100_Date_2023-12-10_18-43-53_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52f14c7202e199421d3415aa526ad20ea7067bdc9c9f9be720672ab82a98f050
+ size 2752
models/episodes/2006_Points,Episode_50_Date_2023-12-10_22-20-30_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6766a1138885fb7df97fb06d0de5d7f374795aa31ed4fbd61650ec7cb7910dd
+ size 2752
models/episodes/211_Points,Episode_50_Date_2023-12-10_19-41-01_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:014ef2e3c290b72992a8ead535a4a62a45abf4cbbff2b85708fc112bd3e0802a
+ size 2744
models/episodes/215_Points,Episode_50_Date_2023-12-10_20-30-31_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4ab69f5937e9f5148c9fa5c2e38f731ec0f67cf0cbeb50767534d312e8da07f
+ size 2744
models/episodes/342_Points,Episode_100_Date_2023-12-10_19-53-01_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bae91a7c39e062b2bc0b0a4bbbbdc36c6df2a70735c08f4d38888b39067a082b
+ size 2752
models/episodes/3608_Points,Episode_50_Date_2023-12-01_23-21-56_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9839b3c29873f50abcee0d5ad9dd9e1c7fd676432184a82744dd3769064380ca
+ size 2752
models/episodes/3630_Points,Episode_100_Date_2023-12-10_21-17-25_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f4c98884c476a4233ddede45a55204cc6e8d34e32793dfb7f9732e320aaabad
+ size 2824