Create Netflix_Recommendation_Notebook_Code
Netflix_Recommendation_Notebook_Code
ADDED
@@ -0,0 +1,42 @@
# Ran on Kaggle
!pip install sentence-transformers
!pip install torch

import torch
from sentence_transformers import SentenceTransformer
import numpy as np
import pandas as pd
from tqdm import tqdm  # For tracking progress across batches

# Check if a GPU is available
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

# Load the Netflix shows dataset
dataset = pd.read_csv('/kaggle/input/d/infamouscoder/dataset-netflix-shows/netflix_titles.csv')

# Load the embedding model onto the GPU if available
model = SentenceTransformer("all-MiniLM-L6-v2").to(device)

# Combine description, genre, and title into a single text field for embedding
def combine_description_title_and_genre(description, listed_in, title):
    return f"{description} Genre: {listed_in} Title: {title}"

# Create the combined text column
dataset['combined_text'] = dataset.apply(
    lambda row: combine_description_title_and_genre(row['description'], row['listed_in'], row['title']),
    axis=1
)

# Generate embeddings in batches to limit memory use
batch_size = 32
embeddings = []

for i in tqdm(range(0, len(dataset), batch_size), desc="Generating Embeddings"):
    batch_texts = dataset['combined_text'][i:i + batch_size].tolist()
    batch_embeddings = model.encode(batch_texts, convert_to_tensor=True, device=device)
    embeddings.extend(batch_embeddings.cpu().numpy())  # Move to CPU to free GPU memory

# Convert the list of vectors to a single numpy array
embeddings = np.array(embeddings)

# Save embeddings and the metadata needed to map rows back to titles
np.save("/kaggle/working/netflix_embeddings.npy", embeddings)
dataset[['show_id', 'title', 'description', 'listed_in']].to_csv("/kaggle/working/netflix_metadata.csv", index=False)