{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "0dd1298de3c84ea3ab8ed31b2a0b2888",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import torch\n",
"from multiprocessing import set_start_method\n",
"from transformers import Blip2Processor, Blip2ForConditionalGeneration\n",
"from datasets import load_dataset\n",
"\n",
"# Load BLIP-2 model and processor\n",
"processor = Blip2Processor.from_pretrained(\"Salesforce/blip2-opt-2.7b\")\n",
"model = Blip2ForConditionalGeneration.from_pretrained(\"Salesforce/blip2-opt-2.7b\", torch_dtype=torch.float16)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"def gpu_computation(batch, rank):\n",
" device = f\"cuda:{(rank or 0) % torch.cuda.device_count()}\"\n",
" model.to(device)\n",
" inputs = processor(images=batch[\"image\"], return_tensors=\"pt\").to(device, torch.float16)\n",
"\n",
" with torch.no_grad():\n",
" generated_ids = model.generate(**inputs, max_length=51)\n",
" \n",
" batch[\"caption\"] = processor.batch_decode(generated_ids, skip_special_tokens=True)\n",
" return batch"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "61fe62d696904a7c894bd2c6f082b426",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Map: 0%| | 0/10 [00:00<?, ? examples/s]"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import multiprocessing\n",
"\n",
"\n",
"if __name__ == \"__main__\":\n",
" # Check if start method is already set\n",
" try:\n",
" multiprocessing.get_start_method()\n",
" except RuntimeError:\n",
" multiprocessing.set_start_method(\"spawn\")\n",
"\n",
" # Load your dataset\n",
" dataset = load_dataset(\"visual-layer/oxford-iiit-pet-vl-enriched\", split=\"train\")\n",
" dataset = dataset.select(range(10))\n",
"\n",
" updated_dataset = dataset.map(\n",
" gpu_computation,\n",
" batched=True,\n",
" batch_size=4, # Adjust based on your GPU memory\n",
" with_rank=True,\n",
" num_proc=torch.cuda.device_count(), # one process per GPU\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['a cat walking on grass\\n',\n",
" 'a white dog playing with a ball\\n',\n",
" 'a dog sitting in the grass\\n',\n",
" 'a dog laying in the grass\\n',\n",
" 'a dog standing in the snow\\n',\n",
" 'a dog laying in the grass\\n',\n",
" 'a dog laying on a brick sidewalk\\n',\n",
" 'a man holding a black dog\\n',\n",
" 'a large dog standing in the grass\\n',\n",
" 'a pug dog with its tongue out standing on a tiled floor\\n']"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"updated_dataset['caption']"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 2
}