V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] Output code:

# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align

from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
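This dump is Inductor's `output_code` logging artifact (note the `[__output_code]` tag and the `torch/_inductor/graph.py` origin in the header line above). As a rough sketch of how such a listing can be captured, assuming any module compiled with torch.compile; the stand-in model and shapes below are hypothetical and not taken from this log:

import torch

# Enable the "output_code" artifact log (equivalent to running with TORCH_LOGS="output_code").
torch._logging.set_logs(output_code=True)

# Hypothetical stand-in module; any compiled model will do.
model = torch.nn.Linear(256, 1152, device="cuda", dtype=torch.bfloat16)
compiled = torch.compile(model, mode="max-autotune")
compiled(torch.randn(2, 256, device="cuda", dtype=torch.bfloat16))  # first call triggers compilation and the dump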
# kernel path: /tmp/torchinductor_sayak/kx/ckxvjo3hhiznc4ieahqn6ez5oq2jhe6or6jnoamh6yry3pp64dc5.py
# Source Nodes: [emb_3], Original ATen: [aten.cat]
# emb_3 => cat
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[512],
    filename=__file__,
    triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 256
    x2 = xindex
    tmp5 = tl.load(in_ptr0 + (0))
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 128, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp0.to(tl.float32)
    tmp9 = -9.210340371976184
    tmp10 = tmp8 * tmp9
    tmp11 = 0.0078125
    tmp12 = tmp10 * tmp11
    tmp13 = tl_math.exp(tmp12)
    tmp14 = tmp7 * tmp13
    tmp15 = 1.0
    tmp16 = tmp14 * tmp15
    tmp17 = tl_math.sin(tmp16)
    tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
    tmp19 = tl.where(tmp4, tmp17, tmp18)
    tmp20 = tmp0 >= tmp3
    tmp21 = tl.full([1], 256, tl.int64)
    tmp22 = tmp0 < tmp21
    tmp23 = (-128) + x0
    tmp24 = tmp23.to(tl.float32)
    tmp25 = tmp24 * tmp9
    tmp26 = tmp25 * tmp11
    tmp27 = tl_math.exp(tmp26)
    tmp28 = tmp7 * tmp27
    tmp29 = tmp28 * tmp15
    tmp30 = tl_math.cos(tmp29)
    tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
    tmp32 = tl.where(tmp20, tmp30, tmp31)
    tmp33 = tl.where(tmp4, tmp19, tmp32)
    tl.store(out_ptr0 + (x2), tmp33, xmask)
''', device_str='cuda')

import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
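The first kernel, triton_poi_fused_cat_0, is the sinusoidal timestep embedding collapsed into one pointwise kernel: -9.210340371976184 is -ln(10000), 0.0078125 is 1/128, the first 128 output channels get sin and the last 128 get cos, and the single loaded timestep (the load at in_ptr0 + 0) is broadcast to a 512-element, i.e. (2, 256), fp32 output. A rough eager-mode sketch of the same math; the function name and the scalar timestep are assumptions, not read from the graph:

import math
import torch

def sinusoidal_timestep_embedding(timestep: torch.Tensor, dim: int = 256) -> torch.Tensor:
    # Hypothetical eager equivalent of triton_poi_fused_cat_0 (fp32 output).
    half = dim // 2                                   # 128, so the 0.0078125 constant is 1/128
    freqs = torch.exp(torch.arange(half, dtype=torch.float32) * (-math.log(10000.0) / half))
    args = timestep.float() * freqs                   # the kernel's extra scale factor is 1.0
    return torch.cat([torch.sin(args), torch.cos(args)], dim=-1)

emb = sinusoidal_timestep_embedding(torch.tensor(999))   # shape (256,)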

# kernel path: /tmp/torchinductor_sayak/k4/ck4ppbsxlgqceo5nzd4foh4sn22blcqdmh5samnqzwpazyxckw45.py
# Source Nodes: [emb_4, to_2], Original ATen: [aten._to_copy, aten.cat]
# emb_4 => cat_1
# to_2 => convert_element_type_3
triton_poi_fused__to_copy_cat_1 = async_compile.triton('triton_poi_fused__to_copy_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[512],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*bf16', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 256
    x1 = (xindex // 256)
    x2 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 128, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (128 + (256*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 256, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tl.load(in_ptr0 + ((256*x1) + ((-128) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tmp11 = tmp10.to(tl.float32)
    tl.store(out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')
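triton_poi_fused__to_copy_cat_1 only reorders and casts: it swaps the sin and cos halves of the 256-wide embedding (the two masked loads at offsets 128 + 256*x1 + x0 and 256*x1 + (x0 - 128)) and converts the fp32 result to the bf16 output buffer for the matmul that follows. In eager terms, roughly, with a hypothetical function name:

import torch

def flip_and_cast(emb: torch.Tensor) -> torch.Tensor:
    # Hypothetical eager equivalent of triton_poi_fused__to_copy_cat_1 for a (2, 256) fp32 input.
    half = emb.shape[-1] // 2
    return torch.cat([emb[:, half:], emb[:, :half]], dim=-1).to(torch.bfloat16)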

# kernel path: /tmp/torchinductor_sayak/ka/cka2dphtyw3dob6bgq3hy534npzwz7j3ch2zhk6fzpuf56kaqtjr.py
# Source Nodes: [emb_4, sample, sample_1, to_2], Original ATen: [aten._to_copy, aten.add, aten.cat, aten.mm, aten.mul, aten.silu, aten.view]
# emb_4 => cat_1
# sample => add_2, mixed_mm_285, mul_5, view_1
# sample_1 => convert_element_type_7, convert_element_type_8, mul_6, sigmoid
# to_2 => convert_element_type_3
triton_tem_fused__to_copy_add_cat_mm_mul_silu_view_2 = async_compile.triton('triton_tem_fused__to_copy_add_cat_mm_mul_silu_view_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.template(
    num_stages=1,
    num_warps=2,
    triton_meta={'signature': {0: '*bf16', 1: '*i8', 2: '*bf16', 3: '*bf16', 4: '*bf16'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'kernel_name': 'triton_tem_fused__to_copy_add_cat_mm_mul_silu_view_2', 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True},
)
@triton.jit
def triton_tem_fused__to_copy_add_cat_mm_mul_silu_view_2(arg_A, arg_B, in_ptr2, in_ptr3, out_ptr1):
    GROUP_M : tl.constexpr = 8
    EVEN_K : tl.constexpr = True
    ALLOW_TF32 : tl.constexpr = True
    ACC_TYPE : tl.constexpr = tl.float32
    B_PROLOGUE_CAST_TYPE : tl.constexpr = tl.bfloat16
    BLOCK_M : tl.constexpr = 16
    BLOCK_N : tl.constexpr = 32
    BLOCK_K : tl.constexpr = 32
    A = arg_A
    B = arg_B

    M = 2
    N = 1152
    K = 256
    if M * N == 0:
        # early exit due to zero-size input(s)
        return
    stride_am = 256
    stride_ak = 1
    stride_bk = 1
    stride_bn = 256

    # based on triton.ops.matmul
    pid = tl.program_id(0)
    grid_m = (M + BLOCK_M - 1) // BLOCK_M
    grid_n = (N + BLOCK_N - 1) // BLOCK_N

    # re-order program ID for better L2 performance
    width = GROUP_M * grid_n
    group_id = pid // width
    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
    pid_m = group_id * GROUP_M + (pid % group_size)
    pid_n = (pid % width) // (group_size)

    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    if (stride_am == 1 and stride_ak == M) or (stride_am == K and stride_ak == 1):
        ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
    else:
        ram = rm % M
    if (stride_bk == 1 and stride_bn == K) or (stride_bk == N and stride_bn == 1):
        rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
    else:
        rbn = rn % N
    rk = tl.arange(0, BLOCK_K)
    A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
    B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)

    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
    for k in range(K, 0, -BLOCK_K):
        if EVEN_K:
            a = tl.load(A)
            b = tl.load(B)
        else:
            a = tl.load(A, mask=rk[None, :] < k, other=0.)
            b = tl.load(B, mask=rk[:, None] < k, other=0.)
        if B_PROLOGUE_CAST_TYPE is not None:
            b = b.to(B_PROLOGUE_CAST_TYPE)
        acc += tl.dot(a, b, allow_tf32=ALLOW_TF32)
        A += BLOCK_K * stride_ak
        B += BLOCK_K * stride_bk

    # rematerialize rm and rn to save registers
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    idx_m = rm[:, None]
    idx_n = rn[None, :]
    mask = (idx_m < M) & (idx_n < N)

    # inductor generates a suffix
    xindex = idx_n + (1152*idx_m)
    tmp0 = tl.load(in_ptr2 + (tl.broadcast_to(idx_n, acc.shape)), mask, eviction_policy='evict_last').to(tl.float32)
    tmp2 = tl.load(in_ptr3 + (tl.broadcast_to(idx_n, acc.shape)), mask, eviction_policy='evict_last').to(tl.float32)
    tmp1 = acc * tmp0
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3.to(tl.float32)
    tmp5 = tl.sigmoid(tmp4)
    tmp6 = tmp4 * tmp5
    tmp7 = tmp6.to(tl.float32)
    tl.store(out_ptr1 + (tl.broadcast_to(idx_n + (1152*idx_m), acc.shape)), tmp7, mask)
''', device_str='cuda')
import torch._inductor.kernel.mm_common
meta0 = {'GROUP_M': 8, 'EVEN_K': True, 'ALLOW_TF32': True, 'ACC_TYPE': 'tl.float32', 'B_PROLOGUE_CAST_TYPE': 'tl.bfloat16', 'BLOCK_M': 16, 'BLOCK_N': 32, 'BLOCK_K': 32}
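triton_tem_fused__to_copy_add_cat_mm_mul_silu_view_2 is an instance of Inductor's autotuned matmul template (the mixed_mm node): A is the bf16 (2, 256) embedding, B is an int8 weight tile that is upcast to bf16 in the prologue (B_PROLOGUE_CAST_TYPE), and the generated suffix multiplies the fp32 accumulator by what looks like a per-output-channel scale, adds a bias, applies SiLU, and stores bf16. A rough eager sketch of the fused computation; the scale and bias naming is an assumption:

import torch
import torch.nn.functional as F

def int8_weight_linear_silu(x, w_int8, scale, bias):
    # Hypothetical eager equivalent of the fused mm + epilogue above.
    acc = x.float() @ w_int8.to(torch.bfloat16).float().t()   # fp32 accumulator, like ACC_TYPE
    y = acc * scale.float() + bias.float()                    # per-channel scale, then bias
    return F.silu(y).to(torch.bfloat16)                       # y * sigmoid(y), stored as bf16

x = torch.randn(2, 256, dtype=torch.bfloat16)
w = torch.randint(-127, 127, (1152, 256), dtype=torch.int8)   # weight stored as (N, K)
out = int8_weight_linear_silu(x, w, torch.rand(1152), torch.zeros(1152))   # (2, 1152) bf16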

# kernel path: /tmp/torchinductor_sayak/av/cav3jia36ikceb3vhavfyh6ofagck6av7flxjdbrt7el33cz6la7.py
# Source Nodes: [sample, sample_1, sample_2, silu_1], Original ATen: [aten.add, aten.mm, aten.mul, aten.silu, aten.view]
# sample => add_2, mul_5
# sample_1 => convert_element_type_7, convert_element_type_8, mul_6, sigmoid
# sample_2 => add_3, mixed_mm_284, mul_7, view_5
# silu_1 => convert_element_type_12, convert_element_type_13, mul_8, sigmoid_1
triton_tem_fused_add_mm_mul_silu_view_3 = async_compile.triton('triton_tem_fused_add_mm_mul_silu_view_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.template(
    num_stages=5,
    num_warps=4,
    triton_meta={'signature': {0: '*bf16', 1: '*i8', 2: '*bf16', 3: '*bf16', 4: '*bf16', 5: '*bf16'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'kernel_name': 'triton_tem_fused_add_mm_mul_silu_view_3', 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True},
)
@triton.jit
def triton_tem_fused_add_mm_mul_silu_view_3(arg_A, arg_B, in_ptr2, in_ptr3, out_ptr0, out_ptr1):
    GROUP_M : tl.constexpr = 8
    EVEN_K : tl.constexpr = True
    ALLOW_TF32 : tl.constexpr = True
    ACC_TYPE : tl.constexpr = tl.float32
    B_PROLOGUE_CAST_TYPE : tl.constexpr = tl.bfloat16
    BLOCK_M : tl.constexpr = 16
    BLOCK_N : tl.constexpr = 64
    BLOCK_K : tl.constexpr = 32
    A = arg_A
    B = arg_B

    M = 2
    N = 1152
    K = 1152
    if M * N == 0:
        # early exit due to zero-size input(s)
        return
    stride_am = 1152
    stride_ak = 1
    stride_bk = 1
    stride_bn = 1152

    # based on triton.ops.matmul
    pid = tl.program_id(0)
    grid_m = (M + BLOCK_M - 1) // BLOCK_M
    grid_n = (N + BLOCK_N - 1) // BLOCK_N

    # re-order program ID for better L2 performance
    width = GROUP_M * grid_n
    group_id = pid // width
    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
    pid_m = group_id * GROUP_M + (pid % group_size)
    pid_n = (pid % width) // (group_size)

    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    if (stride_am == 1 and stride_ak == M) or (stride_am == K and stride_ak == 1):
        ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
    else:
        ram = rm % M
    if (stride_bk == 1 and stride_bn == K) or (stride_bk == N and stride_bn == 1):
        rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
    else:
        rbn = rn % N
    rk = tl.arange(0, BLOCK_K)
    A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
    B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)

    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
    for k in range(K, 0, -BLOCK_K):
        if EVEN_K:
            a = tl.load(A)
            b = tl.load(B)
        else:
            a = tl.load(A, mask=rk[None, :] < k, other=0.)
            b = tl.load(B, mask=rk[:, None] < k, other=0.)
        if B_PROLOGUE_CAST_TYPE is not None:
            b = b.to(B_PROLOGUE_CAST_TYPE)
        acc += tl.dot(a, b, allow_tf32=ALLOW_TF32)
        A += BLOCK_K * stride_ak
        B += BLOCK_K * stride_bk

    # rematerialize rm and rn to save registers
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    idx_m = rm[:, None]
    idx_n = rn[None, :]
    mask = (idx_m < M) & (idx_n < N)

    # inductor generates a suffix
    xindex = idx_n + (1152*idx_m)
    tl.store(out_ptr0 + (tl.broadcast_to(xindex, acc.shape)), acc, mask)
    tmp0 = tl.load(in_ptr2 + (tl.broadcast_to(idx_n, acc.shape)), mask, eviction_policy='evict_last').to(tl.float32)
    tmp2 = tl.load(in_ptr3 + (tl.broadcast_to(idx_n, acc.shape)), mask, eviction_policy='evict_last').to(tl.float32)
    tmp1 = acc * tmp0
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3.to(tl.float32)
    tmp5 = tl.sigmoid(tmp4)
    tmp6 = tmp4 * tmp5
    tmp7 = tmp6.to(tl.float32)
    tl.store(out_ptr1 + (tl.broadcast_to(idx_n + (1152*idx_m), acc.shape)), tmp7, mask)
''', device_str='cuda')
meta1 = {'GROUP_M': 8, 'EVEN_K': True, 'ALLOW_TF32': True, 'ACC_TYPE': 'tl.float32', 'B_PROLOGUE_CAST_TYPE': 'tl.bfloat16', 'BLOCK_M': 16, 'BLOCK_N': 64, 'BLOCK_K': 32}
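triton_tem_fused_add_mm_mul_silu_view_3 is the same template again (K = 1152 this time, and the raw fp32 accumulator is additionally written to out_ptr0). All of these template instances share the GROUP_M swizzle behind the "re-order program ID for better L2 performance" comment: consecutive program IDs are grouped over up to GROUP_M row-blocks before advancing to the next column-block, which improves reuse of A and B tiles in L2. A small pure-Python illustration of that index arithmetic; the grid sizes are made up (with M = 2 here, grid_m is 1 and the swizzle is effectively a no-op):

GROUP_M, grid_m, grid_n = 8, 16, 4    # illustrative values, not the ones launched above

def swizzle(pid):
    width = GROUP_M * grid_n
    group_id = pid // width
    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
    pid_m = group_id * GROUP_M + (pid % group_size)
    pid_n = (pid % width) // group_size
    return pid_m, pid_n

# pids 0..7 map to (0..7, 0) and pids 8..15 to (0..7, 1): a column of 8 M-blocks
# is visited before moving on to the next N-block.
print([swizzle(pid) for pid in range(16)])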

# kernel path: /tmp/torchinductor_sayak/od/cod6uyckepiuioqqiwmjickjh5edtsml7ou52lb6y6pviosrpn4s.py
# Source Nodes: [sample_2, silu_1, timestep], Original ATen: [aten.add, aten.mm, aten.mul, aten.silu, aten.view]
# sample_2 => add_3, mul_7
# silu_1 => convert_element_type_12, convert_element_type_13, mul_8, sigmoid_1
# timestep => mixed_mm_283, view_9
triton_tem_fused_add_mm_mul_silu_view_4 = async_compile.triton('triton_tem_fused_add_mm_mul_silu_view_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.template(
    num_stages=5,
    num_warps=8,
    triton_meta={'signature': {0: '*bf16', 1: '*i8', 2: '*bf16'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'kernel_name': 'triton_tem_fused_add_mm_mul_silu_view_4', 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True},
)
@triton.jit
def triton_tem_fused_add_mm_mul_silu_view_4(arg_A, arg_B, out_ptr0):
    GROUP_M : tl.constexpr = 8
    EVEN_K : tl.constexpr = True
    ALLOW_TF32 : tl.constexpr = True
    ACC_TYPE : tl.constexpr = tl.float32
    B_PROLOGUE_CAST_TYPE : tl.constexpr = tl.bfloat16
    BLOCK_M : tl.constexpr = 16
    BLOCK_N : tl.constexpr = 128
    BLOCK_K : tl.constexpr = 64
    A = arg_A
    B = arg_B

    M = 2
    N = 6912
    K = 1152
    if M * N == 0:
        # early exit due to zero-size input(s)
        return
    stride_am = 1152
    stride_ak = 1
    stride_bk = 1
    stride_bn = 1152

    # based on triton.ops.matmul
    pid = tl.program_id(0)
    grid_m = (M + BLOCK_M - 1) // BLOCK_M
    grid_n = (N + BLOCK_N - 1) // BLOCK_N

    # re-order program ID for better L2 performance
    width = GROUP_M * grid_n
    group_id = pid // width
    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
    pid_m = group_id * GROUP_M + (pid % group_size)
    pid_n = (pid % width) // (group_size)

    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    if (stride_am == 1 and stride_ak == M) or (stride_am == K and stride_ak == 1):
        ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
    else:
        ram = rm % M
    if (stride_bk == 1 and stride_bn == K) or (stride_bk == N and stride_bn == 1):
        rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
    else:
        rbn = rn % N
    rk = tl.arange(0, BLOCK_K)
    A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
    B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)

    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
    for k in range(K, 0, -BLOCK_K):
        if EVEN_K:
            a = tl.load(A)
            b = tl.load(B)
        else:
            a = tl.load(A, mask=rk[None, :] < k, other=0.)
            b = tl.load(B, mask=rk[:, None] < k, other=0.)
        if B_PROLOGUE_CAST_TYPE is not None:
            b = b.to(B_PROLOGUE_CAST_TYPE)
        acc += tl.dot(a, b, allow_tf32=ALLOW_TF32)
        A += BLOCK_K * stride_ak
        B += BLOCK_K * stride_bk

    # rematerialize rm and rn to save registers
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    idx_m = rm[:, None]
    idx_n = rn[None, :]
    mask = (idx_m < M) & (idx_n < N)

    # inductor generates a suffix
    xindex = idx_n + (6912*idx_m)
    tl.store(out_ptr0 + (tl.broadcast_to(xindex, acc.shape)), acc, mask)
''', device_str='cuda')
meta2 = {'GROUP_M': 8, 'EVEN_K': True, 'ALLOW_TF32': True, 'ACC_TYPE': 'tl.float32', 'B_PROLOGUE_CAST_TYPE': 'tl.bfloat16', 'BLOCK_M': 16, 'BLOCK_N': 128, 'BLOCK_K': 64}
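triton_tem_fused_add_mm_mul_silu_view_4 projects the 1152-wide embedding to 6912 outputs with the same int8-weight template, this time with no fused epilogue beyond the store. The recurring pattern in these mm kernels, bf16 activations against '*i8' weights that are upcast in the prologue, is what weight-only int8 quantization of the linear layers typically lowers to. As a purely hypothetical example (the log does not say which quantization API produced this model), such kernels can arise from something like:

import torch
from torchao.quantization import quantize_, int8_weight_only   # assumed API, for illustration only

model = torch.nn.Sequential(
    torch.nn.Linear(256, 1152), torch.nn.SiLU(), torch.nn.Linear(1152, 1152)
).to(device="cuda", dtype=torch.bfloat16)

quantize_(model, int8_weight_only())                   # int8 weights, bf16 activations
compiled = torch.compile(model, mode="max-autotune")   # Inductor can then emit mixed_mm templates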
# kernel path: /tmp/torchinductor_sayak/co/ccojw2zwsmu35geawpgiwntosfe5kmpuwlpjjbly3f2ysklbi4mz.py
# Source Nodes: [latent], Original ATen: [aten.convolution]
# latent => convolution
triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[8, 16384], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*bf16', 1: '*bf16', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 8
    xnumel = 16384
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 4
    y1 = (yindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2 + (16384*y3)), ymask, eviction_policy='evict_last').to(tl.float32)
    tl.store(out_ptr0 + (y0 + (4*x2) + (65536*y1)), tmp0, ymask)
''', device_str='cuda')

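# In eager terms, triton_poi_fused_convolution_5 just repacks the (2, 4, 128, 128)
# NCHW latent into channels-last memory so the conv template below can index it
# with stride_xc == 1. Shapes are read off the kernel (ynumel = 2*4,
# xnumel = 128*128); the tensor name is illustrative, not from the graph.
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
latent = torch.randn(2, 4, 128, 128, dtype=torch.bfloat16, device=device)
latent_cl = latent.contiguous(memory_format=torch.channels_last)
assert latent_cl.stride() == (65536, 1, 512, 4)  # matches stride_xn/xc/xh/xw below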
# kernel path: /tmp/torchinductor_sayak/s3/cs3lnjpwytywl6ts7oltpssxxvj4lj67fjfz3cinq25jmy32iele.py
# Source Nodes: [latent], Original ATen: [aten.convolution]
# latent => convolution
triton_tem_fused_convolution_6 = async_compile.triton('triton_tem_fused_convolution_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.template(
    num_stages=2,
    num_warps=4,
    triton_meta={'signature': {0: '*bf16', 1: '*bf16', 2: '*bf16'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'kernel_name': 'triton_tem_fused_convolution_6', 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True},
)
@triton.jit
def triton_tem_fused_convolution_6(arg_X, arg_W, out_ptr0):
    KERNEL_H : tl.constexpr = 2
    KERNEL_W : tl.constexpr = 2
    STRIDE_H : tl.constexpr = 2
    STRIDE_W : tl.constexpr = 2
    PADDING_H : tl.constexpr = 0
    PADDING_W : tl.constexpr = 0
    GROUPS : tl.constexpr = 1
    UNROLL : tl.constexpr = False
    ALLOW_TF32 : tl.constexpr = True
    BLOCK_M : tl.constexpr = 64
    BLOCK_N : tl.constexpr = 256
    BLOCK_K : tl.constexpr = 16
    X = arg_X
    W = arg_W

    # Tensor dimensions
    BATCH = 2
    IN_C = 4
    IN_H = 128
    IN_W = 128
    OUT_C = 1152
    OUT_H = 64
    OUT_W = 64

    # Strides:
    stride_xn = 65536
    stride_xc = 1
    stride_xh = 512
    stride_xw = 4
    stride_wc_out = 16
    stride_wc_in = 1
    stride_wh = 8
    stride_ww = 4

    nhw = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
    idx_y_w = nhw % OUT_W
    nh = nhw // OUT_W
    idx_y_h = nh % OUT_H
    idx_n = nh // OUT_H
    idx_y_c = tl.program_id(1) * BLOCK_N + tl.arange(0, BLOCK_N)

    group = 0
    GROUP_IN_C = IN_C
    GROUP_OUT_C = OUT_C

    x_base = X + (group * stride_xc * GROUP_IN_C + idx_n * stride_xn)[:, None]
    w_base = (
        W + (group * stride_wc_out * GROUP_OUT_C + idx_y_c * stride_wc_out)[None, :]
    )

    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)

    # Could be simplified, but slightly slower:
    # for i in range(KERNEL_H):
    #     for j in range(KERNEL_W):
    #         for k in range(0, GROUP_IN_C, BLOCK_K):
    BLOCK_K_COUNT = (GROUP_IN_C + BLOCK_K - 1) // BLOCK_K
    for ijk in range(KERNEL_H * KERNEL_W * BLOCK_K_COUNT):
        k = (ijk % BLOCK_K_COUNT) * BLOCK_K
        ij = ijk // BLOCK_K_COUNT
        i = ij // KERNEL_W
        j = ij % KERNEL_W

        idx_x_h = i - PADDING_H + idx_y_h * STRIDE_H
        idx_x_w = j - PADDING_W + idx_y_w * STRIDE_W
        idx_x_c = tl.arange(0, BLOCK_K) + k

        x_ptrs = x_base + (
            (idx_x_h * stride_xh)[:, None]
            + (idx_x_w * stride_xw)[:, None]
            + (idx_x_c * stride_xc)[None, :]
        )
        mask_x = (
            (idx_n < BATCH)[:, None]
            & (idx_x_h >= 0)[:, None]
            & (idx_x_h < IN_H)[:, None]
            & (idx_x_w >= 0)[:, None]
            & (idx_x_w < IN_W)[:, None]
            & (idx_x_c < GROUP_IN_C)[None, :]
        )
        matrix_x = tl.load(x_ptrs, mask=mask_x, other=0.0)

        w_ptrs = w_base + (
            (idx_x_c * stride_wc_in)[:, None] + (i * stride_wh) + (j * stride_ww)
        )
        mask_w = (idx_x_c[:, None] < GROUP_IN_C) & (idx_y_c[None, :] < GROUP_OUT_C)
        matrix_w = tl.load(w_ptrs, mask=mask_w, other=0.0)
        acc += tl.dot(matrix_x, matrix_w, allow_tf32=ALLOW_TF32)

    mask = (
        (idx_n < BATCH)[:, None]
        & (idx_y_h < OUT_H)[:, None]
        & (idx_y_w < OUT_W)[:, None]
        & (idx_y_c < GROUP_OUT_C)[None, :]
    )
    idx_n = idx_n[:, None]
    idx_c = idx_y_c[None, :] + group * GROUP_OUT_C
    idx_h = idx_y_h[:, None]
    idx_w = idx_y_w[:, None]

    # inductor generates a suffix
    xindex = idx_w + (64*idx_h) + (4096*idx_c) + (4718592*idx_n)
    tl.store(out_ptr0 + (tl.broadcast_to(idx_c + (1152*idx_w) + (73728*idx_h) + (4718592*idx_n), acc.shape)), acc, mask)
''', device_str='cuda')
import torch._inductor.kernel.conv
meta3 = {'KERNEL_H': 2, 'KERNEL_W': 2, 'STRIDE_H': 2, 'STRIDE_W': 2, 'PADDING_H': 0, 'PADDING_W': 0, 'GROUPS': 1, 'UNROLL': False, 'ALLOW_TF32': True, 'BLOCK_M': 64, 'BLOCK_N': 256, 'BLOCK_K': 16}

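# Eager-mode equivalent of the convolution the template above implements: a 2x2,
# stride-2, unpadded conv taking the (2, 4, 128, 128) latent to (2, 1152, 64, 64),
# i.e. a ViT-style patch embedding. bias=None here since the bias addition
# appears in the later fused pointwise kernels; tensor names are illustrative.
import torch
import torch.nn.functional as F

device = "cuda" if torch.cuda.is_available() else "cpu"
x = torch.randn(2, 4, 128, 128, dtype=torch.bfloat16, device=device)
w = torch.randn(1152, 4, 2, 2, dtype=torch.bfloat16, device=device)
out = F.conv2d(x, w, bias=None, stride=2, padding=0)
print(out.shape)  # torch.Size([2, 1152, 64, 64]); viewed as (2, 4096, 1152) downstream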
# kernel path: /tmp/torchinductor_sayak/eq/ceqaw6ogtldd7wludnmzoyzevycc4r6zdhh3gnslfmewbfk7gbii.py
# Source Nodes: [add, add_2, mul_4, norm_hidden_states, norm_hidden_states_1], Original ATen: [aten.add, aten.mul, aten.native_layer_norm]
# add => add
# add_2 => add_11
# mul_4 => mul_19
# norm_hidden_states => add_10, convert_element_type_25, convert_element_type_26, mul_18, rsqrt, sub_1, var_mean
# norm_hidden_states_1 => add_12
triton_red_fused_add_mul_native_layer_norm_7 = async_compile.triton('triton_red_fused_add_mul_native_layer_norm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.reduction(
    size_hints=[8192, 2048],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*bf16', 1: '*bf16', 2: '*bf16', 3: '*bf16', 4: '*bf16', 5: '*bf16', 6: '*bf16', 7: '*bf16', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_mul_native_layer_norm_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 14, 'num_reduction': 2, 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True}
)
@triton.jit
def triton_red_fused_add_mul_native_layer_norm_7(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
    xnumel = 8192
    rnumel = 1152
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rbase = tl.arange(0, RBLOCK)[None, :]
    x3 = xindex
    x0 = xindex % 4096
    tmp7_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp7_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp7_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex
        tmp0 = tl.load(in_ptr0 + (r2 + (1152*x3)), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp1 = tl.load(in_ptr1 + (r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp3 = tl.load(in_ptr2 + (r2 + (1152*x0)), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp2 = tmp0 + tmp1
        tmp4 = tmp2 + tmp3
        tmp5 = tmp4.to(tl.float32)
        tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
        tmp7_mean_next, tmp7_m2_next, tmp7_weight_next = triton_helpers.welford_reduce(
            tmp6, tmp7_mean, tmp7_m2, tmp7_weight, roffset == 0
        )
        tmp7_mean = tl.where(rmask, tmp7_mean_next, tmp7_mean)
        tmp7_m2 = tl.where(rmask, tmp7_m2_next, tmp7_m2)
        tmp7_weight = tl.where(rmask, tmp7_weight_next, tmp7_weight)
    tmp7_tmp, tmp8_tmp, tmp9_tmp = triton_helpers.welford(
        tmp7_mean, tmp7_m2, tmp7_weight, 1
    )
    tmp7 = tmp7_tmp[:, None]
    tmp8 = tmp8_tmp[:, None]
    tmp9 = tmp9_tmp[:, None]
    x1 = (xindex // 4096)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex
        tmp10 = tl.load(in_ptr0 + (r2 + (1152*x3)), rmask, eviction_policy='evict_first', other=0.0).to(tl.float32)
        tmp11 = tl.load(in_ptr1 + (r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp13 = tl.load(in_ptr2 + (r2 + (1152*x0)), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp24 = tl.load(in_ptr3 + (1152 + r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp25 = tl.load(in_ptr4 + (1152 + r2 + (6912*x1)), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp26 = tl.load(in_ptr5 + (1152 + r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp28 = tl.load(in_ptr6 + (1152 + r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp34 = tl.load(in_ptr3 + (r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp35 = tl.load(in_ptr4 + (r2 + (6912*x1)), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp36 = tl.load(in_ptr5 + (r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp38 = tl.load(in_ptr6 + (r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp12 = tmp10 + tmp11
        tmp14 = tmp12 + tmp13
        tmp15 = tmp14.to(tl.float32)
        tmp16 = tmp15 - tmp7
        tmp17 = 1152.0
        tmp18 = tmp8 / tmp17
        tmp19 = 1e-06
        tmp20 = tmp18 + tmp19
        tmp21 = libdevice.rsqrt(tmp20)
        tmp22 = tmp16 * tmp21
        tmp23 = tmp22.to(tl.float32)
        tmp27 = tmp25 * tmp26
        tmp29 = tmp27 + tmp28
        tmp30 = tmp24 + tmp29
        tmp31 = 1.0
        tmp32 = tmp30 + tmp31
        tmp33 = tmp23 * tmp32
        tmp37 = tmp35 * tmp36
        tmp39 = tmp37 + tmp38
        tmp40 = tmp34 + tmp39
        tmp41 = tmp33 + tmp40
        tl.store(in_out_ptr0 + (r2 + (1152*x3)), tmp41, rmask)
''', device_str='cuda')

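# A rough eager-mode sketch of what the fused reduction above computes per row:
# residual-add the patch embedding, LayerNorm over the 1152 channels (no affine,
# eps=1e-6), then apply an adaLN-style modulation, norm * (1 + scale) + shift,
# where shift/scale are the first two 1152-wide chunks of a per-sample 6*1152
# conditioning vector (visible as the r2 and 1152 + r2 loads with stride 6912).
# Variable names are illustrative, not taken from the graph.
import torch
import torch.nn.functional as F

def norm_and_modulate(hidden, proj_bias, pos_embed, mod):
    # hidden: (B, 4096, 1152), proj_bias: (1152,), pos_embed: (4096, 1152), mod: (B, 6, 1152)
    x = hidden + proj_bias + pos_embed                      # the adds feeding var_mean
    x = F.layer_norm(x.float(), (1152,), eps=1e-6).to(hidden.dtype)
    shift = mod[:, 0].unsqueeze(1)                          # chunk 0 of 6
    scale = mod[:, 1].unsqueeze(1)                          # chunk 1 of 6
    return x * (1 + scale) + shift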
# kernel path: /tmp/torchinductor_sayak/uz/cuzzky36j5opdum3juz3of6725l2z25xqqngykpdr7qvmut6ioc7.py
# Source Nodes: [hidden_states_4], Original ATen: [aten._scaled_dot_product_cudnn_attention]
# hidden_states_4 => _scaled_dot_product_cudnn_attention
triton_poi_fused__scaled_dot_product_cudnn_attention_8 = async_compile.triton('triton_poi_fused__scaled_dot_product_cudnn_attention_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16777216],
    filename=__file__,
    triton_meta={'signature': {0: '*bf16', 1: '*bf16', 2: '*bf16', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__scaled_dot_product_cudnn_attention_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__scaled_dot_product_cudnn_attention_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 9437184
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 1152
    tmp0 = tl.load(in_out_ptr0 + (x2), None).to(tl.float32)
    tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last').to(tl.float32)
    tmp3 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last').to(tl.float32)
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')

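# The pointwise kernel above is just a per-channel scale-and-shift applied in
# place to a (2, 4096, 1152) projection that feeds the cuDNN SDPA call; a
# minimal sketch of that computation (tensor names are illustrative, not from
# the graph):
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
proj = torch.randn(2, 4096, 1152, dtype=torch.bfloat16, device=device)
scale = torch.randn(1152, dtype=torch.bfloat16, device=device)
shift = torch.randn(1152, dtype=torch.bfloat16, device=device)
proj.mul_(scale).add_(shift)   # tmp4 = tmp0 * tmp1 + tmp3, broadcast over batch and sequence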
# kernel path: /tmp/torchinductor_sayak/3c/c3ctkppqdhjrzvakm62lqlful43l5avkxmbnjjbpefhmygidtq22.py
# Source Nodes: [hidden_states_5], Original ATen: [aten.clone]
# hidden_states_5 => clone
triton_poi_fused_clone_9 = async_compile.triton('triton_poi_fused_clone_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16777216],
    filename=__file__,
    triton_meta={'signature': {0: '*bf16', 1: '*bf16', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 9437184
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 72
    x1 = (xindex // 72) % 16
    x2 = (xindex // 1152) % 4096
    x3 = (xindex // 4718592)
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (72*x2) + (294912*x1) + (4718592*x3)), None).to(tl.float32)
    tl.store(out_ptr0 + (x4), tmp0, None)
''', device_str='cuda')

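# The clone above is the usual head merge after attention: the (2, 16, 4096, 72)
# SDPA output (batch, heads, seq, head_dim; strides 4718592/294912/72/1 in the
# load) is transposed to (2, 4096, 16, 72) and materialized contiguously so it
# can be viewed as (2, 4096, 1152). A minimal sketch with an illustrative name:
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
attn = torch.randn(2, 16, 4096, 72, dtype=torch.bfloat16, device=device)
merged = attn.transpose(1, 2).contiguous().view(2, 4096, 16 * 72)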
# kernel path: /tmp/torchinductor_sayak/de/cdeclsyjtrafugvlfphszwlstmzxi3jt3e3gsjzstwkipkixpklq.py
# Source Nodes: [add, attn_output, hidden_states_10, hidden_states_9], Original ATen: [aten.add, aten.div, aten.mul]
# add => add
# attn_output => mul_24
# hidden_states_10 => add_17
# hidden_states_9 => div_1
triton_poi_fused_add_div_mul_10 = async_compile.triton('triton_poi_fused_add_div_mul_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16777216],
    filename=__file__,
    triton_meta={'signature': {0: '*bf16', 1: '*bf16', 2: '*bf16', 3: '*bf16', 4: '*bf16', 5: '*bf16', 6: '*bf16', 7: '*bf16', 8: '*bf16', 9: '*bf16', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
    xnumel = 9437184
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 1152
    x2 = (xindex // 4718592)
    x4 = xindex
    x5 = xindex % 4718592
    tmp0 = tl.load(in_ptr0 + (2304 + x0), None, eviction_policy='evict_last').to(tl.float32)
    tmp1 = tl.load(in_ptr1 + (2304 + x0 + (6912*x2)), None, eviction_policy='evict_last').to(tl.float32)
    tmp2 = tl.load(in_ptr2 + (2304 + x0), None, eviction_policy='evict_last').to(tl.float32)
    tmp4 = tl.load(in_ptr3 + (2304 + x0), None, eviction_policy='evict_last').to(tl.float32)
    tmp7 = tl.load(in_out_ptr0 + (x4), None).to(tl.float32)
    tmp8 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last').to(tl.float32)
    tmp10 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last').to(tl.float32)
    tmp15 = tl.load(in_ptr6 + (x4), None).to(tl.float32)
    tmp16 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last').to(tl.float32)
    tmp18 = tl.load(in_ptr8 + (x5), None, eviction_policy='evict_last').to(tl.float32)
    tmp3 = tmp1 * tmp2
    tmp5 = tmp3 + tmp4
    tmp6 = tmp0 + tmp5
    tmp9 = tmp7 * tmp8
    tmp11 = tmp9 + tmp10
    tmp12 = 1.0
    tmp13 = tmp11 * tmp12
    tmp14 = tmp6 * tmp13
    tmp17 = tmp15 + tmp16
    tmp19 = tmp17 + tmp18
    tmp20 = tmp14 + tmp19
    tl.store(in_out_ptr0 + (x4), tmp20, None)
''', device_str='cuda')

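# A rough eager-mode sketch of the fused add/div/mul above: the attention branch
# gets a per-channel affine, is multiplied by the gate chunk (offset 2304, i.e.
# chunk 2 of the 6*1152 modulation) and by the 1/rescale factor (1.0 here), and
# is added back onto the residual stream (patch embedding + conv bias +
# positional embedding). Names are illustrative, not taken from the graph.
def gated_residual(attn_out, out_scale, out_bias, gate, patches, conv_bias, pos_embed):
    # attn_out/patches: (B, 4096, 1152); gate: (B, 1152); the rest broadcast over (B, 4096)
    residual = patches + conv_bias + pos_embed
    attn = (attn_out * out_scale + out_bias) * 1.0
    return gate.unsqueeze(1) * attn + residual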
# kernel path: /tmp/torchinductor_sayak/qq/cqq5cdirnwkosjztnifquajcxlmvrvsubqazfoqu7an5alxz6p6k.py
# Source Nodes: [hidden_states_1, hidden_states_2], Original ATen: [aten.gelu, aten.mm]
# hidden_states_1 => mixed_mm_277
# hidden_states_2 => add_6, add_7, convert_element_type_20, convert_element_type_21, mul_11, mul_12, mul_13, mul_14, mul_15, mul_16, tanh
triton_tem_fused_gelu_mm_11 = async_compile.triton('triton_tem_fused_gelu_mm_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.template(
    num_stages=5,
    num_warps=4,
    triton_meta={'signature': {0: '*bf16', 1: '*i8', 2: '*bf16', 3: '*bf16', 4: '*bf16'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'kernel_name': 'triton_tem_fused_gelu_mm_11', 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True},
)
@triton.jit
def triton_tem_fused_gelu_mm_11(arg_A, arg_B, in_ptr2, in_ptr3, out_ptr1):
    GROUP_M : tl.constexpr = 8
    EVEN_K : tl.constexpr = True
    ALLOW_TF32 : tl.constexpr = True
    ACC_TYPE : tl.constexpr = tl.float32
    B_PROLOGUE_CAST_TYPE : tl.constexpr = tl.bfloat16
    BLOCK_M : tl.constexpr = 64
    BLOCK_N : tl.constexpr = 64
    BLOCK_K : tl.constexpr = 128
    A = arg_A
    B = arg_B

    M = 600
    N = 1152
    K = 4096
    if M * N == 0:
        # early exit due to zero-size input(s)
        return
    stride_am = 4096
    stride_ak = 1
    stride_bk = 1
    stride_bn = 4096

    # based on triton.ops.matmul
    pid = tl.program_id(0)
    grid_m = (M + BLOCK_M - 1) // BLOCK_M
    grid_n = (N + BLOCK_N - 1) // BLOCK_N

    # re-order program ID for better L2 performance
    width = GROUP_M * grid_n
    group_id = pid // width
    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
    pid_m = group_id * GROUP_M + (pid % group_size)
    pid_n = (pid % width) // (group_size)

    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    if (stride_am == 1 and stride_ak == M) or (stride_am == K and stride_ak == 1):
        ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
    else:
        ram = rm % M
    if (stride_bk == 1 and stride_bn == K) or (stride_bk == N and stride_bn == 1):
        rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
    else:
        rbn = rn % N
    rk = tl.arange(0, BLOCK_K)
    A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
    B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)

    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
    for k in range(K, 0, -BLOCK_K):
        if EVEN_K:
            a = tl.load(A)
            b = tl.load(B)
        else:
            a = tl.load(A, mask=rk[None, :] < k, other=0.)
            b = tl.load(B, mask=rk[:, None] < k, other=0.)
        if B_PROLOGUE_CAST_TYPE is not None:
            b = b.to(B_PROLOGUE_CAST_TYPE)
        acc += tl.dot(a, b, allow_tf32=ALLOW_TF32)
        A += BLOCK_K * stride_ak
        B += BLOCK_K * stride_bk

    # rematerialize rm and rn to save registers
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    idx_m = rm[:, None]
    idx_n = rn[None, :]
    mask = (idx_m < M) & (idx_n < N)

    # inductor generates a suffix
    xindex = idx_n + (1152*idx_m)
    tmp0 = tl.load(in_ptr2 + (tl.broadcast_to(idx_n, acc.shape)), mask, eviction_policy='evict_last').to(tl.float32)
    tmp2 = tl.load(in_ptr3 + (tl.broadcast_to(idx_n, acc.shape)), mask, eviction_policy='evict_last').to(tl.float32)
    tmp1 = acc * tmp0
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3.to(tl.float32)
    tmp5 = 0.5
    tmp6 = tmp4 * tmp5
    tmp7 = tmp4 * tmp4
    tmp8 = tmp7 * tmp4
    tmp9 = 0.044715
    tmp10 = tmp8 * tmp9
    tmp11 = tmp4 + tmp10
    tmp12 = 0.7978845608028654
    tmp13 = tmp11 * tmp12
    tmp14 = libdevice.tanh(tmp13)
    tmp15 = 1.0
torch/_inductor/graph.py:1780] [0/0] [__output_code] tmp16 = tmp14 + tmp15 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] tmp17 = tmp6 * tmp16 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] tmp18 = tmp17.to(tl.float32) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] tl.store(out_ptr1 + (tl.broadcast_to(idx_n + (1152*idx_m), acc.shape)), tmp18, mask) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] ''', device_str='cuda') V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] meta4 = {'GROUP_M': 8, 'EVEN_K': True, 'ALLOW_TF32': True, 'ACC_TYPE': 'tl.float32', 'B_PROLOGUE_CAST_TYPE': 'tl.bfloat16', 'BLOCK_M': 64, 'BLOCK_N': 64, 'BLOCK_K': 128} V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # kernel path: /tmp/torchinductor_sayak/en/cenakj56p2tlzbwtknu223cdm6opr5gfdxvhaesw2fxp42o2h6pz.py V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_3], Original ATen: [aten.add] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # hidden_states_3 => add_8 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_add_12 = async_compile.triton('triton_poi_fused_add_12', ''' V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] import triton V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] import triton.language as tl V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] from triton.compiler.compiler import AttrsDescriptor V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] from torch._inductor.runtime import triton_helpers, triton_heuristics V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] @triton_heuristics.pointwise( V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] size_hints=[1048576], V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] filename=__file__, V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_meta={'signature': {0: '*bf16', 1: '*bf16', 2: '*bf16', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_12', 'mutated_arg_names': ['in_out_ptr0'], 
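# Hand-written reference sketch (not Inductor output) for what triton_tem_fused_gelu_mm_11
# computes: a bf16 x int8 matmul whose int8 operand is upcast in the prologue
# (B_PROLOGUE_CAST_TYPE), followed by a per-column multiply/add and the tanh-approximate
# GELU from the suffix. The argument names and the reading of in_ptr2/in_ptr3 as a
# per-column scale and bias are inferred from the epilogue code, not taken from the log.
def _reference_mixed_mm_gelu(A_bf16, B_int8, col_scale, col_bias):
    import torch
    # A: (600, 4096) bf16; B: (4096, 1152) view of an int8 weight (strides 1, 4096 in the kernel)
    acc = A_bf16 @ B_int8.to(torch.bfloat16)                # fp32 accumulation inside the kernel
    y = acc.float() * col_scale.float() + col_bias.float()  # suffix: acc * in_ptr2 + in_ptr3
    return torch.nn.functional.gelu(y, approximate='tanh').to(torch.bfloat16)
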
# kernel path: /tmp/torchinductor_sayak/en/cenakj56p2tlzbwtknu223cdm6opr5gfdxvhaesw2fxp42o2h6pz.py
# Source Nodes: [hidden_states_3], Original ATen: [aten.add]
# hidden_states_3 => add_8
triton_poi_fused_add_12 = async_compile.triton('triton_poi_fused_add_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1048576],
    filename=__file__,
    triton_meta={'signature': {0: '*bf16', 1: '*bf16', 2: '*bf16', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_12(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 691200
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 1152
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask).to(tl.float32)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last').to(tl.float32)
    tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last').to(tl.float32)
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')

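# Reference sketch (hand-written, illustrative names) of triton_poi_fused_add_12: an
# in-place, per-last-dim-channel multiply/add over a (..., 1152) bf16 tensor
# (691200 = 600 * 1152 elements here). Functionally:
def _reference_fused_add_12(x, col_scale, col_bias):
    # the kernel mutates its first argument (mutated_arg_names=['in_out_ptr0']);
    # this sketch just returns the equivalent value, broadcasting over the last dim
    return x * col_scale + col_bias
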
# kernel path: /tmp/torchinductor_sayak/y7/cy7uos3clxuatewpxd6p6hbqevsthx2iskx5zixrk367diamryvg.py
# Source Nodes: [key_2, key_6, value_2, value_6], Original ATen: [aten.mm]
# key_2 => mixed_mm_275
# key_6 => mixed_mm_265
# value_2 => mixed_mm_274
# value_6 => mixed_mm_264
triton_poi_fused_mm_13 = async_compile.triton('triton_poi_fused_mm_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1048576],
    filename=__file__,
    triton_meta={'signature': {0: '*bf16', 1: '*bf16', 2: '*bf16', 3: '*bf16', 4: '*bf16', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mm_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mm_13(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
    xnumel = 691200
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 1152
    x1 = (xindex // 1152)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (1152*x1) + (345600*((x1 % 300) // 300))), xmask).to(tl.float32)
    tl.store(out_ptr0 + (x2), tmp0, xmask)
    tl.store(out_ptr1 + (x2), tmp0, xmask)
    tl.store(out_ptr2 + (x2), tmp0, xmask)
    tl.store(out_ptr3 + (x2), tmp0, xmask)
''', device_str='cuda')

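# Note on triton_poi_fused_mm_13 (editorial, not part of the generated module): the load
# index contains 345600*((x1 % 300) // 300), which is always zero for integer x1, so the
# kernel is effectively a contiguous copy of one bf16 buffer into four identical outputs,
# the K/V operands of the four mixed mms named in the source nodes. A minimal sketch with
# hypothetical names:
def _reference_fanout_copy_13(src):
    # src: 691200 contiguous bf16 elements; the four clones feed key_2 / key_6 / value_2 / value_6
    return src.clone(), src.clone(), src.clone(), src.clone()
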
# kernel path: /tmp/torchinductor_sayak/vi/cvi62zxotjcncijppau52dxzd6rteio363jy2eac3cyt6qr4vvza.py
# Source Nodes: [hidden_states_11, hidden_states_30], Original ATen: [aten.constant_pad_nd]
# hidden_states_11 => constant_pad_nd
# hidden_states_30 => constant_pad_nd_1
triton_poi_fused_constant_pad_nd_14 = async_compile.triton('triton_poi_fused_constant_pad_nd_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*i64', 1: '*bf16', 2: '*bf16', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_14(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 9728
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 304
    x2 = (xindex // 4864)
    x3 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 300, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = tl.load(in_ptr0 + (x0 + (300*x2)), tmp2 & xmask, eviction_policy='evict_last', other=0.0)
    tmp4 = tmp3.to(tl.float32)
    tmp5 = 1.0
    tmp6 = tmp5 - tmp4
    tmp7 = -10000.0
    tmp8 = tmp6 * tmp7
    tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
    tmp10 = tl.where(tmp2, tmp8, tmp9)
    tl.store(out_ptr0 + (x3), tmp10, xmask)
    tl.store(out_ptr1 + (x3), tmp10, xmask)
''', device_str='cuda')

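# Reference sketch (hand-written, illustrative names) of triton_poi_fused_constant_pad_nd_14:
# it turns an integer key mask of length 300 into an additive attention bias (0 where
# attended, -10000 where masked) and right-pads the last dimension from 300 to 304 columns
# with zeros; the kernel writes the same result to two output buffers.
def _reference_pad_attention_bias_14(mask_int):
    import torch
    import torch.nn.functional as F
    bias = (1.0 - mask_int.to(torch.float32)) * -10000.0
    return F.pad(bias.to(torch.bfloat16), (0, 4))  # constant_pad_nd on the last dim, pad value 0.0
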
# kernel path: /tmp/torchinductor_sayak/ds/cdsj7w6oskiff3ntzknpr3q2krjxocj5jutwpo4v3ak4775hocsz.py
# Source Nodes: [add_6, hidden_states_16, hidden_states_17, mul_6, norm_hidden_states_2, norm_hidden_states_3], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
# add_6 => add_24
# hidden_states_16 => div_2
# hidden_states_17 => add_22
# mul_6 => mul_30
# norm_hidden_states_2 => add_23, convert_element_type_51, convert_element_type_52, mul_29, rsqrt_1, sub_2, var_mean_1
# norm_hidden_states_3 => add_25
triton_red_fused_add_div_mul_native_layer_norm_15 = async_compile.triton('triton_red_fused_add_div_mul_native_layer_norm_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.reduction(
    size_hints=[8192, 2048],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*bf16', 1: '*bf16', 2: '*bf16', 3: '*bf16', 4: '*bf16', 5: '*bf16', 6: '*bf16', 7: '*bf16', 8: '*bf16', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_mul_native_layer_norm_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 2, 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True}
)
@triton.jit
def triton_red_fused_add_div_mul_native_layer_norm_15(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
    xnumel = 8192
    rnumel = 1152
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex
    tmp11_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp11_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp11_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp0 = tl.load(in_ptr0 + (r1 + (1152*x0)), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp1 = tl.load(in_ptr1 + (r1), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp3 = tl.load(in_ptr2 + (r1), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp7 = tl.load(in_ptr3 + (r1 + (1152*x0)), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp2 = tmp0 * tmp1
        tmp4 = tmp2 + tmp3
        tmp5 = 1.0
        tmp6 = tmp4 * tmp5
        tmp8 = tmp6 + tmp7
        tmp9 = tmp8.to(tl.float32)
        tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
        tmp11_mean_next, tmp11_m2_next, tmp11_weight_next = triton_helpers.welford_reduce(
            tmp10, tmp11_mean, tmp11_m2, tmp11_weight, roffset == 0
        )
        tmp11_mean = tl.where(rmask, tmp11_mean_next, tmp11_mean)
        tmp11_m2 = tl.where(rmask, tmp11_m2_next, tmp11_m2)
        tmp11_weight = tl.where(rmask, tmp11_weight_next, tmp11_weight)
    tmp11_tmp, tmp12_tmp, tmp13_tmp = triton_helpers.welford(
        tmp11_mean, tmp11_m2, tmp11_weight, 1
    )
    tmp11 = tmp11_tmp[:, None]
    tmp12 = tmp12_tmp[:, None]
    tmp13 = tmp13_tmp[:, None]
    x3 = (xindex // 4096)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp14 = tl.load(in_ptr0 + (r1 + (1152*x0)), rmask, eviction_policy='evict_first', other=0.0).to(tl.float32)
        tmp15 = tl.load(in_ptr1 + (r1), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp17 = tl.load(in_ptr2 + (r1), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp21 = tl.load(in_ptr3 + (r1 + (1152*x0)), rmask, eviction_policy='evict_first', other=0.0).to(tl.float32)
        tmp32 = tl.load(in_ptr4 + (4608 + r1), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp33 = tl.load(in_ptr5 + (4608 + r1 + (6912*x3)), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp34 = tl.load(in_ptr6 + (4608 + r1), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp36 = tl.load(in_ptr7 + (4608 + r1), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp41 = tl.load(in_ptr4 + (3456 + r1), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp42 = tl.load(in_ptr5 + (3456 + r1 + (6912*x3)), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp43 = tl.load(in_ptr6 + (3456 + r1), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp45 = tl.load(in_ptr7 + (3456 + r1), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp16 = tmp14 * tmp15
        tmp18 = tmp16 + tmp17
        tmp19 = 1.0
        tmp20 = tmp18 * tmp19
        tmp22 = tmp20 + tmp21
        tmp23 = tmp22.to(tl.float32)
        tmp24 = tmp23 - tmp11
        tmp25 = 1152.0
        tmp26 = tmp12 / tmp25
        tmp27 = 1e-06
        tmp28 = tmp26 + tmp27
        tmp29 = libdevice.rsqrt(tmp28)
        tmp30 = tmp24 * tmp29
        tmp31 = tmp30.to(tl.float32)
        tmp35 = tmp33 * tmp34
        tmp37 = tmp35 + tmp36
        tmp38 = tmp32 + tmp37
        tmp39 = tmp38 + tmp19
        tmp40 = tmp31 * tmp39
        tmp44 = tmp42 * tmp43
        tmp46 = tmp44 + tmp45
        tmp47 = tmp41 + tmp46
        tmp48 = tmp40 + tmp47
        tl.store(in_out_ptr0 + (r1 + (1152*x0)), tmp48, rmask)
''', device_str='cuda')

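# Reference sketch (hand-written, invented names) of what the reduction kernel above does
# per row: the first loop accumulates mean/variance of the residual-updated hidden states
# with Welford (triton_helpers.welford_reduce); the second loop re-reads the row,
# normalizes it over the 1152 features with eps=1e-06, and applies a scale/shift taken from
# 1152-wide chunks of a 6*1152 modulation buffer (offsets 4608 and 3456 in the kernel),
# i.e. an adaLN-style modulation. This is an approximation for reading the code, not the
# generated implementation.
def _reference_modulated_layer_norm_15(h, scale_chunk, shift_chunk):
    import torch
    h32 = h.float()
    mean = h32.mean(dim=-1, keepdim=True)
    var = h32.var(dim=-1, unbiased=False, keepdim=True)   # matches tmp26 = m2 / 1152.0
    normed = (h32 - mean) * torch.rsqrt(var + 1e-06)
    return normed.to(h.dtype) * (1.0 + scale_chunk) + shift_chunk
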
# kernel path: /tmp/torchinductor_sayak/ms/cmss55drcx6gbnmfqwp7r2mrold3hvnrgh6cxfbkryaamhz6p63e.py
# Source Nodes: [hidden_states_19], Original ATen: [aten.gelu]
# hidden_states_19 => add_27, add_28, convert_element_type_56, convert_element_type_57, mul_32, mul_33, mul_34, mul_35, mul_36, mul_37, tanh_1
triton_poi_fused_gelu_16 = async_compile.triton('triton_poi_fused_gelu_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[67108864],
    filename=__file__,
    triton_meta={'signature': {0: '*bf16', 1: '*bf16', 2: '*bf16', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_16(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 37748736
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 4608
    tmp0 = tl.load(in_out_ptr0 + (x2), None).to(tl.float32)
    tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last').to(tl.float32)
    tmp3 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last').to(tl.float32)
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tmp4.to(tl.float32)
    tmp6 = 0.5
    tmp7 = tmp5 * tmp6
    tmp8 = tmp5 * tmp5
    tmp9 = tmp8 * tmp5
    tmp10 = 0.044715
    tmp11 = tmp9 * tmp10
    tmp12 = tmp5 + tmp11
    tmp13 = 0.7978845608028654
    tmp14 = tmp12 * tmp13
    tmp15 = libdevice.tanh(tmp14)
    tmp16 = 1.0
    tmp17 = tmp15 + tmp16
    tmp18 = tmp7 * tmp17
    tmp19 = tmp18.to(tl.float32)
    tl.store(in_out_ptr0 + (x2), tmp19, None)
''', device_str='cuda')

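# Reference sketch (hand-written, illustrative names) of triton_poi_fused_gelu_16: a
# per-column multiply/add followed by the tanh approximation of GELU,
# 0.5*y*(1 + tanh(sqrt(2/pi)*(y + 0.044715*y^3))), where 0.7978845608028654 = sqrt(2/pi).
def _reference_fused_gelu_16(x, col_scale, col_bias):
    import torch
    y = (x * col_scale + col_bias).float()
    out = 0.5 * y * (1.0 + torch.tanh(0.7978845608028654 * (y + 0.044715 * y * y * y)))
    return out.to(x.dtype)
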
triton_helpers, triton_heuristics V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] @triton_heuristics.reduction( V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] size_hints=[8192, 2048], V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] reduction_hint=ReductionHint.INNER, V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] filename=__file__, V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_meta={'signature': {0: '*bf16', 1: '*bf16', 2: '*bf16', 3: '*bf16', 4: '*bf16', 5: '*bf16', 6: '*bf16', 7: '*bf16', 8: '*bf16', 9: '*bf16', 10: '*bf16', 11: '*bf16', 12: '*bf16', 13: 'i32', 14: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14), equal_to_1=())]}, V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_mul_native_layer_norm_17', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 2, 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True} V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] ) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] @triton.jit V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] def triton_red_fused_add_div_mul_native_layer_norm_17(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] xnumel = 8192 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] rnumel = 1152 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] xoffset = tl.program_id(0) * XBLOCK V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] xindex = xoffset + tl.arange(0, XBLOCK)[:, None] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] rbase = tl.arange(0, 
RBLOCK)[None, :]
    x1 = (xindex // 4096)
    x3 = xindex
    tmp25_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp25_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp25_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex
        tmp0 = tl.load(in_ptr0 + (5760 + r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp1 = tl.load(in_ptr1 + (5760 + r2 + (6912*x1)), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp2 = tl.load(in_ptr2 + (5760 + r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp4 = tl.load(in_ptr3 + (5760 + r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp7 = tl.load(in_out_ptr0 + (r2 + (1152*x3)), rmask, eviction_policy='evict_first', other=0.0).to(tl.float32)
        tmp8 = tl.load(in_ptr4 + (r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp10 = tl.load(in_ptr5 + (r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp13 = tl.load(in_ptr6 + (r2 + (1152*x3)), rmask, eviction_policy='evict_first', other=0.0).to(tl.float32)
        tmp14 = tl.load(in_ptr7 + (r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp16 = tl.load(in_ptr8 + (r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp20 = tl.load(in_ptr9 + (r2 + (1152*x3)), rmask, eviction_policy='evict_first', other=0.0).to(tl.float32)
        tmp3 = tmp1 * tmp2
        tmp5 = tmp3 + tmp4
        tmp6 = tmp0 + tmp5
        tmp9 = tmp7 * tmp8
        tmp11 = tmp9 + tmp10
        tmp12 = tmp6 * tmp11
        tmp15 = tmp13 * tmp14
        tmp17 = tmp15 + tmp16
        tmp18 = 1.0
        tmp19 = tmp17 * tmp18
        tmp21 = tmp19 + tmp20
        tmp22 = tmp12 + tmp21
        tmp23 = tmp22.to(tl.float32)
        tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
        tmp25_mean_next, tmp25_m2_next, tmp25_weight_next = triton_helpers.welford_reduce(
            tmp24, tmp25_mean, tmp25_m2, tmp25_weight, roffset == 0
        )
        tmp25_mean = tl.where(rmask, tmp25_mean_next, tmp25_mean)
        tmp25_m2 = tl.where(rmask, tmp25_m2_next, tmp25_m2)
        tmp25_weight = tl.where(rmask, tmp25_weight_next, tmp25_weight)
        tl.store(in_out_ptr0 + (r2 + (1152*x3)), tmp22, rmask)
    tmp25_tmp, tmp26_tmp, tmp27_tmp = triton_helpers.welford(
        tmp25_mean, tmp25_m2, tmp25_weight, 1
    )
    tmp25 = tmp25_tmp[:, None]
    tmp26 = tmp26_tmp[:, None]
    tmp27 = tmp27_tmp[:, None]
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex
        tmp28 = tl.load(in_out_ptr0 + (r2 + (1152*x3)), rmask, eviction_policy='evict_first', other=0.0).to(tl.float32)
        tmp38 = tl.load(in_ptr10 + (1152 + r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp39 = tl.load(in_ptr1 + (1152 + r2 + (6912*x1)), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp40 = tl.load(in_ptr2 + (1152 + r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp42 = tl.load(in_ptr3 + (1152 + r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp48 = tl.load(in_ptr10 + (r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp49 = tl.load(in_ptr1 + (r2 + (6912*x1)), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp50 = tl.load(in_ptr2 + (r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp52 = tl.load(in_ptr3 + (r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp29 = tmp28.to(tl.float32)
        tmp30 = tmp29 - tmp25
        tmp31 = 1152.0
        tmp32 = tmp26 / tmp31
        tmp33 = 1e-06
        tmp34 = tmp32 + tmp33
        tmp35 = libdevice.rsqrt(tmp34)
        tmp36 = tmp30 * tmp35
        tmp37 = tmp36.to(tl.float32)
        tmp41 = tmp39 * tmp40
        tmp43 = tmp41 + tmp42
        tmp44 = tmp38 + tmp43
        tmp45 = 1.0
        tmp46 = tmp44 + tmp45
        tmp47 = tmp37 * tmp46
        tmp51 = tmp49 * tmp50
        tmp53 = tmp51 + tmp52
        tmp54 = tmp48 + tmp53
        tmp55 = tmp47 + tmp54
        tl.store(out_ptr2 + (r2 + (1152*x3)), tmp55, rmask)
''', device_str='cuda')

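# NOTE: the pointwise kernel below (triton_poi_fused_add_div_mul_18) appears to implement a gated
# attention residual update: it builds a per-batch gate vector as base + emb * scale + shift from a
# 6*1152-wide modulation table (chunk offset 2304), multiplies it with the per-channel affine of the
# attention output held in in_out_ptr0, adds the residual stream loaded from in_ptr6, and stores the
# result back in place. The aten.div listed in the source nodes appears to have been folded into the
# constant multiplier tmp12 = 1.0.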

# kernel path: /tmp/torchinductor_sayak/bc/cbcpepse4mmiv2t7xdafr7glksxsbsaj23ag6vq6ucr2g6vzqky4.py
# Source Nodes: [attn_output_1, hidden_states_28, hidden_states_29], Original ATen: [aten.add, aten.div, aten.mul]
# attn_output_1 => mul_46
# hidden_states_28 => div_3
# hidden_states_29 => add_39
triton_poi_fused_add_div_mul_18 = async_compile.triton('triton_poi_fused_add_div_mul_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16777216],
    filename=__file__,
    triton_meta={'signature': {0: '*bf16', 1: '*bf16', 2: '*bf16', 3: '*bf16', 4: '*bf16', 5: '*bf16', 6: '*bf16', 7: '*bf16', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_18', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_18(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, xnumel, XBLOCK : tl.constexpr):
    xnumel = 9437184
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 1152
    x2 = (xindex // 4718592)
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (2304 + x0), None, eviction_policy='evict_last').to(tl.float32)
    tmp1 = tl.load(in_ptr1 + (2304 + x0 + (6912*x2)), None, eviction_policy='evict_last').to(tl.float32)
    tmp2 = tl.load(in_ptr2 + (2304 + x0), None, eviction_policy='evict_last').to(tl.float32)
    tmp4 = tl.load(in_ptr3 + (2304 + x0), None, eviction_policy='evict_last').to(tl.float32)
    tmp7 = tl.load(in_out_ptr0 + (x4), None).to(tl.float32)
    tmp8 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last').to(tl.float32)
    tmp10 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last').to(tl.float32)
    tmp15 = tl.load(in_ptr6 + (x4), None).to(tl.float32)
    tmp3 = tmp1 * tmp2
    tmp5 = tmp3 + tmp4
    tmp6 = tmp0 + tmp5
    tmp9 = tmp7 * tmp8
    tmp11 = tmp9 + tmp10
    tmp12 = 1.0
    tmp13 = tmp11 * tmp12
    tmp14 = tmp6 * tmp13
    tmp16 = tmp14 + tmp15
    tl.store(in_out_ptr0 + (x4), tmp16, None)
''', device_str='cuda')

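# NOTE: the reduction kernel below (triton_red_fused_add_div_mul_native_layer_norm_19) appears to fuse
# the gated feed-forward residual update with the following LayerNorm and its scale/shift modulation.
# Pass 1 forms hidden = residual + gate * ff_out (the gate again read at chunk offset 5760 of the
# 6*1152 modulation table), accumulates mean/variance over the 1152 channels with a Welford reduction,
# and stores the updated residual in place; pass 2 normalizes with eps=1e-06 and writes
# normed * (1 + scale) + shift to out_ptr2. A rough, purely illustrative PyTorch-level reading
# (tensor names are not taken from the generated code):
#     hidden = residual + gate_mlp * ff_out
#     normed = layer_norm(hidden.float(), (1152,), eps=1e-6).to(hidden.dtype)
#     out = normed * (1 + scale) + shift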

# kernel path: /tmp/torchinductor_sayak/ie/ciepvtq7k7pcuhh3jpcbgeme4j273qnbauzgc3nxqbscrjg3tyz7.py
# Source Nodes: [add_226, ff_output_27, hidden_states_529, hidden_states_530, hidden_states_535, hidden_states_536, hidden_states_537, mul_116], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
# add_226 => add_627
# ff_output_27 => mul_633
# hidden_states_529 => div_56
# hidden_states_530 => add_616
# hidden_states_535 => add_624
# hidden_states_536 => add_626, convert_element_type_1033, convert_element_type_1034, mul_634, rsqrt_56, sub_57, var_mean_56
# hidden_states_537 => add_628
# mul_116 => mul_635
triton_red_fused_add_div_mul_native_layer_norm_19 = async_compile.triton('triton_red_fused_add_div_mul_native_layer_norm_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.reduction(
    size_hints=[8192, 2048],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*bf16', 1: '*bf16', 2: '*bf16', 3: '*bf16', 4: '*bf16', 5: '*bf16', 6: '*bf16', 7: '*bf16', 8: '*bf16', 9: '*bf16', 10: '*bf16', 11: '*bf16', 12: '*bf16', 13: '*bf16', 14: '*bf16', 15: '*bf16', 16: 'i32', 17: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_mul_native_layer_norm_19', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 2, 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True}
)
@triton.jit
def triton_red_fused_add_div_mul_native_layer_norm_19(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
    xnumel = 8192
    rnumel = 1152
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rbase = tl.arange(0, RBLOCK)[None, :]
    x1 = (xindex // 4096)
    x3 = xindex
    tmp25_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp25_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp25_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex
        tmp0 = tl.load(in_ptr0 + (5760 + r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp1 = tl.load(in_ptr1 + (5760 + r2 + (6912*x1)), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp2 = tl.load(in_ptr2 + (5760 + r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp4 = tl.load(in_ptr3 + (5760 + r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp7 = tl.load(in_out_ptr0 + (r2 + (1152*x3)), rmask, eviction_policy='evict_first', other=0.0).to(tl.float32)
        tmp8 = tl.load(in_ptr4 + (r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp10 = tl.load(in_ptr5 + (r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp13 = tl.load(in_ptr6 + (r2 + (1152*x3)), rmask, eviction_policy='evict_first', other=0.0).to(tl.float32)
        tmp14 = tl.load(in_ptr7 + (r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp16 = tl.load(in_ptr8 + (r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp20 = tl.load(in_ptr9 + (r2 + (1152*x3)), rmask, eviction_policy='evict_first', other=0.0).to(tl.float32)
        tmp3 = tmp1 * tmp2
        tmp5 = tmp3 + tmp4
        tmp6 = tmp0 + tmp5
        tmp9 = tmp7 * tmp8
        tmp11 = tmp9 + tmp10
        tmp12 = tmp6 * tmp11
        tmp15 = tmp13 * tmp14
        tmp17 = tmp15 + tmp16
        tmp18 = 1.0
        tmp19 = tmp17 * tmp18
        tmp21 = tmp19 + tmp20
        tmp22 = tmp12 + tmp21
        tmp23 = tmp22.to(tl.float32)
        tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
        tmp25_mean_next, tmp25_m2_next, tmp25_weight_next = triton_helpers.welford_reduce(
            tmp24, tmp25_mean, tmp25_m2, tmp25_weight, roffset == 0
        )
        tmp25_mean = tl.where(rmask, tmp25_mean_next, tmp25_mean)
        tmp25_m2 = tl.where(rmask, tmp25_m2_next, tmp25_m2)
        tmp25_weight = tl.where(rmask, tmp25_weight_next, tmp25_weight)
        tl.store(in_out_ptr0 + (r2 + (1152*x3)), tmp22, rmask)
    tmp25_tmp, tmp26_tmp, tmp27_tmp = triton_helpers.welford(
        tmp25_mean, tmp25_m2, tmp25_weight, 1
    )
    tmp25 = tmp25_tmp[:, None]
    tmp26 = tmp26_tmp[:, None]
    tmp27 = tmp27_tmp[:, None]
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex
        tmp28 = tl.load(in_out_ptr0 + (r2 + (1152*x3)), rmask, eviction_policy='evict_first', other=0.0).to(tl.float32)
        tmp38 = tl.load(in_ptr10 + (1152 + r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp39 = tl.load(in_ptr11 + (r2 + (1152*x1)), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp40 = tl.load(in_ptr12 + (r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp42 = tl.load(in_ptr13 + (r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp48 = tl.load(in_ptr10 + (r2), rmask, eviction_policy='evict_last', other=0.0).to(tl.float32)
        tmp29 = tmp28.to(tl.float32)
        tmp30 = tmp29 - tmp25
        tmp31 = 1152.0
        tmp32 = tmp26 / tmp31
        tmp33 = 1e-06
        tmp34 = tmp32 + tmp33
        tmp35 = libdevice.rsqrt(tmp34)
        tmp36 = tmp30 * tmp35
        tmp37 = tmp36.to(tl.float32)
        tmp41 = tmp39 * tmp40
        tmp43 = tmp41 + tmp42
        tmp44 = tmp38 + tmp43
        tmp45 = 1.0
        tmp46 = tmp44 + tmp45
        tmp47 = tmp37 * tmp46
        tmp49 = tmp48 + tmp43
        tmp50 = tmp47 + tmp49
        tl.store(out_ptr2 + (r2 + (1152*x3)), tmp50, rmask)
''', device_str='cuda')

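# NOTE: the template kernel below (triton_tem_fused_mm_20) is the max-autotune GEMM chosen for the
# mixed_mm node: an (M=8192, K=1152) bf16 activation matrix times a (K=1152, N=32) weight stored as
# int8 and cast to bf16 in the prologue (B_PROLOGUE_CAST_TYPE), accumulating in float32. The body is
# Inductor's matmul template ("based on triton.ops.matmul") with grouped program-id ordering for L2 reuse.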

# kernel path: /tmp/torchinductor_sayak/hb/chbvrjtstw7pwhlkb34o2b4oy6hk22lgiw2wgxgk3cvmshhwoitr.py
# Source Nodes: [hidden_states_538], Original ATen: [aten.mm]
# hidden_states_538 => mixed_mm
triton_tem_fused_mm_20 = async_compile.triton('triton_tem_fused_mm_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.template(
    num_stages=3,
    num_warps=2,
    triton_meta={'signature': {0: '*bf16', 1: '*i8', 2: '*bf16'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'kernel_name': 'triton_tem_fused_mm_20', 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True},
)
@triton.jit
def triton_tem_fused_mm_20(arg_A, arg_B, out_ptr0):
    GROUP_M : tl.constexpr = 8
    EVEN_K : tl.constexpr = False
    ALLOW_TF32 : tl.constexpr = True
    ACC_TYPE : tl.constexpr = tl.float32
    B_PROLOGUE_CAST_TYPE : tl.constexpr = tl.bfloat16
    BLOCK_M : tl.constexpr = 16
    BLOCK_N : tl.constexpr = 32
    BLOCK_K : tl.constexpr = 256
    A = arg_A
    B = arg_B

    M = 8192
    N = 32
    K = 1152
    if M * N == 0:
        # early exit due to zero-size input(s)
        return
    stride_am = 1152
    stride_ak = 1
    stride_bk = 1
    stride_bn = 1152

    # based on triton.ops.matmul
    pid = tl.program_id(0)
    grid_m = (M + BLOCK_M - 1) // BLOCK_M
    grid_n = (N + BLOCK_N - 1) // BLOCK_N

    # re-order program ID for better L2 performance
    width = GROUP_M * grid_n
    group_id = pid // width
    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
    pid_m = group_id * GROUP_M + (pid % group_size)
    pid_n = (pid % width) // (group_size)

    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    if (stride_am == 1 and stride_ak == M) or (stride_am == K and stride_ak == 1):
        ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
    else:
        ram = rm % M
    if (stride_bk == 1 and stride_bn == K) or (stride_bk == N and stride_bn == 1):
        rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
    else:
        rbn = rn % N
    rk = tl.arange(0, BLOCK_K)
    A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
    B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)

    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
    for k in range(K, 0, -BLOCK_K):
        if EVEN_K:
            a = tl.load(A)
            b = tl.load(B)
        else:
            a = tl.load(A, mask=rk[None, :] < k, other=0.)
            b = tl.load(B, mask=rk[:, None] < k, other=0.)
        if B_PROLOGUE_CAST_TYPE is not None:
            b = b.to(B_PROLOGUE_CAST_TYPE)
        acc += tl.dot(a, b, allow_tf32=ALLOW_TF32)
        A += BLOCK_K * stride_ak
        B += BLOCK_K * stride_bk

    # rematerialize rm and rn to save registers
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    idx_m = rm[:, None]
    idx_n = rn[None, :]
    mask = (idx_m < M) & (idx_n < N)

    # inductor generates a suffix
    xindex = idx_n + (32*idx_m)
    tl.store(out_ptr0 + (tl.broadcast_to(xindex, acc.shape)), acc, mask)
''', device_str='cuda')
meta5 = {'GROUP_M': 8, 'EVEN_K': False, 'ALLOW_TF32': True, 'ACC_TYPE': 'tl.float32', 'B_PROLOGUE_CAST_TYPE': 'tl.bfloat16', 'BLOCK_M': 16, 'BLOCK_N': 32, 'BLOCK_K': 256}

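# NOTE: the final pointwise kernel below (triton_poi_fused_clone_21) appears to be the output epilogue:
# it applies a per-channel scale and shift (tmp0 * tmp1 + tmp3) while gathering the projected patch
# values into the contiguous layout materialized by the aten.clone for the `output` node.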

# kernel path: /tmp/torchinductor_sayak/ou/couotyyjp2udhhhnhciuyehe343er47rvpui6zi3qj6vuxjvmwwo.py
# Source Nodes: [output], Original ATen: [aten.clone]
# output => clone_140
triton_poi_fused_clone_21 = async_compile.triton('triton_poi_fused_clone_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16, 16384], tile_hint=TileHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {0: '*bf16', 1: '*bf16', 2: '*bf16', 3: '*bf16', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': '002A1A9B1115CD8E0489B47343AA1BAA75B3F6181CDF90468122931EFBBE395F', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': True, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'coordinate_descent_tuning': True, 'coordinate_descent_search_radius': 1, 'coordinate_descent_check_all_directions': True},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_21(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 16
    xnumel = 16384
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex % 2
    x3 = (xindex // 2) % 64
    x4 = (xindex // 128) % 2
    x5 = (xindex // 256)
    y0 = yindex % 8
    y1 = (yindex // 8)
    x7 = xindex
    y6 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + (8*x2) + (16*x4) + (32*x3) + (2048*x5) + (131072*y1) + (131072*((x3 + (64*x5)) // 4096))), ymask, eviction_policy='evict_last').to(tl.float32)
    tmp1 = tl.load(in_ptr1 + (y0 + (8*x2) + (16*x4)), ymask, eviction_policy='evict_last').to(tl.float32)
    tmp3 = tl.load(in_ptr2 + (y0 + (8*x2) + (16*x4)), ymask, eviction_policy='evict_last').to(tl.float32)
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tl.store(out_ptr0 + (x7 + (16384*y6)), tmp4, ymask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile

def call(args):
    (arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1, arg7_1, arg8_1, arg9_1, arg10_1, arg11_1, arg12_1, arg13_1,
arg14_1, arg15_1, arg16_1, arg17_1, arg18_1, arg19_1, arg20_1, arg21_1, arg22_1, arg23_1, arg24_1, arg25_1, arg26_1, arg27_1, arg28_1, arg29_1, arg30_1, arg31_1, arg32_1, arg33_1, arg34_1, arg35_1, arg36_1, arg37_1, arg38_1, arg39_1, arg40_1, arg41_1, arg42_1, arg43_1, arg44_1, arg45_1, arg46_1, arg47_1, arg48_1, arg49_1, arg50_1, arg51_1, arg52_1, arg53_1, arg54_1, arg55_1, arg56_1, arg57_1, arg58_1, arg59_1, arg60_1, arg61_1, arg62_1, arg63_1, arg64_1, arg65_1, arg66_1, arg67_1, arg68_1, arg69_1, arg70_1, arg71_1, arg72_1, arg73_1, arg74_1, arg75_1, arg76_1, arg77_1, arg78_1, arg79_1, arg80_1, arg81_1, arg82_1, arg83_1, arg84_1, arg85_1, arg86_1, arg87_1, arg88_1, arg89_1, arg90_1, arg91_1, arg92_1, arg93_1, arg94_1, arg95_1, arg96_1, arg97_1, arg98_1, arg99_1, arg100_1, arg101_1, arg102_1, arg103_1, arg104_1, arg105_1, arg106_1, arg107_1, arg108_1, arg109_1, arg110_1, arg111_1, arg112_1, arg113_1, arg114_1, arg115_1, arg116_1, arg117_1, arg118_1, arg119_1, arg120_1, arg121_1, arg122_1, arg123_1, arg124_1, arg125_1, arg126_1, arg127_1, arg128_1, arg129_1, arg130_1, arg131_1, arg132_1, arg133_1, arg134_1, arg135_1, arg136_1, arg137_1, arg138_1, arg139_1, arg140_1, arg141_1, arg142_1, arg143_1, arg144_1, arg145_1, arg146_1, arg147_1, arg148_1, arg149_1, arg150_1, arg151_1, arg152_1, arg153_1, arg154_1, arg155_1, arg156_1, arg157_1, arg158_1, arg159_1, arg160_1, arg161_1, arg162_1, arg163_1, arg164_1, arg165_1, arg166_1, arg167_1, arg168_1, arg169_1, arg170_1, arg171_1, arg172_1, arg173_1, arg174_1, arg175_1, arg176_1, arg177_1, arg178_1, arg179_1, arg180_1, arg181_1, arg182_1, arg183_1, arg184_1, arg185_1, arg186_1, arg187_1, arg188_1, arg189_1, arg190_1, arg191_1, arg192_1, arg193_1, arg194_1, arg195_1, arg196_1, arg197_1, arg198_1, arg199_1, arg200_1, arg201_1, arg202_1, arg203_1, arg204_1, arg205_1, arg206_1, arg207_1, arg208_1, arg209_1, arg210_1, arg211_1, arg212_1, arg213_1, arg214_1, arg215_1, arg216_1, arg217_1, arg218_1, arg219_1, arg220_1, arg221_1, arg222_1, arg223_1, arg224_1, arg225_1, arg226_1, arg227_1, arg228_1, arg229_1, arg230_1, arg231_1, arg232_1, arg233_1, arg234_1, arg235_1, arg236_1, arg237_1, arg238_1, arg239_1, arg240_1, arg241_1, arg242_1, arg243_1, arg244_1, arg245_1, arg246_1, arg247_1, arg248_1, arg249_1, arg250_1, arg251_1, arg252_1, arg253_1, arg254_1, arg255_1, arg256_1, arg257_1, arg258_1, arg259_1, arg260_1, arg261_1, arg262_1, arg263_1, arg264_1, arg265_1, arg266_1, arg267_1, arg268_1, arg269_1, arg270_1, arg271_1, arg272_1, arg273_1, arg274_1, arg275_1, arg276_1, arg277_1, arg278_1, arg279_1, arg280_1, arg281_1, arg282_1, arg283_1, arg284_1, arg285_1, arg286_1, arg287_1, arg288_1, arg289_1, arg290_1, arg291_1, arg292_1, arg293_1, arg294_1, arg295_1, arg296_1, arg297_1, arg298_1, arg299_1, arg300_1, arg301_1, arg302_1, arg303_1, arg304_1, arg305_1, arg306_1, arg307_1, arg308_1, arg309_1, arg310_1, arg311_1, arg312_1, arg313_1, arg314_1, arg315_1, arg316_1, arg317_1, arg318_1, arg319_1, arg320_1, arg321_1, arg322_1, arg323_1, arg324_1, arg325_1, arg326_1, arg327_1, arg328_1, arg329_1, arg330_1, arg331_1, arg332_1, arg333_1, arg334_1, arg335_1, arg336_1, arg337_1, arg338_1, arg339_1, arg340_1, arg341_1, arg342_1, arg343_1, arg344_1, arg345_1, arg346_1, arg347_1, arg348_1, arg349_1, arg350_1, arg351_1, arg352_1, arg353_1, arg354_1, arg355_1, arg356_1, arg357_1, arg358_1, arg359_1, arg360_1, arg361_1, arg362_1, arg363_1, arg364_1, arg365_1, arg366_1, arg367_1, arg368_1, arg369_1, arg370_1, arg371_1, arg372_1, arg373_1, arg374_1, arg375_1, arg376_1, arg377_1, 
arg378_1, arg379_1, arg380_1, arg381_1, arg382_1, arg383_1, arg384_1, arg385_1, arg386_1, arg387_1, arg388_1, arg389_1, arg390_1, arg391_1, arg392_1, arg393_1, arg394_1, arg395_1, arg396_1, arg397_1, arg398_1, arg399_1, arg400_1, arg401_1, arg402_1, arg403_1, arg404_1, arg405_1, arg406_1, arg407_1, arg408_1, arg409_1, arg410_1, arg411_1, arg412_1, arg413_1, arg414_1, arg415_1, arg416_1, arg417_1, arg418_1, arg419_1, arg420_1, arg421_1, arg422_1, arg423_1, arg424_1, arg425_1, arg426_1, arg427_1, arg428_1, arg429_1, arg430_1, arg431_1, arg432_1, arg433_1, arg434_1, arg435_1, arg436_1, arg437_1, arg438_1, arg439_1, arg440_1, arg441_1, arg442_1, arg443_1, arg444_1, arg445_1, arg446_1, arg447_1, arg448_1, arg449_1, arg450_1, arg451_1, arg452_1, arg453_1, arg454_1, arg455_1, arg456_1, arg457_1, arg458_1, arg459_1, arg460_1, arg461_1, arg462_1, arg463_1, arg464_1, arg465_1, arg466_1, arg467_1, arg468_1, arg469_1, arg470_1, arg471_1, arg472_1, arg473_1, arg474_1, arg475_1, arg476_1, arg477_1, arg478_1, arg479_1, arg480_1, arg481_1, arg482_1, arg483_1, arg484_1, arg485_1, arg486_1, arg487_1, arg488_1, arg489_1, arg490_1, arg491_1, arg492_1, arg493_1, arg494_1, arg495_1, arg496_1, arg497_1, arg498_1, arg499_1, arg500_1, arg501_1, arg502_1, arg503_1, arg504_1, arg505_1, arg506_1, arg507_1, arg508_1, arg509_1, arg510_1, arg511_1, arg512_1, arg513_1, arg514_1, arg515_1, arg516_1, arg517_1, arg518_1, arg519_1, arg520_1, arg521_1, arg522_1, arg523_1, arg524_1, arg525_1, arg526_1, arg527_1, arg528_1, arg529_1, arg530_1, arg531_1, arg532_1, arg533_1, arg534_1, arg535_1, arg536_1, arg537_1, arg538_1, arg539_1, arg540_1, arg541_1, arg542_1, arg543_1, arg544_1, arg545_1, arg546_1, arg547_1, arg548_1, arg549_1, arg550_1, arg551_1, arg552_1, arg553_1, arg554_1, arg555_1, arg556_1, arg557_1, arg558_1, arg559_1, arg560_1, arg561_1, arg562_1, arg563_1, arg564_1, arg565_1, arg566_1, arg567_1, arg568_1, arg569_1, arg570_1, arg571_1, arg572_1, arg573_1, arg574_1, arg575_1, arg576_1, arg577_1, arg578_1, arg579_1, arg580_1, arg581_1, arg582_1, arg583_1, arg584_1, arg585_1, arg586_1, arg587_1, arg588_1, arg589_1, arg590_1, arg591_1, arg592_1, arg593_1, arg594_1, arg595_1, arg596_1, arg597_1, arg598_1, arg599_1, arg600_1, arg601_1, arg602_1, arg603_1, arg604_1, arg605_1, arg606_1, arg607_1, arg608_1, arg609_1, arg610_1, arg611_1, arg612_1, arg613_1, arg614_1, arg615_1, arg616_1, arg617_1, arg618_1, arg619_1, arg620_1, arg621_1, arg622_1, arg623_1, arg624_1, arg625_1, arg626_1, arg627_1, arg628_1, arg629_1, arg630_1, arg631_1, arg632_1, arg633_1, arg634_1, arg635_1, arg636_1, arg637_1, arg638_1, arg639_1, arg640_1, arg641_1, arg642_1, arg643_1, arg644_1, arg645_1, arg646_1, arg647_1, arg648_1, arg649_1, arg650_1, arg651_1, arg652_1, arg653_1, arg654_1, arg655_1, arg656_1, arg657_1, arg658_1, arg659_1, arg660_1, arg661_1, arg662_1, arg663_1, arg664_1, arg665_1, arg666_1, arg667_1, arg668_1, arg669_1, arg670_1, arg671_1, arg672_1, arg673_1, arg674_1, arg675_1, arg676_1, arg677_1, arg678_1, arg679_1, arg680_1, arg681_1, arg682_1, arg683_1, arg684_1, arg685_1, arg686_1, arg687_1, arg688_1, arg689_1, arg690_1, arg691_1, arg692_1, arg693_1, arg694_1, arg695_1, arg696_1, arg697_1, arg698_1, arg699_1, arg700_1, arg701_1, arg702_1, arg703_1, arg704_1, arg705_1, arg706_1, arg707_1, arg708_1, arg709_1, arg710_1, arg711_1, arg712_1, arg713_1, arg714_1, arg715_1, arg716_1, arg717_1, arg718_1, arg719_1, arg720_1, arg721_1, arg722_1, arg723_1, arg724_1, arg725_1, arg726_1, arg727_1, arg728_1, arg729_1, arg730_1, arg731_1, arg732_1, 
arg733_1, arg734_1, arg735_1, arg736_1, arg737_1, arg738_1, arg739_1, arg740_1, arg741_1, arg742_1, arg743_1, arg744_1, arg745_1, arg746_1, arg747_1, arg748_1, arg749_1, arg750_1, arg751_1, arg752_1, arg753_1, arg754_1, arg755_1, arg756_1, arg757_1, arg758_1, arg759_1, arg760_1, arg761_1, arg762_1, arg763_1, arg764_1, arg765_1, arg766_1, arg767_1, arg768_1, arg769_1, arg770_1, arg771_1, arg772_1, arg773_1, arg774_1, arg775_1, arg776_1, arg777_1, arg778_1, arg779_1, arg780_1, arg781_1, arg782_1, arg783_1, arg784_1, arg785_1, arg786_1, arg787_1, arg788_1, arg789_1, arg790_1, arg791_1, arg792_1, arg793_1, arg794_1, arg795_1, arg796_1, arg797_1, arg798_1, arg799_1, arg800_1, arg801_1, arg802_1, arg803_1, arg804_1, arg805_1, arg806_1, arg807_1, arg808_1, arg809_1, arg810_1, arg811_1, arg812_1, arg813_1, arg814_1, arg815_1, arg816_1, arg817_1, arg818_1, arg819_1, arg820_1, arg821_1, arg822_1, arg823_1, arg824_1, arg825_1, arg826_1, arg827_1, arg828_1, arg829_1, arg830_1, arg831_1, arg832_1, arg833_1, arg834_1, arg835_1, arg836_1, arg837_1, arg838_1, arg839_1, arg840_1, arg841_1, arg842_1, arg843_1, arg844_1, arg845_1, arg846_1, arg847_1, arg848_1, arg849_1, arg850_1, arg851_1, arg852_1, arg853_1, arg854_1, arg855_1, arg856_1, arg857_1, arg858_1, arg859_1, arg860_1, arg861_1, arg862_1, arg863_1, arg864_1, arg865_1, arg866_1, arg867_1, arg868_1, arg869_1, arg870_1, arg871_1, arg872_1, arg873_1, arg874_1, arg875_1, arg876_1, arg877_1, arg878_1, arg879_1, arg880_1, arg881_1, arg882_1, arg883_1, arg884_1, arg885_1, arg886_1, arg887_1, arg888_1, arg889_1, arg890_1, arg891_1, arg892_1, arg893_1, arg894_1, arg895_1, arg896_1, arg897_1, arg898_1, arg899_1, arg900_1, arg901_1, arg902_1, arg903_1, arg904_1, arg905_1, arg906_1, arg907_1, arg908_1, arg909_1, arg910_1, arg911_1, arg912_1, arg913_1, arg914_1, arg915_1, arg916_1, arg917_1, arg918_1, arg919_1, arg920_1, arg921_1, arg922_1, arg923_1, arg924_1, arg925_1, arg926_1, arg927_1, arg928_1, arg929_1, arg930_1, arg931_1, arg932_1, arg933_1, arg934_1, arg935_1, arg936_1, arg937_1, arg938_1, arg939_1, arg940_1, arg941_1, arg942_1, arg943_1, arg944_1, arg945_1, arg946_1, arg947_1, arg948_1, arg949_1, arg950_1, arg951_1, arg952_1, arg953_1, arg954_1, arg955_1, arg956_1, arg957_1, arg958_1, arg959_1, arg960_1, arg961_1, arg962_1, arg963_1, arg964_1, arg965_1, arg966_1, arg967_1, arg968_1, arg969_1, arg970_1, arg971_1, arg972_1, arg973_1, arg974_1, arg975_1, arg976_1, arg977_1, arg978_1, arg979_1, arg980_1, arg981_1, arg982_1, arg983_1, arg984_1, arg985_1, arg986_1, arg987_1, arg988_1, arg989_1, arg990_1, arg991_1, arg992_1, arg993_1, arg994_1, arg995_1, arg996_1, arg997_1, arg998_1, arg999_1, arg1000_1, arg1001_1, arg1002_1, arg1003_1, arg1004_1, arg1005_1, arg1006_1, arg1007_1, arg1008_1, arg1009_1, arg1010_1, arg1011_1, arg1012_1, arg1013_1, arg1014_1, arg1015_1, arg1016_1, arg1017_1, arg1018_1, arg1019_1, arg1020_1, arg1021_1, arg1022_1, arg1023_1, arg1024_1, arg1025_1, arg1026_1, arg1027_1, arg1028_1, arg1029_1, arg1030_1, arg1031_1, arg1032_1, arg1033_1, arg1034_1, arg1035_1, arg1036_1, arg1037_1, arg1038_1, arg1039_1, arg1040_1, arg1041_1, arg1042_1, arg1043_1, arg1044_1, arg1045_1, arg1046_1, arg1047_1, arg1048_1, arg1049_1, arg1050_1, arg1051_1, arg1052_1, arg1053_1, arg1054_1, arg1055_1, arg1056_1, arg1057_1, arg1058_1, arg1059_1, arg1060_1, arg1061_1, arg1062_1, arg1063_1, arg1064_1, arg1065_1, arg1066_1, arg1067_1, arg1068_1, arg1069_1, arg1070_1, arg1071_1, arg1072_1, arg1073_1, arg1074_1, arg1075_1, arg1076_1, arg1077_1, arg1078_1, arg1079_1, 
arg1080_1, arg1081_1, arg1082_1, arg1083_1, arg1084_1, arg1085_1, arg1086_1, arg1087_1, arg1088_1, arg1089_1, arg1090_1, arg1091_1, arg1092_1, arg1093_1, arg1094_1, arg1095_1, arg1096_1, arg1097_1, arg1098_1, arg1099_1, arg1100_1, arg1101_1, arg1102_1, arg1103_1, arg1104_1, arg1105_1, arg1106_1, arg1107_1, arg1108_1, arg1109_1, arg1110_1, arg1111_1, arg1112_1, arg1113_1, arg1114_1, arg1115_1, arg1116_1, arg1117_1, arg1118_1, arg1119_1, arg1120_1, arg1121_1, arg1122_1, arg1123_1, arg1124_1, arg1125_1, arg1126_1, arg1127_1, arg1128_1, arg1129_1, arg1130_1, arg1131_1, arg1132_1, arg1133_1, arg1134_1, arg1135_1, arg1136_1, arg1137_1, arg1138_1, arg1139_1, arg1140_1, arg1141_1, arg1142_1, arg1143_1, arg1144_1, arg1145_1, arg1146_1, arg1147_1, arg1148_1, arg1149_1, arg1150_1, arg1151_1, arg1152_1, arg1153_1, arg1154_1, arg1155_1, arg1156_1, arg1157_1, arg1158_1, arg1159_1, arg1160_1, arg1161_1, arg1162_1, arg1163_1, arg1164_1, arg1165_1, arg1166_1, arg1167_1, arg1168_1, arg1169_1, arg1170_1, arg1171_1, arg1172_1, arg1173_1, arg1174_1, arg1175_1, arg1176_1, arg1177_1, arg1178_1, arg1179_1) = args
    args.clear()
    assert_size_stride(arg0_1, (2, 300), (300, 1))
    assert_size_stride(arg1_1, (2, 4, 128, 128), (65536, 16384, 128, 1))
    assert_size_stride(arg2_1, (1152, 4, 2, 2), (16, 1, 8, 4))
    assert_size_stride(arg3_1, (1152, ), (1, ))
    assert_size_stride(arg4_1, (1, 4096, 1152), (4718592, 1152, 1))
    assert_size_stride(arg5_1, (2, ), (0, ))
    assert_size_stride(arg6_1, (1152, 256), (256, 1))
    assert_size_stride(arg7_1, (1152, ), (1, ))
    assert_size_stride(arg8_1, (1152, ), (1, ))
    assert_size_stride(arg9_1, (1152, ), (1, ))
    assert_size_stride(arg10_1, (1152, 1152), (1152, 1))
    assert_size_stride(arg11_1, (1152, ), (1, ))
    assert_size_stride(arg12_1, (1152, ), (1, ))
    assert_size_stride(arg13_1, (1152, ), (1, ))
    assert_size_stride(arg14_1, (6912, 1152), (1152, 1))
    assert_size_stride(arg15_1, (6912, ), (1, ))
    assert_size_stride(arg16_1, (6912, ), (1, ))
    assert_size_stride(arg17_1, (6912, ), (1, ))
    assert_size_stride(arg18_1, (1152, 4096), (4096, 1))
    assert_size_stride(arg19_1, (1152, ), (1, ))
    assert_size_stride(arg20_1, (1152, ), (1, ))
    assert_size_stride(arg21_1, (1152, ), (1, ))
    assert_size_stride(arg22_1, (2, 300, 4096), (1228800, 4096, 1))
    assert_size_stride(arg23_1, (1152, 1152), (1152, 1))
    assert_size_stride(arg24_1, (1152, ), (1, ))
    assert_size_stride(arg25_1, (1152, ), (1, ))
    assert_size_stride(arg26_1, (1152, ), (1, ))
    assert_size_stride(arg27_1, (6, 1152), (1152, 1))
    assert_size_stride(arg28_1, (1152, 1152), (1152, 1))
    assert_size_stride(arg29_1, (1152, ), (1, ))
    assert_size_stride(arg30_1, (1152, ), (1, ))
    assert_size_stride(arg31_1, (1152, ), (1, ))
    assert_size_stride(arg32_1, (1152, 1152), (1152, 1))
    assert_size_stride(arg33_1, (1152, ), (1, ))
    assert_size_stride(arg34_1, (1152, ), (1, ))
    assert_size_stride(arg35_1, (1152, ), (1, ))
    assert_size_stride(arg36_1, (1152, 1152), (1152, 1))
    assert_size_stride(arg37_1, (1152, ), (1, ))
    assert_size_stride(arg38_1, (1152, ), (1, ))
    assert_size_stride(arg39_1, (1152, ), (1, ))
    assert_size_stride(arg40_1, (1152, 1152), (1152, 1))
    assert_size_stride(arg41_1, (1152, ), (1, ))
    assert_size_stride(arg42_1, (1152, ), (1, ))
    assert_size_stride(arg43_1, (1152, ), (1, ))
    assert_size_stride(arg44_1, (1152, 1152), (1152, 1))
    assert_size_stride(arg45_1, (1152, ), (1, ))
    assert_size_stride(arg46_1, (1152, ), (1, ))
    assert_size_stride(arg47_1, (1152, ), (1, ))
    assert_size_stride(arg48_1, (1152, 1152), (1152, 1))
    assert_size_stride(arg49_1, (1152, ), (1, ))
    assert_size_stride(arg50_1, (1152, ), (1, ))
    assert_size_stride(arg51_1, (1152, ), (1, ))
    assert_size_stride(arg52_1, (1152, 1152), (1152, 1))
    assert_size_stride(arg53_1, (1152, ), (1, ))
    assert_size_stride(arg54_1, (1152, ), (1, ))
    assert_size_stride(arg55_1, (1152, ), (1, ))
    assert_size_stride(arg56_1, (1152, 1152), (1152, 1))
    assert_size_stride(arg57_1, (1152, ), (1, ))
    assert_size_stride(arg58_1, (1152, ), (1, ))
    assert_size_stride(arg59_1, (1152, ), (1, ))
    assert_size_stride(arg60_1, (4608, 1152), (1152, 1))
    assert_size_stride(arg61_1, (4608, ), (1, ))
    assert_size_stride(arg62_1, (4608, ), (1, ))
    assert_size_stride(arg63_1, (4608, ), (1, ))
    assert_size_stride(arg64_1, (1152, 4608), (4608, 1))
    assert_size_stride(arg65_1, (1152, ), (1, ))
    assert_size_stride(arg66_1, (1152, ), (1, ))
    assert_size_stride(arg67_1, (1152, ), (1, ))
    assert_size_stride(arg68_1, (6, 1152), (1152, 1))
    assert_size_stride(arg69_1, (1152, 1152), (1152, 1))
    assert_size_stride(arg70_1, (1152, ), (1, ))
    assert_size_stride(arg71_1, (1152, ),
(1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg72_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg73_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg74_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg75_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg76_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg77_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg78_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg79_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg80_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg81_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg82_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg83_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg84_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg85_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg86_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg87_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg88_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg89_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg90_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg91_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg92_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg93_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg94_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg95_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg96_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg97_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg98_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg99_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg100_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg101_1, (4608, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg102_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg103_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg104_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg105_1, (1152, 4608), (4608, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg106_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg107_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg108_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg109_1, (6, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg110_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg111_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg112_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg113_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg114_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg115_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg116_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg117_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg118_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg119_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg120_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg121_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg122_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg123_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg124_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg125_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg126_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg127_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg128_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg129_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg130_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg131_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg132_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg133_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg134_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg135_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg136_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg137_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg138_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg139_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg140_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg141_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg142_1, (4608, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg143_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg144_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg145_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg146_1, (1152, 4608), (4608, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg147_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg148_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg149_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg150_1, (6, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg151_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg152_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg153_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg154_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg155_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg156_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg157_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg158_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg159_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg160_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg161_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg162_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg163_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg164_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg165_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg166_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg167_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg168_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg169_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg170_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg171_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg172_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg173_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg174_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg175_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg176_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg177_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg178_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg179_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg180_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg181_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg182_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg183_1, (4608, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg184_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg185_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg186_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg187_1, (1152, 4608), (4608, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg188_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg189_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg190_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg191_1, (6, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg192_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg193_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg194_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg195_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg196_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg197_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg198_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg199_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg200_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg201_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg202_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg203_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg204_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg205_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg206_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg207_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg208_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg209_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg210_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg211_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg212_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg213_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg214_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg215_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg216_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg217_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg218_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg219_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg220_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg221_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg222_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg223_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg224_1, (4608, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg225_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg226_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg227_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg228_1, (1152, 4608), (4608, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg229_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg230_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg231_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg232_1, (6, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg233_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg234_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg235_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg236_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg237_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg238_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg239_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg240_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg241_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg242_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg243_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg244_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg245_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg246_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg247_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg248_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg249_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg250_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg251_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg252_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg253_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg254_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg255_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg256_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg257_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg258_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg259_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg260_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg261_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg262_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg263_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg264_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg265_1, (4608, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg266_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg267_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg268_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg269_1, (1152, 4608), (4608, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg270_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg271_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg272_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg273_1, (6, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg274_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg275_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg276_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg277_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg278_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg279_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg280_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg281_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg282_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg283_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg284_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg285_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg286_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg287_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg288_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg289_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg290_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg291_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg292_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg293_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg294_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg295_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg296_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg297_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg298_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg299_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg300_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg301_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg302_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg303_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg304_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg305_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg306_1, (4608, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg307_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg308_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg309_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg310_1, (1152, 4608), (4608, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg311_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg312_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg313_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg314_1, (6, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg315_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg316_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg317_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg318_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg319_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg320_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg321_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg322_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg323_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg324_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg325_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg326_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg327_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg328_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg329_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg330_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg331_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg332_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg333_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg334_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg335_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg336_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg337_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg338_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg339_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg340_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg341_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg342_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg343_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg344_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg345_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg346_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg347_1, (4608, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg348_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg349_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg350_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg351_1, (1152, 4608), (4608, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg352_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg353_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg354_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg355_1, (6, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg356_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg357_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg358_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg359_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg360_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg361_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg362_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg363_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg364_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg365_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg366_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg367_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg368_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg369_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg370_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg371_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg372_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg373_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg374_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg375_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg376_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg377_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg378_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg379_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg380_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg381_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg382_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg383_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg384_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg385_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg386_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg387_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg388_1, (4608, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg389_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg390_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg391_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg392_1, (1152, 4608), (4608, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg393_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg394_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg395_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg396_1, (6, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg397_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg398_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg399_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg400_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg401_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg402_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg403_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg404_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg405_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg406_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg407_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg408_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg409_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg410_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg411_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg412_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg413_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg414_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg415_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg416_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg417_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg418_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg419_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg420_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg421_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg422_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 
assert_size_stride(arg423_1, (1152, ), (1, ))
assert_size_stride(arg424_1, (1152, ), (1, ))
assert_size_stride(arg425_1, (1152, 1152), (1152, 1))
assert_size_stride(arg426_1, (1152, ), (1, ))
assert_size_stride(arg427_1, (1152, ), (1, ))
assert_size_stride(arg428_1, (1152, ), (1, ))
assert_size_stride(arg429_1, (4608, 1152), (1152, 1))
assert_size_stride(arg430_1, (4608, ), (1, ))
assert_size_stride(arg431_1, (4608, ), (1, ))
assert_size_stride(arg432_1, (4608, ), (1, ))
assert_size_stride(arg433_1, (1152, 4608), (4608, 1))
assert_size_stride(arg434_1, (1152, ), (1, ))
assert_size_stride(arg435_1, (1152, ), (1, ))
assert_size_stride(arg436_1, (1152, ), (1, ))
assert_size_stride(arg437_1, (6, 1152), (1152, 1))
assert_size_stride(arg438_1, (1152, 1152), (1152, 1))
assert_size_stride(arg439_1, (1152, ), (1, ))
assert_size_stride(arg440_1, (1152, ), (1, ))
assert_size_stride(arg441_1, (1152, ), (1, ))
assert_size_stride(arg442_1, (1152, 1152), (1152, 1))
assert_size_stride(arg443_1, (1152, ), (1, ))
assert_size_stride(arg444_1, (1152, ), (1, ))
assert_size_stride(arg445_1, (1152, ), (1, ))
assert_size_stride(arg446_1, (1152, 1152), (1152, 1))
assert_size_stride(arg447_1, (1152, ), (1, ))
assert_size_stride(arg448_1, (1152, ), (1, ))
assert_size_stride(arg449_1, (1152, ), (1, ))
assert_size_stride(arg450_1, (1152, 1152), (1152, 1))
assert_size_stride(arg451_1, (1152, ), (1, ))
assert_size_stride(arg452_1, (1152, ), (1, ))
assert_size_stride(arg453_1, (1152, ), (1, ))
assert_size_stride(arg454_1, (1152, 1152), (1152, 1))
assert_size_stride(arg455_1, (1152, ), (1, ))
assert_size_stride(arg456_1, (1152, ), (1, ))
assert_size_stride(arg457_1, (1152, ), (1, ))
assert_size_stride(arg458_1, (1152, 1152), (1152, 1))
assert_size_stride(arg459_1, (1152, ), (1, ))
assert_size_stride(arg460_1, (1152, ), (1, ))
assert_size_stride(arg461_1, (1152, ), (1, ))
assert_size_stride(arg462_1, (1152, 1152), (1152, 1))
assert_size_stride(arg463_1, (1152, ), (1, ))
assert_size_stride(arg464_1, (1152, ), (1, ))
assert_size_stride(arg465_1, (1152, ), (1, ))
assert_size_stride(arg466_1, (1152, 1152), (1152, 1))
assert_size_stride(arg467_1, (1152, ), (1, ))
assert_size_stride(arg468_1, (1152, ), (1, ))
assert_size_stride(arg469_1, (1152, ), (1, ))
assert_size_stride(arg470_1, (4608, 1152), (1152, 1))
assert_size_stride(arg471_1, (4608, ), (1, ))
assert_size_stride(arg472_1, (4608, ), (1, ))
assert_size_stride(arg473_1, (4608, ), (1, ))
assert_size_stride(arg474_1, (1152, 4608), (4608, 1))
assert_size_stride(arg475_1, (1152, ), (1, ))
assert_size_stride(arg476_1, (1152, ), (1, ))
assert_size_stride(arg477_1, (1152, ), (1, ))
assert_size_stride(arg478_1, (6, 1152), (1152, 1))
assert_size_stride(arg479_1, (1152, 1152), (1152, 1))
assert_size_stride(arg480_1, (1152, ), (1, ))
assert_size_stride(arg481_1, (1152, ), (1, ))
assert_size_stride(arg482_1, (1152, ), (1, ))
assert_size_stride(arg483_1, (1152, 1152), (1152, 1))
assert_size_stride(arg484_1, (1152, ), (1, ))
assert_size_stride(arg485_1, (1152, ), (1, ))
assert_size_stride(arg486_1, (1152, ), (1, ))
assert_size_stride(arg487_1, (1152, 1152), (1152, 1))
assert_size_stride(arg488_1, (1152, ), (1, ))
assert_size_stride(arg489_1, (1152, ), (1, ))
assert_size_stride(arg490_1, (1152, ), (1, ))
assert_size_stride(arg491_1, (1152, 1152), (1152, 1))
assert_size_stride(arg492_1, (1152, ), (1, ))
assert_size_stride(arg493_1, (1152, ), (1, ))
assert_size_stride(arg494_1, (1152, ), (1, ))
assert_size_stride(arg495_1, (1152, 1152), (1152, 1))
assert_size_stride(arg496_1, (1152, ), (1, ))
assert_size_stride(arg497_1, (1152, ), (1, ))
assert_size_stride(arg498_1, (1152, ), (1, ))
assert_size_stride(arg499_1, (1152, 1152), (1152, 1))
assert_size_stride(arg500_1, (1152, ), (1, ))
assert_size_stride(arg501_1, (1152, ), (1, ))
assert_size_stride(arg502_1, (1152, ), (1, ))
assert_size_stride(arg503_1, (1152, 1152), (1152, 1))
assert_size_stride(arg504_1, (1152, ), (1, ))
assert_size_stride(arg505_1, (1152, ), (1, ))
assert_size_stride(arg506_1, (1152, ), (1, ))
assert_size_stride(arg507_1, (1152, 1152), (1152, 1))
assert_size_stride(arg508_1, (1152, ), (1, ))
assert_size_stride(arg509_1, (1152, ), (1, ))
assert_size_stride(arg510_1, (1152, ), (1, ))
assert_size_stride(arg511_1, (4608, 1152), (1152, 1))
assert_size_stride(arg512_1, (4608, ), (1, ))
assert_size_stride(arg513_1, (4608, ), (1, ))
assert_size_stride(arg514_1, (4608, ), (1, ))
assert_size_stride(arg515_1, (1152, 4608), (4608, 1))
assert_size_stride(arg516_1, (1152, ), (1, ))
assert_size_stride(arg517_1, (1152, ), (1, ))
assert_size_stride(arg518_1, (1152, ), (1, ))
assert_size_stride(arg519_1, (6, 1152), (1152, 1))
assert_size_stride(arg520_1, (1152, 1152), (1152, 1))
assert_size_stride(arg521_1, (1152, ), (1, ))
assert_size_stride(arg522_1, (1152, ), (1, ))
assert_size_stride(arg523_1, (1152, ), (1, ))
assert_size_stride(arg524_1, (1152, 1152), (1152, 1))
assert_size_stride(arg525_1, (1152, ), (1, ))
assert_size_stride(arg526_1, (1152, ), (1, ))
assert_size_stride(arg527_1, (1152, ), (1, ))
assert_size_stride(arg528_1, (1152, 1152), (1152, 1))
assert_size_stride(arg529_1, (1152, ), (1, ))
assert_size_stride(arg530_1, (1152, ), (1, ))
assert_size_stride(arg531_1, (1152, ), (1, ))
assert_size_stride(arg532_1, (1152, 1152), (1152, 1))
assert_size_stride(arg533_1, (1152, ), (1, ))
assert_size_stride(arg534_1, (1152, ), (1, ))
assert_size_stride(arg535_1, (1152, ), (1, ))
assert_size_stride(arg536_1, (1152, 1152), (1152, 1))
assert_size_stride(arg537_1, (1152, ), (1, ))
assert_size_stride(arg538_1, (1152, ), (1, ))
assert_size_stride(arg539_1, (1152, ), (1, ))
assert_size_stride(arg540_1, (1152, 1152), (1152, 1))
assert_size_stride(arg541_1, (1152, ), (1, ))
assert_size_stride(arg542_1, (1152, ), (1, ))
assert_size_stride(arg543_1, (1152, ), (1, ))
assert_size_stride(arg544_1, (1152, 1152), (1152, 1))
assert_size_stride(arg545_1, (1152, ), (1, ))
assert_size_stride(arg546_1, (1152, ), (1, ))
assert_size_stride(arg547_1, (1152, ), (1, ))
assert_size_stride(arg548_1, (1152, 1152), (1152, 1))
assert_size_stride(arg549_1, (1152, ), (1, ))
assert_size_stride(arg550_1, (1152, ), (1, ))
assert_size_stride(arg551_1, (1152, ), (1, ))
assert_size_stride(arg552_1, (4608, 1152), (1152, 1))
assert_size_stride(arg553_1, (4608, ), (1, ))
assert_size_stride(arg554_1, (4608, ), (1, ))
assert_size_stride(arg555_1, (4608, ), (1, ))
assert_size_stride(arg556_1, (1152, 4608), (4608, 1))
assert_size_stride(arg557_1, (1152, ), (1, ))
assert_size_stride(arg558_1, (1152, ), (1, ))
assert_size_stride(arg559_1, (1152, ), (1, ))
assert_size_stride(arg560_1, (6, 1152), (1152, 1))
assert_size_stride(arg561_1, (1152, 1152), (1152, 1))
assert_size_stride(arg562_1, (1152, ), (1, ))
assert_size_stride(arg563_1, (1152, ), (1, ))
assert_size_stride(arg564_1, (1152, ), (1, ))
assert_size_stride(arg565_1, (1152, 1152), (1152, 1))
assert_size_stride(arg566_1, (1152, ), (1, ))
assert_size_stride(arg567_1, (1152, ), (1, ))
assert_size_stride(arg568_1, (1152, ), (1, ))
assert_size_stride(arg569_1, (1152, 1152), (1152, 1))
assert_size_stride(arg570_1, (1152, ), (1, ))
assert_size_stride(arg571_1, (1152, ), (1, ))
assert_size_stride(arg572_1, (1152, ), (1, ))
assert_size_stride(arg573_1, (1152, 1152), (1152, 1))
assert_size_stride(arg574_1, (1152, ), (1, ))
assert_size_stride(arg575_1, (1152, ), (1, ))
assert_size_stride(arg576_1, (1152, ), (1, ))
assert_size_stride(arg577_1, (1152, 1152), (1152, 1))
assert_size_stride(arg578_1, (1152, ), (1, ))
assert_size_stride(arg579_1, (1152, ), (1, ))
assert_size_stride(arg580_1, (1152, ), (1, ))
assert_size_stride(arg581_1, (1152, 1152), (1152, 1))
assert_size_stride(arg582_1, (1152, ), (1, ))
assert_size_stride(arg583_1, (1152, ), (1, ))
assert_size_stride(arg584_1, (1152, ), (1, ))
assert_size_stride(arg585_1, (1152, 1152), (1152, 1))
assert_size_stride(arg586_1, (1152, ), (1, ))
assert_size_stride(arg587_1, (1152, ), (1, ))
assert_size_stride(arg588_1, (1152, ), (1, ))
assert_size_stride(arg589_1, (1152, 1152), (1152, 1))
assert_size_stride(arg590_1, (1152, ), (1, ))
assert_size_stride(arg591_1, (1152, ), (1, ))
assert_size_stride(arg592_1, (1152, ), (1, ))
assert_size_stride(arg593_1, (4608, 1152), (1152, 1))
assert_size_stride(arg594_1, (4608, ), (1, ))
assert_size_stride(arg595_1, (4608, ), (1, ))
assert_size_stride(arg596_1, (4608, ), (1, ))
assert_size_stride(arg597_1, (1152, 4608), (4608, 1))
assert_size_stride(arg598_1, (1152, ), (1, ))
assert_size_stride(arg599_1, (1152, ), (1, ))
assert_size_stride(arg600_1, (1152, ), (1, ))
assert_size_stride(arg601_1, (6, 1152), (1152, 1))
assert_size_stride(arg602_1, (1152, 1152), (1152, 1))
assert_size_stride(arg603_1, (1152, ), (1, ))
assert_size_stride(arg604_1, (1152, ), (1, ))
assert_size_stride(arg605_1, (1152, ), (1, ))
assert_size_stride(arg606_1, (1152, 1152), (1152, 1))
assert_size_stride(arg607_1, (1152, ), (1, ))
assert_size_stride(arg608_1, (1152, ), (1, ))
assert_size_stride(arg609_1, (1152, ), (1, ))
assert_size_stride(arg610_1, (1152, 1152), (1152, 1))
assert_size_stride(arg611_1, (1152, ), (1, ))
assert_size_stride(arg612_1, (1152, ), (1, ))
assert_size_stride(arg613_1, (1152, ), (1, ))
assert_size_stride(arg614_1, (1152, 1152), (1152, 1))
assert_size_stride(arg615_1, (1152, ), (1, ))
assert_size_stride(arg616_1, (1152, ), (1, ))
assert_size_stride(arg617_1, (1152, ), (1, ))
assert_size_stride(arg618_1, (1152, 1152), (1152, 1))
assert_size_stride(arg619_1, (1152, ), (1, ))
assert_size_stride(arg620_1, (1152, ), (1, ))
assert_size_stride(arg621_1, (1152, ), (1, ))
assert_size_stride(arg622_1, (1152, 1152), (1152, 1))
assert_size_stride(arg623_1, (1152, ), (1, ))
assert_size_stride(arg624_1, (1152, ), (1, ))
assert_size_stride(arg625_1, (1152, ), (1, ))
assert_size_stride(arg626_1, (1152, 1152), (1152, 1))
assert_size_stride(arg627_1, (1152, ), (1, ))
assert_size_stride(arg628_1, (1152, ), (1, ))
assert_size_stride(arg629_1, (1152, ), (1, ))
assert_size_stride(arg630_1, (1152, 1152), (1152, 1))
assert_size_stride(arg631_1, (1152, ), (1, ))
assert_size_stride(arg632_1, (1152, ), (1, ))
assert_size_stride(arg633_1, (1152, ), (1, ))
assert_size_stride(arg634_1, (4608, 1152), (1152, 1))
assert_size_stride(arg635_1, (4608, ), (1, ))
assert_size_stride(arg636_1, (4608, ), (1, ))
assert_size_stride(arg637_1, (4608, ), (1, ))
assert_size_stride(arg638_1, (1152, 4608), (4608, 1))
assert_size_stride(arg639_1, (1152, ), (1, ))
assert_size_stride(arg640_1, (1152, ), (1, ))
assert_size_stride(arg641_1, (1152, ), (1, ))
assert_size_stride(arg642_1, (6, 1152), (1152, 1))
assert_size_stride(arg643_1, (1152, 1152), (1152, 1))
assert_size_stride(arg644_1, (1152, ), (1, ))
assert_size_stride(arg645_1, (1152, ), (1, ))
assert_size_stride(arg646_1, (1152, ), (1, ))
assert_size_stride(arg647_1, (1152, 1152), (1152, 1))
assert_size_stride(arg648_1, (1152, ), (1, ))
assert_size_stride(arg649_1, (1152, ), (1, ))
assert_size_stride(arg650_1, (1152, ), (1, ))
assert_size_stride(arg651_1, (1152, 1152), (1152, 1))
assert_size_stride(arg652_1, (1152, ), (1, ))
assert_size_stride(arg653_1, (1152, ), (1, ))
assert_size_stride(arg654_1, (1152, ), (1, ))
assert_size_stride(arg655_1, (1152, 1152), (1152, 1))
assert_size_stride(arg656_1, (1152, ), (1, ))
assert_size_stride(arg657_1, (1152, ), (1, ))
assert_size_stride(arg658_1, (1152, ), (1, ))
assert_size_stride(arg659_1, (1152, 1152), (1152, 1))
assert_size_stride(arg660_1, (1152, ), (1, ))
assert_size_stride(arg661_1, (1152, ), (1, ))
assert_size_stride(arg662_1, (1152, ), (1, ))
assert_size_stride(arg663_1, (1152, 1152), (1152, 1))
assert_size_stride(arg664_1, (1152, ), (1, ))
assert_size_stride(arg665_1, (1152, ), (1, ))
assert_size_stride(arg666_1, (1152, ), (1, ))
assert_size_stride(arg667_1, (1152, 1152), (1152, 1))
assert_size_stride(arg668_1, (1152, ), (1, ))
assert_size_stride(arg669_1, (1152, ), (1, ))
assert_size_stride(arg670_1, (1152, ), (1, ))
assert_size_stride(arg671_1, (1152, 1152), (1152, 1))
assert_size_stride(arg672_1, (1152, ), (1, ))
assert_size_stride(arg673_1, (1152, ), (1, ))
assert_size_stride(arg674_1, (1152, ), (1, ))
assert_size_stride(arg675_1, (4608, 1152), (1152, 1))
assert_size_stride(arg676_1, (4608, ), (1, ))
assert_size_stride(arg677_1, (4608, ), (1, ))
assert_size_stride(arg678_1, (4608, ), (1, ))
assert_size_stride(arg679_1, (1152, 4608), (4608, 1))
assert_size_stride(arg680_1, (1152, ), (1, ))
assert_size_stride(arg681_1, (1152, ), (1, ))
assert_size_stride(arg682_1, (1152, ), (1, ))
assert_size_stride(arg683_1, (6, 1152), (1152, 1))
assert_size_stride(arg684_1, (1152, 1152), (1152, 1))
assert_size_stride(arg685_1, (1152, ), (1, ))
assert_size_stride(arg686_1, (1152, ), (1, ))
assert_size_stride(arg687_1, (1152, ), (1, ))
assert_size_stride(arg688_1, (1152, 1152), (1152, 1))
assert_size_stride(arg689_1, (1152, ), (1, ))
assert_size_stride(arg690_1, (1152, ), (1, ))
assert_size_stride(arg691_1, (1152, ), (1, ))
assert_size_stride(arg692_1, (1152, 1152), (1152, 1))
assert_size_stride(arg693_1, (1152, ), (1, ))
assert_size_stride(arg694_1, (1152, ), (1, ))
assert_size_stride(arg695_1, (1152, ), (1, ))
assert_size_stride(arg696_1, (1152, 1152), (1152, 1))
assert_size_stride(arg697_1, (1152, ), (1, ))
assert_size_stride(arg698_1, (1152, ), (1, ))
assert_size_stride(arg699_1, (1152, ), (1, ))
assert_size_stride(arg700_1, (1152, 1152), (1152, 1))
assert_size_stride(arg701_1, (1152, ), (1, ))
assert_size_stride(arg702_1, (1152, ), (1, ))
assert_size_stride(arg703_1, (1152, ), (1, ))
assert_size_stride(arg704_1, (1152, 1152), (1152, 1))
assert_size_stride(arg705_1, (1152, ), (1, ))
assert_size_stride(arg706_1, (1152, ), (1, ))
assert_size_stride(arg707_1, (1152, ), (1, ))
assert_size_stride(arg708_1, (1152, 1152), (1152, 1))
assert_size_stride(arg709_1, (1152, ), (1, ))
assert_size_stride(arg710_1, (1152, ), (1, ))
assert_size_stride(arg711_1, (1152, ), (1, ))
assert_size_stride(arg712_1, (1152, 1152), (1152, 1))
assert_size_stride(arg713_1, (1152, ), (1, ))
assert_size_stride(arg714_1, (1152, ), (1, ))
assert_size_stride(arg715_1, (1152, ), (1, ))
assert_size_stride(arg716_1, (4608, 1152), (1152, 1))
assert_size_stride(arg717_1, (4608, ), (1, ))
assert_size_stride(arg718_1, (4608, ), (1, ))
assert_size_stride(arg719_1, (4608, ), (1, ))
assert_size_stride(arg720_1, (1152, 4608), (4608, 1))
assert_size_stride(arg721_1, (1152, ), (1, ))
assert_size_stride(arg722_1, (1152, ), (1, ))
assert_size_stride(arg723_1, (1152, ), (1, ))
assert_size_stride(arg724_1, (6, 1152), (1152, 1))
assert_size_stride(arg725_1, (1152, 1152), (1152, 1))
assert_size_stride(arg726_1, (1152, ), (1, ))
assert_size_stride(arg727_1, (1152, ), (1, ))
assert_size_stride(arg728_1, (1152, ), (1, ))
assert_size_stride(arg729_1, (1152, 1152), (1152, 1))
assert_size_stride(arg730_1, (1152, ), (1, ))
assert_size_stride(arg731_1, (1152, ), (1, ))
assert_size_stride(arg732_1, (1152, ), (1, ))
assert_size_stride(arg733_1, (1152, 1152), (1152, 1))
assert_size_stride(arg734_1, (1152, ), (1, ))
assert_size_stride(arg735_1, (1152, ), (1, ))
assert_size_stride(arg736_1, (1152, ), (1, ))
assert_size_stride(arg737_1, (1152, 1152), (1152, 1))
assert_size_stride(arg738_1, (1152, ), (1, ))
assert_size_stride(arg739_1, (1152, ), (1, ))
assert_size_stride(arg740_1, (1152, ), (1, ))
assert_size_stride(arg741_1, (1152, 1152), (1152, 1))
assert_size_stride(arg742_1, (1152, ), (1, ))
assert_size_stride(arg743_1, (1152, ), (1, ))
assert_size_stride(arg744_1, (1152, ), (1, ))
assert_size_stride(arg745_1, (1152, 1152), (1152, 1))
assert_size_stride(arg746_1, (1152, ), (1, ))
assert_size_stride(arg747_1, (1152, ), (1, ))
assert_size_stride(arg748_1, (1152, ), (1, ))
assert_size_stride(arg749_1, (1152, 1152), (1152, 1))
assert_size_stride(arg750_1, (1152, ), (1, ))
assert_size_stride(arg751_1, (1152, ), (1, ))
assert_size_stride(arg752_1, (1152, ), (1, ))
assert_size_stride(arg753_1, (1152, 1152), (1152, 1))
assert_size_stride(arg754_1, (1152, ), (1, ))
assert_size_stride(arg755_1, (1152, ), (1, ))
assert_size_stride(arg756_1, (1152, ), (1, ))
assert_size_stride(arg757_1, (4608, 1152), (1152, 1))
assert_size_stride(arg758_1, (4608, ), (1, ))
assert_size_stride(arg759_1, (4608, ), (1, ))
assert_size_stride(arg760_1, (4608, ), (1, ))
assert_size_stride(arg761_1, (1152, 4608), (4608, 1))
assert_size_stride(arg762_1, (1152, ), (1, ))
assert_size_stride(arg763_1, (1152, ), (1, ))
assert_size_stride(arg764_1, (1152, ), (1, ))
assert_size_stride(arg765_1, (6, 1152), (1152, 1))
assert_size_stride(arg766_1, (1152, 1152), (1152, 1))
assert_size_stride(arg767_1, (1152, ), (1, ))
assert_size_stride(arg768_1, (1152, ), (1, ))
assert_size_stride(arg769_1, (1152, ), (1, ))
assert_size_stride(arg770_1, (1152, 1152), (1152, 1))
assert_size_stride(arg771_1, (1152, ), (1, ))
assert_size_stride(arg772_1, (1152, ), (1, ))
assert_size_stride(arg773_1, (1152, ), (1, ))
assert_size_stride(arg774_1, (1152, 1152), (1152, 1))
assert_size_stride(arg775_1, (1152, ), (1, ))
assert_size_stride(arg776_1, (1152, ), (1, ))
assert_size_stride(arg777_1, (1152, ), (1, ))
assert_size_stride(arg778_1, (1152, 1152), (1152, 1))
assert_size_stride(arg779_1, (1152, ), (1, ))
assert_size_stride(arg780_1, (1152, ), (1, ))
assert_size_stride(arg781_1, (1152, ), (1, ))
assert_size_stride(arg782_1, (1152, 1152), (1152, 1))
assert_size_stride(arg783_1, (1152, ), (1, ))
assert_size_stride(arg784_1, (1152, ), (1, ))
assert_size_stride(arg785_1, (1152, ), (1, ))
assert_size_stride(arg786_1, (1152, 1152), (1152, 1))
assert_size_stride(arg787_1, (1152, ), (1, ))
assert_size_stride(arg788_1, (1152, ), (1, ))
assert_size_stride(arg789_1, (1152, ), (1, ))
assert_size_stride(arg790_1, (1152, 1152), (1152, 1))
assert_size_stride(arg791_1, (1152, ), (1, ))
assert_size_stride(arg792_1, (1152, ), (1, ))
assert_size_stride(arg793_1, (1152, ), (1, ))
assert_size_stride(arg794_1, (1152, 1152), (1152, 1))
assert_size_stride(arg795_1, (1152, ), (1, ))
assert_size_stride(arg796_1, (1152, ), (1, ))
assert_size_stride(arg797_1, (1152, ), (1, ))
assert_size_stride(arg798_1, (4608, 1152), (1152, 1))
assert_size_stride(arg799_1, (4608, ), (1, ))
assert_size_stride(arg800_1, (4608, ), (1, ))
assert_size_stride(arg801_1, (4608, ), (1, ))
assert_size_stride(arg802_1, (1152, 4608), (4608, 1))
assert_size_stride(arg803_1, (1152, ), (1, ))
assert_size_stride(arg804_1, (1152, ), (1, ))
assert_size_stride(arg805_1, (1152, ), (1, ))
assert_size_stride(arg806_1, (6, 1152), (1152, 1))
assert_size_stride(arg807_1, (1152, 1152), (1152, 1))
assert_size_stride(arg808_1, (1152, ), (1, ))
assert_size_stride(arg809_1, (1152, ), (1, ))
assert_size_stride(arg810_1, (1152, ), (1, ))
assert_size_stride(arg811_1, (1152, 1152), (1152, 1))
assert_size_stride(arg812_1, (1152, ), (1, ))
assert_size_stride(arg813_1, (1152, ), (1, ))
assert_size_stride(arg814_1, (1152, ), (1, ))
assert_size_stride(arg815_1, (1152, 1152), (1152, 1))
assert_size_stride(arg816_1, (1152, ), (1, ))
assert_size_stride(arg817_1, (1152, ), (1, ))
assert_size_stride(arg818_1, (1152, ), (1, ))
assert_size_stride(arg819_1, (1152, 1152), (1152, 1))
assert_size_stride(arg820_1, (1152, ), (1, ))
assert_size_stride(arg821_1, (1152, ), (1, ))
assert_size_stride(arg822_1, (1152, ), (1, ))
assert_size_stride(arg823_1, (1152, 1152), (1152, 1))
assert_size_stride(arg824_1, (1152, ), (1, ))
assert_size_stride(arg825_1, (1152, ), (1, ))
assert_size_stride(arg826_1, (1152, ), (1, ))
assert_size_stride(arg827_1, (1152, 1152), (1152, 1))
assert_size_stride(arg828_1, (1152, ), (1, ))
assert_size_stride(arg829_1, (1152, ), (1, ))
assert_size_stride(arg830_1, (1152, ), (1, ))
assert_size_stride(arg831_1, (1152, 1152), (1152, 1))
assert_size_stride(arg832_1, (1152, ), (1, ))
assert_size_stride(arg833_1, (1152, ), (1, ))
assert_size_stride(arg834_1, (1152, ), (1, ))
assert_size_stride(arg835_1, (1152, 1152), (1152, 1))
assert_size_stride(arg836_1, (1152, ), (1, ))
assert_size_stride(arg837_1, (1152, ), (1, ))
assert_size_stride(arg838_1, (1152, ), (1, ))
assert_size_stride(arg839_1, (4608, 1152), (1152, 1))
assert_size_stride(arg840_1, (4608, ), (1, ))
assert_size_stride(arg841_1, (4608, ), (1, ))
assert_size_stride(arg842_1, (4608, ), (1, ))
assert_size_stride(arg843_1, (1152, 4608), (4608, 1))
assert_size_stride(arg844_1, (1152, ), (1, ))
assert_size_stride(arg845_1, (1152, ), (1, ))
assert_size_stride(arg846_1, (1152, ), (1, ))
assert_size_stride(arg847_1, (6, 1152), (1152, 1))
assert_size_stride(arg848_1, (1152, 1152), (1152, 1))
assert_size_stride(arg849_1, (1152, ), (1, ))
assert_size_stride(arg850_1, (1152, ), (1, ))
assert_size_stride(arg851_1, (1152, ), (1, ))
assert_size_stride(arg852_1, (1152, 1152), (1152, 1))
assert_size_stride(arg853_1, (1152, ), (1, ))
assert_size_stride(arg854_1, (1152, ), (1, ))
assert_size_stride(arg855_1, (1152, ), (1, ))
assert_size_stride(arg856_1, (1152, 1152), (1152, 1))
assert_size_stride(arg857_1, (1152, ), (1, ))
assert_size_stride(arg858_1, (1152, ), (1, ))
assert_size_stride(arg859_1, (1152, ), (1, ))
assert_size_stride(arg860_1, (1152, 1152), (1152, 1))
assert_size_stride(arg861_1, (1152, ), (1, ))
assert_size_stride(arg862_1, (1152, ), (1, ))
assert_size_stride(arg863_1, (1152, ), (1, ))
assert_size_stride(arg864_1, (1152, 1152), (1152, 1))
assert_size_stride(arg865_1, (1152, ), (1, ))
assert_size_stride(arg866_1, (1152, ), (1, ))
assert_size_stride(arg867_1, (1152, ), (1, ))
assert_size_stride(arg868_1, (1152, 1152), (1152, 1))
assert_size_stride(arg869_1, (1152, ), (1, ))
assert_size_stride(arg870_1, (1152, ), (1, ))
assert_size_stride(arg871_1, (1152, ), (1, ))
assert_size_stride(arg872_1, (1152, 1152), (1152, 1))
assert_size_stride(arg873_1, (1152, ), (1, ))
assert_size_stride(arg874_1, (1152, ), (1, ))
assert_size_stride(arg875_1, (1152, ), (1, ))
assert_size_stride(arg876_1, (1152, 1152), (1152, 1))
assert_size_stride(arg877_1, (1152, ), (1, ))
assert_size_stride(arg878_1, (1152, ), (1, ))
assert_size_stride(arg879_1, (1152, ), (1, ))
assert_size_stride(arg880_1, (4608, 1152), (1152, 1))
assert_size_stride(arg881_1, (4608, ), (1, ))
assert_size_stride(arg882_1, (4608, ), (1, ))
assert_size_stride(arg883_1, (4608, ), (1, ))
assert_size_stride(arg884_1, (1152, 4608), (4608, 1))
assert_size_stride(arg885_1, (1152, ), (1, ))
assert_size_stride(arg886_1, (1152, ), (1, ))
assert_size_stride(arg887_1, (1152, ), (1, ))
assert_size_stride(arg888_1, (6, 1152), (1152, 1))
assert_size_stride(arg889_1, (1152, 1152), (1152, 1))
assert_size_stride(arg890_1, (1152, ), (1, ))
assert_size_stride(arg891_1, (1152, ), (1, ))
assert_size_stride(arg892_1, (1152, ), (1, ))
assert_size_stride(arg893_1, (1152, 1152), (1152, 1))
assert_size_stride(arg894_1, (1152, ), (1, ))
assert_size_stride(arg895_1, (1152, ), (1, ))
assert_size_stride(arg896_1, (1152, ), (1, ))
assert_size_stride(arg897_1, (1152, 1152), (1152, 1))
assert_size_stride(arg898_1, (1152, ), (1, ))
assert_size_stride(arg899_1, (1152, ), (1, ))
assert_size_stride(arg900_1, (1152, ), (1, ))
assert_size_stride(arg901_1, (1152, 1152), (1152, 1))
assert_size_stride(arg902_1, (1152, ), (1, ))
assert_size_stride(arg903_1, (1152, ), (1, ))
assert_size_stride(arg904_1, (1152, ), (1, ))
assert_size_stride(arg905_1, (1152, 1152), (1152, 1))
assert_size_stride(arg906_1, (1152, ), (1, ))
assert_size_stride(arg907_1, (1152, ), (1, ))
assert_size_stride(arg908_1, (1152, ), (1, ))
assert_size_stride(arg909_1, (1152, 1152), (1152, 1))
assert_size_stride(arg910_1, (1152, ), (1, ))
assert_size_stride(arg911_1, (1152, ), (1, ))
assert_size_stride(arg912_1, (1152, ), (1, ))
assert_size_stride(arg913_1, (1152, 1152), (1152, 1))
assert_size_stride(arg914_1, (1152, ), (1, ))
assert_size_stride(arg915_1, (1152, ), (1, ))
assert_size_stride(arg916_1, (1152, ), (1, ))
assert_size_stride(arg917_1, (1152, 1152), (1152, 1))
assert_size_stride(arg918_1, (1152, ), (1, ))
assert_size_stride(arg919_1, (1152, ), (1, ))
assert_size_stride(arg920_1, (1152, ), (1, ))
assert_size_stride(arg921_1, (4608, 1152), (1152, 1))
assert_size_stride(arg922_1, (4608, ), (1, ))
assert_size_stride(arg923_1, (4608, ), (1, ))
assert_size_stride(arg924_1, (4608, ), (1, ))
assert_size_stride(arg925_1, (1152, 4608), (4608, 1))
assert_size_stride(arg926_1, (1152, ), (1, ))
assert_size_stride(arg927_1, (1152, ), (1, ))
assert_size_stride(arg928_1, (1152, ), (1, ))
assert_size_stride(arg929_1, (6, 1152), (1152, 1))
assert_size_stride(arg930_1, (1152, 1152), (1152, 1))
assert_size_stride(arg931_1, (1152, ), (1, ))
assert_size_stride(arg932_1, (1152, ), (1, ))
assert_size_stride(arg933_1, (1152, ), (1, ))
assert_size_stride(arg934_1, (1152, 1152), (1152, 1))
assert_size_stride(arg935_1, (1152, ), (1, ))
assert_size_stride(arg936_1, (1152, ), (1, ))
assert_size_stride(arg937_1, (1152, ), (1, ))
assert_size_stride(arg938_1, (1152, 1152), (1152, 1))
assert_size_stride(arg939_1, (1152, ), (1, ))
assert_size_stride(arg940_1, (1152, ), (1, ))
assert_size_stride(arg941_1, (1152, ), (1, ))
assert_size_stride(arg942_1, (1152, 1152), (1152, 1))
assert_size_stride(arg943_1, (1152, ), (1, ))
assert_size_stride(arg944_1, (1152, ), (1, ))
assert_size_stride(arg945_1, (1152, ), (1, ))
assert_size_stride(arg946_1, (1152, 1152), (1152, 1))
assert_size_stride(arg947_1, (1152, ), (1, ))
assert_size_stride(arg948_1, (1152, ), (1, ))
assert_size_stride(arg949_1, (1152, ), (1, ))
assert_size_stride(arg950_1, (1152, 1152), (1152, 1))
assert_size_stride(arg951_1, (1152, ), (1, ))
assert_size_stride(arg952_1, (1152, ), (1, ))
assert_size_stride(arg953_1, (1152, ), (1, ))
assert_size_stride(arg954_1, (1152, 1152), (1152, 1))
assert_size_stride(arg955_1, (1152, ), (1, ))
assert_size_stride(arg956_1, (1152, ), (1, ))
assert_size_stride(arg957_1, (1152, ), (1, ))
assert_size_stride(arg958_1, (1152, 1152), (1152, 1))
assert_size_stride(arg959_1, (1152, ), (1, ))
assert_size_stride(arg960_1, (1152, ), (1, ))
assert_size_stride(arg961_1, (1152, ), (1, ))
assert_size_stride(arg962_1, (4608, 1152), (1152, 1))
assert_size_stride(arg963_1, (4608, ), (1, ))
assert_size_stride(arg964_1, (4608, ), (1, ))
assert_size_stride(arg965_1, (4608, ), (1, ))
assert_size_stride(arg966_1, (1152, 4608), (4608, 1))
assert_size_stride(arg967_1, (1152, ), (1, ))
assert_size_stride(arg968_1, (1152, ), (1, ))
assert_size_stride(arg969_1, (1152, ), (1, ))
assert_size_stride(arg970_1, (6, 1152), (1152, 1))
assert_size_stride(arg971_1, (1152, 1152), (1152, 1))
assert_size_stride(arg972_1, (1152, ), (1, ))
assert_size_stride(arg973_1, (1152, ), (1, ))
assert_size_stride(arg974_1, (1152, ), (1, ))
assert_size_stride(arg975_1, (1152, 1152), (1152, 1))
assert_size_stride(arg976_1, (1152, ), (1, ))
assert_size_stride(arg977_1, (1152, ), (1, ))
assert_size_stride(arg978_1, (1152, ), (1, ))
assert_size_stride(arg979_1, (1152, 1152), (1152, 1))
assert_size_stride(arg980_1, (1152, ), (1, ))
assert_size_stride(arg981_1, (1152, ), (1, ))
assert_size_stride(arg982_1, (1152, ), (1, ))
assert_size_stride(arg983_1, (1152, 1152), (1152, 1))
assert_size_stride(arg984_1, (1152, ), (1, ))
assert_size_stride(arg985_1, (1152, ), (1, ))
assert_size_stride(arg986_1, (1152, ), (1, ))
assert_size_stride(arg987_1, (1152, 1152), (1152, 1))
assert_size_stride(arg988_1, (1152, ), (1, ))
assert_size_stride(arg989_1, (1152, ), (1, ))
torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg990_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg991_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg992_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg993_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg994_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg995_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg996_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg997_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg998_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg999_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1000_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1001_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1002_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1003_1, (4608, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1004_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1005_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1006_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1007_1, (1152, 4608), (4608, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1008_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1009_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1010_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1011_1, (6, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1012_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1013_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1014_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1015_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1016_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 
1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1017_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1018_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1019_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1020_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1021_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1022_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1023_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1024_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1025_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1026_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1027_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1028_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1029_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1030_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1031_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1032_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1033_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1034_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1035_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1036_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1037_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1038_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1039_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1040_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1041_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1042_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1043_1, (1152, ), (1, )) V0808 
07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1044_1, (4608, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1045_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1046_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1047_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1048_1, (1152, 4608), (4608, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1049_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1050_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1051_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1052_1, (6, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1053_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1054_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1055_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1056_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1057_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1058_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1059_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1060_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1061_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1062_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1063_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1064_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1065_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1066_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1067_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1068_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1069_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1070_1, (1152, 
), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1071_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1072_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1073_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1074_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1075_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1076_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1077_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1078_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1079_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1080_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1081_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1082_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1083_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1084_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1085_1, (4608, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1086_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1087_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1088_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1089_1, (1152, 4608), (4608, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1090_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1091_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1092_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1093_1, (6, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1094_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1095_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1096_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] 
assert_size_stride(arg1097_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1098_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1099_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1100_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1101_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1102_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1103_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1104_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1105_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1106_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1107_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1108_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1109_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1110_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1111_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1112_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1113_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1114_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1115_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1116_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1117_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1118_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1119_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1120_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1121_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1122_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1123_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] 
[__output_code] assert_size_stride(arg1124_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1125_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1126_1, (4608, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1127_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1128_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1129_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1130_1, (1152, 4608), (4608, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1131_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1132_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1133_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1134_1, (6, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1135_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1136_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1137_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1138_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1139_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1140_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1141_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1142_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1143_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1144_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1145_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1146_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1147_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1148_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1149_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1150_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1151_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1152_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1153_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1154_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1155_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1156_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1157_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1158_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1159_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1160_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1161_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1162_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1163_1, (1152, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1164_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1165_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1166_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1167_1, (4608, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1168_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1169_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1170_1, (4608, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1171_1, (1152, 4608), (4608, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1172_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1173_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1174_1, (1152, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1175_1, (2, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1176_1, (32, 1152), (1152, 1)) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1177_1, (32, ), (1, )) V0808 
07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1178_1, (32, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] assert_size_stride(arg1179_1, (32, ), (1, )) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] with torch.cuda._DeviceGuard(0): V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] torch.cuda.set_device(0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf0 = empty_strided_cuda((2, 256), (256, 1), torch.float32) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [emb_3], Original ATen: [aten.cat] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] stream0 = get_raw_stream(0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_cat_0.run(arg5_1, buf0, 512, grid=grid(512), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg5_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf1 = empty_strided_cuda((2, 256), (256, 1), torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [emb_4, to_2], Original ATen: [aten._to_copy, aten.cat] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__to_copy_cat_1.run(buf0, buf1, 512, grid=grid(512), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf0 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf3 = empty_strided_cuda((2, 1152), (1152, 1), torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [emb_4, sample, sample_1, to_2], Original ATen: [aten._to_copy, aten.add, aten.cat, aten.mm, aten.mul, aten.silu, aten.view] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_tem_fused__to_copy_add_cat_mm_mul_silu_view_2.run(buf1, arg6_1, arg7_1, arg9_1, buf3, grid=torch._inductor.kernel.mm_common.mm_grid(2, 1152, meta0), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg6_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg7_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg9_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf4 = empty_strided_cuda((2, 1152), (1152, 1), torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf5 = empty_strided_cuda((2, 1152), (1152, 1), torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [sample, sample_1, sample_2, silu_1], Original ATen: [aten.add, aten.mm, aten.mul, aten.silu, aten.view] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_tem_fused_add_mm_mul_silu_view_3.run(buf3, arg10_1, arg11_1, arg13_1, buf4, buf5, grid=torch._inductor.kernel.mm_common.mm_grid(2, 1152, meta1), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg10_1 V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf3 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf6 = empty_strided_cuda((2, 6912), (6912, 1), torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [sample_2, silu_1, timestep], Original ATen: [aten.add, aten.mm, aten.mul, aten.silu, aten.view] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_tem_fused_add_mm_mul_silu_view_4.run(buf5, arg14_1, buf6, grid=torch._inductor.kernel.mm_common.mm_grid(2, 6912, meta2), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg14_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf5 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf7 = empty_strided_cuda((2, 4, 128, 128), (65536, 1, 512, 4), torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [latent], Original ATen: [aten.convolution] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_convolution_5.run(arg1_1, buf7, 8, 16384, grid=grid(8, 16384), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf8 = empty_strided_cuda((2, 1152, 64, 64), (4718592, 1, 73728, 1152), torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [latent], Original ATen: [aten.convolution] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_tem_fused_convolution_6.run(buf7, arg2_1, buf8, grid=torch._inductor.kernel.conv.conv2d_grid(2, 1152, 64, 64, meta3), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg2_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf7 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf12 = empty_strided_cuda((2, 4096, 1152), (4718592, 1152, 1), torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf13 = buf12; del buf12 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [add, add_2, mul_4, norm_hidden_states, norm_hidden_states_1], Original ATen: [aten.add, aten.mul, aten.native_layer_norm] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_red_fused_add_mul_native_layer_norm_7.run(buf13, buf8, arg3_1, arg4_1, arg27_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf14 = empty_strided_cuda((8192, 1152), (1152, 1), torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf13, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg28_1, (1152, 1152), (1, 1152), 0), out=buf14) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg28_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] 
buf15 = empty_strided_cuda((8192, 1152), (1152, 1), torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf13, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg32_1, (1152, 1152), (1, 1152), 0), out=buf15) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg32_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf16 = empty_strided_cuda((8192, 1152), (1152, 1), torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf13, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg36_1, (1152, 1152), (1, 1152), 0), out=buf16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg36_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf13 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf17 = reinterpret_tensor(buf14, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf14 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_4], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf17, arg29_1, arg31_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg29_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg31_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf18 = reinterpret_tensor(buf15, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf15 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_4], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf18, arg33_1, arg35_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg33_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg35_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf19 = reinterpret_tensor(buf16, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf16 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_4], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf19, arg37_1, arg39_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg37_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg39_1 V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_4], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf20 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf17, buf18, buf19, None, False) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf17 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf21 = buf20[0] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf20 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf25 = reinterpret_tensor(buf19, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf19 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_5], Original ATen: [aten.clone] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_clone_9.run(buf21, buf25, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf26 = reinterpret_tensor(buf21, (8192, 1152), (1152, 1), 0); del buf21 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf25, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg40_1, (1152, 1152), (1, 1152), 0), out=buf26) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg40_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf27 = reinterpret_tensor(buf26, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf26 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [add, attn_output, hidden_states_10, hidden_states_9], Original ATen: [aten.add, aten.div, aten.mul] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_add_div_mul_10.run(buf27, arg27_1, buf6, arg15_1, arg17_1, arg41_1, arg43_1, buf8, arg3_1, arg4_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg3_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg41_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg43_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg4_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf28 = reinterpret_tensor(buf8, (8192, 1152), (1152, 1), 0); del buf8 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf27, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg44_1, (1152, 1152), (1, 1152), 0), out=buf28) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg44_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf30 = empty_strided_cuda((2, 300, 1152), (345600, 1152, 1), torch.bfloat16) V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_1, hidden_states_2], Original ATen: [aten.gelu, aten.mm] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_tem_fused_gelu_mm_11.run(arg22_1, arg18_1, arg19_1, arg21_1, buf30, grid=torch._inductor.kernel.mm_common.mm_grid(600, 1152, meta4), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg18_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg19_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg21_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg22_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf31 = empty_strided_cuda((600, 1152), (1152, 1), torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf30, (600, 1152), (1152, 1), 0), reinterpret_tensor(arg23_1, (1152, 1152), (1, 1152), 0), out=buf31) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg23_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf32 = reinterpret_tensor(buf31, (2, 300, 1152), (345600, 1152, 1), 0); del buf31 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_3], Original ATen: [aten.add] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_add_12.run(buf32, arg24_1, arg26_1, 691200, grid=grid(691200), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg24_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg26_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf33 = reinterpret_tensor(buf30, (600, 1152), (1152, 1), 0); del buf30 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf35 = empty_strided_cuda((600, 1152), (1152, 1), torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf75 = empty_strided_cuda((600, 1152), (1152, 1), torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf77 = empty_strided_cuda((600, 1152), (1152, 1), torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [key_2, key_6, value_2, value_6], Original ATen: [aten.mm] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_mm_13.run(buf32, buf33, buf35, buf75, buf77, 691200, grid=grid(691200), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf34 = empty_strided_cuda((600, 1152), (1152, 1), torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(buf33, reinterpret_tensor(arg48_1, (1152, 1152), (1, 1152), 0), out=buf34) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] 
del arg48_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf36 = buf33; del buf33 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(buf35, reinterpret_tensor(arg52_1, (1152, 1152), (1, 1152), 0), out=buf36) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg52_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf37 = reinterpret_tensor(buf28, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf28 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_11], Original ATen: [aten._scaled_dot_product_efficient_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf37, arg45_1, arg47_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg45_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg47_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf38 = reinterpret_tensor(buf34, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf34 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_11], Original ATen: [aten._scaled_dot_product_efficient_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_add_12.run(buf38, arg49_1, arg51_1, 691200, grid=grid(691200), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg49_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg51_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf39 = reinterpret_tensor(buf36, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf36 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_11], Original ATen: [aten._scaled_dot_product_efficient_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_add_12.run(buf39, arg53_1, arg55_1, 691200, grid=grid(691200), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg53_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg55_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf40 = empty_strided_cuda((2, 16, 1, 304), (4864, 304, 304, 1), torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf82 = empty_strided_cuda((2, 16, 1, 304), (4864, 304, 304, 1), torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_11, hidden_states_30], Original ATen: [aten.constant_pad_nd] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_constant_pad_nd_14.run(arg0_1, buf40, buf82, 9728, grid=grid(9728), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: 
[hidden_states_11], Original ATen: [aten._scaled_dot_product_efficient_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf41 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf37, buf38, buf39, reinterpret_tensor(buf40, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf42 = buf41[0] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf41 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf46 = reinterpret_tensor(buf37, (8192, 1152), (1152, 1), 0); del buf37 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf42, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg56_1, (1152, 1152), (1, 1152), 0), out=buf46) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg56_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf50 = reinterpret_tensor(buf42, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf42 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf51 = buf50; del buf50 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [add_6, hidden_states_16, hidden_states_17, mul_6, norm_hidden_states_2, norm_hidden_states_3], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_red_fused_add_div_mul_native_layer_norm_15.run(buf51, buf46, arg57_1, arg59_1, buf27, arg27_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf52 = empty_strided_cuda((8192, 4608), (4608, 1), torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf51, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg60_1, (1152, 4608), (1, 1152), 0), out=buf52) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg60_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf53 = reinterpret_tensor(buf52, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf52 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_19], Original ATen: [aten.gelu] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_gelu_16.run(buf53, arg61_1, arg63_1, 37748736, grid=grid(37748736), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg61_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg63_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf54 = reinterpret_tensor(buf51, (8192, 1152), (1152, 1), 0); del buf51 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 
07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf53, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg64_1, (4608, 1152), (1, 4608), 0), out=buf54) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg64_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf55 = reinterpret_tensor(buf54, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf54 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf59 = reinterpret_tensor(buf25, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf25 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [add_10, ff_output, hidden_states_16, hidden_states_17, hidden_states_22, mul_8, norm_hidden_states_4, norm_hidden_states_5], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_red_fused_add_div_mul_native_layer_norm_17.run(buf55, arg27_1, buf6, arg15_1, arg17_1, arg65_1, arg67_1, buf46, arg57_1, arg59_1, buf27, arg68_1, buf59, 8192, 1152, grid=grid(8192), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg27_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg57_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg59_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg65_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg67_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf60 = buf46; del buf46 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf59, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg69_1, (1152, 1152), (1, 1152), 0), out=buf60) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg69_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf61 = reinterpret_tensor(buf27, (8192, 1152), (1152, 1), 0); del buf27 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf59, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg73_1, (1152, 1152), (1, 1152), 0), out=buf61) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg73_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf62 = reinterpret_tensor(buf18, (8192, 1152), (1152, 1), 0); del buf18 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf59, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg77_1, (1152, 1152), (1, 1152), 0), out=buf62) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg77_1 V0808 
del buf59
buf63 = reinterpret_tensor(buf60, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf60 # reuse
# Source Nodes: [hidden_states_23], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf63, arg70_1, arg72_1, 9437184, grid=grid(9437184), stream=stream0)
del arg70_1
del arg72_1
buf64 = reinterpret_tensor(buf61, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf61 # reuse
# Source Nodes: [hidden_states_23], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf64, arg74_1, arg76_1, 9437184, grid=grid(9437184), stream=stream0)
del arg74_1
del arg76_1
buf65 = reinterpret_tensor(buf62, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf62 # reuse
# Source Nodes: [hidden_states_23], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf65, arg78_1, arg80_1, 9437184, grid=grid(9437184), stream=stream0)
del arg78_1
del arg80_1
# Source Nodes: [hidden_states_23], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf66 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf63, buf64, buf65, None, False)
del buf63
buf67 = buf66[0]
del buf66
buf71 = reinterpret_tensor(buf65, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf65 # reuse
# Source Nodes: [hidden_states_24], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf67, buf71, 9437184, grid=grid(9437184), stream=stream0)
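buf66 above is plain self-attention over the 4096 image tokens (no mask, non-causal); aten._scaled_dot_product_cudnn_attention is one backend of the public scaled_dot_product_attention API, so the call corresponds roughly to the sketch below (illustrative tensors, not the generated code):

    import torch
    import torch.nn.functional as F

    q = torch.randn(2, 16, 4096, 72)   # buf63: (batch, heads, tokens, head_dim)
    k = torch.randn(2, 16, 4096, 72)   # buf64
    v = torch.randn(2, 16, 4096, 72)   # buf65

    out = F.scaled_dot_product_attention(q, k, v)            # like buf67 = buf66[0]
    out = out.transpose(1, 2).reshape(2, 4096, 16 * 72)      # the clone back to (2, 4096, 1152), cf. buf71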
buf72 = reinterpret_tensor(buf67, (8192, 1152), (1152, 1), 0); del buf67 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf71, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg81_1, (1152, 1152), (1, 1152), 0), out=buf72)
del arg81_1
buf73 = reinterpret_tensor(buf72, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf72 # reuse
# Source Nodes: [attn_output_1, hidden_states_28, hidden_states_29], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf73, arg68_1, buf6, arg15_1, arg17_1, arg82_1, arg84_1, buf55, 9437184, grid=grid(9437184), stream=stream0)
del arg82_1
del arg84_1
buf74 = reinterpret_tensor(buf55, (8192, 1152), (1152, 1), 0); del buf55 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf73, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg85_1, (1152, 1152), (1, 1152), 0), out=buf74)
del arg85_1
buf76 = reinterpret_tensor(buf39, (600, 1152), (1152, 1), 0); del buf39 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf75, reinterpret_tensor(arg89_1, (1152, 1152), (1, 1152), 0), out=buf76)
del arg89_1
buf78 = buf75; del buf75 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf77, reinterpret_tensor(arg93_1, (1152, 1152), (1, 1152), 0), out=buf78)
del arg93_1
buf79 = reinterpret_tensor(buf74, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf74 # reuse
# Source Nodes: [hidden_states_30], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf79, arg86_1, arg88_1, 9437184, grid=grid(9437184), stream=stream0)
del arg86_1
del arg88_1
buf80 = reinterpret_tensor(buf76, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf76 # reuse
# Source Nodes: [hidden_states_30], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf80, arg90_1, arg92_1, 691200, grid=grid(691200), stream=stream0)
del arg90_1
del arg92_1
buf81 = reinterpret_tensor(buf78, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf78 # reuse
# Source Nodes: [hidden_states_30], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf81, arg94_1, arg96_1, 691200, grid=grid(691200), stream=stream0)
del arg94_1
del arg96_1
# Source Nodes: [hidden_states_30], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf83 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf79, buf80, buf81, reinterpret_tensor(buf82, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf84 = buf83[0]
del buf83
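buf83 is the cross-attention of the image tokens against the 300 text-encoder tokens. The attn_bias passed in is a (2, 16, 4096, 300) view of buf82 with stride 0 on the query axis, i.e. one bias row per (batch, head) broadcast over all 4096 queries. A hedged eager-mode sketch of the same call shapes (names are illustrative):

    import torch
    import torch.nn.functional as F

    q = torch.randn(2, 16, 4096, 72)     # buf79: image-token queries
    k = torch.randn(2, 16, 300, 72)      # buf80: text-token keys
    v = torch.randn(2, 16, 300, 72)      # buf81: text-token values
    bias = torch.zeros(2, 16, 1, 300).expand(2, 16, 4096, 300)   # stride-0 broadcast, like the buf82 view

    out = F.scaled_dot_product_attention(q, k, v, attn_mask=bias)   # -> (2, 16, 4096, 72), cf. buf84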
buf88 = reinterpret_tensor(buf79, (8192, 1152), (1152, 1), 0); del buf79 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf84, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg97_1, (1152, 1152), (1, 1152), 0), out=buf88)
del arg97_1
buf92 = reinterpret_tensor(buf84, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf84 # reuse
buf93 = buf92; del buf92 # reuse
# Source Nodes: [add_14, hidden_states_35, hidden_states_36, mul_10, norm_hidden_states_6, norm_hidden_states_7], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_15.run(buf93, buf88, arg98_1, arg100_1, buf73, arg68_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
buf94 = reinterpret_tensor(buf53, (8192, 4608), (4608, 1), 0); del buf53 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf93, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg101_1, (1152, 4608), (1, 1152), 0), out=buf94)
del arg101_1
buf95 = reinterpret_tensor(buf94, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf94 # reuse
# Source Nodes: [hidden_states_38], Original ATen: [aten.gelu]
triton_poi_fused_gelu_16.run(buf95, arg102_1, arg104_1, 37748736, grid=grid(37748736), stream=stream0)
del arg102_1
del arg104_1
buf96 = reinterpret_tensor(buf93, (8192, 1152), (1152, 1), 0); del buf93 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf95, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg105_1, (4608, 1152), (1, 4608), 0), out=buf96)
del arg105_1
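The 1152 -> 4608 matmul, the fused GELU, and the 4608 -> 1152 matmul above form the block's feed-forward network (ff_output_1). A sketch of the same layer shapes in eager PyTorch; the module layout and the role of the extra tensors passed to triton_poi_fused_gelu_16 (bias and/or scale terms) are assumptions, not read from the log:

    import torch
    import torch.nn as nn

    ff = nn.Sequential(
        nn.Linear(1152, 4608),   # cf. arg101_1
        nn.GELU(),               # cf. hidden_states_38
        nn.Linear(4608, 1152),   # cf. arg105_1
    )

    x = torch.randn(2, 4096, 1152)
    y = ff(x)                    # -> (2, 4096, 1152), i.e. buf96 viewed back to the hidden shape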
buf97 = reinterpret_tensor(buf96, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf96 # reuse
buf101 = reinterpret_tensor(buf71, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf71 # reuse
# Source Nodes: [add_18, ff_output_1, hidden_states_35, hidden_states_36, hidden_states_41, mul_12, norm_hidden_states_8, norm_hidden_states_9], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_17.run(buf97, arg68_1, buf6, arg15_1, arg17_1, arg106_1, arg108_1, buf88, arg98_1, arg100_1, buf73, arg109_1, buf101, 8192, 1152, grid=grid(8192), stream=stream0)
del arg100_1
del arg106_1
del arg108_1
del arg68_1
del arg98_1
buf102 = buf88; del buf88 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf101, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg110_1, (1152, 1152), (1, 1152), 0), out=buf102)
del arg110_1
buf103 = reinterpret_tensor(buf73, (8192, 1152), (1152, 1), 0); del buf73 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf101, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg114_1, (1152, 1152), (1, 1152), 0), out=buf103)
del arg114_1
buf104 = reinterpret_tensor(buf64, (8192, 1152), (1152, 1), 0); del buf64 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf101, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg118_1, (1152, 1152), (1, 1152), 0), out=buf104)
del arg118_1
del buf101
buf105 = reinterpret_tensor(buf102, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf102 # reuse
# Source Nodes: [hidden_states_42], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf105, arg111_1, arg113_1, 9437184, grid=grid(9437184), stream=stream0)
del arg111_1
del arg113_1
buf106 = reinterpret_tensor(buf103, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf103 # reuse
# Source Nodes: [hidden_states_42], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf106, arg115_1, arg117_1, 9437184, grid=grid(9437184), stream=stream0)
del arg115_1
del arg117_1
buf107 = reinterpret_tensor(buf104, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf104 # reuse
# Source Nodes: [hidden_states_42], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf107, arg119_1, arg121_1, 9437184, grid=grid(9437184), stream=stream0)
del arg119_1
del arg121_1
# Source Nodes: [hidden_states_42], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf108 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf105, buf106, buf107, None, False)
del buf105
buf109 = buf108[0]
del buf108
buf113 = reinterpret_tensor(buf107, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf107 # reuse
# Source Nodes: [hidden_states_43], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf109, buf113, 9437184, grid=grid(9437184), stream=stream0)
buf114 = reinterpret_tensor(buf109, (8192, 1152), (1152, 1), 0); del buf109 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf113, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg122_1, (1152, 1152), (1, 1152), 0), out=buf114)
del arg122_1
buf115 = reinterpret_tensor(buf114, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf114 # reuse
# Source Nodes: [attn_output_2, hidden_states_47, hidden_states_48], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf115, arg109_1, buf6, arg15_1, arg17_1, arg123_1, arg125_1, buf97, 9437184, grid=grid(9437184), stream=stream0)
del arg123_1
del arg125_1
buf116 = reinterpret_tensor(buf97, (8192, 1152), (1152, 1), 0); del buf97 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf115, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg126_1, (1152, 1152), (1, 1152), 0), out=buf116)
del arg126_1
buf117 = reinterpret_tensor(buf81, (600, 1152), (1152, 1), 0); del buf81 # reuse
buf119 = reinterpret_tensor(buf80, (600, 1152), (1152, 1), 0); del buf80 # reuse
buf159 = buf77; del buf77 # reuse
buf161 = reinterpret_tensor(buf38, (600, 1152), (1152, 1), 0); del buf38 # reuse
# Source Nodes: [key_10, key_14, value_10, value_14], Original ATen: [aten.mm]
triton_poi_fused_mm_13.run(buf32, buf117, buf119, buf159, buf161, 691200, grid=grid(691200), stream=stream0)
buf118 = buf35; del buf35 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf117, reinterpret_tensor(arg130_1, (1152, 1152), (1, 1152), 0), out=buf118)
del arg130_1
buf120 = buf117; del buf117 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf119, reinterpret_tensor(arg134_1, (1152, 1152), (1, 1152), 0), out=buf120)
del arg134_1
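A recurring pattern in this listing: `reinterpret_tensor(buf, size, stride, offset); del buf  # reuse` re-views an existing allocation under a new shape and stride instead of allocating a fresh buffer, and the `del ... # reuse` comments mark Inductor's memory planner recycling that storage. It is a zero-copy reinterpretation, comparable to torch.as_strided (illustrative sketch, not the generated code):

    import torch

    storage = torch.empty(600 * 1152)   # one flat allocation, reused under several views
    as_mat = torch.as_strided(storage, (600, 1152), (1152, 1))                     # e.g. a buf117-style (600, 1152) view
    as_heads = torch.as_strided(storage, (2, 16, 300, 72), (345600, 72, 1152, 1))  # e.g. a buf122-style (2, 16, 300, 72) view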
buf121 = reinterpret_tensor(buf116, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf116 # reuse
# Source Nodes: [hidden_states_49], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf121, arg127_1, arg129_1, 9437184, grid=grid(9437184), stream=stream0)
del arg127_1
del arg129_1
buf122 = reinterpret_tensor(buf118, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf118 # reuse
# Source Nodes: [hidden_states_49], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf122, arg131_1, arg133_1, 691200, grid=grid(691200), stream=stream0)
del arg131_1
del arg133_1
buf123 = reinterpret_tensor(buf120, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf120 # reuse
# Source Nodes: [hidden_states_49], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf123, arg135_1, arg137_1, 691200, grid=grid(691200), stream=stream0)
del arg135_1
del arg137_1
buf124 = buf82; del buf82 # reuse
buf166 = buf40; del buf40 # reuse
# Source Nodes: [hidden_states_49, hidden_states_68], Original ATen: [aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_14.run(arg0_1, buf124, buf166, 9728, grid=grid(9728), stream=stream0)
# Source Nodes: [hidden_states_49], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf125 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf121, buf122, buf123, reinterpret_tensor(buf124, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf126 = buf125[0]
del buf125
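buf124/buf166 hold the cross-attention bias built from arg0_1: the 300 key positions are padded to 304 columns (2 * 16 * 304 = 9728 elements) for alignment, and the reinterpret_tensor above then views that buffer as (2, 16, 4096, 300) with stride 0 on the query axis, so one row per (batch, head) is broadcast to all 4096 queries and only the first 300 of the 304 padded columns are read. A sketch of the equivalent view arithmetic (the actual bias values come from arg0_1 and are not visible here):

    import torch
    import torch.nn.functional as F

    mask = torch.zeros(2, 16, 300)              # stand-in for the data derived from arg0_1
    padded = F.pad(mask, (0, 4))                # (2, 16, 304), like buf124 / buf166
    bias = padded[..., :300].unsqueeze(2).expand(2, 16, 4096, 300)
    print(bias.stride())                        # (4864, 304, 0, 1) -- the strides in the reinterpret_tensor call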
buf130 = reinterpret_tensor(buf121, (8192, 1152), (1152, 1), 0); del buf121 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf126, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg138_1, (1152, 1152), (1, 1152), 0), out=buf130)
del arg138_1
buf134 = reinterpret_tensor(buf126, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf126 # reuse
buf135 = buf134; del buf134 # reuse
# Source Nodes: [add_22, hidden_states_54, hidden_states_55, mul_14, norm_hidden_states_10, norm_hidden_states_11], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_15.run(buf135, buf130, arg139_1, arg141_1, buf115, arg109_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
buf136 = reinterpret_tensor(buf95, (8192, 4608), (4608, 1), 0); del buf95 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf135, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg142_1, (1152, 4608), (1, 1152), 0), out=buf136)
del arg142_1
buf137 = reinterpret_tensor(buf136, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf136 # reuse
# Source Nodes: [hidden_states_57], Original ATen: [aten.gelu]
triton_poi_fused_gelu_16.run(buf137, arg143_1, arg145_1, 37748736, grid=grid(37748736), stream=stream0)
del arg143_1
del arg145_1
buf138 = reinterpret_tensor(buf135, (8192, 1152), (1152, 1), 0); del buf135 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf137, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg146_1, (4608, 1152), (1, 4608), 0), out=buf138)
del arg146_1
buf139 = reinterpret_tensor(buf138, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf138 # reuse
buf143 = reinterpret_tensor(buf113, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf113 # reuse
# Source Nodes: [add_26, ff_output_2, hidden_states_54, hidden_states_55, hidden_states_60, mul_16, norm_hidden_states_12, norm_hidden_states_13], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_17.run(buf139, arg109_1, buf6, arg15_1, arg17_1, arg147_1, arg149_1, buf130, arg139_1, arg141_1, buf115, arg150_1, buf143, 8192, 1152, grid=grid(8192), stream=stream0)
del arg109_1
del arg139_1
del arg141_1
del arg147_1
del arg149_1
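The recurring triton_red_fused_add_div_mul_native_layer_norm_15/_17 kernels fuse the residual add, the per-block gating, and the layer norm plus scale/shift modulation between sub-layers (the source nodes add_*, mul_*, ff_output_*, norm_hidden_states_*). Assuming a PixArt/DiT-style adaLN block -- an assumption; the exact formula and the operand of the fused aten.div are not recoverable from the kernel name -- the eager pattern being fused looks roughly like:

    import torch
    import torch.nn.functional as F

    hidden = torch.randn(2, 4096, 1152)
    sub_out = torch.randn(2, 4096, 1152)      # attention or feed-forward output of the sub-layer
    gate = torch.randn(2, 1, 1152)            # per-sample modulation vectors (e.g. derived from the timestep embedding)
    scale = torch.randn(2, 1, 1152)
    shift = torch.randn(2, 1, 1152)

    hidden = hidden + gate * sub_out                               # residual + gating (aten.add, aten.mul)
    norm = F.layer_norm(hidden, (1152,)) * (1 + scale) + shift     # modulated norm feeding the next sub-layer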
buf144 = buf130; del buf130 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf143, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg151_1, (1152, 1152), (1, 1152), 0), out=buf144)
del arg151_1
buf145 = reinterpret_tensor(buf115, (8192, 1152), (1152, 1), 0); del buf115 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf143, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg155_1, (1152, 1152), (1, 1152), 0), out=buf145)
del arg155_1
buf146 = reinterpret_tensor(buf106, (8192, 1152), (1152, 1), 0); del buf106 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf143, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg159_1, (1152, 1152), (1, 1152), 0), out=buf146)
del arg159_1
del buf143
buf147 = reinterpret_tensor(buf144, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf144 # reuse
# Source Nodes: [hidden_states_61], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf147, arg152_1, arg154_1, 9437184, grid=grid(9437184), stream=stream0)
del arg152_1
del arg154_1
buf148 = reinterpret_tensor(buf145, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf145 # reuse
# Source Nodes: [hidden_states_61], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf148, arg156_1, arg158_1, 9437184, grid=grid(9437184), stream=stream0)
del arg156_1
del arg158_1
buf149 = reinterpret_tensor(buf146, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf146 # reuse
# Source Nodes: [hidden_states_61], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf149, arg160_1, arg162_1, 9437184, grid=grid(9437184), stream=stream0)
del arg160_1
del arg162_1
# Source Nodes: [hidden_states_61], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf150 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf147, buf148, buf149, None, False)
del buf147
buf151 = buf150[0]
del buf150
buf155 = reinterpret_tensor(buf149, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf149 # reuse
# Source Nodes: [hidden_states_62], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf151, buf155, 9437184, grid=grid(9437184), stream=stream0)
buf156 = reinterpret_tensor(buf151, (8192, 1152), (1152, 1), 0); del buf151 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf155, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg163_1, (1152, 1152), (1, 1152), 0), out=buf156)
del arg163_1
buf157 = reinterpret_tensor(buf156, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf156 # reuse
# Source Nodes: [attn_output_3, hidden_states_66, hidden_states_67], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf157, arg150_1, buf6, arg15_1, arg17_1, arg164_1, arg166_1, buf139, 9437184, grid=grid(9437184), stream=stream0)
del arg164_1
del arg166_1
buf158 = reinterpret_tensor(buf139, (8192, 1152), (1152, 1), 0); del buf139 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf157, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg167_1, (1152, 1152), (1, 1152), 0), out=buf158)
del arg167_1
buf160 = reinterpret_tensor(buf123, (600, 1152), (1152, 1), 0); del buf123 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf159, reinterpret_tensor(arg171_1, (1152, 1152), (1, 1152), 0), out=buf160)
del arg171_1
buf162 = buf159; del buf159 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf161, reinterpret_tensor(arg175_1, (1152, 1152), (1, 1152), 0), out=buf162)
del arg175_1
buf163 = reinterpret_tensor(buf158, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf158 # reuse
# Source Nodes: [hidden_states_68], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf163, arg168_1, arg170_1, 9437184, grid=grid(9437184), stream=stream0)
del arg168_1
del arg170_1
buf164 = reinterpret_tensor(buf160, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf160 # reuse
# Source Nodes: [hidden_states_68], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf164, arg172_1, arg174_1, 691200, grid=grid(691200), stream=stream0)
del arg172_1
del arg174_1
buf165 = reinterpret_tensor(buf162, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf162 # reuse
# Source Nodes: [hidden_states_68], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf165, arg176_1, arg178_1, 691200, grid=grid(691200), stream=stream0)
del arg176_1
del arg178_1
# Source Nodes: [hidden_states_68], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf167 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf163, buf164, buf165, reinterpret_tensor(buf166, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf168 = buf167[0]
del buf167
buf172 = reinterpret_tensor(buf163, (8192, 1152), (1152, 1), 0); del buf163 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf168, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg179_1, (1152, 1152), (1, 1152), 0), out=buf172)
del arg179_1
buf176 = reinterpret_tensor(buf168, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf168 # reuse
buf177 = buf176; del buf176 # reuse
# Source Nodes: [add_30, hidden_states_73, hidden_states_74, mul_18, norm_hidden_states_14, norm_hidden_states_15], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_15.run(buf177, buf172, arg180_1, arg182_1, buf157, arg150_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
buf178 = reinterpret_tensor(buf137, (8192, 4608), (4608, 1), 0); del buf137 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf177, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg183_1, (1152, 4608), (1, 1152), 0), out=buf178)
del arg183_1
buf179 = reinterpret_tensor(buf178, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf178 # reuse
# Source Nodes: [hidden_states_76], Original ATen: [aten.gelu]
triton_poi_fused_gelu_16.run(buf179, arg184_1, arg186_1, 37748736, grid=grid(37748736), stream=stream0)
del arg184_1
del arg186_1
buf180 = reinterpret_tensor(buf177, (8192, 1152), (1152, 1), 0); del buf177 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf179, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg187_1, (4608, 1152), (1, 4608), 0), out=buf180)
del arg187_1
buf181 = reinterpret_tensor(buf180, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf180 # reuse
buf185 = reinterpret_tensor(buf155, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf155 # reuse
# Source Nodes: [add_34, ff_output_3, hidden_states_73, hidden_states_74, hidden_states_79, mul_20, norm_hidden_states_16, norm_hidden_states_17], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_17.run(buf181, arg150_1, buf6, arg15_1, arg17_1, arg188_1, arg190_1, buf172, arg180_1, arg182_1, buf157, arg191_1, buf185, 8192, 1152, grid=grid(8192), stream=stream0)
del arg150_1
del arg180_1
del arg182_1
del arg188_1
del arg190_1
buf186 = buf172; del buf172 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf185, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg192_1, (1152, 1152), (1, 1152), 0), out=buf186)
del arg192_1
buf187 = reinterpret_tensor(buf157, (8192, 1152), (1152, 1), 0); del buf157 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf185, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg196_1, (1152, 1152), (1, 1152), 0), out=buf187)
del arg196_1
buf188 = reinterpret_tensor(buf148, (8192, 1152), (1152, 1), 0); del buf148 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf185, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg200_1, (1152, 1152), (1, 1152), 0), out=buf188)
del arg200_1
del buf185
buf189 = reinterpret_tensor(buf186, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf186 # reuse
# Source Nodes: [hidden_states_80], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf189, arg193_1, arg195_1, 9437184, grid=grid(9437184), stream=stream0)
del arg193_1
del arg195_1
buf190 = reinterpret_tensor(buf187, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf187 # reuse
# Source Nodes: [hidden_states_80], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf190, arg197_1, arg199_1, 9437184, grid=grid(9437184), stream=stream0)
del arg197_1
del arg199_1
buf191 = reinterpret_tensor(buf188, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf188 # reuse
# Source Nodes: [hidden_states_80], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf191, arg201_1, arg203_1, 9437184, grid=grid(9437184), stream=stream0)
del arg201_1
del arg203_1
# Source Nodes: [hidden_states_80], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf192 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf189, buf190, buf191, None, False)
del buf189
buf193 = buf192[0]
del buf192
buf197 = reinterpret_tensor(buf191, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf191 # reuse
# Source Nodes: [hidden_states_81], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf193, buf197, 9437184, grid=grid(9437184), stream=stream0)
buf198 = reinterpret_tensor(buf193, (8192, 1152), (1152, 1), 0); del buf193 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf197, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg204_1, (1152, 1152), (1, 1152), 0), out=buf198)
del arg204_1
buf199 = reinterpret_tensor(buf198, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf198 # reuse
# Source Nodes: [attn_output_4, hidden_states_85, hidden_states_86], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf199, arg191_1, buf6, arg15_1, arg17_1, arg205_1, arg207_1, buf181, 9437184, grid=grid(9437184), stream=stream0)
del arg205_1
del arg207_1
buf200 = reinterpret_tensor(buf181, (8192, 1152), (1152, 1), 0); del buf181 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf199, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg208_1, (1152, 1152), (1, 1152), 0), out=buf200)
del arg208_1
buf201 = reinterpret_tensor(buf165, (600, 1152), (1152, 1), 0); del buf165 # reuse
buf203 = reinterpret_tensor(buf164, (600, 1152), (1152, 1), 0); del buf164 # reuse
buf243 = buf161; del buf161 # reuse
buf245 = reinterpret_tensor(buf122, (600, 1152), (1152, 1), 0); del buf122 # reuse
# Source Nodes: [key_18, key_22, value_18, value_22], Original ATen: [aten.mm]
triton_poi_fused_mm_13.run(buf32, buf201, buf203, buf243, buf245, 691200, grid=grid(691200), stream=stream0)
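triton_poi_fused_mm_13 above stages four (600, 1152) buffers derived from buf32 (by the shapes involved, the 300-token encoder hidden states for the batch of 2), so that the key/value projections of two consecutive blocks (key_18/value_18 and key_22/value_22) can each be issued as a plain matmul against a 1152x1152 weight. In eager terms each projection is just a linear over the flattened encoder states; a sketch with illustrative names:

    import torch

    encoder_states = torch.randn(2, 300, 1152)          # text-encoder output feeding every block's cross-attention
    w_k = torch.randn(1152, 1152)                       # e.g. arg212_1

    k = encoder_states.reshape(600, 1152) @ w_k.T       # cf. buf202: (600, 1152)
    k = k.view(2, 300, 16, 72).transpose(1, 2)          # -> (2, 16, 300, 72), cf. buf206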
07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf202 = buf119; del buf119 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(buf201, reinterpret_tensor(arg212_1, (1152, 1152), (1, 1152), 0), out=buf202) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg212_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf204 = buf201; del buf201 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(buf203, reinterpret_tensor(arg216_1, (1152, 1152), (1, 1152), 0), out=buf204) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg216_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf205 = reinterpret_tensor(buf200, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf200 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_87], Original ATen: [aten._scaled_dot_product_efficient_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf205, arg209_1, arg211_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg209_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg211_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf206 = reinterpret_tensor(buf202, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf202 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_87], Original ATen: [aten._scaled_dot_product_efficient_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_add_12.run(buf206, arg213_1, arg215_1, 691200, grid=grid(691200), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg213_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg215_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf207 = reinterpret_tensor(buf204, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf204 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_87], Original ATen: [aten._scaled_dot_product_efficient_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_add_12.run(buf207, arg217_1, arg219_1, 691200, grid=grid(691200), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg217_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg219_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf208 = buf166; del buf166 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf250 = buf124; del buf124 # reuse V0808 
buf208 = buf166; del buf166 # reuse
buf250 = buf124; del buf124 # reuse
# Source Nodes: [hidden_states_106, hidden_states_87], Original ATen: [aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_14.run(arg0_1, buf208, buf250, 9728, grid=grid(9728), stream=stream0)
# Source Nodes: [hidden_states_87], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf209 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf205, buf206, buf207, reinterpret_tensor(buf208, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf210 = buf209[0]
del buf209
buf214 = reinterpret_tensor(buf205, (8192, 1152), (1152, 1), 0); del buf205 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf210, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg220_1, (1152, 1152), (1, 1152), 0), out=buf214)
del arg220_1
buf218 = reinterpret_tensor(buf210, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf210 # reuse
buf219 = buf218; del buf218 # reuse
# Source Nodes: [add_38, hidden_states_92, hidden_states_93, mul_22, norm_hidden_states_18, norm_hidden_states_19], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_15.run(buf219, buf214, arg221_1, arg223_1, buf199, arg191_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
buf220 = reinterpret_tensor(buf179, (8192, 4608), (4608, 1), 0); del buf179 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf219, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg224_1, (1152, 4608), (1, 1152), 0), out=buf220)
del arg224_1
buf221 = reinterpret_tensor(buf220, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf220 # reuse
# Source Nodes: [hidden_states_95], Original ATen: [aten.gelu]
triton_poi_fused_gelu_16.run(buf221, arg225_1, arg227_1, 37748736, grid=grid(37748736), stream=stream0)
del arg225_1
del arg227_1
buf222 = reinterpret_tensor(buf219, (8192, 1152), (1152, 1), 0); del buf219 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf221, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg228_1, (4608, 1152), (1, 4608), 0), out=buf222)
del arg228_1
buf223 = reinterpret_tensor(buf222, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf222 # reuse
buf227 = reinterpret_tensor(buf197, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf197 # reuse
# Source Nodes: [add_42, ff_output_4, hidden_states_92, hidden_states_93, hidden_states_98, mul_24, norm_hidden_states_20, norm_hidden_states_21], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_17.run(buf223, arg191_1, buf6, arg15_1, arg17_1, arg229_1, arg231_1, buf214, arg221_1, arg223_1, buf199, arg232_1, buf227, 8192, 1152, grid=grid(8192), stream=stream0)
del arg191_1
del arg221_1
del arg223_1
del arg229_1
del arg231_1
buf228 = buf214; del buf214 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf227, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg233_1, (1152, 1152), (1, 1152), 0), out=buf228)
del arg233_1
buf229 = reinterpret_tensor(buf199, (8192, 1152), (1152, 1), 0); del buf199 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf227, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg237_1, (1152, 1152), (1, 1152), 0), out=buf229)
del arg237_1
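
The aten._scaled_dot_product_efficient_attention call above is the cross-attention step: 4096 image queries attend to 300 text key/value tokens, and the (2, 16, 4096, 300) bias with a stride of 0 in the query dimension is a per-key additive mask shared by every query. A rough eager equivalent, with placeholder tensors standing in for the real buffers:

import torch
import torch.nn.functional as F

B, H, Lq, Lkv, D = 2, 16, 4096, 300, 72       # shapes taken from the call above
q = torch.randn(B, H, Lq, D)                  # image tokens (buf205)
k = torch.randn(B, H, Lkv, D)                 # text keys (buf206)
v = torch.randn(B, H, Lkv, D)                 # text values (buf207)
key_bias = torch.zeros(B, 1, 1, Lkv)          # additive mask, e.g. -inf on padded text tokens
attn_out = F.scaled_dot_product_attention(q, k, v, attn_mask=key_bias)  # (B, H, Lq, D)
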
buf230 = reinterpret_tensor(buf190, (8192, 1152), (1152, 1), 0); del buf190 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf227, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg241_1, (1152, 1152), (1, 1152), 0), out=buf230)
del arg241_1
del buf227
buf231 = reinterpret_tensor(buf228, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf228 # reuse
# Source Nodes: [hidden_states_99], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf231, arg234_1, arg236_1, 9437184, grid=grid(9437184), stream=stream0)
del arg234_1
del arg236_1
buf232 = reinterpret_tensor(buf229, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf229 # reuse
# Source Nodes: [hidden_states_99], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf232, arg238_1, arg240_1, 9437184, grid=grid(9437184), stream=stream0)
del arg238_1
del arg240_1
buf233 = reinterpret_tensor(buf230, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf230 # reuse
# Source Nodes: [hidden_states_99], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf233, arg242_1, arg244_1, 9437184, grid=grid(9437184), stream=stream0)
del arg242_1
del arg244_1
# Source Nodes: [hidden_states_99], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf234 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf231, buf232, buf233, None, False)
del buf231
buf235 = buf234[0]
del buf234
buf239 = reinterpret_tensor(buf233, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf233 # reuse
# Source Nodes: [hidden_states_100], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf235, buf239, 9437184, grid=grid(9437184), stream=stream0)
buf240 = reinterpret_tensor(buf235, (8192, 1152), (1152, 1), 0); del buf235 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf239, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg245_1, (1152, 1152), (1, 1152), 0), out=buf240)
del arg245_1
buf241 = reinterpret_tensor(buf240, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf240 # reuse
# Source Nodes: [attn_output_5, hidden_states_104, hidden_states_105], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf241, arg232_1, buf6, arg15_1, arg17_1, arg246_1, arg248_1, buf223, 9437184, grid=grid(9437184), stream=stream0)
del arg246_1
del arg248_1
buf242 = reinterpret_tensor(buf223, (8192, 1152), (1152, 1), 0); del buf223 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf241, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg249_1, (1152, 1152), (1, 1152), 0), out=buf242)
del arg249_1
buf244 = reinterpret_tensor(buf207, (600, 1152), (1152, 1), 0); del buf207 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf243, reinterpret_tensor(arg253_1, (1152, 1152), (1, 1152), 0), out=buf244)
del arg253_1
buf246 = buf243; del buf243 # reuse
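
The self-attention branch (buf234 here, and the analogous calls below) projects the same 8192 x 1152 token matrix into q, k and v, views each as (batch, heads, seq, head_dim) through the (4718592, 72, 1152, 1) strides, and hands them to aten._scaled_dot_product_cudnn_attention with no mask. A reduced-size eager sketch (the trace uses seq = 4096; it is shortened here only to keep the example cheap, and the weights are random stand-ins):

import torch
import torch.nn.functional as F

B, L, H, D = 2, 256, 16, 72                     # the trace uses L = 4096
x = torch.randn(B * L, H * D)                   # the shared (8192, 1152) token matrix
wq, wk, wv = (torch.randn(H * D, H * D) for _ in range(3))                  # stand-ins for arg233_1/arg237_1/arg241_1
q, k, v = ((x @ w).view(B, L, H, D).transpose(1, 2) for w in (wq, wk, wv))  # (B, H, L, D) views
out = F.scaled_dot_product_attention(q, k, v)   # no mask, matching the None/False arguments above
out = out.transpose(1, 2).reshape(B * L, H * D) # the aten.clone + (8192, 1152) view in the trace
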
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf245, reinterpret_tensor(arg257_1, (1152, 1152), (1, 1152), 0), out=buf246)
del arg257_1
buf247 = reinterpret_tensor(buf242, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf242 # reuse
# Source Nodes: [hidden_states_106], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf247, arg250_1, arg252_1, 9437184, grid=grid(9437184), stream=stream0)
del arg250_1
del arg252_1
buf248 = reinterpret_tensor(buf244, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf244 # reuse
# Source Nodes: [hidden_states_106], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf248, arg254_1, arg256_1, 691200, grid=grid(691200), stream=stream0)
del arg254_1
del arg256_1
buf249 = reinterpret_tensor(buf246, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf246 # reuse
# Source Nodes: [hidden_states_106], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf249, arg258_1, arg260_1, 691200, grid=grid(691200), stream=stream0)
del arg258_1
del arg260_1
# Source Nodes: [hidden_states_106], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf251 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf247, buf248, buf249, reinterpret_tensor(buf250, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf252 = buf251[0]
del buf251
buf256 = reinterpret_tensor(buf247, (8192, 1152), (1152, 1), 0); del buf247 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf252, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg261_1, (1152, 1152), (1, 1152), 0), out=buf256)
del arg261_1
buf260 = reinterpret_tensor(buf252, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf252 # reuse
buf261 = buf260; del buf260 # reuse
# Source Nodes: [add_46, hidden_states_111, hidden_states_112, mul_26, norm_hidden_states_22, norm_hidden_states_23], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_15.run(buf261, buf256, arg262_1, arg264_1, buf241, arg232_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
buf262 = reinterpret_tensor(buf221, (8192, 4608), (4608, 1), 0); del buf221 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf261, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg265_1, (1152, 4608), (1, 1152), 0), out=buf262)
del arg265_1
buf263 = reinterpret_tensor(buf262, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf262 # reuse
# Source Nodes: [hidden_states_114], Original ATen: [aten.gelu]
triton_poi_fused_gelu_16.run(buf263, arg266_1, arg268_1, 37748736, grid=grid(37748736), stream=stream0)
del arg266_1
del arg268_1
buf264 = reinterpret_tensor(buf261, (8192, 1152), (1152, 1), 0); del buf261 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf263, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg269_1, (4608, 1152), (1, 4608), 0), out=buf264)
del arg269_1
buf265 = reinterpret_tensor(buf264, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf264 # reuse
buf269 = reinterpret_tensor(buf239, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf239 # reuse
# Source Nodes: [add_50, ff_output_5, hidden_states_111, hidden_states_112, hidden_states_117, mul_28, norm_hidden_states_24, norm_hidden_states_25], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_17.run(buf265, arg232_1, buf6, arg15_1, arg17_1, arg270_1, arg272_1, buf256, arg262_1, arg264_1, buf241, arg273_1, buf269, 8192, 1152, grid=grid(8192), stream=stream0)
del arg232_1
del arg262_1
del arg264_1
del arg270_1
del arg272_1
buf270 = buf256; del buf256 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf269, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg274_1, (1152, 1152), (1, 1152), 0), out=buf270)
del arg274_1
buf271 = reinterpret_tensor(buf241, (8192, 1152), (1152, 1), 0); del buf241 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf269, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg278_1, (1152, 1152), (1, 1152), 0), out=buf271)
del arg278_1
buf272 = reinterpret_tensor(buf232, (8192, 1152), (1152, 1), 0); del buf232 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf269, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg282_1, (1152, 1152), (1, 1152), 0), out=buf272)
del arg282_1
del buf269
buf273 = reinterpret_tensor(buf270, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf270 # reuse
# Source Nodes: [hidden_states_118], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf273, arg275_1, arg277_1, 9437184, grid=grid(9437184), stream=stream0)
del arg275_1
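
The triton_red_fused_add_div_mul_native_layer_norm_* kernels above fuse the gated residual with the adaptive layer norm that feeds the next sub-block; the per-sample gate/shift/scale come from the time-embedding buffers (buf6, arg15_1, arg17_1, ...) that every block reuses. The sketch below is a guess at the corresponding eager pattern (PixArt/DiT-style adaLN); the helper and tensor names are illustrative, not taken from the log:

import torch
import torch.nn.functional as F

def gated_residual_adaln(x, branch, gate, shift, scale, eps=1e-6):
    x = x + gate * branch                              # aten.add / aten.mul: gated residual
    normed = F.layer_norm(x, x.shape[-1:], eps=eps)    # aten.native_layer_norm (no affine params)
    return x, normed * (1 + scale) + shift             # aten.mul / aten.add: per-sample modulation

B, L, C = 2, 4096, 1152
x, branch = torch.randn(B, L, C), torch.randn(B, L, C)
gate, shift, scale = (torch.randn(B, 1, C) for _ in range(3))
x, norm_hidden_states = gated_residual_adaln(x, branch, gate, shift, scale)
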
del arg277_1
buf274 = reinterpret_tensor(buf271, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf271 # reuse
# Source Nodes: [hidden_states_118], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf274, arg279_1, arg281_1, 9437184, grid=grid(9437184), stream=stream0)
del arg279_1
del arg281_1
buf275 = reinterpret_tensor(buf272, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf272 # reuse
# Source Nodes: [hidden_states_118], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf275, arg283_1, arg285_1, 9437184, grid=grid(9437184), stream=stream0)
del arg283_1
del arg285_1
# Source Nodes: [hidden_states_118], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf276 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf273, buf274, buf275, None, False)
del buf273
buf277 = buf276[0]
del buf276
buf281 = reinterpret_tensor(buf275, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf275 # reuse
# Source Nodes: [hidden_states_119], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf277, buf281, 9437184, grid=grid(9437184), stream=stream0)
buf282 = reinterpret_tensor(buf277, (8192, 1152), (1152, 1), 0); del buf277 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf281, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg286_1, (1152, 1152), (1, 1152), 0), out=buf282)
del arg286_1
buf283 = reinterpret_tensor(buf282, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf282 # reuse
# Source Nodes: [attn_output_6, hidden_states_123, hidden_states_124], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf283, arg273_1, buf6, arg15_1, arg17_1, arg287_1, arg289_1, buf265, 9437184, grid=grid(9437184), stream=stream0)
del arg287_1
del arg289_1
buf284 = reinterpret_tensor(buf265, (8192, 1152), (1152, 1), 0); del buf265 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf283, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg290_1, (1152, 1152), (1, 1152), 0), out=buf284)
del arg290_1
buf285 = reinterpret_tensor(buf249, (600, 1152), (1152, 1), 0); del buf249 # reuse
buf287 = reinterpret_tensor(buf248, (600, 1152), (1152, 1), 0); del buf248 # reuse
buf327 = buf245; del buf245 # reuse
buf329 = reinterpret_tensor(buf206, (600, 1152), (1152, 1), 0); del buf206 # reuse
# Source Nodes: [key_26, key_30, value_26, value_30], Original ATen: [aten.mm]
triton_poi_fused_mm_13.run(buf32, buf285, buf287, buf327, buf329, 691200, grid=grid(691200), stream=stream0)
buf286 = buf203; del buf203 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf285, reinterpret_tensor(arg294_1, (1152, 1152), (1, 1152), 0), out=buf286)
del arg294_1
buf288 = buf285; del buf285 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf287, reinterpret_tensor(arg298_1, (1152, 1152), (1, 1152), 0), out=buf288)
del arg298_1
buf289 = reinterpret_tensor(buf284, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf284 # reuse
# Source Nodes: [hidden_states_125], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf289, arg291_1, arg293_1, 9437184, grid=grid(9437184), stream=stream0)
del arg291_1
del arg293_1
buf290 = reinterpret_tensor(buf286, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf286 # reuse
# Source Nodes: [hidden_states_125], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf290, arg295_1, arg297_1, 691200, grid=grid(691200), stream=stream0)
del arg295_1
del arg297_1
buf291 = reinterpret_tensor(buf288, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf288 # reuse
# Source Nodes: [hidden_states_125], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf291, arg299_1, arg301_1, 691200, grid=grid(691200), stream=stream0)
del arg299_1
del arg301_1
buf292 = buf250; del buf250 # reuse
buf334 = buf208; del buf208 # reuse
# Source Nodes: [hidden_states_125, hidden_states_144], Original ATen: [aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_14.run(arg0_1, buf292, buf334, 9728, grid=grid(9728), stream=stream0)
# Source Nodes: [hidden_states_125], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf293 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf289, buf290, buf291, reinterpret_tensor(buf292, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf294 = buf293[0]
del buf293
buf298 = reinterpret_tensor(buf289, (8192, 1152), (1152, 1), 0); del buf289 # reuse
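
triton_poi_fused_constant_pad_nd_14 rebuilds the cross-attention bias for the next two blocks from arg0_1: 2 x 16 x 304 = 9728 values, i.e. each (batch, head) gets a 304-wide row (300 real key positions padded to 304, presumably for the alignment the attention kernel wants), and the later reinterpret_tensor gives it a query stride of 0 so all 4096 queries share the same row. A small reconstruction of that layout trick, with placeholder mask values:

import torch
import torch.nn.functional as F

B, H, Lq, Lkv, pad_to = 2, 16, 4096, 300, 304
per_key_bias = torch.randn(B, H, Lkv)                    # stand-in for what arg0_1 encodes
padded = F.pad(per_key_bias, (0, pad_to - Lkv))          # aten.constant_pad_nd -> (2, 16, 304)
bias = padded.as_strided((B, H, Lq, Lkv), (H * pad_to, pad_to, 0, 1))
assert bias.shape == (2, 16, 4096, 300) and bias.stride(2) == 0   # broadcast over queries, no copy
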
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf294, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg302_1, (1152, 1152), (1, 1152), 0), out=buf298)
del arg302_1
buf302 = reinterpret_tensor(buf294, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf294 # reuse
buf303 = buf302; del buf302 # reuse
# Source Nodes: [add_54, hidden_states_130, hidden_states_131, mul_30, norm_hidden_states_26, norm_hidden_states_27], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_15.run(buf303, buf298, arg303_1, arg305_1, buf283, arg273_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
buf304 = reinterpret_tensor(buf263, (8192, 4608), (4608, 1), 0); del buf263 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf303, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg306_1, (1152, 4608), (1, 1152), 0), out=buf304)
del arg306_1
buf305 = reinterpret_tensor(buf304, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf304 # reuse
# Source Nodes: [hidden_states_133], Original ATen: [aten.gelu]
triton_poi_fused_gelu_16.run(buf305, arg307_1, arg309_1, 37748736, grid=grid(37748736), stream=stream0)
del arg307_1
del arg309_1
buf306 = reinterpret_tensor(buf303, (8192, 1152), (1152, 1), 0); del buf303 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf305, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg310_1, (4608, 1152), (1, 4608), 0), out=buf306)
del arg310_1
buf307 = reinterpret_tensor(buf306, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf306 # reuse
buf311 = reinterpret_tensor(buf281, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf281 # reuse
# Source Nodes: [add_58, ff_output_6, hidden_states_130, hidden_states_131, hidden_states_136, mul_32, norm_hidden_states_28, norm_hidden_states_29], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_17.run(buf307, arg273_1, buf6, arg15_1, arg17_1, arg311_1, arg313_1, buf298, arg303_1, arg305_1, buf283, arg314_1, buf311, 8192, 1152, grid=grid(8192), stream=stream0)
del arg273_1
del arg303_1
del arg305_1
del arg311_1
del arg313_1
buf312 = buf298; del buf298 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf311, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg315_1, (1152, 1152), (1, 1152), 0), out=buf312)
del arg315_1
buf313 = reinterpret_tensor(buf283, (8192, 1152), (1152, 1), 0); del buf283 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf311, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg319_1, (1152, 1152), (1, 1152), 0), out=buf313)
del arg319_1
buf314 = reinterpret_tensor(buf274, (8192, 1152), (1152, 1), 0); del buf274 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf311, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg323_1, (1152, 1152), (1, 1152), 0), out=buf314)
del arg323_1
del buf311
buf315 = reinterpret_tensor(buf312, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf312 # reuse
# Source Nodes: [hidden_states_137], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf315, arg316_1, arg318_1, 9437184, grid=grid(9437184), stream=stream0)
del arg316_1
del arg318_1
buf316 = reinterpret_tensor(buf313, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf313 # reuse
# Source Nodes: [hidden_states_137], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf316, arg320_1, arg322_1, 9437184, grid=grid(9437184), stream=stream0)
del arg320_1
del arg322_1
buf317 = reinterpret_tensor(buf314, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf314 # reuse
# Source Nodes: [hidden_states_137], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf317, arg324_1, arg326_1, 9437184, grid=grid(9437184), stream=stream0)
del arg324_1
del arg326_1
# Source Nodes: [hidden_states_137], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf318 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf315, buf316, buf317, None, False)
del buf315
buf319 = buf318[0]
del buf318
buf323 = reinterpret_tensor(buf317, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf317 # reuse
# Source Nodes: [hidden_states_138], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf319, buf323, 9437184, grid=grid(9437184), stream=stream0)
buf324 = reinterpret_tensor(buf319, (8192, 1152), (1152, 1), 0); del buf319 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf323, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg327_1, (1152, 1152), (1, 1152), 0), out=buf324)
del arg327_1
buf325 = reinterpret_tensor(buf324, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf324 # reuse
# Source Nodes: [attn_output_7, hidden_states_142, hidden_states_143], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf325, arg314_1, buf6, arg15_1, arg17_1, arg328_1, arg330_1, buf307, 9437184, grid=grid(9437184), stream=stream0)
del arg328_1
del arg330_1
buf326 = reinterpret_tensor(buf307, (8192, 1152), (1152, 1), 0); del buf307 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf325, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg331_1, (1152, 1152), (1, 1152), 0), out=buf326)
del arg331_1
buf328 = reinterpret_tensor(buf291, (600, 1152), (1152, 1), 0); del buf291 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf327, reinterpret_tensor(arg335_1, (1152, 1152), (1, 1152), 0), out=buf328)
del arg335_1
buf330 = buf327; del buf327 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf329, reinterpret_tensor(arg339_1, (1152, 1152), (1, 1152), 0), out=buf330)
del arg339_1
buf331 = reinterpret_tensor(buf326, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf326 # reuse
# Source Nodes: [hidden_states_144], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf331, arg332_1, arg334_1, 9437184, grid=grid(9437184), stream=stream0)
del arg332_1
del arg334_1
buf332 = reinterpret_tensor(buf328, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf328 # reuse
# Source Nodes: [hidden_states_144], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf332, arg336_1, arg338_1, 691200, grid=grid(691200), stream=stream0)
del arg336_1
del arg338_1
buf333 = reinterpret_tensor(buf330, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf330 # reuse
# Source Nodes: [hidden_states_144], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf333, arg340_1, arg342_1, 691200, grid=grid(691200), stream=stream0)
del arg340_1
del arg342_1
# Source Nodes: [hidden_states_144], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf335 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf331, buf332, buf333, reinterpret_tensor(buf334, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf336 = buf335[0]
del buf335
buf340 = reinterpret_tensor(buf331, (8192, 1152), (1152, 1), 0); del buf331 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf336, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg343_1, (1152, 1152), (1, 1152), 0), out=buf340)
del arg343_1
buf344 = reinterpret_tensor(buf336, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf336 # reuse
buf345 = buf344; del buf344 # reuse
# Source Nodes: [add_62, hidden_states_149, hidden_states_150, mul_34, norm_hidden_states_30, norm_hidden_states_31], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_15.run(buf345, buf340, arg344_1, arg346_1, buf325, arg314_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
buf346 = reinterpret_tensor(buf305, (8192, 4608), (4608, 1), 0); del buf305 # reuse
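
Each block ends with the same feed-forward sandwich seen above: a 1152 -> 4608 projection, triton_poi_fused_gelu_16 (which folds the bias into the GELU), and a 4608 -> 1152 projection back. A plain eager rendering with random stand-in weights; the tanh-approximate GELU is an assumption about the model config, not something the log states:

import torch
import torch.nn.functional as F

tokens = torch.randn(8192, 1152)                          # (batch*seq, dim), like buf345
w1, b1 = torch.randn(4608, 1152), torch.randn(4608)       # stand-ins for arg347_1 and its bias
w2, b2 = torch.randn(1152, 4608), torch.randn(1152)       # stand-ins for arg351_1 and its bias
hidden = F.gelu(F.linear(tokens, w1, b1), approximate="tanh")   # aten.gelu on the 4608-wide hidden
ff_output = F.linear(hidden, w2, b2)                       # back to (8192, 1152)
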
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf345, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg347_1, (1152, 4608), (1, 1152), 0), out=buf346)
del arg347_1
buf347 = reinterpret_tensor(buf346, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf346  # reuse
# Source Nodes: [hidden_states_152], Original ATen: [aten.gelu]
triton_poi_fused_gelu_16.run(buf347, arg348_1, arg350_1, 37748736, grid=grid(37748736), stream=stream0)
del arg348_1
del arg350_1
buf348 = reinterpret_tensor(buf345, (8192, 1152), (1152, 1), 0); del buf345  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf347, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg351_1, (4608, 1152), (1, 4608), 0), out=buf348)
del arg351_1
buf349 = reinterpret_tensor(buf348, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf348  # reuse
buf353 = reinterpret_tensor(buf323, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf323  # reuse
# Source Nodes: [add_66, ff_output_7, hidden_states_149, hidden_states_150, hidden_states_155, mul_36, norm_hidden_states_32, norm_hidden_states_33], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_17.run(buf349, arg314_1, buf6, arg15_1, arg17_1, arg352_1, arg354_1, buf340, arg344_1, arg346_1, buf325, arg355_1, buf353, 8192, 1152, grid=grid(8192), stream=stream0)
del arg314_1
del arg344_1
del arg346_1
del arg352_1
del arg354_1
buf354 = buf340; del buf340  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf353, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg356_1, (1152, 1152), (1, 1152), 0), out=buf354)
del arg356_1
buf355 = reinterpret_tensor(buf325, (8192, 1152), (1152, 1), 0); del buf325  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf353, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg360_1, (1152, 1152), (1, 1152), 0), out=buf355)
del arg360_1
buf356 = reinterpret_tensor(buf316, (8192, 1152), (1152, 1), 0); del buf316  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf353, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg364_1, (1152, 1152), (1, 1152), 0), out=buf356)
del arg364_1
del buf353
buf357 = reinterpret_tensor(buf354, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf354  # reuse
# Source Nodes: [hidden_states_156], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf357, arg357_1, arg359_1, 9437184, grid=grid(9437184), stream=stream0)
del arg357_1
del arg359_1
buf358 = reinterpret_tensor(buf355, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf355  # reuse
# Source Nodes: [hidden_states_156], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf358, arg361_1, arg363_1, 9437184, grid=grid(9437184), stream=stream0)
del arg361_1
del arg363_1
buf359 = reinterpret_tensor(buf356, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf356  # reuse
# Source Nodes: [hidden_states_156], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf359, arg365_1, arg367_1, 9437184, grid=grid(9437184), stream=stream0)
del arg365_1
del arg367_1
# Source Nodes: [hidden_states_156], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf360 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf357, buf358, buf359, None, False)
del buf357
buf361 = buf360[0]
del buf360
buf365 = reinterpret_tensor(buf359, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf359  # reuse
# Source Nodes: [hidden_states_157], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf361, buf365, 9437184, grid=grid(9437184), stream=stream0)
buf366 = reinterpret_tensor(buf361, (8192, 1152), (1152, 1), 0); del buf361  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf365, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg368_1, (1152, 1152), (1, 1152), 0), out=buf366)
del arg368_1
buf367 = reinterpret_tensor(buf366, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf366  # reuse
# Source Nodes: [attn_output_8, hidden_states_161, hidden_states_162], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf367, arg355_1, buf6, arg15_1, arg17_1, arg369_1, arg371_1, buf349, 9437184, grid=grid(9437184), stream=stream0)
del arg369_1
del arg371_1
buf368 = reinterpret_tensor(buf349, (8192, 1152), (1152, 1), 0); del buf349  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf367, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg372_1, (1152, 1152), (1, 1152), 0), out=buf368)
del arg372_1
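# NOTE (editor): the statements above finish one transformer block's feed-forward and run the
# next block's self-attention: a GELU feed-forward (1152 -> 4608 -> 1152), a fused residual-add
# plus layer norm that also emits the modulated input for the next attention, the Q/K/V
# projections, a reshape into 16 heads of size 72 for the (2, 4096, 1152) hidden states,
# aten._scaled_dot_product_cudnn_attention, and the attention output projection.
# extern_kernels.fallback_mixed_mm is Inductor's eager fallback for matmuls whose two operands
# have different dtypes (typical when weights are kept in a lower-precision format than the
# activations); the `del argNNN_1` statements free each weight right after its last use, and
# the `# reuse` markers come from Inductor's memory planner recycling earlier buffers. The
# final matmul above projects the post-attention hidden states into the queries for the
# cross-attention that follows. These readings are inferred from shapes and kernel names in
# the log; the log itself does not state them.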
buf369 = reinterpret_tensor(buf333, (600, 1152), (1152, 1), 0); del buf333  # reuse
buf371 = reinterpret_tensor(buf332, (600, 1152), (1152, 1), 0); del buf332  # reuse
buf411 = buf329; del buf329  # reuse
buf413 = reinterpret_tensor(buf290, (600, 1152), (1152, 1), 0); del buf290  # reuse
# Source Nodes: [key_34, key_38, value_34, value_38], Original ATen: [aten.mm]
triton_poi_fused_mm_13.run(buf32, buf369, buf371, buf411, buf413, 691200, grid=grid(691200), stream=stream0)
buf370 = buf287; del buf287  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf369, reinterpret_tensor(arg376_1, (1152, 1152), (1, 1152), 0), out=buf370)
del arg376_1
buf372 = buf369; del buf369  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf371, reinterpret_tensor(arg380_1, (1152, 1152), (1, 1152), 0), out=buf372)
del arg380_1
buf373 = reinterpret_tensor(buf368, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf368  # reuse
# Source Nodes: [hidden_states_163], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf373, arg373_1, arg375_1, 9437184, grid=grid(9437184), stream=stream0)
del arg373_1
del arg375_1
buf374 = reinterpret_tensor(buf370, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf370  # reuse
# Source Nodes: [hidden_states_163], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf374, arg377_1, arg379_1, 691200, grid=grid(691200), stream=stream0)
del arg377_1
del arg379_1
buf375 = reinterpret_tensor(buf372, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf372  # reuse
# Source Nodes: [hidden_states_163], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf375, arg381_1, arg383_1, 691200, grid=grid(691200), stream=stream0)
del arg381_1
del arg383_1
buf376 = buf334; del buf334  # reuse
buf418 = buf292; del buf292  # reuse
# Source Nodes: [hidden_states_163, hidden_states_182], Original ATen: [aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_14.run(arg0_1, buf376, buf418, 9728, grid=grid(9728), stream=stream0)
# Source Nodes: [hidden_states_163], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf377 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf373, buf374, buf375, reinterpret_tensor(buf376, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf378 = buf377[0]
del buf377
buf382 = reinterpret_tensor(buf373, (8192, 1152), (1152, 1), 0); del buf373  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf378, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg384_1, (1152, 1152), (1, 1152), 0), out=buf382)
del arg384_1
buf386 = reinterpret_tensor(buf378, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf378  # reuse
buf387 = buf386; del buf386  # reuse
# Source Nodes: [add_70, hidden_states_168, hidden_states_169, mul_38, norm_hidden_states_34, norm_hidden_states_35], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_15.run(buf387, buf382, arg385_1, arg387_1, buf367, arg355_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
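# NOTE (editor): cross-attention. Queries are the 4096 image tokens reshaped to
# (2, 16, 4096, 72); keys and values come from a 300-token encoder sequence staged as
# (600, 1152) and reshaped to (2, 16, 300, 72). The constant_pad_nd kernel expands the
# encoder attention mask (arg0_1) from 300 to a 304-wide row, and the stride pattern
# (4864, 304, 0, 1) broadcasts it across all 4096 queries when it is passed as the attn_bias
# of aten._scaled_dot_product_efficient_attention. The fused reduction just above then adds
# the attention output back onto the residual stream and layer-normalizes it for the
# feed-forward. This is a reading of the shapes in the log, not something the log states.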
buf388 = reinterpret_tensor(buf347, (8192, 4608), (4608, 1), 0); del buf347  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf387, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg388_1, (1152, 4608), (1, 1152), 0), out=buf388)
del arg388_1
buf389 = reinterpret_tensor(buf388, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf388  # reuse
# Source Nodes: [hidden_states_171], Original ATen: [aten.gelu]
triton_poi_fused_gelu_16.run(buf389, arg389_1, arg391_1, 37748736, grid=grid(37748736), stream=stream0)
del arg389_1
del arg391_1
buf390 = reinterpret_tensor(buf387, (8192, 1152), (1152, 1), 0); del buf387  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf389, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg392_1, (4608, 1152), (1, 4608), 0), out=buf390)
del arg392_1
buf391 = reinterpret_tensor(buf390, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf390  # reuse
buf395 = reinterpret_tensor(buf365, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf365  # reuse
# Source Nodes: [add_74, ff_output_8, hidden_states_168, hidden_states_169, hidden_states_174, mul_40, norm_hidden_states_36, norm_hidden_states_37], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_17.run(buf391, arg355_1, buf6, arg15_1, arg17_1, arg393_1, arg395_1, buf382, arg385_1, arg387_1, buf367, arg396_1, buf395, 8192, 1152, grid=grid(8192), stream=stream0)
del arg355_1
del arg385_1
del arg387_1
del arg393_1
del arg395_1
buf396 = buf382; del buf382  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf395, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg397_1, (1152, 1152), (1, 1152), 0), out=buf396)
del arg397_1
buf397 = reinterpret_tensor(buf367, (8192, 1152), (1152, 1), 0); del buf367  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf395, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg401_1, (1152, 1152), (1, 1152), 0), out=buf397)
del arg401_1
buf398 = reinterpret_tensor(buf358, (8192, 1152), (1152, 1), 0); del buf358  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf395, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg405_1, (1152, 1152), (1, 1152), 0), out=buf398)
del arg405_1
del buf395
buf399 = reinterpret_tensor(buf396, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf396  # reuse
# Source Nodes: [hidden_states_175], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf399, arg398_1, arg400_1, 9437184, grid=grid(9437184), stream=stream0)
del arg398_1
del arg400_1
buf400 = reinterpret_tensor(buf397, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf397  # reuse
# Source Nodes: [hidden_states_175], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf400, arg402_1, arg404_1, 9437184, grid=grid(9437184), stream=stream0)
del arg402_1
del arg404_1
buf401 = reinterpret_tensor(buf398, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf398  # reuse
# Source Nodes: [hidden_states_175], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf401, arg406_1, arg408_1, 9437184, grid=grid(9437184), stream=stream0)
del arg406_1
del arg408_1
# Source Nodes: [hidden_states_175], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf402 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf399, buf400, buf401, None, False)
del buf399
buf403 = buf402[0]
del buf402
buf407 = reinterpret_tensor(buf401, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf401  # reuse
# Source Nodes: [hidden_states_176], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf403, buf407, 9437184, grid=grid(9437184), stream=stream0)
buf408 = reinterpret_tensor(buf403, (8192, 1152), (1152, 1), 0); del buf403  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf407, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg409_1, (1152, 1152), (1, 1152), 0), out=buf408)
del arg409_1
buf409 = reinterpret_tensor(buf408, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf408  # reuse
# Source Nodes: [attn_output_9, hidden_states_180, hidden_states_181], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf409, arg396_1, buf6, arg15_1, arg17_1, arg410_1, arg412_1, buf391, 9437184, grid=grid(9437184), stream=stream0)
del arg410_1
del arg412_1
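# NOTE (editor): epilogue pattern of each attention sub-block. triton_poi_fused_add_div_mul_18
# appears to apply the conditioning gate/scale tensors (buf6, arg15_1, arg17_1, ...) to the
# attention output and add it back onto the residual stream, while
# triton_red_fused_add_div_mul_native_layer_norm_17 additionally folds the following layer
# norm, writing both the updated residual and the normalized, modulated input for the next
# sub-block in one reduction pass over the 1152-wide rows. This interpretation comes from the
# Source Nodes annotations, not from the log text itself.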
buf410 = reinterpret_tensor(buf391, (8192, 1152), (1152, 1), 0); del buf391  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf409, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg413_1, (1152, 1152), (1, 1152), 0), out=buf410)
del arg413_1
buf412 = reinterpret_tensor(buf375, (600, 1152), (1152, 1), 0); del buf375  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf411, reinterpret_tensor(arg417_1, (1152, 1152), (1, 1152), 0), out=buf412)
del arg417_1
buf414 = buf411; del buf411  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf413, reinterpret_tensor(arg421_1, (1152, 1152), (1, 1152), 0), out=buf414)
del arg421_1
buf415 = reinterpret_tensor(buf410, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf410  # reuse
# Source Nodes: [hidden_states_182], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf415, arg414_1, arg416_1, 9437184, grid=grid(9437184), stream=stream0)
del arg414_1
del arg416_1
buf416 = reinterpret_tensor(buf412, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf412  # reuse
# Source Nodes: [hidden_states_182], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf416, arg418_1, arg420_1, 691200, grid=grid(691200), stream=stream0)
del arg418_1
del arg420_1
buf417 = reinterpret_tensor(buf414, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf414  # reuse
# Source Nodes: [hidden_states_182], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf417, arg422_1, arg424_1, 691200, grid=grid(691200), stream=stream0)
del arg422_1
del arg424_1
# Source Nodes: [hidden_states_182], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf419 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf415, buf416, buf417, reinterpret_tensor(buf418, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf420 = buf419[0]
del buf419
buf424 = reinterpret_tensor(buf415, (8192, 1152), (1152, 1), 0); del buf415  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf420, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg425_1, (1152, 1152), (1, 1152), 0), out=buf424)
del arg425_1
buf428 = reinterpret_tensor(buf420, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf420  # reuse
buf429 = buf428; del buf428  # reuse
# Source Nodes: [add_78, hidden_states_187, hidden_states_188, mul_42, norm_hidden_states_38, norm_hidden_states_39], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_15.run(buf429, buf424, arg426_1, arg428_1, buf409, arg396_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
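# NOTE (editor): feed-forward below. The hidden states are expanded 1152 -> 4608 with a
# mixed-dtype matmul, passed through a fused GELU kernel (the extra argNNN_1 tensors are
# presumably the projection bias and/or per-channel weight scales folded into the epilogue;
# the log does not say), and projected back 4608 -> 1152. The large (8192, 4608) intermediate
# is not freshly allocated: reinterpret_tensor recycles the buffer that the previous block's
# feed-forward used (buf389 here), which keeps the peak memory of the block loop roughly flat.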
buf430 = reinterpret_tensor(buf389, (8192, 4608), (4608, 1), 0); del buf389  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf429, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg429_1, (1152, 4608), (1, 1152), 0), out=buf430)
del arg429_1
buf431 = reinterpret_tensor(buf430, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf430  # reuse
# Source Nodes: [hidden_states_190], Original ATen: [aten.gelu]
triton_poi_fused_gelu_16.run(buf431, arg430_1, arg432_1, 37748736, grid=grid(37748736), stream=stream0)
del arg430_1
del arg432_1
buf432 = reinterpret_tensor(buf429, (8192, 1152), (1152, 1), 0); del buf429  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf431, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg433_1, (4608, 1152), (1, 4608), 0), out=buf432)
del arg433_1
buf433 = reinterpret_tensor(buf432, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf432  # reuse
buf437 = reinterpret_tensor(buf407, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf407  # reuse
# Source Nodes: [add_82, ff_output_9, hidden_states_187, hidden_states_188, hidden_states_193, mul_44, norm_hidden_states_40, norm_hidden_states_41], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_17.run(buf433, arg396_1, buf6, arg15_1, arg17_1, arg434_1, arg436_1, buf424, arg426_1, arg428_1, buf409, arg437_1, buf437, 8192, 1152, grid=grid(8192), stream=stream0)
del arg396_1
del arg426_1
del arg428_1
del arg434_1
del arg436_1
buf438 = buf424; del buf424  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf437, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg438_1, (1152, 1152), (1, 1152), 0), out=buf438)
del arg438_1
buf439 = reinterpret_tensor(buf409, (8192, 1152), (1152, 1), 0); del buf409  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf437, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg442_1, (1152, 1152), (1, 1152), 0), out=buf439)
del arg442_1
buf440 = reinterpret_tensor(buf400, (8192, 1152), (1152, 1), 0); del buf400  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf437, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg446_1, (1152, 1152), (1, 1152), 0), out=buf440)
del arg446_1
del buf437
buf441 = reinterpret_tensor(buf438, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf438  # reuse
# Source Nodes: [hidden_states_194], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf441, arg439_1, arg441_1, 9437184, grid=grid(9437184), stream=stream0)
del arg439_1
del arg441_1
buf442 = reinterpret_tensor(buf439, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf439  # reuse
# Source Nodes: [hidden_states_194], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf442, arg443_1, arg445_1, 9437184, grid=grid(9437184), stream=stream0)
del arg443_1
del arg445_1
buf443 = reinterpret_tensor(buf440, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf440  # reuse
# Source Nodes: [hidden_states_194], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf443, arg447_1, arg449_1, 9437184, grid=grid(9437184), stream=stream0)
del arg447_1
del arg449_1
# Source Nodes: [hidden_states_194], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf444 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf441, buf442, buf443, None, False)
del buf441
buf445 = buf444[0]
del buf444
buf449 = reinterpret_tensor(buf443, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf443  # reuse
# Source Nodes: [hidden_states_195], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf445, buf449, 9437184, grid=grid(9437184), stream=stream0)
buf450 = reinterpret_tensor(buf445, (8192, 1152), (1152, 1), 0); del buf445  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf449, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg450_1, (1152, 1152), (1, 1152), 0), out=buf450)
del arg450_1
buf451 = reinterpret_tensor(buf450, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf450  # reuse
# Source Nodes: [attn_output_10, hidden_states_199, hidden_states_200], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf451, arg437_1, buf6, arg15_1, arg17_1, arg451_1, arg453_1, buf433, 9437184, grid=grid(9437184), stream=stream0)
del arg451_1
del arg453_1
buf452 = reinterpret_tensor(buf433, (8192, 1152), (1152, 1), 0); del buf433  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf451, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg454_1, (1152, 1152), (1, 1152), 0), out=buf452)
del arg454_1
buf453 = reinterpret_tensor(buf417, (600, 1152), (1152, 1), 0); del buf417  # reuse
buf455 = reinterpret_tensor(buf416, (600, 1152), (1152, 1), 0); del buf416  # reuse
buf495 = buf413; del buf413  # reuse
buf497 = reinterpret_tensor(buf374, (600, 1152), (1152, 1), 0); del buf374  # reuse
# Source Nodes: [key_42, key_46, value_42, value_46], Original ATen: [aten.mm]
triton_poi_fused_mm_13.run(buf32, buf453, buf455, buf495, buf497, 691200, grid=grid(691200), stream=stream0)
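# NOTE (editor): triton_poi_fused_mm_13 reads the shared encoder hidden states (buf32) once
# and stages them into four (600, 1152) buffers that feed the key/value matmuls of this
# block's and the next block's cross-attention (the source nodes key_42/key_46 and
# value_42/value_46 suggest two layers being served), so the copy work is batched across
# layers instead of repeated per block. This is an inference from the buffer usage in the
# log, not something the log states.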
buf454 = buf371; del buf371  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf453, reinterpret_tensor(arg458_1, (1152, 1152), (1, 1152), 0), out=buf454)
del arg458_1
buf456 = buf453; del buf453  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf455, reinterpret_tensor(arg462_1, (1152, 1152), (1, 1152), 0), out=buf456)
del arg462_1
buf457 = reinterpret_tensor(buf452, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf452  # reuse
# Source Nodes: [hidden_states_201], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf457, arg455_1, arg457_1, 9437184, grid=grid(9437184), stream=stream0)
del arg455_1
del arg457_1
buf458 = reinterpret_tensor(buf454, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf454  # reuse
# Source Nodes: [hidden_states_201], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf458, arg459_1, arg461_1, 691200, grid=grid(691200), stream=stream0)
del arg459_1
del arg461_1
buf459 = reinterpret_tensor(buf456, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf456  # reuse
# Source Nodes: [hidden_states_201], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf459, arg463_1, arg465_1, 691200, grid=grid(691200), stream=stream0)
del arg463_1
del arg465_1
buf460 = buf418; del buf418  # reuse
buf502 = buf376; del buf376  # reuse
# Source Nodes: [hidden_states_201, hidden_states_220], Original ATen: [aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_14.run(arg0_1, buf460, buf502, 9728, grid=grid(9728), stream=stream0)
# Source Nodes: [hidden_states_201], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf461 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf457, buf458, buf459, reinterpret_tensor(buf460, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf462 = buf461[0]
del buf461
buf466 = reinterpret_tensor(buf457, (8192, 1152), (1152, 1), 0); del buf457  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf462, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg466_1, (1152, 1152), (1, 1152), 0), out=buf466)
del arg466_1
buf470 = reinterpret_tensor(buf462, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf462  # reuse
buf471 = buf470; del buf470  # reuse
# Source Nodes: [add_86, hidden_states_206, hidden_states_207, mul_46, norm_hidden_states_42, norm_hidden_states_43], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_15.run(buf471, buf466, arg467_1, arg469_1, buf451, arg437_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
buf472 = reinterpret_tensor(buf431, (8192, 4608), (4608, 1), 0); del buf431  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf471, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg470_1, (1152, 4608), (1, 1152), 0), out=buf472)
del arg470_1
buf473 = reinterpret_tensor(buf472, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf472  # reuse
# Source Nodes: [hidden_states_209], Original ATen: [aten.gelu]
triton_poi_fused_gelu_16.run(buf473, arg471_1, arg473_1, 37748736, grid=grid(37748736), stream=stream0)
del arg471_1
del arg473_1
buf474 = reinterpret_tensor(buf471, (8192, 1152), (1152, 1), 0); del buf471  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf473, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg474_1, (4608, 1152), (1, 4608), 0), out=buf474)
del arg474_1
buf475 = reinterpret_tensor(buf474, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf474  # reuse
buf479 = reinterpret_tensor(buf449, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf449  # reuse
# Source Nodes: [add_90, ff_output_10, hidden_states_206, hidden_states_207, hidden_states_212, mul_48, norm_hidden_states_44, norm_hidden_states_45], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_17.run(buf475, arg437_1, buf6, arg15_1, arg17_1, arg475_1, arg477_1, buf466, arg467_1, arg469_1, buf451, arg478_1, buf479, 8192, 1152, grid=grid(8192), stream=stream0)
del arg437_1
del arg467_1
del arg469_1
del arg475_1
del arg477_1
buf480 = buf466; del buf466  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf479, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg479_1, (1152, 1152), (1, 1152), 0), out=buf480)
del arg479_1
buf481 = reinterpret_tensor(buf451, (8192, 1152), (1152, 1), 0); del buf451  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf479, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg483_1, (1152, 1152), (1, 1152), 0), out=buf481)
del arg483_1
buf482 = reinterpret_tensor(buf442, (8192, 1152), (1152, 1), 0); del buf442  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf479, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg487_1, (1152, 1152), (1, 1152), 0), out=buf482)
del arg487_1
del buf479
buf483 = reinterpret_tensor(buf480, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf480  # reuse
# Source Nodes: [hidden_states_213], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf483, arg480_1, arg482_1, 9437184, grid=grid(9437184), stream=stream0)
del arg480_1
del arg482_1
buf484 = reinterpret_tensor(buf481, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf481  # reuse
# Source Nodes: [hidden_states_213], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf484, arg484_1, arg486_1, 9437184, grid=grid(9437184), stream=stream0)
del arg484_1
del arg486_1
buf485 = reinterpret_tensor(buf482, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf482  # reuse
# Source Nodes: [hidden_states_213], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf485, arg488_1, arg490_1, 9437184, grid=grid(9437184), stream=stream0)
del arg488_1
del arg490_1
# Source Nodes: [hidden_states_213], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf486 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf483, buf484, buf485, None, False)
del buf483
buf487 = buf486[0]
del buf486
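# NOTE (editor): aten._scaled_dot_product_cudnn_attention returns a tuple; only element [0],
# the attention output, is consumed. The clone kernel below rewrites that output from the
# per-head (2, 16, 4096, 72) layout into (2, 4096, 16, 72) so the heads are contiguous and
# the tensor can be viewed as (8192, 1152) for the output projection matmul.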
buf491 = reinterpret_tensor(buf485, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf485  # reuse
# Source Nodes: [hidden_states_214], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf487, buf491, 9437184, grid=grid(9437184), stream=stream0)
buf492 = reinterpret_tensor(buf487, (8192, 1152), (1152, 1), 0); del buf487  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf491, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg491_1, (1152, 1152), (1, 1152), 0), out=buf492)
del arg491_1
buf493 = reinterpret_tensor(buf492, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf492  # reuse
# Source Nodes: [attn_output_11, hidden_states_218, hidden_states_219], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf493, arg478_1, buf6, arg15_1, arg17_1, arg492_1, arg494_1, buf475, 9437184, grid=grid(9437184), stream=stream0)
del arg492_1
del arg494_1
buf494 = reinterpret_tensor(buf475, (8192, 1152), (1152, 1), 0); del buf475  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf493, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg495_1, (1152, 1152), (1, 1152), 0), out=buf494)
del arg495_1
buf496 = reinterpret_tensor(buf459, (600, 1152), (1152, 1), 0); del buf459  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf495, reinterpret_tensor(arg499_1, (1152, 1152), (1, 1152), 0), out=buf496)
del arg499_1
buf498 = buf495; del buf495  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf497, reinterpret_tensor(arg503_1, (1152, 1152), (1, 1152), 0), out=buf498)
del arg503_1
buf499 = reinterpret_tensor(buf494, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf494  # reuse
# Source Nodes: [hidden_states_220], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf499, arg496_1, arg498_1, 9437184, grid=grid(9437184), stream=stream0)
del arg496_1
del arg498_1
buf500 = reinterpret_tensor(buf496, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf496  # reuse
# Source Nodes: [hidden_states_220], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf500, arg500_1, arg502_1, 691200, grid=grid(691200), stream=stream0)
del arg500_1
del arg502_1
buf501 = reinterpret_tensor(buf498, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf498  # reuse
# Source Nodes: [hidden_states_220], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf501, arg504_1, arg506_1, 691200, grid=grid(691200), stream=stream0)
del arg504_1
del arg506_1
# Source Nodes: [hidden_states_220], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf503 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf499, buf500, buf501, reinterpret_tensor(buf502, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf504 = buf503[0]
del buf503
buf508 = reinterpret_tensor(buf499, (8192, 1152), (1152, 1), 0); del buf499  # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf504, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg507_1, (1152, 1152), (1, 1152), 0), out=buf508)
del arg507_1
buf512 = reinterpret_tensor(buf504, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf504  # reuse
torch/_inductor/graph.py:1780] [0/0] [__output_code] buf513 = buf512; del buf512 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [add_94, hidden_states_225, hidden_states_226, mul_50, norm_hidden_states_46, norm_hidden_states_47], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_red_fused_add_div_mul_native_layer_norm_15.run(buf513, buf508, arg508_1, arg510_1, buf493, arg478_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf514 = reinterpret_tensor(buf473, (8192, 4608), (4608, 1), 0); del buf473 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf513, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg511_1, (1152, 4608), (1, 1152), 0), out=buf514) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg511_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf515 = reinterpret_tensor(buf514, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf514 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_228], Original ATen: [aten.gelu] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_gelu_16.run(buf515, arg512_1, arg514_1, 37748736, grid=grid(37748736), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg512_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg514_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf516 = reinterpret_tensor(buf513, (8192, 1152), (1152, 1), 0); del buf513 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf515, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg515_1, (4608, 1152), (1, 4608), 0), out=buf516) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg515_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf517 = reinterpret_tensor(buf516, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf516 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf521 = reinterpret_tensor(buf491, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf491 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [add_98, ff_output_11, hidden_states_225, hidden_states_226, hidden_states_231, mul_52, norm_hidden_states_48, norm_hidden_states_49], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_red_fused_add_div_mul_native_layer_norm_17.run(buf517, arg478_1, buf6, arg15_1, arg17_1, arg516_1, arg518_1, buf508, arg508_1, arg510_1, buf493, arg519_1, buf521, 8192, 1152, grid=grid(8192), stream=stream0) V0808 
07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg478_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg508_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg510_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg516_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg518_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf522 = buf508; del buf508 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf521, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg520_1, (1152, 1152), (1, 1152), 0), out=buf522) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg520_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf523 = reinterpret_tensor(buf493, (8192, 1152), (1152, 1), 0); del buf493 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf521, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg524_1, (1152, 1152), (1, 1152), 0), out=buf523) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg524_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf524 = reinterpret_tensor(buf484, (8192, 1152), (1152, 1), 0); del buf484 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf521, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg528_1, (1152, 1152), (1, 1152), 0), out=buf524) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg528_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf521 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf525 = reinterpret_tensor(buf522, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf522 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_232], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf525, arg521_1, arg523_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg521_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg523_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf526 = reinterpret_tensor(buf523, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf523 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_232], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 
1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf526, arg525_1, arg527_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg525_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg527_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf527 = reinterpret_tensor(buf524, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf524 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_232], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf527, arg529_1, arg531_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg529_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg531_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_232], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf528 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf525, buf526, buf527, None, False) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf525 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf529 = buf528[0] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf528 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf533 = reinterpret_tensor(buf527, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf527 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_233], Original ATen: [aten.clone] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_clone_9.run(buf529, buf533, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf534 = reinterpret_tensor(buf529, (8192, 1152), (1152, 1), 0); del buf529 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf533, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg532_1, (1152, 1152), (1, 1152), 0), out=buf534) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg532_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf535 = reinterpret_tensor(buf534, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf534 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [attn_output_12, hidden_states_237, hidden_states_238], Original ATen: [aten.add, aten.div, aten.mul] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_add_div_mul_18.run(buf535, arg519_1, buf6, arg15_1, arg17_1, arg533_1, arg535_1, buf517, 9437184, grid=grid(9437184), 
stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg533_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg535_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf536 = reinterpret_tensor(buf517, (8192, 1152), (1152, 1), 0); del buf517 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf535, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg536_1, (1152, 1152), (1, 1152), 0), out=buf536) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg536_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf537 = reinterpret_tensor(buf501, (600, 1152), (1152, 1), 0); del buf501 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf539 = reinterpret_tensor(buf500, (600, 1152), (1152, 1), 0); del buf500 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf579 = buf497; del buf497 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf581 = reinterpret_tensor(buf458, (600, 1152), (1152, 1), 0); del buf458 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [key_50, key_54, value_50, value_54], Original ATen: [aten.mm] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_mm_13.run(buf32, buf537, buf539, buf579, buf581, 691200, grid=grid(691200), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf538 = buf455; del buf455 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(buf537, reinterpret_tensor(arg540_1, (1152, 1152), (1, 1152), 0), out=buf538) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg540_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf540 = buf537; del buf537 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(buf539, reinterpret_tensor(arg544_1, (1152, 1152), (1, 1152), 0), out=buf540) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg544_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf541 = reinterpret_tensor(buf536, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf536 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_239], Original ATen: [aten._scaled_dot_product_efficient_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf541, arg537_1, arg539_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg537_1 V0808 
07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg539_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf542 = reinterpret_tensor(buf538, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf538 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_239], Original ATen: [aten._scaled_dot_product_efficient_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_add_12.run(buf542, arg541_1, arg543_1, 691200, grid=grid(691200), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg541_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg543_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf543 = reinterpret_tensor(buf540, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf540 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_239], Original ATen: [aten._scaled_dot_product_efficient_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_add_12.run(buf543, arg545_1, arg547_1, 691200, grid=grid(691200), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg545_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg547_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf544 = buf502; del buf502 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf586 = buf460; del buf460 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_239, hidden_states_258], Original ATen: [aten.constant_pad_nd] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_constant_pad_nd_14.run(arg0_1, buf544, buf586, 9728, grid=grid(9728), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_239], Original ATen: [aten._scaled_dot_product_efficient_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf545 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf541, buf542, buf543, reinterpret_tensor(buf544, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf546 = buf545[0] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf545 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf550 = reinterpret_tensor(buf541, (8192, 1152), (1152, 1), 0); del buf541 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf546, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg548_1, (1152, 1152), (1, 1152), 0), out=buf550) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg548_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf554 = reinterpret_tensor(buf546, (2, 
4096, 1152), (4718592, 1152, 1), 0); del buf546 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf555 = buf554; del buf554 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [add_102, hidden_states_244, hidden_states_245, mul_54, norm_hidden_states_50, norm_hidden_states_51], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_red_fused_add_div_mul_native_layer_norm_15.run(buf555, buf550, arg549_1, arg551_1, buf535, arg519_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf556 = reinterpret_tensor(buf515, (8192, 4608), (4608, 1), 0); del buf515 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf555, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg552_1, (1152, 4608), (1, 1152), 0), out=buf556) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg552_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf557 = reinterpret_tensor(buf556, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf556 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_247], Original ATen: [aten.gelu] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_gelu_16.run(buf557, arg553_1, arg555_1, 37748736, grid=grid(37748736), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg553_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg555_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf558 = reinterpret_tensor(buf555, (8192, 1152), (1152, 1), 0); del buf555 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf557, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg556_1, (4608, 1152), (1, 4608), 0), out=buf558) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg556_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf559 = reinterpret_tensor(buf558, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf558 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf563 = reinterpret_tensor(buf533, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf533 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [add_106, ff_output_12, hidden_states_244, hidden_states_245, hidden_states_250, mul_56, norm_hidden_states_52, norm_hidden_states_53], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_red_fused_add_div_mul_native_layer_norm_17.run(buf559, arg519_1, buf6, arg15_1, arg17_1, arg557_1, arg559_1, buf550, arg549_1, arg551_1, 
buf535, arg560_1, buf563, 8192, 1152, grid=grid(8192), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg519_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg549_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg551_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg557_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg559_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf564 = buf550; del buf550 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf563, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg561_1, (1152, 1152), (1, 1152), 0), out=buf564) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg561_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf565 = reinterpret_tensor(buf535, (8192, 1152), (1152, 1), 0); del buf535 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf563, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg565_1, (1152, 1152), (1, 1152), 0), out=buf565) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg565_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf566 = reinterpret_tensor(buf526, (8192, 1152), (1152, 1), 0); del buf526 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf563, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg569_1, (1152, 1152), (1, 1152), 0), out=buf566) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg569_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf563 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf567 = reinterpret_tensor(buf564, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf564 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_251], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf567, arg562_1, arg564_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg562_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg564_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf568 = reinterpret_tensor(buf565, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf565 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_251], 
Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf568, arg566_1, arg568_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg566_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg568_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf569 = reinterpret_tensor(buf566, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf566 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_251], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf569, arg570_1, arg572_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg570_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg572_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_251], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf570 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf567, buf568, buf569, None, False) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf567 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf571 = buf570[0] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf570 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf575 = reinterpret_tensor(buf569, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf569 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_252], Original ATen: [aten.clone] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_clone_9.run(buf571, buf575, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf576 = reinterpret_tensor(buf571, (8192, 1152), (1152, 1), 0); del buf571 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf575, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg573_1, (1152, 1152), (1, 1152), 0), out=buf576) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg573_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf577 = reinterpret_tensor(buf576, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf576 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [attn_output_13, hidden_states_256, hidden_states_257], Original ATen: [aten.add, aten.div, aten.mul] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_add_div_mul_18.run(buf577, arg560_1, buf6, arg15_1, 
arg17_1, arg574_1, arg576_1, buf559, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg574_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg576_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf578 = reinterpret_tensor(buf559, (8192, 1152), (1152, 1), 0); del buf559 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf577, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg577_1, (1152, 1152), (1, 1152), 0), out=buf578) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg577_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf580 = reinterpret_tensor(buf543, (600, 1152), (1152, 1), 0); del buf543 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(buf579, reinterpret_tensor(arg581_1, (1152, 1152), (1, 1152), 0), out=buf580) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg581_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf582 = buf579; del buf579 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(buf581, reinterpret_tensor(arg585_1, (1152, 1152), (1, 1152), 0), out=buf582) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg585_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf583 = reinterpret_tensor(buf578, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf578 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_258], Original ATen: [aten._scaled_dot_product_efficient_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf583, arg578_1, arg580_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg578_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg580_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf584 = reinterpret_tensor(buf580, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf580 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_258], Original ATen: [aten._scaled_dot_product_efficient_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_add_12.run(buf584, arg582_1, arg584_1, 691200, grid=grid(691200), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg582_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg584_1 V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] buf585 = reinterpret_tensor(buf582, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf582 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_258], Original ATen: [aten._scaled_dot_product_efficient_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_add_12.run(buf585, arg586_1, arg588_1, 691200, grid=grid(691200), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg586_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg588_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_258], Original ATen: [aten._scaled_dot_product_efficient_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf587 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf583, buf584, buf585, reinterpret_tensor(buf586, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf588 = buf587[0] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf587 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf592 = reinterpret_tensor(buf583, (8192, 1152), (1152, 1), 0); del buf583 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf588, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg589_1, (1152, 1152), (1, 1152), 0), out=buf592) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg589_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf596 = reinterpret_tensor(buf588, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf588 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf597 = buf596; del buf596 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [add_110, hidden_states_263, hidden_states_264, mul_58, norm_hidden_states_54, norm_hidden_states_55], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_red_fused_add_div_mul_native_layer_norm_15.run(buf597, buf592, arg590_1, arg592_1, buf577, arg560_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf598 = reinterpret_tensor(buf557, (8192, 4608), (4608, 1), 0); del buf557 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf597, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg593_1, (1152, 4608), (1, 1152), 0), out=buf598) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg593_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf599 = reinterpret_tensor(buf598, (2, 4096, 
4608), (18874368, 4608, 1), 0); del buf598 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_266], Original ATen: [aten.gelu] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_gelu_16.run(buf599, arg594_1, arg596_1, 37748736, grid=grid(37748736), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg594_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg596_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf600 = reinterpret_tensor(buf597, (8192, 1152), (1152, 1), 0); del buf597 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf599, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg597_1, (4608, 1152), (1, 4608), 0), out=buf600) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg597_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf601 = reinterpret_tensor(buf600, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf600 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf605 = reinterpret_tensor(buf575, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf575 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [add_114, ff_output_13, hidden_states_263, hidden_states_264, hidden_states_269, mul_60, norm_hidden_states_56, norm_hidden_states_57], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_red_fused_add_div_mul_native_layer_norm_17.run(buf601, arg560_1, buf6, arg15_1, arg17_1, arg598_1, arg600_1, buf592, arg590_1, arg592_1, buf577, arg601_1, buf605, 8192, 1152, grid=grid(8192), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg560_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg590_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg592_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg598_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg600_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf606 = buf592; del buf592 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf605, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg602_1, (1152, 1152), (1, 1152), 0), out=buf606) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg602_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf607 = reinterpret_tensor(buf577, (8192, 1152), (1152, 1), 0); del buf577 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf605, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg606_1, (1152, 1152), (1, 1152), 0), out=buf607) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg606_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf608 = reinterpret_tensor(buf568, (8192, 1152), (1152, 1), 0); del buf568 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf605, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg610_1, (1152, 1152), (1, 1152), 0), out=buf608) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg610_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf605 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf609 = reinterpret_tensor(buf606, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf606 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_270], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf609, arg603_1, arg605_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg603_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg605_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf610 = reinterpret_tensor(buf607, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf607 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_270], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf610, arg607_1, arg609_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg607_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg609_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf611 = reinterpret_tensor(buf608, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf608 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_270], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf611, arg611_1, arg613_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg611_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg613_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_270], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] buf612 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf609, buf610, buf611, None, False) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf609 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf613 = buf612[0] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf612 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf617 = reinterpret_tensor(buf611, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf611 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_271], Original ATen: [aten.clone] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_clone_9.run(buf613, buf617, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf618 = reinterpret_tensor(buf613, (8192, 1152), (1152, 1), 0); del buf613 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf617, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg614_1, (1152, 1152), (1, 1152), 0), out=buf618) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg614_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf619 = reinterpret_tensor(buf618, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf618 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [attn_output_14, hidden_states_275, hidden_states_276], Original ATen: [aten.add, aten.div, aten.mul] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_add_div_mul_18.run(buf619, arg601_1, buf6, arg15_1, arg17_1, arg615_1, arg617_1, buf601, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg615_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg617_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf620 = reinterpret_tensor(buf601, (8192, 1152), (1152, 1), 0); del buf601 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf619, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg618_1, (1152, 1152), (1, 1152), 0), out=buf620) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg618_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf621 = reinterpret_tensor(buf585, (600, 1152), (1152, 1), 0); del buf585 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf623 = reinterpret_tensor(buf584, (600, 1152), (1152, 1), 0); del buf584 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf663 = buf581; del buf581 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf665 = 
reinterpret_tensor(buf542, (600, 1152), (1152, 1), 0); del buf542 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [key_58, key_62, value_58, value_62], Original ATen: [aten.mm] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_mm_13.run(buf32, buf621, buf623, buf663, buf665, 691200, grid=grid(691200), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf622 = buf539; del buf539 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(buf621, reinterpret_tensor(arg622_1, (1152, 1152), (1, 1152), 0), out=buf622) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg622_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf624 = buf621; del buf621 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(buf623, reinterpret_tensor(arg626_1, (1152, 1152), (1, 1152), 0), out=buf624) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg626_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf625 = reinterpret_tensor(buf620, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf620 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_277], Original ATen: [aten._scaled_dot_product_efficient_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf625, arg619_1, arg621_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg619_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg621_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf626 = reinterpret_tensor(buf622, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf622 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_277], Original ATen: [aten._scaled_dot_product_efficient_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_add_12.run(buf626, arg623_1, arg625_1, 691200, grid=grid(691200), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg623_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg625_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf627 = reinterpret_tensor(buf624, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf624 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_277], Original ATen: [aten._scaled_dot_product_efficient_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_add_12.run(buf627, arg627_1, arg629_1, 691200, grid=grid(691200), stream=stream0) V0808 
07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg627_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg629_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf628 = buf586; del buf586 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf670 = buf544; del buf544 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_277, hidden_states_296], Original ATen: [aten.constant_pad_nd] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_constant_pad_nd_14.run(arg0_1, buf628, buf670, 9728, grid=grid(9728), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_277], Original ATen: [aten._scaled_dot_product_efficient_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf629 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf625, buf626, buf627, reinterpret_tensor(buf628, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf630 = buf629[0] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf629 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf634 = reinterpret_tensor(buf625, (8192, 1152), (1152, 1), 0); del buf625 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf630, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg630_1, (1152, 1152), (1, 1152), 0), out=buf634) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg630_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf638 = reinterpret_tensor(buf630, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf630 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf639 = buf638; del buf638 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [add_118, hidden_states_282, hidden_states_283, mul_62, norm_hidden_states_58, norm_hidden_states_59], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_red_fused_add_div_mul_native_layer_norm_15.run(buf639, buf634, arg631_1, arg633_1, buf619, arg601_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf640 = reinterpret_tensor(buf599, (8192, 4608), (4608, 1), 0); del buf599 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf639, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg634_1, (1152, 4608), (1, 1152), 0), out=buf640) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg634_1 V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] buf641 = reinterpret_tensor(buf640, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf640 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_285], Original ATen: [aten.gelu] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_gelu_16.run(buf641, arg635_1, arg637_1, 37748736, grid=grid(37748736), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg635_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg637_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf642 = reinterpret_tensor(buf639, (8192, 1152), (1152, 1), 0); del buf639 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf641, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg638_1, (4608, 1152), (1, 4608), 0), out=buf642) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg638_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf643 = reinterpret_tensor(buf642, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf642 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf647 = reinterpret_tensor(buf617, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf617 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [add_122, ff_output_14, hidden_states_282, hidden_states_283, hidden_states_288, mul_64, norm_hidden_states_60, norm_hidden_states_61], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_red_fused_add_div_mul_native_layer_norm_17.run(buf643, arg601_1, buf6, arg15_1, arg17_1, arg639_1, arg641_1, buf634, arg631_1, arg633_1, buf619, arg642_1, buf647, 8192, 1152, grid=grid(8192), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg601_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg631_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg633_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg639_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg641_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf648 = buf634; del buf634 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf647, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg643_1, (1152, 1152), (1, 1152), 0), out=buf648) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg643_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf649 = reinterpret_tensor(buf619, (8192, 1152), (1152, 1), 0); del buf619 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] 
[__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf647, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg647_1, (1152, 1152), (1, 1152), 0), out=buf649) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg647_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf650 = reinterpret_tensor(buf610, (8192, 1152), (1152, 1), 0); del buf610 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf647, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg651_1, (1152, 1152), (1, 1152), 0), out=buf650) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg651_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf647 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf651 = reinterpret_tensor(buf648, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf648 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_289], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf651, arg644_1, arg646_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg644_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg646_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf652 = reinterpret_tensor(buf649, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf649 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_289], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf652, arg648_1, arg650_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg648_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg650_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf653 = reinterpret_tensor(buf650, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf650 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_289], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf653, arg652_1, arg654_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg652_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg654_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_289], Original 
# Source Nodes: [hidden_states_289], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf654 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf651, buf652, buf653, None, False)
del buf651
buf655 = buf654[0]
del buf654
buf659 = reinterpret_tensor(buf653, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf653 # reuse
# Source Nodes: [hidden_states_290], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf655, buf659, 9437184, grid=grid(9437184), stream=stream0)
buf660 = reinterpret_tensor(buf655, (8192, 1152), (1152, 1), 0); del buf655 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf659, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg655_1, (1152, 1152), (1, 1152), 0), out=buf660)
del arg655_1
buf661 = reinterpret_tensor(buf660, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf660 # reuse
# Source Nodes: [attn_output_15, hidden_states_294, hidden_states_295], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf661, arg642_1, buf6, arg15_1, arg17_1, arg656_1, arg658_1, buf643, 9437184, grid=grid(9437184), stream=stream0)
del arg656_1
del arg658_1
buf662 = reinterpret_tensor(buf643, (8192, 1152), (1152, 1), 0); del buf643 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf661, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg659_1, (1152, 1152), (1, 1152), 0), out=buf662)
del arg659_1
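# Hedged eager-mode sketch of the self-attention epilogue above (illustrative names only):
#   out = torch.nn.functional.scaled_dot_product_attention(q, k, v)   # buf654[0]
#   out = out.transpose(1, 2).reshape(2, 4096, 1152)                  # clone kernel _9
#   out = out @ w_o                                                   # fallback_mixed_mm
#   hidden = residual + gate * out                                    # kernel _18
# _scaled_dot_product_cudnn_attention returns a tuple; only element [0] (the attention
# output) is consumed here, the logsumexp/rng outputs are discarded for inference.
# The clone kernel is a real copy: it permutes (2, 16, 4096, 72) into a contiguous
# (2, 4096, 16, 72) layout so the heads fold into one 1152-wide input for the output
# projection. Kernel _18 then applies what appears to be the adaLN gate (derived from
# buf6/arg15_1/arg17_1) to the attention output and adds it onto the residual in buf643.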
buf664 = reinterpret_tensor(buf627, (600, 1152), (1152, 1), 0); del buf627 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf663, reinterpret_tensor(arg663_1, (1152, 1152), (1, 1152), 0), out=buf664)
del arg663_1
buf666 = buf663; del buf663 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf665, reinterpret_tensor(arg667_1, (1152, 1152), (1, 1152), 0), out=buf666)
del arg667_1
buf667 = reinterpret_tensor(buf662, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf662 # reuse
# Source Nodes: [hidden_states_296], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf667, arg660_1, arg662_1, 9437184, grid=grid(9437184), stream=stream0)
del arg660_1
del arg662_1
buf668 = reinterpret_tensor(buf664, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf664 # reuse
# Source Nodes: [hidden_states_296], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf668, arg664_1, arg666_1, 691200, grid=grid(691200), stream=stream0)
del arg664_1
del arg666_1
buf669 = reinterpret_tensor(buf666, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf666 # reuse
# Source Nodes: [hidden_states_296], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf669, arg668_1, arg670_1, 691200, grid=grid(691200), stream=stream0)
del arg668_1
del arg670_1
# Source Nodes: [hidden_states_296], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf671 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf667, buf668, buf669, reinterpret_tensor(buf670, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf672 = buf671[0]
del buf671
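# Cross-attention reading of the block above (hedged): the queries come from the image
# tokens (buf662, 2 x 4096 x 1152) while the keys/values are projected from 600 = 2 x 300
# encoder (text) tokens via buf663/buf665, giving k/v shaped (2, 16, 300, 72).
# The fourth argument to _scaled_dot_product_efficient_attention is an additive attention
# bias: buf670 holds 2*16*304 = 9,728 values, and the (2, 16, 4096, 300) view with strides
# (4864, 304, 0, 1) broadcasts the same 300-wide key bias (padded to 304 for alignment)
# across all 4096 queries at zero memory cost. Roughly equivalent eager call (illustrative):
#   out = torch.nn.functional.scaled_dot_product_attention(q_img, k_txt, v_txt, attn_mask=bias)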
buf676 = reinterpret_tensor(buf667, (8192, 1152), (1152, 1), 0); del buf667 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf672, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg671_1, (1152, 1152), (1, 1152), 0), out=buf676)
del arg671_1
buf680 = reinterpret_tensor(buf672, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf672 # reuse
buf681 = buf680; del buf680 # reuse
# Source Nodes: [add_126, hidden_states_301, hidden_states_302, mul_66, norm_hidden_states_62, norm_hidden_states_63], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_15.run(buf681, buf676, arg672_1, arg674_1, buf661, arg642_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
buf682 = reinterpret_tensor(buf641, (8192, 4608), (4608, 1), 0); del buf641 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf681, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg675_1, (1152, 4608), (1, 1152), 0), out=buf682)
del arg675_1
buf683 = reinterpret_tensor(buf682, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf682 # reuse
# Source Nodes: [hidden_states_304], Original ATen: [aten.gelu]
triton_poi_fused_gelu_16.run(buf683, arg676_1, arg678_1, 37748736, grid=grid(37748736), stream=stream0)
del arg676_1
del arg678_1
buf684 = reinterpret_tensor(buf681, (8192, 1152), (1152, 1), 0); del buf681 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf683, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg679_1, (4608, 1152), (1, 4608), 0), out=buf684)
del arg679_1
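# triton_red_fused_add_div_mul_native_layer_norm_15 above is a reduction ("red") kernel:
# one program handles the full 1152-feature reduction for each of the 8192 (= 2 x 4096)
# token rows, fusing the cross-attention output's residual add, the adaLN-style shift/scale
# taken from buf6/arg15_1/arg17_1, and the layer norm that feeds the feed-forward, so the
# normalized activations never round-trip through global memory as a separate tensor.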
buf685 = reinterpret_tensor(buf684, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf684 # reuse
buf689 = reinterpret_tensor(buf659, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf659 # reuse
# Source Nodes: [add_130, ff_output_15, hidden_states_301, hidden_states_302, hidden_states_307, mul_68, norm_hidden_states_64, norm_hidden_states_65], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_17.run(buf685, arg642_1, buf6, arg15_1, arg17_1, arg680_1, arg682_1, buf676, arg672_1, arg674_1, buf661, arg683_1, buf689, 8192, 1152, grid=grid(8192), stream=stream0)
del arg642_1
del arg672_1
del arg674_1
del arg680_1
del arg682_1
buf690 = buf676; del buf676 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf689, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg684_1, (1152, 1152), (1, 1152), 0), out=buf690)
del arg684_1
buf691 = reinterpret_tensor(buf661, (8192, 1152), (1152, 1), 0); del buf661 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf689, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg688_1, (1152, 1152), (1, 1152), 0), out=buf691)
del arg688_1
buf692 = reinterpret_tensor(buf652, (8192, 1152), (1152, 1), 0); del buf652 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf689, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg692_1, (1152, 1152), (1, 1152), 0), out=buf692)
del arg692_1
del buf689
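# Kernel _17 above appears to be the per-block epilogue/prologue fusion: in one reduction
# pass it folds the feed-forward output (buf684/buf685) and its gate into the running
# residual and also writes buf689, the shift/scale-modulated, layer-normalized input of the
# next block's self-attention (cf. Source Nodes ff_output_15 and norm_hidden_states_64/65).
# The three fallback_mixed_mm calls that follow are again the Q/K/V projections taken from
# that normalized buffer before its name is released with `del buf689`.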
buf693 = reinterpret_tensor(buf690, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf690 # reuse
# Source Nodes: [hidden_states_308], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf693, arg685_1, arg687_1, 9437184, grid=grid(9437184), stream=stream0)
del arg685_1
del arg687_1
buf694 = reinterpret_tensor(buf691, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf691 # reuse
# Source Nodes: [hidden_states_308], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf694, arg689_1, arg691_1, 9437184, grid=grid(9437184), stream=stream0)
del arg689_1
del arg691_1
buf695 = reinterpret_tensor(buf692, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf692 # reuse
# Source Nodes: [hidden_states_308], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf695, arg693_1, arg695_1, 9437184, grid=grid(9437184), stream=stream0)
del arg693_1
del arg695_1
# Source Nodes: [hidden_states_308], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf696 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf693, buf694, buf695, None, False)
del buf693
buf697 = buf696[0]
del buf696
buf701 = reinterpret_tensor(buf695, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf695 # reuse
# Source Nodes: [hidden_states_309], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf697, buf701, 9437184, grid=grid(9437184), stream=stream0)
buf702 = reinterpret_tensor(buf697, (8192, 1152), (1152, 1), 0); del buf697 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf701, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg696_1, (1152, 1152), (1, 1152), 0), out=buf702)
del arg696_1
buf703 = reinterpret_tensor(buf702, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf702 # reuse
# Source Nodes: [attn_output_16, hidden_states_313, hidden_states_314], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf703, arg683_1, buf6, arg15_1, arg17_1, arg697_1, arg699_1, buf685, 9437184, grid=grid(9437184), stream=stream0)
del arg697_1
del arg699_1
buf704 = reinterpret_tensor(buf685, (8192, 1152), (1152, 1), 0); del buf685 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf703, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg700_1, (1152, 1152), (1, 1152), 0), out=buf704)
del arg700_1
buf705 = reinterpret_tensor(buf669, (600, 1152), (1152, 1), 0); del buf669 # reuse
buf707 = reinterpret_tensor(buf668, (600, 1152), (1152, 1), 0); del buf668 # reuse
buf747 = buf665; del buf665 # reuse
buf749 = reinterpret_tensor(buf626, (600, 1152), (1152, 1), 0); del buf626 # reuse
# Source Nodes: [key_66, key_70, value_66, value_70], Original ATen: [aten.mm]
triton_poi_fused_mm_13.run(buf32, buf705, buf707, buf747, buf749, 691200, grid=grid(691200), stream=stream0)
buf706 = buf623; del buf623 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf705, reinterpret_tensor(arg704_1, (1152, 1152), (1, 1152), 0), out=buf706)
del arg704_1
buf708 = buf705; del buf705 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf707, reinterpret_tensor(arg708_1, (1152, 1152), (1, 1152), 0), out=buf708)
del arg708_1
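# triton_poi_fused_mm_13 above does not perform the matmuls itself (it is a pointwise
# kernel); it appears to stage the 600 x 1152 encoder hidden states from buf32 into four
# separate buffers that serve as the mm inputs for the key/value projections of two
# cross-attention layers at once (Source Nodes key_66/value_66 for this layer and
# key_70/value_70 for the following one, kept alive in buf747/buf749 until then).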
buf709 = reinterpret_tensor(buf704, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf704 # reuse
# Source Nodes: [hidden_states_315], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf709, arg701_1, arg703_1, 9437184, grid=grid(9437184), stream=stream0)
del arg701_1
del arg703_1
buf710 = reinterpret_tensor(buf706, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf706 # reuse
# Source Nodes: [hidden_states_315], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf710, arg705_1, arg707_1, 691200, grid=grid(691200), stream=stream0)
del arg705_1
del arg707_1
buf711 = reinterpret_tensor(buf708, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf708 # reuse
# Source Nodes: [hidden_states_315], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf711, arg709_1, arg711_1, 691200, grid=grid(691200), stream=stream0)
del arg709_1
del arg711_1
buf712 = buf670; del buf670 # reuse
buf754 = buf628; del buf628 # reuse
# Source Nodes: [hidden_states_315, hidden_states_334], Original ATen: [aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_14.run(arg0_1, buf712, buf754, 9728, grid=grid(9728), stream=stream0)
# Source Nodes: [hidden_states_315], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf713 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf709, buf710, buf711, reinterpret_tensor(buf712, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf714 = buf713[0]
del buf713
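# triton_poi_fused_constant_pad_nd_14 rebuilds the additive attention bias from arg0_1
# (presumably the text-encoder attention mask), padding each 300-entry key row out to 304
# and writing two copies: buf712 for this cross-attention (hidden_states_315) and buf754
# stashed for the next one (hidden_states_334), so the pad is not recomputed per layer.
# As before, the (2, 16, 4096, 300) view with a zero stride on the query axis broadcasts
# the bias over all 4096 image tokens without materializing it.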
buf718 = reinterpret_tensor(buf709, (8192, 1152), (1152, 1), 0); del buf709 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf714, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg712_1, (1152, 1152), (1, 1152), 0), out=buf718)
del arg712_1
buf722 = reinterpret_tensor(buf714, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf714 # reuse
buf723 = buf722; del buf722 # reuse
# Source Nodes: [add_134, hidden_states_320, hidden_states_321, mul_70, norm_hidden_states_66, norm_hidden_states_67], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_15.run(buf723, buf718, arg713_1, arg715_1, buf703, arg683_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
buf724 = reinterpret_tensor(buf683, (8192, 4608), (4608, 1), 0); del buf683 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf723, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg716_1, (1152, 4608), (1, 1152), 0), out=buf724)
del arg716_1
buf725 = reinterpret_tensor(buf724, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf724 # reuse
# Source Nodes: [hidden_states_323], Original ATen: [aten.gelu]
triton_poi_fused_gelu_16.run(buf725, arg717_1, arg719_1, 37748736, grid=grid(37748736), stream=stream0)
del arg717_1
del arg719_1
buf726 = reinterpret_tensor(buf723, (8192, 1152), (1152, 1), 0); del buf723 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf725, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg720_1, (4608, 1152), (1, 4608), 0), out=buf726)
del arg720_1
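# Memory-planning pattern used throughout this wrapper: `bufN = reinterpret_tensor(bufM,
# shape, stride, 0); del bufM # reuse` does not copy data; it re-labels bufM's storage with
# a new shape/stride and drops the old name so the allocation can be recycled. That is why
# the 8192 x 4608 GELU workspace keeps moving between names (buf641 -> buf682/buf683 ->
# buf724/buf725 above) instead of being reallocated, and why `del argNNN_1` appears right
# after the last use of each weight to release it eagerly.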
buf727 = reinterpret_tensor(buf726, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf726 # reuse
buf731 = reinterpret_tensor(buf701, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf701 # reuse
# Source Nodes: [add_138, ff_output_16, hidden_states_320, hidden_states_321, hidden_states_326, mul_72, norm_hidden_states_68, norm_hidden_states_69], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_17.run(buf727, arg683_1, buf6, arg15_1, arg17_1, arg721_1, arg723_1, buf718, arg713_1, arg715_1, buf703, arg724_1, buf731, 8192, 1152, grid=grid(8192), stream=stream0)
del arg683_1
del arg713_1
del arg715_1
del arg721_1
del arg723_1
buf732 = buf718; del buf718 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf731, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg725_1, (1152, 1152), (1, 1152), 0), out=buf732)
del arg725_1
buf733 = reinterpret_tensor(buf703, (8192, 1152), (1152, 1), 0); del buf703 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf731, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg729_1, (1152, 1152), (1, 1152), 0), out=buf733)
del arg729_1
buf734 = reinterpret_tensor(buf694, (8192, 1152), (1152, 1), 0); del buf694 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf731, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg733_1, (1152, 1152), (1, 1152), 0), out=buf734)
del arg733_1
del buf731
buf735 = reinterpret_tensor(buf732, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf732 # reuse
# Source Nodes: [hidden_states_327], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf735, arg726_1, arg728_1, 9437184, grid=grid(9437184), stream=stream0)
del arg726_1
del arg728_1
buf736 = reinterpret_tensor(buf733, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf733 # reuse
# Source Nodes: [hidden_states_327], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf736, arg730_1, arg732_1, 9437184, grid=grid(9437184), stream=stream0)
del arg730_1
del arg732_1
buf737 = reinterpret_tensor(buf734, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf734 # reuse
# Source Nodes: [hidden_states_327], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf737, arg734_1, arg736_1, 9437184, grid=grid(9437184), stream=stream0)
del arg734_1
del arg736_1
# Source Nodes: [hidden_states_327], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf738 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf735, buf736, buf737, None, False)
del buf735
buf739 = buf738[0]
del buf738
buf743 = reinterpret_tensor(buf737, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf737 # reuse
# Source Nodes: [hidden_states_328], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf739, buf743, 9437184, grid=grid(9437184), stream=stream0)
buf744 = reinterpret_tensor(buf739, (8192, 1152), (1152, 1), 0); del buf739 # reuse
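# Note on kernel naming: Inductor deduplicates generated Triton kernels by their code, so
# the same triton_poi_fused__scaled_dot_product_cudnn_attention_8 body is reused wherever
# an identical bias-add/head-split is needed, including in front of the
# _scaled_dot_product_efficient_attention calls in this file; the name only records the op
# it was first fused for. Note also how the V buffer's storage (buf737) is immediately
# recycled as the destination of the head-merging clone (buf743) once the attention output
# exists.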
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf743, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg737_1, (1152, 1152), (1, 1152), 0), out=buf744)
del arg737_1
buf745 = reinterpret_tensor(buf744, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf744 # reuse
# Source Nodes: [attn_output_17, hidden_states_332, hidden_states_333], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf745, arg724_1, buf6, arg15_1, arg17_1, arg738_1, arg740_1, buf727, 9437184, grid=grid(9437184), stream=stream0)
del arg738_1
del arg740_1
buf746 = reinterpret_tensor(buf727, (8192, 1152), (1152, 1), 0); del buf727 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf745, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg741_1, (1152, 1152), (1, 1152), 0), out=buf746)
del arg741_1
buf748 = reinterpret_tensor(buf711, (600, 1152), (1152, 1), 0); del buf711 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf747, reinterpret_tensor(arg745_1, (1152, 1152), (1, 1152), 0), out=buf748)
del arg745_1
buf750 = buf747; del buf747 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf749, reinterpret_tensor(arg749_1, (1152, 1152), (1, 1152), 0), out=buf750)
del arg749_1
buf751 = reinterpret_tensor(buf746, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf746 # reuse
# Source Nodes: [hidden_states_334], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf751, arg742_1, arg744_1, 9437184, grid=grid(9437184), stream=stream0)
del arg742_1
del arg744_1
buf752 = reinterpret_tensor(buf748, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf748 # reuse
# Source Nodes: [hidden_states_334], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf752, arg746_1, arg748_1, 691200, grid=grid(691200), stream=stream0)
del arg746_1
del arg748_1
buf753 = reinterpret_tensor(buf750, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf750 # reuse
# Source Nodes: [hidden_states_334], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf753, arg750_1, arg752_1, 691200, grid=grid(691200), stream=stream0)
del arg750_1
del arg752_1
# Source Nodes: [hidden_states_334], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf755 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf751, buf752, buf753, reinterpret_tensor(buf754, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf756 = buf755[0]
del buf755
buf760 = reinterpret_tensor(buf751, (8192, 1152), (1152, 1), 0); del buf751 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf756, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg753_1, (1152, 1152), (1, 1152), 0), out=buf760)
del arg753_1
buf764 = reinterpret_tensor(buf756, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf756 # reuse
buf765 = buf764; del buf764 # reuse
# Source Nodes: [add_142, hidden_states_339, hidden_states_340, mul_74, norm_hidden_states_70, norm_hidden_states_71], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_15.run(buf765, buf760, arg754_1, arg756_1, buf745, arg724_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
buf766 = reinterpret_tensor(buf725, (8192, 4608), (4608, 1), 0); del buf725 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf765, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg757_1, (1152, 4608), (1, 1152), 0), out=buf766)
del arg757_1
buf767 = reinterpret_tensor(buf766, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf766 # reuse
# Source Nodes: [hidden_states_342], Original ATen: [aten.gelu]
triton_poi_fused_gelu_16.run(buf767, arg758_1, arg760_1, 37748736, grid=grid(37748736), stream=stream0)
del arg758_1
del arg760_1
buf768 = reinterpret_tensor(buf765, (8192, 1152), (1152, 1), 0); del buf765 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf767, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg761_1, (4608, 1152), (1, 4608), 0), out=buf768)
del arg761_1
buf769 = reinterpret_tensor(buf768, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf768 # reuse
buf773 = reinterpret_tensor(buf743, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf743 # reuse
# Source Nodes: [add_146, ff_output_17, hidden_states_339, hidden_states_340, hidden_states_345, mul_76, norm_hidden_states_72, norm_hidden_states_73], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_17.run(buf769, arg724_1, buf6, arg15_1, arg17_1, arg762_1, arg764_1, buf760, arg754_1, arg756_1, buf745, arg765_1, buf773, 8192, 1152, grid=grid(8192), stream=stream0)
del arg724_1
del arg754_1
del arg756_1
del arg762_1
del arg764_1
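# The sequence above repeats the same per-block pattern seen earlier (Q/K/V mixed mms,
# fused bias/head-split, SDPA, head-merging clone, output projection, gated residual,
# fused layer norm, GELU MLP); only the weight indices advance, with the adaLN base
# argument stepping arg601_1 -> arg642_1 -> arg683_1 -> arg724_1 -> arg765_1, i.e. 41
# parameters per block. This is consistent with the whole stack of transformer blocks
# having been flattened into this single generated wrapper rather than looped at runtime.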
buf774 = buf760; del buf760 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf773, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg766_1, (1152, 1152), (1, 1152), 0), out=buf774)
del arg766_1
buf775 = reinterpret_tensor(buf745, (8192, 1152), (1152, 1), 0); del buf745 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf773, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg770_1, (1152, 1152), (1, 1152), 0), out=buf775)
del arg770_1
buf776 = reinterpret_tensor(buf736, (8192, 1152), (1152, 1), 0); del buf736 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf773, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg774_1, (1152, 1152), (1, 1152), 0), out=buf776)
del arg774_1
del buf773
buf777 = reinterpret_tensor(buf774, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf774 # reuse
# Source Nodes: [hidden_states_346], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf777, arg767_1, arg769_1, 9437184, grid=grid(9437184), stream=stream0)
del arg767_1
del arg769_1
buf778 = reinterpret_tensor(buf775, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf775 # reuse
# Source Nodes: [hidden_states_346], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf778, arg771_1, arg773_1, 9437184, grid=grid(9437184), stream=stream0)
del arg771_1
del arg773_1
buf779 = reinterpret_tensor(buf776, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf776 # reuse
# Source Nodes: [hidden_states_346], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf779, arg775_1, arg777_1, 9437184, grid=grid(9437184), stream=stream0)
del arg775_1
del arg777_1
# Source Nodes: [hidden_states_346], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf780 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf777, buf778, buf779, None, False)
del buf777
buf781 = buf780[0]
del buf780
buf785 = reinterpret_tensor(buf779, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf779 # reuse
# Source Nodes: [hidden_states_347], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf781, buf785, 9437184, grid=grid(9437184), stream=stream0)
buf786 = reinterpret_tensor(buf781, (8192, 1152), (1152, 1), 0); del buf781 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf785, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg778_1, (1152, 1152), (1, 1152), 0), out=buf786)
del arg778_1
buf787 = reinterpret_tensor(buf786, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf786 # reuse
# Source Nodes: [attn_output_18, hidden_states_351, hidden_states_352], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf787, arg765_1, buf6, arg15_1, arg17_1, arg779_1, arg781_1, buf769, 9437184, grid=grid(9437184), stream=stream0)
del arg779_1
del arg781_1
buf788 = reinterpret_tensor(buf769, (8192, 1152), (1152, 1), 0); del buf769 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf787, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg782_1, (1152, 1152), (1, 1152), 0), out=buf788)
del arg782_1
buf789 = reinterpret_tensor(buf753, (600, 1152), (1152, 1), 0); del buf753 # reuse
buf791 = reinterpret_tensor(buf752, (600, 1152), (1152, 1), 0); del buf752 # reuse
buf831 = buf749; del buf749 # reuse
buf833 = reinterpret_tensor(buf710, (600, 1152), (1152, 1), 0); del buf710 # reuse
# Source Nodes: [key_74, key_78, value_74, value_78], Original ATen: [aten.mm]
triton_poi_fused_mm_13.run(buf32, buf789, buf791, buf831, buf833, 691200, grid=grid(691200), stream=stream0)
buf790 = buf707; del buf707 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf789, reinterpret_tensor(arg786_1, (1152, 1152), (1, 1152), 0), out=buf790)
del arg786_1
buf792 = buf789; del buf789 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf791, reinterpret_tensor(arg790_1, (1152, 1152), (1, 1152), 0), out=buf792)
del arg790_1
buf793 = reinterpret_tensor(buf788, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf788 # reuse
# Source Nodes: [hidden_states_353], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf793, arg783_1, arg785_1, 9437184, grid=grid(9437184), stream=stream0)
del arg783_1
del arg785_1
buf794 = reinterpret_tensor(buf790, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf790 # reuse
# Source Nodes: [hidden_states_353], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf794, arg787_1, arg789_1, 691200, grid=grid(691200), stream=stream0)
del arg787_1
del arg789_1
buf795 = reinterpret_tensor(buf792, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf792 # reuse
# Source Nodes: [hidden_states_353], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf795, arg791_1, arg793_1, 691200, grid=grid(691200), stream=stream0)
del arg791_1
del arg793_1
buf796 = buf754; del buf754 # reuse
buf838 = buf712; del buf712 # reuse
# Source Nodes: [hidden_states_353, hidden_states_372], Original ATen: [aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_14.run(arg0_1, buf796, buf838, 9728, grid=grid(9728), stream=stream0)
# Source Nodes: [hidden_states_353], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf797 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf793, buf794, buf795, reinterpret_tensor(buf796, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf798 = buf797[0]
del buf797
buf802 = reinterpret_tensor(buf793, (8192, 1152), (1152, 1), 0); del buf793 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf798, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg794_1, (1152, 1152), (1, 1152), 0), out=buf802)
del arg794_1
buf806 = reinterpret_tensor(buf798, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf798 # reuse
buf807 = buf806; del buf806 # reuse
# Source Nodes: [add_150, hidden_states_358, hidden_states_359, mul_78, norm_hidden_states_74, norm_hidden_states_75], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
buf806 = reinterpret_tensor(buf798, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf798 # reuse
buf807 = buf806; del buf806 # reuse
# Source Nodes: [add_150, hidden_states_358, hidden_states_359, mul_78, norm_hidden_states_74, norm_hidden_states_75], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_15.run(buf807, buf802, arg795_1, arg797_1, buf787, arg765_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
buf808 = reinterpret_tensor(buf767, (8192, 4608), (4608, 1), 0); del buf767 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf807, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg798_1, (1152, 4608), (1, 1152), 0), out=buf808)
del arg798_1
buf809 = reinterpret_tensor(buf808, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf808 # reuse
# Source Nodes: [hidden_states_361], Original ATen: [aten.gelu]
triton_poi_fused_gelu_16.run(buf809, arg799_1, arg801_1, 37748736, grid=grid(37748736), stream=stream0)
del arg799_1
del arg801_1
buf810 = reinterpret_tensor(buf807, (8192, 1152), (1152, 1), 0); del buf807 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf809, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg802_1, (4608, 1152), (1, 4608), 0), out=buf810)
del arg802_1
buf811 = reinterpret_tensor(buf810, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf810 # reuse
buf815 = reinterpret_tensor(buf785, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf785 # reuse
# Source Nodes: [add_154, ff_output_18, hidden_states_358, hidden_states_359, hidden_states_364, mul_80, norm_hidden_states_76, norm_hidden_states_77], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_17.run(buf811, arg765_1, buf6, arg15_1, arg17_1, arg803_1, arg805_1, buf802, arg795_1, arg797_1, buf787, arg806_1, buf815, 8192, 1152, grid=grid(8192), stream=stream0)
del arg765_1
del arg795_1
del arg797_1
del arg803_1
del arg805_1
buf816 = buf802; del buf802 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf815, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg807_1, (1152, 1152), (1, 1152), 0), out=buf816)
del arg807_1
buf817 = reinterpret_tensor(buf787, (8192, 1152), (1152, 1), 0); del buf787 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf815, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg811_1, (1152, 1152), (1, 1152), 0), out=buf817)
del arg811_1
buf818 = reinterpret_tensor(buf778, (8192, 1152), (1152, 1), 0); del buf778 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf815, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg815_1, (1152, 1152), (1, 1152), 0), out=buf818)
del arg815_1
del buf815
buf819 = reinterpret_tensor(buf816, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf816 # reuse
# Source Nodes: [hidden_states_365], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf819, arg808_1, arg810_1, 9437184, grid=grid(9437184), stream=stream0)
del arg808_1
del arg810_1
buf820 = reinterpret_tensor(buf817, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf817 # reuse
# Source Nodes: [hidden_states_365], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf820, arg812_1, arg814_1, 9437184, grid=grid(9437184), stream=stream0)
del arg812_1
del arg814_1
buf821 = reinterpret_tensor(buf818, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf818 # reuse
# Source Nodes: [hidden_states_365], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf821, arg816_1, arg818_1, 9437184, grid=grid(9437184), stream=stream0)
del arg816_1
del arg818_1
# Source Nodes: [hidden_states_365], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf822 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf819, buf820, buf821, None, False)
del buf819
buf823 = buf822[0]
del buf822
buf827 = reinterpret_tensor(buf821, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf821 # reuse
# Source Nodes: [hidden_states_366], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf823, buf827, 9437184, grid=grid(9437184), stream=stream0)
buf828 = reinterpret_tensor(buf823, (8192, 1152), (1152, 1), 0); del buf823 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf827, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg819_1, (1152, 1152), (1, 1152), 0), out=buf828)
del arg819_1
buf829 = reinterpret_tensor(buf828, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf828 # reuse
# Source Nodes: [attn_output_19, hidden_states_370, hidden_states_371], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf829, arg806_1, buf6, arg15_1, arg17_1, arg820_1, arg822_1, buf811, 9437184, grid=grid(9437184), stream=stream0)
del arg820_1
del arg822_1
buf830 = reinterpret_tensor(buf811, (8192, 1152), (1152, 1), 0); del buf811 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf829, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg823_1, (1152, 1152), (1, 1152), 0), out=buf830)
del arg823_1
buf832 = reinterpret_tensor(buf795, (600, 1152), (1152, 1), 0); del buf795 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf831, reinterpret_tensor(arg827_1, (1152, 1152), (1, 1152), 0), out=buf832)
del arg827_1
buf834 = buf831; del buf831 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf833, reinterpret_tensor(arg831_1, (1152, 1152), (1, 1152), 0), out=buf834)
del arg831_1
buf835 = reinterpret_tensor(buf830, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf830 # reuse
# Source Nodes: [hidden_states_372], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf835, arg824_1, arg826_1, 9437184, grid=grid(9437184), stream=stream0)
del arg824_1
del arg826_1
buf836 = reinterpret_tensor(buf832, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf832 # reuse
# Source Nodes: [hidden_states_372], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf836, arg828_1, arg830_1, 691200, grid=grid(691200), stream=stream0)
del arg828_1
del arg830_1
buf837 = reinterpret_tensor(buf834, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf834 # reuse
# Source Nodes: [hidden_states_372], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf837, arg832_1, arg834_1, 691200, grid=grid(691200), stream=stream0)
del arg832_1
del arg834_1
# Source Nodes: [hidden_states_372], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf839 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf835, buf836, buf837, reinterpret_tensor(buf838, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf840 = buf839[0]
del buf839
buf844 = reinterpret_tensor(buf835, (8192, 1152), (1152, 1), 0); del buf835 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf840, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg835_1, (1152, 1152), (1, 1152), 0), out=buf844)
del arg835_1
buf848 = reinterpret_tensor(buf840, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf840 # reuse
buf849 = buf848; del buf848 # reuse
# Source Nodes: [add_158, hidden_states_377, hidden_states_378, mul_82, norm_hidden_states_78, norm_hidden_states_79], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_15.run(buf849, buf844, arg836_1, arg838_1, buf829, arg806_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
buf850 = reinterpret_tensor(buf809, (8192, 4608), (4608, 1), 0); del buf809 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf849, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg839_1, (1152, 4608), (1, 1152), 0), out=buf850)
del arg839_1
buf851 = reinterpret_tensor(buf850, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf850 # reuse
# Source Nodes: [hidden_states_380], Original ATen: [aten.gelu]
triton_poi_fused_gelu_16.run(buf851, arg840_1, arg842_1, 37748736, grid=grid(37748736), stream=stream0)
del arg840_1
del arg842_1
buf852 = reinterpret_tensor(buf849, (8192, 1152), (1152, 1), 0); del buf849 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf851, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg843_1, (4608, 1152), (1, 4608), 0), out=buf852)
del arg843_1
buf853 = reinterpret_tensor(buf852, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf852 # reuse
buf857 = reinterpret_tensor(buf827, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf827 # reuse
# Source Nodes: [add_162, ff_output_19, hidden_states_377, hidden_states_378, hidden_states_383, mul_84, norm_hidden_states_80, norm_hidden_states_81], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_17.run(buf853, arg806_1, buf6, arg15_1, arg17_1, arg844_1, arg846_1, buf844, arg836_1, arg838_1, buf829, arg847_1, buf857, 8192, 1152, grid=grid(8192), stream=stream0)
del arg806_1
del arg836_1
del arg838_1
del arg844_1
del arg846_1
buf858 = buf844; del buf844 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf857, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg848_1, (1152, 1152), (1, 1152), 0), out=buf858)
del arg848_1
buf859 = reinterpret_tensor(buf829, (8192, 1152), (1152, 1), 0); del buf829 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf857, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg852_1, (1152, 1152), (1, 1152), 0), out=buf859)
del arg852_1
buf860 = reinterpret_tensor(buf820, (8192, 1152), (1152, 1), 0); del buf820 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf857, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg856_1, (1152, 1152), (1, 1152), 0), out=buf860)
del arg856_1
del buf857
buf861 = reinterpret_tensor(buf858, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf858 # reuse
# Source Nodes: [hidden_states_384], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf861, arg849_1, arg851_1, 9437184, grid=grid(9437184), stream=stream0)
del arg849_1
del arg851_1
buf862 = reinterpret_tensor(buf859, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf859 # reuse
# Source Nodes: [hidden_states_384], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf862, arg853_1, arg855_1, 9437184, grid=grid(9437184), stream=stream0)
del arg853_1
del arg855_1
buf863 = reinterpret_tensor(buf860, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf860 # reuse
# Source Nodes: [hidden_states_384], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf863, arg857_1, arg859_1, 9437184, grid=grid(9437184), stream=stream0)
del arg857_1
del arg859_1
# Source Nodes: [hidden_states_384], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf864 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf861, buf862, buf863, None, False)
del buf861
buf865 = buf864[0]
del buf864
buf869 = reinterpret_tensor(buf863, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf863 # reuse
# Source Nodes: [hidden_states_385], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf865, buf869, 9437184, grid=grid(9437184), stream=stream0)
buf870 = reinterpret_tensor(buf865, (8192, 1152), (1152, 1), 0); del buf865 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf869, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg860_1, (1152, 1152), (1, 1152), 0), out=buf870)
del arg860_1
buf871 = reinterpret_tensor(buf870, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf870 # reuse
# Source Nodes: [attn_output_20, hidden_states_389, hidden_states_390], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf871, arg847_1, buf6, arg15_1, arg17_1, arg861_1, arg863_1, buf853, 9437184, grid=grid(9437184), stream=stream0)
del arg861_1
del arg863_1
buf872 = reinterpret_tensor(buf853, (8192, 1152), (1152, 1), 0); del buf853 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf871, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg864_1, (1152, 1152), (1, 1152), 0), out=buf872)
del arg864_1
buf873 = reinterpret_tensor(buf837, (600, 1152), (1152, 1), 0); del buf837 # reuse
buf875 = reinterpret_tensor(buf836, (600, 1152), (1152, 1), 0); del buf836 # reuse
buf915 = buf833; del buf833 # reuse
buf917 = reinterpret_tensor(buf794, (600, 1152), (1152, 1), 0); del buf794 # reuse
# Source Nodes: [key_82, key_86, value_82, value_86], Original ATen: [aten.mm]
triton_poi_fused_mm_13.run(buf32, buf873, buf875, buf915, buf917, 691200, grid=grid(691200), stream=stream0)
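# The clone kernel above (hidden_states_385) rewrites the (2, 16, 4096, 72)
# attention output as a contiguous (2, 4096, 16, 72) buffer, which is then
# viewed as (8192, 1152) for the output projection: the standard "merge heads"
# step of multi-head attention. A rough eager-mode equivalent (helper name
# illustrative only):

import torch

def merge_heads_sketch(attn_out: torch.Tensor) -> torch.Tensor:
    # (batch, heads, seq, head_dim) -> (batch * seq, heads * head_dim)
    b, h, s, d = attn_out.shape
    return attn_out.transpose(1, 2).contiguous().view(b * s, h * d)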
buf874 = buf791; del buf791 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf873, reinterpret_tensor(arg868_1, (1152, 1152), (1, 1152), 0), out=buf874)
del arg868_1
buf876 = buf873; del buf873 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf875, reinterpret_tensor(arg872_1, (1152, 1152), (1, 1152), 0), out=buf876)
del arg872_1
buf877 = reinterpret_tensor(buf872, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf872 # reuse
# Source Nodes: [hidden_states_391], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf877, arg865_1, arg867_1, 9437184, grid=grid(9437184), stream=stream0)
del arg865_1
del arg867_1
buf878 = reinterpret_tensor(buf874, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf874 # reuse
# Source Nodes: [hidden_states_391], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf878, arg869_1, arg871_1, 691200, grid=grid(691200), stream=stream0)
del arg869_1
del arg871_1
buf879 = reinterpret_tensor(buf876, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf876 # reuse
# Source Nodes: [hidden_states_391], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf879, arg873_1, arg875_1, 691200, grid=grid(691200), stream=stream0)
del arg873_1
del arg875_1
buf880 = buf838; del buf838 # reuse
buf922 = buf796; del buf796 # reuse
# Source Nodes: [hidden_states_391, hidden_states_410], Original ATen: [aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_14.run(arg0_1, buf880, buf922, 9728, grid=grid(9728), stream=stream0)
# Source Nodes: [hidden_states_391], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf881 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf877, buf878, buf879, reinterpret_tensor(buf880, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf882 = buf881[0]
del buf881
buf886 = reinterpret_tensor(buf877, (8192, 1152), (1152, 1), 0); del buf877 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf882, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg876_1, (1152, 1152), (1, 1152), 0), out=buf886)
del arg876_1
buf890 = reinterpret_tensor(buf882, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf882 # reuse
buf891 = buf890; del buf890 # reuse
# Source Nodes: [add_166, hidden_states_396, hidden_states_397, mul_86, norm_hidden_states_82, norm_hidden_states_83], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_15.run(buf891, buf886, arg877_1, arg879_1, buf871, arg847_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
buf892 = reinterpret_tensor(buf851, (8192, 4608), (4608, 1), 0); del buf851 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf891, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg880_1, (1152, 4608), (1, 1152), 0), out=buf892)
del arg880_1
buf893 = reinterpret_tensor(buf892, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf892 # reuse
# Source Nodes: [hidden_states_399], Original ATen: [aten.gelu]
triton_poi_fused_gelu_16.run(buf893, arg881_1, arg883_1, 37748736, grid=grid(37748736), stream=stream0)
del arg881_1
del arg883_1
buf894 = reinterpret_tensor(buf891, (8192, 1152), (1152, 1), 0); del buf891 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf893, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg884_1, (4608, 1152), (1, 4608), 0), out=buf894)
del arg884_1
buf895 = reinterpret_tensor(buf894, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf894 # reuse
buf899 = reinterpret_tensor(buf869, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf869 # reuse
# Source Nodes: [add_170, ff_output_20, hidden_states_396, hidden_states_397, hidden_states_402, mul_88, norm_hidden_states_84, norm_hidden_states_85], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_17.run(buf895, arg847_1, buf6, arg15_1, arg17_1, arg885_1, arg887_1, buf886, arg877_1, arg879_1, buf871, arg888_1, buf899, 8192, 1152, grid=grid(8192), stream=stream0)
del arg847_1
del arg877_1
del arg879_1
del arg885_1
del arg887_1
buf900 = buf886; del buf886 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf899, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg889_1, (1152, 1152), (1, 1152), 0), out=buf900)
del arg889_1
buf901 = reinterpret_tensor(buf871, (8192, 1152), (1152, 1), 0); del buf871 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf899, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg893_1, (1152, 1152), (1, 1152), 0), out=buf901)
del arg893_1
buf902 = reinterpret_tensor(buf862, (8192, 1152), (1152, 1), 0); del buf862 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf899, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg897_1, (1152, 1152), (1, 1152), 0), out=buf902)
del arg897_1
del buf899
buf903 = reinterpret_tensor(buf900, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf900 # reuse
# Source Nodes: [hidden_states_403], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf903, arg890_1, arg892_1, 9437184, grid=grid(9437184), stream=stream0)
del arg890_1
del arg892_1
buf904 = reinterpret_tensor(buf901, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf901 # reuse
# Source Nodes: [hidden_states_403], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf904, arg894_1, arg896_1, 9437184, grid=grid(9437184), stream=stream0)
del arg894_1
del arg896_1
buf905 = reinterpret_tensor(buf902, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf902 # reuse
# Source Nodes: [hidden_states_403], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf905, arg898_1, arg900_1, 9437184, grid=grid(9437184), stream=stream0)
del arg898_1
del arg900_1
# Source Nodes: [hidden_states_403], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf906 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf903, buf904, buf905, None, False)
del buf903
buf907 = buf906[0]
del buf906
buf911 = reinterpret_tensor(buf905, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf905 # reuse
# Source Nodes: [hidden_states_404], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf907, buf911, 9437184, grid=grid(9437184), stream=stream0)
buf912 = reinterpret_tensor(buf907, (8192, 1152), (1152, 1), 0); del buf907 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf911, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg901_1, (1152, 1152), (1, 1152), 0), out=buf912)
del arg901_1
buf913 = reinterpret_tensor(buf912, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf912 # reuse
# Source Nodes: [attn_output_21, hidden_states_408, hidden_states_409], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf913, arg888_1, buf6, arg15_1, arg17_1, arg902_1, arg904_1, buf895, 9437184, grid=grid(9437184), stream=stream0)
del arg902_1
del arg904_1
buf914 = reinterpret_tensor(buf895, (8192, 1152), (1152, 1), 0); del buf895 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf913, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg905_1, (1152, 1152), (1, 1152), 0), out=buf914)
del arg905_1
buf916 = reinterpret_tensor(buf879, (600, 1152), (1152, 1), 0); del buf879 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf915, reinterpret_tensor(arg909_1, (1152, 1152), (1, 1152), 0), out=buf916)
del arg909_1
buf918 = buf915; del buf915 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf917, reinterpret_tensor(arg913_1, (1152, 1152), (1, 1152), 0), out=buf918)
del arg913_1
buf919 = reinterpret_tensor(buf914, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf914 # reuse
# Source Nodes: [hidden_states_410], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf919, arg906_1, arg908_1, 9437184, grid=grid(9437184), stream=stream0)
del arg906_1
del arg908_1
buf920 = reinterpret_tensor(buf916, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf916 # reuse
# Source Nodes: [hidden_states_410], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf920, arg910_1, arg912_1, 691200, grid=grid(691200), stream=stream0)
del arg910_1
del arg912_1
buf921 = reinterpret_tensor(buf918, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf918 # reuse
# Source Nodes: [hidden_states_410], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf921, arg914_1, arg916_1, 691200, grid=grid(691200), stream=stream0)
del arg914_1
del arg916_1
# Source Nodes: [hidden_states_410], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf923 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf919, buf920, buf921, reinterpret_tensor(buf922, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf924 = buf923[0]
del buf923
buf928 = reinterpret_tensor(buf919, (8192, 1152), (1152, 1), 0); del buf919 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf924, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg917_1, (1152, 1152), (1, 1152), 0), out=buf928)
del arg917_1
buf932 = reinterpret_tensor(buf924, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf924 # reuse
buf933 = buf932; del buf932 # reuse
# Source Nodes: [add_174, hidden_states_415, hidden_states_416, mul_90, norm_hidden_states_86, norm_hidden_states_87], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_15.run(buf933, buf928, arg918_1, arg920_1, buf913, arg888_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
buf934 = reinterpret_tensor(buf893, (8192, 4608), (4608, 1), 0); del buf893 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf933, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg921_1, (1152, 4608), (1, 1152), 0), out=buf934)
del arg921_1
buf935 = reinterpret_tensor(buf934, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf934 # reuse
# Source Nodes: [hidden_states_418], Original ATen: [aten.gelu]
triton_poi_fused_gelu_16.run(buf935, arg922_1, arg924_1, 37748736, grid=grid(37748736), stream=stream0)
del arg922_1
del arg924_1
buf936 = reinterpret_tensor(buf933, (8192, 1152), (1152, 1), 0); del buf933 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf935, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg925_1, (4608, 1152), (1, 4608), 0), out=buf936)
del arg925_1
buf937 = reinterpret_tensor(buf936, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf936 # reuse
buf941 = reinterpret_tensor(buf911, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf911 # reuse
# Source Nodes: [add_178, ff_output_21, hidden_states_415, hidden_states_416, hidden_states_421, mul_92, norm_hidden_states_88, norm_hidden_states_89], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_17.run(buf937, arg888_1, buf6, arg15_1, arg17_1, arg926_1, arg928_1, buf928, arg918_1, arg920_1, buf913, arg929_1, buf941, 8192, 1152, grid=grid(8192), stream=stream0)
del arg888_1
del arg918_1
del arg920_1
del arg926_1
del arg928_1
buf942 = buf928; del buf928 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf941, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg930_1, (1152, 1152), (1, 1152), 0), out=buf942)
del arg930_1
buf943 = reinterpret_tensor(buf913, (8192, 1152), (1152, 1), 0); del buf913 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf941, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg934_1, (1152, 1152), (1, 1152), 0), out=buf943)
del arg934_1
buf944 = reinterpret_tensor(buf904, (8192, 1152), (1152, 1), 0); del buf904 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf941, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg938_1, (1152, 1152), (1, 1152), 0), out=buf944)
del arg938_1
del buf941
buf945 = reinterpret_tensor(buf942, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf942 # reuse
# Source Nodes: [hidden_states_422], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf945, arg931_1, arg933_1, 9437184, grid=grid(9437184), stream=stream0)
del arg931_1
del arg933_1
buf946 = reinterpret_tensor(buf943, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf943 # reuse
# Source Nodes: [hidden_states_422], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf946, arg935_1, arg937_1, 9437184, grid=grid(9437184), stream=stream0)
del arg935_1
del arg937_1
buf947 = reinterpret_tensor(buf944, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf944 # reuse
# Source Nodes: [hidden_states_422], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf947, arg939_1, arg941_1, 9437184, grid=grid(9437184), stream=stream0)
del arg939_1
del arg941_1
# Source Nodes: [hidden_states_422], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf948 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf945, buf946, buf947, None, False)
del buf945
buf949 = buf948[0]
del buf948
buf953 = reinterpret_tensor(buf947, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf947 # reuse
# Source Nodes: [hidden_states_423], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf949, buf953, 9437184, grid=grid(9437184), stream=stream0)
buf954 = reinterpret_tensor(buf949, (8192, 1152), (1152, 1), 0); del buf949 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf953, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg942_1, (1152, 1152), (1, 1152), 0), out=buf954)
del arg942_1
buf955 = reinterpret_tensor(buf954, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf954 # reuse
# Source Nodes: [attn_output_22, hidden_states_427, hidden_states_428], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf955, arg929_1, buf6, arg15_1, arg17_1, arg943_1, arg945_1, buf937, 9437184, grid=grid(9437184), stream=stream0)
del arg943_1
del arg945_1
buf956 = reinterpret_tensor(buf937, (8192, 1152), (1152, 1), 0); del buf937 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf955, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg946_1, (1152, 1152), (1, 1152), 0), out=buf956)
del arg946_1
buf957 = reinterpret_tensor(buf921, (600, 1152), (1152, 1), 0); del buf921 # reuse
buf959 = reinterpret_tensor(buf920, (600, 1152), (1152, 1), 0); del buf920 # reuse
buf999 = buf917; del buf917 # reuse
buf1001 = reinterpret_tensor(buf878, (600, 1152), (1152, 1), 0); del buf878 # reuse
# Source Nodes: [key_90, key_94, value_90, value_94], Original ATen: [aten.mm]
triton_poi_fused_mm_13.run(buf32, buf957, buf959, buf999, buf1001, 691200, grid=grid(691200), stream=stream0)
buf958 = buf875; del buf875 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf957, reinterpret_tensor(arg950_1, (1152, 1152), (1, 1152), 0), out=buf958)
del arg950_1
buf960 = buf957; del buf957 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf959, reinterpret_tensor(arg954_1, (1152, 1152), (1, 1152), 0), out=buf960)
del arg954_1
buf961 = reinterpret_tensor(buf956, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf956 # reuse
# Source Nodes: [hidden_states_429], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf961, arg947_1, arg949_1, 9437184, grid=grid(9437184), stream=stream0)
del arg947_1
del arg949_1
buf962 = reinterpret_tensor(buf958, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf958 # reuse
# Source Nodes: [hidden_states_429], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf962, arg951_1, arg953_1, 691200, grid=grid(691200), stream=stream0)
del arg951_1
del arg953_1
buf963 = reinterpret_tensor(buf960, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf960 # reuse
# Source Nodes: [hidden_states_429], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf963, arg955_1, arg957_1, 691200, grid=grid(691200), stream=stream0)
del arg955_1
del arg957_1
buf964 = buf922; del buf922 # reuse
buf1006 = buf880; del buf880 # reuse
# Source Nodes: [hidden_states_429, hidden_states_448], Original ATen: [aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_14.run(arg0_1, buf964, buf1006, 9728, grid=grid(9728), stream=stream0)
# Source Nodes: [hidden_states_429], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf965 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf961, buf962, buf963, reinterpret_tensor(buf964, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf966 = buf965[0]
del buf965
buf970 = reinterpret_tensor(buf961, (8192, 1152), (1152, 1), 0); del buf961 # reuse
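# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the generated output): this
# is the cross-attention variant. K and V come from a 300-token encoder
# context (the (600, 1152) buffers are 2 x 300 rows), and the additive bias
# built by constant_pad_nd is padded to a 304-wide buffer and then viewed as
# (2, 16, 4096, 300) with stride (4864, 304, 0, 1): the zero stride on the
# query axis broadcasts one bias row per (batch, head, key) to all 4096
# queries without materialising it. Shape-level sketch only; whether the bias
# comes from an encoder attention mask is an assumption on my part.
import torch
import torch.nn.functional as F

def _cross_attention_sketch(q, k, v, bias_row):
    # q: (2, 16, 4096, 72); k, v: (2, 16, 300, 72); bias_row: (2, 16, 300)
    bias = bias_row[:, :, None, :].expand(2, 16, 4096, 300)   # stride-0 broadcast, no copy
    return F.scaled_dot_product_attention(q, k, v, attn_mask=bias)
# ---------------------------------------------------------------------------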
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf966, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg958_1, (1152, 1152), (1, 1152), 0), out=buf970)
del arg958_1
buf974 = reinterpret_tensor(buf966, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf966 # reuse
buf975 = buf974; del buf974 # reuse
# Source Nodes: [add_182, hidden_states_434, hidden_states_435, mul_94, norm_hidden_states_90, norm_hidden_states_91], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_15.run(buf975, buf970, arg959_1, arg961_1, buf955, arg929_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
buf976 = reinterpret_tensor(buf935, (8192, 4608), (4608, 1), 0); del buf935 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf975, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg962_1, (1152, 4608), (1, 1152), 0), out=buf976)
del arg962_1
buf977 = reinterpret_tensor(buf976, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf976 # reuse
# Source Nodes: [hidden_states_437], Original ATen: [aten.gelu]
triton_poi_fused_gelu_16.run(buf977, arg963_1, arg965_1, 37748736, grid=grid(37748736), stream=stream0)
del arg963_1
del arg965_1
buf978 = reinterpret_tensor(buf975, (8192, 1152), (1152, 1), 0); del buf975 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf977, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg966_1, (4608, 1152), (1, 4608), 0), out=buf978)
del arg966_1
buf979 = reinterpret_tensor(buf978, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf978 # reuse
buf983 = reinterpret_tensor(buf953, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf953 # reuse
# Source Nodes: [add_186, ff_output_22, hidden_states_434, hidden_states_435, hidden_states_440, mul_96, norm_hidden_states_92, norm_hidden_states_93], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_17.run(buf979, arg929_1, buf6, arg15_1, arg17_1, arg967_1, arg969_1, buf970, arg959_1, arg961_1, buf955, arg970_1, buf983, 8192, 1152, grid=grid(8192), stream=stream0)
del arg929_1
del arg959_1
del arg961_1
del arg967_1
del arg969_1
buf984 = buf970; del buf970 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf983, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg971_1, (1152, 1152), (1, 1152), 0), out=buf984)
del arg971_1
buf985 = reinterpret_tensor(buf955, (8192, 1152), (1152, 1), 0); del buf955 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf983, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg975_1, (1152, 1152), (1, 1152), 0), out=buf985)
del arg975_1
buf986 = reinterpret_tensor(buf946, (8192, 1152), (1152, 1), 0); del buf946 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf983, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg979_1, (1152, 1152), (1, 1152), 0), out=buf986)
del arg979_1
del buf983
buf987 = reinterpret_tensor(buf984, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf984 # reuse
# Source Nodes: [hidden_states_441], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf987, arg972_1, arg974_1, 9437184, grid=grid(9437184), stream=stream0)
del arg972_1
del arg974_1
buf988 = reinterpret_tensor(buf985, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf985 # reuse
# Source Nodes: [hidden_states_441], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf988, arg976_1, arg978_1, 9437184, grid=grid(9437184), stream=stream0)
del arg976_1
del arg978_1
buf989 = reinterpret_tensor(buf986, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf986 # reuse
# Source Nodes: [hidden_states_441], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf989, arg980_1, arg982_1, 9437184, grid=grid(9437184), stream=stream0)
del arg980_1
del arg982_1
# Source Nodes: [hidden_states_441], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf990 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf987, buf988, buf989, None, False)
del buf987
buf991 = buf990[0]
del buf990
buf995 = reinterpret_tensor(buf989, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf989 # reuse
# Source Nodes: [hidden_states_442], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf991, buf995, 9437184, grid=grid(9437184), stream=stream0)
buf996 = reinterpret_tensor(buf991, (8192, 1152), (1152, 1), 0); del buf991 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf995, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg983_1, (1152, 1152), (1, 1152), 0), out=buf996)
del arg983_1
buf997 = reinterpret_tensor(buf996, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf996 # reuse
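# ---------------------------------------------------------------------------
# Editor's note (illustrative, not part of the generated output): every linear
# layer in this trace is lowered to extern_kernels.fallback_mixed_mm, the
# extern fallback Inductor uses for a mixed-dtype matmul (floating-point
# activations against a differently-typed, e.g. int8, weight) when no fused
# Triton template is chosen. To the best of my understanding it is roughly
# equivalent to the sketch below, but treat the exact signature as an
# assumption rather than documented API.
import torch

def _fallback_mixed_mm_sketch(mat1, mat2, *, out):
    # upcast the second operand to the activation dtype, then a plain mm
    return torch.mm(mat1, mat2.to(mat1.dtype), out=out)
# ---------------------------------------------------------------------------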
# Source Nodes: [attn_output_23, hidden_states_446, hidden_states_447], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf997, arg970_1, buf6, arg15_1, arg17_1, arg984_1, arg986_1, buf979, 9437184, grid=grid(9437184), stream=stream0)
del arg984_1
del arg986_1
buf998 = reinterpret_tensor(buf979, (8192, 1152), (1152, 1), 0); del buf979 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf997, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg987_1, (1152, 1152), (1, 1152), 0), out=buf998)
del arg987_1
buf1000 = reinterpret_tensor(buf963, (600, 1152), (1152, 1), 0); del buf963 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf999, reinterpret_tensor(arg991_1, (1152, 1152), (1, 1152), 0), out=buf1000)
del arg991_1
buf1002 = buf999; del buf999 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf1001, reinterpret_tensor(arg995_1, (1152, 1152), (1, 1152), 0), out=buf1002)
del arg995_1
buf1003 = reinterpret_tensor(buf998, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf998 # reuse
# Source Nodes: [hidden_states_448], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf1003, arg988_1, arg990_1, 9437184, grid=grid(9437184), stream=stream0)
del arg988_1
del arg990_1
buf1004 = reinterpret_tensor(buf1000, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf1000 # reuse
# Source Nodes: [hidden_states_448], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf1004, arg992_1, arg994_1, 691200, grid=grid(691200), stream=stream0)
del arg992_1
del arg994_1
buf1005 = reinterpret_tensor(buf1002, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf1002 # reuse
# Source Nodes: [hidden_states_448], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf1005, arg996_1, arg998_1, 691200, grid=grid(691200), stream=stream0)
del arg996_1
del arg998_1
# Source Nodes: [hidden_states_448], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf1007 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf1003, buf1004, buf1005, reinterpret_tensor(buf1006, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf1008 = buf1007[0]
del buf1007
buf1012 = reinterpret_tensor(buf1003, (8192, 1152), (1152, 1), 0); del buf1003 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1008, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg999_1, (1152, 1152), (1, 1152), 0), out=buf1012)
del arg999_1
buf1016 = reinterpret_tensor(buf1008, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf1008 # reuse
buf1017 = buf1016; del buf1016 # reuse
# Source Nodes: [add_190, hidden_states_453, hidden_states_454, mul_98, norm_hidden_states_94, norm_hidden_states_95], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_15.run(buf1017, buf1012, arg1000_1, arg1002_1, buf997, arg970_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
buf1018 = reinterpret_tensor(buf977, (8192, 4608), (4608, 1), 0); del buf977 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1017, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1003_1, (1152, 4608), (1, 1152), 0), out=buf1018)
del arg1003_1
buf1019 = reinterpret_tensor(buf1018, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf1018 # reuse
# Source Nodes: [hidden_states_456], Original ATen: [aten.gelu]
triton_poi_fused_gelu_16.run(buf1019, arg1004_1, arg1006_1, 37748736, grid=grid(37748736), stream=stream0)
del arg1004_1
del arg1006_1
buf1020 = reinterpret_tensor(buf1017, (8192, 1152), (1152, 1), 0); del buf1017 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1019, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg1007_1, (4608, 1152), (1, 4608), 0), out=buf1020)
del arg1007_1
buf1021 = reinterpret_tensor(buf1020, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf1020 # reuse
buf1025 = reinterpret_tensor(buf995, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf995 # reuse
# Source Nodes: [add_194, ff_output_23, hidden_states_453, hidden_states_454, hidden_states_459, mul_100, norm_hidden_states_96, norm_hidden_states_97], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_17.run(buf1021, arg970_1, buf6, arg15_1, arg17_1, arg1008_1, arg1010_1, buf1012, arg1000_1, arg1002_1, buf997, arg1011_1, buf1025, 8192, 1152, grid=grid(8192), stream=stream0)
del arg1000_1
del arg1002_1
del arg1008_1
del arg1010_1
del arg970_1
buf1026 = reinterpret_tensor(buf997, (8192, 1152), (1152, 1), 0); del buf997 # reuse
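# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the generated output): the
# feed-forward sub-block above expands 1152 -> 4608, applies the fused GELU
# kernel (37748736 = 2 x 4096 x 4608 elements), and projects back 4608 -> 1152.
# Minimal eager-mode sketch; the bias/scale tensors consumed by
# triton_poi_fused_gelu_16 and the exact GELU approximation are not visible
# here, so the names below are placeholders.
import torch
import torch.nn.functional as F

def _feed_forward_sketch(x, w1, b1, w2, b2):
    # x: (2, 4096, 1152); w1: (1152, 4608); w2: (4608, 1152)
    h = F.gelu(x @ w1 + b1)   # bias add is fused into the generated GELU kernel
    return h @ w2 + b2
# ---------------------------------------------------------------------------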
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1025, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1012_1, (1152, 1152), (1, 1152), 0), out=buf1026)
del arg1012_1
buf1027 = buf1012; del buf1012 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1025, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1016_1, (1152, 1152), (1, 1152), 0), out=buf1027)
del arg1016_1
buf1028 = reinterpret_tensor(buf988, (8192, 1152), (1152, 1), 0); del buf988 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1025, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1020_1, (1152, 1152), (1, 1152), 0), out=buf1028)
del arg1020_1
del buf1025
buf1029 = reinterpret_tensor(buf1026, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf1026 # reuse
# Source Nodes: [hidden_states_460], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf1029, arg1013_1, arg1015_1, 9437184, grid=grid(9437184), stream=stream0)
del arg1013_1
del arg1015_1
buf1030 = reinterpret_tensor(buf1027, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf1027 # reuse
# Source Nodes: [hidden_states_460], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf1030, arg1017_1, arg1019_1, 9437184, grid=grid(9437184), stream=stream0)
del arg1017_1
del arg1019_1
buf1031 = reinterpret_tensor(buf1028, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf1028 # reuse
# Source Nodes: [hidden_states_460], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf1031, arg1021_1, arg1023_1, 9437184, grid=grid(9437184), stream=stream0)
del arg1021_1
del arg1023_1
# Source Nodes: [hidden_states_460], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf1032 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf1029, buf1030, buf1031, None, False)
del buf1029
buf1033 = buf1032[0]
del buf1032
buf1037 = reinterpret_tensor(buf1031, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf1031 # reuse
# Source Nodes: [hidden_states_461], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf1033, buf1037, 9437184, grid=grid(9437184), stream=stream0)
buf1038 = reinterpret_tensor(buf1033, (8192, 1152), (1152, 1), 0); del buf1033 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1037, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1024_1, (1152, 1152), (1, 1152), 0), out=buf1038)
del arg1024_1
buf1039 = reinterpret_tensor(buf1038, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf1038 # reuse
# Source Nodes: [attn_output_24, hidden_states_465, hidden_states_466], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf1039, arg1011_1, buf6, arg15_1, arg17_1, arg1025_1, arg1027_1, buf1021, 9437184, grid=grid(9437184), stream=stream0)
del arg1025_1
del arg1027_1
buf1040 = reinterpret_tensor(buf1021, (8192, 1152), (1152, 1), 0); del buf1021 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1039, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1028_1, (1152, 1152), (1, 1152), 0), out=buf1040)
del arg1028_1
buf1041 = reinterpret_tensor(buf1005, (600, 1152), (1152, 1), 0); del buf1005 # reuse
buf1043 = reinterpret_tensor(buf1004, (600, 1152), (1152, 1), 0); del buf1004 # reuse
buf1083 = buf1001; del buf1001 # reuse
buf1085 = reinterpret_tensor(buf962, (600, 1152), (1152, 1), 0); del buf962 # reuse
# Source Nodes: [key_102, key_98, value_102, value_98], Original ATen: [aten.mm]
triton_poi_fused_mm_13.run(buf32, buf1041, buf1043, buf1083, buf1085, 691200, grid=grid(691200), stream=stream0)
buf1042 = buf959; del buf959 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf1041, reinterpret_tensor(arg1032_1, (1152, 1152), (1, 1152), 0), out=buf1042)
del arg1032_1
buf1044 = buf1041; del buf1041 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf1043, reinterpret_tensor(arg1036_1, (1152, 1152), (1, 1152), 0), out=buf1044)
del arg1036_1
del buf1043
buf1045 = reinterpret_tensor(buf1040, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf1040 # reuse
# Source Nodes: [hidden_states_467], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf1045, arg1029_1, arg1031_1, 9437184, grid=grid(9437184), stream=stream0)
del arg1029_1
del arg1031_1
buf1046 = reinterpret_tensor(buf1042, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf1042 # reuse
# Source Nodes: [hidden_states_467], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf1046, arg1033_1, arg1035_1, 691200, grid=grid(691200), stream=stream0)
del arg1033_1
del arg1035_1
buf1047 = reinterpret_tensor(buf1044, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf1044 # reuse
# Source Nodes: [hidden_states_467], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf1047, arg1037_1, arg1039_1, 691200, grid=grid(691200), stream=stream0)
del arg1037_1
del arg1039_1
buf1048 = buf1006; del buf1006 # reuse
buf1090 = buf964; del buf964 # reuse
# Source Nodes: [hidden_states_467, hidden_states_486], Original ATen: [aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_14.run(arg0_1, buf1048, buf1090, 9728, grid=grid(9728), stream=stream0)
# Source Nodes: [hidden_states_467], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf1049 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf1045, buf1046, buf1047, reinterpret_tensor(buf1048, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf1050 = buf1049[0]
del buf1049
buf1054 = reinterpret_tensor(buf1045, (8192, 1152), (1152, 1), 0); del buf1045 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1050, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1040_1, (1152, 1152), (1, 1152), 0), out=buf1054)
del arg1040_1
buf1058 = reinterpret_tensor(buf1050, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf1050 # reuse
buf1059 = buf1058; del buf1058 # reuse
# Source Nodes: [add_198, hidden_states_472, hidden_states_473, mul_102, norm_hidden_states_98, norm_hidden_states_99], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_15.run(buf1059, buf1054, arg1041_1, arg1043_1, buf1039, arg1011_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
buf1060 = reinterpret_tensor(buf1019, (8192, 4608), (4608, 1), 0); del buf1019 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1059, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1044_1, (1152, 4608), (1, 1152), 0), out=buf1060)
del arg1044_1
buf1061 = reinterpret_tensor(buf1060, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf1060 # reuse
# Source Nodes: [hidden_states_475], Original ATen: [aten.gelu]
triton_poi_fused_gelu_16.run(buf1061, arg1045_1, arg1047_1, 37748736, grid=grid(37748736), stream=stream0)
del arg1045_1
del arg1047_1
buf1062 = reinterpret_tensor(buf1059, (8192, 1152), (1152, 1), 0); del buf1059 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1061, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg1048_1, (4608, 1152), (1, 4608), 0), out=buf1062)
del arg1048_1
buf1063 = reinterpret_tensor(buf1062, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf1062 # reuse
buf1067 = reinterpret_tensor(buf1037, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf1037 # reuse
# Source Nodes: [add_202, ff_output_24, hidden_states_472, hidden_states_473, hidden_states_478, mul_104, norm_hidden_states_100, norm_hidden_states_101], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_17.run(buf1063, arg1011_1, buf6, arg15_1, arg17_1, arg1049_1, arg1051_1, buf1054, arg1041_1, arg1043_1, buf1039, arg1052_1, buf1067, 8192, 1152, grid=grid(8192), stream=stream0)
del arg1011_1
del arg1041_1
del arg1043_1
del arg1049_1
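# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the generated output): the
# triton_red_fused_add_div_mul_native_layer_norm kernels above collapse the
# residual update and the LayerNorm plus scale/shift modulation of the next
# sub-block into one reduction pass over the 1152-wide rows (8192 = 2 x 4096
# rows). The exact wiring of the add/div/mul operands (gates, timestep
# modulation, dequant scales) is not recoverable from the kernel name alone,
# so this is shape-level only and the gate/shift/scale inputs are assumptions.
import torch
import torch.nn.functional as F

def _fused_residual_norm_sketch(residual, sub_out, gate, shift, scale, eps=1e-6):
    # residual, sub_out: (2, 4096, 1152); gate/shift/scale broadcast over rows
    hidden = residual + gate * sub_out                          # residual update
    normed = F.layer_norm(hidden, hidden.shape[-1:], eps=eps)   # per-row LayerNorm
    return hidden, normed * (1 + scale) + shift                 # input to the next sub-block
# ---------------------------------------------------------------------------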
del arg1051_1
buf1068 = buf1054; del buf1054 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1067, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1053_1, (1152, 1152), (1, 1152), 0), out=buf1068)
del arg1053_1
buf1069 = reinterpret_tensor(buf1039, (8192, 1152), (1152, 1), 0); del buf1039 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1067, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1057_1, (1152, 1152), (1, 1152), 0), out=buf1069)
del arg1057_1
buf1070 = reinterpret_tensor(buf1030, (8192, 1152), (1152, 1), 0); del buf1030 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1067, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1061_1, (1152, 1152), (1, 1152), 0), out=buf1070)
del arg1061_1
del buf1067
buf1071 = reinterpret_tensor(buf1068, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf1068 # reuse
# Source Nodes: [hidden_states_479], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf1071, arg1054_1, arg1056_1, 9437184, grid=grid(9437184), stream=stream0)
del arg1054_1
del arg1056_1
buf1072 = reinterpret_tensor(buf1069, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf1069 # reuse
# Source Nodes: [hidden_states_479], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf1072, arg1058_1, arg1060_1, 9437184, grid=grid(9437184), stream=stream0)
del arg1058_1
del arg1060_1
buf1073 = reinterpret_tensor(buf1070, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf1070 # reuse
# Source Nodes: [hidden_states_479], Original ATen: [aten._scaled_dot_product_cudnn_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf1073, arg1062_1, arg1064_1, 9437184, grid=grid(9437184), stream=stream0)
del arg1062_1
del arg1064_1
# Source Nodes: [hidden_states_479], Original ATen: [aten._scaled_dot_product_cudnn_attention]
buf1074 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf1071, buf1072, buf1073, None, False)
del buf1071
buf1075 = buf1074[0]
del buf1074
buf1079 = reinterpret_tensor(buf1073, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf1073 # reuse
# Source Nodes: [hidden_states_480], Original ATen: [aten.clone]
triton_poi_fused_clone_9.run(buf1075, buf1079, 9437184, grid=grid(9437184), stream=stream0)
buf1080 = reinterpret_tensor(buf1075, (8192, 1152), (1152, 1), 0); del buf1075 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1079, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1065_1, (1152, 1152), (1, 1152), 0), out=buf1080)
del arg1065_1
buf1081 = reinterpret_tensor(buf1080, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf1080 # reuse
# Source Nodes: [attn_output_25, hidden_states_484, hidden_states_485], Original ATen: [aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_mul_18.run(buf1081, arg1052_1, buf6, arg15_1, arg17_1, arg1066_1, arg1068_1, buf1063, 9437184, grid=grid(9437184), stream=stream0)
del arg1066_1
del arg1068_1
buf1082 = reinterpret_tensor(buf1063, (8192, 1152), (1152, 1), 0); del buf1063 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1081, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1069_1, (1152, 1152), (1, 1152), 0), out=buf1082)
del arg1069_1
buf1084 = reinterpret_tensor(buf1047, (600, 1152), (1152, 1), 0); del buf1047 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf1083, reinterpret_tensor(arg1073_1, (1152, 1152), (1, 1152), 0), out=buf1084)
del arg1073_1
buf1086 = buf1083; del buf1083 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(buf1085, reinterpret_tensor(arg1077_1, (1152, 1152), (1, 1152), 0), out=buf1086)
del arg1077_1
buf1087 = reinterpret_tensor(buf1082, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf1082 # reuse
# Source Nodes: [hidden_states_486], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf1087, arg1070_1, arg1072_1, 9437184, grid=grid(9437184), stream=stream0)
del arg1070_1
del arg1072_1
buf1088 = reinterpret_tensor(buf1084, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf1084 # reuse
# Source Nodes: [hidden_states_486], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf1088, arg1074_1, arg1076_1, 691200, grid=grid(691200), stream=stream0)
del arg1074_1
del arg1076_1
buf1089 = reinterpret_tensor(buf1086, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf1086 # reuse
# Source Nodes: [hidden_states_486], Original ATen: [aten._scaled_dot_product_efficient_attention]
triton_poi_fused_add_12.run(buf1089, arg1078_1, arg1080_1, 691200, grid=grid(691200), stream=stream0)
del arg1078_1
del arg1080_1
# Source Nodes: [hidden_states_486], Original ATen: [aten._scaled_dot_product_efficient_attention]
buf1091 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf1087, buf1088, buf1089, reinterpret_tensor(buf1090, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
buf1092 = buf1091[0]
del buf1091
buf1096 = reinterpret_tensor(buf1087, (8192, 1152), (1152, 1), 0); del buf1087 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1092, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1081_1, (1152, 1152), (1, 1152), 0), out=buf1096)
del arg1081_1
buf1100 = reinterpret_tensor(buf1092, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf1092 # reuse
buf1101 = buf1100; del buf1100 # reuse
# Source Nodes: [add_206, hidden_states_491, hidden_states_492, mul_106, norm_hidden_states_102, norm_hidden_states_103], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
triton_red_fused_add_div_mul_native_layer_norm_15.run(buf1101, buf1096, arg1082_1, arg1084_1, buf1081, arg1052_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
buf1102 = reinterpret_tensor(buf1061, (8192, 4608), (4608, 1), 0); del buf1061 # reuse
# Source Nodes: [], Original ATen: []
extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1101, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1085_1, (1152, 4608), (1, 1152), 0), out=buf1102)
del arg1085_1
buf1103 = reinterpret_tensor(buf1102, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf1102 # reuse
# Source Nodes: [hidden_states_494], Original ATen: [aten.gelu]
triton_poi_fused_gelu_16.run(buf1103, arg1086_1, arg1088_1, 37748736, grid=grid(37748736), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1086_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1088_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf1104 = reinterpret_tensor(buf1101, (8192, 1152), (1152, 1), 0); del buf1101 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1103, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg1089_1, (4608, 1152), (1, 4608), 0), out=buf1104) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1089_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf1105 = reinterpret_tensor(buf1104, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf1104 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf1109 = reinterpret_tensor(buf1079, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf1079 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [add_210, ff_output_25, hidden_states_491, hidden_states_492, hidden_states_497, mul_108, norm_hidden_states_104, norm_hidden_states_105], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_red_fused_add_div_mul_native_layer_norm_17.run(buf1105, arg1052_1, buf6, arg15_1, arg17_1, arg1090_1, arg1092_1, buf1096, arg1082_1, arg1084_1, buf1081, arg1093_1, buf1109, 8192, 1152, grid=grid(8192), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1052_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1082_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1084_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1090_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1092_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf1110 = buf1096; del buf1096 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1109, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1094_1, (1152, 1152), (1, 1152), 0), out=buf1110) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1094_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf1111 = reinterpret_tensor(buf1081, (8192, 1152), (1152, 1), 0); del buf1081 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1109, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1098_1, (1152, 1152), (1, 1152), 0), out=buf1111) V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1098_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf1112 = reinterpret_tensor(buf1072, (8192, 1152), (1152, 1), 0); del buf1072 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1109, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1102_1, (1152, 1152), (1, 1152), 0), out=buf1112) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1102_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf1109 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf1113 = reinterpret_tensor(buf1110, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf1110 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_498], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf1113, arg1095_1, arg1097_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1095_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1097_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf1114 = reinterpret_tensor(buf1111, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf1111 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_498], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf1114, arg1099_1, arg1101_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1099_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1101_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf1115 = reinterpret_tensor(buf1112, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf1112 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_498], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf1115, arg1103_1, arg1105_1, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1103_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1105_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_498], Original ATen: [aten._scaled_dot_product_cudnn_attention] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf1116 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf1113, buf1114, buf1115, None, False) V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf1113 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf1117 = buf1116[0] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del buf1116 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf1121 = reinterpret_tensor(buf1115, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf1115 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [hidden_states_499], Original ATen: [aten.clone] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_clone_9.run(buf1117, buf1121, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf1122 = reinterpret_tensor(buf1117, (8192, 1152), (1152, 1), 0); del buf1117 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1121, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1106_1, (1152, 1152), (1, 1152), 0), out=buf1122) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1106_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf1123 = reinterpret_tensor(buf1122, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf1122 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [attn_output_26, hidden_states_503, hidden_states_504], Original ATen: [aten.add, aten.div, aten.mul] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] triton_poi_fused_add_div_mul_18.run(buf1123, arg1093_1, buf6, arg15_1, arg17_1, arg1107_1, arg1109_1, buf1105, 9437184, grid=grid(9437184), stream=stream0) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1107_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1109_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf1124 = reinterpret_tensor(buf1105, (8192, 1152), (1152, 1), 0); del buf1105 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] # Source Nodes: [], Original ATen: [] V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1123, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1110_1, (1152, 1152), (1, 1152), 0), out=buf1124) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] del arg1110_1 V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf1125 = reinterpret_tensor(buf1089, (600, 1152), (1152, 1), 0); del buf1089 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf1127 = reinterpret_tensor(buf1088, (600, 1152), (1152, 1), 0); del buf1088 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf1167 = buf1085; del buf1085 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] buf1169 = reinterpret_tensor(buf1046, (600, 1152), (1152, 1), 0); del buf1046 # reuse V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] 
    # Source Nodes: [key_106, key_110, value_106, value_110], Original ATen: [aten.mm]
    triton_poi_fused_mm_13.run(buf32, buf1125, buf1127, buf1167, buf1169, 691200, grid=grid(691200), stream=stream0)
    buf1126 = reinterpret_tensor(buf32, (600, 1152), (1152, 1), 0); del buf32  # reuse
    # Source Nodes: [], Original ATen: []
    extern_kernels.fallback_mixed_mm(buf1125, reinterpret_tensor(arg1114_1, (1152, 1152), (1, 1152), 0), out=buf1126)
    del arg1114_1
    buf1128 = buf1125; del buf1125  # reuse
    # Source Nodes: [], Original ATen: []
    extern_kernels.fallback_mixed_mm(buf1127, reinterpret_tensor(arg1118_1, (1152, 1152), (1, 1152), 0), out=buf1128)
    del arg1118_1
    del buf1127
    buf1129 = reinterpret_tensor(buf1124, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf1124  # reuse
    # Source Nodes: [hidden_states_505], Original ATen: [aten._scaled_dot_product_efficient_attention]
    triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf1129, arg1111_1, arg1113_1, 9437184, grid=grid(9437184), stream=stream0)
    del arg1111_1
    del arg1113_1
    buf1130 = reinterpret_tensor(buf1126, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf1126  # reuse
    # Source Nodes: [hidden_states_505], Original ATen: [aten._scaled_dot_product_efficient_attention]
    triton_poi_fused_add_12.run(buf1130, arg1115_1, arg1117_1, 691200, grid=grid(691200), stream=stream0)
    del arg1115_1
    del arg1117_1
    buf1131 = reinterpret_tensor(buf1128, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf1128  # reuse
    # Source Nodes: [hidden_states_505], Original ATen: [aten._scaled_dot_product_efficient_attention]
    triton_poi_fused_add_12.run(buf1131, arg1119_1, arg1121_1, 691200, grid=grid(691200), stream=stream0)
    del arg1119_1
    del arg1121_1
    buf1132 = buf1090; del buf1090  # reuse
    buf1174 = buf1048; del buf1048  # reuse
    # Source Nodes: [hidden_states_505, hidden_states_524], Original ATen: [aten.constant_pad_nd]
    triton_poi_fused_constant_pad_nd_14.run(arg0_1, buf1132, buf1174, 9728, grid=grid(9728), stream=stream0)
    del arg0_1
    # Source Nodes: [hidden_states_505], Original ATen: [aten._scaled_dot_product_efficient_attention]
    buf1133 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf1129, buf1130, buf1131, reinterpret_tensor(buf1132, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
    del buf1130
    del buf1132
    buf1134 = buf1133[0]
    del buf1133
    buf1138 = reinterpret_tensor(buf1129, (8192, 1152), (1152, 1), 0); del buf1129  # reuse
    # Source Nodes: [], Original ATen: []
    extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1134, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1122_1, (1152, 1152), (1, 1152), 0), out=buf1138)
    del arg1122_1
    buf1142 = reinterpret_tensor(buf1134, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf1134  # reuse
    buf1143 = buf1142; del buf1142  # reuse
    # Source Nodes: [add_214, hidden_states_510, hidden_states_511, mul_110, norm_hidden_states_106, norm_hidden_states_107], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
    triton_red_fused_add_div_mul_native_layer_norm_15.run(buf1143, buf1138, arg1123_1, arg1125_1, buf1123, arg1093_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
    buf1144 = reinterpret_tensor(buf1103, (8192, 4608), (4608, 1), 0); del buf1103  # reuse
    # Source Nodes: [], Original ATen: []
    extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1143, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1126_1, (1152, 4608), (1, 1152), 0), out=buf1144)
    del arg1126_1
    buf1145 = reinterpret_tensor(buf1144, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf1144  # reuse
    # Source Nodes: [hidden_states_513], Original ATen: [aten.gelu]
    triton_poi_fused_gelu_16.run(buf1145, arg1127_1, arg1129_1, 37748736, grid=grid(37748736), stream=stream0)
    del arg1127_1
    del arg1129_1
    buf1146 = reinterpret_tensor(buf1143, (8192, 1152), (1152, 1), 0); del buf1143  # reuse
    # Source Nodes: [], Original ATen: []
    extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1145, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg1130_1, (4608, 1152), (1, 4608), 0), out=buf1146)
    del arg1130_1
    buf1147 = reinterpret_tensor(buf1146, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf1146  # reuse
    buf1151 = reinterpret_tensor(buf1121, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf1121  # reuse
    # Source Nodes: [add_218, ff_output_26, hidden_states_510, hidden_states_511, hidden_states_516, mul_112, norm_hidden_states_108, norm_hidden_states_109], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
    triton_red_fused_add_div_mul_native_layer_norm_17.run(buf1147, arg1093_1, buf6, arg15_1, arg17_1, arg1131_1, arg1133_1, buf1138, arg1123_1, arg1125_1, buf1123, arg1134_1, buf1151, 8192, 1152, grid=grid(8192), stream=stream0)
    del arg1093_1
    del arg1123_1
    del arg1125_1
    del arg1131_1
    del arg1133_1
    buf1152 = buf1138; del buf1138  # reuse
    # Source Nodes: [], Original ATen: []
    extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1151, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1135_1, (1152, 1152), (1, 1152), 0), out=buf1152)
    del arg1135_1
    buf1153 = reinterpret_tensor(buf1123, (8192, 1152), (1152, 1), 0); del buf1123  # reuse
    # Source Nodes: [], Original ATen: []
    extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1151, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1139_1, (1152, 1152), (1, 1152), 0), out=buf1153)
    del arg1139_1
    buf1154 = reinterpret_tensor(buf1114, (8192, 1152), (1152, 1), 0); del buf1114  # reuse
    # Source Nodes: [], Original ATen: []
    extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1151, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1143_1, (1152, 1152), (1, 1152), 0), out=buf1154)
    del arg1143_1
    del buf1151
    buf1155 = reinterpret_tensor(buf1152, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf1152  # reuse
    # Source Nodes: [hidden_states_517], Original ATen: [aten._scaled_dot_product_cudnn_attention]
    triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf1155, arg1136_1, arg1138_1, 9437184, grid=grid(9437184), stream=stream0)
    del arg1136_1
    del arg1138_1
    buf1156 = reinterpret_tensor(buf1153, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf1153  # reuse
    # Source Nodes: [hidden_states_517], Original ATen: [aten._scaled_dot_product_cudnn_attention]
    triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf1156, arg1140_1, arg1142_1, 9437184, grid=grid(9437184), stream=stream0)
    del arg1140_1
    del arg1142_1
    buf1157 = reinterpret_tensor(buf1154, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf1154  # reuse
    # Source Nodes: [hidden_states_517], Original ATen: [aten._scaled_dot_product_cudnn_attention]
    triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf1157, arg1144_1, arg1146_1, 9437184, grid=grid(9437184), stream=stream0)
    del arg1144_1
    del arg1146_1
    # Source Nodes: [hidden_states_517], Original ATen: [aten._scaled_dot_product_cudnn_attention]
    buf1158 = torch.ops.aten._scaled_dot_product_cudnn_attention.default(buf1155, buf1156, buf1157, None, False)
    del buf1155
    del buf1156
    buf1159 = buf1158[0]
    del buf1158
    buf1163 = reinterpret_tensor(buf1157, (2, 4096, 16, 72), (4718592, 1152, 72, 1), 0); del buf1157  # reuse
    # Source Nodes: [hidden_states_518], Original ATen: [aten.clone]
    triton_poi_fused_clone_9.run(buf1159, buf1163, 9437184, grid=grid(9437184), stream=stream0)
    buf1164 = reinterpret_tensor(buf1159, (8192, 1152), (1152, 1), 0); del buf1159  # reuse
    # Source Nodes: [], Original ATen: []
    extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1163, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1147_1, (1152, 1152), (1, 1152), 0), out=buf1164)
    del arg1147_1
    buf1165 = reinterpret_tensor(buf1164, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf1164  # reuse
    # Source Nodes: [attn_output_27, hidden_states_522, hidden_states_523], Original ATen: [aten.add, aten.div, aten.mul]
    triton_poi_fused_add_div_mul_18.run(buf1165, arg1134_1, buf6, arg15_1, arg17_1, arg1148_1, arg1150_1, buf1147, 9437184, grid=grid(9437184), stream=stream0)
    del arg1148_1
    del arg1150_1
    buf1166 = reinterpret_tensor(buf1147, (8192, 1152), (1152, 1), 0); del buf1147  # reuse
    # Source Nodes: [], Original ATen: []
    extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1165, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1151_1, (1152, 1152), (1, 1152), 0), out=buf1166)
    del arg1151_1
    buf1168 = reinterpret_tensor(buf1131, (600, 1152), (1152, 1), 0); del buf1131  # reuse
    # Source Nodes: [], Original ATen: []
    extern_kernels.fallback_mixed_mm(buf1167, reinterpret_tensor(arg1155_1, (1152, 1152), (1, 1152), 0), out=buf1168)
    del arg1155_1
    buf1170 = buf1167; del buf1167  # reuse
    # Source Nodes: [], Original ATen: []
    extern_kernels.fallback_mixed_mm(buf1169, reinterpret_tensor(arg1159_1, (1152, 1152), (1, 1152), 0), out=buf1170)
    del arg1159_1
    del buf1169
    buf1171 = reinterpret_tensor(buf1166, (2, 16, 4096, 72), (4718592, 72, 1152, 1), 0); del buf1166  # reuse
    # Source Nodes: [hidden_states_524], Original ATen: [aten._scaled_dot_product_efficient_attention]
    triton_poi_fused__scaled_dot_product_cudnn_attention_8.run(buf1171, arg1152_1, arg1154_1, 9437184, grid=grid(9437184), stream=stream0)
    del arg1152_1
    del arg1154_1
    buf1172 = reinterpret_tensor(buf1168, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf1168  # reuse
    # Source Nodes: [hidden_states_524], Original ATen: [aten._scaled_dot_product_efficient_attention]
    triton_poi_fused_add_12.run(buf1172, arg1156_1, arg1158_1, 691200, grid=grid(691200), stream=stream0)
    del arg1156_1
    del arg1158_1
    buf1173 = reinterpret_tensor(buf1170, (2, 16, 300, 72), (345600, 72, 1152, 1), 0); del buf1170  # reuse
    # Source Nodes: [hidden_states_524], Original ATen: [aten._scaled_dot_product_efficient_attention]
    triton_poi_fused_add_12.run(buf1173, arg1160_1, arg1162_1, 691200, grid=grid(691200), stream=stream0)
    del arg1160_1
    del arg1162_1
    # Source Nodes: [hidden_states_524], Original ATen: [aten._scaled_dot_product_efficient_attention]
    buf1175 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf1171, buf1172, buf1173, reinterpret_tensor(buf1174, (2, 16, 4096, 300), (4864, 304, 0, 1), 0), False)
    del buf1172
    del buf1173
    del buf1174
    buf1176 = buf1175[0]
    del buf1175
    buf1180 = reinterpret_tensor(buf1171, (8192, 1152), (1152, 1), 0); del buf1171  # reuse
    # Source Nodes: [], Original ATen: []
    extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1176, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1163_1, (1152, 1152), (1, 1152), 0), out=buf1180)
    del arg1163_1
    buf1184 = reinterpret_tensor(buf1176, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf1176  # reuse
    buf1185 = buf1184; del buf1184  # reuse
    # Source Nodes: [add_222, hidden_states_529, hidden_states_530, mul_114, norm_hidden_states_110, norm_hidden_states_111], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
    triton_red_fused_add_div_mul_native_layer_norm_15.run(buf1185, buf1180, arg1164_1, arg1166_1, buf1165, arg1134_1, buf6, arg15_1, arg17_1, 8192, 1152, grid=grid(8192), stream=stream0)
    buf1186 = reinterpret_tensor(buf1145, (8192, 4608), (4608, 1), 0); del buf1145  # reuse
    # Source Nodes: [], Original ATen: []
    extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1185, (8192, 1152), (1152, 1), 0), reinterpret_tensor(arg1167_1, (1152, 4608), (1, 1152), 0), out=buf1186)
    del arg1167_1
    buf1187 = reinterpret_tensor(buf1186, (2, 4096, 4608), (18874368, 4608, 1), 0); del buf1186  # reuse
    # Source Nodes: [hidden_states_532], Original ATen: [aten.gelu]
    triton_poi_fused_gelu_16.run(buf1187, arg1168_1, arg1170_1, 37748736, grid=grid(37748736), stream=stream0)
    del arg1168_1
    del arg1170_1
    buf1188 = reinterpret_tensor(buf1185, (8192, 1152), (1152, 1), 0); del buf1185  # reuse
    # Source Nodes: [], Original ATen: []
    extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1187, (8192, 4608), (4608, 1), 0), reinterpret_tensor(arg1171_1, (4608, 1152), (1, 4608), 0), out=buf1188)
    del arg1171_1
    del buf1187
    buf1189 = reinterpret_tensor(buf1188, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf1188  # reuse
    buf1193 = reinterpret_tensor(buf1163, (2, 4096, 1152), (4718592, 1152, 1), 0); del buf1163  # reuse
    # Source Nodes: [add_226, ff_output_27, hidden_states_529, hidden_states_530, hidden_states_535, hidden_states_536, hidden_states_537, mul_116], Original ATen: [aten.add, aten.div, aten.mul, aten.native_layer_norm]
    triton_red_fused_add_div_mul_native_layer_norm_19.run(buf1189, arg1134_1, buf6, arg15_1, arg17_1, arg1172_1, arg1174_1, buf1180, arg1164_1, arg1166_1, buf1165, arg1175_1, buf4, arg11_1, arg13_1, buf1193, 8192, 1152, grid=grid(8192), stream=stream0)
    del arg1134_1
    del arg1164_1
    del arg1166_1
    del arg1172_1
    del arg1174_1
    del arg1175_1
    del arg11_1
    del arg13_1
    del arg15_1
    del arg17_1
    del buf1165
    del buf1180
    del buf1189
    del buf4
    del buf6
    buf1194 = empty_strided_cuda((8192, 32), (32, 1), torch.bfloat16)
    # Source Nodes: [hidden_states_538], Original ATen: [aten.mm]
    triton_tem_fused_mm_20.run(buf1193, arg1176_1, buf1194, grid=torch._inductor.kernel.mm_common.mm_grid(8192, 32, meta5), stream=stream0)
    del arg1176_1
    del buf1193
    buf1195 = empty_strided_cuda((2, 8, 64, 2, 64, 2), (131072, 16384, 256, 128, 2, 1), torch.bfloat16)
    # Source Nodes: [output], Original ATen: [aten.clone]
    triton_poi_fused_clone_21.run(buf1194, arg1177_1, arg1179_1, buf1195, 16, 16384, grid=grid(16, 16384), stream=stream0)
    del arg1177_1
    del arg1179_1
    del buf1194
    return (reinterpret_tensor(buf1195, (2, 8, 128, 128), (131072, 16384, 128, 1), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((2, 300), (300, 1), device='cuda:0', dtype=torch.int64)
    arg1_1 = rand_strided((2, 4, 128, 128), (65536, 16384, 128, 1), device='cuda:0', dtype=torch.bfloat16)
    arg2_1 = rand_strided((1152, 4, 2, 2), (16, 1, 8, 4), device='cuda:0', dtype=torch.bfloat16)
    arg3_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg4_1 = rand_strided((1, 4096, 1152), (4718592, 1152, 1), device='cuda:0', dtype=torch.bfloat16)
    arg5_1 = rand_strided((2, ), (0, ), device='cuda:0', dtype=torch.int64)
    arg6_1 = rand_strided((1152, 256), (256, 1), device='cuda:0', dtype=torch.int8)
    arg7_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg8_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg9_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg10_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg11_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg12_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg13_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg14_1 = rand_strided((6912, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg15_1 = rand_strided((6912, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg16_1 = rand_strided((6912, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg17_1 = rand_strided((6912, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg18_1 = rand_strided((1152, 4096), (4096, 1), device='cuda:0', dtype=torch.int8)
    arg19_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg20_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg21_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg22_1 = rand_strided((2, 300, 4096), (1228800, 4096, 1), device='cuda:0', dtype=torch.bfloat16)
    arg23_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg24_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg25_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg26_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg27_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16)
    arg28_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg29_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg30_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg31_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg32_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg33_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg34_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg35_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg36_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg37_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg38_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg39_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg40_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg41_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg42_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg43_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg44_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg45_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg46_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg47_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg48_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg49_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg50_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg51_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg52_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg53_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg54_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg55_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg56_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg57_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg58_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg59_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg60_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg61_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg62_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg63_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg64_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8)
    arg65_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg66_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg67_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg68_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16)
    arg69_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg70_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg71_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg72_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg73_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg74_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg75_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg76_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg77_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg78_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg79_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg80_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg81_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg82_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg83_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg84_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg85_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg86_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg87_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg88_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg89_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg90_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg91_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg92_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg93_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg94_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg95_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg96_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg97_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg98_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg99_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg100_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg101_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg102_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg103_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg104_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg105_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8)
    arg106_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg107_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg108_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg109_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16)
    arg110_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg111_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg112_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg113_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg114_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg115_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg116_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg117_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg118_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg119_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg120_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg121_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg122_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg123_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg124_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg125_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg126_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg127_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg128_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg129_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg130_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg131_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg132_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg133_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg134_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg135_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg136_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg137_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg138_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg139_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg140_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg141_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg142_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg143_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg144_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg145_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg146_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8)
    arg147_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg148_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg149_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg150_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16)
    arg151_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg152_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg153_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg154_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg155_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg156_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg157_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
    arg158_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
    arg159_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
    arg160_1 =
rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg161_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg162_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg163_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg164_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg165_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg166_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg167_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg168_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg169_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg170_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg171_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg172_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg173_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg174_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg175_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg176_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg177_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg178_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg179_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg180_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg181_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] 
[0/0] [__output_code] arg182_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg183_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg184_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg185_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg186_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg187_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg188_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg189_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg190_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg191_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg192_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg193_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg194_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg195_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg196_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg197_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg198_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg199_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg200_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg201_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg202_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg203_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 
07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg204_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg205_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg206_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg207_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg208_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg209_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg210_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg211_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg212_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg213_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg214_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg215_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg216_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg217_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg218_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg219_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg220_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg221_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg222_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg223_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg224_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg225_1 = rand_strided((4608, ), (1, ), 
device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg226_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg227_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg228_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg229_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg230_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg231_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg232_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg233_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg234_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg235_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg236_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg237_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg238_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg239_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg240_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg241_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg242_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg243_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg244_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg245_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg246_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] 
arg247_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg248_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg249_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg250_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg251_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg252_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg253_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg254_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg255_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg256_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg257_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg258_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg259_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg260_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg261_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg262_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg263_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg264_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg265_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg266_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg267_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg268_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] arg269_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg270_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg271_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg272_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg273_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg274_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg275_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg276_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg277_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg278_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg279_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg280_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg281_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg282_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg283_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg284_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg285_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg286_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg287_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg288_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg289_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg290_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', 
dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg291_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg292_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg293_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg294_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg295_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg296_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg297_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg298_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg299_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg300_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg301_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg302_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg303_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg304_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg305_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg306_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg307_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg308_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg309_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg310_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg311_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg312_1 = 
rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg313_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg314_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg315_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg316_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg317_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg318_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg319_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg320_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg321_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg322_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg323_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg324_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg325_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg326_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg327_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg328_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg329_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg330_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg331_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg332_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg333_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] arg334_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg335_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg336_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg337_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg338_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg339_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg340_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg341_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg342_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg343_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg344_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg345_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg346_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg347_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg348_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg349_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg350_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg351_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg352_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg353_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg354_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg355_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', 
dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg356_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg357_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg358_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg359_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg360_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg361_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg362_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg363_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg364_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg365_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg366_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg367_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg368_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg369_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg370_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg371_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg372_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg373_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg374_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg375_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg376_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg377_1 = 
rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg378_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg379_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg380_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg381_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg382_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg383_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg384_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg385_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg386_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg387_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg388_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg389_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg390_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg391_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg392_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg393_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg394_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg395_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg396_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg397_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg398_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] arg399_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg400_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg401_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg402_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg403_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg404_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg405_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg406_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg407_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg408_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg409_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg410_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg411_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg412_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg413_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg414_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg415_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg416_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg417_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg418_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg419_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg420_1 = rand_strided((1152, ), (1, ), device='cuda:0', 
dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg421_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg422_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg423_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg424_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg425_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg426_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg427_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg428_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg429_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg430_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg431_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg432_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg433_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg434_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg435_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg436_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg437_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg438_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg439_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg440_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg441_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg442_1 = 
rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg443_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg444_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg445_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg446_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg447_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg448_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg449_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg450_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg451_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg452_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg453_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg454_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg455_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg456_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg457_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg458_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg459_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg460_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg461_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg462_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg463_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg464_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg465_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg466_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg467_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg468_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg469_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg470_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg471_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg472_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64)
arg473_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg474_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8)
arg475_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg476_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg477_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg478_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16)
arg479_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg480_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg481_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg482_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg483_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg484_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg485_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg486_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg487_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg488_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg489_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg490_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg491_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg492_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg493_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg494_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg495_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg496_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg497_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg498_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg499_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg500_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg501_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg502_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg503_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg504_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg505_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg506_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg507_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg508_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg509_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg510_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg511_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg512_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg513_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64)
arg514_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg515_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8)
arg516_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg517_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg518_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg519_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16)
arg520_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg521_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg522_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg523_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg524_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg525_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg526_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg527_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg528_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg529_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg530_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg531_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg532_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg533_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg534_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg535_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg536_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg537_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg538_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg539_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg540_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg541_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg542_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg543_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg544_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg545_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg546_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg547_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg548_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg549_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg550_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg551_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg552_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg553_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg554_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64)
arg555_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg556_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8)
arg557_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg558_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg559_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg560_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16)
arg561_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg562_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg563_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg564_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg565_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg566_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg567_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg568_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg569_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg570_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg571_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg572_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg573_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg574_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg575_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg576_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg577_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg578_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg579_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg580_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg581_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg582_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg583_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg584_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg585_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg586_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg587_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg588_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg589_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg590_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg591_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg592_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg593_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg594_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg595_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64)
arg596_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg597_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8)
arg598_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg599_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg600_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg601_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16)
arg602_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg603_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg604_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg605_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg606_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg607_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg608_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg609_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg610_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg611_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg612_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg613_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg614_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg615_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg616_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg617_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg618_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg619_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg620_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg621_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg622_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg623_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg624_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg625_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg626_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg627_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg628_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg629_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg630_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg631_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg632_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg633_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg634_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg635_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg636_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64)
arg637_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg638_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8)
arg639_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg640_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg641_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg642_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16)
arg643_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg644_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg645_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg646_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg647_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg648_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg649_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg650_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg651_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg652_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg653_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg654_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg655_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg656_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg657_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg658_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg659_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg660_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg661_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg662_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg663_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg664_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg665_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg666_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg667_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg668_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg669_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg670_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg671_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg672_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg673_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg674_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg675_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg676_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg677_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64)
arg678_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg679_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8)
arg680_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg681_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg682_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg683_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16)
arg684_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg685_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg686_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg687_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg688_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg689_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg690_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg691_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg692_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg693_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg694_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg695_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg696_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg697_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg698_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg699_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg700_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg701_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg702_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg703_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg704_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg705_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg706_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg707_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg708_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg709_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg710_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg711_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg712_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg713_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg714_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg715_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg716_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg717_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg718_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64)
arg719_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg720_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8)
arg721_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg722_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg723_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg724_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16)
arg725_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg726_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg727_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg728_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg729_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg730_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg731_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg732_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg733_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg734_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg735_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg736_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg737_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg738_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg739_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg740_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg741_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg742_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg743_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg744_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg745_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg746_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg747_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg748_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg749_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg750_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg751_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg752_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg753_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg754_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg755_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg756_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg757_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg758_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg759_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64)
arg760_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg761_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8)
arg762_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg763_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg764_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg765_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16)
arg766_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg767_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg768_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg769_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg770_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg771_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg772_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg773_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg774_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg775_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg776_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg777_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg778_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg779_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg780_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg781_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg782_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg783_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg784_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg785_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg786_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg787_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg788_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg789_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg790_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg791_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg792_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg793_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg794_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg795_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg796_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg797_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg798_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg799_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg800_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64)
arg801_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg802_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8)
arg803_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg804_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg805_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg806_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16)
arg807_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg808_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg809_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg810_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg811_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg812_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg813_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg814_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg815_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg816_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg817_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg818_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg819_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg820_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg821_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg822_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg823_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg824_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg825_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg826_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg827_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg828_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg829_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg830_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg831_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg832_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg833_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg834_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg835_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg836_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg837_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg838_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg839_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg840_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg841_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64)
arg842_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg843_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8)
arg844_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg845_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg846_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg847_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16)
arg848_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg849_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg850_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg851_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg852_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg853_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg854_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg855_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg856_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg857_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg858_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg859_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg860_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg861_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg862_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg863_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg864_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg865_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg866_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg867_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg868_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg869_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg870_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg871_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg872_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8)
arg873_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
arg874_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64)
arg875_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16)
V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780]
[0/0] [__output_code] arg876_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg877_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg878_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg879_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg880_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg881_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg882_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg883_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg884_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg885_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg886_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg887_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg888_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg889_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg890_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg891_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg892_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg893_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg894_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg895_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg896_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg897_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 
07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg898_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg899_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg900_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg901_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg902_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg903_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg904_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg905_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg906_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg907_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg908_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg909_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg910_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg911_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg912_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg913_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg914_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg915_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg916_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg917_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg918_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg919_1 = rand_strided((1152, ), (1, ), 
device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg920_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg921_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg922_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg923_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg924_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg925_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg926_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg927_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg928_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg929_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg930_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg931_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg932_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg933_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg934_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg935_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg936_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg937_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg938_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg939_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg940_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] 
arg941_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg942_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg943_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg944_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg945_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg946_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg947_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg948_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg949_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg950_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg951_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg952_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg953_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg954_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg955_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg956_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg957_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg958_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg959_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg960_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg961_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg962_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] arg963_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg964_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg965_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg966_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg967_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg968_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg969_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg970_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg971_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg972_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg973_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg974_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg975_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg976_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg977_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg978_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg979_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg980_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg981_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg982_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg983_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg984_1 = rand_strided((1152, ), (1, ), device='cuda:0', 
dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg985_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg986_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg987_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg988_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg989_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg990_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg991_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg992_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg993_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg994_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg995_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg996_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg997_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg998_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg999_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1000_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1001_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1002_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1003_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1004_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1005_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1006_1 = 
rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1007_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1008_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1009_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1010_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1011_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1012_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1013_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1014_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1015_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1016_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1017_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1018_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1019_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1020_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1021_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1022_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1023_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1024_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1025_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1026_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1027_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 
torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1028_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1029_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1030_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1031_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1032_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1033_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1034_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1035_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1036_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1037_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1038_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1039_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1040_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1041_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1042_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1043_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1044_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1045_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1046_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1047_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1048_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1049_1 = rand_strided((1152, ), (1, ), 
device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1050_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1051_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1052_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1053_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1054_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1055_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1056_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1057_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1058_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1059_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1060_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1061_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1062_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1063_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1064_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1065_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1066_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1067_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1068_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1069_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1070_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] 
[0/0] [__output_code] arg1071_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1072_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1073_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1074_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1075_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1076_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1077_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1078_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1079_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1080_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1081_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1082_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1083_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1084_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1085_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1086_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1087_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1088_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1089_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1090_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1091_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1092_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 
07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1093_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1094_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1095_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1096_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1097_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1098_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1099_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1100_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1101_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1102_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1103_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1104_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1105_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1106_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1107_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1108_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1109_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1110_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1111_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1112_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1113_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1114_1 = 
rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1115_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1116_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1117_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1118_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1119_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1120_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1121_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1122_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1123_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1124_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1125_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1126_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1127_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1128_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1129_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1130_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1131_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1132_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1133_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1134_1 = rand_strided((6, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1135_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 
1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1136_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1137_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1138_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1139_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1140_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1141_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1142_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1143_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1144_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1145_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1146_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1147_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1148_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1149_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1150_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1151_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1152_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1153_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1154_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1155_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1156_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1157_1 = rand_strided((1152, ), (1, ), 
device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1158_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1159_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1160_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1161_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1162_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1163_1 = rand_strided((1152, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1164_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1165_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1166_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1167_1 = rand_strided((4608, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1168_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1169_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1170_1 = rand_strided((4608, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1171_1 = rand_strided((1152, 4608), (4608, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1172_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1173_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1174_1 = rand_strided((1152, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1175_1 = rand_strided((2, 1152), (1152, 1), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1176_1 = rand_strided((32, 1152), (1152, 1), device='cuda:0', dtype=torch.int8) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1177_1 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] arg1178_1 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.int64) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] 
[__output_code] arg1179_1 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.bfloat16) V0808 07:26:35.683356 1263209 torch/_inductor/graph.py:1780] [0/0] [__output_code] fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1, arg7_1, arg8_1, arg9_1, arg10_1, arg11_1, arg12_1, arg13_1, arg14_1, arg15_1, arg16_1, arg17_1, arg18_1, arg19_1, arg20_1, arg21_1, arg22_1, arg23_1, arg24_1, arg25_1, arg26_1, arg27_1, arg28_1, arg29_1, arg30_1, arg31_1, arg32_1, arg33_1, arg34_1, arg35_1, arg36_1, arg37_1, arg38_1, arg39_1, arg40_1, arg41_1, arg42_1, arg43_1, arg44_1, arg45_1, arg46_1, arg47_1, arg48_1, arg49_1, arg50_1, arg51_1, arg52_1, arg53_1, arg54_1, arg55_1, arg56_1, arg57_1, arg58_1, arg59_1, arg60_1, arg61_1, arg62_1, arg63_1, arg64_1, arg65_1, arg66_1, arg67_1, arg68_1, arg69_1, arg70_1, arg71_1, arg72_1, arg73_1, arg74_1, arg75_1, arg76_1, arg77_1, arg78_1, arg79_1, arg80_1, arg81_1, arg82_1, arg83_1, arg84_1, arg85_1, arg86_1, arg87_1, arg88_1, arg89_1, arg90_1, arg91_1, arg92_1, arg93_1, arg94_1, arg95_1, arg96_1, arg97_1, arg98_1, arg99_1, arg100_1, arg101_1, arg102_1, arg103_1, arg104_1, arg105_1, arg106_1, arg107_1, arg108_1, arg109_1, arg110_1, arg111_1, arg112_1, arg113_1, arg114_1, arg115_1, arg116_1, arg117_1, arg118_1, arg119_1, arg120_1, arg121_1, arg122_1, arg123_1, arg124_1, arg125_1, arg126_1, arg127_1, arg128_1, arg129_1, arg130_1, arg131_1, arg132_1, arg133_1, arg134_1, arg135_1, arg136_1, arg137_1, arg138_1, arg139_1, arg140_1, arg141_1, arg142_1, arg143_1, arg144_1, arg145_1, arg146_1, arg147_1, arg148_1, arg149_1, arg150_1, arg151_1, arg152_1, arg153_1, arg154_1, arg155_1, arg156_1, arg157_1, arg158_1, arg159_1, arg160_1, arg161_1, arg162_1, arg163_1, arg164_1, arg165_1, arg166_1, arg167_1, arg168_1, arg169_1, arg170_1, arg171_1, arg172_1, arg173_1, arg174_1, arg175_1, arg176_1, arg177_1, arg178_1, arg179_1, arg180_1, arg181_1, arg182_1, arg183_1, arg184_1, arg185_1, arg186_1, arg187_1, arg188_1, arg189_1, arg190_1, arg191_1, arg192_1, arg193_1, arg194_1, arg195_1, arg196_1, arg197_1, arg198_1, arg199_1, arg200_1, arg201_1, arg202_1, arg203_1, arg204_1, arg205_1, arg206_1, arg207_1, arg208_1, arg209_1, arg210_1, arg211_1, arg212_1, arg213_1, arg214_1, arg215_1, arg216_1, arg217_1, arg218_1, arg219_1, arg220_1, arg221_1, arg222_1, arg223_1, arg224_1, arg225_1, arg226_1, arg227_1, arg228_1, arg229_1, arg230_1, arg231_1, arg232_1, arg233_1, arg234_1, arg235_1, arg236_1, arg237_1, arg238_1, arg239_1, arg240_1, arg241_1, arg242_1, arg243_1, arg244_1, arg245_1, arg246_1, arg247_1, arg248_1, arg249_1, arg250_1, arg251_1, arg252_1, arg253_1, arg254_1, arg255_1, arg256_1, arg257_1, arg258_1, arg259_1, arg260_1, arg261_1, arg262_1, arg263_1, arg264_1, arg265_1, arg266_1, arg267_1, arg268_1, arg269_1, arg270_1, arg271_1, arg272_1, arg273_1, arg274_1, arg275_1, arg276_1, arg277_1, arg278_1, arg279_1, arg280_1, arg281_1, arg282_1, arg283_1, arg284_1, arg285_1, arg286_1, arg287_1, arg288_1, arg289_1, arg290_1, arg291_1, arg292_1, arg293_1, arg294_1, arg295_1, arg296_1, arg297_1, arg298_1, arg299_1, arg300_1, arg301_1, arg302_1, arg303_1, arg304_1, arg305_1, arg306_1, arg307_1, arg308_1, arg309_1, arg310_1, arg311_1, arg312_1, arg313_1, arg314_1, arg315_1, arg316_1, arg317_1, arg318_1, arg319_1, arg320_1, arg321_1, arg322_1, arg323_1, arg324_1, arg325_1, arg326_1, arg327_1, arg328_1, arg329_1, arg330_1, arg331_1, arg332_1, arg333_1, arg334_1, arg335_1, arg336_1, arg337_1, arg338_1, arg339_1, arg340_1, arg341_1, arg342_1, arg343_1, arg344_1, arg345_1, 
arg346_1, arg347_1, arg348_1, arg349_1, arg350_1, arg351_1, arg352_1, arg353_1, arg354_1, arg355_1, arg356_1, arg357_1, arg358_1, arg359_1, arg360_1, arg361_1, arg362_1, arg363_1, arg364_1, arg365_1, arg366_1, arg367_1, arg368_1, arg369_1, arg370_1, arg371_1, arg372_1, arg373_1, arg374_1, arg375_1, arg376_1, arg377_1, arg378_1, arg379_1, arg380_1, arg381_1, arg382_1, arg383_1, arg384_1, arg385_1, arg386_1, arg387_1, arg388_1, arg389_1, arg390_1, arg391_1, arg392_1, arg393_1, arg394_1, arg395_1, arg396_1, arg397_1, arg398_1, arg399_1, arg400_1, arg401_1, arg402_1, arg403_1, arg404_1, arg405_1, arg406_1, arg407_1, arg408_1, arg409_1, arg410_1, arg411_1, arg412_1, arg413_1, arg414_1, arg415_1, arg416_1, arg417_1, arg418_1, arg419_1, arg420_1, arg421_1, arg422_1, arg423_1, arg424_1, arg425_1, arg426_1, arg427_1, arg428_1, arg429_1, arg430_1, arg431_1, arg432_1, arg433_1, arg434_1, arg435_1, arg436_1, arg437_1, arg438_1, arg439_1, arg440_1, arg441_1, arg442_1, arg443_1, arg444_1, arg445_1, arg446_1, arg447_1, arg448_1, arg449_1, arg450_1, arg451_1, arg452_1, arg453_1, arg454_1, arg455_1, arg456_1, arg457_1, arg458_1, arg459_1, arg460_1, arg461_1, arg462_1, arg463_1, arg464_1, arg465_1, arg466_1, arg467_1, arg468_1, arg469_1, arg470_1, arg471_1, arg472_1, arg473_1, arg474_1, arg475_1, arg476_1, arg477_1, arg478_1, arg479_1, arg480_1, arg481_1, arg482_1, arg483_1, arg484_1, arg485_1, arg486_1, arg487_1, arg488_1, arg489_1, arg490_1, arg491_1, arg492_1, arg493_1, arg494_1, arg495_1, arg496_1, arg497_1, arg498_1, arg499_1, arg500_1, arg501_1, arg502_1, arg503_1, arg504_1, arg505_1, arg506_1, arg507_1, arg508_1, arg509_1, arg510_1, arg511_1, arg512_1, arg513_1, arg514_1, arg515_1, arg516_1, arg517_1, arg518_1, arg519_1, arg520_1, arg521_1, arg522_1, arg523_1, arg524_1, arg525_1, arg526_1, arg527_1, arg528_1, arg529_1, arg530_1, arg531_1, arg532_1, arg533_1, arg534_1, arg535_1, arg536_1, arg537_1, arg538_1, arg539_1, arg540_1, arg541_1, arg542_1, arg543_1, arg544_1, arg545_1, arg546_1, arg547_1, arg548_1, arg549_1, arg550_1, arg551_1, arg552_1, arg553_1, arg554_1, arg555_1, arg556_1, arg557_1, arg558_1, arg559_1, arg560_1, arg561_1, arg562_1, arg563_1, arg564_1, arg565_1, arg566_1, arg567_1, arg568_1, arg569_1, arg570_1, arg571_1, arg572_1, arg573_1, arg574_1, arg575_1, arg576_1, arg577_1, arg578_1, arg579_1, arg580_1, arg581_1, arg582_1, arg583_1, arg584_1, arg585_1, arg586_1, arg587_1, arg588_1, arg589_1, arg590_1, arg591_1, arg592_1, arg593_1, arg594_1, arg595_1, arg596_1, arg597_1, arg598_1, arg599_1, arg600_1, arg601_1, arg602_1, arg603_1, arg604_1, arg605_1, arg606_1, arg607_1, arg608_1, arg609_1, arg610_1, arg611_1, arg612_1, arg613_1, arg614_1, arg615_1, arg616_1, arg617_1, arg618_1, arg619_1, arg620_1, arg621_1, arg622_1, arg623_1, arg624_1, arg625_1, arg626_1, arg627_1, arg628_1, arg629_1, arg630_1, arg631_1, arg632_1, arg633_1, arg634_1, arg635_1, arg636_1, arg637_1, arg638_1, arg639_1, arg640_1, arg641_1, arg642_1, arg643_1, arg644_1, arg645_1, arg646_1, arg647_1, arg648_1, arg649_1, arg650_1, arg651_1, arg652_1, arg653_1, arg654_1, arg655_1, arg656_1, arg657_1, arg658_1, arg659_1, arg660_1, arg661_1, arg662_1, arg663_1, arg664_1, arg665_1, arg666_1, arg667_1, arg668_1, arg669_1, arg670_1, arg671_1, arg672_1, arg673_1, arg674_1, arg675_1, arg676_1, arg677_1, arg678_1, arg679_1, arg680_1, arg681_1, arg682_1, arg683_1, arg684_1, arg685_1, arg686_1, arg687_1, arg688_1, arg689_1, arg690_1, arg691_1, arg692_1, arg693_1, arg694_1, arg695_1, arg696_1, arg697_1, arg698_1, arg699_1, arg700_1, 
arg701_1, arg702_1, arg703_1, arg704_1, arg705_1, arg706_1, arg707_1, arg708_1, arg709_1, arg710_1, arg711_1, arg712_1, arg713_1, arg714_1, arg715_1, arg716_1, arg717_1, arg718_1, arg719_1, arg720_1, arg721_1, arg722_1, arg723_1, arg724_1, arg725_1, arg726_1, arg727_1, arg728_1, arg729_1, arg730_1, arg731_1, arg732_1, arg733_1, arg734_1, arg735_1, arg736_1, arg737_1, arg738_1, arg739_1, arg740_1, arg741_1, arg742_1, arg743_1, arg744_1, arg745_1, arg746_1, arg747_1, arg748_1, arg749_1, arg750_1, arg751_1, arg752_1, arg753_1, arg754_1, arg755_1, arg756_1, arg757_1, arg758_1, arg759_1, arg760_1, arg761_1, arg762_1, arg763_1, arg764_1, arg765_1, arg766_1, arg767_1, arg768_1, arg769_1, arg770_1, arg771_1, arg772_1, arg773_1, arg774_1, arg775_1, arg776_1, arg777_1, arg778_1, arg779_1, arg780_1, arg781_1, arg782_1, arg783_1, arg784_1, arg785_1, arg786_1, arg787_1, arg788_1, arg789_1, arg790_1, arg791_1, arg792_1, arg793_1, arg794_1, arg795_1, arg796_1, arg797_1, arg798_1, arg799_1, arg800_1, arg801_1, arg802_1, arg803_1, arg804_1, arg805_1, arg806_1, arg807_1, arg808_1, arg809_1, arg810_1, arg811_1, arg812_1, arg813_1, arg814_1, arg815_1, arg816_1, arg817_1, arg818_1, arg819_1, arg820_1, arg821_1, arg822_1, arg823_1, arg824_1, arg825_1, arg826_1, arg827_1, arg828_1, arg829_1, arg830_1, arg831_1, arg832_1, arg833_1, arg834_1, arg835_1, arg836_1, arg837_1, arg838_1, arg839_1, arg840_1, arg841_1, arg842_1, arg843_1, arg844_1, arg845_1, arg846_1, arg847_1, arg848_1, arg849_1, arg850_1, arg851_1, arg852_1, arg853_1, arg854_1, arg855_1, arg856_1, arg857_1, arg858_1, arg859_1, arg860_1, arg861_1, arg862_1, arg863_1, arg864_1, arg865_1, arg866_1, arg867_1, arg868_1, arg869_1, arg870_1, arg871_1, arg872_1, arg873_1, arg874_1, arg875_1, arg876_1, arg877_1, arg878_1, arg879_1, arg880_1, arg881_1, arg882_1, arg883_1, arg884_1, arg885_1, arg886_1, arg887_1, arg888_1, arg889_1, arg890_1, arg891_1, arg892_1, arg893_1, arg894_1, arg895_1, arg896_1, arg897_1, arg898_1, arg899_1, arg900_1, arg901_1, arg902_1, arg903_1, arg904_1, arg905_1, arg906_1, arg907_1, arg908_1, arg909_1, arg910_1, arg911_1, arg912_1, arg913_1, arg914_1, arg915_1, arg916_1, arg917_1, arg918_1, arg919_1, arg920_1, arg921_1, arg922_1, arg923_1, arg924_1, arg925_1, arg926_1, arg927_1, arg928_1, arg929_1, arg930_1, arg931_1, arg932_1, arg933_1, arg934_1, arg935_1, arg936_1, arg937_1, arg938_1, arg939_1, arg940_1, arg941_1, arg942_1, arg943_1, arg944_1, arg945_1, arg946_1, arg947_1, arg948_1, arg949_1, arg950_1, arg951_1, arg952_1, arg953_1, arg954_1, arg955_1, arg956_1, arg957_1, arg958_1, arg959_1, arg960_1, arg961_1, arg962_1, arg963_1, arg964_1, arg965_1, arg966_1, arg967_1, arg968_1, arg969_1, arg970_1, arg971_1, arg972_1, arg973_1, arg974_1, arg975_1, arg976_1, arg977_1, arg978_1, arg979_1, arg980_1, arg981_1, arg982_1, arg983_1, arg984_1, arg985_1, arg986_1, arg987_1, arg988_1, arg989_1, arg990_1, arg991_1, arg992_1, arg993_1, arg994_1, arg995_1, arg996_1, arg997_1, arg998_1, arg999_1, arg1000_1, arg1001_1, arg1002_1, arg1003_1, arg1004_1, arg1005_1, arg1006_1, arg1007_1, arg1008_1, arg1009_1, arg1010_1, arg1011_1, arg1012_1, arg1013_1, arg1014_1, arg1015_1, arg1016_1, arg1017_1, arg1018_1, arg1019_1, arg1020_1, arg1021_1, arg1022_1, arg1023_1, arg1024_1, arg1025_1, arg1026_1, arg1027_1, arg1028_1, arg1029_1, arg1030_1, arg1031_1, arg1032_1, arg1033_1, arg1034_1, arg1035_1, arg1036_1, arg1037_1, arg1038_1, arg1039_1, arg1040_1, arg1041_1, arg1042_1, arg1043_1, arg1044_1, arg1045_1, arg1046_1, arg1047_1, arg1048_1, arg1049_1, arg1050_1, 
arg1051_1, arg1052_1, arg1053_1, arg1054_1, arg1055_1, arg1056_1, arg1057_1, arg1058_1, arg1059_1, arg1060_1, arg1061_1, arg1062_1, arg1063_1, arg1064_1, arg1065_1, arg1066_1, arg1067_1, arg1068_1, arg1069_1, arg1070_1, arg1071_1, arg1072_1, arg1073_1, arg1074_1, arg1075_1, arg1076_1, arg1077_1, arg1078_1, arg1079_1, arg1080_1, arg1081_1, arg1082_1, arg1083_1, arg1084_1, arg1085_1, arg1086_1, arg1087_1, arg1088_1, arg1089_1, arg1090_1, arg1091_1, arg1092_1, arg1093_1, arg1094_1, arg1095_1, arg1096_1, arg1097_1, arg1098_1, arg1099_1, arg1100_1, arg1101_1, arg1102_1, arg1103_1, arg1104_1, arg1105_1, arg1106_1, arg1107_1, arg1108_1, arg1109_1, arg1110_1, arg1111_1, arg1112_1, arg1113_1, arg1114_1, arg1115_1, arg1116_1, arg1117_1, arg1118_1, arg1119_1, arg1120_1, arg1121_1, arg1122_1, arg1123_1, arg1124_1, arg1125_1, arg1126_1, arg1127_1, arg1128_1, arg1129_1, arg1130_1, arg1131_1, arg1132_1, arg1133_1, arg1134_1, arg1135_1, arg1136_1, arg1137_1, arg1138_1, arg1139_1, arg1140_1, arg1141_1, arg1142_1, arg1143_1, arg1144_1, arg1145_1, arg1146_1, arg1147_1, arg1148_1, arg1149_1, arg1150_1, arg1151_1, arg1152_1, arg1153_1, arg1154_1, arg1155_1, arg1156_1, arg1157_1, arg1158_1, arg1159_1, arg1160_1, arg1161_1, arg1162_1, arg1163_1, arg1164_1, arg1165_1, arg1166_1, arg1167_1, arg1168_1, arg1169_1, arg1170_1, arg1171_1, arg1172_1, arg1173_1, arg1174_1, arg1175_1, arg1176_1, arg1177_1, arg1178_1, arg1179_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)

I0808 07:26:42.599010 1263209 torch/_inductor/graph.py:1814] [0/0] [__output_code] Output code written to: /tmp/torchinductor_sayak/df/cdfdezij3bj34icfcbbc7sivb6ag3xcupxvnkpfrjhwkkujramtz.py
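For reference, the tail above is the benchmark harness TorchInductor appends to its generated wrapper: rand_strided() placeholder inputs, a lambda around call(), and print_performance() timing, all driven by compiled_module_main() when the written file is executed as a script. The sketch below shows one way such a dump is typically produced; the toy function, shapes, compile mode, and script name are hypothetical placeholders, and it assumes a CUDA device with a recent PyTorch 2.x build (the real log clearly comes from a far larger bfloat16 model).

# Minimal sketch, not taken from the log: produce an output_code dump and
# benchmark the module Inductor writes out.
import os

# Enable the output_code logging artifact before torch is imported; running as
#   TORCH_LOGS="output_code" python repro.py
# is equivalent.
os.environ["TORCH_LOGS"] = "output_code"

import torch

def toy(x):
    # Hypothetical stand-in for the real model; any torch.compile-able callable works.
    return torch.nn.functional.gelu(x @ x)

compiled = torch.compile(toy, mode="max-autotune")  # mode is an assumption, not read from the log

x = torch.randn(512, 512, device="cuda", dtype=torch.bfloat16)  # assumes a CUDA device
compiled(x)  # first call triggers compilation; the [__output_code] lines are logged to stderr

# The final info line of the dump names the file Inductor wrote under
# /tmp/torchinductor_<user>/...; running that file directly executes
# compiled_module_main(), which rebuilds rand_strided() inputs and prints
# timings via print_performance(), as in the harness above.

Running the generated file standalone (python <path from the final log line>) is often the quickest way to re-benchmark the compiled graph without re-tracing the original model.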