From 8b5003fcc71e0d7f2897aaa1d34609d266a18270 Mon Sep 17 00:00:00 2001
From: AffectionateCurry <160357316+AffectionateCurry@users.noreply.github.com>
Date: Thu, 24 Jul 2025 00:18:57 -0700
Subject: [PATCH] adjusted level 3 sizes

---
 KernelBench/level3/18_SqueezeNet.py             | 2 +-
 KernelBench/level3/2_ShallowWideMLP.py          | 2 +-
 KernelBench/level3/35_LSTM.py                   | 6 +++---
 KernelBench/level3/36_LSTMHn.py                 | 6 +++---
 KernelBench/level3/37_LSTMCn.py                 | 6 +++---
 KernelBench/level3/38_LSTMBidirectional.py      | 6 +++---
 KernelBench/level3/39_GRU.py                    | 6 +++---
 KernelBench/level3/40_GRUHidden.py              | 6 +++---
 KernelBench/level3/41_GRUBidirectional.py       | 6 +++---
 KernelBench/level3/42_GRUBidirectionalHidden.py | 6 +++---
 KernelBench/level3/44_MiniGPTBlock.py           | 2 +-
 KernelBench/level3/5_AlexNet.py                 | 2 +-
 12 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/KernelBench/level3/18_SqueezeNet.py b/KernelBench/level3/18_SqueezeNet.py
index aa6ac324..20baf78a 100644
--- a/KernelBench/level3/18_SqueezeNet.py
+++ b/KernelBench/level3/18_SqueezeNet.py
@@ -72,7 +72,7 @@ def forward(self, x):
         return torch.flatten(x, 1)
 
 # Test code
-batch_size = 64
+batch_size = 32
 input_channels = 3
 height = 512
 width = 512
diff --git a/KernelBench/level3/2_ShallowWideMLP.py b/KernelBench/level3/2_ShallowWideMLP.py
index a36c34b8..3519041d 100644
--- a/KernelBench/level3/2_ShallowWideMLP.py
+++ b/KernelBench/level3/2_ShallowWideMLP.py
@@ -31,7 +31,7 @@ def forward(self, x):
         return self.network(x)
 
 # Test code
-batch_size = 128
+batch_size = 64
 input_size = 16384
 hidden_layer_sizes = [32768, 32768]
 output_size = 16384
diff --git a/KernelBench/level3/35_LSTM.py b/KernelBench/level3/35_LSTM.py
index b68c8b16..c669fb88 100644
--- a/KernelBench/level3/35_LSTM.py
+++ b/KernelBench/level3/35_LSTM.py
@@ -41,10 +41,10 @@ def forward(self, x, h0=None, c0=None):
         return out
 
 # === Test configuration ===
-batch_size = 10
+batch_size = 3
 sequence_length = 512
-input_size = 128
-hidden_size = 256
+input_size = 64
+hidden_size = 128
 num_layers = 6
 output_size = 10
 dropout = 0.0
diff --git a/KernelBench/level3/36_LSTMHn.py b/KernelBench/level3/36_LSTMHn.py
index 6e25a3e6..75dc6812 100644
--- a/KernelBench/level3/36_LSTMHn.py
+++ b/KernelBench/level3/36_LSTMHn.py
@@ -34,10 +34,10 @@ def forward(self, x,h0,c0):
         return state[0]
 
 # Test code
-batch_size = 10
+batch_size = 3
 sequence_length = 512
-input_size = 128
-hidden_size = 256
+input_size = 64
+hidden_size = 128
 num_layers = 6
 output_size = 10
 dropout = 0.0
diff --git a/KernelBench/level3/37_LSTMCn.py b/KernelBench/level3/37_LSTMCn.py
index c5f26c96..65f16ef8 100644
--- a/KernelBench/level3/37_LSTMCn.py
+++ b/KernelBench/level3/37_LSTMCn.py
@@ -34,10 +34,10 @@ def forward(self, x, h0, c0):
         return state[1]
 
 # Test code
-batch_size = 10
+batch_size = 3
 sequence_length = 512
-input_size = 128
-hidden_size = 256
+input_size = 64
+hidden_size = 128
 num_layers = 6
 output_size = 10
 dropout = 0.0
diff --git a/KernelBench/level3/38_LSTMBidirectional.py b/KernelBench/level3/38_LSTMBidirectional.py
index 62b79a0b..19aa91fc 100644
--- a/KernelBench/level3/38_LSTMBidirectional.py
+++ b/KernelBench/level3/38_LSTMBidirectional.py
@@ -33,10 +33,10 @@ def forward(self, x,h0,c0):
         return out
 
 # Test code
-batch_size = 10
+batch_size = 3
 sequence_length = 512
-input_size = 128
-hidden_size = 256
+input_size = 64
+hidden_size = 128
 num_layers = 6
 output_size = 10
 dropout = 0.0
diff --git a/KernelBench/level3/39_GRU.py b/KernelBench/level3/39_GRU.py
index d6c0b2e7..b5972fdf 100644
--- a/KernelBench/level3/39_GRU.py
+++ b/KernelBench/level3/39_GRU.py
@@ -27,10 +27,10 @@ def forward(self, x,h0):
         return output
 
 # Test code
-batch_size = 10
+batch_size = 3
 seq_len = 512
-input_size = 128
-hidden_size = 256
+input_size = 64
+hidden_size = 128
 num_layers = 6
 
 def get_inputs():
diff --git a/KernelBench/level3/40_GRUHidden.py b/KernelBench/level3/40_GRUHidden.py
index 007cc3e2..af0810fc 100644
--- a/KernelBench/level3/40_GRUHidden.py
+++ b/KernelBench/level3/40_GRUHidden.py
@@ -27,10 +27,10 @@ def forward(self, x,h0):
         return h_n
 
 # Test code
-batch_size = 10
+batch_size = 3
 seq_len = 512
-input_size = 128
-hidden_size = 256
+input_size = 64
+hidden_size = 128
 num_layers = 6
 
 def get_inputs():
diff --git a/KernelBench/level3/41_GRUBidirectional.py b/KernelBench/level3/41_GRUBidirectional.py
index 69dd19b6..d6e6909a 100644
--- a/KernelBench/level3/41_GRUBidirectional.py
+++ b/KernelBench/level3/41_GRUBidirectional.py
@@ -28,10 +28,10 @@ def forward(self, x,h0):
         return output
 
 # Test code
-batch_size = 10
+batch_size = 3
 seq_len = 512
-input_size = 128
-hidden_size = 256
+input_size = 64
+hidden_size = 128
 num_layers = 6
 
 def get_inputs():
diff --git a/KernelBench/level3/42_GRUBidirectionalHidden.py b/KernelBench/level3/42_GRUBidirectionalHidden.py
index 395a5260..4de7fc66 100644
--- a/KernelBench/level3/42_GRUBidirectionalHidden.py
+++ b/KernelBench/level3/42_GRUBidirectionalHidden.py
@@ -27,10 +27,10 @@ def forward(self, x,h0):
         return h_n
 
 # Test code
-batch_size = 10
+batch_size = 3
 seq_len = 512
-input_size = 128
-hidden_size = 256
+input_size = 64
+hidden_size = 128
 num_layers = 6
 
 def get_inputs():
diff --git a/KernelBench/level3/44_MiniGPTBlock.py b/KernelBench/level3/44_MiniGPTBlock.py
index c1b9fad8..5d6bb42f 100644
--- a/KernelBench/level3/44_MiniGPTBlock.py
+++ b/KernelBench/level3/44_MiniGPTBlock.py
@@ -82,7 +82,7 @@ def forward(self, x):
         x = x + self.mlpf(self.ln_2(x))
         return x
 
-batch_size = 128
+batch_size = 32
 max_seqlen = 1024
 seq_len = 512
 n_embd = 768
diff --git a/KernelBench/level3/5_AlexNet.py b/KernelBench/level3/5_AlexNet.py
index 554a0cc8..ce9f8487 100644
--- a/KernelBench/level3/5_AlexNet.py
+++ b/KernelBench/level3/5_AlexNet.py
@@ -81,7 +81,7 @@ def forward(self, x):
         return x
 
 # Test code
-batch_size = 1024
+batch_size = 512
 num_classes = 1000
 
 def get_inputs():