Skip to content

Commit dd1e23e

Browse files
Update test_gpu_examples.py
Added a GPU test with a 4-bit model in test_gpu_examples.py using @pytest.mark.single_gpu_tests and @require_bitsandbytes. It verifies that find_kappa_target_modules runs successfully and returns target modules.
1 parent f07890c commit dd1e23e

1 file changed

Lines changed: 32 additions & 0 deletions

File tree

tests/test_gpu_examples.py

Lines changed: 32 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -6652,3 +6652,35 @@ def test_load_adapter_save(self, tmp_path):
66526652
tmp_dir_reference = tmp_path / "reference"
66536653
tmp_dir_tp = tmp_path / "tp"
66546654
self._spawn(_test_load_adapter_save, tmp_dir_reference, tmp_dir_tp, port_offset=6)
6655+
6656+
@pytest.mark.single_gpu_tests
6657+
@require_bitsandbytes
6658+
def test_kappatune_with_4bit_model():
6659+
"""Test that KappaTune works with 4-bit quantized models on GPU."""
6660+
from peft.helpers import find_kappa_target_modules
6661+
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
6662+
import torch
6663+
6664+
# Use a very small model for faster testing
6665+
quantization_config = BitsAndBytesConfig(
6666+
load_in_4bit=True,
6667+
bnb_4bit_compute_dtype=torch.float16,
6668+
bnb_4bit_quant_type="nf4",
6669+
bnb_4bit_use_double_quant=True,
6670+
)
6671+
6672+
model = AutoModelForCausalLM.from_pretrained(
6673+
"hf-internal-testing/tiny-random-LlamaForCausalLM",
6674+
quantization_config=quantization_config,
6675+
device_map="cuda",
6676+
torch_dtype=torch.float16,
6677+
)
6678+
6679+
# Run KappaTune
6680+
targets = find_kappa_target_modules(model, top_p=0.3)
6681+
6682+
# Basic assertions
6683+
assert isinstance(targets, dict)
6684+
assert "target_modules" in targets
6685+
assert isinstance(targets["target_modules"], list)
6686+
assert len(targets["target_modules"]) > 0, "Should return at least some target modules"

0 commit comments

Comments (0)