#!/usr/bin/env python3
"""
Test inference quality between original and compressed models.

Usage:
    python3 test_inference.py --original <path> --compressed <path>

Or with default paths:
    python3 test_inference.py
"""
import argparse
import glob
import os
import time

import torch
from safetensors.torch import load_file
from transformers import AutoModelForCausalLM, AutoTokenizer
def load_model_with_weights(model_name: str, weights_path: str):
    """Load model architecture and replace weights from safetensors file."""
    print(f"Loading model architecture from {model_name}...")
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float16,
        device_map="cpu",
        trust_remote_code=True,
    )

    print(f"Loading weights from {weights_path}...")
    state_dict = load_file(weights_path)

    # Load weights; strict=False reports key mismatches instead of raising
    missing, unexpected = model.load_state_dict(state_dict, strict=False)
    if missing:
        print(f" Missing keys: {len(missing)}")
    if unexpected:
        print(f" Unexpected keys: {len(unexpected)}")

    return model
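
# Note: many small checkpoints, including Qwen2.5-0.5B-Instruct, tie
# lm_head.weight to the input embeddings, so a safetensors file that
# omits lm_head.weight can report one "missing" key above without the
# model actually being incomplete. A quick sanity check (illustrative
# sketch, not part of the original script):
#
#   m = load_model_with_weights("Qwen/Qwen2.5-0.5B-Instruct", weights_path)
#   w = m.get_input_embeddings().weight
#   print(w.shape, w.dtype)  # expect torch.float16 after the cast above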

def generate_text(model, tokenizer, prompt: str, max_new_tokens: int = 50) -> str:
    """Generate text from a prompt."""
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,  # avoids the missing-mask warning
            max_new_tokens=max_new_tokens,
            do_sample=False,  # Deterministic for comparison
            pad_token_id=tokenizer.eos_token_id,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
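
# Note: with do_sample=False, generate() picks the argmax token at every
# step, so two models produce identical text exactly when their argmax
# choices never differ. Even tiny logit drift from compression can flip
# one argmax and make the continuations diverge from that point on, so a
# "DIFFER" status below does not by itself mean the output is worse.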

def compare_logits(model1, model2, tokenizer, prompt: str):
    """Compare logits between two models for the same input."""
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        logits1 = model1(**inputs).logits
        logits2 = model2(**inputs).logits

    # Compute similarity metrics
    cosine_sim = torch.nn.functional.cosine_similarity(
        logits1.flatten().unsqueeze(0),
        logits2.flatten().unsqueeze(0)
    ).item()
    mse = torch.nn.functional.mse_loss(logits1, logits2).item()

    # Top-k agreement
    top5_1 = torch.topk(logits1[0, -1], k=5).indices
    top5_2 = torch.topk(logits2[0, -1], k=5).indices
    top5_agreement = len(set(top5_1.tolist()) & set(top5_2.tolist())) / 5

    return {
        "cosine_similarity": cosine_sim,
        "mse": mse,
        "top5_agreement": top5_agreement,
    }
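
# A further metric one might add to compare_logits (illustrative sketch,
# not part of the original script) is the KL divergence between the two
# models' next-token distributions, which weights disagreement by
# probability mass rather than by raw logit distance:
#
#   log_p = torch.log_softmax(logits1[0, -1], dim=-1)
#   log_q = torch.log_softmax(logits2[0, -1], dim=-1)
#   kl = torch.nn.functional.kl_div(
#       log_q, log_p, log_target=True, reduction="sum"
#   ).item()  # KL(p || q)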

def main():
    parser = argparse.ArgumentParser(description="Test inference quality")
    parser.add_argument("--original", type=str,
                        default="~/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/*/model.safetensors",
                        help="Path to original model weights")
    parser.add_argument("--compressed", type=str,
                        default="/tmp/qwen-compressed-30pct-int4.safetensors",
                        help="Path to compressed model weights")
    parser.add_argument("--model-name", type=str,
                        default="Qwen/Qwen2.5-0.5B-Instruct",
                        help="HuggingFace model name for architecture")
    args = parser.parse_args()

    # Expand ~ and any glob in the path (the * in the default --original
    # value stands in for the snapshot hash inside the HuggingFace cache)
    original_path = os.path.expanduser(args.original)
    if '*' in original_path:
        matches = glob.glob(original_path)
        if matches:
            original_path = matches[0]
        else:
            print(f"No match for: {original_path}")
            return
    compressed_path = os.path.expanduser(args.compressed)
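
    # An existence check one might add here (illustrative, not part of
    # the original script):
    #
    #   for path in (original_path, compressed_path):
    #       if not os.path.isfile(path):
    #           raise FileNotFoundError(path)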
print("\n=== Inference Quality Test ===\n")
print(f"Original: {original_path}")
print(f"Compressed: {compressed_path}")
print(f"Model: {args.model_name}")
print()
# Load tokenizer
print("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(args.model_name, trust_remote_code=True)
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
# Load models
print("\nLoading original model...")
start = time.time()
model_original = load_model_with_weights(args.model_name, original_path)
print(f" Loaded in {time.time() - start:.1f}s")
print("\nLoading compressed model...")
start = time.time()
model_compressed = load_model_with_weights(args.model_name, compressed_path)
print(f" Loaded in {time.time() - start:.1f}s")

    # Test prompts
    test_prompts = [
        "The capital of France is",
        "def fibonacci(n):",
        "In machine learning, a neural network",
        "The meaning of life is",
        "To solve this math problem: 2 + 2 =",
    ]

    print("\n" + "="*60)
    print("GENERATION COMPARISON")
    print("="*60)

    for prompt in test_prompts:
        print(f"\nPrompt: {prompt!r}")
        print("-" * 40)

        start = time.time()
        output_original = generate_text(model_original, tokenizer, prompt, max_new_tokens=30)
        time_original = time.time() - start

        start = time.time()
        output_compressed = generate_text(model_compressed, tokenizer, prompt, max_new_tokens=30)
        time_compressed = time.time() - start

        print(f"Original ({time_original:.2f}s):")
        print(f" {output_original}")
        print(f"Compressed ({time_compressed:.2f}s):")
        print(f" {output_compressed}")

        # Check if outputs match
        match = "MATCH" if output_original == output_compressed else "DIFFER"
        print(f"Status: {match}")
print("\n" + "="*60)
print("LOGITS COMPARISON")
print("="*60)
for prompt in test_prompts[:3]: # Just first 3 for logits
print(f"\nPrompt: {prompt!r}")
metrics = compare_logits(model_original, model_compressed, tokenizer, prompt)
print(f" Cosine Similarity: {metrics['cosine_similarity']:.6f}")
print(f" MSE: {metrics['mse']:.6e}")
print(f" Top-5 Agreement: {metrics['top5_agreement']*100:.0f}%")
print("\n" + "="*60)
print("SUMMARY")
print("="*60)
# Aggregate metrics
all_metrics = [compare_logits(model_original, model_compressed, tokenizer, p) for p in test_prompts]
avg_cosine = sum(m['cosine_similarity'] for m in all_metrics) / len(all_metrics)
avg_top5 = sum(m['top5_agreement'] for m in all_metrics) / len(all_metrics)
print(f"\nAverage Logit Cosine Similarity: {avg_cosine:.6f}")
print(f"Average Top-5 Token Agreement: {avg_top5*100:.0f}%")
    if avg_cosine > 0.99:
        print("\nQuality: EXCELLENT - Outputs should be nearly identical")
    elif avg_cosine > 0.95:
        print("\nQuality: GOOD - Minor differences expected")
    elif avg_cosine > 0.90:
        print("\nQuality: ACCEPTABLE - Some differences expected")
    else:
        print("\nQuality: DEGRADED - Significant differences expected")


if __name__ == "__main__":
    main()