@@ -54,16 +54,24 @@ class SimdOpCheckHVX : public SimdOpCheckTest {
5454 isa_version = 62 ;
5555 }
5656
57+ auto valign_test_u8 = [&](int off) {
58+ return in_u8 (x + off) + in_u8 (x + off + 1 );
59+ };
60+
61+ auto valign_test_u16 = [&](int off) {
62+ return in_u16 (x + off) + in_u16 (x + off + 1 );
63+ };
64+
5765 // Verify that unaligned loads use the right instructions, and don't try to use
5866 // immediates of more than 3 bits.
59- check (" valign(v*,v*,#7)" , hvx_width / 1 , in_u8 (x + 7 ));
60- check (" vlalign(v*,v*,#7)" , hvx_width / 1 , in_u8 (x + hvx_width - 7 ));
61- check (" valign(v*,v*,r*)" , hvx_width / 1 , in_u8 (x + 8 ));
62- check (" valign(v*,v*,r*)" , hvx_width / 1 , in_u8 (x + hvx_width - 8 ));
63- check (" valign(v*,v*,#6)" , hvx_width / 1 , in_u16 (x + 3 ));
64- check (" vlalign(v*,v*,#6)" , hvx_width / 1 , in_u16 (x + hvx_width - 3 ));
65- check (" valign(v*,v*,r*)" , hvx_width / 1 , in_u16 (x + 4 ));
66- check (" valign(v*,v*,r*)" , hvx_width / 1 , in_u16 (x + hvx_width - 4 ));
67+ check (" valign(v*,v*,#7)" , hvx_width / 1 , valign_test_u8 ( 6 ));
68+ check (" vlalign(v*,v*,#7)" , hvx_width / 1 , valign_test_u8 ( hvx_width - 7 ));
69+ check (" valign(v*,v*,r*)" , hvx_width / 1 , valign_test_u8 ( 8 ));
70+ check (" valign(v*,v*,r*)" , hvx_width / 1 , valign_test_u8 ( hvx_width - 8 ));
71+ check (" valign(v*,v*,#6)" , hvx_width / 1 , valign_test_u16 ( 3 ));
72+ check (" vlalign(v*,v*,#6)" , hvx_width / 1 , valign_test_u16 ( hvx_width - 3 ));
73+ check (" valign(v*,v*,r*)" , hvx_width / 1 , valign_test_u16 ( 4 ));
74+ check (" valign(v*,v*,r*)" , hvx_width / 1 , valign_test_u16 ( hvx_width - 4 ));
6775
6876 check (" vunpack(v*.ub)" , hvx_width / 1 , u16 (u8_1));
6977 check (" vunpack(v*.ub)" , hvx_width / 1 , i16 (u8_1));