typedef double __m128d __attribute__((__vector_size__(16), __aligned__(16)));
typedef long long __m128i __attribute__((__vector_size__(16), __aligned__(16)));
typedef double __m128d_u __attribute__((__vector_size__(16), __aligned__(1)));
typedef long long __m128i_u __attribute__((__vector_size__(16), __aligned__(1)));
typedef long long __v2di __attribute__((__vector_size__(16)));
typedef unsigned long long __v2du __attribute__((__vector_size__(16)));
typedef unsigned short __v8hu __attribute__((__vector_size__(16)));
typedef unsigned char __v16qu __attribute__((__vector_size__(16)));
typedef signed char __v16qs __attribute__((__vector_size__(16)));
typedef __bf16 __v8bf __attribute__((__vector_size__(16), __aligned__(16)));
typedef __bf16 __m128bh __attribute__((__vector_size__(16), __aligned__(16)));
#if defined(__EVEX512__) && !defined(__AVX10_1_512__)
#define __DEFAULT_FN_ATTRS                                                     \
  __attribute__((__always_inline__, __nodebug__,                               \
                 __target__("sse2,no-evex512"), __min_vector_width__(128)))
#else
#define __DEFAULT_FN_ATTRS                                                     \
  __attribute__((__always_inline__, __nodebug__, __target__("sse2"),           \
                 __min_vector_width__(128)))
#endif
#if defined(__cplusplus) && (__cplusplus >= 201103L)
#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
#else
#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
#endif

#define __trunc64(x)                                                           \
  (__m64)__builtin_shufflevector((__v2di)(x), __extension__(__v2di){}, 0)
#define __anyext128(x)                                                         \
  (__m128i)__builtin_shufflevector((__v2si)(x), __extension__(__v2si){}, 0, 1, \
                                   -1, -1)

  return (__m128d)((__v2df)__a + (__v2df)__b);
  return (__m128d)((__v2df)__a - (__v2df)__b);
  return (__m128d)((__v2df)__a * (__v2df)__b);
  return (__m128d)((__v2df)__a / (__v2df)__b);

  __m128d __c = __builtin_ia32_sqrtsd((__v2df)__b);
  return __extension__(__m128d){__c[0], __a[1]};
  return __builtin_ia32_sqrtpd((__v2df)__a);
  return __builtin_ia32_minsd((__v2df)__a, (__v2df)__b);
  return __builtin_ia32_minpd((__v2df)__a, (__v2df)__b);
  return __builtin_ia32_maxsd((__v2df)__a, (__v2df)__b);
  return __builtin_ia32_maxpd((__v2df)__a, (__v2df)__b);
  return (__m128d)((__v2du)__a & (__v2du)__b);
  return (__m128d)(~(__v2du)__a & (__v2du)__b);
  return (__m128d)((__v2du)__a | (__v2du)__b);
  return (__m128d)((__v2du)__a ^ (__v2du)__b);
  return (__m128d)__builtin_ia32_cmpeqpd((__v2df)__a, (__v2df)__b);
  return (__m128d)__builtin_ia32_cmpltpd((__v2df)__a, (__v2df)__b);
  return (__m128d)__builtin_ia32_cmplepd((__v2df)__a, (__v2df)__b);
  return (__m128d)__builtin_ia32_cmpltpd((__v2df)__b, (__v2df)__a);
  return (__m128d)__builtin_ia32_cmplepd((__v2df)__b, (__v2df)__a);
  return (__m128d)__builtin_ia32_cmpordpd((__v2df)__a, (__v2df)__b);
  return (__m128d)__builtin_ia32_cmpunordpd((__v2df)__a, (__v2df)__b);
  return (__m128d)__builtin_ia32_cmpneqpd((__v2df)__a, (__v2df)__b);
  return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__a, (__v2df)__b);
  return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__a, (__v2df)__b);
  return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__b, (__v2df)__a);
  return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__b, (__v2df)__a);
  return (__m128d)__builtin_ia32_cmpeqsd((__v2df)__a, (__v2df)__b);
  return (__m128d)__builtin_ia32_cmpltsd((__v2df)__a, (__v2df)__b);
  return (__m128d)__builtin_ia32_cmplesd((__v2df)__a, (__v2df)__b);

  __m128d __c = __builtin_ia32_cmpltsd((__v2df)__b, (__v2df)__a);
  return __extension__(__m128d){__c[0], __a[1]};

  __m128d __c = __builtin_ia32_cmplesd((__v2df)__b, (__v2df)__a);
  return __extension__(__m128d){__c[0], __a[1]};

  return (__m128d)__builtin_ia32_cmpordsd((__v2df)__a, (__v2df)__b);
  return (__m128d)__builtin_ia32_cmpunordsd((__v2df)__a, (__v2df)__b);
  return (__m128d)__builtin_ia32_cmpneqsd((__v2df)__a, (__v2df)__b);
  return (__m128d)__builtin_ia32_cmpnltsd((__v2df)__a, (__v2df)__b);
  return (__m128d)__builtin_ia32_cmpnlesd((__v2df)__a, (__v2df)__b);

  __m128d __c = __builtin_ia32_cmpnltsd((__v2df)__b, (__v2df)__a);
  return __extension__(__m128d){__c[0], __a[1]};

  __m128d __c = __builtin_ia32_cmpnlesd((__v2df)__b, (__v2df)__a);
  return __extension__(__m128d){__c[0], __a[1]};
  return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b);
  return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b);
  return __builtin_ia32_comisdle((__v2df)__a, (__v2df)__b);
  return __builtin_ia32_comisdgt((__v2df)__a, (__v2df)__b);
  return __builtin_ia32_comisdge((__v2df)__a, (__v2df)__b);
  return __builtin_ia32_comisdneq((__v2df)__a, (__v2df)__b);
  return __builtin_ia32_ucomisdeq((__v2df)__a, (__v2df)__b);
  return __builtin_ia32_ucomisdlt((__v2df)__a, (__v2df)__b);
  return __builtin_ia32_ucomisdle((__v2df)__a, (__v2df)__b);
  return __builtin_ia32_ucomisdgt((__v2df)__a, (__v2df)__b);
  return __builtin_ia32_ucomisdge((__v2df)__a, (__v2df)__b);
  return __builtin_ia32_ucomisdneq((__v2df)__a, (__v2df)__b);
  return __builtin_ia32_cvtpd2ps((__v2df)__a);
  return (__m128d)__builtin_convertvector(
      __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1), __v2df);
  return (__m128d)__builtin_convertvector(
      __builtin_shufflevector((__v4si)__a, (__v4si)__a, 0, 1), __v2df);
  return __builtin_ia32_cvtpd2dq((__v2df)__a);
  return __builtin_ia32_cvtsd2si((__v2df)__a);
  return (__m128)__builtin_ia32_cvtsd2ss((__v4sf)__a, (__v2df)__b);
  return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)__a);
  return __builtin_ia32_cvttsd2si((__v2df)__a);
  return __trunc64(__builtin_ia32_cvtpd2dq((__v2df)__a));
  return __trunc64(__builtin_ia32_cvttpd2dq((__v2df)__a));
  return (__m128d)__builtin_convertvector((__v2si)__a, __v2df);
  return *(const __m128d *)__dp;

  struct __mm_load1_pd_struct {
    double __u;
  } __attribute__((__packed__, __may_alias__));
  double __u = ((const struct __mm_load1_pd_struct *)__dp)->__u;
  return __extension__(__m128d){__u, __u};

#define _mm_load_pd1(dp) _mm_load1_pd(dp)

  __m128d __u = *(const __m128d *)__dp;
  return __builtin_shufflevector((__v2df)__u, (__v2df)__u, 1, 0);

  return ((const struct __loadu_pd *)__dp)->__v;

  struct __loadu_si64 {
    long long __v;
  } __attribute__((__packed__, __may_alias__));
  long long __u = ((const struct __loadu_si64 *)__a)->__v;
  return __extension__(__m128i)(__v2di){__u, 0LL};

  struct __loadu_si32 {
    int __v;
  } __attribute__((__packed__, __may_alias__));
  int __u = ((const struct __loadu_si32 *)__a)->__v;
  return __extension__(__m128i)(__v4si){__u, 0, 0, 0};

  struct __loadu_si16 {
    short __v;
  } __attribute__((__packed__, __may_alias__));
  short __u = ((const struct __loadu_si16 *)__a)->__v;
  return __extension__(__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0};

  struct __mm_load_sd_struct {
    double __u;
  } __attribute__((__packed__, __may_alias__));
  double __u = ((const struct __mm_load_sd_struct *)__dp)->__u;
  return __extension__(__m128d){__u, 0};

_mm_loadh_pd(__m128d __a, double const *__dp) {
  struct __mm_loadh_pd_struct {
    double __u;
  } __attribute__((__packed__, __may_alias__));
  double __u = ((const struct __mm_loadh_pd_struct *)__dp)->__u;
  return __extension__(__m128d){__a[0], __u};

_mm_loadl_pd(__m128d __a, double const *__dp) {
  struct __mm_loadl_pd_struct {
    double __u;
  } __attribute__((__packed__, __may_alias__));
  double __u = ((const struct __mm_loadl_pd_struct *)__dp)->__u;
  return __extension__(__m128d){__u, __a[1]};

  return (__m128d)__builtin_ia32_undef128();
  return __extension__(__m128d){__w, 0.0};
  return __extension__(__m128d){__w, __w};
  return __extension__(__m128d){__x, __w};
  return __extension__(__m128d){__w, __x};
  return __extension__(__m128d){0.0, 0.0};
  struct __mm_store_sd_struct {
    double __u;
  } __attribute__((__packed__, __may_alias__));
  ((struct __mm_store_sd_struct *)__dp)->__u = __a[0];

  *(__m128d *)__dp = __a;

  __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);

  struct __storeu_pd {
    __m128d_u __v;
  } __attribute__((__packed__, __may_alias__));
  ((struct __storeu_pd *)__dp)->__v = __a;

  __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 1, 0);
  *(__m128d *)__dp = __a;

  struct __mm_storeh_pd_struct {
    double __u;
  } __attribute__((__packed__, __may_alias__));
  ((struct __mm_storeh_pd_struct *)__dp)->__u = __a[1];

  struct __mm_storeh_pd_struct {
    double __u;
  } __attribute__((__packed__, __may_alias__));
  ((struct __mm_storeh_pd_struct *)__dp)->__u = __a[0];
  return (__m128i)((__v16qu)__a + (__v16qu)__b);
  return (__m128i)((__v8hu)__a + (__v8hu)__b);
  return (__m128i)((__v4su)__a + (__v4su)__b);
  return (__m64)(((unsigned long long)__a) + ((unsigned long long)__b));
  return (__m128i)((__v2du)__a + (__v2du)__b);
  return (__m128i)__builtin_elementwise_add_sat((__v16qs)__a, (__v16qs)__b);
  return (__m128i)__builtin_elementwise_add_sat((__v8hi)__a, (__v8hi)__b);
  return (__m128i)__builtin_elementwise_add_sat((__v16qu)__a, (__v16qu)__b);
  return (__m128i)__builtin_elementwise_add_sat((__v8hu)__a, (__v8hu)__b);
  return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b);
  return (__m128i)__builtin_ia32_pavgw128((__v8hi)__a, (__v8hi)__b);
  return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)__a, (__v8hi)__b);
  return (__m128i)__builtin_elementwise_max((__v8hi)__a, (__v8hi)__b);
  return (__m128i)__builtin_elementwise_max((__v16qu)__a, (__v16qu)__b);
  return (__m128i)__builtin_elementwise_min((__v8hi)__a, (__v8hi)__b);
  return (__m128i)__builtin_elementwise_min((__v16qu)__a, (__v16qu)__b);
  return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b);
  return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b);
  return (__m128i)((__v8hu)__a * (__v8hu)__b);
  return __builtin_ia32_pmuludq128((__v4si)__a, (__v4si)__b);
  return __builtin_ia32_psadbw128((__v16qi)__a, (__v16qi)__b);
  return (__m128i)((__v16qu)__a - (__v16qu)__b);
  return (__m128i)((__v8hu)__a - (__v8hu)__b);
  return (__m128i)((__v4su)__a - (__v4su)__b);
  return (__m64)((unsigned long long)__a - (unsigned long long)__b);
  return (__m128i)((__v2du)__a - (__v2du)__b);
  return (__m128i)__builtin_elementwise_sub_sat((__v16qs)__a, (__v16qs)__b);
  return (__m128i)__builtin_elementwise_sub_sat((__v8hi)__a, (__v8hi)__b);
  return (__m128i)__builtin_elementwise_sub_sat((__v16qu)__a, (__v16qu)__b);
  return (__m128i)__builtin_elementwise_sub_sat((__v8hu)__a, (__v8hu)__b);
  return (__m128i)((__v2du)__a & (__v2du)__b);
  return (__m128i)(~(__v2du)__a & (__v2du)__b);
  return (__m128i)((__v2du)__a | (__v2du)__b);
  return (__m128i)((__v2du)__a ^ (__v2du)__b);
#define _mm_slli_si128(a, imm)                                                 \
  ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a),          \
                                                (int)(imm)))

#define _mm_bslli_si128(a, imm)                                                \
  ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a),          \
                                                (int)(imm)))

  return (__m128i)__builtin_ia32_psllwi128((__v8hi)__a, __count);
  return (__m128i)__builtin_ia32_psllw128((__v8hi)__a, (__v8hi)__count);
  return (__m128i)__builtin_ia32_pslldi128((__v4si)__a, __count);
  return (__m128i)__builtin_ia32_pslld128((__v4si)__a, (__v4si)__count);
  return __builtin_ia32_psllqi128((__v2di)__a, __count);
  return __builtin_ia32_psllq128((__v2di)__a, (__v2di)__count);
  return (__m128i)__builtin_ia32_psrawi128((__v8hi)__a, __count);
  return (__m128i)__builtin_ia32_psraw128((__v8hi)__a, (__v8hi)__count);
  return (__m128i)__builtin_ia32_psradi128((__v4si)__a, __count);
  return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count);

#define _mm_srli_si128(a, imm)                                                 \
  ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a),          \
                                                (int)(imm)))

#define _mm_bsrli_si128(a, imm)                                                \
  ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a),          \
                                                (int)(imm)))

  return (__m128i)__builtin_ia32_psrlwi128((__v8hi)__a, __count);
  return (__m128i)__builtin_ia32_psrlw128((__v8hi)__a, (__v8hi)__count);
  return (__m128i)__builtin_ia32_psrldi128((__v4si)__a, __count);
  return (__m128i)__builtin_ia32_psrld128((__v4si)__a, (__v4si)__count);
  return __builtin_ia32_psrlqi128((__v2di)__a, __count);
  return __builtin_ia32_psrlq128((__v2di)__a, (__v2di)__count);
  return (__m128i)((__v16qi)__a == (__v16qi)__b);
  return (__m128i)((__v8hi)__a == (__v8hi)__b);
  return (__m128i)((__v4si)__a == (__v4si)__b);
  return (__m128i)((__v16qs)__a > (__v16qs)__b);
  return (__m128i)((__v8hi)__a > (__v8hi)__b);
  return (__m128i)((__v4si)__a > (__v4si)__b);
_mm_cvtsi64_sd(__m128d __a, long long __b) {
  return __builtin_ia32_cvtsd2si64((__v2df)__a);
  return __builtin_ia32_cvttsd2si64((__v2df)__a);
  return (__m128)__builtin_convertvector((__v4si)__a, __v4sf);
  return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a);
  return (__m128i)__builtin_ia32_cvttps2dq((__v4sf)__a);
  return __extension__(__m128i)(__v4si){__a, 0, 0, 0};
  return __extension__(__m128i)(__v2di){__a, 0};

  __v4si __b = (__v4si)__a;
  struct __loadu_si128 {
    __m128i_u __v;
  } __attribute__((__packed__, __may_alias__));
  return ((const struct __loadu_si128 *)__p)->__v;

  struct __mm_loadl_epi64_struct {
    long long __u;
  } __attribute__((__packed__, __may_alias__));
  return __extension__(__m128i){
      ((const struct __mm_loadl_epi64_struct *)__p)->__u, 0};

  return (__m128i)__builtin_ia32_undef128();
  return __extension__(__m128i)(__v2di){__q0, __q1};
  return _mm_set_epi64x((long long)__q1[0], (long long)__q0[0]);
  return __extension__(__m128i)(__v4si){__i0, __i1, __i2, __i3};

             short __w2, short __w1, short __w0) {
  return __extension__(__m128i)(__v8hi){__w0, __w1, __w2, __w3,
                                        __w4, __w5, __w6, __w7};

             char __b10, char __b9, char __b8, char __b7, char __b6, char __b5,
             char __b4, char __b3, char __b2, char __b1, char __b0) {
  return __extension__(__m128i)(__v16qi){
      __b0, __b1, __b2,  __b3,  __b4,  __b5,  __b6,  __b7,
      __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15};
  return _mm_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w);

  return _mm_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
                      __b, __b, __b, __b, __b);

              short __w5, short __w6, short __w7) {
  return _mm_set_epi16(__w7, __w6, __w5, __w4, __w3, __w2, __w1, __w0);
_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5,
              char __b6, char __b7, char __b8, char __b9, char __b10,
              char __b11, char __b12, char __b13, char __b14, char __b15) {
  return _mm_set_epi8(__b15, __b14, __b13, __b12, __b11, __b10, __b9, __b8,
                      __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);

  return __extension__(__m128i)(__v2di){0LL, 0LL};
  struct __storeu_si128 {
    __m128i_u __v;
  } __attribute__((__packed__, __may_alias__));
  ((struct __storeu_si128 *)__p)->__v = __b;

  struct __storeu_si64 {
    long long __v;
  } __attribute__((__packed__, __may_alias__));
  ((struct __storeu_si64 *)__p)->__v = ((__v2di)__b)[0];

  struct __storeu_si32 {
    int __v;
  } __attribute__((__packed__, __may_alias__));
  ((struct __storeu_si32 *)__p)->__v = ((__v4si)__b)[0];

  struct __storeu_si16 {
    short __v;
  } __attribute__((__packed__, __may_alias__));
  ((struct __storeu_si16 *)__p)->__v = ((__v8hi)__b)[0];

  __builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n, __p);

  struct __mm_storel_epi64_struct {
    long long __u;
  } __attribute__((__packed__, __may_alias__));
  ((struct __mm_storel_epi64_struct *)__p)->__u = __a[0];

  __builtin_nontemporal_store((__v2df)__a, (__v2df *)__p);
  __builtin_nontemporal_store((__v2di)__a, (__v2di *)__p);
static __inline__ void
  __builtin_ia32_movnti((int *)__p, __a);

static __inline__ void
    __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
_mm_stream_si64(void *__p, long long __a) {
  __builtin_ia32_movnti64((long long *)__p, __a);
#if defined(__cplusplus)
#if defined(__cplusplus)

  return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b);
  return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b);
  return (__m128i)__builtin_ia32_packuswb128((__v8hi)__a, (__v8hi)__b);
#define _mm_extract_epi16(a, imm)                                              \
  ((int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a),      \
                                                    (int)(imm)))

#define _mm_insert_epi16(a, b, imm)                                            \
  ((__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b),        \
                                        (int)(imm)))

  return __builtin_ia32_pmovmskb128((__v16qi)__a);
#define _mm_shuffle_epi32(a, imm)                                              \
  ((__m128i)__builtin_ia32_pshufd((__v4si)(__m128i)(a), (int)(imm)))

#define _mm_shufflelo_epi16(a, imm)                                            \
  ((__m128i)__builtin_ia32_pshuflw((__v8hi)(__m128i)(a), (int)(imm)))

#define _mm_shufflehi_epi16(a, imm)                                            \
  ((__m128i)__builtin_ia32_pshufhw((__v8hi)(__m128i)(a), (int)(imm)))

  return (__m128i)__builtin_shufflevector(
      (__v16qi)__a, (__v16qi)__b, 8, 16 + 8, 9, 16 + 9, 10, 16 + 10, 11,
      16 + 11, 12, 16 + 12, 13, 16 + 13, 14, 16 + 14, 15, 16 + 15);
  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8 + 4, 5,
                                          8 + 5, 6, 8 + 6, 7, 8 + 7);
  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4 + 2, 3,
                                          4 + 3);
  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2 + 1);
  return (__m128i)__builtin_shufflevector(
      (__v16qi)__a, (__v16qi)__b, 0, 16 + 0, 1, 16 + 1, 2, 16 + 2, 3, 16 + 3, 4,
      16 + 4, 5, 16 + 5, 6, 16 + 6, 7, 16 + 7);
  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8 + 0, 1,
                                          8 + 1, 2, 8 + 2, 3, 8 + 3);
  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4 + 0, 1,
                                          4 + 1);
  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2 + 0);
  return (__m64)__a[0];
  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2 + 1);
  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2 + 0);
  return __builtin_ia32_movmskpd((__v2df)__a);

#define _mm_shuffle_pd(a, b, i)                                                \
  ((__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b),  \
                                  (int)(i)))

  return (__m128)__a;
  return (__m128i)__a;
  return (__m128d)__a;
  return (__m128i)__a;
  return (__m128)__a;
  return (__m128d)__a;

#define _mm_cmp_pd(a, b, c)                                                    \
  ((__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b),   \
                                 (c)))

#define _mm_cmp_sd(a, b, c)                                                    \
  ((__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b),   \
                                 (c)))

#if defined(__cplusplus)
#if defined(__cplusplus)

#undef __DEFAULT_FN_ATTRS
#undef __DEFAULT_FN_ATTRS_CONSTEXPR

#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))

#define _MM_DENORMALS_ZERO_ON (0x0040U)
#define _MM_DENORMALS_ZERO_OFF (0x0000U)
#define _MM_DENORMALS_ZERO_MASK (0x0040U)

#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
#define _MM_SET_DENORMALS_ZERO_MODE(x)                                         \
  (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
static __inline__ double __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtsd_f64(__m128d __a)
Returns the low-order element of a 128-bit vector of [2 x double] as a double-precision floating-poin...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi16(__m128i __a, __m128i __b)
Unpacks the low-order (index 0-3) values from each of the two 128-bit vectors of [8 x i16] and interl...
static __inline__ void __DEFAULT_FN_ATTRS _mm_store_pd1(double *__dp, __m128d __a)
Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to the upper and lower 64 bits of a...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_castpd_si128(__m128d __a)
Casts a 128-bit floating-point vector of [2 x double] into a 128-bit integer vector.
static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_movepi64_pi64(__m128i __a)
Returns the lower 64 bits of a 128-bit integer vector as a 64-bit integer.
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_div_pd(__m128d __a, __m128d __b)
Performs an element-by-element division of two 128-bit vectors of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setr_pd(double __w, double __x)
Constructs a 128-bit floating-point vector of [2 x double], initialized in reverse order with the spe...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi16(__m128i __a, __m128i __b)
Converts, with saturation, 16-bit signed integers from both 128-bit integer vector operands into 8-bi...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_sd(__m128d __a, __m128d __b)
Subtracts the lower double-precision value of the second operand from the lower double-precision valu...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu8(__m128i __a, __m128i __b)
Compares corresponding elements of two 128-bit unsigned [16 x i8] vectors, saving the smaller value f...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si32(void const *__a)
Loads a 32-bit integer value to the low element of a 128-bit integer vector and clears the upper elem...
static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_si128(void *__p, __m128i __a)
Stores a 128-bit integer vector to a 128-bit aligned memory location.
static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_epi8(__m128i __a)
Copies the values of the most significant bits from each 8-bit element in a 128-bit integer vector of...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi32(__m128i __a, __m128i __count)
Left-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_and_si128(__m128i __a, __m128i __b)
Performs a bitwise AND of two 128-bit integer vectors.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_div_sd(__m128d __a, __m128d __b)
Divides the lower double-precision value of the first operand by the lower double-precision value of ...
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadu_pd(double const *__dp)
Loads a 128-bit floating-point vector of [2 x double] from an unaligned memory location.
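Example (illustrative sketch, not part of the header; the array values are arbitrary): an unaligned load/store round trip with _mm_loadu_pd and _mm_storeu_pd.
#include <emmintrin.h>
#include <stdio.h>
int main(void) {
  double src[3] = {0.5, 1.5, 2.5};
  double dst[2];
  /* src + 1 need not be 16-byte aligned; the unaligned load/store allow that. */
  __m128d v = _mm_loadu_pd(src + 1);
  _mm_storeu_pd(dst, v);
  printf("%f %f\n", dst[0], dst[1]); /* 1.5 2.5 */
  return 0;
}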
static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtsi128_si64(__m128i __a)
Moves the least significant 64 bits of a vector of [2 x i64] to a 64-bit signed integer value.
static __inline__ void __DEFAULT_FN_ATTRS _mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p)
Moves bytes selected by the mask from the first operand to the specified unaligned memory location.
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi8(__m128i __a, __m128i __b)
Adds, with saturation, the corresponding elements of two 128-bit signed [16 x i8] vectors,...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set_sd(double __w)
Constructs a 128-bit floating-point vector of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set_pd1(double __w)
Constructs a 128-bit floating-point vector of [2 x double], with each of the two double-precision flo...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_pd(double const *__dp)
Loads a 128-bit floating-point vector of [2 x double] from an aligned memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi8(__m128i __a, __m128i __b)
Compares each of the corresponding 8-bit values of the 128-bit integer vectors for equality.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi32(__m128i __a, int __count)
Right-shifts each of 32-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi16(__m128i __a, __m128i __b)
Compares each of the corresponding signed 16-bit values of the 128-bit integer vectors to determine i...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi16(__m128i __a, __m128i __b)
Subtracts the corresponding 16-bit integer values in the operands.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_epi64(__m128i_u *__p, __m128i __a)
Stores the lower 64 bits of a 128-bit integer vector of [2 x i64] to a memory location.
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_sd(__m128d __a, __m128d __b)
Adds lower double-precision values in both operands and returns the sum in the lower 64 bits of the r...
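Example (illustrative sketch, not part of the header): _mm_add_sd only touches the low element; the upper element is copied from the first operand.
#include <emmintrin.h>
#include <stdio.h>
int main(void) {
  __m128d a = _mm_set_pd(10.0, 1.0); /* low = 1.0, high = 10.0 */
  __m128d b = _mm_set_pd(20.0, 2.0); /* low = 2.0, high = 20.0 */
  __m128d r = _mm_add_sd(a, b);      /* low: 1.0 + 2.0, high: taken from a */
  double out[2];
  _mm_storeu_pd(out, r);
  printf("low = %f, high = %f\n", out[0], out[1]); /* 3.0, 10.0 */
  return 0;
}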
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi32(__m128i __a, __m128i __b)
Compares each of the corresponding signed 32-bit values of the 128-bit integer vectors to determine i...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_or_si128(__m128i __a, __m128i __b)
Performs a bitwise OR of two 128-bit integer vectors.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si16(void *__p, __m128i __b)
Stores a 16-bit integer value from the low element of a 128-bit integer vector.
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_xor_pd(__m128d __a, __m128d __b)
Performs a bitwise XOR of two 128-bit vectors of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_pd(__m128d __a, __m128d __b)
Performs element-by-element comparison of the two 128-bit vectors of [2 x double] and returns a vecto...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_sd(double const *__dp)
Loads a 64-bit double-precision value to the low element of a 128-bit integer vector and clears the u...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si16(void const *__a)
Loads a 16-bit integer value to the low element of a 128-bit integer vector and clears the upper elem...
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_mul_su32(__m64 __a, __m64 __b)
Multiplies 32-bit unsigned integer values contained in the lower bits of the two 64-bit integer vecto...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_andnot_si128(__m128i __a, __m128i __b)
Performs a bitwise AND of two 128-bit integer vectors, using the one's complement of the values conta...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi8(__m128i __a, __m128i __b)
Compares each of the corresponding signed 8-bit values of the 128-bit integer vectors to determine if...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi32(__m128i __a, __m128i __count)
Right-shifts each of 32-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set1_epi16(short __w)
Initializes all values in a 128-bit vector of [8 x i16] with the specified 16-bit value.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi16(__m128i __a, __m128i __b)
Compares each of the corresponding 16-bit values of the 128-bit integer vectors for equality.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi16(__m128i __a, __m128i __b)
Multiplies the corresponding elements of two signed [8 x i16] vectors, saving the lower 16 bits of ea...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_epi32(__m128i __a, __m128i __b)
Subtracts the corresponding 32-bit integer values in the operands.
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi16(__m128i __a, int __count)
Right-shifts each 16-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu8(__m128i __a, __m128i __b)
Compares corresponding elements of two 128-bit unsigned [16 x i8] vectors, saving the greater value f...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu16(__m128i __a, __m128i __b)
Computes the rounded averages of corresponding elements of two 128-bit unsigned [8 x i16] vectors,...
static __inline__ void __DEFAULT_FN_ATTRS _mm_store1_pd(double *__dp, __m128d __a)
Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to the upper and lower 64 bits of a...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi16(__m128i __a, __m128i __count)
Right-shifts each of 16-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi8(__m128i __a, __m128i __b)
Unpacks the low-order (index 0-7) values from two 128-bit vectors of [16 x i8] and interleaves them i...
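Example (illustrative sketch, not part of the header; byte values are arbitrary): interleaving the low 8 bytes of two vectors with _mm_unpacklo_epi8.
#include <emmintrin.h>
#include <stdio.h>
int main(void) {
  unsigned char a[16] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
  unsigned char b[16] = {100, 101, 102, 103, 104, 105, 106, 107,
                         108, 109, 110, 111, 112, 113, 114, 115};
  unsigned char r[16];
  __m128i va = _mm_loadu_si128((const __m128i *)a);
  __m128i vb = _mm_loadu_si128((const __m128i *)b);
  /* Result byte order: a0, b0, a1, b1, ..., a7, b7. */
  _mm_storeu_si128((__m128i *)r, _mm_unpacklo_epi8(va, vb));
  for (int i = 0; i < 16; ++i)
    printf("%d ", r[i]); /* 0 100 1 101 ... 7 107 */
  printf("\n");
  return 0;
}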
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_or_pd(__m128d __a, __m128d __b)
Performs a bitwise OR of two 128-bit vectors of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi16(__m128i __a, __m128i __b)
Compares each of the corresponding signed 16-bit values of the 128-bit integer vectors to determine i...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepi32_pd(__m128i __a)
Converts the lower two integer elements of a 128-bit vector of [4 x i32] into two double-precision fl...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set_epi64(__m64 __q1, __m64 __q0)
Initializes both 64-bit values in a 128-bit vector of [2 x i64] with the specified 64-bit integer val...
#define __DEFAULT_FN_ATTRS
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi8(__m128i __a, __m128i __b)
Subtracts, with saturation, corresponding 8-bit signed integer values in the input and returns the di...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setzero_si128(void)
Creates a 128-bit integer vector initialized to zero.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi64(__m128i __a, __m128i __b)
Unpacks the low-order 64-bit elements from two 128-bit vectors of [2 x i64] and interleaves them into...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi32(__m128i __a, int __count)
Right-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set_epi64x(long long __q1, long long __q0)
Initializes both 64-bit values in a 128-bit vector of [2 x i64] with the specified 64-bit integer val...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_castpd_ps(__m128d __a)
Casts a 128-bit floating-point vector of [2 x double] into a 128-bit floating-point vector of [4 x fl...
static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_pd(__m128d __a)
Extracts the sign bits of the double-precision values in the 128-bit vector of [2 x double],...
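Example (illustrative sketch, not part of the header): combining a packed compare with _mm_movemask_pd to branch on per-lane results.
#include <emmintrin.h>
#include <stdio.h>
int main(void) {
  __m128d a = _mm_set_pd(4.0, -1.0); /* low = -1.0, high = 4.0 */
  __m128d b = _mm_set_pd(3.0, 2.0);  /* low =  2.0, high = 3.0 */
  __m128d lt = _mm_cmplt_pd(a, b);   /* all-ones where a < b, zero otherwise */
  int mask = _mm_movemask_pd(lt);    /* bit 0 = low lane, bit 1 = high lane */
  printf("mask = %d\n", mask);       /* -1 < 2 but 4 >= 3, so mask = 1 */
  if (mask & 1)
    printf("low element of a is smaller\n");
  return 0;
}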
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_castps_pd(__m128 __a)
Casts a 128-bit floating-point vector of [4 x float] into a 128-bit floating-point vector of [2 x dou...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi32(__m128i __a, __m128i __b)
Compares each of the corresponding 32-bit values of the 128-bit integer vectors for equality.
void _mm_mfence(void)
Forces strong memory ordering (serialization) between load and store instructions preceding this inst...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set_pd(double __w, double __x)
Constructs a 128-bit floating-point vector of [2 x double] initialized with the specified double-prec...
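Example (illustrative sketch, not part of the header): _mm_set_pd takes its arguments high-element first, while _mm_setr_pd takes them in memory order.
#include <emmintrin.h>
#include <stdio.h>
int main(void) {
  double out[2];
  /* _mm_set_pd(w, x): x becomes element 0 (low), w becomes element 1 (high). */
  _mm_storeu_pd(out, _mm_set_pd(7.0, 3.0));
  printf("set_pd:  [%f, %f]\n", out[0], out[1]); /* [3.0, 7.0] */
  /* _mm_setr_pd(w, x): arguments in memory order, w is element 0. */
  _mm_storeu_pd(out, _mm_setr_pd(7.0, 3.0));
  printf("setr_pd: [%f, %f]\n", out[0], out[1]); /* [7.0, 3.0] */
  return 0;
}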
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu16(__m128i __a, __m128i __b)
Subtracts, with saturation, corresponding 16-bit unsigned integer values in the input and returns the...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si32(void *__p, __m128i __b)
Stores a 32-bit integer value from the low element of a 128-bit integer vector.
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cvtpd_pi32(__m128d __a)
Converts the two double-precision floating-point elements of a 128-bit vector of [2 x double] into tw...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setr_epi64(__m64 __q0, __m64 __q1)
Constructs a 128-bit integer vector, initialized in reverse order with the specified 64-bit integral ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi64(__m128i __a, __m128i __b)
Unpacks the high-order 64-bit elements from two 128-bit vectors of [2 x i64] and interleaves them int...
static __inline__ int __DEFAULT_FN_ATTRS _mm_cvttsd_si32(__m128d __a)
Converts the low-order element of a [2 x double] vector into a 32-bit signed truncated (rounded towar...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_epi64(__m128i __a, __m128i __b)
Subtracts the corresponding elements of two [2 x i64] vectors.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi64_si128(long long __a)
Returns a vector of [2 x i64] where the lower element is the input operand and the upper element is z...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtss_sd(__m128d __a, __m128 __b)
Converts the lower single-precision floating-point element of a 128-bit vector of [4 x float],...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi32(__m128i __a, __m128i __b)
Unpacks the low-order (index 0,1) values from two 128-bit vectors of [4 x i32] and interleaves them i...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi32(__m128i __a, __m128i __b)
Compares each of the corresponding signed 32-bit values of the 128-bit integer vectors to determine i...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_pd(__m128d __a)
Calculates the square root of each of the two values stored in a 128-bit vector of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mul_pd(__m128d __a, __m128d __b)
Multiplies two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi16(__m128i __a, __m128i __b)
Unpacks the high-order (index 4-7) values from two 128-bit vectors of [8 x i16] and interleaves them ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtpd_epi32(__m128d __a)
Converts the two double-precision floating-point elements of a 128-bit vector of [2 x double] into tw...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_undefined_si128(void)
Generates a 128-bit vector of [4 x i32] with unspecified content.
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_madd_epi16(__m128i __a, __m128i __b)
Multiplies the corresponding elements of two 128-bit signed [8 x i16] vectors, producing eight interm...
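Example (illustrative sketch, not part of the header; input values are arbitrary): _mm_madd_epi16 produces pairwise 16x16 products summed into 32-bit lanes, a common dot-product building block.
#include <emmintrin.h>
#include <stdio.h>
int main(void) {
  short a[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  short b[8] = {10, 20, 30, 40, 50, 60, 70, 80};
  int r[4];
  __m128i va = _mm_loadu_si128((const __m128i *)a);
  __m128i vb = _mm_loadu_si128((const __m128i *)b);
  /* r[0] = 1*10 + 2*20, r[1] = 3*30 + 4*40, and so on. */
  _mm_storeu_si128((__m128i *)r, _mm_madd_epi16(va, vb));
  printf("%d %d %d %d\n", r[0], r[1], r[2], r[3]); /* 50 250 610 1130 */
  return 0;
}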
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu16(__m128i __a, __m128i __b)
Adds, with saturation, the corresponding elements of two 128-bit unsigned [8 x i16] vectors,...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi32_si128(int __a)
Returns a vector of [4 x i32] where the lowest element is the input operand and the remaining element...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set1_epi64x(long long __q)
Initializes both values in a 128-bit integer vector with the specified 64-bit integer value.
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_sub_si64(__m64 __a, __m64 __b)
Subtracts signed or unsigned 64-bit integer values and writes the difference to the corresponding bit...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si64(void const *__a)
Loads a 64-bit integer value to the low element of a 128-bit integer vector and clears the upper elem...
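Example (illustrative sketch, not part of the header; assumes a little-endian x86 target): _mm_loadu_si64 fills only the low 64 bits and clears the upper half.
#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>
int main(void) {
  uint8_t bytes[8] = {1, 2, 3, 4, 5, 6, 7, 8}; /* no alignment required */
  __m128i v = _mm_loadu_si64(bytes);
  unsigned long long out[2];
  _mm_storeu_si128((__m128i *)out, v);
  /* The high 64 bits print as 0. */
  printf("low = 0x%llx, high = 0x%llx\n", out[0], out[1]);
  return 0;
}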
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi64(__m128i __a, int __count)
Right-shifts each of 64-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi16(__m128i __a, int __count)
Left-shifts each 16-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7, char __b8, char __b9, char __b10, char __b11, char __b12, char __b13, char __b14, char __b15)
Constructs a 128-bit integer vector, initialized in reverse order with the specified 8-bit integral v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtsd_ss(__m128 __a, __m128d __b)
Converts the lower double-precision floating-point element of a 128-bit vector of [2 x double],...
static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsd_si32(__m128d __a)
Converts the low-order element of a 128-bit vector of [2 x double] into a 32-bit signed integer value...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadl_pd(__m128d __a, double const *__dp)
Loads a double-precision value into the low-order bits of a 128-bit vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu8(__m128i __a, __m128i __b)
Adds, with saturation, the corresponding elements of two 128-bit unsigned [16 x i8] vectors,...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi16(__m128i __a, __m128i __b)
Adds, with saturation, the corresponding elements of two 128-bit signed [8 x i16] vectors,...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi16(__m128i __a, __m128i __b)
Subtracts, with saturation, corresponding 16-bit signed integer values in the input and returns the d...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi8(__m128i __a, __m128i __b)
Subtracts the corresponding 8-bit integer values in the operands.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_undefined_pd(void)
Constructs a 128-bit floating-point vector of [2 x double] with unspecified content.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi32(__m128i __a, __m128i __b)
Unpacks the high-order (index 2,3) values from two 128-bit vectors of [4 x i32] and interleaves them ...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtpd_ps(__m128d __a)
Converts the two double-precision floating-point elements of a 128-bit vector of [2 x double] into tw...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_move_epi64(__m128i __a)
Moves the lower 64 bits of a 128-bit integer vector to a 128-bit integer vector, zeroing the upper bi...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epi16(__m128i __a, __m128i __b)
Multiplies the corresponding elements of two signed [8 x i16] vectors, saving the upper 16 bits of ea...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_castps_si128(__m128 __a)
Casts a 128-bit floating-point vector of [4 x float] into a 128-bit integer vector.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set1_epi64(__m64 __q)
Initializes both values in a 128-bit vector of [2 x i64] with the specified 64-bit value.
static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_and_pd(__m128d __a, __m128d __b)
Performs a bitwise AND of two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epu16(__m128i __a, __m128i __b)
Multiplies the corresponding elements of two unsigned [8 x i16] vectors, saving the upper 16 bits of ...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_move_sd(__m128d __a, __m128d __b)
Constructs a 128-bit floating-point vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi16(__m128i __a, __m128i __b)
Compares corresponding elements of two 128-bit signed [8 x i16] vectors, saving the greater value fro...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi64(__m128i __a, __m128i __count)
Left-shifts each 64-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi16(__m128i __a, __m128i __b)
Converts, with saturation, 16-bit signed integers from both 128-bit integer vector operands into 8-bi...
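Example (illustrative sketch, not part of the header; input values are arbitrary): _mm_packs_epi16 clamps each 16-bit value to the signed 8-bit range.
#include <emmintrin.h>
#include <stdio.h>
int main(void) {
  short a[8] = {1, -1, 127, 128, 300, -300, 0, 42};
  short b[8] = {5, 6, 7, 8, 9, 10, 11, 12};
  signed char r[16];
  __m128i va = _mm_loadu_si128((const __m128i *)a);
  __m128i vb = _mm_loadu_si128((const __m128i *)b);
  /* Values are clamped to [-128, 127]; a fills r[0..7], b fills r[8..15]. */
  _mm_storeu_si128((__m128i *)r, _mm_packs_epi16(va, vb));
  for (int i = 0; i < 8; ++i)
    printf("%d ", r[i]); /* 1 -1 127 127 127 -128 0 42 */
  printf("\n");
  return 0;
}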
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi16(__m128i __a, __m128i __count)
Right-shifts each 16-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadr_pd(double const *__dp)
Loads two double-precision values, in reverse order, from an aligned memory location into a 128-bit v...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setzero_pd(void)
Constructs a 128-bit floating-point vector of [2 x double] initialized to zero.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set_epi32(int __i3, int __i2, int __i1, int __i0)
Initializes the 32-bit values in a 128-bit vector of [4 x i32] with the specified 32-bit integer valu...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mul_sd(__m128d __a, __m128d __b)
Multiplies lower double-precision values in both operands and returns the product in the lower 64 bit...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short __w2, short __w1, short __w0)
Initializes the 16-bit values in a 128-bit vector of [8 x i16] with the specified 16-bit integer valu...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeh_pd(double *__dp, __m128d __a)
Stores the upper 64 bits of a 128-bit vector of [2 x double] to a memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi32(__m128i __a, int __count)
Left-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
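Example (illustrative sketch, not part of the header): _mm_slli_epi32 shifts every 32-bit lane by the same count.
#include <emmintrin.h>
#include <stdio.h>
int main(void) {
  int a[4] = {1, 2, 3, 4};
  int r[4];
  __m128i v = _mm_loadu_si128((const __m128i *)a);
  /* Each lane is shifted left by 4 bits, i.e. multiplied by 16. */
  _mm_storeu_si128((__m128i *)r, _mm_slli_epi32(v, 4));
  printf("%d %d %d %d\n", r[0], r[1], r[2], r[3]); /* 16 32 48 64 */
  return 0;
}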
void _mm_lfence(void)
Forces strong memory ordering (serialization) between load instructions preceding this instruction an...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short __w5, short __w6, short __w7)
Constructs a 128-bit integer vector, initialized in reverse order with the specified 16-bit integral ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sad_epu8(__m128i __a, __m128i __b)
Computes the absolute differences of corresponding 8-bit integer values in two 128-bit vectors.
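Example (illustrative sketch, not part of the header; byte values are arbitrary): _mm_sad_epu8 yields one 16-bit sum of absolute differences per 8-byte half, stored in the low bits of each 64-bit lane.
#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>
int main(void) {
  uint8_t a[16] = {10, 20, 30, 40, 50, 60, 70, 80, 1, 2, 3, 4, 5, 6, 7, 8};
  uint8_t b[16] = {13, 17, 30, 45, 50, 66, 70, 80, 8, 7, 6, 5, 4, 3, 2, 1};
  __m128i va = _mm_loadu_si128((const __m128i *)a);
  __m128i vb = _mm_loadu_si128((const __m128i *)b);
  __m128i sad = _mm_sad_epu8(va, vb);
  /* Extract the two 16-bit sums from word positions 0 and 4. */
  int low = _mm_extract_epi16(sad, 0);
  int high = _mm_extract_epi16(sad, 4);
  printf("low half SAD = %d, high half SAD = %d\n", low, high); /* 17 and 32 */
  return 0;
}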
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
#define __DEFAULT_FN_ATTRS_CONSTEXPR
static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_pd(double *__dp, __m128d __a)
Stores the lower 64 bits of a 128-bit vector of [2 x double] to a memory location.
static __inline__ void __DEFAULT_FN_ATTRS _mm_store_pd(double *__dp, __m128d __a)
Moves packed double-precision values from a 128-bit vector of [2 x double] to a memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si128(__m128i_u const *__p)
Moves packed integer values from an unaligned 128-bit memory location to elements in a 128-bit intege...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi32(__m128i __a, __m128i __b)
Converts, with saturation, 32-bit signed integers from both 128-bit integer vector operands into 16-b...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi16(__m128i __a, __m128i __count)
Left-shifts each 16-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi8(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [16 x i8], saving the lower 8 bits of each ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_load_si128(__m128i const *__p)
Moves packed integer values from an aligned 128-bit memory location to elements in a 128-bit integer ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_pd(void *__p, __m128d __a)
Stores a 128-bit floating point vector of [2 x double] to a 128-bit aligned memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi8(__m128i __a, __m128i __b)
Compares each of the corresponding signed 8-bit values of the 128-bit integer vectors to determine if...
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_add_si64(__m64 __a, __m64 __b)
Adds two signed or unsigned 64-bit integer values, returning the lower 64 bits of the sum.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_epi32(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [4 x i32], saving the lower 32 bits of each...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epu32(__m128i __a, __m128i __b)
Multiplies 32-bit unsigned integer values contained in the lower bits of the corresponding elements o...
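Example (illustrative sketch, not part of the header; input values are arbitrary): _mm_mul_epu32 multiplies lanes 0 and 2 of each operand as unsigned 32-bit integers and produces two full 64-bit products.
#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>
int main(void) {
  uint32_t a[4] = {0xFFFFFFFFu, 111, 3, 222};
  uint32_t b[4] = {2, 333, 5, 444};
  unsigned long long r[2];
  __m128i va = _mm_loadu_si128((const __m128i *)a);
  __m128i vb = _mm_loadu_si128((const __m128i *)b);
  /* Lanes 1 and 3 are ignored; the products do not wrap. */
  _mm_storeu_si128((__m128i *)r, _mm_mul_epu32(va, vb));
  printf("%llu %llu\n", r[0], r[1]); /* 8589934590 and 15 */
  return 0;
}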
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi64(__m128i __a, __m128i __count)
Right-shifts each of 64-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtps_pd(__m128 __a)
Converts the lower two single-precision floating-point elements of a 128-bit vector of [4 x float] in...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttps_epi32(__m128 __a)
Converts a vector of [4 x float] into four signed truncated (rounded toward zero) 32-bit integers,...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadh_pd(__m128d __a, double const *__dp)
Loads a double-precision value into the high-order bits of a 128-bit vector of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_sd(__m128d __a, __m128d __b)
Calculates the square root of the lower double-precision value of the second operand and returns it i...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtsi32_sd(__m128d __a, int __b)
Converts a 32-bit signed integer value, in the second parameter, into a double-precision floating-poi...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_sd(__m128d __a, __m128d __b)
Compares lower 64-bit double-precision values of both operands, and returns the lesser of the pair of...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi64(__m128i __a, int __count)
Left-shifts each 64-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_pd(__m128d __a, __m128d __b)
Adds two 128-bit vectors of [2 x double].
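Example (illustrative sketch, not part of the header; the helper name and data are made up for the example): a simple loop that adds two double arrays two lanes at a time with _mm_add_pd, with a scalar tail for odd lengths.
#include <emmintrin.h>
#include <stdio.h>
static void add_arrays(const double *x, const double *y, double *out, int n) {
  int i = 0;
  for (; i + 2 <= n; i += 2) {
    __m128d vx = _mm_loadu_pd(x + i);
    __m128d vy = _mm_loadu_pd(y + i);
    _mm_storeu_pd(out + i, _mm_add_pd(vx, vy));
  }
  for (; i < n; ++i) /* remaining element, if n is odd */
    out[i] = x[i] + y[i];
}
int main(void) {
  double x[5] = {1, 2, 3, 4, 5}, y[5] = {10, 20, 30, 40, 50}, out[5];
  add_arrays(x, y, out, 5);
  for (int i = 0; i < 5; ++i)
    printf("%g ", out[i]); /* 11 22 33 44 55 */
  printf("\n");
  return 0;
}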
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si64(void *__p, __m128i __b)
Stores a 64-bit integer value from the low element of a 128-bit integer vector.
static __inline__ void __DEFAULT_FN_ATTRS _mm_store_sd(double *__dp, __m128d __a)
Stores the lower 64 bits of a 128-bit vector of [2 x double] to a memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_movpi64_epi64(__m64 __a)
Moves the 64-bit operand to a 128-bit integer vector, zeroing the upper bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load1_pd(double const *__dp)
Loads a double-precision floating-point value from a specified memory location and duplicates it to b...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpackhi_pd(__m128d __a, __m128d __b)
Unpacks the high-order 64-bit elements from two 128-bit vectors of [2 x double] and interleaves them ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_pd(__m128d __a, __m128d __b)
Performs element-by-element comparison of the two 128-bit vectors of [2 x double] and returns a vecto...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi16(__m128i __a, __m128i __b)
Compares corresponding elements of two 128-bit signed [8 x i16] vectors, saving the smaller value fro...
static __inline__ void __DEFAULT_FN_ATTRS _mm_store_si128(__m128i *__p, __m128i __b)
Stores a 128-bit integer vector to a memory location aligned on a 128-bit boundary.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setr_epi32(int __i0, int __i1, int __i2, int __i3)
Constructs a 128-bit integer vector, initialized in reverse order with the specified 32-bit integral ...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpacklo_pd(__m128d __a, __m128d __b)
Unpacks the low-order 64-bit elements from two 128-bit vectors of [2 x double] and interleaves them i...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] for...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_sd(__m128d __a, __m128d __b)
Compares lower 64-bit double-precision values of both operands, and returns the greater of the pair o...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadl_epi64(__m128i_u const *__p)
Returns a vector of [2 x i64] where the lower element is taken from the lower element of the operand,...
void _mm_pause(void)
Indicates that a spin loop is being executed for the purposes of optimizing power consumption during ...
static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsi128_si32(__m128i __a)
Moves the least significant 32 bits of a vector of [4 x i32] to a 32-bit signed integer value.
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_castsi128_ps(__m128i __a)
Casts a 128-bit integer vector into a 128-bit floating-point vector of [4 x float].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu8(__m128i __a, __m128i __b)
Computes the rounded averages of corresponding elements of two 128-bit unsigned [16 x i8] vectors,...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_castsi128_pd(__m128i __a)
Casts a 128-bit integer vector into a 128-bit floating-point vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_epi64(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [2 x i64], saving the lower 64 bits of each...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storer_pd(double *__dp, __m128d __a)
Stores two double-precision values, in reverse order, from a 128-bit vector of [2 x double] to a 16-b...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_andnot_pd(__m128d __a, __m128d __b)
Performs a bitwise AND of two 128-bit vectors of [2 x double], using the one's complement of the valu...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi8(__m128i __a, __m128i __b)
Unpacks the high-order (index 8-15) values from two 128-bit vectors of [16 x i8] and interleaves them...
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cvttpd_pi32(__m128d __a)
Converts the two double-precision floating-point elements of a 128-bit vector of [2 x double] into tw...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepi32_ps(__m128i __a)
Converts a vector of [4 x i32] into a vector of [4 x float].
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtpi32_pd(__m64 __a)
Converts the two signed 32-bit integer elements of a 64-bit vector of [2 x i32] into two double-preci...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi16(__m128i __a, int __count)
Right-shifts each of 16-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_pd(double *__dp, __m128d __a)
Stores a 128-bit vector of [2 x double] into an unaligned memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set1_epi32(int __i)
Initializes all values in a 128-bit vector of [4 x i32] with the specified 32-bit value.
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomineq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b9, char __b8, char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0)
Initializes the 8-bit values in a 128-bit vector of [16 x i8] with the specified 8-bit integer values...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi16(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [8 x i16], saving the lower 16 bits of each...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set1_pd(double __w)
Constructs a 128-bit floating-point vector of [2 x double], with each of the two double-precision flo...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu8(__m128i __a, __m128i __b)
Subtracts, with saturation, corresponding 8-bit unsigned integer values in the input and returns the ...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si128(__m128i_u *__p, __m128i __b)
Stores a 128-bit integer vector to an unaligned memory location.
double __m128d __attribute__((__vector_size__(16), __aligned__(16)))
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi32(__m128i __a, __m128i __count)
Right-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_pd(__m128d __a, __m128d __b)
Subtracts two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttpd_epi32(__m128d __a)
Converts the two double-precision floating-point elements of a 128-bit vector of [2 x double] into tw...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtps_epi32(__m128 __a)
Converts a vector of [4 x float] into a vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set1_epi8(char __b)
Initializes all values in a 128-bit vector of [16 x i8] with the specified 8-bit value.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_xor_si128(__m128i __a, __m128i __b)
Performs a bitwise exclusive OR of two 128-bit integer vectors.
void _mm_clflush(void const *__p)
The cache line containing __p is flushed and invalidated from all caches in the coherency domain.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_setzero_si64(void)
Constructs a 64-bit integer vector initialized to zero.