/* i386 assembly versions of the inner-loop fraction routines of Metafont
and MetaPost. Public domain. Included in texmfmp.c.
By Wayne Sullivan <wgs@maths.ucd.ie>. */
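/* Each routine takes two 32-bit integer arguments at 8(%ebp) and 12(%ebp)
   and returns its result in %eax.  In Metafont's fixed-point terms
   (fraction unit 2^28, scaled unit 2^16):
     takefraction(p,q) = round(p*q / 2^28)   takescaled(p,q) = round(p*q / 2^16)
     makefraction(p,q) = round(p*2^28 / q)   makescaled(p,q) = round(p*2^16 / q)
   On overflow a routine sets aritherror and returns +/-0x7fffffff. */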
asm(" .align 4\n"
#ifdef ASM_NEEDS_UNDERSCORE
".globl _ztakefraction\n"
"_ztakefraction:\n"
#else
".globl ztakefraction\n"
"ztakefraction:\n"
#endif
" pushl %ebp\n"
" movl %esp,%ebp\n"
" xorl %ecx,%ecx\n"
" movl 8(%ebp),%eax\n"
" cmpl $0x80000000,%eax\n"
" jz LL5\n"
" imull 12(%ebp)\n"
" orl %edx,%edx\n"
" jns LL2\n"
" negl %edx\n"
" negl %eax\n"
" sbbl %ecx,%edx\n"
" incl %ecx\n"
"LL2:\n"
" addl $0x08000000,%eax\n"
" adcl $0,%edx\n"
" cmpl $0x07ffffff,%edx\n"
" ja LL3\n"
" shrd $28, %edx,%eax\n"
"LL1: jecxz LL4\n"
" negl %eax\n"
"LL4:\n"
" movl %ebp,%esp\n"
" popl %ebp\n"
" ret\n"
"LL5: incl %ecx\n"
"LL3: movl $0x7fffffff,%eax\n"
#ifdef ASM_NEEDS_UNDERSCORE
" movb $1,_aritherror\n"
#else
" movb $1,aritherror\n"
#endif
" jmp LL1\n"
" .align 4, 0x90\n"
#ifdef ASM_NEEDS_UNDERSCORE
".globl _ztakescaled\n"
"_ztakescaled:\n"
#else
".globl ztakescaled\n"
"ztakescaled:\n"
#endif
" pushl %ebp\n"
" movl %esp,%ebp\n"
" movl 8(%ebp),%eax\n"
" xorl %ecx,%ecx\n"
" cmpl $0x80000000,%eax\n"
" jz LL5\n"
" imull 12(%ebp)\n"
" orl %edx,%edx\n"
" jns LL12\n"
" negl %edx\n"
" negl %eax\n"
" sbbl %ecx,%edx\n"
" incl %ecx\n"
"LL12:\n"
" addl $0x00008000,%eax\n"
" adcl $0,%edx\n"
" cmpl $0x00007fff,%edx\n"
" ja LL3\n"
" shrd $16, %edx,%eax\n"
" jecxz LL4\n"
" negl %eax\n"
" jmp LL4\n"
" .align 4, 0x90\n"
#ifdef ASM_NEEDS_UNDERSCORE
".globl _zmakescaled\n"
".globl _zmakefraction\n"
"_zmakescaled:\n"
#else
".globl zmakescaled\n"
".globl zmakefraction\n"
"zmakescaled:\n"
#endif
" movb $16,%cl\n"
" jmp LL30\n"
" .align 4, 0x90\n"
#ifdef ASM_NEEDS_UNDERSCORE
"_zmakefraction:\n"
#else
"zmakefraction:\n"
#endif
" movb $4,%cl\n"
"LL30:\n"
" movb $0,%ch\n"
" pushl %ebp\n"
" movl %esp,%ebp\n"
" pushl %ebx\n"
" movl 8(%ebp),%edx\n"
" xorl %eax,%eax\n"
" orl %edx,%edx\n"
" jns LL32\n"
" inc %ch\n"
" negl %edx\n"
"LL32:\n"
" movl 12(%ebp),%ebx\n"
" orl %ebx,%ebx\n"
" jns LL33\n"
" dec %ch\n"
" negl %ebx\n"
" orl %ebx,%ebx\n"
" js LL34\n"
"LL33:\n"
" orl %edx,%edx\n"
" js LL34\n"
" shrd %cl,%edx,%eax\n"
" shrl %cl,%edx\n"
" cmpl %ebx,%edx\n"
" jae LL34\n"
" divl %ebx\n"
" addl %edx,%edx\n"
" incl %edx\n"
" subl %edx,%ebx\n"
" adcl $0,%eax\n"
" jc LL34\n"
" cmpl $0x7fffffff,%eax\n"
" ja LL34\n"
"LL31: or %ch,%ch\n"
" jz LL35\n"
" negl %eax\n"
"LL35:\n"
" popl %ebx\n"
" movl %ebp,%esp\n"
" popl %ebp\n"
" ret\n"
"LL34: movl $0x7fffffff,%eax\n"
#ifdef ASM_NEEDS_UNDERSCORE
" movb $1,_aritherror\n"
#else
" movb $1,aritherror\n"
#endif
" jmp LL31\n");