// < uint32 q: floor(x/y)
// < uint32 r: x mod y
// < x = q*y+r
-#if defined(__GNUC__) && defined(__arm__) && !defined(NO_ASM)
+#if defined(__GNUC__) && defined(__arm__) && !defined(NO_ASM) && 0
extern "C" uint64 divu_6432_3232_ (uint32 xhi, uint32 xlo, uint32 y); // -> Quotient q
#else
extern "C" uint32 divu_6432_3232_ (uint32 xhi, uint32 xlo, uint32 y); // -> Quotient q
var uint32 _r __asm__("%g1"); \
cl_unused (q_zuweisung _q); r_zuweisung _r; \
})
-#elif defined(__GNUC__) && defined(__arm__) && !defined(NO_ASM)
+#elif defined(__GNUC__) && defined(__arm__) && !defined(NO_ASM) && 0
#define divu_6432_3232(xhi,xlo,y,q_zuweisung,r_zuweisung) \
({ var uint64 _q = divu_6432_3232_(xhi,xlo,y); /* extern in Assembler */\
q_zuweisung retval64_r0(_q); \
#else
// Generic (no-asm) divide: given x = 2^32*xhi + xlo, computes
//   q = floor(x/y) and r = x mod y   (see the x = q*y + r contract above)
// by calling the external helper divu_6432_3232_(), which returns the
// quotient and deposits the remainder in the global variable divu_32_rest.
// q_zuweisung / r_zuweisung are assignment-prefix parameters ("zuweisung" =
// German for "assignment"): the caller passes e.g. `q =` so the macro expands
// to `q = divu_6432_3232_(...)`. cl_unused silences the unused-value warning
// when the caller passes an empty q_zuweisung.
// NOTE(review): behavior is only correct when xhi < y (quotient fits in
// 32 bits) — presumably guaranteed by all callers; confirm at call sites.
#define divu_6432_3232(xhi,xlo,y,q_zuweisung,r_zuweisung) \
  { cl_unused (q_zuweisung divu_6432_3232_(xhi,xlo,y)); r_zuweisung divu_32_rest; }
- #if (defined(__m68k__) || defined(__sparc__) || defined(__sparc64__) || defined(__arm__) || (defined(__i386__) && !defined(WATCOM) && !defined(MICROSOFT)) || defined(__x86_64__) || defined(__hppa__)) && !defined(NO_ASM)
+ #if (defined(__m68k__) || defined(__sparc__) || defined(__sparc64__) || (defined(__arm__) && 0) || (defined(__i386__) && !defined(WATCOM) && !defined(MICROSOFT)) || defined(__x86_64__) || defined(__hppa__)) && !defined(NO_ASM)
  // divu_6432_3232_ is implemented externally in assembly
#if defined(__sparc__) || defined(__sparc64__)
extern "C" uint32 _get_g1 (void);
MOV a1,a1,LSR#16 // and back down again
BX lr
+#if 0
// extern uint32 divu_6432_3232_ (uint32 xhi, uint32 xlo, uint32 y); // -> Quotient q
// extern uint32 divu_32_rest; // -> remainder r
// see cl_low_div.cc for algorithm
MOV a2, v4, LSR v3 // remainder = r >> s
ORR a1, a1, v6, ASL #16 // return highlow32(q1,q0)
LDMFD sp!, {v1,v2,v3,v4,v5,v6,pc}
+#endif
// extern uintD* copy_loop_up (uintD* sourceptr, uintD* destptr, uintC count);
// entry