lib/div64.c
/*
 * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
 *
 * Based on former do_div() implementation from asm-parisc/div64.h:
 *	Copyright (C) 1999 Hewlett-Packard Co
 *	Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Generic C version of 64bit/32bit division and modulo, with
 * 64bit result and 32bit remainder.
 *
 * The fast case for (n>>32 == 0) is handled inline by do_div().
 *
 * Code generated for this function might be very inefficient
 * for some CPUs. __div64_32() can be overridden by linking arch-specific
 * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S.
 */
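
/*
 * Illustrative sketch (not part of the original source): do_div() is the
 * usual entry point for this helper. It divides a 64-bit dividend in place
 * by a 32-bit divisor and evaluates to the 32-bit remainder:
 *
 *	u64 ns = 10000000123ULL;
 *	u32 rem = do_div(ns, 1000000000);
 *
 * which leaves ns == 10 and rem == 123.
 */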

#include <linux/module.h>
#include <linux/math64.h>

/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32

uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
{
	uint64_t rem = *n;
	uint64_t b = base;
	uint64_t res, d = 1;
	uint32_t high = rem >> 32;

	/* Produce the quotient's top 32 bits with a single 32-bit divide */
	res = 0;
	if (high >= base) {
		high /= base;
		res = (uint64_t) high << 32;
		rem -= (uint64_t) (high*base) << 32;
	}

	/* Scale the divisor up until it reaches the remainder or bit 63 */
	while ((int64_t)b > 0 && b < rem) {
		b = b+b;
		d = d+d;
	}

	/* Shift-and-subtract: d marks the quotient bit being recovered */
	do {
		if (rem >= b) {
			rem -= b;
			res += d;
		}
		b >>= 1;
		d >>= 1;
	} while (d);

	*n = res;
	return rem;
}

EXPORT_SYMBOL(__div64_32);

#ifndef div_s64_rem
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	u64 quotient;

	if (dividend < 0) {
		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
		*remainder = -*remainder;
		if (divisor > 0)
			quotient = -quotient;
	} else {
		quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
		if (divisor < 0)
			quotient = -quotient;
	}
	return quotient;
}
EXPORT_SYMBOL(div_s64_rem);
#endif
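
/*
 * Illustrative note on the sign convention implemented above: the quotient
 * is truncated towards zero and the remainder takes the sign of the
 * dividend, as in C99 division. A hypothetical caller would see:
 *
 *	s32 r;
 *	s64 q = div_s64_rem(-7, 2, &r);		q == -3, r == -1
 *	q = div_s64_rem(7, -2, &r);		q == -3, r == 1
 */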

/*
 * 64bit divisor, dividend and result: precision is reduced dynamically
 * so that a 32bit divide can be used.
 */
#ifndef div64_u64
u64 div64_u64(u64 dividend, u64 divisor)
{
	u32 high, d;

	high = divisor >> 32;
	if (high) {
		unsigned int shift = fls(high);

		d = divisor >> shift;
		dividend >>= shift;
	} else
		d = divisor;

	return div_u64(dividend, d);
}
EXPORT_SYMBOL(div64_u64);
#endif
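
/*
 * Worked example for div64_u64() above (illustrative): when the divisor
 * does not fit in 32 bits, both operands are shifted right by the same
 * amount, so low divisor bits are discarded and the result approximates
 * the true quotient. With dividend = 1ULL << 33 and divisor =
 * (1ULL << 32) + 1, shift is 1 and div_u64(1ULL << 32, 1U << 31)
 * returns 2, whereas the exact quotient is 1.
 */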

#endif /* BITS_PER_LONG == 32 */

/*
 * Iterative div/mod for use when dividend is not expected to be much
 * bigger than divisor.
 */
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	return __iter_div_u64_rem(dividend, divisor, remainder);
}
EXPORT_SYMBOL(iter_div_u64_rem);
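
/*
 * Illustrative usage sketch (not from this file): the helper iterates
 * rather than performing a full divide, so it only pays off when the
 * quotient is small, e.g. splitting a nanosecond count known to span at
 * most a few seconds:
 *
 *	u64 rem;
 *	u32 sec = iter_div_u64_rem(ns, NSEC_PER_SEC, &rem);
 *
 * Here ns comes from the caller and NSEC_PER_SEC is the usual kernel
 * constant.
 */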