#ifndef __RES_COUNTER_H__
#define __RES_COUNTER_H__

/*
 * Resource Counters
 * Contain common data types and routines for resource accounting
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * See Documentation/cgroups/resource_counter.txt for more
 * info about what this counter is.
 */

#include <linux/cgroup.h>

/*
 * The core object. A cgroup that wishes to account for some
 * resource may embed this counter in its structures and use
 * the helpers described below.
 */

struct res_counter {
	/*
	 * the current resource consumption level
	 */
	unsigned long long usage;
	/*
	 * the maximal value of the usage since the counter was created
	 */
	unsigned long long max_usage;
	/*
	 * the limit that usage cannot exceed
	 */
	unsigned long long limit;
	/*
	 * the limit that usage can exceed
	 */
	unsigned long long soft_limit;
	/*
	 * the number of unsuccessful attempts to consume the resource
	 */
	unsigned long long failcnt;
	/*
	 * the lock to protect all of the above.
	 * the routines below consider this to be IRQ-safe
	 */
	spinlock_t lock;
	/*
	 * Parent counter, used for hierarchical resource accounting
	 */
	struct res_counter *parent;
};
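
/*
 * Illustrative sketch (not part of this header): a controller typically
 * embeds a res_counter in its per-cgroup state and accounts its resource
 * through it. The structure and field names below are hypothetical.
 *
 *	struct foo_cgroup {
 *		struct cgroup_subsys_state css;
 *		struct res_counter res;
 *	};
 */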

#define RESOURCE_MAX (unsigned long long)LLONG_MAX

/**
 * Helpers to interact with userspace:
 * res_counter_read_u64() - returns the value of the specified member.
 * res_counter_read/_write - copy the specified fields of the
 * res_counter struct to/from user space.
 *
 * @counter: the counter in question
 * @member: the field to work with (see RES_xxx below)
 * @buf: the buffer to operate on,...
 * @nbytes: its size...
 * @pos: and the offset.
 */

u64 res_counter_read_u64(struct res_counter *counter, int member);

ssize_t res_counter_read(struct res_counter *counter, int member,
		const char __user *buf, size_t nbytes, loff_t *pos,
		int (*read_strategy)(unsigned long long val, char *s));

typedef int (*write_strategy_fn)(const char *buf, unsigned long long *val);

int res_counter_memparse_write_strategy(const char *buf,
		unsigned long long *res);

int res_counter_write(struct res_counter *counter, int member,
		const char *buffer, write_strategy_fn write_strategy);
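
/*
 * Illustrative sketch (hypothetical caller, reusing the foo_cgroup from
 * the sketch above): a cgroup file handler can report a member with
 * res_counter_read_u64() and accept a new limit in human-readable form
 * (e.g. "64M") by passing res_counter_memparse_write_strategy() as the
 * write strategy.
 *
 *	u64 usage = res_counter_read_u64(&foo->res, RES_USAGE);
 *
 *	ret = res_counter_write(&foo->res, RES_LIMIT, buf,
 *				res_counter_memparse_write_strategy);
 */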

/*
 * the field descriptors. one for each member of res_counter
 */

enum {
	RES_USAGE,
	RES_MAX_USAGE,
	RES_LIMIT,
	RES_FAILCNT,
	RES_SOFT_LIMIT,
};

/*
 * helpers for accounting
 */

void res_counter_init(struct res_counter *counter, struct res_counter *parent);
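
/*
 * Illustrative sketch (hypothetical helper, continuing the foo_cgroup
 * example): each counter is initialized once and chained to its parent's
 * counter (or to NULL for the root) so that hierarchical accounting works.
 *
 *	static void foo_cgroup_init_counter(struct foo_cgroup *foo,
 *					    struct foo_cgroup *parent)
 *	{
 *		res_counter_init(&foo->res, parent ? &parent->res : NULL);
 *	}
 */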

/*
 * charge - try to consume more resource.
 *
 * @counter: the counter
 * @val: the amount of the resource. each controller defines its own
 *       units, e.g. numbers, bytes, Kbytes, etc
 *
 * returns 0 on success and <0 if counter->usage would exceed
 * counter->limit. The _locked call expects counter->lock to be held.
 */

int __must_check res_counter_charge_locked(struct res_counter *counter,
		unsigned long val);
int __must_check res_counter_charge(struct res_counter *counter,
		unsigned long val, struct res_counter **limit_fail_at);
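
/*
 * Illustrative sketch (hypothetical caller): charge the counter before
 * actually handing out the resource. On failure, *limit_fail_at points at
 * the counter in the hierarchy whose limit was hit (and whose failcnt was
 * bumped), and the caller backs off.
 *
 *	struct res_counter *fail_res;
 *
 *	if (res_counter_charge(&foo->res, PAGE_SIZE, &fail_res))
 *		return -ENOMEM;
 */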

/*
 * uncharge - tell that some portion of the resource is released
 *
 * @counter: the counter
 * @val: the amount of the resource
 *
 * these calls check for usage underflow and show a warning on the console.
 * The _locked call expects counter->lock to be held.
 */

void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
void res_counter_uncharge(struct res_counter *counter, unsigned long val);
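
/*
 * Illustrative sketch (hypothetical caller): every successful charge is
 * eventually balanced by an uncharge of the same amount on the release
 * path.
 *
 *	res_counter_uncharge(&foo->res, PAGE_SIZE);
 */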

static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
{
	if (cnt->usage < cnt->limit)
		return true;

	return false;
}

static inline bool res_counter_soft_limit_check_locked(struct res_counter *cnt)
{
	if (cnt->usage < cnt->soft_limit)
		return true;

	return false;
}

/**
 * Get the difference between the usage and the soft limit
 * @cnt: The counter
 *
 * Returns 0 if usage is less than or equal to the soft limit;
 * otherwise, returns the difference between usage and the soft limit.
 */
static inline unsigned long long
res_counter_soft_limit_excess(struct res_counter *cnt)
{
	unsigned long long excess;
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->usage <= cnt->soft_limit)
		excess = 0;
	else
		excess = cnt->usage - cnt->soft_limit;
	spin_unlock_irqrestore(&cnt->lock, flags);
	return excess;
}

/*
 * Helper function to detect if the cgroup is within its limit or
 * not. It's currently called from cgroup_rss_prepare()
 */
static inline bool res_counter_check_under_limit(struct res_counter *cnt)
{
	bool ret;
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	ret = res_counter_limit_check_locked(cnt);
	spin_unlock_irqrestore(&cnt->lock, flags);
	return ret;
}

static inline bool res_counter_check_under_soft_limit(struct res_counter *cnt)
{
	bool ret;
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	ret = res_counter_soft_limit_check_locked(cnt);
	spin_unlock_irqrestore(&cnt->lock, flags);
	return ret;
}

static inline void res_counter_reset_max(struct res_counter *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->max_usage = cnt->usage;
	spin_unlock_irqrestore(&cnt->lock, flags);
}

static inline void res_counter_reset_failcnt(struct res_counter *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->failcnt = 0;
	spin_unlock_irqrestore(&cnt->lock, flags);
}

static inline int res_counter_set_limit(struct res_counter *cnt,
		unsigned long long limit)
{
	unsigned long flags;
	int ret = -EBUSY;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->usage <= limit) {
		cnt->limit = limit;
		ret = 0;
	}
	spin_unlock_irqrestore(&cnt->lock, flags);
	return ret;
}

static inline int
res_counter_set_soft_limit(struct res_counter *cnt,
		unsigned long long soft_limit)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->soft_limit = soft_limit;
	spin_unlock_irqrestore(&cnt->lock, flags);
	return 0;
}

#endif