#if	defined(__KERNEL__) && defined(_VX_INFO_DEF_)

#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <asm/atomic.h>
#include <asm/param.h>
#include <asm/cpumask.h>

/* context sub struct */

struct _vx_sched {
	spinlock_t tokens_lock;	/* lock for this structure */

	int fill_rate;		/* Fill rate: add X tokens... */
	int interval;		/* Divisor:   per Y jiffies   */
	atomic_t tokens;	/* number of CPU tokens in this context */
	int tokens_min;		/* Limit:     minimum for unhold */
	int tokens_max;		/* Limit:     no more than N tokens */
	uint32_t jiffies;	/* add an integral multiple of Y to this */

	uint64_t ticks;		/* token tick events */
	cpumask_t cpus_allowed;	/* cpu mask for context */
};
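
/*
 * Illustrative sketch only (an assumption about how the fields above
 * are meant to interact, not the in-tree vx_tokens_recalc()): for
 * every full 'interval' jiffies elapsed since sched->jiffies, add
 * 'fill_rate' tokens, capped at 'tokens_max'.  A real caller would
 * hold tokens_lock while updating the bookkeeping fields.
 */
static inline int __vx_tokens_refill_sketch(struct _vx_sched *sched)
{
	uint32_t delta = (uint32_t)jiffies - sched->jiffies;

	if (sched->interval > 0 && delta >= (uint32_t)sched->interval) {
		uint32_t steps = delta / sched->interval;
		int tokens = atomic_read(&sched->tokens)
			+ (int)steps * sched->fill_rate;

		if (tokens > sched->tokens_max)
			tokens = sched->tokens_max;
		atomic_set(&sched->tokens, tokens);
		/* advance by an integral multiple of the interval */
		sched->jiffies += steps * sched->interval;
		/* assumed: count each refill step as a token tick event */
		sched->ticks += steps;
	}
	return atomic_read(&sched->tokens);
}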

static inline void vx_info_init_sched(struct _vx_sched *sched)
{
	/* scheduling; hard code starting values as constants */
	sched->fill_rate	= 1;
	sched->interval		= 4;
	sched->tokens_min	= HZ >> 4;
	sched->tokens_max	= HZ >> 1;
	sched->jiffies		= jiffies;
	sched->tokens_lock	= SPIN_LOCK_UNLOCKED;

	atomic_set(&sched->tokens, HZ >> 2);
	sched->cpus_allowed	= CPU_MASK_ALL;
}

static inline int vx_info_proc_sched(struct _vx_sched *sched, char *buffer)
{
	return sprintf(buffer,
		"Ticks:\t%16llu\n"
		"Token:\t\t%8d\n"
		"FillRate:\t%8d\n"
		"Interval:\t%8d\n"
		"TokensMin:\t%8d\n"
		"TokensMax:\t%8d\n"
		,(unsigned long long)sched->ticks
		,atomic_read(&sched->tokens)
		,sched->fill_rate
		,sched->interval
		,sched->tokens_min
		,sched->tokens_max
		);
}
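
/*
 * For illustration only: assuming HZ=1000, a freshly zeroed vx_info
 * and the defaults set in vx_info_init_sched() above, the buffer
 * would read roughly:
 *
 *	Ticks:	               0
 *	Token:		     250
 *	FillRate:	       1
 *	Interval:	       4
 *	TokensMin:	      62
 *	TokensMax:	     500
 */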


#else	/* _VX_INFO_DEF_ */
#ifndef _VX_SCHED_H
#define _VX_SCHED_H

#include "switch.h"

/*  sched vserver commands */

#define VCMD_set_sched		VC_CMD(SCHED, 1, 2)

struct	vcmd_set_sched_v2 {
	int32_t fill_rate;
	int32_t interval;
	int32_t tokens;
	int32_t tokens_min;
	int32_t tokens_max;
	uint64_t cpu_mask;
};

struct	vcmd_set_sched_v3 {
	uint32_t set_mask;
	int32_t fill_rate;
	int32_t interval;
	int32_t tokens;
	int32_t tokens_min;
	int32_t tokens_max;
	int32_t priority_bias;
};

#define VXSM_FILL_RATE		0x0001
#define VXSM_INTERVAL		0x0002
#define VXSM_TOKENS		0x0010
#define VXSM_TOKENS_MIN		0x0020
#define VXSM_TOKENS_MAX		0x0040
#define VXSM_PRIO_BIAS		0x0100


#define SCHED_KEEP		(-2)

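/*
 * Illustrative sketch only (an assumed helper, not part of this
 * interface): filling vcmd_set_sched_v3.  The set_mask presumably
 * selects which fields the kernel applies; SCHED_KEEP in a value
 * field is assumed to leave the current setting untouched.  How the
 * struct is handed to the kernel (which VCMD_* / command version)
 * is outside the scope of this sketch.
 */
static inline void vcmd_set_sched_v3_example(struct vcmd_set_sched_v3 *data)
{
	data->set_mask	    = VXSM_FILL_RATE | VXSM_INTERVAL;
	data->fill_rate	    = 1;		/* add 1 token ...	*/
	data->interval	    = 4;		/* ... every 4 jiffies	*/
	data->tokens	    = SCHED_KEEP;	/* keep current values	*/
	data->tokens_min    = SCHED_KEEP;
	data->tokens_max    = SCHED_KEEP;
	data->priority_bias = SCHED_KEEP;
}
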
#ifdef	__KERNEL__

extern int vc_set_sched_v1(uint32_t, void __user *);
extern int vc_set_sched(uint32_t, void __user *);


#define VAVAVOOM_RATIO		50

#include "context.h"


/* scheduling stuff */

int effective_vavavoom(struct task_struct *, int);

int vx_tokens_recalc(struct vx_info *);

/* new stuff ;) */

static inline int vx_tokens_avail(struct vx_info *vxi)
{
	return atomic_read(&vxi->sched.tokens);
}

static inline void vx_consume_token(struct vx_info *vxi)
{
	atomic_dec(&vxi->sched.tokens);
}

static inline int vx_need_resched(struct task_struct *p)
{
#ifdef	CONFIG_VSERVER_HARDCPU
	struct vx_info *vxi = p->vx_info;

	if (vxi) {
		int tokens;

		p->time_slice--;
		if (atomic_read(&vxi->vx_refcount) < 1)
			printk("need_resched: p=%p, s=%ld, ref=%d, id=%d/%d\n",
				p, p->state, atomic_read(&vxi->vx_refcount),
				vxi->vx_id, p->xid);
		if ((tokens = vx_tokens_avail(vxi)) > 0)
			vx_consume_token(vxi);
		return ((p->time_slice == 0) || (tokens < 1));
	}
#endif
	p->time_slice--;
	return (p->time_slice == 0);
}
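
/*
 * Illustrative sketch only (an assumed helper, not in-tree code): per
 * the tokens_min comment above ("minimum for unhold"), a context that
 * was put on hold after running out of tokens would only be released
 * again once the refill brings it back up to tokens_min.
 */
static inline int vx_tokens_above_min(struct vx_info *vxi)
{
	return atomic_read(&vxi->sched.tokens) >= vxi->sched.tokens_min;
}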


#endif	/* __KERNEL__ */

#endif	/* _VX_SCHED_H */
#endif	/* _VX_INFO_DEF_ */