util-vserver.git: util-vserver/kernel/sched.h
#if	defined(__KERNEL__) && defined(_VX_INFO_DEF_)

#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <asm/atomic.h>
#include <asm/param.h>
#include <asm/cpumask.h>

/* context sub struct */

struct _vx_sched {
	spinlock_t tokens_lock;	/* lock for this structure */

	int fill_rate;		/* Fill rate: add X tokens... */
	int interval;		/* Divisor:   per Y jiffies   */
	atomic_t tokens;	/* number of CPU tokens in this context */
	int tokens_min;		/* Limit:     minimum for unhold */
	int tokens_max;		/* Limit:     no more than N tokens */
	uint32_t jiffies;	/* add an integral multiple of Y to this */

	uint64_t ticks;		/* token tick events */
	cpumask_t cpus_allowed;	/* cpu mask for context */
};
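
/*
 * Token bucket in one sentence (orientation only, derived from the field
 * comments above): every `interval` jiffies the context is credited
 * `fill_rate` tokens up to `tokens_max`, each timer tick on one of its
 * CPUs burns one token, and a context put on hold is only released once
 * the bucket has refilled past `tokens_min`.
 */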

static inline void vx_info_init_sched(struct _vx_sched *sched)
{
	/* hard-coded initial scheduling values */
	sched->fill_rate	= 1;
	sched->interval		= 4;
	sched->tokens_min	= HZ >> 4;
	sched->tokens_max	= HZ >> 1;
	sched->jiffies		= jiffies;
	spin_lock_init(&sched->tokens_lock);

	atomic_set(&sched->tokens, HZ >> 2);
	sched->cpus_allowed	= CPU_MASK_ALL;
}
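
/*
 * Worked example of the defaults above, assuming HZ=1000 (illustrative,
 * not part of the original header): the bucket starts at HZ>>2 = 250
 * tokens and is capped at HZ>>1 = 500; a held context is released again
 * at HZ>>4 = 62; the refill of 1 token per 4 jiffies adds 250 tokens per
 * second while a busy CPU consumes up to HZ = 1000 per second, i.e.
 * roughly a quarter of one CPU sustained under contention.
 */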

static inline int vx_info_proc_sched(struct _vx_sched *sched, char *buffer)
{
	return sprintf(buffer,
		"Ticks:\t%16llu\n"
		"Token:\t\t%8d\n"
		"FillRate:\t%8d\n"
		"Interval:\t%8d\n"
		"TokensMin:\t%8d\n"
		"TokensMax:\t%8d\n"
		,(unsigned long long)sched->ticks
		,atomic_read(&sched->tokens)
		,sched->fill_rate
		,sched->interval
		,sched->tokens_min
		,sched->tokens_max
		);
}
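
/*
 * Illustrative output, assuming HZ=1000, the defaults set above, and a
 * zero-filled structure (example values, not captured from a live system):
 *
 *	Ticks:	               0
 *	Token:		     250
 *	FillRate:	       1
 *	Interval:	       4
 *	TokensMin:	      62
 *	TokensMax:	     500
 */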


#else	/* _VX_INFO_DEF_ */
#ifndef _VX_SCHED_H
#define _VX_SCHED_H

#include "switch.h"

/* vserver scheduler commands */

#define VCMD_set_sched		VC_CMD(SCHED, 1, 2)

struct	vcmd_set_sched_v2 {
	int32_t fill_rate;
	int32_t interval;
	int32_t tokens;
	int32_t tokens_min;
	int32_t tokens_max;
	uint64_t cpu_mask;
};

#define SCHED_KEEP		(-2)	/* sentinel: leave the current value unchanged */

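/*
 * Hedged usage sketch (not part of the original header): a caller would
 * fill vcmd_set_sched_v2 and pass SCHED_KEEP in every field it wants to
 * leave untouched, e.g. to raise only the bucket cap:
 *
 *	struct vcmd_set_sched_v2 data = {
 *		.fill_rate  = SCHED_KEEP,
 *		.interval   = SCHED_KEEP,
 *		.tokens     = SCHED_KEEP,
 *		.tokens_min = SCHED_KEEP,
 *		.tokens_max = 500,
 *		.cpu_mask   = ~0ULL,	// all CPUs
 *	};
 *
 * The structure would then reach the kernel through the vserver syscall
 * switch as VCMD_set_sched, handled by vc_set_sched() declared below.
 */
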
#ifdef	__KERNEL__

extern int vc_set_sched_v1(uint32_t, void __user *);
extern int vc_set_sched(uint32_t, void __user *);


#define VAVAVOOM_RATIO		50

#include "context.h"


/* scheduling stuff */

int effective_vavavoom(struct task_struct *, int);

int vx_tokens_recalc(struct vx_info *);

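/*
 * Hedged sketch of the recalculation (the real body lives out of line in
 * the scheduler code; this only illustrates the bookkeeping suggested by
 * the _vx_sched fields): credit fill_rate tokens for every full interval
 * elapsed since sched->jiffies, clamp at tokens_max, and advance
 * sched->jiffies by the jiffies actually accounted for:
 *
 *	delta  = jiffies - sched->jiffies;
 *	tokens = (delta / sched->interval) * sched->fill_rate;
 *	// atomically add `tokens` to sched->tokens, capped at tokens_max
 *	sched->jiffies += (delta / sched->interval) * sched->interval;
 */
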
/* new stuff ;) */

/* tokens currently available to the context */
static inline int vx_tokens_avail(struct vx_info *vxi)
{
	return atomic_read(&vxi->sched.tokens);
}

/* burn one token for the current timer tick */
static inline void vx_consume_token(struct vx_info *vxi)
{
	atomic_dec(&vxi->sched.tokens);
}

static inline int vx_need_resched(struct task_struct *p)
{
#ifdef	CONFIG_VSERVER_HARDCPU
	struct vx_info *vxi = p->vx_info;

	if (vxi) {
		int tokens;

		p->time_slice--;
		/* sanity check: the context should still hold a reference */
		if (atomic_read(&vxi->vx_refcount) < 1)
			printk(KERN_WARNING
				"need_resched: p=%p, s=%ld, ref=%d, id=%d/%d\n",
				p, p->state, atomic_read(&vxi->vx_refcount),
				vxi->vx_id, p->xid);
		if ((tokens = vx_tokens_avail(vxi)) > 0)
			vx_consume_token(vxi);
		/* reschedule when the slice or the token bucket is exhausted */
		return ((p->time_slice == 0) || (tokens < 1));
	}
#endif
	p->time_slice--;
	return (p->time_slice == 0);
}
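
/*
 * Usage sketch (illustrative; the actual call site would be the scheduler
 * tick in kernel/sched.c, which is not part of this header):
 *
 *	if (vx_need_resched(p))
 *		set_tsk_need_resched(p);
 *
 * so a context whose token bucket is empty gets preempted even while its
 * time slice still has ticks left.
 */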


#endif	/* __KERNEL__ */

#endif	/* _VX_SCHED_H */
#endif	/* _VX_INFO_DEF_ */