use $(LIBENSCVECTOR) instead of libensc_vector.a
diff --git a/util-vserver/kernel/sched.h b/util-vserver/kernel/sched.h
index 3841639..f7ac947 100644
@@ -1,54 +1,97 @@
+/* _VX_SCHED_H defined below */
+
 #if    defined(__KERNEL__) && defined(_VX_INFO_DEF_)
 
 #include <linux/spinlock.h>
 #include <linux/jiffies.h>
+#include <linux/cpumask.h>
+#include <asm/atomic.h>
 #include <asm/param.h>
-#include <asm/cpumask.h>
+
+struct _vx_ticks {
+       uint64_t user_ticks;            /* token tick events */
+       uint64_t sys_ticks;             /* token tick events */
+       uint64_t hold_ticks;            /* token ticks paused */
+       uint64_t unused[5];             /* pad to a 64-byte cacheline? */
+};
 
 /* context sub struct */
 
 struct _vx_sched {
-       spinlock_t tokens_lock; /* lock for this structure */
+       atomic_t tokens;                /* number of CPU tokens */
+       spinlock_t tokens_lock;         /* lock for token bucket */
 
-       int fill_rate;          /* Fill rate: add X tokens... */
-       int interval;           /* Divisor:   per Y jiffies   */
-       int tokens;             /* number of CPU tokens in this context */
-       int tokens_min;         /* Limit:     minimum for unhold */
-       int tokens_max;         /* Limit:     no more than N tokens */
-       uint32_t jiffies;       /* add an integral multiple of Y to this */
+       int fill_rate;                  /* Fill rate: add X tokens... */
+       int interval;                   /* Divisor:   per Y jiffies   */
+       int tokens_min;                 /* Limit:     minimum for unhold */
+       int tokens_max;                 /* Limit:     no more than N tokens */
+       uint32_t jiffies;               /* last time accounted */
 
-       uint64_t ticks;         /* token tick events */
-       cpumask_t cpus_allowed; /* cpu mask for context */
+       int priority_bias;              /* bias offset for priority */
+       cpumask_t cpus_allowed;         /* cpu mask for context */
+
+       struct _vx_ticks cpu[NR_CPUS];
 };
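The fields above form a per-context token bucket: every interval jiffies the
context is credited fill_rate tokens, capped at tokens_max, and a context put
on hold is only released once it climbs back to tokens_min. The per-CPU
_vx_ticks array keeps each CPU's counters in its own 64-byte slot, which is
what the unused[5] padding is for. The refill itself, vx_tokens_recalc(),
lives in sched.c and is not part of this header; the sketch below only shows
the arithmetic the fields imply (function name and details are assumptions,
not the committed code):

    /* Sketch only: the real vx_tokens_recalc() takes a struct vx_info *
     * and also handles hold/unhold; callers hold tokens_lock. */
    static inline int vx_tokens_recalc_sketch(struct _vx_sched *sched)
    {
            uint32_t delta = jiffies - sched->jiffies;
            int passes = delta / sched->interval;
            int tokens;

            if (passes > 0) {
                    /* credit fill_rate tokens per elapsed interval ... */
                    tokens = atomic_read(&sched->tokens)
                            + passes * sched->fill_rate;
                    /* ... but never beyond the bucket size */
                    if (tokens > sched->tokens_max)
                            tokens = sched->tokens_max;
                    atomic_set(&sched->tokens, tokens);
                    /* advance by whole intervals, keep the remainder */
                    sched->jiffies += passes * sched->interval;
            }
            return atomic_read(&sched->tokens);
    }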
 
 static inline void vx_info_init_sched(struct _vx_sched *sched)
 {
-        /* scheduling; hard code starting values as constants */
-        sched->fill_rate       = 1;
-        sched->interval        = 4;
-        sched->tokens  = HZ >> 2;
-        sched->tokens_min      = HZ >> 4;
-        sched->tokens_max      = HZ >> 1;
-        sched->jiffies         = jiffies;
-        sched->tokens_lock     = SPIN_LOCK_UNLOCKED;
+       int i;
+
+       /* scheduling; hard code starting values as constants */
+       sched->fill_rate        = 1;
+       sched->interval         = 4;
+       sched->tokens_min       = HZ >> 4;
+       sched->tokens_max       = HZ >> 1;
+       sched->jiffies          = jiffies;
+       sched->tokens_lock      = SPIN_LOCK_UNLOCKED;
+
+       atomic_set(&sched->tokens, HZ >> 2);
+       sched->cpus_allowed     = CPU_MASK_ALL;
+       sched->priority_bias    = 0;
+
+       for_each_cpu(i) {
+               sched->cpu[i].user_ticks        = 0;
+               sched->cpu[i].sys_ticks         = 0;
+               sched->cpu[i].hold_ticks        = 0;
+       }
+}
+
+static inline void vx_info_exit_sched(struct _vx_sched *sched)
+{
+       return;
 }
 
 static inline int vx_info_proc_sched(struct _vx_sched *sched, char *buffer)
 {
-       return sprintf(buffer,
-               "Ticks:\t%16lld\n"
+       int length = 0;
+       int i;
+
+       length += sprintf(buffer,
                "Token:\t\t%8d\n"
                "FillRate:\t%8d\n"
-               "Interval:\t%8d\n"              
+               "Interval:\t%8d\n"
                "TokensMin:\t%8d\n"
                "TokensMax:\t%8d\n"
-               ,sched->ticks
-               ,sched->tokens
+               "PrioBias:\t%8d\n"
+               ,atomic_read(&sched->tokens)
                ,sched->fill_rate
                ,sched->interval
                ,sched->tokens_min
                ,sched->tokens_max
+               ,sched->priority_bias
                );
+
+       for_each_online_cpu(i) {
+               length += sprintf(buffer + length,
+                       "cpu %d: %lld %lld %lld\n"
+                       ,i
+                       ,sched->cpu[i].user_ticks
+                       ,sched->cpu[i].sys_ticks
+                       ,sched->cpu[i].hold_ticks
+                       );
+       }
+
+       return length;
 }
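For orientation, a freshly initialized context would come out of
vx_info_proc_sched() roughly like this (assuming HZ=1000 and one online CPU;
tab stops approximated with spaces):

    Token:               250
    FillRate:              1
    Interval:              4
    TokensMin:            62
    TokensMax:           500
    PrioBias:              0
    cpu 0: 0 0 0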
 
 
@@ -60,36 +103,50 @@ static inline int vx_info_proc_sched(struct _vx_sched *sched, char *buffer)
 
 /*  sched vserver commands */
 
-#define VCMD_set_sched_v1      VC_CMD(SYSTEST, 1, 1)
+#define VCMD_set_sched_v2      VC_CMD(SCHED, 1, 2)
+#define VCMD_set_sched         VC_CMD(SCHED, 1, 3)
 
-struct  vcmd_set_sched_v1 {
+struct vcmd_set_sched_v2 {
        int32_t fill_rate;
-       int32_t period;
-       int32_t fill_level;
-       int32_t bucket_size;
+       int32_t interval;
+       int32_t tokens;
+       int32_t tokens_min;
+       int32_t tokens_max;
+       uint64_t cpu_mask;
 };
 
-#define VCMD_set_sched         VC_CMD(SCHED, 1, 2)
-
-struct  vcmd_set_sched_v2 {
+struct vcmd_set_sched_v3 {
+       uint32_t set_mask;
        int32_t fill_rate;
        int32_t interval;
        int32_t tokens;
        int32_t tokens_min;
        int32_t tokens_max;
-       uint64_t cpu_mask;
+       int32_t priority_bias;
 };
 
+
+#define VXSM_FILL_RATE         0x0001
+#define VXSM_INTERVAL          0x0002
+#define VXSM_TOKENS            0x0010
+#define VXSM_TOKENS_MIN                0x0020
+#define VXSM_TOKENS_MAX                0x0040
+#define VXSM_PRIO_BIAS         0x0100
+
 #define SCHED_KEEP             (-2)
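The v3 interface pairs every tunable with a VXSM_* bit, so a caller updates
only the fields it names in set_mask; v2 instead relied on the SCHED_KEEP
sentinel (-2) to leave a value unchanged. A hypothetical userspace sketch
(the wrapper name is illustrative; the real entry point is the vserver
syscall command multiplexer):

    /* hypothetical usage: change the refill rate, leave the rest alone */
    struct vcmd_set_sched_v3 data = {
            .set_mask  = VXSM_FILL_RATE | VXSM_INTERVAL,
            .fill_rate = 2,         /* add 2 tokens ...  */
            .interval  = 8,         /* ... per 8 jiffies */
    };
    /* e.g. sys_vserver(VCMD_set_sched, xid, &data); */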
 
 #ifdef __KERNEL__
 
-extern int vc_set_sched_v1(uint32_t, void *);
-extern int vc_set_sched(uint32_t, void *);
+extern int vc_set_sched_v1(uint32_t, void __user *);
+extern int vc_set_sched_v2(uint32_t, void __user *);
+extern int vc_set_sched(uint32_t, void __user *);
 
 
 #define VAVAVOOM_RATIO         50
 
+#define MAX_PRIO_BIAS          20
+#define MIN_PRIO_BIAS          -20
+
 #include "context.h"
 
 
@@ -99,37 +156,65 @@ int effective_vavavoom(struct task_struct *, int);
 
 int vx_tokens_recalc(struct vx_info *);
 
-/* update the token allocation for a process */
-static inline int vx_tokens_avail(struct task_struct *tsk)
-{
-       struct vx_info *vxi = tsk->vx_info;
-       int tokens;
+/* new stuff ;) */
 
-       spin_lock(&vxi->sched.tokens_lock);
-       tokens = vx_tokens_recalc(vxi);
-       spin_unlock(&vxi->sched.tokens_lock);
-       return tokens;
+static inline int vx_tokens_avail(struct vx_info *vxi)
+{
+       return atomic_read(&vxi->sched.tokens);
 }
 
-/* new stuff ;) */
+static inline void vx_consume_token(struct vx_info *vxi)
+{
+       atomic_dec(&vxi->sched.tokens);
+}
 
-static inline int vx_need_resched(struct task_struct *p, struct vx_info *vxi)
+static inline int vx_need_resched(struct task_struct *p)
 {
-       p->time_slice--;
+#ifdef CONFIG_VSERVER_HARDCPU
+       struct vx_info *vxi = p->vx_info;
+#endif
+       int slice = --p->time_slice;
+
+#ifdef CONFIG_VSERVER_HARDCPU
        if (vxi) {
-               int tokens = 0;
-
-               if (vxi->sched.tokens > 0) {
-                       spin_lock(&vxi->sched.tokens_lock);
-                       tokens = --vxi->sched.tokens;
-                       spin_unlock(&vxi->sched.tokens_lock);
-               }
-               return ((p->time_slice == 0) || (tokens == 0));
-       } else
-               return (p->time_slice == 0);
+               int tokens;
+
+               if ((tokens = vx_tokens_avail(vxi)) > 0)
+                       vx_consume_token(vxi);
+               /* for tokens > 0, one token was consumed */
+               if (tokens < 2)
+                       return 1;
+       }
+#endif
+       return (slice == 0);
 }
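With CONFIG_VSERVER_HARDCPU enabled, a context that has run dry forces a
reschedule even while the task still has time slice left: vx_consume_token()
is only called when a token was available, and the tokens < 2 test fires both
when the bucket was already empty and when the token just taken was the last
one. A hypothetical call site, roughly where the scheduler tick would use it:

    /* hypothetical caller in the scheduler tick path */
    static inline void vx_sched_tick_sketch(struct task_struct *p)
    {
            if (vx_need_resched(p))
                    set_tsk_need_resched(p);
    }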
 
 
+static inline void vx_onhold_inc(struct vx_info *vxi)
+{
+       int onhold = atomic_read(&vxi->cvirt.nr_onhold);
+
+       atomic_inc(&vxi->cvirt.nr_onhold);
+       if (!onhold)
+               vxi->cvirt.onhold_last = jiffies;
+}
+
+static inline void __vx_onhold_update(struct vx_info *vxi)
+{
+       int cpu = smp_processor_id();
+       uint32_t now = jiffies;
+       uint32_t delta = now - vxi->cvirt.onhold_last;
+
+       vxi->cvirt.onhold_last = now;
+       vxi->sched.cpu[cpu].hold_ticks += delta;
+}
+
+static inline void vx_onhold_dec(struct vx_info *vxi)
+{
+       if (atomic_dec_and_test(&vxi->cvirt.nr_onhold))
+               __vx_onhold_update(vxi);
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _VX_SCHED_H */
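The onhold helpers account for the spans during which a context has at least
one task parked: nr_onhold counts the held tasks, onhold_last timestamps the
start of a span, and the vx_onhold_dec() that drops the count back to zero
credits the whole span to the current CPU's hold_ticks. A hypothetical
sequence (hard-CPU path, names as above):

    /* hypothetical hold/release sequence */
    vx_onhold_inc(vxi);     /* 0 -> 1: stamps cvirt.onhold_last      */
    /* ... bucket refills via vx_tokens_recalc() ...                 */
    vx_onhold_dec(vxi);     /* 1 -> 0: elapsed jiffies are added to  */
                            /*         sched.cpu[n].hold_ticks       */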