#include <linux/types.h>
-
#define MAX_S_CONTEXT 65535 /* Arbitrary limit */
#define MIN_D_CONTEXT 49152 /* dynamic contexts start here */
#ifdef __KERNEL__
-#include <linux/utsname.h>
-
-struct _vx_virt {
- int nr_threads;
- int nr_running;
- int max_threads;
- unsigned long total_forks;
-
- unsigned int bias_cswtch;
- long bias_jiffies;
- long bias_idle;
-
- struct new_utsname utsname;
-};
-
-
#include <linux/list.h>
#include <linux/spinlock.h>
-#include <asm/atomic.h>
#define _VX_INFO_DEF_
+#include "cvirt.h"
#include "limit.h"
#include "sched.h"
#undef _VX_INFO_DEF_
pid_t vx_initpid; /* PID of fake init process */
- struct _vx_virt virt; /* virtual/bias stuff */
+ struct _vx_cvirt cvirt; /* virtual/bias stuff */
struct _vx_limit limit; /* vserver limits */
struct _vx_sched sched; /* vserver scheduler */
extern struct vx_info *find_vx_info(int);
extern struct vx_info *find_or_create_vx_info(int);
+extern int vx_info_id_valid(int);
extern int vx_migrate_task(struct task_struct *, struct vx_info *);
#endif /* __KERNEL__ */
#define VXF_INFO_LOCK 0x00000001
-#define VXF_INFO_NPROC 0x00000002
-#define VXF_INFO_PRIVATE 0x00000004
-#define VXF_INFO_INIT 0x00000008
+#define VXF_INFO_SCHED 0x00000002
+#define VXF_INFO_NPROC 0x00000004
+#define VXF_INFO_PRIVATE 0x00000008
-#define VXF_INFO_HIDE 0x00000010
-#define VXF_INFO_ULIMIT 0x00000020
-#define VXF_INFO_NSPACE 0x00000040
+#define VXF_INFO_INIT 0x00000010
+#define VXF_INFO_HIDE 0x00000020
+#define VXF_INFO_ULIMIT 0x00000040
+#define VXF_INFO_NSPACE 0x00000080
#define VXF_SCHED_HARD 0x00000100
#define VXF_SCHED_PRIO 0x00000200
#define VXF_VIRT_MEM 0x00010000
#define VXF_VIRT_UPTIME 0x00020000
+#define VXF_VIRT_CPU 0x00040000
+
+#define VXF_HIDE_MOUNT 0x01000000
+#define VXF_HIDE_NETIF 0x02000000
#define VXF_STATE_SETUP (1ULL<<32)
#define VXF_STATE_INIT (1ULL<<33)
+
+#define VXF_ONE_TIME (0x0003ULL<<32)
+
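
Since 0x0003ULL<<32 sets exactly bits 32 and 33, VXF_ONE_TIME is shorthand for
VXF_STATE_SETUP | VXF_STATE_INIT. A minimal sketch of clearing both one-shot
bits in a single step (the local variable 'flags' is hypothetical, not a field
introduced by this patch):

	/* illustrative fragment, not part of the patch */
	__u64 flags = VXF_STATE_SETUP | VXF_STATE_INIT | VXF_VIRT_UPTIME;

	flags &= ~VXF_ONE_TIME;		/* drops SETUP and INIT, keeps VIRT_UPTIME */
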
#define VCMD_get_ccaps VC_CMD(FLAGS, 3, 0)
#define VCMD_set_ccaps VC_CMD(FLAGS, 4, 0)
--- /dev/null
+#if defined(__KERNEL__) && defined(_VX_INFO_DEF_)
+
+#include <linux/utsname.h>
+#include <linux/rwsem.h>
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <asm/atomic.h>
+
+/* context sub struct */
+
+struct sock_acc {
+ atomic_t count;
+ atomic_t total;
+};
+
+struct _vx_cvirt {
+	int nr_threads;			/* number of current threads */
+	int nr_running;			/* number of running threads */
+	int max_threads;		/* maximum number of threads */
+	unsigned long total_forks;	/* number of forks so far */
+
+	unsigned int bias_cswtch;	/* context switch count at creation */
+	long bias_jiffies;		/* jiffies value at creation */
+	long bias_idle;			/* idle time at creation */
+	struct timespec bias_tp;	/* monotonic time at creation */
+
+	struct new_utsname utsname;	/* per context utsname copy */
+
+	struct sock_acc sock[5][3];	/* socket accounting, 5 families x 3 buckets */
+};
+
+
+static inline long vx_sock_count(struct _vx_cvirt *cvirt, int type, int pos)
+{
+ return atomic_read(&cvirt->sock[type][pos].count);
+}
+
+
+static inline long vx_sock_total(struct _vx_cvirt *cvirt, int type, int pos)
+{
+ return atomic_read(&cvirt->sock[type][pos].total);
+}
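
Only the read accessors appear in this hunk; the increment side is not part of
this excerpt. A sketch of what such an accounting helper could look like, using
just the fields defined above (the name vx_acc_sock_sketch and its size
parameter are assumptions, not taken from the patch):

/* sketch: charge one socket of 'size' to the given accounting bucket */
static inline void vx_acc_sock_sketch(struct _vx_cvirt *cvirt,
	int type, int pos, int size)
{
	atomic_inc(&cvirt->sock[type][pos].count);	/* one more socket */
	atomic_add(size, &cvirt->sock[type][pos].total);	/* accumulated size */
}
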
+
+
+static inline void vx_info_init_cvirt(struct _vx_cvirt *cvirt)
+{
+	int i, j;
+
+	cvirt->nr_threads = 1;
+	/* cvirt->bias_cswtch = kstat.context_swtch; */
+	cvirt->bias_jiffies = jiffies;
+	/* cvirt->bias_idle = init_tasks[0]->times.tms_utime +
+		init_tasks[0]->times.tms_stime;
+	*/
+	do_posix_clock_monotonic_gettime(&cvirt->bias_tp);
+
+	down_read(&uts_sem);
+	cvirt->utsname = system_utsname;
+	up_read(&uts_sem);
+
+	for (i = 0; i < 5; i++) {
+		for (j = 0; j < 3; j++) {
+ atomic_set(&cvirt->sock[i][j].count, 0);
+ atomic_set(&cvirt->sock[i][j].total, 0);
+ }
+ }
+}
+
+static inline int vx_info_proc_cvirt(struct _vx_cvirt *cvirt, char *buffer)
+{
+	int i, j, length = 0;
+	static char *type[] = { "UNSPEC", "UNIX", "INET", "INET6", "OTHER" };
+
+	for (i = 0; i < 5; i++) {
+		length += sprintf(buffer + length,
+			"%s:", type[i]);
+		for (j = 0; j < 3; j++) {
+ length += sprintf(buffer + length,
+ "\t%12lu/%-12lu"
+ ,vx_sock_count(cvirt, i, j)
+ ,vx_sock_total(cvirt, i, j)
+ );
+ }
+ buffer[length++] = '\n';
+ }
+ return length;
+}
+
+#else /* _VX_INFO_DEF_ */
+#ifndef _VX_CVIRT_H
+#define _VX_CVIRT_H
+
+#include "switch.h"
+
+/* cvirt vserver commands */
+
+
+#ifdef __KERNEL__
+
+struct timespec;
+
+void vx_vsi_uptime(struct timespec *uptime);
+
+#endif /* __KERNEL__ */
+
+#endif /* _VX_CVIRT_H */
+#endif
/* context sub struct */
+#define NUM_RLIMITS 20
+
+#define VLIMIT_SOCK 16
+
+
struct _vx_limit {
atomic_t ticks;
- unsigned long rlim[RLIM_NLIMITS]; /* Per context limit */
- atomic_t res[RLIM_NLIMITS]; /* Current value */
+ unsigned long rlim[NUM_RLIMITS]; /* Per context limit */
+ atomic_t res[NUM_RLIMITS]; /* Current value */
};
static inline void vx_info_init_limit(struct _vx_limit *limit)
{
int lim;
- for (lim=0; lim<RLIM_NLIMITS; lim++)
+ for (lim=0; lim<NUM_RLIMITS; lim++) {
limit->rlim[lim] = RLIM_INFINITY;
+ atomic_set(&limit->res[lim], 0);
+ }
}
static inline int vx_info_proc_limit(struct _vx_limit *limit, char *buffer)
"VM:\t%8d/%ld\n"
"VML:\t%8d/%ld\n"
"RSS:\t%8d/%ld\n"
+ "FILES:\t%8d/%ld\n"
,atomic_read(&limit->res[RLIMIT_NPROC])
,limit->rlim[RLIMIT_NPROC]
,atomic_read(&limit->res[RLIMIT_AS])
,limit->rlim[RLIMIT_MEMLOCK]
,atomic_read(&limit->res[RLIMIT_RSS])
,limit->rlim[RLIMIT_RSS]
+ ,atomic_read(&limit->res[RLIMIT_NOFILE])
+ ,limit->rlim[RLIMIT_NOFILE]
);
}
void vx_vsi_meminfo(struct sysinfo *);
void vx_vsi_swapinfo(struct sysinfo *);
+
#endif /* __KERNEL__ */
#endif /* _VX_LIMIT_H */
struct ip_info {
- struct list_head ip_list; /* linked list of ipinfos */
+ struct list_head ip_list; /* linked list of ipinfos */
+ nid_t ip_id; /* vnet id */
atomic_t ip_refcount;
int nbipv4;
- __u32 ipv4[NB_IPV4ROOT];/* Process can only bind to these IPs */
- /* The first one is used to connect */
- /* and for bind any service */
- /* The other must be used explicity when */
- /* binding */
- __u32 mask[NB_IPV4ROOT];/* Netmask for each ipv4 */
- /* Used to select the proper source address */
- /* for sockets */
- __u32 v4_bcast; /* Broadcast address used to receive UDP packets */
+ __u32 ipv4[NB_IPV4ROOT]; /* Process can only bind to these IPs */
+ /* The first one is used to connect */
+ /* and for bind any service */
+				/* The others must be used explicitly */
+ __u32 mask[NB_IPV4ROOT]; /* Netmask for each ipv4 */
+ /* Used to select the proper source */
+ /* address for sockets */
+ __u32 v4_bcast; /* Broadcast address to receive UDP */
};
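
The mask[] comment above describes source-address selection; a sketch of that
logic, assuming addresses and masks are kept in network byte order (the helper
itself is illustrative, not code from this patch):

/* sketch: pick the context IP on the same subnet as the destination */
static inline __u32 vx_ipv4_src_sketch(struct ip_info *ipi, __u32 dst)
{
	int i;

	for (i = 0; i < ipi->nbipv4; i++)
		if ((ipi->ipv4[i] & ipi->mask[i]) == (dst & ipi->mask[i]))
			return ipi->ipv4[i];
	return ipi->nbipv4 ? ipi->ipv4[0] : 0;	/* fall back to the first IP */
}
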
void free_ip_info(struct ip_info *);
struct ip_info *create_ip_info(void);
+extern struct ip_info *find_ip_info(int);
+extern int ip_info_id_valid(int);
+
+
#endif /* __KERNEL__ */
#endif /* _VX_NETWORK_H */
#include <linux/spinlock.h>
#include <linux/jiffies.h>
+#include <asm/atomic.h>
#include <asm/param.h>
#include <asm/cpumask.h>
int fill_rate; /* Fill rate: add X tokens... */
int interval; /* Divisor: per Y jiffies */
- int tokens; /* number of CPU tokens in this context */
+ atomic_t tokens; /* number of CPU tokens in this context */
int tokens_min; /* Limit: minimum for unhold */
int tokens_max; /* Limit: no more than N tokens */
uint32_t jiffies; /* add an integral multiple of Y to this */
{
/* scheduling; hard code starting values as constants */
sched->fill_rate = 1;
- sched->interval = 4;
- sched->tokens = HZ >> 2;
+ sched->interval = 4;
sched->tokens_min = HZ >> 4;
sched->tokens_max = HZ >> 1;
sched->jiffies = jiffies;
sched->tokens_lock = SPIN_LOCK_UNLOCKED;
+
+ atomic_set(&sched->tokens, HZ >> 2);
+ sched->cpus_allowed = CPU_MASK_ALL;
}
static inline int vx_info_proc_sched(struct _vx_sched *sched, char *buffer)
"TokensMin:\t%8d\n"
"TokensMax:\t%8d\n"
,sched->ticks
- ,sched->tokens
+ ,atomic_read(&sched->tokens)
,sched->fill_rate
,sched->interval
,sched->tokens_min
int vx_tokens_recalc(struct vx_info *);
-/* update the token allocation for a process */
-static inline int vx_tokens_avail(struct task_struct *tsk)
-{
- struct vx_info *vxi = tsk->vx_info;
- int tokens;
+/* token bucket helpers */
- spin_lock(&vxi->sched.tokens_lock);
- tokens = vx_tokens_recalc(vxi);
- spin_unlock(&vxi->sched.tokens_lock);
- return tokens;
+static inline int vx_tokens_avail(struct vx_info *vxi)
+{
+ return atomic_read(&vxi->sched.tokens);
}
-/* new stuff ;) */
+static inline void vx_consume_token(struct vx_info *vxi)
+{
+ atomic_dec(&vxi->sched.tokens);
+}
static inline int vx_need_resched(struct task_struct *p, struct vx_info *vxi)
{
p->time_slice--;
if (vxi) {
- int tokens = 0;
-
- if (vxi->sched.tokens > 0) {
- spin_lock(&vxi->sched.tokens_lock);
- tokens = --vxi->sched.tokens;
- spin_unlock(&vxi->sched.tokens_lock);
- }
- return ((p->time_slice == 0) || (tokens == 0));
+ int tokens;
+ if ((tokens = vx_tokens_avail(vxi)) > 0)
+ vx_consume_token(vxi);
+
+ return ((p->time_slice == 0) || (tokens < 1));
} else
return (p->time_slice == 0);
}
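
With the defaults from vx_info_init_sched above, a context starts with HZ/4
tokens, gains fill_rate = 1 token every interval = 4 jiffies (HZ/4 tokens per
second), never holds more than HZ/2, and must refill to HZ/16 before a held
context is released again. Assuming vx_need_resched is called once per timer
tick for the running task (the scheduler hook is outside this excerpt), a
hard-scheduled (VXF_SCHED_HARD) context that is always runnable burns tokens
four times as fast as it earns them and settles at roughly a quarter of one
CPU. For HZ = 1000 that means: 250 tokens to start, 250 refilled per second,
a ceiling of 500 and a release threshold of 62.
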
/* interface version */
-#define VCI_VERSION 0x00010013
+#define VCI_VERSION 0x00010014
/* query version */