--- /dev/null
+#ifndef _VX_CONTEXT_H
+#define _VX_CONTEXT_H
+
+#include <linux/types.h>
+
+
+#define MAX_S_CONTEXT 65535 /* Arbitrary limit */
+#define MIN_D_CONTEXT 49152 /* dynamic contexts start here */
+
+#define VX_DYNAMIC_ID ((uint32_t)-1) /* id for dynamic context */
+
+#ifdef __KERNEL__
+
+#include <linux/utsname.h>
+
+struct _vx_virt {
+ int nr_threads;
+ int nr_running;
+ int max_threads;
+ unsigned long total_forks;
+
+ unsigned int bias_cswtch;
+ long bias_jiffies;
+ long bias_idle;
+
+ struct new_utsname utsname;
+};
+
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+
+#define _VX_INFO_DEF_
+#include "limit.h"
+#include "sched.h"
+#undef _VX_INFO_DEF_
+
+struct vx_info {
+ struct list_head vx_list; /* linked list of contexts */
+ xid_t vx_id; /* context id */
+ atomic_t vx_refcount; /* refcount */
+ struct vx_info *vx_parent; /* parent context */
+
+ struct namespace *vx_namespace; /* private namespace */
+ struct fs_struct *vx_fs; /* private namespace fs */
+ uint64_t vx_flags; /* VXF_xxx context flags */
+ uint64_t vx_bcaps; /* bounding caps (system) */
+ uint64_t vx_ccaps; /* context caps (vserver) */
+
+ pid_t vx_initpid; /* PID of fake init process */
+
+ struct _vx_virt virt; /* virtual/bias stuff */
+ struct _vx_limit limit; /* vserver limits */
+ struct _vx_sched sched; /* vserver scheduler */
+
+ char vx_name[65]; /* vserver name */
+};
+
+
+extern spinlock_t vxlist_lock;
+extern struct list_head vx_infos;
+
+
+#define VX_ADMIN 0x0001
+#define VX_WATCH 0x0002
+#define VX_DUMMY 0x0008
+
+#define VX_IDENT 0x0010
+#define VX_EQUIV 0x0020
+#define VX_PARENT 0x0040
+#define VX_CHILD 0x0080
+
+#define VX_ARG_MASK 0x00F0
+
+#define VX_DYNAMIC 0x0100
+#define VX_STATIC 0x0200
+
+#define VX_ATR_MASK 0x0F00
+
+
+void free_vx_info(struct vx_info *);
+
+extern struct vx_info *find_vx_info(int);
+extern struct vx_info *find_or_create_vx_info(int);
+
+extern int vx_migrate_task(struct task_struct *, struct vx_info *);
+
+#endif /* __KERNEL__ */
+
+#include "switch.h"
+
+/* vinfo commands */
+
+#define VCMD_task_xid VC_CMD(VINFO, 1, 0)
+#define VCMD_task_nid VC_CMD(VINFO, 2, 0)
+
+#ifdef __KERNEL__
+extern int vc_task_xid(uint32_t, void *);
+
+#endif /* __KERNEL__ */
+
+#define VCMD_vx_info VC_CMD(VINFO, 5, 0)
+#define VCMD_nx_info VC_CMD(VINFO, 6, 0)
+
+struct vcmd_vx_info_v0 {
+ uint32_t xid;
+ uint32_t initpid;
+ /* more to come */
+};
+
+#ifdef __KERNEL__
+extern int vc_vx_info(uint32_t, void *);
+
+#endif /* __KERNEL__ */
+
+#define VCMD_create_context VC_CMD(VSETUP, 1, 0)
+#define VCMD_migrate_context VC_CMD(PROCMIG, 1, 0)
+
+#ifdef __KERNEL__
+extern int vc_create_context(uint32_t, void *);
+extern int vc_migrate_context(uint32_t, void *);
+
+#endif /* __KERNEL__ */
+
+#define VCMD_get_flags VC_CMD(FLAGS, 1, 0)
+#define VCMD_set_flags VC_CMD(FLAGS, 2, 0)
+
+struct vcmd_ctx_flags_v0 {
+ uint64_t flagword;
+ uint64_t mask;
+};
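+
+/*
+ * Presumed semantics (an assumption, not spelled out in this header):
+ * on VCMD_set_flags only the bits set in 'mask' are taken over from
+ * 'flagword'; all other context flags remain unchanged.
+ */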
+
+#ifdef __KERNEL__
+extern int vc_get_flags(uint32_t, void *);
+extern int vc_set_flags(uint32_t, void *);
+
+#endif /* __KERNEL__ */
+
+#define VXF_INFO_LOCK 0x00000001
+#define VXF_INFO_NPROC 0x00000002
+#define VXF_INFO_PRIVATE 0x00000004
+#define VXF_INFO_INIT 0x00000008
+
+#define VXF_INFO_HIDE 0x00000010
+#define VXF_INFO_ULIMIT 0x00000020
+#define VXF_INFO_NSPACE 0x00000040
+
+#define VXF_SCHED_HARD 0x00000100
+#define VXF_SCHED_PRIO 0x00000200
+#define VXF_SCHED_PAUSE 0x00000400
+
+#define VXF_VIRT_MEM 0x00010000
+#define VXF_VIRT_UPTIME 0x00020000
+
+#define VXF_STATE_SETUP (1ULL<<32)
+#define VXF_STATE_INIT (1ULL<<33)
+
+#define VCMD_get_ccaps VC_CMD(FLAGS, 3, 0)
+#define VCMD_set_ccaps VC_CMD(FLAGS, 4, 0)
+
+struct vcmd_ctx_caps_v0 {
+ uint64_t bcaps;
+ uint64_t ccaps;
+ uint64_t cmask;
+};
+
+#ifdef __KERNEL__
+extern int vc_get_ccaps(uint32_t, void *);
+extern int vc_set_ccaps(uint32_t, void *);
+
+#endif /* __KERNEL__ */
+
+#define VXC_SET_UTSNAME 0x00000001
+#define VXC_SET_RLIMIT 0x00000002
+
+#define VXC_ICMP_PING 0x00000100
+
+#define VXC_SECURE_MOUNT 0x00010000
+
+
+#endif /* _VX_CONTEXT_H */
--- /dev/null
+#ifndef _VX_INODE_H
+#define _VX_INODE_H
+
+#include "switch.h"
+
+/* inode vserver commands */
+
+#define VCMD_get_iattr_v0 VC_CMD(INODE, 1, 0)
+#define VCMD_set_iattr_v0 VC_CMD(INODE, 2, 0)
+
+#define VCMD_get_iattr VC_CMD(INODE, 1, 1)
+#define VCMD_set_iattr VC_CMD(INODE, 2, 1)
+
+struct vcmd_ctx_iattr_v0 {
+ /* device handle in id */
+ uint64_t ino;
+ uint32_t xid;
+ uint32_t flags;
+ uint32_t mask;
+};
+
+struct vcmd_ctx_iattr_v1 {
+ const char __user *name;
+ uint32_t xid;
+ uint32_t flags;
+ uint32_t mask;
+};
+
+
+#define IATTR_XID 0x01000000
+
+#define IATTR_ADMIN 0x00000001
+#define IATTR_WATCH 0x00000002
+#define IATTR_HIDE 0x00000004
+#define IATTR_FLAGS 0x00000007
+
+#define IATTR_BARRIER 0x00010000
+#define IATTR_IUNLINK 0x00020000
+#define IATTR_IMMUTABLE 0x00040000
+
+#ifdef __KERNEL__
+
+#define vx_hide_check(c,m) (((m) & IATTR_HIDE) ? vx_check(c,m) : 1)
+
+extern int vc_get_iattr_v0(uint32_t, void *);
+extern int vc_set_iattr_v0(uint32_t, void *);
+
+extern int vc_get_iattr(uint32_t, void *);
+extern int vc_set_iattr(uint32_t, void *);
+
+#endif /* __KERNEL__ */
+
+/* inode ioctls */
+
+#define FIOC_GETXFLG _IOR('x', 5, long)
+#define FIOC_SETXFLG _IOW('x', 6, long)
+
+#endif /* _VX_INODE_H */
--- /dev/null
+#ifndef _VX_LEGACY_H
+#define _VX_LEGACY_H
+
+#include "switch.h"
+#include "network.h"
+
+/* compatibility vserver commands */
+
+#define VCMD_new_s_context VC_CMD(COMPAT, 1, 1)
+#define VCMD_set_ipv4root VC_CMD(COMPAT, 2, 3)
+
+/* compatibility vserver arguments */
+
+struct vcmd_new_s_context_v1 {
+ uint32_t remove_cap;
+ uint32_t flags;
+};
+
+struct vcmd_set_ipv4root_v3 {
+ /* number of pairs in id */
+ uint32_t broadcast;
+ struct {
+ uint32_t ip;
+ uint32_t mask;
+ } ip_mask_pair[NB_IPV4ROOT];
+};
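+
+/*
+ * Illustrative use (an assumption based on the comments above): to confine
+ * a context to a single address such as 192.168.1.2/255.255.255.0,
+ * userspace would fill in one ip/mask pair plus the broadcast address
+ * and pass the number of valid pairs via the command's id argument.
+ */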
+
+
+#define VX_INFO_LOCK 1 /* Can't request a new vx_id */
+#define VX_INFO_NPROC 4 /* Limit number of processes in a context */
+#define VX_INFO_PRIVATE 8 /* No one can join this security context */
+#define VX_INFO_INIT 16 /* This process wants to become the */
+ /* logical process 1 of the security */
+ /* context */
+#define VX_INFO_HIDEINFO 32 /* Hide some information in /proc */
+#define VX_INFO_ULIMIT 64 /* Use the current process's ulimits */
+ /* as the global limits */
+ /* of the context */
+#define VX_INFO_NAMESPACE 128 /* save private namespace */
+
+
+#define NB_S_CONTEXT 16
+
+#define NB_IPV4ROOT 16
+
+
+#ifdef __KERNEL__
+extern int vc_new_s_context(uint32_t, void *);
+extern int vc_set_ipv4root(uint32_t, void *);
+
+#endif /* __KERNEL__ */
+#endif /* _VX_LEGACY_H */
--- /dev/null
+#if defined(__KERNEL__) && defined(_VX_INFO_DEF_)
+
+#include <asm/atomic.h>
+#include <asm/resource.h>
+
+/* context sub struct */
+
+struct _vx_limit {
+ atomic_t ticks;
+
+ unsigned long rlim[RLIM_NLIMITS]; /* Per context limit */
+ atomic_t res[RLIM_NLIMITS]; /* Current value */
+};
+
+static inline void vx_info_init_limit(struct _vx_limit *limit)
+{
+ int lim;
+
+ for (lim=0; lim<RLIM_NLIMITS; lim++)
+ limit->rlim[lim] = RLIM_INFINITY;
+}
+
+static inline int vx_info_proc_limit(struct _vx_limit *limit, char *buffer)
+{
+ return sprintf(buffer,
+ "PROC:\t%8d/%ld\n"
+ "VM:\t%8d/%ld\n"
+ "VML:\t%8d/%ld\n"
+ "RSS:\t%8d/%ld\n"
+ ,atomic_read(&limit->res[RLIMIT_NPROC])
+ ,limit->rlim[RLIMIT_NPROC]
+ ,atomic_read(&limit->res[RLIMIT_AS])
+ ,limit->rlim[RLIMIT_AS]
+ ,atomic_read(&limit->res[RLIMIT_MEMLOCK])
+ ,limit->rlim[RLIMIT_MEMLOCK]
+ ,atomic_read(&limit->res[RLIMIT_RSS])
+ ,limit->rlim[RLIMIT_RSS]
+ );
+}
+
+#else /* _VX_INFO_DEF_ */
+#ifndef _VX_LIMIT_H
+#define _VX_LIMIT_H
+
+#include "switch.h"
+
+/* rlimit vserver commands */
+
+#define VCMD_get_rlimit VC_CMD(RLIMIT, 1, 0)
+#define VCMD_set_rlimit VC_CMD(RLIMIT, 2, 0)
+#define VCMD_get_rlimit_mask VC_CMD(RLIMIT, 3, 0)
+
+struct vcmd_ctx_rlimit_v0 {
+ uint32_t id;
+ uint64_t minimum;
+ uint64_t softlimit;
+ uint64_t maximum;
+};
+
+struct vcmd_ctx_rlimit_mask_v0 {
+ uint32_t minimum;
+ uint32_t softlimit;
+ uint32_t maximum;
+};
+
+#define CRLIM_UNSET (0ULL)
+#define CRLIM_INFINITY (~0ULL)
+#define CRLIM_KEEP (~1ULL)
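+
+/*
+ * Example (illustrative assumption): to raise only the hard limit while
+ * leaving the soft limit untouched, a caller would presumably pass
+ * softlimit = CRLIM_KEEP and maximum = <new value> in vcmd_ctx_rlimit_v0.
+ */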
+
+#ifdef __KERNEL__
+extern int vc_get_rlimit(uint32_t, void *);
+extern int vc_set_rlimit(uint32_t, void *);
+extern int vc_get_rlimit_mask(uint32_t, void *);
+
+struct sysinfo;
+
+void vx_vsi_meminfo(struct sysinfo *);
+void vx_vsi_swapinfo(struct sysinfo *);
+
+#endif /* __KERNEL__ */
+
+#endif /* _VX_LIMIT_H */
+#endif
--- /dev/null
+#ifndef _VX_NAMESPACE_H
+#define _VX_NAMESPACE_H
+
+#include <linux/types.h>
+
+
+/* virtual host info names */
+
+#define VCMD_vx_set_vhi_name VC_CMD(VHOST, 1, 0)
+#define VCMD_vx_get_vhi_name VC_CMD(VHOST, 2, 0)
+
+struct vcmd_vx_vhi_name_v0 {
+ uint32_t field;
+ char name[65];
+};
+
+
+enum vx_vhi_name_field {
+ VHIN_CONTEXT=0,
+ VHIN_SYSNAME,
+ VHIN_NODENAME,
+ VHIN_RELEASE,
+ VHIN_VERSION,
+ VHIN_MACHINE,
+ VHIN_DOMAINNAME,
+};
+
+
+#ifdef __KERNEL__
+extern int vc_set_vhi_name(uint32_t, void *);
+extern int vc_get_vhi_name(uint32_t, void *);
+
+#endif /* __KERNEL__ */
+
+#define VCMD_enter_namespace VC_CMD(PROCALT, 1, 0)
+#define VCMD_cleanup_namespace VC_CMD(PROCALT, 2, 0)
+#define VCMD_set_namespace VC_CMD(PROCALT, 3, 0)
+
+#ifdef __KERNEL__
+
+struct vx_info;
+struct namespace;
+struct fs_struct;
+
+extern int vx_set_namespace(struct vx_info *, struct namespace *, struct fs_struct *);
+
+extern int vc_enter_namespace(uint32_t, void *);
+extern int vc_cleanup_namespace(uint32_t, void *);
+extern int vc_set_namespace(uint32_t, void *);
+
+#endif /* __KERNEL__ */
+#endif /* _VX_NAMESPACE_H */
--- /dev/null
+#ifndef _VX_NETWORK_H
+#define _VX_NETWORK_H
+
+
+#define NB_IPV4ROOT 16
+
+#ifdef __KERNEL__
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/utsname.h>
+#include <asm/resource.h>
+#include <asm/atomic.h>
+
+
+struct ip_info {
+ struct list_head ip_list; /* linked list of ipinfos */
+ atomic_t ip_refcount;
+ int nbipv4;
+ __u32 ipv4[NB_IPV4ROOT];/* Process can only bind to these IPs */
+ /* The first one is used to connect */
+ /* and to bind any service */
+ /* The others must be used explicitly when */
+ /* binding */
+ __u32 mask[NB_IPV4ROOT];/* Netmask for each ipv4 */
+ /* Used to select the proper source address */
+ /* for sockets */
+ __u32 v4_bcast; /* Broadcast address used to receive UDP packets */
+};
+
+
+extern spinlock_t iplist_lock;
+extern struct list_head ip_infos;
+
+
+void free_ip_info(struct ip_info *);
+struct ip_info *create_ip_info(void);
+
+#endif /* __KERNEL__ */
+#endif /* _VX_NETWORK_H */
--- /dev/null
+#if defined(__KERNEL__) && defined(_VX_INFO_DEF_)
+
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+#include <asm/param.h>
+#include <asm/cpumask.h>
+
+/* context sub struct */
+
+struct _vx_sched {
+ spinlock_t tokens_lock; /* lock for this structure */
+
+ int fill_rate; /* Fill rate: add X tokens... */
+ int interval; /* Divisor: per Y jiffies */
+ int tokens; /* number of CPU tokens in this context */
+ int tokens_min; /* Limit: minimum for unhold */
+ int tokens_max; /* Limit: no more than N tokens */
+ uint32_t jiffies; /* add an integral multiple of Y to this */
+
+ uint64_t ticks; /* token tick events */
+ cpumask_t cpus_allowed; /* cpu mask for context */
+};
+
+static inline void vx_info_init_sched(struct _vx_sched *sched)
+{
+ /* scheduling; hard code starting values as constants */
+ sched->fill_rate = 1;
+ sched->interval = 4;
+ sched->tokens = HZ >> 2;
+ sched->tokens_min = HZ >> 4;
+ sched->tokens_max = HZ >> 1;
+ sched->jiffies = jiffies;
+ sched->tokens_lock = SPIN_LOCK_UNLOCKED;
+}
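+
+/*
+ * With HZ == 1000 the defaults above mean: a new context starts with
+ * 250 tokens, gains 1 token (fill_rate) every 4 jiffies (interval),
+ * is capped at 500 tokens (tokens_max) and is considered for unhold
+ * only above 62 tokens (tokens_min).
+ */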
+
+static inline int vx_info_proc_sched(struct _vx_sched *sched, char *buffer)
+{
+ return sprintf(buffer,
+ "Ticks:\t%16lld\n"
+ "Token:\t\t%8d\n"
+ "FillRate:\t%8d\n"
+ "Interval:\t%8d\n"
+ "TokensMin:\t%8d\n"
+ "TokensMax:\t%8d\n"
+ ,(unsigned long long)sched->ticks
+ ,sched->tokens
+ ,sched->fill_rate
+ ,sched->interval
+ ,sched->tokens_min
+ ,sched->tokens_max
+ );
+}
+
+
+#else /* _VX_INFO_DEF_ */
+#ifndef _VX_SCHED_H
+#define _VX_SCHED_H
+
+#include "switch.h"
+
+/* sched vserver commands */
+
+#define VCMD_set_sched_v1 VC_CMD(SYSTEST, 1, 1)
+
+struct vcmd_set_sched_v1 {
+ int32_t fill_rate;
+ int32_t period;
+ int32_t fill_level;
+ int32_t bucket_size;
+};
+
+#define VCMD_set_sched VC_CMD(SCHED, 1, 2)
+
+struct vcmd_set_sched_v2 {
+ int32_t fill_rate;
+ int32_t interval;
+ int32_t tokens;
+ int32_t tokens_min;
+ int32_t tokens_max;
+ uint64_t cpu_mask;
+};
+
+#define SCHED_KEEP (-2)
+
+#ifdef __KERNEL__
+
+extern int vc_set_sched_v1(uint32_t, void *);
+extern int vc_set_sched(uint32_t, void *);
+
+
+#define VAVAVOOM_RATIO 50
+
+#include "context.h"
+
+
+/* scheduling stuff */
+
+int effective_vavavoom(struct task_struct *, int);
+
+int vx_tokens_recalc(struct vx_info *);
+
+/* recalculate and return the tokens available to a process */
+static inline int vx_tokens_avail(struct task_struct *tsk)
+{
+ struct vx_info *vxi = tsk->vx_info;
+ int tokens;
+
+ spin_lock(&vxi->sched.tokens_lock);
+ tokens = vx_tokens_recalc(vxi);
+ spin_unlock(&vxi->sched.tokens_lock);
+ return tokens;
+}
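+
+/*
+ * Illustrative sketch only -- an assumption, not the actual
+ * vx_tokens_recalc() implementation: judging by the field comments in
+ * struct _vx_sched, the recalculation adds fill_rate tokens for every
+ * interval jiffies elapsed, clamped to tokens_max, and is called with
+ * tokens_lock held (as in vx_tokens_avail() above).  Kept under #if 0
+ * so it is never compiled.
+ */
+#if 0
+static inline int __vx_tokens_recalc_sketch(struct vx_info *vxi)
+{
+	struct _vx_sched *sched = &vxi->sched;
+	uint32_t delta = (uint32_t)jiffies - sched->jiffies;
+
+	if (delta >= (uint32_t)sched->interval) {
+		int steps = delta / sched->interval;
+
+		/* refill the bucket, clamp at the maximum */
+		sched->tokens += steps * sched->fill_rate;
+		if (sched->tokens > sched->tokens_max)
+			sched->tokens = sched->tokens_max;
+		/* advance the baseline by the intervals consumed */
+		sched->jiffies += steps * sched->interval;
+	}
+	return sched->tokens;
+}
+#endif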
+
+/* new stuff ;) */
+
+static inline int vx_need_resched(struct task_struct *p, struct vx_info *vxi)
+{
+ p->time_slice--;
+ if (vxi) {
+ int tokens = 0;
+
+ if (vxi->sched.tokens > 0) {
+ spin_lock(&vxi->sched.tokens_lock);
+ tokens = --vxi->sched.tokens;
+ spin_unlock(&vxi->sched.tokens_lock);
+ }
+ return ((p->time_slice == 0) || (tokens == 0));
+ } else
+ return (p->time_slice == 0);
+}
+
+
+#endif /* __KERNEL__ */
+
+#endif /* _VX_SCHED_H */
+#endif
--- /dev/null
+#ifndef _VX_SIGNAL_H
+#define _VX_SIGNAL_H
+
+#include "switch.h"
+
+/* context signalling */
+
+#define VCMD_ctx_kill VC_CMD(PROCTRL, 1, 0)
+
+struct vcmd_ctx_kill_v0 {
+ int32_t pid;
+ int32_t sig;
+};
+
+#ifdef __KERNEL__
+extern int vc_ctx_kill(uint32_t, void *);
+
+#endif /* __KERNEL__ */
+#endif /* _VX_SIGNAL_H */
--- /dev/null
+#ifndef _VX_SWITCH_H
+#define _VX_SWITCH_H
+
+#include <linux/types.h>
+
+#define VC_CATEGORY(c) (((c) >> 24) & 0x3F)
+#define VC_COMMAND(c) (((c) >> 16) & 0xFF)
+#define VC_VERSION(c) ((c) & 0xFFF)
+
+#define VC_CMD(c,i,v) ((((VC_CAT_ ## c) & 0x3F) << 24) \
+ | (((i) & 0xFF) << 16) | ((v) & 0xFFF))
+
+/*
+
+ Syscall Matrix V2.5
+
+ |VERSION|CREATE |MODIFY |MIGRATE|CONTROL|EXPERIM| |SPECIAL|SPECIAL|
+ |STATS |DESTROY|ALTER |CHANGE |LIMIT |TEST | | | |
+ |INFO |SETUP | |MOVE | | | | | |
+ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+ SYSTEM |VERSION| | | | | | |DEVICES| |
+ HOST | 00| 01| 02| 03| 04| 05| | 06| 07|
+ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+ CPU | | |PROCALT|PROCMIG|PROCTRL| | |SCHED. | |
+ PROCESS| 08| 09| 10| 11| 12| 13| | 14| 15|
+ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+ MEMORY | | | | | | | |SWAP | |
+ | 16| 17| 18| 19| 20| 21| | 22| 23|
+ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+ NETWORK| | | | | | | |SERIAL | |
+ | 24| 25| 26| 27| 28| 29| | 30| 31|
+ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+ DISK | | | | | | | |INODE | |
+ VFS | 32| 33| 34| 35| 36| 37| | 38| 39|
+ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+ OTHER | | | | | | | |VINFO | |
+ | 40| 41| 42| 43| 44| 45| | 46| 47|
+ =======+=======+=======+=======+=======+=======+=======+ +=======+=======+
+ SPECIAL| | | | |FLAGS | | | | |
+ | 48| 49| 50| 51| 52| 53| | 54| 55|
+ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+ SPECIAL| | | | |RLIMIT |SYSCALL| | |COMPAT |
+ | 56| 57| 58| 59| 60|TEST 61| | 62| 63|
+ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+
+*/
+
+#define VC_CAT_VERSION 0
+
+#define VC_CAT_VSETUP 1
+#define VC_CAT_VHOST 2
+
+#define VC_CAT_PROCALT 10
+#define VC_CAT_PROCMIG 11
+#define VC_CAT_PROCTRL 12
+
+#define VC_CAT_SCHED 14
+#define VC_CAT_INODE 38
+
+#define VC_CAT_VINFO 46
+
+#define VC_CAT_FLAGS 52
+#define VC_CAT_RLIMIT 60
+
+#define VC_CAT_SYSTEST 61
+#define VC_CAT_COMPAT 63
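+
+/*
+ * Worked example (illustrative): VCMD_ctx_kill is VC_CMD(PROCTRL, 1, 0),
+ * i.e. category 12, command 1, version 0:
+ *
+ *	(12 << 24) | (1 << 16) | 0 == 0x0C010000
+ *
+ * so VC_CATEGORY() yields 12, VC_COMMAND() 1 and VC_VERSION() 0.
+ */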
+
+/* interface version */
+
+#define VCI_VERSION 0x00010013
+
+
+/* query version */
+
+#define VCMD_get_version VC_CMD(VERSION, 0, 0)
+
+
+#ifdef __KERNEL__
+
+#include <linux/errno.h>
+
+#define ENOTSUP -EOPNOTSUPP
+
+#else /* __KERNEL__ */
+#define __user
+#endif /* __KERNEL__ */
+
+#endif /* _VX_SWITCH_H */