man_MANS =
-kernel_HDRS = \
- kernel/context.h \
- kernel/context_cmd.h \
- kernel/cvirt.h \
- kernel/cvirt_cmd.h \
- kernel/cvirt_def.h \
- kernel/debug.h \
- kernel/debug_cmd.h \
- kernel/dlimit.h \
- kernel/dlimit_cmd.h \
- kernel/inode.h \
- kernel/inode_cmd.h \
- kernel/legacy.h \
- kernel/limit.h \
- kernel/limit_cmd.h \
- kernel/limit_def.h \
- kernel/limit_int.h \
- kernel/namespace.h \
- kernel/namespace_cmd.h \
- kernel/network.h \
- kernel/network_cmd.h \
- kernel/sched.h \
- kernel/sched_cmd.h \
- kernel/sched_def.h \
- kernel/signal.h \
- kernel/signal_cmd.h \
- kernel/switch.h \
- kernel/xid.h
-
+kernel_HDRS = $(wildcard kernel/*.h)
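+# note: $(wildcard ...) is a GNU make extension; the header list is now
+# computed at make time instead of being maintained by hand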
EXTRA_PROGRAMS =
AH_TEMPLATE(VC_ENABLE_API_V13OBS, [Enable support for some obsoleted API of vserver 1.3.x])
AH_TEMPLATE(VC_ENABLE_API_V13, [Enable support for API of vserver 1.3.x])
AH_TEMPLATE(VC_ENABLE_API_NET, [Enable support for network context API])
+AH_TEMPLATE(VC_ENABLE_API_V21, [Enable support for API of vserver 2.1.x])
AH_TEMPLATE(VC_ENABLE_API_OLDPROC, [Enable API for a backward compatible /proc parsing])
AH_TEMPLATE(VC_ENABLE_API_OLDUTS, [Enable API for a backward compatible uts handling])
AC_MSG_CHECKING([for supported APIs])
AC_ARG_ENABLE([apis],
[AC_HELP_STRING([--enable-apis=APIS],
- [enable support for the given apis; possible values are: legacy,compat,v11,fscompat,v13obs,v13,net, ALL,NOLEGACY (default: v13,net)])],
+		    [enable support for the given apis; possible values are: legacy,compat,v11,fscompat,v13obs,v13,net,v21, ALL,NOLEGACY (default: v13,net,v21)])],
[],
- [enable_apis=v13,net])
+ [enable_apis=v13,net,v21])
-test x"$enable_apis" != xALL || enable_apis='legacy,compat,v11,fscompat,v13obs,v13,net'
-test x"$enable_apis" != xNOLEGACY || enable_apis='compat,v11,fscompat,v13,net'
+test x"$enable_apis" != xALL || enable_apis='legacy,compat,v11,fscompat,v13obs,v13,net,v21'
+test x"$enable_apis" != xNOLEGACY || enable_apis='compat,v11,fscompat,v13,net,v21'
enable_api_oldproc=
enable_api_olduts=
old_IFS=$IFS
AC_DEFINE(VC_ENABLE_API_V13, 1);;
(v13) AC_DEFINE(VC_ENABLE_API_V13, 1);;
(net) AC_DEFINE(VC_ENABLE_API_NET, 1);;
+ (v21) AC_DEFINE(VC_ENABLE_API_V21, 1);;
(oldproc) enable_api_oldproc=2;;
(olduts) enable_api_olduts=2;;
(*) AC_MSG_ERROR(['$i' is not a supported API]);;
--- /dev/null
+#ifndef _VX_CACCT_H
+#define _VX_CACCT_H
+
+
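+/* socket accounting classes; VXA_SOCK_SIZE doubles as the array bound */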
+enum sock_acc_field {
+ VXA_SOCK_UNSPEC = 0,
+ VXA_SOCK_UNIX,
+ VXA_SOCK_INET,
+ VXA_SOCK_INET6,
+ VXA_SOCK_PACKET,
+ VXA_SOCK_OTHER,
+ VXA_SOCK_SIZE /* array size */
+};
+
+#endif /* _VX_CACCT_H */
--- /dev/null
+#ifndef _VX_CACCT_CMD_H
+#define _VX_CACCT_CMD_H
+
+
+/* socket accounting commands */
+
+#define VCMD_sock_stat VC_CMD(VSTAT, 5, 0)
+
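+/* count/total come in triples per field; presumably recv/send/fail
+   as used by the kernel accounting hooks */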
+struct vcmd_sock_stat_v0 {
+ uint32_t field;
+ uint32_t count[3];
+ uint64_t total[3];
+};
+
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+
+extern int vc_sock_stat(struct vx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+#endif /* _VX_CACCT_CMD_H */
--- /dev/null
+#ifndef _VX_CACCT_DEF_H
+#define _VX_CACCT_DEF_H
+
+#include <asm/atomic.h>
+#include <linux/vserver/cacct.h>
+
+
+struct _vx_sock_acc {
+ atomic_t count;
+ atomic_t total;
+};
+
+/* context sub struct */
+
+struct _vx_cacct {
+ struct _vx_sock_acc sock[VXA_SOCK_SIZE][3];
+ atomic_t slab[8];
+ atomic_t page[6][8];
+};
+
+#ifdef CONFIG_VSERVER_DEBUG
+
+static inline void __dump_vx_cacct(struct _vx_cacct *cacct)
+{
+ int i,j;
+
+ printk("\t_vx_cacct:");
+	for (i = 0; i < VXA_SOCK_SIZE; i++) {
+ struct _vx_sock_acc *ptr = cacct->sock[i];
+
+ printk("\t [%d] =", i);
+ for (j=0; j<3; j++) {
+ printk(" [%d] = %8d, %8d", j,
+ atomic_read(&ptr[j].count),
+ atomic_read(&ptr[j].total));
+ }
+ printk("\n");
+ }
+}
+
+#endif
+
+#endif /* _VX_CACCT_DEF_H */
--- /dev/null
+#ifndef _VX_CACCT_INT_H
+#define _VX_CACCT_INT_H
+
+
+#ifdef __KERNEL__
+
+static inline
+unsigned long vx_sock_count(struct _vx_cacct *cacct, int type, int pos)
+{
+ return atomic_read(&cacct->sock[type][pos].count);
+}
+
+
+static inline
+unsigned long vx_sock_total(struct _vx_cacct *cacct, int type, int pos)
+{
+ return atomic_read(&cacct->sock[type][pos].total);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _VX_CACCT_INT_H */
#define _VX_CONTEXT_H
#include <linux/types.h>
+//#include <linux/capability.h>
#define MAX_S_CONTEXT 65535 /* Arbitrary limit */
+
+#ifdef CONFIG_VSERVER_DYNAMIC_IDS
#define MIN_D_CONTEXT 49152 /* dynamic contexts start here */
+#else
+#define MIN_D_CONTEXT 65536
+#endif
#define VX_DYNAMIC_ID ((uint32_t)-1) /* id for dynamic context */
#define VXF_VIRT_UPTIME 0x00020000
#define VXF_VIRT_CPU 0x00040000
#define VXF_VIRT_LOAD 0x00080000
+#define VXF_VIRT_TIME 0x00100000
#define VXF_HIDE_MOUNT 0x01000000
#define VXF_HIDE_NETIF 0x02000000
+#define VXF_HIDE_VINFO 0x04000000
#define VXF_STATE_SETUP (1ULL<<32)
#define VXF_STATE_INIT (1ULL<<33)
+#define VXF_STATE_ADMIN (1ULL<<34)
#define VXF_SC_HELPER (1ULL<<36)
#define VXF_REBOOT_KILL (1ULL<<37)
#define VXF_IGNEG_NICE (1ULL<<52)
-#define VXF_ONE_TIME (0x0003ULL<<32)
+#define VXF_ONE_TIME (0x0007ULL<<32)
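+/* covers the three STATE_* bits; one-time flags presumably get cleared
+   once context startup completes */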
-#define VXF_INIT_SET (VXF_STATE_SETUP|VXF_STATE_INIT)
+#define VXF_INIT_SET (VXF_STATE_SETUP|VXF_STATE_INIT|VXF_STATE_ADMIN)
/* context migration */
#define VXC_BINARY_MOUNT 0x00040000
#define VXC_QUOTA_CTL 0x00100000
+#define VXC_ADMIN_MAPPER 0x00200000
+#define VXC_ADMIN_CLOOP 0x00400000
/* context state changes */
#include "limit_def.h"
#include "sched_def.h"
#include "cvirt_def.h"
+#include "cacct_def.h"
+
+struct _vx_info_pc {
+ struct _vx_sched_pc sched_pc;
+ struct _vx_cvirt_pc cvirt_pc;
+};
struct vx_info {
struct hlist_node vx_hlist; /* linked list of contexts */
uint64_t vx_flags; /* context flags */
uint64_t vx_bcaps; /* bounding caps (system) */
uint64_t vx_ccaps; /* context caps (vserver) */
+ kernel_cap_t vx_cap_bset; /* the guest's bset */
struct task_struct *vx_reaper; /* guest reaper process */
pid_t vx_initpid; /* PID of guest init */
struct _vx_cvirt cvirt; /* virtual/bias stuff */
struct _vx_cacct cacct; /* context accounting */
+#ifndef CONFIG_SMP
+ struct _vx_info_pc info_pc; /* per cpu data */
+#else
+ struct _vx_info_pc *ptr_pc; /* per cpu array */
+#endif
+
wait_queue_head_t vx_wait; /* context exit waitqueue */
int reboot_cmd; /* last sys_reboot() cmd */
int exit_code; /* last process exit code */
char vx_name[65]; /* vserver name */
};
+#ifndef CONFIG_SMP
+#define vx_ptr_pc(vxi) (&(vxi)->info_pc)
+#define vx_per_cpu(vxi, v, id) vx_ptr_pc(vxi)->v
+#else
+#define vx_ptr_pc(vxi) ((vxi)->ptr_pc)
+#define vx_per_cpu(vxi, v, id) per_cpu_ptr(vx_ptr_pc(vxi), id)->v
+#endif
+
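+/* shorthand for the current CPU's slice of the per-context data */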
+#define vx_cpu(vxi, v) vx_per_cpu(vxi, v, smp_processor_id())
+
+
+struct vx_info_save {
+ struct vx_info *vxi;
+ xid_t xid;
+};
+
/* status flags */
#define VXS_HASHED 0x0001
#define VXS_PAUSED 0x0010
-#define VXS_ONHOLD 0x0020
#define VXS_SHUTDOWN 0x0100
#define VXS_HELPER 0x1000
#define VXS_RELEASED 0x8000
#define VX_ATR_MASK 0x0F00
+#ifdef CONFIG_VSERVER_PRIVACY
+#define VX_ADMIN_P (0)
+#define VX_WATCH_P (0)
+#else
+#define VX_ADMIN_P VX_ADMIN
+#define VX_WATCH_P VX_WATCH
+#endif
+
extern void claim_vx_info(struct vx_info *, struct task_struct *);
extern void release_vx_info(struct vx_info *, struct task_struct *);
};
#ifdef __KERNEL__
-extern int vc_vx_info(uint32_t, void __user *);
+extern int vc_vx_info(struct vx_info *, void __user *);
#endif /* __KERNEL__ */
+#define VCMD_ctx_stat VC_CMD(VSTAT, 0, 0)
+
+struct vcmd_ctx_stat_v0 {
+ uint32_t usecnt;
+ uint32_t tasks;
+ /* more to come */
+};
+
+#ifdef __KERNEL__
+extern int vc_ctx_stat(struct vx_info *, void __user *);
+
+#endif /* __KERNEL__ */
/* context commands */
#ifdef __KERNEL__
extern int vc_ctx_create(uint32_t, void __user *);
-extern int vc_ctx_migrate(uint32_t, void __user *);
+extern int vc_ctx_migrate(struct vx_info *, void __user *);
#endif /* __KERNEL__ */
};
#ifdef __KERNEL__
-extern int vc_get_cflags(uint32_t, void __user *);
-extern int vc_set_cflags(uint32_t, void __user *);
+extern int vc_get_cflags(struct vx_info *, void __user *);
+extern int vc_set_cflags(struct vx_info *, void __user *);
#endif /* __KERNEL__ */
/* context caps commands */
-#define VCMD_get_ccaps VC_CMD(FLAGS, 3, 0)
-#define VCMD_set_ccaps VC_CMD(FLAGS, 4, 0)
+#define VCMD_get_ccaps_v0 VC_CMD(FLAGS, 3, 0)
+#define VCMD_set_ccaps_v0 VC_CMD(FLAGS, 4, 0)
struct vcmd_ctx_caps_v0 {
uint64_t bcaps;
uint64_t cmask;
};
+#define VCMD_get_ccaps VC_CMD(FLAGS, 3, 1)
+#define VCMD_set_ccaps VC_CMD(FLAGS, 4, 1)
+
+struct vcmd_ctx_caps_v1 {
+ uint64_t ccaps;
+ uint64_t cmask;
+};
+
+#ifdef __KERNEL__
+extern int vc_get_ccaps_v0(struct vx_info *, void __user *);
+extern int vc_set_ccaps_v0(struct vx_info *, void __user *);
+extern int vc_get_ccaps(struct vx_info *, void __user *);
+extern int vc_set_ccaps(struct vx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+
+
+/* bcaps commands */
+
+#define VCMD_get_bcaps VC_CMD(FLAGS, 9, 0)
+#define VCMD_set_bcaps VC_CMD(FLAGS,10, 0)
+
+struct vcmd_bcaps {
+ uint64_t bcaps;
+ uint64_t bmask;
+};
+
#ifdef __KERNEL__
-extern int vc_get_ccaps(uint32_t, void __user *);
-extern int vc_set_ccaps(uint32_t, void __user *);
+extern int vc_get_bcaps(struct vx_info *, void __user *);
+extern int vc_set_bcaps(struct vx_info *, void __user *);
#endif /* __KERNEL__ */
#endif /* _VX_CONTEXT_CMD_H */
int vx_do_syslog(int, char __user *, int);
#endif /* __KERNEL__ */
-#else /* _VX_CVIRT_H */
-#warning duplicate inclusion
#endif /* _VX_CVIRT_H */
#include <linux/compiler.h>
-extern int vc_set_vhi_name(uint32_t, void __user *);
-extern int vc_get_vhi_name(uint32_t, void __user *);
+extern int vc_set_vhi_name(struct vx_info *, void __user *);
+extern int vc_get_vhi_name(struct vx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+
+#define VCMD_virt_stat VC_CMD(VSTAT, 3, 0)
+
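+/* per-context uptime, thread counts and load averages; mirrors struct _vx_cvirt */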
+struct vcmd_virt_stat_v0 {
+ uint64_t offset;
+ uint64_t uptime;
+ uint32_t nr_threads;
+ uint32_t nr_running;
+ uint32_t nr_uninterruptible;
+ uint32_t nr_onhold;
+ uint32_t nr_forks;
+ uint32_t load[3];
+};
+
+#ifdef __KERNEL__
+extern int vc_virt_stat(struct vx_info *, void __user *);
#endif /* __KERNEL__ */
#endif /* _VX_CVIRT_CMD_H */
/* context sub struct */
struct _vx_cvirt {
- int max_threads; /* maximum allowed threads */
+// int max_threads; /* maximum allowed threads */
atomic_t nr_threads; /* number of current threads */
atomic_t nr_running; /* number of running threads */
atomic_t nr_uninterruptible; /* number of uninterruptible threads */
atomic_t nr_onhold; /* processes on hold */
uint32_t onhold_last; /* jiffies when put on hold */
+ struct timeval bias_tv; /* time offset to the host */
struct timespec bias_idle;
struct timespec bias_uptime; /* context creation point */
uint64_t bias_clock; /* offset in clock_t */
spinlock_t load_lock; /* lock for the load averages */
atomic_t load_updates; /* nr of load updates done so far */
- uint32_t load_last; /* last time load was cacled */
+ uint32_t load_last; /* last time load was calculated */
uint32_t load[3]; /* load averages 1,5,15 */
atomic_t total_forks; /* number of forks so far */
- struct _vx_usage_stat cpustat[NR_CPUS];
-
struct _vx_syslog syslog;
};
-struct _vx_sock_acc {
- atomic_t count;
- atomic_t total;
+struct _vx_cvirt_pc {
+ struct _vx_usage_stat cpustat;
};
-/* context sub struct */
-struct _vx_cacct {
- unsigned long total_forks;
+#ifdef CONFIG_VSERVER_DEBUG
- struct _vx_sock_acc sock[5][3];
-};
+static inline void __dump_vx_cvirt(struct _vx_cvirt *cvirt)
+{
+ printk("\t_vx_cvirt:\n");
+ printk("\t threads: %4d, %4d, %4d, %4d\n",
+ atomic_read(&cvirt->nr_threads),
+ atomic_read(&cvirt->nr_running),
+ atomic_read(&cvirt->nr_uninterruptible),
+ atomic_read(&cvirt->nr_onhold));
+ /* add rest here */
+ printk("\t total_forks = %d\n", atomic_read(&cvirt->total_forks));
+}
+
+#endif
#endif /* _VX_CVIRT_DEF_H */
extern unsigned int vx_debug_switch;
extern unsigned int vx_debug_xid;
extern unsigned int vx_debug_nid;
+extern unsigned int vx_debug_tag;
extern unsigned int vx_debug_net;
extern unsigned int vx_debug_limit;
extern unsigned int vx_debug_cres;
extern unsigned int vx_debug_dlim;
+extern unsigned int vx_debug_quota;
extern unsigned int vx_debug_cvirt;
extern unsigned int vx_debug_misc;
printk(VX_WARNLEVEL f "\n" , ##x); \
} while (0)
+
#define vxd_path(d,m) \
({ static char _buffer[PATH_MAX]; \
d_path((d), (m), _buffer, sizeof(_buffer)); })
#define vxd_cond_path(n) \
((n) ? vxd_path((n)->dentry, (n)->mnt) : "<null>" )
+
+struct vx_info;
+
+void dump_vx_info(struct vx_info *, int);
+void dump_vx_info_inactive(int);
+
#else /* CONFIG_VSERVER_DEBUG */
#define vx_debug_switch 0
#define vx_debug_xid 0
#define vx_debug_nid 0
+#define vx_debug_tag 0
#define vx_debug_net 0
#define vx_debug_limit 0
#define vx_debug_cres 0
#endif /* CONFIG_VSERVER_DEBUG */
-/* history stuff */
-
-#ifdef CONFIG_VSERVER_HISTORY
-
-
-extern unsigned volatile int vxh_active;
-
-struct _vxhe_vxi {
- struct vx_info *ptr;
- unsigned xid;
- unsigned usecnt;
- unsigned tasks;
-};
-
-struct _vxhe_set_clr {
- void *data;
-};
-
-struct _vxhe_loc_lookup {
- unsigned arg;
-};
-
-enum {
- VXH_UNUSED=0,
- VXH_THROW_OOPS=1,
-
- VXH_GET_VX_INFO,
- VXH_PUT_VX_INFO,
- VXH_INIT_VX_INFO,
- VXH_SET_VX_INFO,
- VXH_CLR_VX_INFO,
- VXH_CLAIM_VX_INFO,
- VXH_RELEASE_VX_INFO,
- VXH_ALLOC_VX_INFO,
- VXH_DEALLOC_VX_INFO,
- VXH_HASH_VX_INFO,
- VXH_UNHASH_VX_INFO,
- VXH_LOC_VX_INFO,
- VXH_LOOKUP_VX_INFO,
- VXH_CREATE_VX_INFO,
-};
-
-struct _vx_hist_entry {
- void *loc;
- unsigned short seq;
- unsigned short type;
- struct _vxhe_vxi vxi;
- union {
- struct _vxhe_set_clr sc;
- struct _vxhe_loc_lookup ll;
- };
-};
-
-struct _vx_hist_entry *vxh_advance(void *loc);
-
-
-static inline
-void __vxh_copy_vxi(struct _vx_hist_entry *entry, struct vx_info *vxi)
-{
- entry->vxi.ptr = vxi;
- if (vxi) {
- entry->vxi.usecnt = atomic_read(&vxi->vx_usecnt);
- entry->vxi.tasks = atomic_read(&vxi->vx_tasks);
- entry->vxi.xid = vxi->vx_id;
- }
-}
-
-
-#define __HERE__ current_text_addr()
-
-#define __VXH_BODY(__type, __data, __here) \
- struct _vx_hist_entry *entry; \
- \
- preempt_disable(); \
- entry = vxh_advance(__here); \
- __data; \
- entry->type = __type; \
- preempt_enable();
-
-
- /* pass vxi only */
-
-#define __VXH_SMPL \
- __vxh_copy_vxi(entry, vxi)
-
-static inline
-void __vxh_smpl(struct vx_info *vxi, int __type, void *__here)
-{
- __VXH_BODY(__type, __VXH_SMPL, __here)
-}
-
- /* pass vxi and data (void *) */
-
-#define __VXH_DATA \
- __vxh_copy_vxi(entry, vxi); \
- entry->sc.data = data
-
-static inline
-void __vxh_data(struct vx_info *vxi, void *data,
- int __type, void *__here)
-{
- __VXH_BODY(__type, __VXH_DATA, __here)
-}
-
- /* pass vxi and arg (long) */
-
-#define __VXH_LONG \
- __vxh_copy_vxi(entry, vxi); \
- entry->ll.arg = arg
-
-static inline
-void __vxh_long(struct vx_info *vxi, long arg,
- int __type, void *__here)
-{
- __VXH_BODY(__type, __VXH_LONG, __here)
-}
-
-
-static inline
-void __vxh_throw_oops(void *__here)
-{
- __VXH_BODY(VXH_THROW_OOPS, {}, __here);
- /* prevent further acquisition */
- vxh_active = 0;
-}
-
-
-#define vxh_throw_oops() __vxh_throw_oops(__HERE__);
-
-#define __vxh_get_vx_info(v,h) __vxh_smpl(v, VXH_GET_VX_INFO, h);
-#define __vxh_put_vx_info(v,h) __vxh_smpl(v, VXH_PUT_VX_INFO, h);
-
-#define __vxh_init_vx_info(v,d,h) \
- __vxh_data(v,d, VXH_INIT_VX_INFO, h);
-#define __vxh_set_vx_info(v,d,h) \
- __vxh_data(v,d, VXH_SET_VX_INFO, h);
-#define __vxh_clr_vx_info(v,d,h) \
- __vxh_data(v,d, VXH_CLR_VX_INFO, h);
-
-#define __vxh_claim_vx_info(v,d,h) \
- __vxh_data(v,d, VXH_CLAIM_VX_INFO, h);
-#define __vxh_release_vx_info(v,d,h) \
- __vxh_data(v,d, VXH_RELEASE_VX_INFO, h);
-
-#define vxh_alloc_vx_info(v) \
- __vxh_smpl(v, VXH_ALLOC_VX_INFO, __HERE__);
-#define vxh_dealloc_vx_info(v) \
- __vxh_smpl(v, VXH_DEALLOC_VX_INFO, __HERE__);
-
-#define vxh_hash_vx_info(v) \
- __vxh_smpl(v, VXH_HASH_VX_INFO, __HERE__);
-#define vxh_unhash_vx_info(v) \
- __vxh_smpl(v, VXH_UNHASH_VX_INFO, __HERE__);
-
-#define vxh_loc_vx_info(v,l) \
- __vxh_long(v,l, VXH_LOC_VX_INFO, __HERE__);
-#define vxh_lookup_vx_info(v,l) \
- __vxh_long(v,l, VXH_LOOKUP_VX_INFO, __HERE__);
-#define vxh_create_vx_info(v,l) \
- __vxh_long(v,l, VXH_CREATE_VX_INFO, __HERE__);
-
-extern void vxh_dump_history(void);
-
-
-#else /* CONFIG_VSERVER_HISTORY */
-
-#define __HERE__ 0
-
-#define vxh_throw_oops() do { } while (0)
-
-#define __vxh_get_vx_info(v,h) do { } while (0)
-#define __vxh_put_vx_info(v,h) do { } while (0)
-
-#define __vxh_init_vx_info(v,d,h) do { } while (0)
-#define __vxh_set_vx_info(v,d,h) do { } while (0)
-#define __vxh_clr_vx_info(v,d,h) do { } while (0)
-
-#define __vxh_claim_vx_info(v,d,h) do { } while (0)
-#define __vxh_release_vx_info(v,d,h) do { } while (0)
-
-#define vxh_alloc_vx_info(v) do { } while (0)
-#define vxh_dealloc_vx_info(v) do { } while (0)
-
-#define vxh_hash_vx_info(v) do { } while (0)
-#define vxh_unhash_vx_info(v) do { } while (0)
-
-#define vxh_loc_vx_info(a,v) do { } while (0)
-#define vxh_lookup_vx_info(a,v) do { } while (0)
-#define vxh_create_vx_info(a,v) do { } while (0)
-
-#define vxh_dump_history() do { } while (0)
-
-
-#endif /* CONFIG_VSERVER_HISTORY */
-
-
#ifdef CONFIG_VSERVER_DEBUG
#define vxd_assert_lock(l) assert_spin_locked(l)
#define vxd_assert(c,f,x...) vxlprintk(!(c), \
#define VCMD_dump_history VC_CMD(DEBUG, 1, 0)
+#define VCMD_read_history VC_CMD(DEBUG, 5, 0)
+#define VCMD_read_monitor VC_CMD(DEBUG, 6, 0)
+
+struct vcmd_read_history_v0 {
+ uint32_t index;
+ uint32_t count;
+ char __user *data;
+};
+
+struct vcmd_read_monitor_v0 {
+ uint32_t index;
+ uint32_t count;
+ char __user *data;
+};
+
+
#ifdef __KERNEL__
+#ifdef CONFIG_COMPAT
+
+#include <asm/compat.h>
+
+struct vcmd_read_history_v0_x32 {
+ uint32_t index;
+ uint32_t count;
+ compat_uptr_t data_ptr;
+};
+
+struct vcmd_read_monitor_v0_x32 {
+ uint32_t index;
+ uint32_t count;
+ compat_uptr_t data_ptr;
+};
+
+#endif /* CONFIG_COMPAT */
+
extern int vc_dump_history(uint32_t);
+extern int vc_read_history(uint32_t, void __user *);
+extern int vc_read_monitor(uint32_t, void __user *);
+
+#ifdef CONFIG_COMPAT
+
+extern int vc_read_history_x32(uint32_t, void __user *);
+extern int vc_read_monitor_x32(uint32_t, void __user *);
+
+#endif /* CONFIG_COMPAT */
+
#endif /* __KERNEL__ */
#endif /* _VX_DEBUG_CMD_H */
#include "switch.h"
-#define CDLIM_UNSET (0ULL)
-#define CDLIM_INFINITY (~0ULL)
-#define CDLIM_KEEP (~1ULL)
-
#ifdef __KERNEL__
+/* keep in sync with CDLIM_INFINITY */
+
+#define DLIM_INFINITY (~0ULL)
+
#include <linux/spinlock.h>
struct super_block;
struct dl_info {
struct hlist_node dl_hlist; /* linked list of contexts */
struct rcu_head dl_rcu; /* the rcu head */
- xid_t dl_xid; /* context id */
+ tag_t dl_tag; /* context tag */
atomic_t dl_usecnt; /* usage count */
atomic_t dl_refcnt; /* reference count */
spinlock_t dl_lock; /* protect the values */
- uint64_t dl_space_used; /* used space in bytes */
- uint64_t dl_space_total; /* maximum space in bytes */
- uint32_t dl_inodes_used; /* used inodes */
- uint32_t dl_inodes_total; /* maximum inodes */
+ unsigned long long dl_space_used; /* used space in bytes */
+ unsigned long long dl_space_total; /* maximum space in bytes */
+ unsigned long dl_inodes_used; /* used inodes */
+ unsigned long dl_inodes_total; /* maximum inodes */
unsigned int dl_nrlmult; /* non root limit mult */
};
extern void rcu_free_dl_info(struct rcu_head *);
extern void unhash_dl_info(struct dl_info *);
-extern struct dl_info *locate_dl_info(struct super_block *, xid_t);
+extern struct dl_info *locate_dl_info(struct super_block *, tag_t);
struct kstatfs;
uint32_t flags;
};
+#define CDLIM_UNSET ((uint32_t)0UL)
+#define CDLIM_INFINITY ((uint32_t)~0UL)
+#define CDLIM_KEEP ((uint32_t)~1UL)
#ifdef __KERNEL__
#ifdef CONFIG_COMPAT
+#include <asm/compat.h>
+
struct vcmd_ctx_dlimit_base_v0_x32 {
compat_uptr_t name_ptr;
uint32_t flags;
--- /dev/null
+#ifndef _VX_GLOBAL_H
+#define _VX_GLOBAL_H
+
+
+extern atomic_t vx_global_ctotal;
+extern atomic_t vx_global_cactive;
+
+#endif /* _VX_GLOBAL_H */
--- /dev/null
+#ifndef _VX_HISTORY_H
+#define _VX_HISTORY_H
+
+
+enum {
+ VXH_UNUSED=0,
+ VXH_THROW_OOPS=1,
+
+ VXH_GET_VX_INFO,
+ VXH_PUT_VX_INFO,
+ VXH_INIT_VX_INFO,
+ VXH_SET_VX_INFO,
+ VXH_CLR_VX_INFO,
+ VXH_CLAIM_VX_INFO,
+ VXH_RELEASE_VX_INFO,
+ VXH_ALLOC_VX_INFO,
+ VXH_DEALLOC_VX_INFO,
+ VXH_HASH_VX_INFO,
+ VXH_UNHASH_VX_INFO,
+ VXH_LOC_VX_INFO,
+ VXH_LOOKUP_VX_INFO,
+ VXH_CREATE_VX_INFO,
+};
+
+struct _vxhe_vxi {
+ struct vx_info *ptr;
+ unsigned xid;
+ unsigned usecnt;
+ unsigned tasks;
+};
+
+struct _vxhe_set_clr {
+ void *data;
+};
+
+struct _vxhe_loc_lookup {
+ unsigned arg;
+};
+
+struct _vx_hist_entry {
+ void *loc;
+ unsigned short seq;
+ unsigned short type;
+ struct _vxhe_vxi vxi;
+ union {
+ struct _vxhe_set_clr sc;
+ struct _vxhe_loc_lookup ll;
+ };
+};
+
+#ifdef CONFIG_VSERVER_HISTORY
+
+extern unsigned volatile int vxh_active;
+
+struct _vx_hist_entry *vxh_advance(void *loc);
+
+
+static inline
+void __vxh_copy_vxi(struct _vx_hist_entry *entry, struct vx_info *vxi)
+{
+ entry->vxi.ptr = vxi;
+ if (vxi) {
+ entry->vxi.usecnt = atomic_read(&vxi->vx_usecnt);
+ entry->vxi.tasks = atomic_read(&vxi->vx_tasks);
+ entry->vxi.xid = vxi->vx_id;
+ }
+}
+
+
+#define __HERE__ current_text_addr()
+
+#define __VXH_BODY(__type, __data, __here) \
+ struct _vx_hist_entry *entry; \
+ \
+ preempt_disable(); \
+ entry = vxh_advance(__here); \
+ __data; \
+ entry->type = __type; \
+ preempt_enable();
+
+
+ /* pass vxi only */
+
+#define __VXH_SMPL \
+ __vxh_copy_vxi(entry, vxi)
+
+static inline
+void __vxh_smpl(struct vx_info *vxi, int __type, void *__here)
+{
+ __VXH_BODY(__type, __VXH_SMPL, __here)
+}
+
+ /* pass vxi and data (void *) */
+
+#define __VXH_DATA \
+ __vxh_copy_vxi(entry, vxi); \
+ entry->sc.data = data
+
+static inline
+void __vxh_data(struct vx_info *vxi, void *data,
+ int __type, void *__here)
+{
+ __VXH_BODY(__type, __VXH_DATA, __here)
+}
+
+ /* pass vxi and arg (long) */
+
+#define __VXH_LONG \
+ __vxh_copy_vxi(entry, vxi); \
+ entry->ll.arg = arg
+
+static inline
+void __vxh_long(struct vx_info *vxi, long arg,
+ int __type, void *__here)
+{
+ __VXH_BODY(__type, __VXH_LONG, __here)
+}
+
+
+static inline
+void __vxh_throw_oops(void *__here)
+{
+ __VXH_BODY(VXH_THROW_OOPS, {}, __here);
+ /* prevent further acquisition */
+ vxh_active = 0;
+}
+
+
+#define vxh_throw_oops() __vxh_throw_oops(__HERE__);
+
+#define __vxh_get_vx_info(v,h) __vxh_smpl(v, VXH_GET_VX_INFO, h);
+#define __vxh_put_vx_info(v,h) __vxh_smpl(v, VXH_PUT_VX_INFO, h);
+
+#define __vxh_init_vx_info(v,d,h) \
+ __vxh_data(v,d, VXH_INIT_VX_INFO, h);
+#define __vxh_set_vx_info(v,d,h) \
+ __vxh_data(v,d, VXH_SET_VX_INFO, h);
+#define __vxh_clr_vx_info(v,d,h) \
+ __vxh_data(v,d, VXH_CLR_VX_INFO, h);
+
+#define __vxh_claim_vx_info(v,d,h) \
+ __vxh_data(v,d, VXH_CLAIM_VX_INFO, h);
+#define __vxh_release_vx_info(v,d,h) \
+ __vxh_data(v,d, VXH_RELEASE_VX_INFO, h);
+
+#define vxh_alloc_vx_info(v) \
+ __vxh_smpl(v, VXH_ALLOC_VX_INFO, __HERE__);
+#define vxh_dealloc_vx_info(v) \
+ __vxh_smpl(v, VXH_DEALLOC_VX_INFO, __HERE__);
+
+#define vxh_hash_vx_info(v) \
+ __vxh_smpl(v, VXH_HASH_VX_INFO, __HERE__);
+#define vxh_unhash_vx_info(v) \
+ __vxh_smpl(v, VXH_UNHASH_VX_INFO, __HERE__);
+
+#define vxh_loc_vx_info(v,l) \
+ __vxh_long(v,l, VXH_LOC_VX_INFO, __HERE__);
+#define vxh_lookup_vx_info(v,l) \
+ __vxh_long(v,l, VXH_LOOKUP_VX_INFO, __HERE__);
+#define vxh_create_vx_info(v,l) \
+ __vxh_long(v,l, VXH_CREATE_VX_INFO, __HERE__);
+
+extern void vxh_dump_history(void);
+
+
+#else /* CONFIG_VSERVER_HISTORY */
+
+#define __HERE__ 0
+
+#define vxh_throw_oops() do { } while (0)
+
+#define __vxh_get_vx_info(v,h) do { } while (0)
+#define __vxh_put_vx_info(v,h) do { } while (0)
+
+#define __vxh_init_vx_info(v,d,h) do { } while (0)
+#define __vxh_set_vx_info(v,d,h) do { } while (0)
+#define __vxh_clr_vx_info(v,d,h) do { } while (0)
+
+#define __vxh_claim_vx_info(v,d,h) do { } while (0)
+#define __vxh_release_vx_info(v,d,h) do { } while (0)
+
+#define vxh_alloc_vx_info(v) do { } while (0)
+#define vxh_dealloc_vx_info(v) do { } while (0)
+
+#define vxh_hash_vx_info(v) do { } while (0)
+#define vxh_unhash_vx_info(v) do { } while (0)
+
+#define vxh_loc_vx_info(a,v) do { } while (0)
+#define vxh_lookup_vx_info(a,v) do { } while (0)
+#define vxh_create_vx_info(a,v) do { } while (0)
+
+#define vxh_dump_history() do { } while (0)
+
+
+#endif /* CONFIG_VSERVER_HISTORY */
+
+#endif /* _VX_HISTORY_H */
#define _VX_INODE_H
-#define IATTR_XID 0x01000000
+#define IATTR_TAG 0x01000000
#define IATTR_ADMIN 0x00000001
#define IATTR_WATCH 0x00000002
#ifdef CONFIG_COMPAT
+#include <asm/compat.h>
+
struct vcmd_ctx_iattr_v1_x32 {
compat_uptr_t name_ptr;
uint32_t xid;
#define VLIMIT_OPENFD 17
#define VLIMIT_ANON 18
#define VLIMIT_SHMEM 19
+#define VLIMIT_SEMARY 20
+#define VLIMIT_NSEMS 21
+#define VLIMIT_DENTRY 22
#ifdef __KERNEL__
+#define VLIM_NOCHECK (1L << VLIMIT_DENTRY)
+
+/* keep in sync with CRLIM_INFINITY */
+
+#define VLIM_INFINITY (~0ULL)
+
+#ifndef RLIM_INFINITY
+#warning RLIM_INFINITY is undefined
+#endif
+
+#define __rlim_val(l,r,v) ((l)->res[(r)].v)
+
+#define __rlim_soft(l,r) __rlim_val(l,r,soft)
+#define __rlim_hard(l,r) __rlim_val(l,r,hard)
+
+#define __rlim_rcur(l,r) __rlim_val(l,r,rcur)
+#define __rlim_rmin(l,r) __rlim_val(l,r,rmin)
+#define __rlim_rmax(l,r) __rlim_val(l,r,rmax)
+
+#define __rlim_lhit(l,r) __rlim_val(l,r,lhit)
+#define __rlim_hit(l,r) atomic_inc(&__rlim_lhit(l,r))
+
+typedef atomic_long_t rlim_atomic_t;
+typedef unsigned long rlim_t;
+
+#define __rlim_get(l,r) atomic_long_read(&__rlim_rcur(l,r))
+#define __rlim_set(l,r,v) atomic_long_set(&__rlim_rcur(l,r), v)
+#define __rlim_inc(l,r) atomic_long_inc(&__rlim_rcur(l,r))
+#define __rlim_dec(l,r) atomic_long_dec(&__rlim_rcur(l,r))
+#define __rlim_add(l,r,v) atomic_long_add(v, &__rlim_rcur(l,r))
+#define __rlim_sub(l,r,v) atomic_long_sub(v, &__rlim_rcur(l,r))
+
+
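+/* conversion between kernel rlim_t and the 64bit vserver limit values,
+   remapping infinity when the two widths differ */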
+#if (RLIM_INFINITY == VLIM_INFINITY)
+#define VX_VLIM(r) ((long long)(long)(r))
+#define VX_RLIM(v) ((rlim_t)(v))
+#else
+#define VX_VLIM(r) (((r) == RLIM_INFINITY) \
+ ? VLIM_INFINITY : (long long)(r))
+#define VX_RLIM(v) (((v) == VLIM_INFINITY) \
+ ? RLIM_INFINITY : (rlim_t)(v))
+#endif
+
struct sysinfo;
void vx_vsi_meminfo(struct sysinfo *);
#define VCMD_get_rlimit VC_CMD(RLIMIT, 1, 0)
#define VCMD_set_rlimit VC_CMD(RLIMIT, 2, 0)
#define VCMD_get_rlimit_mask VC_CMD(RLIMIT, 3, 0)
+#define VCMD_reset_minmax VC_CMD(RLIMIT, 9, 0)
struct vcmd_ctx_rlimit_v0 {
uint32_t id;
uint32_t maximum;
};
+#define VCMD_rlimit_stat VC_CMD(VSTAT, 1, 0)
+
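+/* read-only snapshot of one limit: current value, observed min/max and hit count */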
+struct vcmd_rlimit_stat_v0 {
+ uint32_t id;
+ uint32_t hits;
+ uint64_t value;
+ uint64_t minimum;
+ uint64_t maximum;
+};
+
#define CRLIM_UNSET (0ULL)
#define CRLIM_INFINITY (~0ULL)
#define CRLIM_KEEP (~1ULL)
#include <linux/compiler.h>
-extern int vc_get_rlimit(uint32_t, void __user *);
-extern int vc_set_rlimit(uint32_t, void __user *);
extern int vc_get_rlimit_mask(uint32_t, void __user *);
+extern int vc_get_rlimit(struct vx_info *, void __user *);
+extern int vc_set_rlimit(struct vx_info *, void __user *);
+extern int vc_reset_minmax(struct vx_info *, void __user *);
+
+extern int vc_rlimit_stat(struct vx_info *, void __user *);
#ifdef CONFIG_IA32_EMULATION
-extern int vc_get_rlimit_x32(uint32_t, void __user *);
-extern int vc_set_rlimit_x32(uint32_t, void __user *);
+extern int vc_get_rlimit_x32(struct vx_info *, void __user *);
+extern int vc_set_rlimit_x32(struct vx_info *, void __user *);
#endif /* CONFIG_IA32_EMULATION */
#include "limit.h"
+struct _vx_res_limit {
+ rlim_t soft; /* Context soft limit */
+ rlim_t hard; /* Context hard limit */
+
+ rlim_atomic_t rcur; /* Current value */
+ rlim_t rmin; /* Context minimum */
+ rlim_t rmax; /* Context maximum */
+
+ atomic_t lhit; /* Limit hits */
+};
+
/* context sub struct */
struct _vx_limit {
- atomic_t ticks;
-
- unsigned long rlim[NUM_LIMITS]; /* Context limit */
- unsigned long rmax[NUM_LIMITS]; /* Context maximum */
- atomic_t rcur[NUM_LIMITS]; /* Current value */
- atomic_t lhit[NUM_LIMITS]; /* Limit hits */
+ struct _vx_res_limit res[NUM_LIMITS];
};
+#ifdef CONFIG_VSERVER_DEBUG
+
+static inline void __dump_vx_limit(struct _vx_limit *limit)
+{
+ int i;
+
+ printk("\t_vx_limit:");
+ for (i=0; i<NUM_LIMITS; i++) {
+ printk("\t [%2d] = %8lu %8lu/%8lu, %8ld/%8ld, %8d\n",
+ i, (unsigned long)__rlim_get(limit, i),
+ (unsigned long)__rlim_rmin(limit, i),
+ (unsigned long)__rlim_rmax(limit, i),
+ (long)__rlim_soft(limit, i),
+ (long)__rlim_hard(limit, i),
+ atomic_read(&__rlim_lhit(limit, i)));
+ }
+}
+
+#endif
#endif /* _VX_LIMIT_DEF_H */
#ifdef __KERNEL__
-#define VXD_RCRES(r) VXD_CBIT(cres, (r))
-#define VXD_RLIMIT(r) VXD_CBIT(limit, (r))
+#define VXD_RCRES_COND(r) VXD_CBIT(cres, (r))
+#define VXD_RLIMIT_COND(r) VXD_CBIT(limit, (r))
extern const char *vlimit_name[NUM_LIMITS];
static inline void __vx_acc_cres(struct vx_info *vxi,
int res, int dir, void *_data, char *_file, int _line)
{
- if (VXD_RCRES(res))
- vxlprintk(1, "vx_acc_cres[%5d,%s,%2d]: %5d%s (%p)",
+ if (VXD_RCRES_COND(res))
+ vxlprintk(1, "vx_acc_cres[%5d,%s,%2d]: %5ld%s (%p)",
(vxi ? vxi->vx_id : -1), vlimit_name[res], res,
- (vxi ? atomic_read(&vxi->limit.rcur[res]) : 0),
+ (vxi ? (long)__rlim_get(&vxi->limit, res) : 0),
(dir > 0) ? "++" : "--", _data, _file, _line);
if (!vxi)
return;
if (dir > 0)
- atomic_inc(&vxi->limit.rcur[res]);
+ __rlim_inc(&vxi->limit, res);
else
- atomic_dec(&vxi->limit.rcur[res]);
+ __rlim_dec(&vxi->limit, res);
}
static inline void __vx_add_cres(struct vx_info *vxi,
int res, int amount, void *_data, char *_file, int _line)
{
- if (VXD_RCRES(res))
- vxlprintk(1, "vx_add_cres[%5d,%s,%2d]: %5d += %5d (%p)",
+ if (VXD_RCRES_COND(res))
+ vxlprintk(1, "vx_add_cres[%5d,%s,%2d]: %5ld += %5d (%p)",
(vxi ? vxi->vx_id : -1), vlimit_name[res], res,
- (vxi ? atomic_read(&vxi->limit.rcur[res]) : 0),
+ (vxi ? (long)__rlim_get(&vxi->limit, res) : 0),
amount, _data, _file, _line);
if (amount == 0)
return;
if (!vxi)
return;
- atomic_add(amount, &vxi->limit.rcur[res]);
+ __rlim_add(&vxi->limit, res, amount);
}
static inline int __vx_cres_avail(struct vx_info *vxi,
int res, int num, char *_file, int _line)
{
- unsigned long value;
+ rlim_t value;
- if (VXD_RLIMIT(res))
- vxlprintk(1, "vx_cres_avail[%5d,%s,%2d]: %5ld > %5d + %5d",
+ if (VXD_RLIMIT_COND(res))
+ vxlprintk(1, "vx_cres_avail[%5d,%s,%2d]: %5ld/%5ld > %5ld + %5d",
(vxi ? vxi->vx_id : -1), vlimit_name[res], res,
- (vxi ? vxi->limit.rlim[res] : 1),
- (vxi ? atomic_read(&vxi->limit.rcur[res]) : 0),
+ (vxi ? (long)__rlim_soft(&vxi->limit, res) : -1),
+ (vxi ? (long)__rlim_hard(&vxi->limit, res) : -1),
+ (vxi ? (long)__rlim_get(&vxi->limit, res) : 0),
num, _file, _line);
if (num == 0)
return 1;
if (!vxi)
return 1;
- value = atomic_read(&vxi->limit.rcur[res]);
+ value = __rlim_get(&vxi->limit, res);
- if (value > vxi->limit.rmax[res])
- vxi->limit.rmax[res] = value;
+ if (value > __rlim_rmax(&vxi->limit, res))
+ __rlim_rmax(&vxi->limit, res) = value;
+ else if (value < __rlim_rmin(&vxi->limit, res))
+ __rlim_rmin(&vxi->limit, res) = value;
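+	/* tri-state result: -1 below the soft limit, 1 below the hard limit,
+	   0 when the hard limit is hit */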
- if (vxi->limit.rlim[res] == RLIM_INFINITY)
- return 1;
+ if (__rlim_soft(&vxi->limit, res) == RLIM_INFINITY)
+ return -1;
+ if (value + num <= __rlim_soft(&vxi->limit, res))
+ return -1;
- if (value + num <= vxi->limit.rlim[res])
+ if (__rlim_hard(&vxi->limit, res) == RLIM_INFINITY)
+ return 1;
+ if (value + num <= __rlim_hard(&vxi->limit, res))
return 1;
- atomic_inc(&vxi->limit.lhit[res]);
+ __rlim_hit(&vxi->limit, res);
return 0;
}
#endif /* __KERNEL__ */
-#endif /* _VX_LIMIT_H */
+#endif /* _VX_LIMIT_INT_H */
--- /dev/null
+#ifndef _VX_MONITOR_H
+#define _VX_MONITOR_H
+
+
+enum {
+ VXM_UNUSED = 0,
+
+ VXM_SYNC = 0x10,
+
+ VXM_UPDATE = 0x20,
+ VXM_UPDATE_1,
+ VXM_UPDATE_2,
+
+ VXM_RQINFO_1 = 0x24,
+ VXM_RQINFO_2,
+
+ VXM_ACTIVATE = 0x40,
+ VXM_DEACTIVATE,
+ VXM_IDLE,
+
+ VXM_HOLD = 0x44,
+ VXM_UNHOLD,
+
+ VXM_MIGRATE = 0x48,
+ VXM_RESCHED,
+
+ /* all other bits are flags */
+ VXM_SCHED = 0x80,
+};
+
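+/* payload layouts for the monitor entry types above */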
+struct _vxm_update_1 {
+ uint32_t tokens_max;
+ uint32_t fill_rate;
+ uint32_t interval;
+};
+
+struct _vxm_update_2 {
+ uint32_t tokens_min;
+ uint32_t fill_rate;
+ uint32_t interval;
+};
+
+struct _vxm_rqinfo_1 {
+ uint16_t running;
+ uint16_t onhold;
+ uint16_t iowait;
+ uint16_t uintr;
+ uint32_t idle_tokens;
+};
+
+struct _vxm_rqinfo_2 {
+ uint32_t norm_time;
+ uint32_t idle_time;
+ uint32_t idle_skip;
+};
+
+struct _vxm_sched {
+ uint32_t tokens;
+ uint32_t norm_time;
+ uint32_t idle_time;
+};
+
+struct _vxm_task {
+ uint16_t pid;
+ uint16_t state;
+};
+
+struct _vxm_event {
+ uint32_t jif;
+ union {
+ uint32_t seq;
+ uint32_t sec;
+ };
+ union {
+ uint32_t tokens;
+ uint32_t nsec;
+ struct _vxm_task tsk;
+ };
+};
+
+struct _vx_mon_entry {
+ uint16_t type;
+ uint16_t xid;
+ union {
+ struct _vxm_event ev;
+ struct _vxm_sched sd;
+ struct _vxm_update_1 u1;
+ struct _vxm_update_2 u2;
+ struct _vxm_rqinfo_1 q1;
+ struct _vxm_rqinfo_2 q2;
+ };
+};
+
+
+#endif /* _VX_MONITOR_H */
#define VCMD_enter_namespace VC_CMD(PROCALT, 1, 0)
-/* XXX: This is not available in recent kernels */
+/* XXX: This is not available in recent kernels */
#define VCMD_cleanup_namespace VC_CMD(PROCALT, 2, 0)
#define VCMD_set_namespace_v0 VC_CMD(PROCALT, 3, 0)
#define VCMD_set_namespace VC_CMD(PROCALT, 3, 1)
#ifdef __KERNEL__
-extern int vc_enter_namespace(uint32_t, void __user *);
-extern int vc_set_namespace(uint32_t, void __user *);
+extern int vc_enter_namespace(struct vx_info *, void __user *);
+extern int vc_set_namespace(struct vx_info *, void __user *);
#endif /* __KERNEL__ */
#endif /* _VX_NAMESPACE_CMD_H */
/* network flags */
+#define NXF_INFO_LOCK 0x00000001
+
#define NXF_STATE_SETUP (1ULL<<32)
+#define NXF_STATE_ADMIN (1ULL<<34)
#define NXF_SC_HELPER (1ULL<<36)
#define NXF_PERSISTENT (1ULL<<38)
-#define NXF_ONE_TIME (0x0001ULL<<32)
+#define NXF_ONE_TIME (0x0005ULL<<32)
-#define NXF_INIT_SET (0)
+#define NXF_INIT_SET (NXF_STATE_ADMIN)
/* address types */
#define NXS_SHUTDOWN 0x0100
#define NXS_RELEASED 0x8000
+/* check conditions */
+
+#define NX_ADMIN 0x0001
+#define NX_WATCH 0x0002
+#define NX_BLEND 0x0004
+#define NX_HOSTID 0x0008
+
+#define NX_IDENT 0x0010
+#define NX_EQUIV 0x0020
+#define NX_PARENT 0x0040
+#define NX_CHILD 0x0080
+
+#define NX_ARG_MASK 0x00F0
+
+#define NX_DYNAMIC 0x0100
+#define NX_STATIC 0x0200
+
+#define NX_ATR_MASK 0x0F00
+
+
extern struct nx_info *lookup_nx_info(int);
extern int get_nid_list(int, unsigned int *, int);
};
#ifdef __KERNEL__
-extern int vc_nx_info(uint32_t, void __user *);
+extern int vc_nx_info(struct nx_info *, void __user *);
#endif /* __KERNEL__ */
#ifdef __KERNEL__
extern int vc_net_create(uint32_t, void __user *);
-extern int vc_net_migrate(uint32_t, void __user *);
+extern int vc_net_migrate(struct nx_info *, void __user *);
-extern int vc_net_add(uint32_t, void __user *);
-extern int vc_net_remove(uint32_t, void __user *);
+extern int vc_net_add(struct nx_info *, void __user *);
+extern int vc_net_remove(struct nx_info *, void __user *);
#endif /* __KERNEL__ */
};
#ifdef __KERNEL__
-extern int vc_get_nflags(uint32_t, void __user *);
-extern int vc_set_nflags(uint32_t, void __user *);
+extern int vc_get_nflags(struct nx_info *, void __user *);
+extern int vc_set_nflags(struct nx_info *, void __user *);
#endif /* __KERNEL__ */
};
#ifdef __KERNEL__
-extern int vc_get_ncaps(uint32_t, void __user *);
-extern int vc_set_ncaps(uint32_t, void __user *);
+extern int vc_get_ncaps(struct nx_info *, void __user *);
+extern int vc_set_ncaps(struct nx_info *, void __user *);
#endif /* __KERNEL__ */
#endif /* _VX_CONTEXT_CMD_H */
void vx_update_load(struct vx_info *);
-struct task_struct;
+int vx_tokens_recalc(struct _vx_sched_pc *,
+ unsigned long *, unsigned long *, int [2]);
-int vx_effective_vavavoom(struct vx_info *, int);
-
-int vx_tokens_recalc(struct vx_info *);
+void vx_update_sched_param(struct _vx_sched *sched,
+ struct _vx_sched_pc *sched_pc);
#endif /* __KERNEL__ */
#else /* _VX_SCHED_H */
/* sched vserver commands */
#define VCMD_set_sched_v2 VC_CMD(SCHED, 1, 2)
-#define VCMD_set_sched VC_CMD(SCHED, 1, 3)
+#define VCMD_set_sched_v3 VC_CMD(SCHED, 1, 3)
+#define VCMD_set_sched VC_CMD(SCHED, 1, 4)
struct vcmd_set_sched_v2 {
int32_t fill_rate;
int32_t priority_bias;
};
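+/* v4 command: set_mask (VXSM_* bits) selects which fields apply;
+   cpu_id/bucket_id address a per-cpu token bucket */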
+struct vcmd_set_sched_v4 {
+ uint32_t set_mask;
+ int32_t fill_rate;
+ int32_t interval;
+ int32_t tokens;
+ int32_t tokens_min;
+ int32_t tokens_max;
+ int32_t prio_bias;
+ int32_t cpu_id;
+ int32_t bucket_id;
+};
+
#define VXSM_FILL_RATE 0x0001
#define VXSM_INTERVAL 0x0002
+#define VXSM_FILL_RATE2 0x0004
+#define VXSM_INTERVAL2 0x0008
#define VXSM_TOKENS 0x0010
#define VXSM_TOKENS_MIN 0x0020
#define VXSM_TOKENS_MAX 0x0040
#define VXSM_PRIO_BIAS 0x0100
-#define SCHED_KEEP (-2)
+#define VXSM_IDLE_TIME 0x0200
+#define VXSM_FORCE 0x0400
+
+#define VXSM_V3_MASK 0x0173
+#define VXSM_SET_MASK 0x01FF
+
+#define VXSM_CPU_ID 0x1000
+#define VXSM_BUCKET_ID 0x2000
+
+#define SCHED_KEEP (-2) /* only for v2 */
#ifdef __KERNEL__
#include <linux/compiler.h>
-extern int vc_set_sched_v1(uint32_t, void __user *);
-extern int vc_set_sched_v2(uint32_t, void __user *);
-extern int vc_set_sched(uint32_t, void __user *);
+extern int vc_set_sched_v2(struct vx_info *, void __user *);
+extern int vc_set_sched_v3(struct vx_info *, void __user *);
+extern int vc_set_sched(struct vx_info *, void __user *);
#endif /* __KERNEL__ */
#endif /* _VX_SCHED_CMD_H */
#include <asm/param.h>
-struct _vx_ticks {
- uint64_t user_ticks; /* token tick events */
- uint64_t sys_ticks; /* token tick events */
- uint64_t hold_ticks; /* token ticks paused */
- uint64_t unused[5]; /* cacheline ? */
-};
-
/* context sub struct */
struct _vx_sched {
- atomic_t tokens; /* number of CPU tokens */
spinlock_t tokens_lock; /* lock for token bucket */
- int fill_rate; /* Fill rate: add X tokens... */
- int interval; /* Divisor: per Y jiffies */
+ int tokens; /* number of CPU tokens */
+ int fill_rate[2]; /* Fill rate: add X tokens... */
+ int interval[2]; /* Divisor: per Y jiffies */
int tokens_min; /* Limit: minimum for unhold */
int tokens_max; /* Limit: no more than N tokens */
- uint32_t jiffies; /* last time accounted */
- int priority_bias; /* bias offset for priority */
+ unsigned update_mask; /* which features should be updated */
+ cpumask_t update; /* CPUs which should update */
+
+ int prio_bias; /* bias offset for priority */
int vavavoom; /* last calculated vavavoom */
+};
+
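+/* per-cpu scheduler state; each CPU runs its own token bucket */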
+struct _vx_sched_pc {
+ int tokens; /* number of CPU tokens */
+ int flags; /* bucket flags */
- cpumask_t cpus_allowed; /* cpu mask for context */
+ int fill_rate[2]; /* Fill rate: add X tokens... */
+ int interval[2]; /* Divisor: per Y jiffies */
+ int tokens_min; /* Limit: minimum for unhold */
+ int tokens_max; /* Limit: no more than N tokens */
- struct _vx_ticks cpu[NR_CPUS];
+ unsigned long norm_time; /* last time accounted */
+ unsigned long idle_time; /* non linear time for fair sched */
+ unsigned long token_time; /* token time for accounting */
+ unsigned long onhold; /* jiffies when put on hold */
+
+ uint64_t user_ticks; /* token tick events */
+ uint64_t sys_ticks; /* token tick events */
+ uint64_t hold_ticks; /* token ticks paused */
};
+
+#define VXSF_ONHOLD 0x0001
+#define VXSF_IDLE_TIME 0x0100
+
+#ifdef CONFIG_VSERVER_DEBUG
+
+static inline void __dump_vx_sched(struct _vx_sched *sched)
+{
+ printk("\t_vx_sched:\n");
+ printk("\t tokens: %4d/%4d, %4d/%4d, %4d, %4d\n",
+ sched->fill_rate[0], sched->interval[0],
+ sched->fill_rate[1], sched->interval[1],
+ sched->tokens_min, sched->tokens_max);
+ printk("\t priority = %4d, %4d\n",
+ sched->prio_bias, sched->vavavoom);
+}
+
+#endif
+
#endif /* _VX_SCHED_DEF_H */
#ifdef __KERNEL__
-extern int vc_ctx_kill(uint32_t, void __user *);
-extern int vc_wait_exit(uint32_t, void __user *);
+extern int vc_ctx_kill(struct vx_info *, void __user *);
+extern int vc_wait_exit(struct vx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+
+/* process alteration commands */
+
+#define VCMD_get_pflags VC_CMD(PROCALT, 1, 0)
+#define VCMD_set_pflags VC_CMD(PROCALT, 2, 0)
+
+struct vcmd_pflags_v0 {
+ uint32_t flagword;
+ uint32_t mask;
+};
+
+#ifdef __KERNEL__
+
+extern int vc_get_pflags(uint32_t pid, void __user *);
+extern int vc_set_pflags(uint32_t pid, void __user *);
#endif /* __KERNEL__ */
#endif /* _VX_SIGNAL_CMD_H */
DISK | | | | |DLIMIT | | |INODE | |
VFS | 32| 33| 34| 35| 36| 37| | 38| 39|
-------+-------+-------+-------+-------+-------+-------+ +-------+-------+
- OTHER | | | | | | | |VINFO | |
+ OTHER |VSTAT | | | | | | |VINFO | |
| 40| 41| 42| 43| 44| 45| | 46| 47|
=======+=======+=======+=======+=======+=======+=======+ +=======+=======+
SPECIAL|EVENT | | | |FLAGS | | | | |
#define VC_CAT_DLIMIT 36
#define VC_CAT_INODE 38
+#define VC_CAT_VSTAT 40
#define VC_CAT_VINFO 46
#define VC_CAT_EVENT 48
/* interface version */
-#define VCI_VERSION 0x00020002
+#define VCI_VERSION 0x00020102
#define VCI_LEGACY_VERSION 0x000100FF
/* query version */
#define VCMD_get_version VC_CMD(VERSION, 0, 0)
+#define VCMD_get_vci VC_CMD(VERSION, 1, 0)
#ifdef __KERNEL__
--- /dev/null
+#ifndef _DX_TAG_H
+#define _DX_TAG_H
+
+
+#define DX_TAG(in) (IS_TAGGED(in))
+
+
+#ifdef CONFIG_DX_TAG_NFSD
+#define DX_TAG_NFSD 1
+#else
+#define DX_TAG_NFSD 0
+#endif
+
+
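+/* per-scheme mappings: INOTAG_* split the on-disk uid/gid into real ids
+   and tag, TAGINO_* merge the tag back in */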
+#ifdef CONFIG_TAGGING_NONE
+
+#define MAX_UID 0xFFFFFFFF
+#define MAX_GID 0xFFFFFFFF
+
+#define INOTAG_TAG(cond, uid, gid, tag) (0)
+
+#define TAGINO_UID(cond, uid, tag) (uid)
+#define TAGINO_GID(cond, gid, tag) (gid)
+
+#endif
+
+
+#ifdef CONFIG_TAGGING_GID16
+
+#define MAX_UID 0xFFFFFFFF
+#define MAX_GID 0x0000FFFF
+
+#define INOTAG_TAG(cond, uid, gid, tag) \
+ ((cond) ? (((gid) >> 16) & 0xFFFF) : 0)
+
+#define TAGINO_UID(cond, uid, tag) (uid)
+#define TAGINO_GID(cond, gid, tag) \
+ ((cond) ? (((gid) & 0xFFFF) | ((tag) << 16)) : (gid))
+
+#endif
+
+
+#ifdef CONFIG_TAGGING_ID24
+
+#define MAX_UID 0x00FFFFFF
+#define MAX_GID 0x00FFFFFF
+
+#define INOTAG_TAG(cond, uid, gid, tag) \
+ ((cond) ? ((((uid) >> 16) & 0xFF00) | (((gid) >> 24) & 0xFF)) : 0)
+
+#define TAGINO_UID(cond, uid, tag) \
+ ((cond) ? (((uid) & 0xFFFFFF) | (((tag) & 0xFF00) << 16)) : (uid))
+#define TAGINO_GID(cond, gid, tag) \
+ ((cond) ? (((gid) & 0xFFFFFF) | (((tag) & 0x00FF) << 24)) : (gid))
+
+#endif
+
+
+#ifdef CONFIG_TAGGING_UID16
+
+#define MAX_UID 0x0000FFFF
+#define MAX_GID 0xFFFFFFFF
+
+#define INOTAG_TAG(cond, uid, gid, tag) \
+ ((cond) ? (((uid) >> 16) & 0xFFFF) : 0)
+
+#define TAGINO_UID(cond, uid, tag) \
+ ((cond) ? (((uid) & 0xFFFF) | ((tag) << 16)) : (uid))
+#define TAGINO_GID(cond, gid, tag) (gid)
+
+#endif
+
+
+#ifdef CONFIG_TAGGING_INTERN
+
+#define MAX_UID 0xFFFFFFFF
+#define MAX_GID 0xFFFFFFFF
+
+#define INOTAG_TAG(cond, uid, gid, tag) \
+ ((cond) ? (tag) : 0)
+
+#define TAGINO_UID(cond, uid, tag) (uid)
+#define TAGINO_GID(cond, gid, tag) (gid)
+
+#endif
+
+
+#ifdef CONFIG_TAGGING_RUNTIME
+
+#define MAX_UID 0xFFFFFFFF
+#define MAX_GID 0xFFFFFFFF
+
+#define INOTAG_TAG(cond, uid, gid, tag) (0)
+
+#define TAGINO_UID(cond, uid, tag) (uid)
+#define TAGINO_GID(cond, gid, tag) (gid)
+
+#endif
+
+
+#ifndef CONFIG_TAGGING_NONE
+#define dx_current_fstag(sb) \
+	((sb)->s_flags & MS_TAGGED ? dx_current_tag() : 0)
+#else
+#define dx_current_fstag(sb) (0)
+#endif
+
+#ifndef CONFIG_TAGGING_INTERN
+#define TAGINO_TAG(cond, tag) (0)
+#else
+#define TAGINO_TAG(cond, tag) ((cond) ? (tag) : 0)
+#endif
+
+#define INOTAG_UID(cond, uid, gid) \
+ ((cond) ? ((uid) & MAX_UID) : (uid))
+#define INOTAG_GID(cond, uid, gid) \
+ ((cond) ? ((gid) & MAX_GID) : (gid))
+
+
+static inline uid_t dx_map_uid(uid_t uid)
+{
+ if ((uid > MAX_UID) && (uid != -1))
+ uid = -2;
+ return (uid & MAX_UID);
+}
+
+static inline gid_t dx_map_gid(gid_t gid)
+{
+ if ((gid > MAX_GID) && (gid != -1))
+ gid = -2;
+ return (gid & MAX_GID);
+}
+
+
+#ifdef CONFIG_VSERVER_LEGACY
+#define FIOC_GETTAG _IOR('x', 1, long)
+#define FIOC_SETTAG _IOW('x', 2, long)
+#define FIOC_SETTAGJ _IOW('x', 3, long)
+#endif
+
+#ifdef CONFIG_PROPAGATE
+
+int dx_parse_tag(char *string, tag_t *tag, int remove);
+
+void __dx_propagate_tag(struct nameidata *nd, struct inode *inode);
+
+#define dx_propagate_tag(n,i) __dx_propagate_tag(n,i)
+
+#else
+#define dx_propagate_tag(n,i) do { } while (0)
+#endif
+
+#endif /* _DX_TAG_H */
+++ /dev/null
-#ifndef _VX_XID_H
-#define _VX_XID_H
-
-#include <linux/config.h>
-
-
-#define XID_TAG(in) (IS_TAGXID(in))
-
-
-#ifdef CONFIG_XID_TAG_NFSD
-#define XID_TAG_NFSD 1
-#else
-#define XID_TAG_NFSD 0
-#endif
-
-
-#ifdef CONFIG_INOXID_NONE
-
-#define MAX_UID 0xFFFFFFFF
-#define MAX_GID 0xFFFFFFFF
-
-#define INOXID_XID(tag, uid, gid, xid) (0)
-
-#define XIDINO_UID(tag, uid, xid) (uid)
-#define XIDINO_GID(tag, gid, xid) (gid)
-
-#endif
-
-
-#ifdef CONFIG_INOXID_GID16
-
-#define MAX_UID 0xFFFFFFFF
-#define MAX_GID 0x0000FFFF
-
-#define INOXID_XID(tag, uid, gid, xid) \
- ((tag) ? (((gid) >> 16) & 0xFFFF) : 0)
-
-#define XIDINO_UID(tag, uid, xid) (uid)
-#define XIDINO_GID(tag, gid, xid) \
- ((tag) ? (((gid) & 0xFFFF) | ((xid) << 16)) : (gid))
-
-#endif
-
-
-#ifdef CONFIG_INOXID_UGID24
-
-#define MAX_UID 0x00FFFFFF
-#define MAX_GID 0x00FFFFFF
-
-#define INOXID_XID(tag, uid, gid, xid) \
- ((tag) ? ((((uid) >> 16) & 0xFF00) | (((gid) >> 24) & 0xFF)) : 0)
-
-#define XIDINO_UID(tag, uid, xid) \
- ((tag) ? (((uid) & 0xFFFFFF) | (((xid) & 0xFF00) << 16)) : (uid))
-#define XIDINO_GID(tag, gid, xid) \
- ((tag) ? (((gid) & 0xFFFFFF) | (((xid) & 0x00FF) << 24)) : (gid))
-
-#endif
-
-
-#ifdef CONFIG_INOXID_UID16
-
-#define MAX_UID 0x0000FFFF
-#define MAX_GID 0xFFFFFFFF
-
-#define INOXID_XID(tag, uid, gid, xid) \
- ((tag) ? (((uid) >> 16) & 0xFFFF) : 0)
-
-#define XIDINO_UID(tag, uid, xid) \
- ((tag) ? (((uid) & 0xFFFF) | ((xid) << 16)) : (uid))
-#define XIDINO_GID(tag, gid, xid) (gid)
-
-#endif
-
-
-#ifdef CONFIG_INOXID_INTERN
-
-#define MAX_UID 0xFFFFFFFF
-#define MAX_GID 0xFFFFFFFF
-
-#define INOXID_XID(tag, uid, gid, xid) \
- ((tag) ? (xid) : 0)
-
-#define XIDINO_UID(tag, uid, xid) (uid)
-#define XIDINO_GID(tag, gid, xid) (gid)
-
-#endif
-
-
-#ifdef CONFIG_INOXID_RUNTIME
-
-#define MAX_UID 0xFFFFFFFF
-#define MAX_GID 0xFFFFFFFF
-
-#define INOXID_XID(tag, uid, gid, xid) (0)
-
-#define XIDINO_UID(tag, uid, xid) (uid)
-#define XIDINO_GID(tag, gid, xid) (gid)
-
-#endif
-
-
-#ifndef CONFIG_INOXID_NONE
-#define vx_current_fsxid(sb) \
- ((sb)->s_flags & MS_TAGXID ? current->xid : 0)
-#else
-#define vx_current_fsxid(sb) (0)
-#endif
-
-#ifndef CONFIG_INOXID_INTERN
-#define XIDINO_XID(tag, xid) (0)
-#else
-#define XIDINO_XID(tag, xid) ((tag) ? (xid) : 0)
-#endif
-
-#define INOXID_UID(tag, uid, gid) \
- ((tag) ? ((uid) & MAX_UID) : (uid))
-#define INOXID_GID(tag, uid, gid) \
- ((tag) ? ((gid) & MAX_GID) : (gid))
-
-
-static inline uid_t vx_map_uid(uid_t uid)
-{
- if ((uid > MAX_UID) && (uid != -1))
- uid = -2;
- return (uid & MAX_UID);
-}
-
-static inline gid_t vx_map_gid(gid_t gid)
-{
- if ((gid > MAX_GID) && (gid != -1))
- gid = -2;
- return (gid & MAX_GID);
-}
-
-
-#ifdef CONFIG_VSERVER_LEGACY
-#define FIOC_GETXID _IOR('x', 1, long)
-#define FIOC_SETXID _IOW('x', 2, long)
-#define FIOC_SETXIDJ _IOW('x', 3, long)
-#endif
-
-int vx_parse_xid(char *string, xid_t *xid, int remove);
-void vx_propagate_xid(struct nameidata *nd, struct inode *inode);
-
-#endif /* _VX_XID_H */
lib/ncaps_list-net.c \
lib/nflags-net.c \
lib/nflags_list-net.c
+lib_v21_SRCS = lib/syscall_setccaps-v21.hc \
+ lib/syscall_setsched-v21.hc
if ENSC_HAVE_C99_COMPILER
lib_v13_SRCS += lib/syscall_adddlimit-v13.hc \
$(lib_management_SRCS) \
$(lib_v11_SRCS) \
$(lib_v13_SRCS) \
+ $(lib_v21_SRCS) \
$(ensc_fmt_SRCS)
include_HEADERS += lib/vserver.h
DECL("secure_remount", VC_VXC_SECURE_REMOUNT),
DECL("binary_mount", VC_VXC_BINARY_MOUNT),
DECL("quota_ctl", VC_VXC_QUOTA_CTL),
+ DECL("admin_mapper", VC_VXC_ADMIN_MAPPER),
+ DECL("admin_cloop", VC_VXC_ADMIN_CLOOP),
// some deprecated values...
DECL("mount", VC_VXC_SECURE_MOUNT),
DECL("remount", VC_VXC_SECURE_REMOUNT),
DECL("virt_uptime", VC_VXF_VIRT_UPTIME),
DECL("virt_cpu", VC_VXF_VIRT_CPU),
DECL("virt_load", VC_VXF_VIRT_LOAD),
+ DECL("virt_time", VC_VXF_VIRT_TIME),
DECL("hide_mount", VC_VXF_HIDE_MOUNT),
DECL("hide_netif", VC_VXF_HIDE_NETIF),
+ DECL("hide_vinfo", VC_VXF_HIDE_VINFO),
DECL("state_setup", VC_VXF_STATE_SETUP),
DECL("state_init", VC_VXF_STATE_INIT),
+ DECL("state_admin", VC_VXF_STATE_ADMIN),
DECL("sc_helper", VC_VXF_SC_HELPER),
DECL("persistent", VC_VXF_PERSISTENT),
#define DECL(STR, VAL) { STR, sizeof(STR)-1, VAL }
static struct Mapping_uint64 const VALUES[] = {
-#warning Add the 'nflags' values here
- DECL("", 0)
+ DECL("lock", VC_NXF_INFO_LOCK),
+
+ DECL("state_setup", VC_NXF_STATE_SETUP),
+ DECL("state_admin", VC_NXF_STATE_ADMIN),
+
+ DECL("sc_helper", VC_NXF_SC_HELPER),
+ DECL("persistent", VC_NXF_PERSISTENT),
};
uint_least64_t
k_caps.ccaps = caps->ccaps;
k_caps.cmask = caps->cmask;
- return vserver(VCMD_set_ccaps, CTX_USER2KERNEL(xid), &k_caps);
+ return vserver(VCMD_set_ccaps_v0, CTX_USER2KERNEL(xid), &k_caps);
}
--- /dev/null
+// $Id$ --*- c -*--
+
+// Copyright (C) 2006 Daniel Hokka Zakrisson <daniel@hozac.com>
+//
+// This program is free software; you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation; version 2 of the License.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+static inline ALWAYSINLINE int
+vc_set_ccaps_v21(xid_t xid, struct vc_ctx_caps const *caps)
+{
+ struct vcmd_ctx_caps_v1 k_ccaps;
+ struct vcmd_bcaps k_bcaps;
+ int ret;
+
+ k_bcaps.bcaps = caps->bcaps;
+ k_bcaps.bmask = caps->bmask;
+ k_ccaps.ccaps = caps->ccaps;
+ k_ccaps.cmask = caps->cmask;
+
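+	/* v2.1 splits the old combined command: context caps and bounding
+	   caps are set separately */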
+ ret = vserver(VCMD_set_ccaps, CTX_USER2KERNEL(xid), &k_ccaps);
+ if (ret)
+ return ret;
+ return vserver(VCMD_set_bcaps, CTX_USER2KERNEL(xid), &k_bcaps);
+}
#endif
#include "vserver.h"
-#include "vserver-internal.h"
#include "virtual.h"
+#if defined(VC_ENABLE_API_V13) && defined(VC_ENABLE_API_V21)
+# define VC_MULTIVERSION_SYSCALL 1
+#endif
+#include "vserver-internal.h"
+
#if defined(VC_ENABLE_API_V13)
# include "syscall_setccaps-v13.hc"
#endif
-#if defined(VC_ENABLE_API_V13)
+#if defined(VC_ENABLE_API_V21)
+# include "syscall_setccaps-v21.hc"
+#endif
+
+#if defined(VC_ENABLE_API_V13) || defined(VC_ENABLE_API_V21)
int
vc_set_ccaps(xid_t xid, struct vc_ctx_caps const *caps)
{
return -1;
}
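+	/* try the v2.1 command first, then fall back to the old v1.3 interface */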
- CALL_VC(CALL_VC_V13A(vc_set_ccaps, xid, caps));
+ CALL_VC(CALL_VC_V21(vc_set_ccaps, xid, caps),
+ CALL_VC_V13A(vc_set_ccaps, xid, caps));
}
#endif
{
struct vcmd_set_sched_v3 k_data;
- // This expression will be evaluated at compile-time
- if (sizeof(struct vcmd_set_sched_v3)==sizeof(struct vc_set_sched) &&
- X(set_mask) && X(fill_rate) && X(interval) && X(tokens) &&
- X(tokens_min) && X(tokens_max) && X(priority_bias))
- return vserver(VCMD_set_sched, CTX_USER2KERNEL(xid),
- const_cast(struct vc_set_sched *)(data));
- else {
- k_data.set_mask = data->set_mask;
- k_data.fill_rate = data->fill_rate;
- k_data.interval = data->interval;
- k_data.tokens = data->tokens;
- k_data.tokens_min = data->tokens_min;
- k_data.tokens_max = data->tokens_max;
- k_data.priority_bias = data->priority_bias;
-
- return vserver(VCMD_set_sched, CTX_USER2KERNEL(xid), &k_data);
- }
+ k_data.set_mask = data->set_mask & VC_VXSM_V3_MASK;
+ k_data.fill_rate = data->fill_rate;
+ k_data.interval = data->interval;
+ k_data.tokens = data->tokens;
+ k_data.tokens_min = data->tokens_min;
+ k_data.tokens_max = data->tokens_max;
+ k_data.priority_bias = data->priority_bias;
+
+ return vserver(VCMD_set_sched_v3, CTX_USER2KERNEL(xid), &k_data);
}
--- /dev/null
+// $Id$ --*- c -*--
+
+// Copyright (C) 2006 Daniel Hokka Zakrisson <daniel@hozac.com>
+//
+// This program is free software; you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation; version 2 of the License.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include "vserver.h"
+
+static inline ALWAYSINLINE int
+vc_set_sched_v21(xid_t xid, struct vc_set_sched const *data)
+{
+ struct vcmd_set_sched_v4 k_data;
+
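+	/* translate the library struct into the v4 kernel command, field by field */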
+ k_data.set_mask = data->set_mask;
+ k_data.fill_rate = data->fill_rate;
+ k_data.interval = data->interval;
+ k_data.tokens = data->tokens;
+ k_data.tokens_min = data->tokens_min;
+ k_data.tokens_max = data->tokens_max;
+ k_data.prio_bias = data->priority_bias;
+ k_data.cpu_id = data->cpu_id;
+	k_data.bucket_id = data->bucket_id;
+
+ return vserver(VCMD_set_sched, CTX_USER2KERNEL(xid), &k_data);
+}
#include "vserver.h"
#include "virtual.h"
-#if defined(VC_ENABLE_API_V13OBS) && defined(VC_ENABLE_API_V13)
-# define VC_MULTIVERSION_SYSCALL 1
-#endif
+#define VC_MULTIVERSION_SYSCALL 1
#include "vserver-internal.h"
#ifdef VC_ENABLE_API_V13
# include "syscall_setsched-v13obs.hc"
#endif
+#ifdef VC_ENABLE_API_V21
+# include "syscall_setsched-v21.hc"
+#endif
+
int
vc_set_sched(xid_t xid, struct vc_set_sched const *data)
{
- CALL_VC(CALL_VC_V13B (vc_set_sched,xid,data),
+ CALL_VC(CALL_VC_V21 (vc_set_sched,xid,data),
+ CALL_VC_V13B (vc_set_sched,xid,data),
CALL_VC_V13OBS(vc_set_sched,xid,data));
}
#endif
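+/* 0x00020100 presumably encodes the minimal kernel API version (2.1.0)
+   accepted for v21 commands */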
+#ifdef VC_ENABLE_API_V21
+# define CALL_VC_V21(F,...) CALL_VC_GENERAL(0x00020100, v21, F, __VA_ARGS__)
+#else
+# define CALL_VC_V21(F,...) CALL_VC_NOOP
+#endif
+
+
#ifdef VC_ENABLE_API_NET
# define CALL_VC_NET(F,...) CALL_VC_GENERAL(0x00010016, net, F, __VA_ARGS__)
#else
#define VC_VXF_VIRT_UPTIME 0x00020000ull
#define VC_VXF_VIRT_CPU 0x00040000ull
#define VC_VXF_VIRT_LOAD 0x00080000ull
+#define VC_VXF_VIRT_TIME 0x00100000ull
#define VC_VXF_HIDE_MOUNT 0x01000000ull
#define VC_VXF_HIDE_NETIF 0x02000000ull
+#define VC_VXF_HIDE_VINFO 0x04000000ull
#define VC_VXF_STATE_SETUP (1ULL<<32)
#define VC_VXF_STATE_INIT (1ULL<<33)
+#define VC_VXF_STATE_ADMIN (1ULL<<34)
#define VC_VXF_SC_HELPER (1ULL<<36)
#define VC_VXF_REBOOT_KILL (1ULL<<37)
#define VC_VXC_BINARY_MOUNT 0x00040000ull
#define VC_VXC_QUOTA_CTL 0x00100000ull
+#define VC_VXC_ADMIN_MAPPER 0x00200000ull
+#define VC_VXC_ADMIN_CLOOP		0x00400000ull
-#define VC_VXSM_FILL_RATE 0x0001
-#define VC_VXSM_INTERVAL 0x0002
-#define VC_VXSM_TOKENS 0x0010
-#define VC_VXSM_TOKENS_MIN 0x0020
-#define VC_VXSM_TOKENS_MAX 0x0040
-#define VC_VXSM_PRIO_BIAS 0x0100
+// the scheduler flags
+#define VC_VXSM_FILL_RATE 0x0001
+#define VC_VXSM_INTERVAL 0x0002
+#define VC_VXSM_FILL_RATE2 0x0004
+#define VC_VXSM_INTERVAL2 0x0008
+#define VC_VXSM_TOKENS 0x0010
+#define VC_VXSM_TOKENS_MIN 0x0020
+#define VC_VXSM_TOKENS_MAX 0x0040
+#define VC_VXSM_PRIO_BIAS 0x0100
+#define VC_VXSM_CPU_ID 0x1000
+#define VC_VXSM_BUCKET_ID 0x2000
+
+#define VC_VXSM_IDLE_TIME 0x0200
+#define VC_VXSM_FORCE 0x0400
+
+#define VC_VXSM_V3_MASK 0x0173
+
+
+// the network flags
+#define VC_NXF_INFO_LOCK 0x00000001ull
+
+#define VC_NXF_STATE_SETUP (1ULL<<32)
+#define VC_NXF_STATE_ADMIN (1ULL<<34)
+
+#define VC_NXF_SC_HELPER (1ULL<<36)
+#define VC_NXF_PERSISTENT (1ULL<<38)
+
#define VC_BAD_PERSONALITY ((uint_least32_t)(-1))
int_least32_t tokens_min;
int_least32_t tokens_max;
int_least32_t priority_bias;
+ int_least32_t cpu_id;
+ int_least32_t bucket_id;
};
int vc_set_sched(xid_t xid, struct vc_set_sched const *);