#define VXF_STATE_SETUP (1ULL<<32)
#define VXF_STATE_INIT (1ULL<<33)
-#define VXF_STATE_HELPER (1ULL<<36)
+#define VXF_SC_HELPER (1ULL<<36)
+#define VXF_REBOOT_KILL (1ULL<<37)
+#define VXF_PERSISTENT (1ULL<<38)
#define VXF_FORK_RSS (1ULL<<48)
#define VXF_PROLIFIC (1ULL<<49)
#define VXF_INIT_SET (VXF_STATE_SETUP|VXF_STATE_INIT)
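+
+/* presumed semantics of the new flag bits: VXF_SC_HELPER enables the
+ * userspace state-change helper for this context, VXF_REBOOT_KILL makes
+ * a sys_reboot() issued inside the guest kill all guest tasks, and
+ * VXF_PERSISTENT keeps the context around even after its last task has
+ * exited; a check then looks like (vxi->vx_flags & VXF_PERSISTENT),
+ * assuming the usual vx_flags member */
+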
+/* context migration */
+
+#define VXM_SET_INIT 0x00000001
+#define VXM_SET_REAPER 0x00000002
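+/* presumably passed with the migrate command so that the calling task is
+ * installed as the guest's init (VXM_SET_INIT) and/or as its reaper
+ * (VXM_SET_REAPER) */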
+
/* context caps */
#define VXC_CAP_MASK 0x00000000
uint64_t vx_bcaps; /* bounding caps (system) */
uint64_t vx_ccaps; /* context caps (vserver) */
- pid_t vx_initpid; /* PID of fake init process */
-
- wait_queue_head_t vx_wait; /* context exit waitqueue */
+ struct task_struct *vx_reaper; /* guest reaper process */
+ pid_t vx_initpid; /* PID of guest init */
struct _vx_limit limit; /* vserver limits */
struct _vx_sched sched; /* vserver scheduler */
struct _vx_cvirt cvirt; /* virtual/bias stuff */
struct _vx_cacct cacct; /* context accounting */
+ wait_queue_head_t vx_wait; /* context exit waitqueue */
+ int reboot_cmd; /* last sys_reboot() cmd */
+ int exit_code; /* last process exit code */
+
char vx_name[65]; /* vserver name */
};
#define VXS_PAUSED 0x0010
#define VXS_ONHOLD 0x0020
#define VXS_SHUTDOWN 0x0100
+#define VXS_HELPER 0x1000
#define VXS_RELEASED 0x8000
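+
+/*
+ * Minimal sketch of how the exit bookkeeping above might be consumed,
+ * assuming the VXS_* bits live in the usual vx_state member and that a
+ * watcher sleeps on vx_wait until the context is released (the helper
+ * name is illustrative only):
+ *
+ *	static inline int example_wait_exit(struct vx_info *vxi,
+ *		int *exit_code, int *reboot_cmd)
+ *	{
+ *		int ret;
+ *
+ *		ret = wait_event_interruptible(vxi->vx_wait,
+ *			vxi->vx_state & VXS_RELEASED);
+ *		if (ret)
+ *			return ret;
+ *		*exit_code = vxi->exit_code;
+ *		*reboot_cmd = vxi->reboot_cmd;
+ *		return 0;
+ *	}
+ */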
/* check conditions */
extern void claim_vx_info(struct vx_info *, struct task_struct *);
extern void release_vx_info(struct vx_info *, struct task_struct *);
-extern struct vx_info *locate_vx_info(int);
-extern struct vx_info *locate_or_create_vx_info(int);
+extern struct vx_info *lookup_vx_info(int);
+extern struct vx_info *lookup_or_create_vx_info(int);
extern int get_xid_list(int, unsigned int *, int);
extern int xid_is_hashed(xid_t);
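+
+/*
+ * Typical lookup pattern, assuming lookup_vx_info() still returns a
+ * counted reference that has to be dropped with put_vx_info():
+ *
+ *	struct vx_info *vxi = lookup_vx_info(xid);
+ *
+ *	if (vxi) {
+ *		... work on the context ...
+ *		put_vx_info(vxi);
+ *	}
+ */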
#ifndef _VX_DEBUG_H
#define _VX_DEBUG_H
-#ifndef CONFIG_VSERVER
-#warning config options missing
-#endif
#define VXD_CBIT(n,m) (vx_debug_ ## n & (1 << (m)))
#define VXD_CMIN(n,m) (vx_debug_ ## n > (m))
extern unsigned int vx_debug_nid;
extern unsigned int vx_debug_net;
extern unsigned int vx_debug_limit;
+extern unsigned int vx_debug_cres;
extern unsigned int vx_debug_dlim;
extern unsigned int vx_debug_cvirt;
extern unsigned int vx_debug_misc;
printk(VX_WARNLEVEL f "\n" , ##x); \
} while (0)
-
#define vxd_path(d,m) \
({ static char _buffer[PATH_MAX]; \
d_path((d), (m), _buffer, sizeof(_buffer)); })
+#define vxd_cond_path(n) \
+ ((n) ? vxd_path((n)->dentry, (n)->mnt) : "<null>" )
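+/* (n) is a nameidata-style pointer carrying ->dentry and ->mnt; a NULL
+ * pointer yields "<null>" instead of being dereferenced */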
+
#else /* CONFIG_VSERVER_DEBUG */
#define vx_debug_switch 0
#define vx_debug_nid 0
#define vx_debug_net 0
#define vx_debug_limit 0
+#define vx_debug_cres 0
#define vx_debug_dlim 0
#define vx_debug_cvirt 0
#define vxwprintk(x...) do { } while (0)
#define vxd_path "<none>"
+#define vxd_cond_path vxd_path
#endif /* CONFIG_VSERVER_DEBUG */
struct _vx_hist_entry *vxh_advance(void *loc);
-#define VXH_HERE(__type) \
- ({ __label__ __vxh_##__type; \
- __vxh_##__type:; \
- &&__vxh_##__type; })
-
-
-static inline void __vxh_copy_vxi(struct _vx_hist_entry *entry, struct vx_info *vxi)
+static inline
+void __vxh_copy_vxi(struct _vx_hist_entry *entry, struct vx_info *vxi)
{
entry->vxi.ptr = vxi;
if (vxi) {
}
-#define __VXH_BODY(__type, __data) \
+#define __HERE__ current_text_addr()
+
+#define __VXH_BODY(__type, __data, __here) \
struct _vx_hist_entry *entry; \
\
preempt_disable(); \
- entry = vxh_advance(VXH_HERE(__type)); \
+ entry = vxh_advance(__here); \
__data; \
entry->type = __type; \
preempt_enable();
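+/* __HERE__ records the caller's text address via current_text_addr(),
+ * replacing the old label-address trick; preemption stays disabled while
+ * the history entry handed out by vxh_advance() is filled in, presumably
+ * because the history storage is per cpu */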
/* pass vxi only */
-#define __VXH_SIMPLE \
+
+#define __VXH_SMPL \
__vxh_copy_vxi(entry, vxi)
-#define VXH_SIMPLE(__name, __type) \
-static inline void __name(struct vx_info *vxi) \
-{ \
- __VXH_BODY(__type, __VXH_SIMPLE) \
+static inline
+void __vxh_smpl(struct vx_info *vxi, int __type, void *__here)
+{
+ __VXH_BODY(__type, __VXH_SMPL, __here)
}
/* pass vxi and data (void *) */
+
#define __VXH_DATA \
__vxh_copy_vxi(entry, vxi); \
entry->sc.data = data
-#define VXH_DATA(__name, __type) \
-static inline \
-void __name(struct vx_info *vxi, void *data) \
-{ \
- __VXH_BODY(__type, __VXH_DATA) \
+static inline
+void __vxh_data(struct vx_info *vxi, void *data,
+ int __type, void *__here)
+{
+ __VXH_BODY(__type, __VXH_DATA, __here)
}
/* pass vxi and arg (long) */
-#define __VXH_LARG \
+
+#define __VXH_LONG \
__vxh_copy_vxi(entry, vxi); \
entry->ll.arg = arg
-#define VXH_LARG(__name, __type) \
-static inline \
-void __name(struct vx_info *vxi, long arg) \
-{ \
- __VXH_BODY(__type, __VXH_LARG) \
+static inline
+void __vxh_long(struct vx_info *vxi, long arg,
+ int __type, void *__here)
+{
+ __VXH_BODY(__type, __VXH_LONG, __here)
}
-static inline void vxh_throw_oops(void)
+static inline
+void __vxh_throw_oops(void *__here)
{
- __VXH_BODY(VXH_THROW_OOPS, {});
+ __VXH_BODY(VXH_THROW_OOPS, {}, __here);
/* prevent further acquisition */
vxh_active = 0;
}
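+/* with vxh_active cleared right after the VXH_THROW_OOPS entry, no further
+ * events are recorded, so the trace leading up to the oops stays intact
+ * for vxh_dump_history() */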
-VXH_SIMPLE(vxh_get_vx_info, VXH_GET_VX_INFO);
-VXH_SIMPLE(vxh_put_vx_info, VXH_PUT_VX_INFO);
-VXH_DATA(vxh_init_vx_info, VXH_INIT_VX_INFO);
-VXH_DATA(vxh_set_vx_info, VXH_SET_VX_INFO);
-VXH_DATA(vxh_clr_vx_info, VXH_CLR_VX_INFO);
+#define vxh_throw_oops() __vxh_throw_oops(__HERE__);
+
+#define __vxh_get_vx_info(v,h) __vxh_smpl(v, VXH_GET_VX_INFO, h);
+#define __vxh_put_vx_info(v,h) __vxh_smpl(v, VXH_PUT_VX_INFO, h);
+
+#define __vxh_init_vx_info(v,d,h) \
+ __vxh_data(v,d, VXH_INIT_VX_INFO, h);
+#define __vxh_set_vx_info(v,d,h) \
+ __vxh_data(v,d, VXH_SET_VX_INFO, h);
+#define __vxh_clr_vx_info(v,d,h) \
+ __vxh_data(v,d, VXH_CLR_VX_INFO, h);
-VXH_DATA(vxh_claim_vx_info, VXH_CLAIM_VX_INFO);
-VXH_DATA(vxh_release_vx_info, VXH_RELEASE_VX_INFO);
+#define __vxh_claim_vx_info(v,d,h) \
+ __vxh_data(v,d, VXH_CLAIM_VX_INFO, h);
+#define __vxh_release_vx_info(v,d,h) \
+ __vxh_data(v,d, VXH_RELEASE_VX_INFO, h);
-VXH_SIMPLE(vxh_alloc_vx_info, VXH_ALLOC_VX_INFO);
-VXH_SIMPLE(vxh_dealloc_vx_info, VXH_DEALLOC_VX_INFO);
+#define vxh_alloc_vx_info(v) \
+ __vxh_smpl(v, VXH_ALLOC_VX_INFO, __HERE__);
+#define vxh_dealloc_vx_info(v) \
+ __vxh_smpl(v, VXH_DEALLOC_VX_INFO, __HERE__);
-VXH_SIMPLE(vxh_hash_vx_info, VXH_HASH_VX_INFO);
-VXH_SIMPLE(vxh_unhash_vx_info, VXH_UNHASH_VX_INFO);
+#define vxh_hash_vx_info(v) \
+ __vxh_smpl(v, VXH_HASH_VX_INFO, __HERE__);
+#define vxh_unhash_vx_info(v) \
+ __vxh_smpl(v, VXH_UNHASH_VX_INFO, __HERE__);
-VXH_LARG(vxh_loc_vx_info, VXH_LOC_VX_INFO);
-VXH_LARG(vxh_lookup_vx_info, VXH_LOOKUP_VX_INFO);
-VXH_LARG(vxh_create_vx_info, VXH_CREATE_VX_INFO);
+#define vxh_loc_vx_info(v,l) \
+ __vxh_long(v,l, VXH_LOC_VX_INFO, __HERE__);
+#define vxh_lookup_vx_info(v,l) \
+ __vxh_long(v,l, VXH_LOOKUP_VX_INFO, __HERE__);
+#define vxh_create_vx_info(v,l) \
+ __vxh_long(v,l, VXH_CREATE_VX_INFO, __HERE__);
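+
+/*
+ * Callers are now expected to capture their own location and pass it down
+ * to the __vxh_*() helpers; a hypothetical refcount wrapper could look
+ * like this (names are illustrative, vx_usecnt is the usual use counter):
+ *
+ *	static inline struct vx_info *__example_get_vx_info(
+ *		struct vx_info *vxi, void *here)
+ *	{
+ *		__vxh_get_vx_info(vxi, here);
+ *		atomic_inc(&vxi->vx_usecnt);
+ *		return vxi;
+ *	}
+ *
+ *	#define example_get_vx_info(v)	__example_get_vx_info(v, __HERE__)
+ */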
extern void vxh_dump_history(void);
#else /* CONFIG_VSERVER_HISTORY */
+#define __HERE__ 0
#define vxh_throw_oops() do { } while (0)
-#define vxh_get_vx_info(v) do { } while (0)
-#define vxh_put_vx_info(v) do { } while (0)
+#define __vxh_get_vx_info(v,h) do { } while (0)
+#define __vxh_put_vx_info(v,h) do { } while (0)
-#define vxh_init_vx_info(v,d) do { } while (0)
-#define vxh_set_vx_info(v,d) do { } while (0)
-#define vxh_clr_vx_info(v,d) do { } while (0)
+#define __vxh_init_vx_info(v,d,h) do { } while (0)
+#define __vxh_set_vx_info(v,d,h) do { } while (0)
+#define __vxh_clr_vx_info(v,d,h) do { } while (0)
-#define vxh_claim_vx_info(v,d) do { } while (0)
-#define vxh_release_vx_info(v,d) do { } while (0)
+#define __vxh_claim_vx_info(v,d,h) do { } while (0)
+#define __vxh_release_vx_info(v,d,h) do { } while (0)
#define vxh_alloc_vx_info(v) do { } while (0)
#define vxh_dealloc_vx_info(v) do { } while (0)