2017-02-21 07:43:02 -05:00
|
|
|
/*-
|
|
|
|
|
* Copyright (c) 2017 Hans Petter Selasky
|
|
|
|
|
* All rights reserved.
|
|
|
|
|
*
|
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
|
* are met:
|
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
|
* notice unmodified, this list of conditions, and the following
|
|
|
|
|
* disclaimer.
|
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
|
*
|
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
|
|
|
|
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
|
|
|
|
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
|
|
|
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
|
|
|
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
|
|
|
|
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
|
|
|
|
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
#include <sys/cdefs.h>
|
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
|
|
|
|
|
#include <linux/compat.h>
|
2017-06-18 15:22:05 -04:00
|
|
|
#include <linux/completion.h>
|
2017-02-21 07:43:02 -05:00
|
|
|
#include <linux/mm.h>
|
|
|
|
|
#include <linux/kthread.h>
|
2021-03-11 01:34:42 -05:00
|
|
|
#include <linux/moduleparam.h>
|
2017-02-21 07:43:02 -05:00
|
|
|
|
|
|
|
|
#include <sys/kernel.h>
|
|
|
|
|
#include <sys/eventhandler.h>
|
|
|
|
|
#include <sys/malloc.h>
|
2021-03-11 01:34:42 -05:00
|
|
|
#include <sys/sysctl.h>
|
|
|
|
|
#include <vm/uma.h>
|
|
|
|
|
|
|
|
|
|
#if defined(__i386__) || defined(__amd64__)
|
|
|
|
|
extern u_int first_msi_irq, num_msi_irqs;
|
|
|
|
|
#endif
|
2017-02-21 07:43:02 -05:00
|
|
|
|
|
|
|
|
/* Tag for the thread_dtor event handler registered in linux_current_init(). */
static eventhandler_tag linuxkpi_thread_dtor_tag;

/* UMA zones backing struct task_struct and struct mm_struct allocations. */
static uma_zone_t linux_current_zone;
static uma_zone_t linux_mm_zone;
|
2017-02-21 07:43:02 -05:00
|
|
|
|
2021-03-11 01:48:22 -05:00
|
|
|
/* check if another thread already has a mm_struct */
|
|
|
|
|
static struct mm_struct *
|
|
|
|
|
find_other_mm(struct proc *p)
|
|
|
|
|
{
|
|
|
|
|
struct thread *td;
|
|
|
|
|
struct task_struct *ts;
|
|
|
|
|
struct mm_struct *mm;
|
|
|
|
|
|
|
|
|
|
PROC_LOCK_ASSERT(p, MA_OWNED);
|
|
|
|
|
FOREACH_THREAD_IN_PROC(p, td) {
|
|
|
|
|
ts = td->td_lkpi_task;
|
|
|
|
|
if (ts == NULL)
|
|
|
|
|
continue;
|
|
|
|
|
mm = ts->mm;
|
|
|
|
|
if (mm == NULL)
|
|
|
|
|
continue;
|
|
|
|
|
/* try to share other mm_struct */
|
|
|
|
|
if (atomic_inc_not_zero(&mm->mm_users))
|
|
|
|
|
return (mm);
|
|
|
|
|
}
|
|
|
|
|
return (NULL);
|
|
|
|
|
}
|
|
|
|
|
|
2017-02-21 07:43:02 -05:00
|
|
|
int
|
|
|
|
|
linux_alloc_current(struct thread *td, int flags)
|
|
|
|
|
{
|
2017-04-06 05:07:01 -04:00
|
|
|
struct proc *proc;
|
2017-02-21 07:43:02 -05:00
|
|
|
struct task_struct *ts;
|
2021-03-11 01:48:22 -05:00
|
|
|
struct mm_struct *mm, *mm_other;
|
2017-02-21 07:43:02 -05:00
|
|
|
|
|
|
|
|
MPASS(td->td_lkpi_task == NULL);
|
|
|
|
|
|
2021-03-11 01:34:42 -05:00
|
|
|
if ((td->td_pflags & TDP_ITHREAD) != 0 || !THREAD_CAN_SLEEP()) {
|
|
|
|
|
flags &= ~M_WAITOK;
|
|
|
|
|
flags |= M_NOWAIT | M_USE_RESERVE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ts = uma_zalloc(linux_current_zone, flags | M_ZERO);
|
|
|
|
|
if (ts == NULL) {
|
|
|
|
|
if ((flags & (M_WAITOK | M_NOWAIT)) == M_WAITOK)
|
|
|
|
|
panic("linux_alloc_current: failed to allocate task");
|
2017-02-21 07:43:02 -05:00
|
|
|
return (ENOMEM);
|
2021-03-11 01:34:42 -05:00
|
|
|
}
|
2021-03-11 01:48:22 -05:00
|
|
|
mm = NULL;
|
2017-03-17 06:30:06 -04:00
|
|
|
|
2017-04-06 05:07:01 -04:00
|
|
|
/* setup new task structure */
|
2017-02-21 07:43:02 -05:00
|
|
|
atomic_set(&ts->kthread_flags, 0);
|
|
|
|
|
ts->task_thread = td;
|
|
|
|
|
ts->comm = td->td_name;
|
|
|
|
|
ts->pid = td->td_tid;
|
2019-05-16 13:53:36 -04:00
|
|
|
ts->group_leader = ts;
|
2017-03-17 11:40:24 -04:00
|
|
|
atomic_set(&ts->usage, 1);
|
2017-11-11 06:01:50 -05:00
|
|
|
atomic_set(&ts->state, TASK_RUNNING);
|
2017-06-18 15:22:05 -04:00
|
|
|
init_completion(&ts->parked);
|
|
|
|
|
init_completion(&ts->exited);
|
2017-03-17 06:30:06 -04:00
|
|
|
|
2017-04-06 05:07:01 -04:00
|
|
|
proc = td->td_proc;
|
|
|
|
|
|
|
|
|
|
PROC_LOCK(proc);
|
2021-03-11 01:48:22 -05:00
|
|
|
mm_other = find_other_mm(proc);
|
2017-04-06 05:07:01 -04:00
|
|
|
|
2021-03-11 01:48:22 -05:00
|
|
|
/* use allocated mm_struct as a fallback */
|
|
|
|
|
if (mm_other == NULL) {
|
|
|
|
|
PROC_UNLOCK(proc);
|
|
|
|
|
mm = uma_zalloc(linux_mm_zone, flags | M_ZERO);
|
|
|
|
|
if (mm == NULL) {
|
|
|
|
|
if ((flags & (M_WAITOK | M_NOWAIT)) == M_WAITOK)
|
|
|
|
|
panic(
|
|
|
|
|
"linux_alloc_current: failed to allocate mm");
|
|
|
|
|
uma_zfree(linux_current_zone, mm);
|
|
|
|
|
return (ENOMEM);
|
|
|
|
|
}
|
2017-04-06 05:07:01 -04:00
|
|
|
|
2021-03-11 01:48:22 -05:00
|
|
|
PROC_LOCK(proc);
|
|
|
|
|
mm_other = find_other_mm(proc);
|
|
|
|
|
if (mm_other == NULL) {
|
|
|
|
|
/* setup new mm_struct */
|
|
|
|
|
init_rwsem(&mm->mmap_sem);
|
|
|
|
|
atomic_set(&mm->mm_count, 1);
|
|
|
|
|
atomic_set(&mm->mm_users, 1);
|
2017-04-06 05:07:01 -04:00
|
|
|
/* set mm_struct pointer */
|
2021-03-11 01:48:22 -05:00
|
|
|
ts->mm = mm;
|
|
|
|
|
/* clear pointer to not free memory */
|
|
|
|
|
mm = NULL;
|
|
|
|
|
} else {
|
2017-04-06 05:07:01 -04:00
|
|
|
ts->mm = mm_other;
|
|
|
|
|
}
|
2021-03-11 01:48:22 -05:00
|
|
|
} else {
|
|
|
|
|
ts->mm = mm_other;
|
2017-04-06 05:07:01 -04:00
|
|
|
}
|
2017-03-17 06:30:06 -04:00
|
|
|
|
|
|
|
|
/* store pointer to task struct */
|
2017-02-21 07:43:02 -05:00
|
|
|
td->td_lkpi_task = ts;
|
2017-04-06 05:07:01 -04:00
|
|
|
PROC_UNLOCK(proc);
|
|
|
|
|
|
|
|
|
|
/* free mm_struct pointer, if any */
|
2021-03-11 01:34:42 -05:00
|
|
|
uma_zfree(linux_mm_zone, mm);
|
2017-04-06 05:07:01 -04:00
|
|
|
|
2017-02-21 07:43:02 -05:00
|
|
|
return (0);
|
|
|
|
|
}
|
|
|
|
|
|
2017-03-17 06:30:06 -04:00
|
|
|
struct mm_struct *
|
|
|
|
|
linux_get_task_mm(struct task_struct *task)
|
|
|
|
|
{
|
|
|
|
|
struct mm_struct *mm;
|
|
|
|
|
|
|
|
|
|
mm = task->mm;
|
2017-05-31 09:01:27 -04:00
|
|
|
if (mm != NULL) {
|
2017-03-17 06:30:06 -04:00
|
|
|
atomic_inc(&mm->mm_users);
|
|
|
|
|
return (mm);
|
|
|
|
|
}
|
|
|
|
|
return (NULL);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Final destructor for an mm_struct once its last reference is dropped. */
void
linux_mm_dtor(struct mm_struct *mm)
{
	uma_zfree(linux_mm_zone, mm);
}
|
|
|
|
|
|
2017-02-21 07:43:02 -05:00
|
|
|
/*
 * Release a task_struct: drop its mm_struct user reference and return
 * the task to its zone.
 */
void
linux_free_current(struct task_struct *ts)
{
	mmput(ts->mm);
	uma_zfree(linux_current_zone, ts);
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
linuxkpi_thread_dtor(void *arg __unused, struct thread *td)
|
|
|
|
|
{
|
|
|
|
|
struct task_struct *ts;
|
|
|
|
|
|
|
|
|
|
ts = td->td_lkpi_task;
|
|
|
|
|
if (ts == NULL)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
td->td_lkpi_task = NULL;
|
2017-03-17 11:40:24 -04:00
|
|
|
put_task_struct(ts);
|
|
|
|
|
}
|
|
|
|
|
|
2020-07-02 06:42:58 -04:00
|
|
|
/*
 * Common lookup translating a Linux pid into a task_struct.  Values
 * above PID_MAX are treated as FreeBSD thread IDs; smaller values are
 * process IDs, resolved to the first thread carrying a task_struct.
 * When do_get is true a reference is taken on the returned task.
 * Returns NULL if no matching task was found.
 */
static struct task_struct *
linux_get_pid_task_int(pid_t pid, const bool do_get)
{
	struct thread *td;
	struct proc *p;
	struct task_struct *ts;

	if (pid > PID_MAX) {
		/* try to find corresponding thread */
		td = tdfind(pid, -1);	/* on success the proc is returned locked */
		if (td != NULL) {
			ts = td->td_lkpi_task;
			if (do_get && ts != NULL)
				get_task_struct(ts);
			PROC_UNLOCK(td->td_proc);
			return (ts);
		}
	} else {
		/* try to find corresponding procedure */
		p = pfind(pid);	/* on success the proc is returned locked */
		if (p != NULL) {
			FOREACH_THREAD_IN_PROC(p, td) {
				ts = td->td_lkpi_task;
				if (ts != NULL) {
					if (do_get)
						get_task_struct(ts);
					PROC_UNLOCK(p);
					return (ts);
				}
			}
			PROC_UNLOCK(p);
		}
	}
	return (NULL);
}
|
|
|
|
|
|
|
|
|
|
/* Look up a task by pid without taking a reference on it. */
struct task_struct *
linux_pid_task(pid_t pid)
{
	return (linux_get_pid_task_int(pid, false));
}
|
2017-04-06 06:26:03 -04:00
|
|
|
|
2020-07-02 06:42:58 -04:00
|
|
|
/* Look up a task by pid and take a reference on it; caller must put it. */
struct task_struct *
linux_get_pid_task(pid_t pid)
{
	return (linux_get_pid_task_int(pid, true));
}
|
|
|
|
|
|
2019-03-13 14:51:33 -04:00
|
|
|
bool
|
|
|
|
|
linux_task_exiting(struct task_struct *task)
|
|
|
|
|
{
|
2020-07-01 04:23:57 -04:00
|
|
|
struct thread *td;
|
2019-03-13 14:51:33 -04:00
|
|
|
struct proc *p;
|
|
|
|
|
bool ret;
|
|
|
|
|
|
|
|
|
|
ret = false;
|
2020-07-01 04:23:57 -04:00
|
|
|
|
|
|
|
|
/* try to find corresponding thread */
|
|
|
|
|
td = tdfind(task->pid, -1);
|
|
|
|
|
if (td != NULL) {
|
|
|
|
|
p = td->td_proc;
|
|
|
|
|
} else {
|
|
|
|
|
/* try to find corresponding procedure */
|
|
|
|
|
p = pfind(task->pid);
|
|
|
|
|
}
|
|
|
|
|
|
2019-03-13 14:51:33 -04:00
|
|
|
if (p != NULL) {
|
|
|
|
|
if ((p->p_flag & P_WEXIT) != 0)
|
|
|
|
|
ret = true;
|
|
|
|
|
PROC_UNLOCK(p);
|
|
|
|
|
}
|
|
|
|
|
return (ret);
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-11 01:34:42 -05:00
|
|
|
/*
 * Size of the reserve kept in each UMA zone so that M_NOWAIT
 * allocations from non-sleepable contexts can still succeed.
 * 0 means "auto-size at init time"; settable only as a tunable.
 */
static int lkpi_task_resrv;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, task_struct_reserve,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &lkpi_task_resrv, 0,
    "Number of struct task and struct mm to reserve for non-sleepable "
    "allocations");
|
|
|
|
|
|
2017-02-21 07:43:02 -05:00
|
|
|
static void
|
|
|
|
|
linux_current_init(void *arg __unused)
|
|
|
|
|
{
|
2021-03-11 01:34:42 -05:00
|
|
|
TUNABLE_INT_FETCH("compat.linuxkpi.task_struct_reserve",
|
|
|
|
|
&lkpi_task_resrv);
|
|
|
|
|
if (lkpi_task_resrv == 0) {
|
|
|
|
|
#if defined(__i386__) || defined(__amd64__)
|
|
|
|
|
/*
|
|
|
|
|
* Number of interrupt threads plus per-cpu callout
|
|
|
|
|
* SWI threads.
|
|
|
|
|
*/
|
|
|
|
|
lkpi_task_resrv = first_msi_irq + num_msi_irqs + MAXCPU;
|
|
|
|
|
#else
|
|
|
|
|
lkpi_task_resrv = 1024; /* XXXKIB arbitrary */
|
|
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
linux_current_zone = uma_zcreate("lkpicurr",
|
|
|
|
|
sizeof(struct task_struct), NULL, NULL, NULL, NULL,
|
|
|
|
|
UMA_ALIGN_PTR, 0);
|
|
|
|
|
uma_zone_reserve(linux_current_zone, lkpi_task_resrv);
|
|
|
|
|
uma_prealloc(linux_current_zone, lkpi_task_resrv);
|
|
|
|
|
linux_mm_zone = uma_zcreate("lkpimm",
|
|
|
|
|
sizeof(struct task_struct), NULL, NULL, NULL, NULL,
|
|
|
|
|
UMA_ALIGN_PTR, 0);
|
|
|
|
|
uma_zone_reserve(linux_mm_zone, lkpi_task_resrv);
|
|
|
|
|
uma_prealloc(linux_mm_zone, lkpi_task_resrv);
|
2021-05-21 07:17:42 -04:00
|
|
|
|
|
|
|
|
atomic_thread_fence_seq_cst();
|
|
|
|
|
|
|
|
|
|
linuxkpi_thread_dtor_tag = EVENTHANDLER_REGISTER(thread_dtor,
|
|
|
|
|
linuxkpi_thread_dtor, NULL, EVENTHANDLER_PRI_ANY);
|
2021-05-20 11:28:20 -04:00
|
|
|
lkpi_alloc_current = linux_alloc_current;
|
2017-02-21 07:43:02 -05:00
|
|
|
}
|
2021-03-11 01:08:51 -05:00
|
|
|
/* Run after eventhandler setup so EVENTHANDLER_REGISTER() works. */
SYSINIT(linux_current, SI_SUB_EVENTHANDLER, SI_ORDER_SECOND,
    linux_current_init, NULL);
|
2017-02-21 07:43:02 -05:00
|
|
|
|
|
|
|
|
/*
 * Module teardown: disable the allocator hook, strip the task_struct
 * from every thread in the system, wait for dying threads to be
 * reaped, then deregister the destructor hook and destroy the zones.
 * The ordering here is deliberate and must be preserved.
 */
static void
linux_current_uninit(void *arg __unused)
{
	struct proc *p;
	struct task_struct *ts;
	struct thread *td;

	/* stop new task_struct allocations before reaping existing ones */
	lkpi_alloc_current = linux_alloc_current_noop;

	atomic_thread_fence_seq_cst();

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			if ((ts = td->td_lkpi_task) != NULL) {
				td->td_lkpi_task = NULL;
				put_task_struct(ts);
			}
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);

	/* let exiting threads run their dtor before it is deregistered */
	thread_reap_barrier();

	EVENTHANDLER_DEREGISTER(thread_dtor, linuxkpi_thread_dtor_tag);

	/* safe to destroy the zones only after all tasks are freed */
	uma_zdestroy(linux_current_zone);
	uma_zdestroy(linux_mm_zone);
}
|
2021-03-11 01:08:51 -05:00
|
|
|
/* Mirror of the SYSINIT above; torn down at the same subsystem/order. */
SYSUNINIT(linux_current, SI_SUB_EVENTHANDLER, SI_ORDER_SECOND,
    linux_current_uninit, NULL);
|