patch-2.1.90 linux/kernel/sched.c
- Lines: 226
- Date: Sun Mar 15 10:13:07 1998
- Orig file: v2.1.89/linux/kernel/sched.c
- Orig date: Thu Feb 12 20:56:13 1998
diff -u --recursive --new-file v2.1.89/linux/kernel/sched.c linux/kernel/sched.c
@@ -155,9 +155,9 @@
* The run-queue lock locks the parts that actually access
* and change the run-queues, and have to be interrupt-safe.
*/
-rwlock_t tasklist_lock = RW_LOCK_UNLOCKED;
-spinlock_t scheduler_lock = SPIN_LOCK_UNLOCKED;
-spinlock_t runqueue_lock = SPIN_LOCK_UNLOCKED;
+spinlock_t scheduler_lock = SPIN_LOCK_UNLOCKED; /* should be acquired first */
+spinlock_t runqueue_lock = SPIN_LOCK_UNLOCKED; /* second */
+rwlock_t tasklist_lock = RW_LOCK_UNLOCKED; /* third */
/*
* Wake up a process. Put it on the run-queue if it's not
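The reordered declarations document a fixed lock acquisition order. As a minimal sketch (not part of the patch, and mirroring how setscheduler() below takes the locks), a caller is expected to nest them like this and release them in reverse:

	/* Sketch only: the documented nesting order for the three locks. */
	spin_lock_irq(&scheduler_lock);		/* first  */
	spin_lock(&runqueue_lock);		/* second */
	read_lock(&tasklist_lock);		/* third  */
	/* ... examine or requeue tasks here ... */
	read_unlock(&tasklist_lock);
	spin_unlock(&runqueue_lock);
	spin_unlock_irq(&scheduler_lock);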
@@ -201,14 +201,20 @@
*/
static inline int goodness(struct task_struct * p, struct task_struct * prev, int this_cpu)
{
+ int policy = p->policy;
int weight;
+ if (policy & SCHED_YIELD) {
+ p->policy = policy & ~SCHED_YIELD;
+ return 0;
+ }
+
/*
* Realtime process, select the first one on the
* runqueue (taking priorities within processes
* into account).
*/
- if (p->policy != SCHED_OTHER)
+ if (policy != SCHED_OTHER)
return 1000 + p->rt_priority;
/*
@@ -228,9 +234,10 @@
weight += PROC_CHANGE_PENALTY;
#endif
- /* .. and a slight advantage to the current process */
- if (p == prev)
+ /* .. and a slight advantage to the current thread */
+ if (p->mm == prev->mm)
weight += 1;
+ weight += p->priority;
}
return weight;
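For readability, here is an approximate sketch of goodness() once both hunks above are applied. The context the diff does not show (the counter-based weight and the SMP affinity bonus) is reproduced as assumed from the unpatched 2.1.89 source, so treat this as illustration rather than the authoritative result of the patch:

	static inline int goodness(struct task_struct * p,
				   struct task_struct * prev, int this_cpu)
	{
		int policy = p->policy;
		int weight;

		/* A task that just yielded is skipped for exactly one pass;
		 * clearing the flag makes the effect one-shot. */
		if (policy & SCHED_YIELD) {
			p->policy = policy & ~SCHED_YIELD;
			return 0;
		}

		/* Realtime tasks always outrank SCHED_OTHER tasks. */
		if (policy != SCHED_OTHER)
			return 1000 + p->rt_priority;

		weight = p->counter;	/* assumed unchanged context: remaining ticks */
		if (weight) {
	#ifdef __SMP__
			if (p->processor == this_cpu)
				weight += PROC_CHANGE_PENALTY;
	#endif
			/* same address space as prev => cheaper switch, small bonus */
			if (p->mm == prev->mm)
				weight += 1;
			weight += p->priority;
		}
		return weight;
	}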
@@ -1253,10 +1260,11 @@
static inline struct task_struct *find_process_by_pid(pid_t pid)
{
+ struct task_struct *tsk = current;
+
if (pid)
- return find_task_by_pid(pid);
- else
- return current;
+ tsk = find_task_by_pid(pid);
+ return tsk;
}
static int setscheduler(pid_t pid, int policy,
@@ -1264,48 +1272,70 @@
{
struct sched_param lp;
struct task_struct *p;
+ int retval;
+ retval = -EINVAL;
if (!param || pid < 0)
- return -EINVAL;
+ goto out_nounlock;
+ retval = -EFAULT;
if (copy_from_user(&lp, param, sizeof(struct sched_param)))
- return -EFAULT;
+ goto out_nounlock;
+
+ /*
+ * We play safe to avoid deadlocks.
+ */
+ spin_lock_irq(&scheduler_lock);
+ spin_lock(&runqueue_lock);
+ read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
+
+ retval = -ESRCH;
if (!p)
- return -ESRCH;
+ goto out_unlock;
if (policy < 0)
policy = p->policy;
- else if (policy != SCHED_FIFO && policy != SCHED_RR &&
- policy != SCHED_OTHER)
- return -EINVAL;
+ else {
+ retval = -EINVAL;
+ if (policy != SCHED_FIFO && policy != SCHED_RR &&
+ policy != SCHED_OTHER)
+ goto out_unlock;
+ }
/*
* Valid priorities for SCHED_FIFO and SCHED_RR are 1..99, valid
* priority for SCHED_OTHER is 0.
*/
+ retval = -EINVAL;
if (lp.sched_priority < 0 || lp.sched_priority > 99)
- return -EINVAL;
+ goto out_unlock;
if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
- return -EINVAL;
+ goto out_unlock;
+ retval = -EPERM;
if ((policy == SCHED_FIFO || policy == SCHED_RR) && !suser())
- return -EPERM;
+ goto out_unlock;
if ((current->euid != p->euid) && (current->euid != p->uid) &&
!suser())
- return -EPERM;
+ goto out_unlock;
+ retval = 0;
p->policy = policy;
p->rt_priority = lp.sched_priority;
- spin_lock(&scheduler_lock);
- spin_lock_irq(&runqueue_lock);
if (p->next_run)
move_last_runqueue(p);
- spin_unlock_irq(&runqueue_lock);
- spin_unlock(&scheduler_lock);
+
need_resched = 1;
- return 0;
+
+out_unlock:
+ read_unlock(&tasklist_lock);
+ spin_unlock(&runqueue_lock);
+ spin_unlock_irq(&scheduler_lock);
+
+out_nounlock:
+ return retval;
}
asmlinkage int sys_sched_setscheduler(pid_t pid, int policy,
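The validation rules above (priority 1..99 for SCHED_FIFO/SCHED_RR, 0 for SCHED_OTHER, superuser required for the realtime classes) are easiest to see from user space. A small, hypothetical caller, not part of the patch, requesting SCHED_RR via the glibc wrapper:

	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		struct sched_param sp;

		sp.sched_priority = 10;	/* 1..99 is required for SCHED_FIFO/SCHED_RR */
		/* pid 0 means "the calling process", see find_process_by_pid() above */
		if (sched_setscheduler(0, SCHED_RR, &sp) != 0) {
			perror("sched_setscheduler");	/* e.g. EPERM without superuser rights */
			return 1;
		}
		return 0;
	}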
@@ -1322,42 +1352,64 @@
asmlinkage int sys_sched_getscheduler(pid_t pid)
{
struct task_struct *p;
+ int retval;
+ retval = -EINVAL;
if (pid < 0)
- return -EINVAL;
+ goto out_nounlock;
+
+ read_lock(&tasklist_lock);
+ retval = -ESRCH;
p = find_process_by_pid(pid);
if (!p)
- return -ESRCH;
+ goto out_unlock;
- return p->policy;
+ retval = p->policy;
+
+out_unlock:
+ read_unlock(&tasklist_lock);
+
+out_nounlock:
+ return retval;
}
asmlinkage int sys_sched_getparam(pid_t pid, struct sched_param *param)
{
struct task_struct *p;
struct sched_param lp;
+ int retval;
+ retval = -EINVAL;
if (!param || pid < 0)
- return -EINVAL;
+ goto out_nounlock;
+ read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
+ retval = -ESRCH;
if (!p)
- return -ESRCH;
-
+ goto out_unlock;
lp.sched_priority = p->rt_priority;
- return copy_to_user(param, &lp, sizeof(struct sched_param)) ? -EFAULT : 0;
+ read_unlock(&tasklist_lock);
+
+ /*
+ * This one might sleep, we cannot do it with a spinlock held ...
+ */
+ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
+
+out_nounlock:
+ return retval;
+
+out_unlock:
+ read_unlock(&tasklist_lock);
+ return retval;
}
asmlinkage int sys_sched_yield(void)
{
- /*
- * This is not really right. We'd like to reschedule
- * just _once_ with this process having a zero count.
- */
- current->counter = 0;
spin_lock(&scheduler_lock);
spin_lock_irq(&runqueue_lock);
+ current->policy |= SCHED_YIELD;
move_last_runqueue(current);
spin_unlock_irq(&runqueue_lock);
spin_unlock(&scheduler_lock);
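Taken together: sys_sched_getparam() now copies its result to user space only after the tasklist lock is dropped, since copy_to_user() may sleep, and sys_sched_yield() no longer zeroes the caller's counter; it sets the one-shot SCHED_YIELD flag and requeues the task, so goodness() skips it for exactly one scheduling pass. A small, hypothetical user-space demo (not part of the patch) of the three calls touched by this hunk:

	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		struct sched_param sp;
		int policy;

		policy = sched_getscheduler(0);		/* 0 == calling process */
		if (policy < 0 || sched_getparam(0, &sp) != 0) {
			perror("sched_get*");
			return 1;
		}
		printf("policy %d, rt priority %d\n", policy, sp.sched_priority);

		/* With this patch, each yield costs one scheduling pass
		 * instead of the rest of the time slice. */
		if (sched_yield() != 0) {
			perror("sched_yield");
			return 1;
		}
		return 0;
	}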