/*
 * Kernel task scheduler: task creation and teardown, the timer-driven
 * scheduler, kernel-stack slot allocation, semaphores and mailboxes.
 */
#include "schedule.h"
|
|
#include "memory.h"
|
|
#include "interrupts.h"
|
|
#include "fpu.h"
|
|
#include "console.h"
|
|
#include "string.h"
|
|
#include "fat.h"
|
|
#include "hd.h"
|
|
#include "minix.h"
|
|
|
|
/* Currently executing task; (void *)0 until init_scheduler() runs. */
struct task_t *current_task = (void *)0;
struct task_t *first_task;  /* head of the singly linked run list (task 0, "KERNEL") */
struct task_t *last_task;   /* tail of the run list; new tasks are appended here */

/* Single system TSS; init_scheduler() fills in ss0 for ring transitions. */
struct tss_t system_tss;

extern unsigned long timer_ticks;

/* Global mailbox table, grown one slot at a time by sched_mailbox_new(). */
struct mailbox_t **mailboxes;
unsigned int mbox_count;

extern int page_directory;             /* boot-time page directory, reused by task 0 */
extern void switch_to(void);           /* context switch (normal timer path) */
extern void switch_to_force(void);     /* context switch for the int 0x30 path */
extern void init_tss(struct tss_t *s);

/* Bounds of the free physical memory region; used as a CR3 sanity check. */
extern unsigned char *start_free_mem;
extern unsigned char *end_free_mem;

extern void cli();
extern void sti();

extern unsigned int _sys_stack;        /* base of the boot kernel stack */

/* Last PID handed out; ++current_pid means PIDs start at 1. */
static int current_pid = 0;
/* Bitmap of kernel-stack slots in the region at 0x31000000 (256*8 slots). */
unsigned char stack_spaces[256];

/* Tick counters for the periodic housekeeping done inside schedule(). */
static int gui_timer = 0;
static int block_buffer_trim_timer = 0;
|
|
char *stack_alloc(void) {
|
|
char * blk;
|
|
unsigned long mask, offset, index, i;
|
|
|
|
for (i=0;i<256 * 8;i++) {
|
|
mask = 1;
|
|
index = i / 8;
|
|
offset = i % 8;
|
|
|
|
if (!(stack_spaces[index] & (mask << offset))) {
|
|
stack_spaces[index] |= (mask << offset);
|
|
|
|
blk = (char *)(0x31000000 + (i * KRNL_STACK_SIZE));
|
|
|
|
return blk;
|
|
}
|
|
}
|
|
return (char *)0;
|
|
}
|
|
|
|
int sched_check_user_pages(unsigned int address) {
|
|
struct task_t *task = first_task;
|
|
int i;
|
|
while(task != (void *)0) {
|
|
if (task != current_task) {
|
|
|
|
for (i=0;i<task->user_pages_cnt;i++) {
|
|
if (task->user_pages[i] == address) {
|
|
return task->pid;
|
|
}
|
|
}
|
|
}
|
|
task = task->next_task;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
void stack_free(char *blk) {
|
|
unsigned long mask, offset, index;
|
|
|
|
index = (((unsigned long)blk - 0x31000000) / KRNL_STACK_SIZE) / 8;
|
|
offset = (((unsigned long)blk - 0x31000000) / KRNL_STACK_SIZE) % 8;
|
|
|
|
mask = ~(1<<offset);
|
|
stack_spaces[index] &= mask;
|
|
}
|
|
|
|
/*
 * init_scheduler - bootstrap multitasking.
 *
 * Initialises the system TSS, turns the already-running kernel context
 * into task 0 ("KERNEL") reusing the boot page directory and boot
 * stack, and clears the kernel-stack slot bitmap.  Must run before the
 * first call to schedule().
 */
void init_scheduler(void) {
	memset((char *)&system_tss, 0, sizeof(struct tss_t));
	system_tss.ss0 = 0x10;   /* kernel data segment selector for the ring-0 stack */

	init_tss(&system_tss);

	mbox_count = 0;

	current_task = (struct task_t *)dbmalloc(sizeof(struct task_t), "init_scheduler");
	int i;

	memset(current_task, 0, sizeof(struct task_t));

	/* all file handles start out free */
	for (i=0;i<256;i++) {
		current_task->filehandles[i].free = 1;
	}

	current_task->ss = 0x10;
	current_task->pid = ++current_pid;   /* task 0 gets pid 1 */
	current_task->next_task = (void*)0;
	current_task->cr3 = page_directory;  /* reuse the boot-time page directory */
	current_task->kstack = _sys_stack + KRNL_STACK_SIZE;   /* top of the boot stack */
	current_task->state = TASK_RUNNING;
	first_task = current_task;
	last_task = current_task;
	strcpy(current_task->name, "KERNEL");
	for (i=0;i<USER_STACK_SIZE/0x1000;i++) {
		current_task->user_stack_pages[i] = 0;
	}
	current_task->user_pages_at = 0x40000000;   /* next virtual address for user pages */
	current_task->user_pages = (void *)0;
	current_task->user_pages_virt = (void *)0;
	current_task->user_pages_cnt = 0;
	current_task->parent_task = (void *)0;      /* the kernel task has no parent */
	current_task->waiting_socket_count = 0;
	current_task->sem_count = 0;
	for (i=0;i<64;i++) {
		current_task->user_env_pages[i] = 0;
	}

	/* no kernel-stack slots handed out yet */
	for (i=0;i<256;i++) {
		stack_spaces[i] = 0;
	}
}
|
|
|
|
/*
 * sched_free_task - tear down a finished task and unlink it from the
 * run list.
 *
 * Children of the dying task are turned into zombies (exit status 127,
 * 5-tick TTL) rather than freed immediately.  The task's resources are
 * then released: GUI windows, the kernel-stack slot and its physical
 * pages, user-stack pages, argv/env pages, the page directory, the
 * user-page bookkeeping array and any waiting sockets.
 *
 * NOTE(review): the unlink at the bottom does
 * current_task->next_task = this_task->next_task, which is only correct
 * when this_task == current_task->next_task.  That holds for the sole
 * caller in schedule(), but is fragile for any other caller — confirm
 * before reusing.
 */
void sched_free_task(struct task_t *this_task) {
	int i;

	// kill children
	struct task_t *task = first_task;

	for (task = first_task; task != (void *)0; task = task->next_task) {
		if (task->parent_task == this_task) {
			task->timetorun = 0;
			task->state = TASK_ZOMBIE;
			task->exit_status = 127;   /* conventional "killed" status */
			task->zombie_ttl = 5;      /* ticks the zombie survives unreaped */
		}
	}

	// stop parent task from waiting
	// free stuff
	// free any userpages
	// free stack virt space

	/* destroy any windows the task still owns */
	for (i=0;i<this_task->window_count;i++) {

		gui_destroy_window(this_task->window_list[i]->serialno, this_task);
	}

	if (this_task->window_list != (void *)0) {
		free(this_task->window_list);
	}

	/* kstack points one past the top of the slot; the bitmap wants the base */
	stack_free((char *)(this_task->kstack - KRNL_STACK_SIZE));

	// free stack phys space
	for (i=0;i<KRNL_STACK_SIZE/0x1000;i++) {
		mem_free((char *)this_task->stack_pages[i], "sched_free_task");
	}

	// free user stack space
	/* NOTE(review): user_stack_pages/user_env_pages entries may be 0 for
	   pure kernel tasks — assumes mem_free() tolerates that; confirm. */
	for (i=0;i<USER_STACK_SIZE/0x1000;i++) {
		mem_free((char *)this_task->user_stack_pages[i], "sched_free_task");
	}

	// free argv, env areas
	for (i=0;i<64;i++) {
		mem_free((char *)this_task->user_env_pages[i], "sched_free_task");
	}

	mem_free_page_dir((unsigned int *)this_task->cr3);

	// mem_clear_user_pages();

	dbfree(this_task->user_pages, "user pages");
	this_task->user_pages = (void *)0;
	for (i=0;i<this_task->waiting_socket_count;i++) {
		free(this_task->waiting_sockets[i]);
	}

	// free page directory

	/* keep last_task valid if we are removing the tail */
	if (last_task == this_task) {
		last_task = current_task;
	}

	current_task->next_task = this_task->next_task;

	free(this_task);
}
|
|
|
|
/*
 * schedule - the scheduler, invoked from the timer interrupt (and via
 * int 0x30 for an explicit reschedule).
 *
 * First does periodic housekeeping: trims the block-buffer cache every
 * 20 ticks and flips the GUI every 5.  Then, if the current task has
 * exhausted its time slice or is not runnable, walks the run list for
 * the next TASK_RUNNING task, handling special states on the way:
 *   TASK_FINISHED - freed via sched_free_task()
 *   TASK_SLEEPING - timetorun counts down; the task wakes at 0
 *   TASK_SEM_WAIT - wakes when a semaphore releases or its timeout runs out
 *   TASK_ZOMBIE   - exit status delivered to a TASK_WAITING parent by
 *                   patching the parent's saved stack frame; otherwise
 *                   zombie_ttl counts down to TASK_FINISHED
 * Finally performs the context switch when the chosen task differs from
 * the one that was running.
 */
void schedule(struct regs *r) {
	struct task_t *previous_task = current_task;
	// struct task_t *sem_task = first_task;
	// unsigned int *sem_return;
	int i;
	unsigned int *status;
	unsigned int *wait_return;

	/* flush stale disk block buffers every 20 ticks */
	if (block_buffer_trim_timer == 20) {
		hd_buffer_trim();
		block_buffer_trim_timer = 0;
	}

	block_buffer_trim_timer++;
	/* push the GUI back buffer to the screen every 5 ticks */
	if (gui_timer == 5 ) {
		gui_flip();
		gui_timer = 0;
	}
	gui_timer++;

	if (current_task->state == TASK_RUNNING && current_task->timetorun > 0) {
		current_task->timetorun--;   /* slice not used up: keep running */
	} else {
		do {
			if (current_task->next_task == (void*)0) {
				current_task = first_task;   /* wrap around to the head */
			} else {
				if (current_task->next_task->state == TASK_FINISHED) {
					/* relies on sched_free_task() unlinking next_task */
					sched_free_task(current_task->next_task);
					continue;
				}

				if (current_task->next_task->state == TASK_SLEEPING) {
					if (current_task->next_task->timetorun <= 0) {
						current_task->next_task->state = TASK_RUNNING;   /* wake up */
					} else {
						current_task->next_task->timetorun--;
					}
				}

				if (current_task->next_task->state == TASK_SEM_WAIT) {
					for (i=0;i<current_task->next_task->sem_count;i++) {
						/* NOTE(review): timeout decremented by 10 per pass — units unverified */
						if (current_task->next_task->semaphores[i]->timeout > 0) {
							current_task->next_task->semaphores[i]->timeout -= 10;
							if (current_task->next_task->semaphores[i]->timeout < 0) {
								current_task->next_task->semaphores[i]->timeout = 0;
								current_task->next_task->semaphores[i]->state = 0;   /* timed out: force release */
							}
						}

						if (current_task->next_task->semaphores[i]->state == 0) {
							current_task->next_task->state = TASK_RUNNING;   /* a semaphore released: wake */
						}
					}
					if (current_task->next_task->state == TASK_SEM_WAIT) {
						current_task = current_task->next_task;   /* still blocked: step past it */
						continue;
					}
				}

				/* sanity check: a task's page directory must live inside the
				   free-memory region, otherwise the struct is corrupt and we
				   refuse to switch into it */
				if (current_task->next_task->cr3 > (unsigned int)start_free_mem && current_task->next_task->cr3 < (unsigned int)end_free_mem) {
					current_task = current_task->next_task;
				} else {
					kprintf("PID %d Corrupted CR3 %p Current Task %d\n", current_task->next_task->pid, current_task->next_task->cr3, current_task->pid);
				}
				if (current_task->state == TASK_ZOMBIE) {
					if (current_task->parent_task->state == TASK_WAITING) {
						/* deliver pid/status into the parent's saved register
						   frame; NOTE(review): offsets 44 and 32 depend on the
						   interrupt stack layout — confirm against switch_to */
						wait_return = (unsigned int *)(current_task->parent_task->esp + 44);
						status = (unsigned int *)(current_task->parent_task->esp + 32);

						*wait_return = current_task->pid;
						*status = current_task->exit_status;
						current_task->parent_task->state = TASK_RUNNING;
						current_task->state = TASK_FINISHED;
					} else {
						current_task->zombie_ttl--;
						if (current_task->zombie_ttl <= 0) {
							current_task->state = TASK_FINISHED;   /* nobody reaped us */
						}
					}
				}
			}
		} while (current_task->state != TASK_RUNNING);
	}

	if (previous_task != current_task) {
		// do switch
		if (current_task == first_task) {
			current_task->timetorun = 1;   /* kernel task gets a short slice */
		} else {
			current_task->timetorun = 5;
		}
		/* fpu_disable(): presumably lazy FPU state switching — confirm in fpu.c */
		fpu_disable();

		if (r->int_no == 0x30) {
			switch_to_force();   /* explicit-reschedule interrupt */
		} else {
			switch_to();
		}
	}
}
|
|
|
|
|
|
struct task_t *sched_new_task() {
|
|
struct task_t *new_task = (struct task_t *)malloc(sizeof(struct task_t));
|
|
memset(new_task, 0, sizeof(struct task_t));
|
|
new_task->pid = ++current_pid;
|
|
|
|
new_task->cr3 = (unsigned int)mem_new_pagedirectory(new_task);
|
|
new_task->next_task = (void *)0;
|
|
|
|
new_task->state = TASK_NOTRUNNING;
|
|
|
|
unsigned char *stack_space = stack_alloc();
|
|
|
|
if (stack_space == (void *)0) {
|
|
mem_free((char *)new_task->cr3, "sched_new_task");
|
|
return (void *)0;
|
|
}
|
|
|
|
new_task->window_count = 0;
|
|
new_task->window_list = (void *)0;
|
|
new_task->kstack = (unsigned int)(stack_space + KRNL_STACK_SIZE);
|
|
new_task->esp = new_task->kstack;
|
|
new_task->ustack = 0xf0200000 + USER_STACK_SIZE;
|
|
new_task->selected_device = current_task->selected_device;
|
|
new_task->parent_task = current_task;
|
|
new_task->fpu_enabled = 0;
|
|
new_task->waiting_socket_count = 0;
|
|
new_task->sem_count = 0;
|
|
strcpy(new_task->name, current_task->name);
|
|
int i;
|
|
for (i=0;i<KRNL_STACK_SIZE/0x1000;i++) {
|
|
new_task->stack_pages[i] = (unsigned int)mem_alloc();
|
|
mem_map_page(new_task->stack_pages[i], (unsigned int)(stack_space + (i * 0x1000)), 3);
|
|
mem_map_page_in(new_task->stack_pages[i], (unsigned int)(stack_space + (i * 0x1000)), new_task->cr3, 3);
|
|
memset(stack_space + (i * 0x1000), 0, 0x1000);
|
|
}
|
|
for (i=0;i<256;i++) {
|
|
new_task->filehandles[i].free = current_task->filehandles[i].free;
|
|
|
|
if (new_task->filehandles[i].free == 0) {
|
|
new_task->filehandles[i].filepath = (char *)malloc(strlen(current_task->filehandles[i].filepath) + 1);
|
|
strcpy(new_task->filehandles[i].filepath, current_task->filehandles[i].filepath);
|
|
if (strcmp(new_task->filehandles[i].filepath, "PIPE") == 0) {
|
|
struct quinn_pipe_t *pipe = (struct quinn_pipe_t *)current_task->filehandles[i].fs_specific;
|
|
pipe->ref++;
|
|
new_task->filehandles[i].fs_specific = pipe;
|
|
}
|
|
new_task->filehandles[i].device = current_task->filehandles[i].device;
|
|
new_task->filehandles[i].info = (struct vfs_file_handle_info_t *)malloc(sizeof(struct vfs_file_handle_info_t));
|
|
new_task->filehandles[i].info->position = current_task->filehandles[i].info->position;
|
|
new_task->filehandles[i].info->size = current_task->filehandles[i].info->size;
|
|
new_task->filehandles[i].info->ref = 1;
|
|
if (current_task->filehandles[i].device != (void *)0) {
|
|
if (current_task->filehandles[i].device->fs == 2) {
|
|
new_task->filehandles[i].fs_specific = malloc(sizeof(struct fat_file_info));
|
|
memcpy(new_task->filehandles[i].fs_specific, current_task->filehandles[i].fs_specific, sizeof(struct fat_file_info));
|
|
} else if (current_task->filehandles[i].device->fs == 3) {
|
|
new_task->filehandles[i].fs_specific = malloc(sizeof(struct minix_file_info));
|
|
memcpy(new_task->filehandles[i].fs_specific, current_task->filehandles[i].fs_specific, sizeof(struct minix_file_info));
|
|
}
|
|
}
|
|
}
|
|
}
|
|
mem_cpy_pages((struct task_t *)current_task, new_task);
|
|
last_task->next_task = new_task;
|
|
last_task = new_task;
|
|
return new_task;
|
|
}
|
|
|
|
/*
 * wait - block the current task until a child exits.
 *
 * Marks the task TASK_WAITING and reschedules.  schedule() wakes it
 * when a zombie child is reaped, writing the child's pid and exit
 * status directly into this task's saved register frame.
 */
void wait(struct regs *r) {
	current_task->state = TASK_WAITING;
	schedule(r);
}
|
|
|
|
|
|
void sleep(int microseconds) {
|
|
int start = timer_ticks;
|
|
|
|
while (start + microseconds * 10 < timer_ticks) {
|
|
yield((void *)0);
|
|
}
|
|
}
|
|
|
|
void yield(struct regs *r) {
|
|
current_task->state = TASK_SLEEPING;
|
|
current_task->timetorun += 5;
|
|
if (current_task->timetorun > 10) {
|
|
current_task->timetorun = 10;
|
|
}
|
|
}
|
|
|
|
unsigned int sched_sem_new(unsigned char state) {
|
|
struct semaphore_t *newsem;
|
|
|
|
if (current_task->sem_count == 0) {
|
|
current_task->semaphores = (struct semaphore_t **)malloc(sizeof(struct semaphore_t *));
|
|
} else {
|
|
current_task->semaphores = (struct semaphore_t **)realloc(current_task->semaphores, sizeof(struct semaphore_t *) * (current_task->sem_count + 1));
|
|
}
|
|
|
|
newsem = (struct semaphore_t *)malloc(sizeof(struct semaphore_t));
|
|
|
|
newsem->id = timer_ticks;
|
|
newsem->state = state;
|
|
|
|
current_task->semaphores[current_task->sem_count] = newsem;
|
|
current_task->sem_count++;
|
|
return newsem->id;
|
|
}
|
|
|
|
void sched_sem_free(unsigned int id) {
|
|
int i;
|
|
int j;
|
|
|
|
for (i=0;i<current_task->sem_count;i++) {
|
|
if (current_task->semaphores[i]->id == id) {
|
|
free(current_task->semaphores[i]);
|
|
for (j=i;j<current_task->sem_count-1;j++) {
|
|
current_task->semaphores[j] = current_task->semaphores[j+1];
|
|
}
|
|
current_task->semaphores = (struct semaphore_t **)realloc(current_task->semaphores, sizeof(struct semaphore_t *) * (current_task->sem_count - 1));
|
|
current_task->sem_count--;
|
|
return;
|
|
}
|
|
}
|
|
}
|
|
|
|
int sched_kill_pid(unsigned int pid) {
|
|
for (struct task_t *task = first_task; task != (void *)0; task = task->next_task) {
|
|
if (task->pid == pid) {
|
|
task->timetorun = 0;
|
|
task->state = TASK_ZOMBIE;
|
|
task->exit_status = 127;
|
|
task->zombie_ttl = 5;
|
|
return 0;
|
|
}
|
|
}
|
|
return -1;
|
|
}
|
|
|
|
/* Per-task record handed to userspace by sched_proc_info(). */
struct proc_info_t {
	int pid;        /* task pid */
	char name[32];  /* task name, copied verbatim from task_t.name */
};
|
|
|
|
int sched_proc_info(unsigned char *buffer, int len, int last_pid) {
|
|
struct proc_info_t *procinfo = (struct proc_info_t *)buffer;
|
|
struct task_t *at;
|
|
|
|
at = first_task;
|
|
|
|
int ret = 0;
|
|
for (int i = 0;i<len / sizeof(struct proc_info_t); i++) {
|
|
if (at == (void *)0) {
|
|
return ret;
|
|
}
|
|
|
|
if (at->pid > last_pid) {
|
|
procinfo[ret].pid = at->pid;
|
|
memcpy(procinfo[ret].name, at->name, 32);
|
|
ret++;
|
|
}
|
|
|
|
at = at->next_task;
|
|
}
|
|
|
|
return ret;
|
|
}
|
|
|
|
void sched_sem_signal(unsigned int id, unsigned char state) {
|
|
int i;
|
|
struct task_t *sem_task = current_task;
|
|
|
|
do {
|
|
for (i=0;i<sem_task->sem_count;i++) {
|
|
if (sem_task->semaphores[i]->id == id) {
|
|
sem_task->semaphores[i]->state = state;
|
|
}
|
|
}
|
|
sem_task = sem_task->next_task;
|
|
} while (sem_task != (void *)0);
|
|
}
|
|
|
|
/*
 * sched_sem_wait - take semaphore `id` and block the current task until
 * it is released (via sched_sem_signal) or the timeout expires (counted
 * down in schedule()).
 *
 * NOTE(review): the `waiting` logic looks inverted — `waiting` starts
 * at 1, is cleared only when some OTHER semaphore of this task is not
 * taken, and the task blocks when `!waiting`.  Consequently a task
 * whose only semaphore is `id` never blocks here.  Left untouched
 * because schedule() and existing callers may depend on this exact
 * behavior — confirm intent before changing.
 */
void sched_sem_wait(struct regs *r, unsigned int id, int timeout) {
	int i;
	int waiting = 1;
	for (i=0;i<current_task->sem_count;i++) {
		if (current_task->semaphores[i]->id == id) {
			current_task->semaphores[i]->state = 1;        /* mark taken */
			current_task->semaphores[i]->timeout = timeout;
		} else {
			if (current_task->semaphores[i]->state != 1) {
				waiting = 0;
			}
		}
	}

	if (!waiting) {
		current_task->state = TASK_SEM_WAIT;
		schedule(r);
	}
}
|
|
|
|
int sched_mailbox_new(unsigned int size) {
|
|
struct mailbox_t *newmbox;
|
|
|
|
if (mbox_count == 0) {
|
|
mailboxes = (struct mailbox_t **)malloc(sizeof(struct mailbox_t *));
|
|
} else {
|
|
mailboxes = (struct mailbox_t **)realloc(mailboxes, sizeof(struct mailbox_t *) * (mbox_count + 1));
|
|
}
|
|
|
|
newmbox = (struct mailbox_t *)malloc(sizeof(struct mailbox_t));
|
|
|
|
newmbox->id = timer_ticks;
|
|
newmbox->size = size;
|
|
newmbox->data = (void *)0;
|
|
|
|
mailboxes[mbox_count] = newmbox;
|
|
|
|
return newmbox->id;
|
|
}
|
|
#if 0
|
|
void sched_mailbox_free(unsigned int id) {
|
|
int i;
|
|
int j;
|
|
|
|
for (i=0;i<mbox_count;i++) {
|
|
if (mailboxes[i]->id == id) {
|
|
if (mailboxes[i]->data != (void *)0) {
|
|
free(mailboxes[i]->data);
|
|
}
|
|
free(mailboxes[i]);
|
|
for (j=i;j<mbox_count-1;j++) {
|
|
mailboxes[j] = mailboxes[j+1];
|
|
}
|
|
mailboxes = (struct mailbox_t **)realloc(mailboxes, sizeof(struct mailbox_t *) * (mbox_count - 1));
|
|
mbox_count--;
|
|
return;
|
|
}
|
|
}
|
|
}
|
|
|
|
void sched_mailbox_post(unsigned int id, void *msg) {
|
|
|
|
}
|
|
|
|
int sched_mailbox_trypost(unsigned int id, void *msg) {
|
|
|
|
}
|
|
|
|
unsigned int sched_mailbox_fetch(unsigned int id, void **msg, unsigned int timeout) {
|
|
|
|
}
|
|
|
|
unsigned int sched_mailbox_tryfetch(unsigned int id, void **msg) {
|
|
|
|
}
|
|
|
|
#endif
|