616 lines
17 KiB
C
616 lines
17 KiB
C
#include "pvec.h"
|
|
#include "gui.h"
|
|
#include "socket.h"
|
|
#include "schedule.h"
|
|
#include "memory.h"
|
|
#include "interrupts.h"
|
|
#include "fpu.h"
|
|
#include "console.h"
|
|
#include "string.h"
|
|
#include "ahci.h"
|
|
#include "pata.h"
|
|
#include "hd.h"
|
|
#include "minix.h"
|
|
|
|
struct task_t *current_task = (void *)0; /* task currently on the CPU */

struct task_t *first_task; /* head of the singly-linked run queue */

struct task_t *last_task; /* tail of the run queue (append point) */

/* Single system TSS shared by all tasks (ss0 set in init_scheduler). */
struct tss_t system_tss;

extern unsigned long timer_ticks; /* incremented by the timer IRQ */

/* Dynamically grown table of mailbox pointers; see sched_mailbox_new. */
struct mailbox_t **mailboxes;

unsigned int mbox_count; /* number of live entries in mailboxes[] */

extern int page_directory; /* kernel's boot page directory (task 0 cr3) */

extern void switch_to(void); /* context switch (normal timer path) */

extern void switch_to_force(void); /* context switch (int 0x30 path) */

extern void init_tss(struct tss_t *s);

extern unsigned char *start_free_mem;

extern unsigned char *end_free_mem;

extern void cli();

extern void sti();

extern unsigned int _sys_stack; /* boot kernel stack base */

static int current_pid = 0; /* last pid handed out; pre-incremented */

/* Bitmap of kernel-stack slots in the 0x31000000 window: one bit per
 * KRNL_STACK_SIZE slot (see stack_alloc/stack_free). */
unsigned char stack_spaces[256];

static int gui_timer = 0; /* ticks until next gui_flip() */

static int block_buffer_trim_timer = 0; /* ticks until next hd_buffer_trim() */
|
char *stack_alloc(void) {
|
|
char *blk;
|
|
unsigned long mask, offset, index, i;
|
|
|
|
for (i = 0; i < 256 * 8; i++) {
|
|
mask = 1;
|
|
index = i / 8;
|
|
offset = i % 8;
|
|
|
|
if (!(stack_spaces[index] & (mask << offset))) {
|
|
stack_spaces[index] |= (mask << offset);
|
|
|
|
blk = (char *)(0x31000000 + (i * KRNL_STACK_SIZE));
|
|
|
|
return blk;
|
|
}
|
|
}
|
|
return (char *)0;
|
|
}
|
|
|
|
int sched_check_user_pages(unsigned int address) {
|
|
struct task_t *task = first_task;
|
|
int i;
|
|
while (task != (void *)0) {
|
|
if (task != current_task) {
|
|
|
|
for (i = 0; i < task->user_pages_cnt; i++) {
|
|
if (task->user_pages[i] == address) {
|
|
return task->pid;
|
|
}
|
|
}
|
|
}
|
|
task = task->next_task;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
void stack_free(char *blk) {
|
|
unsigned long mask, offset, index;
|
|
|
|
index = (((unsigned long)blk - 0x31000000) / KRNL_STACK_SIZE) / 8;
|
|
offset = (((unsigned long)blk - 0x31000000) / KRNL_STACK_SIZE) % 8;
|
|
|
|
mask = ~(1 << offset);
|
|
stack_spaces[index] &= mask;
|
|
}
|
|
|
|
void init_scheduler(void) {
|
|
memset((char *)&system_tss, 0, sizeof(struct tss_t));
|
|
system_tss.ss0 = 0x10;
|
|
|
|
init_tss(&system_tss);
|
|
|
|
mbox_count = 0;
|
|
|
|
current_task = (struct task_t *)dbmalloc(sizeof(struct task_t), "init_scheduler");
|
|
int i;
|
|
|
|
memset(current_task, 0, sizeof(struct task_t));
|
|
|
|
for (i = 0; i < 256; i++) {
|
|
current_task->filehandles[i].free = 1;
|
|
}
|
|
|
|
current_task->ss = 0x10;
|
|
current_task->pid = ++current_pid;
|
|
current_task->process_group = current_task->pid;
|
|
current_task->next_task = (void *)0;
|
|
current_task->cr3 = page_directory;
|
|
current_task->kstack = _sys_stack + KRNL_STACK_SIZE;
|
|
current_task->state = TASK_RUNNING;
|
|
current_task->priority = 1;
|
|
first_task = current_task;
|
|
last_task = current_task;
|
|
strcpy(current_task->name, "KERNEL");
|
|
for (i = 0; i < USER_STACK_SIZE / 0x1000; i++) {
|
|
current_task->user_stack_pages[i] = 0;
|
|
}
|
|
current_task->user_pages_at = 0x40000000;
|
|
current_task->user_pages = (void *)0;
|
|
current_task->user_pages_virt = (void *)0;
|
|
current_task->user_pages_cnt = 0;
|
|
current_task->parent_task = (void *)0;
|
|
current_task->waiting_socket_count = 0;
|
|
|
|
current_task->cwd = (char *)malloc(2);
|
|
current_task->cwd[0] = '/';
|
|
current_task->cwd[1] = '\0';
|
|
init_ptr_vector(¤t_task->sockets);
|
|
for (i = 0; i < 64; i++) {
|
|
current_task->user_env_pages[i] = 0;
|
|
}
|
|
|
|
for (i = 0; i < 256; i++) {
|
|
stack_spaces[i] = 0;
|
|
}
|
|
}
|
|
|
|
void sched_tcp_read_wakeup(struct socket_t *s) {
|
|
struct task_t *task = first_task;
|
|
for (task = first_task; task != (void *)0; task = task->next_task) {
|
|
for (int i = 0; i < ptr_vector_len(&task->sockets); i++) {
|
|
if (s == ptr_vector_get(&task->sockets, i)) {
|
|
if (task->state == TASK_SLEEPING && (task->sleep_reason == SLEEP_TCP_READ || task->sleep_reason == SLEEP_TCP_POLL)) {
|
|
task->state = TASK_RUNNING;
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
void sched_remove_socket(struct socket_t *s) {
|
|
for (int i = 0; i < ptr_vector_len(¤t_task->sockets); i++) {
|
|
if (s == ptr_vector_get(¤t_task->sockets, i)) {
|
|
ptr_vector_del(¤t_task->sockets, i);
|
|
return;
|
|
}
|
|
}
|
|
}
|
|
|
|
void sched_remove_socket_from_all_tasks(struct socket_t *s) {
|
|
struct task_t *task = first_task;
|
|
for (task = first_task; task != (void *)0; task = task->next_task) {
|
|
for (int i = 0; i < ptr_vector_len(&task->sockets); i++) {
|
|
if (s == ptr_vector_get(&task->sockets, i)) {
|
|
ptr_vector_del(&task->sockets, i);
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
/*
 * Release all resources held by a TASK_FINISHED task and unlink it from
 * the run queue. Group-shared resources (sockets, windows, user pages,
 * cwd, page directory) are only freed when this is the last task in its
 * process group; kernel and user stacks are always per-task.
 *
 * NOTE(review): the unlink at the bottom assumes current_task
 * immediately precedes this_task in the list — true for the call site
 * in schedule(), which frees current_task->next_task. Confirm before
 * calling from anywhere else.
 */
void sched_free_task(struct task_t *this_task) {
    int i;
    struct task_t *task;
    unsigned char not_last_process_in_group = 0;
    // check if we're the last task in the process group (ie. are we a thread)

    for (task = first_task; task != NULL; task = task->next_task) {
        if (task != this_task && task->process_group == this_task->process_group) {
            not_last_process_in_group = 1;
            break;
        }
    }
    if (!not_last_process_in_group) {
        // kill children: mark them zombies with a short TTL and exit status 127
        for (task = first_task; task != (void *)0; task = task->next_task) {
            if (task->parent_task == this_task) {
                task->timetorun = 0;
                task->state = TASK_ZOMBIE;
                task->exit_status = 127;
                task->zombie_ttl = 5;
            }
        }

        // stop parent task from waiting
        // free stuff
        // free any userpages
        // free stack virt space

        /* close and drop every socket still owned by this task */
        while (ptr_vector_len(&this_task->sockets)) {
            struct socket_t *s = ptr_vector_del(&this_task->sockets, 0);
            socket_close(s->serial);
        }

        destroy_ptr_vector(&this_task->sockets);

        /* destroy any GUI windows the task left open */
        while (ptr_vector_len(&this_task->window_list)) {
            struct window_t *w = ptr_vector_del(&this_task->window_list, 0);
            gui_destroy_window(w->serialno);
        }
        destroy_ptr_vector(&this_task->window_list);
    }

    // threads have seperate stacks...
    stack_free((char *)(this_task->kstack - KRNL_STACK_SIZE));

    // free stack phys space
    for (i = 0; i < KRNL_STACK_SIZE / 0x1000; i++) {
        mem_free((char *)this_task->stack_pages[i], "sched_free_task");
    }

    // free user stack space (the kernel group, pgid 1, has no user stack)
    if (this_task->process_group != 1) {
        for (i = 0; i < USER_STACK_SIZE / 0x1000; i++) {
            mem_free((char *)this_task->user_stack_pages[i], "sched_free_task");
        }
    }

    if (!not_last_process_in_group) {
        // free argv, env areas
        if (this_task->process_group != 1) {
            for (i = 0; i < 64; i++) {
                mem_free((char *)this_task->user_env_pages[i], "sched_free_task");
            }

            for (i = 0; i < this_task->user_pages_cnt; i++) {
                mem_free((char *)this_task->user_pages[i], "sched clear user pages");
            }

            dbfree(this_task->user_pages, "user pages");

            this_task->user_pages = (void *)0;

            for (i = 0; i < this_task->waiting_socket_count; i++) {
                free(this_task->waiting_sockets[i]);
            }

            free(this_task->cwd);
        }
        // free page directory
        mem_free_page_dir((unsigned int *)this_task->cr3);
    }

    /* unlink from the run queue; see NOTE above about list position */
    if (last_task == this_task) {
        last_task = current_task;
    }

    current_task->next_task = this_task->next_task;

    free(this_task);
}
|
|
|
|
/*
 * Timer-tick scheduler. Runs periodic housekeeping (block-buffer trim
 * every ~200 ticks, GUI flip every ~5), then either lets the current
 * task keep its timeslice or walks the run queue round-robin until a
 * TASK_RUNNING task is found, reaping finished tasks, waking sleepers,
 * and reporting zombies to waiting parents along the way. Finally
 * performs the context switch if a different task was chosen.
 *
 * `r` is the interrupt register frame; int_no 0x30 indicates the
 * voluntary-switch path (switch_to_force) rather than the timer path.
 */
void schedule(struct regs *r) {
    struct task_t *previous_task = current_task;

    int i; /* NOTE(review): unused — left in place, removal would be a code change */
    unsigned int *status;
    unsigned int *wait_return;

    /* periodic disk-cache trim */
    if (block_buffer_trim_timer == 200) {
        hd_buffer_trim();
        block_buffer_trim_timer = 0;
    } else {
        block_buffer_trim_timer++;
    }
    /* periodic screen flip */
    if (gui_timer == 5) {
        gui_flip();
        gui_timer = 0;
    } else {
        gui_timer++;
    }

    if (current_task->state == TASK_RUNNING && current_task->timetorun > 0) {
        /* current task still has timeslice left — keep running it */
        current_task->timetorun--;
    } else {
        /* walk the queue until a runnable task is found */
        do {
            if (current_task->next_task == (void *)0) {
                current_task = first_task; /* wrap to the head */
            } else {
                /* reap a finished successor and retry the same slot */
                if (current_task->next_task->state == TASK_FINISHED) {
                    sched_free_task(current_task->next_task);
                    continue;
                }

                if (current_task->next_task->state == TASK_SLEEPING) {
                    /* wake yielded tasks whose slice expired, and timed
                     * sleeps whose deadline has passed; otherwise age
                     * the sleeper's timeslice */
                    if (current_task->next_task->timetorun <= 0 && current_task->next_task->sleep_reason == SLEEP_YIELD) {
                        current_task->next_task->state = TASK_RUNNING;
                    } else if (current_task->next_task->sleep_reason == SLEEP_USER && current_task->next_task->sleep_until <= timer_ticks) {
                        current_task->next_task->state = TASK_RUNNING;
                    } else if (current_task->next_task->timetorun <= 0) {
                        current_task->next_task->timetorun += current_task->next_task->priority;
                    } else {
                        current_task->next_task->timetorun--;
                    }
                }

                // if (current_task->next_task->cr3 > (unsigned int)start_free_mem && current_task->next_task->cr3 < (unsigned int)end_free_mem) {
                current_task = current_task->next_task;
                //} else {
                //    kprintf("PID %d Corrupted CR3 %p Current Task %d\n", current_task->next_task->pid, current_task->next_task->cr3, current_task->pid);
                //}
                if (current_task->state == TASK_ZOMBIE) {
                    if (current_task->parent_task->state == TASK_WAITING) {
                        /* deliver pid and exit status into the parent's
                         * saved register frame (esp+44 / esp+32 are the
                         * syscall return slots), then let both proceed */
                        wait_return = (unsigned int *)(current_task->parent_task->esp + 44);
                        status = (unsigned int *)(current_task->parent_task->esp + 32);

                        *wait_return = current_task->pid;
                        *status = current_task->exit_status;
                        current_task->parent_task->state = TASK_RUNNING;
                        current_task->state = TASK_FINISHED;
                    } else {
                        /* nobody waiting yet — decay the zombie TTL and
                         * discard the exit status when it expires */
                        current_task->zombie_ttl--;
                        if (current_task->zombie_ttl <= 0) {
                            current_task->state = TASK_FINISHED;
                        }
                    }
                }
            }
        } while (current_task->state != TASK_RUNNING);
    }
    if (previous_task != current_task) {
        // do switch
        current_task->timetorun = current_task->priority;

        /* lazy FPU: disable so first FPU use in the new task traps */
        fpu_disable();

        if (r->int_no == 0x30) {
            switch_to_force();
        } else {
            switch_to();
        }
    }
}
|
|
|
|
struct task_t *sched_new_task() {
|
|
struct task_t *new_task = (struct task_t *)malloc(sizeof(struct task_t));
|
|
memset(new_task, 0, sizeof(struct task_t));
|
|
new_task->pid = ++current_pid;
|
|
new_task->process_group = new_task->pid;
|
|
|
|
new_task->cr3 = (unsigned int)mem_new_pagedirectory(new_task);
|
|
new_task->next_task = (void *)0;
|
|
|
|
new_task->state = TASK_NOTRUNNING;
|
|
|
|
unsigned char *stack_space = (unsigned char *)stack_alloc();
|
|
|
|
if (stack_space == (void *)0) {
|
|
mem_free((char *)new_task->cr3, "sched_new_task");
|
|
return (void *)0;
|
|
}
|
|
new_task->kstack = (unsigned int)(stack_space + KRNL_STACK_SIZE);
|
|
new_task->esp = new_task->kstack;
|
|
new_task->ustack = 0xf0200000 + USER_STACK_SIZE;
|
|
new_task->selected_device = current_task->selected_device;
|
|
new_task->parent_task = current_task;
|
|
new_task->fpu_enabled = 0;
|
|
new_task->waiting_socket_count = 0;
|
|
new_task->cwd = (char *)malloc(strlen(current_task->cwd) + 1);
|
|
new_task->priority = 5;
|
|
|
|
strcpy(new_task->cwd, current_task->cwd);
|
|
strcpy(new_task->name, current_task->name);
|
|
int i;
|
|
|
|
init_ptr_vector(&new_task->window_list);
|
|
|
|
init_ptr_vector(&new_task->sockets);
|
|
for (i = 0; i < ptr_vector_len(¤t_task->sockets); i++) {
|
|
struct socket_t *s = ptr_vector_get(¤t_task->sockets, i);
|
|
s->ref++;
|
|
ptr_vector_append(&new_task->sockets, s);
|
|
}
|
|
|
|
for (i = 0; i < KRNL_STACK_SIZE / 0x1000; i++) {
|
|
new_task->stack_pages[i] = (unsigned int)mem_alloc();
|
|
mem_map_page(new_task->stack_pages[i], (unsigned int)(stack_space + (i * 0x1000)), 3);
|
|
mem_map_page_in(new_task->stack_pages[i], (unsigned int)(stack_space + (i * 0x1000)), new_task->cr3, 3);
|
|
memset(stack_space + (i * 0x1000), 0, 0x1000);
|
|
}
|
|
|
|
for (i = 0; i < 256; i++) {
|
|
new_task->filehandles[i].free = current_task->filehandles[i].free;
|
|
|
|
if (new_task->filehandles[i].free == 0) {
|
|
new_task->filehandles[i].filepath = (char *)malloc(strlen(current_task->filehandles[i].filepath) + 1);
|
|
strcpy(new_task->filehandles[i].filepath, current_task->filehandles[i].filepath);
|
|
if (strcmp(new_task->filehandles[i].filepath, "PIPE") == 0) {
|
|
struct quinn_pipe_t *pipe = (struct quinn_pipe_t *)current_task->filehandles[i].fs_specific;
|
|
pipe->ref++;
|
|
new_task->filehandles[i].fs_specific = pipe;
|
|
}
|
|
new_task->filehandles[i].device = current_task->filehandles[i].device;
|
|
new_task->filehandles[i].info = (struct vfs_file_handle_info_t *)malloc(sizeof(struct vfs_file_handle_info_t));
|
|
new_task->filehandles[i].info->position = current_task->filehandles[i].info->position;
|
|
new_task->filehandles[i].info->size = current_task->filehandles[i].info->size;
|
|
new_task->filehandles[i].info->ref = 1;
|
|
if (current_task->filehandles[i].device != (void *)0) {
|
|
if (current_task->filehandles[i].device->fs == 3) {
|
|
new_task->filehandles[i].fs_specific = malloc(sizeof(struct minix_file_info));
|
|
memcpy(new_task->filehandles[i].fs_specific, current_task->filehandles[i].fs_specific, sizeof(struct minix_file_info));
|
|
}
|
|
}
|
|
}
|
|
}
|
|
if (!mem_cpy_pages(current_task, new_task)) {
|
|
kprintf("mem_cpy_pages failed!\n");
|
|
}
|
|
last_task->next_task = new_task;
|
|
last_task = new_task;
|
|
return new_task;
|
|
}
|
|
|
|
extern void setup_task();
|
|
|
|
void sched_new_ktask(void *func, void *ctxt) {
|
|
struct task_t *new_task = (struct task_t *)malloc(sizeof(struct task_t));
|
|
memset(new_task, 0, sizeof(struct task_t));
|
|
|
|
new_task->pid = ++current_pid;
|
|
new_task->process_group = current_task->pid;
|
|
|
|
new_task->state = TASK_NOTRUNNING;
|
|
|
|
new_task->cr3 = mem_new_pagedirectory(new_task);
|
|
unsigned char *stack_space = (unsigned char *)stack_alloc();
|
|
new_task->kstack = (unsigned int)(stack_space + KRNL_STACK_SIZE);
|
|
new_task->esp = new_task->kstack;
|
|
|
|
for (unsigned int i = 0; i < KRNL_STACK_SIZE / 0x1000; i++) {
|
|
new_task->stack_pages[i] = (unsigned int)mem_alloc();
|
|
mem_map_page(new_task->stack_pages[i], (unsigned int)(stack_space + (i * 0x1000)), 3);
|
|
mem_map_page_in(new_task->stack_pages[i], (unsigned int)(stack_space + (i * 0x1000)), new_task->cr3, 3);
|
|
memset(stack_space + (i * 0x1000), 0, 0x1000);
|
|
}
|
|
|
|
strcpy(new_task->name, current_task->name);
|
|
|
|
unsigned int *stack_setup = (unsigned int *)new_task->esp;
|
|
|
|
*--stack_setup = current_task->ss;
|
|
*--stack_setup = 0;
|
|
*--stack_setup = current_task->eflags;
|
|
*--stack_setup = 0x08;
|
|
*--stack_setup = setup_task;
|
|
|
|
*--stack_setup = 0;
|
|
*--stack_setup = 0;
|
|
|
|
*--stack_setup = (unsigned int)func; // EAX
|
|
*--stack_setup = 0; // ECX
|
|
*--stack_setup = 0; // EDX
|
|
*--stack_setup = (unsigned int)ctxt; // EBX
|
|
*--stack_setup = 0; // Just an offset, no value
|
|
*--stack_setup = new_task->esp - 8; // EBP
|
|
*--stack_setup = 0; // ESI
|
|
*--stack_setup = 0; // EDI
|
|
|
|
*--stack_setup = 0x10; // DS
|
|
*--stack_setup = 0x10; // ES
|
|
*--stack_setup = 0x10; // FS
|
|
*--stack_setup = 0x10; // GS
|
|
|
|
new_task->esp = (unsigned int)stack_setup;
|
|
new_task->fpu_enabled = 0;
|
|
last_task->next_task = new_task;
|
|
last_task = new_task;
|
|
|
|
new_task->state = TASK_RUNNING;
|
|
}
|
|
|
|
/*
 * Syscall: block the calling task until a child exits. schedule() sets
 * the task back to TASK_RUNNING when it reaps a zombie child, writing
 * the child's pid and exit status into this task's saved register frame.
 */
void wait(struct regs *r) {
    current_task->state = TASK_WAITING;
    schedule(r);
}
|
|
|
|
void sleep(struct regs *r) {
|
|
current_task->sleep_until = timer_ticks + r->ebx;
|
|
current_task->state = TASK_SLEEPING;
|
|
current_task->sleep_reason = SLEEP_USER;
|
|
current_task->timetorun = 0;
|
|
schedule(r);
|
|
}
|
|
|
|
void yield(struct regs *r) {
|
|
if (current_task->state == TASK_RUNNING) {
|
|
current_task->state = TASK_SLEEPING;
|
|
current_task->sleep_reason = SLEEP_YIELD;
|
|
}
|
|
current_task->timetorun = 0;
|
|
schedule(r);
|
|
}
|
|
|
|
int sched_kill_pid(unsigned int pid) {
|
|
for (struct task_t *task = first_task; task != (void *)0; task = task->next_task) {
|
|
if (task->pid == pid) {
|
|
task->timetorun = 0;
|
|
task->state = TASK_ZOMBIE;
|
|
task->exit_status = 127;
|
|
task->zombie_ttl = 5;
|
|
return 0;
|
|
}
|
|
}
|
|
return -1;
|
|
}
|
|
|
|
/* Record layout handed to userspace by sched_proc_info(). */
struct proc_info_t {
    int pid;          /* process id */
    char name[32];    /* task name, copied verbatim from task_t.name */
    int state;        /* TASK_* state value */
    int sleep_reason; /* SLEEP_* value when state is TASK_SLEEPING, else 0 */
};
|
|
|
|
int sched_proc_info(unsigned char *buffer, int len, int last_pid) {
|
|
struct proc_info_t *procinfo = (struct proc_info_t *)buffer;
|
|
struct task_t *at;
|
|
|
|
at = first_task;
|
|
|
|
int ret = 0;
|
|
for (int i = 0; i < len / sizeof(struct proc_info_t); i++) {
|
|
if (at == (void *)0) {
|
|
return ret;
|
|
}
|
|
|
|
if (at->pid > last_pid) {
|
|
procinfo[ret].pid = at->pid;
|
|
memcpy(procinfo[ret].name, at->name, 32);
|
|
procinfo[ret].state = at->state;
|
|
if (at->state == TASK_SLEEPING) {
|
|
procinfo[ret].sleep_reason = at->sleep_reason;
|
|
} else {
|
|
procinfo[ret].sleep_reason = 0;
|
|
}
|
|
ret++;
|
|
}
|
|
|
|
at = at->next_task;
|
|
}
|
|
|
|
return ret;
|
|
}
|
|
|
|
int sched_mailbox_new(unsigned int size) {
|
|
struct mailbox_t *newmbox;
|
|
|
|
if (mbox_count == 0) {
|
|
mailboxes = (struct mailbox_t **)malloc(sizeof(struct mailbox_t *));
|
|
} else {
|
|
mailboxes = (struct mailbox_t **)realloc(mailboxes, sizeof(struct mailbox_t *) * (mbox_count + 1));
|
|
}
|
|
|
|
newmbox = (struct mailbox_t *)malloc(sizeof(struct mailbox_t));
|
|
|
|
newmbox->id = timer_ticks;
|
|
newmbox->size = size;
|
|
newmbox->data = (void *)0;
|
|
|
|
mailboxes[mbox_count] = newmbox;
|
|
|
|
return newmbox->id;
|
|
}
|
|
#if 0
/*
 * Disabled, unfinished mailbox API (free/post/fetch). Kept for
 * reference; the non-void functions below have no return statements
 * and must not be enabled as-is.
 */
void sched_mailbox_free(unsigned int id) {
    int i;
    int j;

    for (i=0;i<mbox_count;i++) {
        if (mailboxes[i]->id == id) {
            if (mailboxes[i]->data != (void *)0) {
                free(mailboxes[i]->data);
            }
            free(mailboxes[i]);
            /* shift the tail down and shrink the table by one slot */
            for (j=i;j<mbox_count-1;j++) {
                mailboxes[j] = mailboxes[j+1];
            }
            mailboxes = (struct mailbox_t **)realloc(mailboxes, sizeof(struct mailbox_t *) * (mbox_count - 1));
            mbox_count--;
            return;
        }
    }
}

void sched_mailbox_post(unsigned int id, void *msg) {

}

int sched_mailbox_trypost(unsigned int id, void *msg) {

}

unsigned int sched_mailbox_fetch(unsigned int id, void **msg, unsigned int timeout) {

}

unsigned int sched_mailbox_tryfetch(unsigned int id, void **msg) {

}

#endif