// quinn-os/virtnet.c

#include "pvec.h"
#include "ether.h"
#include "virtio.h"
#include "pci.h"
#include "console.h"
#include "string.h"
#include "memory.h"
#include "interrupts.h"
#include "io.h"
#define PAGE_SIZE 4096
#define PAGE_MASK (PAGE_SIZE - 1)

// Round an address up to the next page boundary (no-op if already page-aligned).
static __inline__ unsigned long round_up_to_page(unsigned long addr) {
    if ((addr & PAGE_MASK) != 0) {
        addr &= ~(PAGE_MASK);
        addr += PAGE_SIZE;
    }
    return addr;
}
extern struct ptr_vector ether_devs;
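
// Drain the RX queue (queue 0): for every used descriptor, hand the received
// frame (minus the virtio-net header) to the Ethernet layer and post a fresh
// receive buffer back to the device. Frames that were merged across several
// receive buffers (VIRTIO_MRG_RXBUF) are first copied into one contiguous
// temporary buffer.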
void virtnet_poll(struct virtio_device_info *dev)
{
    struct virtio_queue *vq = &dev->queues[0]; // RX queue
    virtio_disable_interrupts(vq);
    if (vq->last_used_index == vq->used->index) {
        // Nothing pending; re-enable interrupts before returning.
        virtio_enable_interrupts(vq);
        return;
    }
    while (vq->last_used_index != vq->used->index) {
        unsigned short index = vq->last_used_index % vq->queue_size;
        unsigned short buffer_index = vq->used->rings[index].index;
        unsigned char *vbuf = (unsigned char *)dev->vbuffer[0] +
            (vq->qbuffers[buffer_index].address - (unsigned long)vq->buffer);
        unsigned short buffers = ((struct virtio_net_header *)vbuf)->num_buffers;
        if (buffers > 1) {
            // The frame spans several receive buffers: copy them into one
            // contiguous temporary buffer before handing it up.
            unsigned int bufferSize = 0;
            for (int i = 0; i < buffers; i++) {
                bufferSize += vq->qbuffers[(buffer_index + i) % vq->queue_size].length;
            }
            vbuf = (unsigned char *)malloc(bufferSize);
            unsigned int offset = 0;
            for (int i = 0; i < buffers; i++) {
                unsigned char *obuf = (unsigned char *)dev->vbuffer[0] +
                    (vq->qbuffers[(buffer_index + i) % vq->queue_size].address - (unsigned long)vq->buffer);
                memcpy((void *)((unsigned long)vbuf + offset),
                       obuf,
                       vq->qbuffers[(buffer_index + i) % vq->queue_size].length);
                offset += vq->qbuffers[(buffer_index + i) % vq->queue_size].length;
            }
        }
        unsigned int length = vq->used->rings[index].length - sizeof(struct virtio_net_header);
        ether_receive((struct ether_t *)dev->data, vbuf + sizeof(struct virtio_net_header), length);
        // Post one fresh receive buffer for every buffer this frame consumed.
        for (int i = 0; i < buffers; i++) {
            struct virtio_buffer_info bi;
            bi.size = 1526;
            bi.ibuffer = 0;
            bi.flags = VIRTIO_DESC_FLAG_WRITE_ONLY;
            virtio_send_buffer(dev, 0, &bi, 1);
        }
        vq->last_used_index++;
        if (buffers > 1) {
            free(vbuf);
        }
    }
    virtio_enable_interrupts(vq);
}
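
// Queue one Ethernet frame for transmission on the TX queue (queue 1).
// The frame is sent as a two-descriptor chain: a zeroed virtio-net header
// followed by the payload. Frames larger than 1526 bytes are rejected.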
int virtnet_send(struct virtio_device_info *dev, char *packet, int len) {
    if (len > 1526) {
        kprintf("Can't send, frame too big\n");
        return 0;
    }
    struct virtio_buffer_info bi[2];
    struct virtio_net_header h;
    h.flags = 0;
    h.gso_type = 0;
    h.header_length = 0;
    h.gso_size = 0;
    h.checksum_start = 0;
    h.checksum_offset = 0;
    h.num_buffers = 0;
    bi[0].ibuffer = (unsigned char *)&h;
    bi[0].size = sizeof(struct virtio_net_header);
    bi[0].flags = 0;
    bi[1].ibuffer = (unsigned char *)packet;
    bi[1].size = len;
    bi[1].flags = 0;
    virtio_send_buffer(dev, 1, bi, 2);
    return 0;
}
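
// Shared interrupt handler for all virtio NICs registered in ether_devs.
// Reads the ISR status register at iobase+0x13 and, if the queue-interrupt
// bit is set, polls the RX queue and then reclaims the TX descriptors the
// device has finished with.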
void virtnet_isr(struct regs *r)
{
    unsigned char v;
    for (int i = 0; i < ptr_vector_len(&ether_devs); i++) {
        struct ether_t *card = ptr_vector_get(&ether_devs, i);
        if (card->type == 2) {
            struct virtio_device_info *virtio_dev = (struct virtio_device_info *)card->data;
            if (virtio_dev->iobase == 0) continue;
            v = inportb(virtio_dev->iobase + 0x13);
            if ((v & 1) == 1) {
                virtnet_poll(virtio_dev);
                struct virtio_queue *vq = &virtio_dev->queues[1]; // TX queue
                while (vq->last_used_index != vq->used->index) {
                    unsigned short nindex = vq->last_used_index % vq->queue_size;
                    unsigned short buffer_index = vq->used->rings[nindex].index;
                    vq->qbuffers[buffer_index].length = 0;
                    while (vq->qbuffers[buffer_index].next) {
                        vq->qbuffers[buffer_index++].next = 0;
                        buffer_index %= vq->queue_size;
                        vq->qbuffers[buffer_index].length = 0;
                    }
                    vq->last_used_index++;
                }
            }
        }
    }
}
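
// Probe and initialize the count-th virtio network device (as selected by
// pci_find_device_by_vendor_virtio): locate its BARs, install the IRQ handler,
// negotiate features, set up the RX/TX virtqueues and their buffers, read the
// MAC address, and finally set DRIVER_OK. Returns the new ether_t, or NULL
// on failure.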
struct ether_t *init_virtnet(int count) {
    unsigned long i;
    struct pci_device *pci_dev;
    struct virtio_device_info *virtnet_dev = (struct virtio_device_info *)malloc(sizeof(struct virtio_device_info));
    unsigned char v;
    if (virtnet_dev == (void *)0) {
        return (void *)0;
    }
    if (!pci_find_device_by_vendor_virtio(0x1, &pci_dev, count)) {
        free(virtnet_dev);
        return (void *)0;
    }
    // Scan the BARs: an I/O BAR has bit 0 set, a memory BAR has it clear.
    for (i = 0; i < 6; i++) {
        unsigned int m = pci_dev->base[i];
        if (m == 0) continue;
        if (m & 1) {
            virtnet_dev->iobase = m & 0xFFFC;
        } else {
            virtnet_dev->memoryAddress = m & 0xFFFFFFF0;
        }
    }
    pci_set_master(pci_dev, 1);
    pci_set_mem_enable(pci_dev, 1);
    pci_set_io_enable(pci_dev, 1);
    virtnet_dev->irq = pci_dev->irq;
    if (!irq_install_handler(pci_dev->irq, virtnet_isr, 1)) {
        kprintf("Failed to install IRQ handler...\n");
        free(virtnet_dev);
        return (void *)0;
    }
    struct ether_t *ether_dev = (struct ether_t *)malloc(sizeof(struct ether_t));
    if (ether_dev == (void *)0) {
        free(virtnet_dev);
        return (void *)0;
    }
    memset(ether_dev, 0, sizeof(struct ether_t));
    ether_dev->data = virtnet_dev;
    ether_dev->type = 2;
    // Reset the device.
    outportb(virtnet_dev->iobase + 0x12, 0);
    // Tell the device that we have noticed it.
    outportb(virtnet_dev->iobase + 0x12, VIRTIO_ACKNOWLEDGE);
    // Tell the device that we will support it.
    outportb(virtnet_dev->iobase + 0x12, VIRTIO_ACKNOWLEDGE | VIRTIO_DRIVER);
    // Get the features that this device supports. Different hosts may implement different
    // features for each device; the device-specific feature bits are listed in the virtio spec.
    unsigned long supportedFeatures = inportl(virtnet_dev->iobase + 0x00);
    supportedFeatures = 0;
    // Do not use the control queue.
    DISABLE_FEATURE(supportedFeatures, VIRTIO_CTRL_VQ);
    // Disable TCP/UDP segmentation/large-packet offloads.
    DISABLE_FEATURE(supportedFeatures, VIRTIO_GUEST_TSO4);
    DISABLE_FEATURE(supportedFeatures, VIRTIO_GUEST_TSO6);
    DISABLE_FEATURE(supportedFeatures, VIRTIO_GUEST_UFO);
    DISABLE_FEATURE(supportedFeatures, VIRTIO_GUEST_ECN);
    DISABLE_FEATURE(supportedFeatures, VIRTIO_EVENT_IDX);
    ENABLE_FEATURE(supportedFeatures, VIRTIO_MRG_RXBUF);
    ENABLE_FEATURE(supportedFeatures, VIRTIO_MAC);
    DISABLE_FEATURE(supportedFeatures, VIRTIO_CSUM);
    //supportedFeatures = 0x10;
    // This is called the "negotiation": you negotiate with the device which features you will use.
    // You can clear features from the supportedFeatures bitfield to disable the ones your driver
    // doesn't implement, but you cannot enable more features than are currently present in
    // supportedFeatures.
    outportl(virtnet_dev->iobase + 0x04, supportedFeatures);
    // Tell the device that we are OK with those features.
    outportb(virtnet_dev->iobase + 0x12, VIRTIO_ACKNOWLEDGE | VIRTIO_DRIVER | VIRTIO_FEATURES_OK);
    v = inportb(virtnet_dev->iobase + 0x12);
    if ((v & VIRTIO_FEATURES_OK) == 0) {
        kprintf("Feature set not accepted!\n");
        free(virtnet_dev);
        free(ether_dev);
        return (void *)0;
    }
    // Initialize the virtqueues.
    for (int q = 0; q < 16; q++) {
        virtio_queue_setup(virtnet_dev, q);
    }
    virtnet_dev->data = ether_dev;
    struct virtio_queue *rx = &virtnet_dev->queues[0];
    struct virtio_queue *tx = &virtnet_dev->queues[1];
    // Check that both queues were found.
    if (rx->qbuffers_base == 0 || tx->qbuffers_base == 0) {
        kprintf("Base address == 0!\n");
        free(virtnet_dev);
        free(ether_dev);
        return (void *)0;
    }
    // Set up the receive buffers: one 1526-byte chunk per RX descriptor,
    // mapped into the PCI virtual window.
    unsigned int physrx = (unsigned int)mem_alloc_pages(round_up_to_page(rx->queue_size * 1526) / PAGE_SIZE);
    unsigned int virtrx = mem_pci_sbrk(round_up_to_page(rx->queue_size * 1526));
    for (int m = 0; m < round_up_to_page(rx->queue_size * 1526); m += PAGE_SIZE) {
        mem_map_page(physrx + m, virtrx + m, 3);
        memset((void *)(virtrx + m), 0, PAGE_SIZE);
    }
    virtnet_dev->vbuffer[0] = (long unsigned int)virtrx;
    virtnet_dev->vbuffer_len[0] = round_up_to_page(rx->queue_size * 1526);
    rx->buffer = (unsigned char *)physrx;
    rx->chunk_size = 1526;
    rx->available->index = -1;
    rx->used->index = 0;
    virtio_enable_interrupts(rx);
    // Add all buffers to the queue so we can receive data.
    for (i = 0; i < rx->queue_size; i++) {
        struct virtio_buffer_info bi;
        bi.size = 1526;
        bi.ibuffer = 0;
        bi.flags = VIRTIO_DESC_FLAG_WRITE_ONLY;
        virtio_send_buffer(virtnet_dev, 0, &bi, 1);
    }
    // Set up the send buffers.
    unsigned int phystx = (unsigned int)mem_alloc_pages(round_up_to_page(tx->queue_size * 1526) / PAGE_SIZE);
    unsigned int virttx = mem_pci_sbrk(round_up_to_page(tx->queue_size * 1526));
    for (int m = 0; m < round_up_to_page(tx->queue_size * 1526); m += PAGE_SIZE) {
        mem_map_page(phystx + m, virttx + m, 3);
        memset((void *)(virttx + m), 0, PAGE_SIZE);
    }
    virtnet_dev->vbuffer[1] = (long unsigned int)virttx;
    virtnet_dev->vbuffer_len[1] = round_up_to_page(tx->queue_size * 1526);
    tx->buffer = (unsigned char *)phystx;
    tx->chunk_size = 1526;
    tx->available->index = 0;
    tx->available->flags = 0;
    virtio_enable_interrupts(tx);
    outportw(virtnet_dev->iobase + 0x10, 1); // tell the device that the available queue index changed
    // The MAC address is exposed as 6 bytes at iobase+0x14..0x19; they must be read one byte at a time.
    for (int m = 0; m < 6; m++) {
        ether_dev->mac[m] = inportb(virtnet_dev->iobase + 0x14 + m);
    }
    // Tell the device the driver is fully operational.
    outportb(virtnet_dev->iobase + 0x12, VIRTIO_ACKNOWLEDGE | VIRTIO_DRIVER | VIRTIO_FEATURES_OK | VIRTIO_DRIVER_OK);
    //kprintf("%p\n", virttx);
    kprintf("ETHER: Found virtio Ethernet Controller MAC %x:%x:%x:%x:%x:%x IRQ %d\n",
            ether_dev->mac[0], ether_dev->mac[1], ether_dev->mac[2],
            ether_dev->mac[3], ether_dev->mac[4], ether_dev->mac[5], pci_dev->irq);
    return ether_dev;
}
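
// Enable/disable the interface for the Ethernet layer by toggling RX-queue
// interrupts and the ether state flag.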
void virtnet_enable(struct ether_t *ether) {
    struct virtio_device_info *virtio_dev = (struct virtio_device_info *)ether->data;
    virtio_enable_interrupts(&virtio_dev->queues[0]);
    ether->state = 1;
}

void virtnet_disable(struct ether_t *ether) {
    struct virtio_device_info *virtio_dev = (struct virtio_device_info *)ether->data;
    virtio_disable_interrupts(&virtio_dev->queues[0]);
    ether->state = 0;
}