Codebase list seabios / 2357dbb
Update upstream source from tag 'upstream/1.16.2' Update to upstream version '1.16.2' with Debian dir 2bfc10351e81863025de411fc3ee80d7b68f2f6f Michael Tokarev 1 year, 26 days ago
13 changed file(s) with 200 addition(s) and 73 deletion(s). Raw diff Collapse all Expand all
0 1.16.0
0 1.16.2
1616 // Maximum number of map entries in the e820 map
1717 #define BUILD_MAX_E820 32
1818 // Space to reserve in high-memory for tables
19 #define BUILD_MAX_HIGHTABLE (256*1024)
19 #define BUILD_MIN_HIGHTABLE (256*1024)
20 #define BUILD_MAX_HIGHTABLE (16*1024*1024)
2021 // Largest supported externally facing drive id
2122 #define BUILD_MAX_EXTDRIVE 16
2223 // Number of bytes the smbios may be and still live in the f-segment
416416 break;
417417 case 0x01: /* one */
418418 break;
419 case 0x06: /* AliasOp */
420 offset += parse_namestring(s, ptr + offset, "SourceObject");
421 offset += parse_namestring(s, ptr + offset, "AliasObject");
422 break;
419423 case 0x08: /* name op */
420424 offset += parse_namestring(s, ptr + offset, "name");
421425 offset += parse_termobj(s, ptr + offset);
3131 {
3232 // Read in current PAM settings from pci config space
3333 union pamdata_u pamdata;
34 pamdata.data32[0] = pci_config_readl(bdf, ALIGN_DOWN(pam0, 4));
35 pamdata.data32[1] = pci_config_readl(bdf, ALIGN_DOWN(pam0, 4) + 4);
34 pamdata.data32[0] = pci_ioconfig_readl(bdf, ALIGN_DOWN(pam0, 4));
35 pamdata.data32[1] = pci_ioconfig_readl(bdf, ALIGN_DOWN(pam0, 4) + 4);
3636 u8 *pam = &pamdata.data8[pam0 & 0x03];
3737
3838 // Make ram from 0xc0000-0xf0000 writable
4545 pam[0] = 0x30;
4646
4747 // Write PAM settings back to pci config space
48 pci_config_writel(bdf, ALIGN_DOWN(pam0, 4), pamdata.data32[0]);
49 pci_config_writel(bdf, ALIGN_DOWN(pam0, 4) + 4, pamdata.data32[1]);
48 pci_ioconfig_writel(bdf, ALIGN_DOWN(pam0, 4), pamdata.data32[0]);
49 pci_ioconfig_writel(bdf, ALIGN_DOWN(pam0, 4) + 4, pamdata.data32[1]);
5050
5151 if (!ram_present)
5252 // Copy bios.
5858 static void
5959 make_bios_writable_intel(u16 bdf, u32 pam0)
6060 {
61 int reg = pci_config_readb(bdf, pam0);
61 int reg = pci_ioconfig_readb(bdf, pam0);
6262 if (!(reg & 0x10)) {
6363 // QEMU doesn't fully implement the piix shadow capabilities -
6464 // if ram isn't backing the bios segment when shadowing is
124124 // At this point, statically allocated variables can't be written,
125125 // so do this search manually.
126126 int bdf;
127 foreachbdf(bdf, 0) {
128 u32 vendev = pci_config_readl(bdf, PCI_VENDOR_ID);
127 pci_ioconfig_foreachbdf(bdf, 0) {
128 u32 vendev = pci_ioconfig_readl(bdf, PCI_VENDOR_ID);
129129 u16 vendor = vendev & 0xffff, device = vendev >> 16;
130130 if (vendor == PCI_VENDOR_ID_INTEL
131131 && device == PCI_DEVICE_ID_INTEL_82441) {
3939 u32 e820_nr;
4040 } PACKED;
4141
42 static void validate_info(struct xen_seabios_info *t)
42 static struct xen_seabios_info *validate_info(void)
4343 {
44 if ( memcmp(t->signature, "XenHVMSeaBIOS", 14) )
45 panic("Bad Xen info signature\n");
44 struct xen_seabios_info *t = (void *)INFO_PHYSICAL_ADDRESS;
4645
47 if ( t->length < sizeof(struct xen_seabios_info) )
48 panic("Bad Xen info length\n");
46 if ( memcmp(t->signature, "XenHVMSeaBIOS", 14) ) {
47 dprintf(1, "Bad Xen info signature\n");
48 return NULL;
49 }
4950
50 if (checksum(t, t->length) != 0)
51 panic("Bad Xen info checksum\n");
51 if ( t->length < sizeof(struct xen_seabios_info) ) {
52 dprintf(1, "Bad Xen info length\n");
53 return NULL;
54 }
55
56 if (checksum(t, t->length) != 0) {
57 dprintf(1, "Bad Xen info checksum\n");
58 return NULL;
59 }
60 return t;
5261 }
5362
5463 void xen_preinit(void)
8594 dprintf(1, "No Xen hypervisor found.\n");
8695 return;
8796 }
88 PlatformRunningOn = PF_QEMU|PF_XEN;
97 if (validate_info())
98 PlatformRunningOn = PF_QEMU|PF_XEN;
99 else
100 dprintf(1, "Not enabling Xen support due to lack of Xen info\n");
89101 }
90102
91103 static int hypercall_xen_version( int cmd, void *arg)
121133
122134 void xen_biostable_setup(void)
123135 {
124 struct xen_seabios_info *info = (void *)INFO_PHYSICAL_ADDRESS;
125 void **tables = (void*)info->tables;
136 struct xen_seabios_info *info = validate_info();
137 void **tables;
126138 int i;
127139
140 if (!info)
141 panic("Xen info corrupted\n");
142
143 tables = (void*)info->tables;
128144 dprintf(1, "xen: copy BIOS tables...\n");
129145 for (i=0; i<info->tables_nr; i++)
130146 copy_table(tables[i]);
135151 void xen_ramsize_preinit(void)
136152 {
137153 int i;
138 struct xen_seabios_info *info = (void *)INFO_PHYSICAL_ADDRESS;
139 struct e820entry *e820 = (struct e820entry *)info->e820;
140 validate_info(info);
154 struct xen_seabios_info *info = validate_info();
155 struct e820entry *e820;
156
157 if (!info)
158 panic("Xen info corrupted\n");
141159
142160 dprintf(1, "xen: copy e820...\n");
143161
162 e820 = (struct e820entry *)info->e820;
144163 for (i = 0; i < info->e820_nr; i++) {
145164 struct e820entry *e = &e820[i];
146165 e820_add(e->start, e->size, e->type);
2525 return 0x80000000 | (bdf << 8) | (addr & 0xfc);
2626 }
2727
28 void pci_ioconfig_writel(u16 bdf, u32 addr, u32 val)
29 {
30 outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD);
31 outl(val, PORT_PCI_DATA);
32 }
33
2834 void pci_config_writel(u16 bdf, u32 addr, u32 val)
2935 {
3036 if (!MODESEGMENT && mmconfig) {
3137 writel(mmconfig_addr(bdf, addr), val);
3238 } else {
33 outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD);
34 outl(val, PORT_PCI_DATA);
35 }
39 pci_ioconfig_writel(bdf, addr, val);
40 }
41 }
42
43 void pci_ioconfig_writew(u16 bdf, u32 addr, u16 val)
44 {
45 outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD);
46 outw(val, PORT_PCI_DATA + (addr & 2));
3647 }
3748
3849 void pci_config_writew(u16 bdf, u32 addr, u16 val)
4051 if (!MODESEGMENT && mmconfig) {
4152 writew(mmconfig_addr(bdf, addr), val);
4253 } else {
43 outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD);
44 outw(val, PORT_PCI_DATA + (addr & 2));
45 }
54 pci_ioconfig_writew(bdf, addr, val);
55 }
56 }
57
58 void pci_ioconfig_writeb(u16 bdf, u32 addr, u8 val)
59 {
60 outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD);
61 outb(val, PORT_PCI_DATA + (addr & 3));
4662 }
4763
4864 void pci_config_writeb(u16 bdf, u32 addr, u8 val)
5066 if (!MODESEGMENT && mmconfig) {
5167 writeb(mmconfig_addr(bdf, addr), val);
5268 } else {
53 outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD);
54 outb(val, PORT_PCI_DATA + (addr & 3));
55 }
69 pci_ioconfig_writeb(bdf, addr, val);
70 }
71 }
72
73 u32 pci_ioconfig_readl(u16 bdf, u32 addr)
74 {
75 outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD);
76 return inl(PORT_PCI_DATA);
5677 }
5778
5879 u32 pci_config_readl(u16 bdf, u32 addr)
6081 if (!MODESEGMENT && mmconfig) {
6182 return readl(mmconfig_addr(bdf, addr));
6283 } else {
63 outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD);
64 return inl(PORT_PCI_DATA);
65 }
84 return pci_ioconfig_readl(bdf, addr);
85 }
86 }
87
88 u16 pci_ioconfig_readw(u16 bdf, u32 addr)
89 {
90 outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD);
91 return inw(PORT_PCI_DATA + (addr & 2));
6692 }
6793
6894 u16 pci_config_readw(u16 bdf, u32 addr)
7096 if (!MODESEGMENT && mmconfig) {
7197 return readw(mmconfig_addr(bdf, addr));
7298 } else {
73 outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD);
74 return inw(PORT_PCI_DATA + (addr & 2));
75 }
99 return pci_ioconfig_readw(bdf, addr);
100 }
101 }
102
103 u8 pci_ioconfig_readb(u16 bdf, u32 addr)
104 {
105 outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD);
106 return inb(PORT_PCI_DATA + (addr & 3));
76107 }
77108
78109 u8 pci_config_readb(u16 bdf, u32 addr)
80111 if (!MODESEGMENT && mmconfig) {
81112 return readb(mmconfig_addr(bdf, addr));
82113 } else {
83 outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD);
84 return inb(PORT_PCI_DATA + (addr & 3));
114 return pci_ioconfig_readb(bdf, addr);
85115 }
86116 }
87117
124154 }
125155
126156 return 0;
157 }
158
159 // Helper function for pci_ioconfig_foreachbdf() macro - return next device
160 int pci_ioconfig_next(int bdf, int bus)
161 {
162 if (pci_bdf_to_fn(bdf) == 0
163 && (pci_ioconfig_readb(bdf, PCI_HEADER_TYPE) & 0x80) == 0)
164 // Last found device wasn't a multi-function device - skip to
165 // the next device.
166 bdf += 8;
167 else
168 bdf += 1;
169
170 for (;;) {
171 if (pci_bdf_to_bus(bdf) != bus)
172 return -1;
173
174 u16 v = pci_ioconfig_readw(bdf, PCI_VENDOR_ID);
175 if (v != 0x0000 && v != 0xffff)
176 // Device is present.
177 return bdf;
178
179 if (pci_bdf_to_fn(bdf) == 0)
180 bdf += 8;
181 else
182 bdf += 1;
183 }
127184 }
128185
129186 // Helper function for foreachbdf() macro - return next device
2626 return (bus << 8) | devfn;
2727 }
2828
29 #define pci_ioconfig_foreachbdf(BDF, BUS) \
30 for (BDF=pci_ioconfig_next(pci_bus_devfn_to_bdf((BUS), 0)-1, (BUS)) \
31 ; BDF >= 0 \
32 ; BDF=pci_ioconfig_next(BDF, (BUS)))
33
2934 #define foreachbdf(BDF, BUS) \
3035 for (BDF=pci_next(pci_bus_devfn_to_bdf((BUS), 0)-1, (BUS)) \
3136 ; BDF >= 0 \
3237 ; BDF=pci_next(BDF, (BUS)))
3338
39 // standard PCI configuration access mechanism
40 void pci_ioconfig_writel(u16 bdf, u32 addr, u32 val);
41 void pci_ioconfig_writew(u16 bdf, u32 addr, u16 val);
42 void pci_ioconfig_writeb(u16 bdf, u32 addr, u8 val);
43 u32 pci_ioconfig_readl(u16 bdf, u32 addr);
44 u16 pci_ioconfig_readw(u16 bdf, u32 addr);
45 u8 pci_ioconfig_readb(u16 bdf, u32 addr);
46 int pci_ioconfig_next(int bdf, int bus);
47
48 // PCI configuration access using either PCI CAM or PCIe ECAM
3449 void pci_config_writel(u16 bdf, u32 addr, u32 val);
3550 void pci_config_writew(u16 bdf, u32 addr, u16 val);
3651 void pci_config_writeb(u16 bdf, u32 addr, u8 val);
3853 u16 pci_config_readw(u16 bdf, u32 addr);
3954 u8 pci_config_readb(u16 bdf, u32 addr);
4055 void pci_config_maskw(u16 bdf, u32 addr, u16 off, u16 on);
41 void pci_enable_mmconfig(u64 addr, const char *name);
4256 u8 pci_find_capability(u16 bdf, u8 cap_id, u8 cap);
4357 int pci_next(int bdf, int bus);
58
59 void pci_enable_mmconfig(u64 addr, const char *name);
4460 int pci_probe_host(void);
4561 void pci_reboot(void);
4662
2121
2222 // Send USB HID protocol message.
2323 static int
24 set_protocol(struct usb_pipe *pipe, u16 val)
24 set_protocol(struct usb_pipe *pipe, u16 val, u16 inferface)
2525 {
2626 struct usb_ctrlrequest req;
2727 req.bRequestType = USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
2828 req.bRequest = HID_REQ_SET_PROTOCOL;
2929 req.wValue = val;
30 req.wIndex = 0;
30 req.wIndex = inferface;
3131 req.wLength = 0;
3232 return usb_send_default_control(pipe, &req, NULL);
3333 }
7575 }
7676
7777 // Enable "boot" protocol.
78 int ret = set_protocol(usbdev->defpipe, 0);
79 if (ret)
80 return -1;
78 int ret = set_protocol(usbdev->defpipe, 0, usbdev->iface->bInterfaceNumber);
79 if (ret) {
80 dprintf(3, "Failed to set boot protocol\n");
81 return -1;
82 }
83
8184 // Periodically send reports to enable key repeat.
8285 ret = set_idle(usbdev->defpipe, KEYREPEATMS);
8386 if (ret)
117120 }
118121
119122 // Enable "boot" protocol.
120 int ret = set_protocol(usbdev->defpipe, 0);
123 int ret = set_protocol(usbdev->defpipe, 0, usbdev->iface->bInterfaceNumber);
121124 if (ret)
122125 return -1;
123126
371371 void *config_end = (void*)config + config->wTotalLength;
372372 struct usb_interface_descriptor *iface = (void*)(&config[1]);
373373 for (;;) {
374 if (!num_iface-- || (void*)iface + iface->bLength > config_end)
374 if (!num_iface || (void*)iface + iface->bLength > config_end)
375375 // Not a supported device.
376376 goto fail;
377 if (iface->bDescriptorType == USB_DT_INTERFACE
378 && (iface->bInterfaceClass == USB_CLASS_HUB
377 if (iface->bDescriptorType == USB_DT_INTERFACE) {
378 num_iface--;
379 if (iface->bInterfaceClass == USB_CLASS_HUB
379380 || (iface->bInterfaceClass == USB_CLASS_MASS_STORAGE
380381 && (iface->bInterfaceProtocol == US_PR_BULK
381382 || iface->bInterfaceProtocol == US_PR_UAS))
382383 || (iface->bInterfaceClass == USB_CLASS_HID
383 && iface->bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT)))
384 break;
384 && iface->bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT))
385 break;
386 }
385387 iface = (void*)iface + iface->bLength;
386388 }
387389
9191 u16 blk_num_max;
9292
9393 if (vdrive->drive.blksize != 0 && max_io_size != 0)
94 blk_num_max = (u16)max_io_size / vdrive->drive.blksize;
94 blk_num_max = (u16)(max_io_size / vdrive->drive.blksize);
9595 else
9696 /* default blk_num_max if hardware does not advise a proper value */
97 blk_num_max = 8;
97 blk_num_max = 64;
9898
9999 if (op->count <= blk_num_max) {
100100 virtio_blk_op_one_segment(vdrive, write, sg);
150150 vdrive->drive.cntl_id = pci->bdf;
151151
152152 vp_init_simple(&vdrive->vp, pci);
153 if (vp_find_vq(&vdrive->vp, 0, &vdrive->vq) < 0 ) {
154 dprintf(1, "fail to find vq for virtio-blk %pP\n", pci);
155 goto fail;
156 }
157153
158154 if (vdrive->vp.use_modern) {
159155 struct vp_device *vp = &vdrive->vp;
211207 vp_read(&vp->device, struct virtio_blk_config, heads);
212208 vdrive->drive.pchs.sector =
213209 vp_read(&vp->device, struct virtio_blk_config, sectors);
214 } else {
210 }
211
212 if (vp_find_vq(&vdrive->vp, 0, &vdrive->vq) < 0 ) {
213 dprintf(1, "fail to find vq for virtio-blk %pP\n", pci);
214 goto fail;
215 }
216
217 if (!vdrive->vp.use_modern) {
215218 struct virtio_blk_config cfg;
216219 vp_get_legacy(&vdrive->vp, 0, &cfg, sizeof(cfg));
217220
271274 vdrive->drive.cntl_id = (u32)mmio;
272275
273276 vp_init_mmio(&vdrive->vp, mmio);
274 if (vp_find_vq(&vdrive->vp, 0, &vdrive->vq) < 0 ) {
275 dprintf(1, "fail to find vq for virtio-blk-mmio %p\n", mmio);
276 goto fail;
277 }
278277
279278 struct vp_device *vp = &vdrive->vp;
280279 u64 features = vp_get_features(vp);
293292 goto fail;
294293 }
295294
295 if (vp_find_vq(&vdrive->vp, 0, &vdrive->vq) < 0 ) {
296 dprintf(1, "fail to find vq for virtio-blk-mmio %p\n", mmio);
297 goto fail;
298 }
299
296300 if (features & max_segment_size)
297301 vdrive->drive.max_segment_size =
298302 vp_read(&vp->device, struct virtio_blk_config, size_max);
192192 if (vp->use_mmio) {
193193 vp_write(&vp->common, virtio_mmio_cfg, device_feature_select, 0);
194194 f0 = vp_read(&vp->common, virtio_mmio_cfg, device_feature);
195 f1 = 0;
195 vp_write(&vp->common, virtio_mmio_cfg, device_feature_select, 1);
196 f1 = vp_read(&vp->common, virtio_mmio_cfg, device_feature);
196197 } else if (vp->use_modern) {
197198 vp_write(&vp->common, virtio_pci_common_cfg, device_feature_select, 0);
198199 f0 = vp_read(&vp->common, virtio_pci_common_cfg, device_feature);
213214 f1 = features >> 32;
214215
215216 if (vp->use_mmio) {
216 vp_write(&vp->common, virtio_mmio_cfg, guest_feature_select, f0);
217 vp_write(&vp->common, virtio_mmio_cfg, guest_feature_select, 0);
217218 vp_write(&vp->common, virtio_mmio_cfg, guest_feature, f0);
219 vp_write(&vp->common, virtio_mmio_cfg, guest_feature_select, 1);
220 vp_write(&vp->common, virtio_mmio_cfg, guest_feature, f1);
218221 } else if (vp->use_modern) {
219222 vp_write(&vp->common, virtio_pci_common_cfg, guest_feature_select, 0);
220223 vp_write(&vp->common, virtio_pci_common_cfg, guest_feature, f0);
238238 vp_init_mmio(vp, mmio);
239239 u8 status = VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER;
240240
241 u64 features = vp_get_features(vp);
242 u64 version1 = 1ull << VIRTIO_F_VERSION_1;
243 if (features & version1) {
244 u64 iommu_platform = 1ull << VIRTIO_F_IOMMU_PLATFORM;
245
246 vp_set_features(vp, features & (version1 | iommu_platform));
247 vp_set_status(vp, VIRTIO_CONFIG_S_FEATURES_OK);
248 if (!(vp_get_status(vp) & VIRTIO_CONFIG_S_FEATURES_OK)) {
249 dprintf(1, "device didn't accept features: %pP\n", mmio);
250 goto fail;
251 }
252 }
253
241254 if (vp_find_vq(vp, 2, &vq) < 0 ) {
242255 dprintf(1, "fail to find vq for virtio-scsi-mmio %p\n", mmio);
243256 goto fail;
421421 e820_add(BUILD_BIOS_ADDR, BUILD_BIOS_SIZE, E820_RESERVED);
422422
423423 // Populate temp high ram
424 u32 highram = 0;
424 u32 highram_start = 0;
425 u32 highram_size = 0;
425426 int i;
426427 for (i=e820_count-1; i>=0; i--) {
427428 struct e820entry *en = &e820_list[i];
431432 if (en->type != E820_RAM || end > 0xffffffff)
432433 continue;
433434 u32 s = en->start, e = end;
434 if (!highram) {
435 u32 newe = ALIGN_DOWN(e - BUILD_MAX_HIGHTABLE, MALLOC_MIN_ALIGN);
436 if (newe <= e && newe >= s) {
437 highram = newe;
438 e = newe;
435 if (!highram_start) {
436 u32 new_max = ALIGN_DOWN(e - BUILD_MAX_HIGHTABLE, MALLOC_MIN_ALIGN);
437 u32 new_min = ALIGN_DOWN(e - BUILD_MIN_HIGHTABLE, MALLOC_MIN_ALIGN);
438 if (new_max <= e && new_max >= s + BUILD_MAX_HIGHTABLE) {
439 highram_start = e = new_max;
440 highram_size = BUILD_MAX_HIGHTABLE;
441 } else if (new_min <= e && new_min >= s) {
442 highram_start = e = new_min;
443 highram_size = BUILD_MIN_HIGHTABLE;
439444 }
440445 }
441446 alloc_add(&ZoneTmpHigh, s, e);
443448
444449 // Populate regions
445450 alloc_add(&ZoneTmpLow, BUILD_STACK_ADDR, BUILD_EBDA_MINIMUM);
446 if (highram) {
447 alloc_add(&ZoneHigh, highram, highram + BUILD_MAX_HIGHTABLE);
448 e820_add(highram, BUILD_MAX_HIGHTABLE, E820_RESERVED);
451 if (highram_start) {
452 alloc_add(&ZoneHigh, highram_start, highram_start + highram_size);
453 e820_add(highram_start, highram_size, E820_RESERVED);
449454 }
450455 }
451456