Update upstream source from tag 'upstream/1.16.2'
Update to upstream version '1.16.2'
with Debian dir 2bfc10351e81863025de411fc3ee80d7b68f2f6f
Michael Tokarev
1 year, 26 days ago
16 | 16 | // Maximum number of map entries in the e820 map |
17 | 17 | #define BUILD_MAX_E820 32 |
18 | 18 | // Space to reserve in high-memory for tables |
19 | #define BUILD_MAX_HIGHTABLE (256*1024) | |
19 | #define BUILD_MIN_HIGHTABLE (256*1024) | |
20 | #define BUILD_MAX_HIGHTABLE (16*1024*1024) | |
20 | 21 | // Largest supported externally facing drive id |
21 | 22 | #define BUILD_MAX_EXTDRIVE 16 |
22 | 23 | // Number of bytes the smbios may be and still live in the f-segment |
416 | 416 | break; |
417 | 417 | case 0x01: /* one */ |
418 | 418 | break; |
419 | case 0x06: /* AliasOp */ | |
420 | offset += parse_namestring(s, ptr + offset, "SourceObject"); | |
421 | offset += parse_namestring(s, ptr + offset, "AliasObject"); | |
422 | break; | |
419 | 423 | case 0x08: /* name op */ |
420 | 424 | offset += parse_namestring(s, ptr + offset, "name"); |
421 | 425 | offset += parse_termobj(s, ptr + offset); |
31 | 31 | { |
32 | 32 | // Read in current PAM settings from pci config space |
33 | 33 | union pamdata_u pamdata; |
34 | pamdata.data32[0] = pci_config_readl(bdf, ALIGN_DOWN(pam0, 4)); | |
35 | pamdata.data32[1] = pci_config_readl(bdf, ALIGN_DOWN(pam0, 4) + 4); | |
34 | pamdata.data32[0] = pci_ioconfig_readl(bdf, ALIGN_DOWN(pam0, 4)); | |
35 | pamdata.data32[1] = pci_ioconfig_readl(bdf, ALIGN_DOWN(pam0, 4) + 4); | |
36 | 36 | u8 *pam = &pamdata.data8[pam0 & 0x03]; |
37 | 37 | |
38 | 38 | // Make ram from 0xc0000-0xf0000 writable |
45 | 45 | pam[0] = 0x30; |
46 | 46 | |
47 | 47 | // Write PAM settings back to pci config space |
48 | pci_config_writel(bdf, ALIGN_DOWN(pam0, 4), pamdata.data32[0]); | |
49 | pci_config_writel(bdf, ALIGN_DOWN(pam0, 4) + 4, pamdata.data32[1]); | |
48 | pci_ioconfig_writel(bdf, ALIGN_DOWN(pam0, 4), pamdata.data32[0]); | |
49 | pci_ioconfig_writel(bdf, ALIGN_DOWN(pam0, 4) + 4, pamdata.data32[1]); | |
50 | 50 | |
51 | 51 | if (!ram_present) |
52 | 52 | // Copy bios. |
58 | 58 | static void |
59 | 59 | make_bios_writable_intel(u16 bdf, u32 pam0) |
60 | 60 | { |
61 | int reg = pci_config_readb(bdf, pam0); | |
61 | int reg = pci_ioconfig_readb(bdf, pam0); | |
62 | 62 | if (!(reg & 0x10)) { |
63 | 63 | // QEMU doesn't fully implement the piix shadow capabilities - |
64 | 64 | // if ram isn't backing the bios segment when shadowing is |
124 | 124 | // At this point, statically allocated variables can't be written, |
125 | 125 | // so do this search manually. |
126 | 126 | int bdf; |
127 | foreachbdf(bdf, 0) { | |
128 | u32 vendev = pci_config_readl(bdf, PCI_VENDOR_ID); | |
127 | pci_ioconfig_foreachbdf(bdf, 0) { | |
128 | u32 vendev = pci_ioconfig_readl(bdf, PCI_VENDOR_ID); | |
129 | 129 | u16 vendor = vendev & 0xffff, device = vendev >> 16; |
130 | 130 | if (vendor == PCI_VENDOR_ID_INTEL |
131 | 131 | && device == PCI_DEVICE_ID_INTEL_82441) { |
39 | 39 | u32 e820_nr; |
40 | 40 | } PACKED; |
41 | 41 | |
42 | static void validate_info(struct xen_seabios_info *t) | |
42 | static struct xen_seabios_info *validate_info(void) | |
43 | 43 | { |
44 | if ( memcmp(t->signature, "XenHVMSeaBIOS", 14) ) | |
45 | panic("Bad Xen info signature\n"); | |
44 | struct xen_seabios_info *t = (void *)INFO_PHYSICAL_ADDRESS; | |
46 | 45 | |
47 | if ( t->length < sizeof(struct xen_seabios_info) ) | |
48 | panic("Bad Xen info length\n"); | |
46 | if ( memcmp(t->signature, "XenHVMSeaBIOS", 14) ) { | |
47 | dprintf(1, "Bad Xen info signature\n"); | |
48 | return NULL; | |
49 | } | |
49 | 50 | |
50 | if (checksum(t, t->length) != 0) | |
51 | panic("Bad Xen info checksum\n"); | |
51 | if ( t->length < sizeof(struct xen_seabios_info) ) { | |
52 | dprintf(1, "Bad Xen info length\n"); | |
53 | return NULL; | |
54 | } | |
55 | ||
56 | if (checksum(t, t->length) != 0) { | |
57 | dprintf(1, "Bad Xen info checksum\n"); | |
58 | return NULL; | |
59 | } | |
60 | return t; | |
52 | 61 | } |
53 | 62 | |
54 | 63 | void xen_preinit(void) |
85 | 94 | dprintf(1, "No Xen hypervisor found.\n"); |
86 | 95 | return; |
87 | 96 | } |
88 | PlatformRunningOn = PF_QEMU|PF_XEN; | |
97 | if (validate_info()) | |
98 | PlatformRunningOn = PF_QEMU|PF_XEN; | |
99 | else | |
100 | dprintf(1, "Not enabling Xen support due to lack of Xen info\n"); | |
89 | 101 | } |
90 | 102 | |
91 | 103 | static int hypercall_xen_version( int cmd, void *arg) |
121 | 133 | |
122 | 134 | void xen_biostable_setup(void) |
123 | 135 | { |
124 | struct xen_seabios_info *info = (void *)INFO_PHYSICAL_ADDRESS; | |
125 | void **tables = (void*)info->tables; | |
136 | struct xen_seabios_info *info = validate_info(); | |
137 | void **tables; | |
126 | 138 | int i; |
127 | 139 | |
140 | if (!info) | |
141 | panic("Xen info corrupted\n"); | |
142 | ||
143 | tables = (void*)info->tables; | |
128 | 144 | dprintf(1, "xen: copy BIOS tables...\n"); |
129 | 145 | for (i=0; i<info->tables_nr; i++) |
130 | 146 | copy_table(tables[i]); |
135 | 151 | void xen_ramsize_preinit(void) |
136 | 152 | { |
137 | 153 | int i; |
138 | struct xen_seabios_info *info = (void *)INFO_PHYSICAL_ADDRESS; | |
139 | struct e820entry *e820 = (struct e820entry *)info->e820; | |
140 | validate_info(info); | |
154 | struct xen_seabios_info *info = validate_info(); | |
155 | struct e820entry *e820; | |
156 | ||
157 | if (!info) | |
158 | panic("Xen info corrupted\n"); | |
141 | 159 | |
142 | 160 | dprintf(1, "xen: copy e820...\n"); |
143 | 161 | |
162 | e820 = (struct e820entry *)info->e820; | |
144 | 163 | for (i = 0; i < info->e820_nr; i++) { |
145 | 164 | struct e820entry *e = &e820[i]; |
146 | 165 | e820_add(e->start, e->size, e->type); |
25 | 25 | return 0x80000000 | (bdf << 8) | (addr & 0xfc); |
26 | 26 | } |
27 | 27 | |
28 | void pci_ioconfig_writel(u16 bdf, u32 addr, u32 val) | |
29 | { | |
30 | outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD); | |
31 | outl(val, PORT_PCI_DATA); | |
32 | } | |
33 | ||
28 | 34 | void pci_config_writel(u16 bdf, u32 addr, u32 val) |
29 | 35 | { |
30 | 36 | if (!MODESEGMENT && mmconfig) { |
31 | 37 | writel(mmconfig_addr(bdf, addr), val); |
32 | 38 | } else { |
33 | outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD); | |
34 | outl(val, PORT_PCI_DATA); | |
35 | } | |
39 | pci_ioconfig_writel(bdf, addr, val); | |
40 | } | |
41 | } | |
42 | ||
43 | void pci_ioconfig_writew(u16 bdf, u32 addr, u16 val) | |
44 | { | |
45 | outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD); | |
46 | outw(val, PORT_PCI_DATA + (addr & 2)); | |
36 | 47 | } |
37 | 48 | |
38 | 49 | void pci_config_writew(u16 bdf, u32 addr, u16 val) |
40 | 51 | if (!MODESEGMENT && mmconfig) { |
41 | 52 | writew(mmconfig_addr(bdf, addr), val); |
42 | 53 | } else { |
43 | outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD); | |
44 | outw(val, PORT_PCI_DATA + (addr & 2)); | |
45 | } | |
54 | pci_ioconfig_writew(bdf, addr, val); | |
55 | } | |
56 | } | |
57 | ||
58 | void pci_ioconfig_writeb(u16 bdf, u32 addr, u8 val) | |
59 | { | |
60 | outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD); | |
61 | outb(val, PORT_PCI_DATA + (addr & 3)); | |
46 | 62 | } |
47 | 63 | |
48 | 64 | void pci_config_writeb(u16 bdf, u32 addr, u8 val) |
50 | 66 | if (!MODESEGMENT && mmconfig) { |
51 | 67 | writeb(mmconfig_addr(bdf, addr), val); |
52 | 68 | } else { |
53 | outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD); | |
54 | outb(val, PORT_PCI_DATA + (addr & 3)); | |
55 | } | |
69 | pci_ioconfig_writeb(bdf, addr, val); | |
70 | } | |
71 | } | |
72 | ||
73 | u32 pci_ioconfig_readl(u16 bdf, u32 addr) | |
74 | { | |
75 | outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD); | |
76 | return inl(PORT_PCI_DATA); | |
56 | 77 | } |
57 | 78 | |
58 | 79 | u32 pci_config_readl(u16 bdf, u32 addr) |
60 | 81 | if (!MODESEGMENT && mmconfig) { |
61 | 82 | return readl(mmconfig_addr(bdf, addr)); |
62 | 83 | } else { |
63 | outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD); | |
64 | return inl(PORT_PCI_DATA); | |
65 | } | |
84 | return pci_ioconfig_readl(bdf, addr); | |
85 | } | |
86 | } | |
87 | ||
88 | u16 pci_ioconfig_readw(u16 bdf, u32 addr) | |
89 | { | |
90 | outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD); | |
91 | return inw(PORT_PCI_DATA + (addr & 2)); | |
66 | 92 | } |
67 | 93 | |
68 | 94 | u16 pci_config_readw(u16 bdf, u32 addr) |
70 | 96 | if (!MODESEGMENT && mmconfig) { |
71 | 97 | return readw(mmconfig_addr(bdf, addr)); |
72 | 98 | } else { |
73 | outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD); | |
74 | return inw(PORT_PCI_DATA + (addr & 2)); | |
75 | } | |
99 | return pci_ioconfig_readw(bdf, addr); | |
100 | } | |
101 | } | |
102 | ||
103 | u8 pci_ioconfig_readb(u16 bdf, u32 addr) | |
104 | { | |
105 | outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD); | |
106 | return inb(PORT_PCI_DATA + (addr & 3)); | |
76 | 107 | } |
77 | 108 | |
78 | 109 | u8 pci_config_readb(u16 bdf, u32 addr) |
80 | 111 | if (!MODESEGMENT && mmconfig) { |
81 | 112 | return readb(mmconfig_addr(bdf, addr)); |
82 | 113 | } else { |
83 | outl(ioconfig_cmd(bdf, addr), PORT_PCI_CMD); | |
84 | return inb(PORT_PCI_DATA + (addr & 3)); | |
114 | return pci_ioconfig_readb(bdf, addr); | |
85 | 115 | } |
86 | 116 | } |
87 | 117 | |
124 | 154 | } |
125 | 155 | |
126 | 156 | return 0; |
157 | } | |
158 | ||
159 | // Helper function for pci_ioconfig_foreachbdf() macro - return next device | |
160 | int pci_ioconfig_next(int bdf, int bus) | |
161 | { | |
162 | if (pci_bdf_to_fn(bdf) == 0 | |
163 | && (pci_ioconfig_readb(bdf, PCI_HEADER_TYPE) & 0x80) == 0) | |
164 | // Last found device wasn't a multi-function device - skip to | |
165 | // the next device. | |
166 | bdf += 8; | |
167 | else | |
168 | bdf += 1; | |
169 | ||
170 | for (;;) { | |
171 | if (pci_bdf_to_bus(bdf) != bus) | |
172 | return -1; | |
173 | ||
174 | u16 v = pci_ioconfig_readw(bdf, PCI_VENDOR_ID); | |
175 | if (v != 0x0000 && v != 0xffff) | |
176 | // Device is present. | |
177 | return bdf; | |
178 | ||
179 | if (pci_bdf_to_fn(bdf) == 0) | |
180 | bdf += 8; | |
181 | else | |
182 | bdf += 1; | |
183 | } | |
127 | 184 | } |
128 | 185 | |
129 | 186 | // Helper function for foreachbdf() macro - return next device |
26 | 26 | return (bus << 8) | devfn; |
27 | 27 | } |
28 | 28 | |
29 | #define pci_ioconfig_foreachbdf(BDF, BUS) \ | |
30 | for (BDF=pci_ioconfig_next(pci_bus_devfn_to_bdf((BUS), 0)-1, (BUS)) \ | |
31 | ; BDF >= 0 \ | |
32 | ; BDF=pci_ioconfig_next(BDF, (BUS))) | |
33 | ||
29 | 34 | #define foreachbdf(BDF, BUS) \ |
30 | 35 | for (BDF=pci_next(pci_bus_devfn_to_bdf((BUS), 0)-1, (BUS)) \ |
31 | 36 | ; BDF >= 0 \ |
32 | 37 | ; BDF=pci_next(BDF, (BUS))) |
33 | 38 | |
39 | // standard PCI configuration access mechanism | |
40 | void pci_ioconfig_writel(u16 bdf, u32 addr, u32 val); | |
41 | void pci_ioconfig_writew(u16 bdf, u32 addr, u16 val); | |
42 | void pci_ioconfig_writeb(u16 bdf, u32 addr, u8 val); | |
43 | u32 pci_ioconfig_readl(u16 bdf, u32 addr); | |
44 | u16 pci_ioconfig_readw(u16 bdf, u32 addr); | |
45 | u8 pci_ioconfig_readb(u16 bdf, u32 addr); | |
46 | int pci_ioconfig_next(int bdf, int bus); | |
47 | ||
48 | // PCI configuration access using either PCI CAM or PCIe ECAM | |
34 | 49 | void pci_config_writel(u16 bdf, u32 addr, u32 val); |
35 | 50 | void pci_config_writew(u16 bdf, u32 addr, u16 val); |
36 | 51 | void pci_config_writeb(u16 bdf, u32 addr, u8 val); |
38 | 53 | u16 pci_config_readw(u16 bdf, u32 addr); |
39 | 54 | u8 pci_config_readb(u16 bdf, u32 addr); |
40 | 55 | void pci_config_maskw(u16 bdf, u32 addr, u16 off, u16 on); |
41 | void pci_enable_mmconfig(u64 addr, const char *name); | |
42 | 56 | u8 pci_find_capability(u16 bdf, u8 cap_id, u8 cap); |
43 | 57 | int pci_next(int bdf, int bus); |
58 | ||
59 | void pci_enable_mmconfig(u64 addr, const char *name); | |
44 | 60 | int pci_probe_host(void); |
45 | 61 | void pci_reboot(void); |
46 | 62 |
21 | 21 | |
22 | 22 | // Send USB HID protocol message. |
23 | 23 | static int |
24 | set_protocol(struct usb_pipe *pipe, u16 val) | |
24 | set_protocol(struct usb_pipe *pipe, u16 val, u16 inferface) | |
25 | 25 | { |
26 | 26 | struct usb_ctrlrequest req; |
27 | 27 | req.bRequestType = USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE; |
28 | 28 | req.bRequest = HID_REQ_SET_PROTOCOL; |
29 | 29 | req.wValue = val; |
30 | req.wIndex = 0; | |
30 | req.wIndex = inferface; | |
31 | 31 | req.wLength = 0; |
32 | 32 | return usb_send_default_control(pipe, &req, NULL); |
33 | 33 | } |
75 | 75 | } |
76 | 76 | |
77 | 77 | // Enable "boot" protocol. |
78 | int ret = set_protocol(usbdev->defpipe, 0); | |
79 | if (ret) | |
80 | return -1; | |
78 | int ret = set_protocol(usbdev->defpipe, 0, usbdev->iface->bInterfaceNumber); | |
79 | if (ret) { | |
80 | dprintf(3, "Failed to set boot protocol\n"); | |
81 | return -1; | |
82 | } | |
83 | ||
81 | 84 | // Periodically send reports to enable key repeat. |
82 | 85 | ret = set_idle(usbdev->defpipe, KEYREPEATMS); |
83 | 86 | if (ret) |
117 | 120 | } |
118 | 121 | |
119 | 122 | // Enable "boot" protocol. |
120 | int ret = set_protocol(usbdev->defpipe, 0); | |
123 | int ret = set_protocol(usbdev->defpipe, 0, usbdev->iface->bInterfaceNumber); | |
121 | 124 | if (ret) |
122 | 125 | return -1; |
123 | 126 |
371 | 371 | void *config_end = (void*)config + config->wTotalLength; |
372 | 372 | struct usb_interface_descriptor *iface = (void*)(&config[1]); |
373 | 373 | for (;;) { |
374 | if (!num_iface-- || (void*)iface + iface->bLength > config_end) | |
374 | if (!num_iface || (void*)iface + iface->bLength > config_end) | |
375 | 375 | // Not a supported device. |
376 | 376 | goto fail; |
377 | if (iface->bDescriptorType == USB_DT_INTERFACE | |
378 | && (iface->bInterfaceClass == USB_CLASS_HUB | |
377 | if (iface->bDescriptorType == USB_DT_INTERFACE) { | |
378 | num_iface--; | |
379 | if (iface->bInterfaceClass == USB_CLASS_HUB | |
379 | 380 | || (iface->bInterfaceClass == USB_CLASS_MASS_STORAGE |
380 | 381 | && (iface->bInterfaceProtocol == US_PR_BULK |
381 | 382 | || iface->bInterfaceProtocol == US_PR_UAS)) |
382 | 383 | || (iface->bInterfaceClass == USB_CLASS_HID |
383 | && iface->bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT))) | |
384 | break; | |
384 | && iface->bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT)) | |
385 | break; | |
386 | } | |
385 | 387 | iface = (void*)iface + iface->bLength; |
386 | 388 | } |
387 | 389 |
91 | 91 | u16 blk_num_max; |
92 | 92 | |
93 | 93 | if (vdrive->drive.blksize != 0 && max_io_size != 0) |
94 | blk_num_max = (u16)max_io_size / vdrive->drive.blksize; | |
94 | blk_num_max = (u16)(max_io_size / vdrive->drive.blksize); | |
95 | 95 | else |
96 | 96 | /* default blk_num_max if hardware does not advise a proper value */ |
97 | blk_num_max = 8; | |
97 | blk_num_max = 64; | |
98 | 98 | |
99 | 99 | if (op->count <= blk_num_max) { |
100 | 100 | virtio_blk_op_one_segment(vdrive, write, sg); |
150 | 150 | vdrive->drive.cntl_id = pci->bdf; |
151 | 151 | |
152 | 152 | vp_init_simple(&vdrive->vp, pci); |
153 | if (vp_find_vq(&vdrive->vp, 0, &vdrive->vq) < 0 ) { | |
154 | dprintf(1, "fail to find vq for virtio-blk %pP\n", pci); | |
155 | goto fail; | |
156 | } | |
157 | 153 | |
158 | 154 | if (vdrive->vp.use_modern) { |
159 | 155 | struct vp_device *vp = &vdrive->vp; |
211 | 207 | vp_read(&vp->device, struct virtio_blk_config, heads); |
212 | 208 | vdrive->drive.pchs.sector = |
213 | 209 | vp_read(&vp->device, struct virtio_blk_config, sectors); |
214 | } else { | |
210 | } | |
211 | ||
212 | if (vp_find_vq(&vdrive->vp, 0, &vdrive->vq) < 0 ) { | |
213 | dprintf(1, "fail to find vq for virtio-blk %pP\n", pci); | |
214 | goto fail; | |
215 | } | |
216 | ||
217 | if (!vdrive->vp.use_modern) { | |
215 | 218 | struct virtio_blk_config cfg; |
216 | 219 | vp_get_legacy(&vdrive->vp, 0, &cfg, sizeof(cfg)); |
217 | 220 | |
271 | 274 | vdrive->drive.cntl_id = (u32)mmio; |
272 | 275 | |
273 | 276 | vp_init_mmio(&vdrive->vp, mmio); |
274 | if (vp_find_vq(&vdrive->vp, 0, &vdrive->vq) < 0 ) { | |
275 | dprintf(1, "fail to find vq for virtio-blk-mmio %p\n", mmio); | |
276 | goto fail; | |
277 | } | |
278 | 277 | |
279 | 278 | struct vp_device *vp = &vdrive->vp; |
280 | 279 | u64 features = vp_get_features(vp); |
293 | 292 | goto fail; |
294 | 293 | } |
295 | 294 | |
295 | if (vp_find_vq(&vdrive->vp, 0, &vdrive->vq) < 0 ) { | |
296 | dprintf(1, "fail to find vq for virtio-blk-mmio %p\n", mmio); | |
297 | goto fail; | |
298 | } | |
299 | ||
296 | 300 | if (features & max_segment_size) |
297 | 301 | vdrive->drive.max_segment_size = |
298 | 302 | vp_read(&vp->device, struct virtio_blk_config, size_max); |
192 | 192 | if (vp->use_mmio) { |
193 | 193 | vp_write(&vp->common, virtio_mmio_cfg, device_feature_select, 0); |
194 | 194 | f0 = vp_read(&vp->common, virtio_mmio_cfg, device_feature); |
195 | f1 = 0; | |
195 | vp_write(&vp->common, virtio_mmio_cfg, device_feature_select, 1); | |
196 | f1 = vp_read(&vp->common, virtio_mmio_cfg, device_feature); | |
196 | 197 | } else if (vp->use_modern) { |
197 | 198 | vp_write(&vp->common, virtio_pci_common_cfg, device_feature_select, 0); |
198 | 199 | f0 = vp_read(&vp->common, virtio_pci_common_cfg, device_feature); |
213 | 214 | f1 = features >> 32; |
214 | 215 | |
215 | 216 | if (vp->use_mmio) { |
216 | vp_write(&vp->common, virtio_mmio_cfg, guest_feature_select, f0); | |
217 | vp_write(&vp->common, virtio_mmio_cfg, guest_feature_select, 0); | |
217 | 218 | vp_write(&vp->common, virtio_mmio_cfg, guest_feature, f0); |
219 | vp_write(&vp->common, virtio_mmio_cfg, guest_feature_select, 1); | |
220 | vp_write(&vp->common, virtio_mmio_cfg, guest_feature, f1); | |
218 | 221 | } else if (vp->use_modern) { |
219 | 222 | vp_write(&vp->common, virtio_pci_common_cfg, guest_feature_select, 0); |
220 | 223 | vp_write(&vp->common, virtio_pci_common_cfg, guest_feature, f0); |
238 | 238 | vp_init_mmio(vp, mmio); |
239 | 239 | u8 status = VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER; |
240 | 240 | |
241 | u64 features = vp_get_features(vp); | |
242 | u64 version1 = 1ull << VIRTIO_F_VERSION_1; | |
243 | if (features & version1) { | |
244 | u64 iommu_platform = 1ull << VIRTIO_F_IOMMU_PLATFORM; | |
245 | ||
246 | vp_set_features(vp, features & (version1 | iommu_platform)); | |
247 | vp_set_status(vp, VIRTIO_CONFIG_S_FEATURES_OK); | |
248 | if (!(vp_get_status(vp) & VIRTIO_CONFIG_S_FEATURES_OK)) { | |
249 | dprintf(1, "device didn't accept features: %pP\n", mmio); | |
250 | goto fail; | |
251 | } | |
252 | } | |
253 | ||
241 | 254 | if (vp_find_vq(vp, 2, &vq) < 0 ) { |
242 | 255 | dprintf(1, "fail to find vq for virtio-scsi-mmio %p\n", mmio); |
243 | 256 | goto fail; |
421 | 421 | e820_add(BUILD_BIOS_ADDR, BUILD_BIOS_SIZE, E820_RESERVED); |
422 | 422 | |
423 | 423 | // Populate temp high ram |
424 | u32 highram = 0; | |
424 | u32 highram_start = 0; | |
425 | u32 highram_size = 0; | |
425 | 426 | int i; |
426 | 427 | for (i=e820_count-1; i>=0; i--) { |
427 | 428 | struct e820entry *en = &e820_list[i]; |
431 | 432 | if (en->type != E820_RAM || end > 0xffffffff) |
432 | 433 | continue; |
433 | 434 | u32 s = en->start, e = end; |
434 | if (!highram) { | |
435 | u32 newe = ALIGN_DOWN(e - BUILD_MAX_HIGHTABLE, MALLOC_MIN_ALIGN); | |
436 | if (newe <= e && newe >= s) { | |
437 | highram = newe; | |
438 | e = newe; | |
435 | if (!highram_start) { | |
436 | u32 new_max = ALIGN_DOWN(e - BUILD_MAX_HIGHTABLE, MALLOC_MIN_ALIGN); | |
437 | u32 new_min = ALIGN_DOWN(e - BUILD_MIN_HIGHTABLE, MALLOC_MIN_ALIGN); | |
438 | if (new_max <= e && new_max >= s + BUILD_MAX_HIGHTABLE) { | |
439 | highram_start = e = new_max; | |
440 | highram_size = BUILD_MAX_HIGHTABLE; | |
441 | } else if (new_min <= e && new_min >= s) { | |
442 | highram_start = e = new_min; | |
443 | highram_size = BUILD_MIN_HIGHTABLE; | |
439 | 444 | } |
440 | 445 | } |
441 | 446 | alloc_add(&ZoneTmpHigh, s, e); |
443 | 448 | |
444 | 449 | // Populate regions |
445 | 450 | alloc_add(&ZoneTmpLow, BUILD_STACK_ADDR, BUILD_EBDA_MINIMUM); |
446 | if (highram) { | |
447 | alloc_add(&ZoneHigh, highram, highram + BUILD_MAX_HIGHTABLE); | |
448 | e820_add(highram, BUILD_MAX_HIGHTABLE, E820_RESERVED); | |
451 | if (highram_start) { | |
452 | alloc_add(&ZoneHigh, highram_start, highram_start + highram_size); | |
453 | e820_add(highram_start, highram_size, E820_RESERVED); | |
449 | 454 | } |
450 | 455 | } |
451 | 456 |