/*
* QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
*
* PAPR Inter-VM Logical Lan, aka ibmveth
*
* Copyright (c) 2010,2011 David Gibson, IBM Corporation.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "net/net.h"
#include "migration/vmstate.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/qdev-properties.h"
#include "sysemu/sysemu.h"
#include "trace.h"
#include <libfdt.h>
#define ETH_ALEN 6
#define MAX_PACKET_SIZE 65536
/* Compatibility flags for migration */
#define SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT 0
#define SPAPRVLAN_FLAG_RX_BUF_POOLS (1 << SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT)
/*
* Virtual LAN device
*/
typedef uint64_t vlan_bd_t;
#define VLAN_BD_VALID 0x8000000000000000ULL
#define VLAN_BD_TOGGLE 0x4000000000000000ULL
#define VLAN_BD_NO_CSUM 0x0200000000000000ULL
#define VLAN_BD_CSUM_GOOD 0x0100000000000000ULL
#define VLAN_BD_LEN_MASK 0x00ffffff00000000ULL
#define VLAN_BD_LEN(bd) (((bd) & VLAN_BD_LEN_MASK) >> 32)
#define VLAN_BD_ADDR_MASK 0x00000000ffffffffULL
#define VLAN_BD_ADDR(bd) ((bd) & VLAN_BD_ADDR_MASK)
#define VLAN_VALID_BD(addr, len) (VLAN_BD_VALID | \
(((len) << 32) & VLAN_BD_LEN_MASK) | \
(addr & VLAN_BD_ADDR_MASK))
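/*
 * A buffer descriptor is a single 64-bit word, laid out as follows
 * (derived from the masks above):
 *   bit  63      valid
 *   bit  62      toggle
 *   bit  57      no-checksum
 *   bit  56      checksum-good
 *   bits 55..32  buffer length in bytes
 *   bits 31..0   guest physical address of the buffer
 */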
#define VLAN_RXQC_TOGGLE 0x80
#define VLAN_RXQC_VALID 0x40
#define VLAN_RXQC_NO_CSUM 0x02
#define VLAN_RXQC_CSUM_GOOD 0x01
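/*
 * The VLAN_RXQC_* flags make up the control byte that spapr_vlan_receive()
 * stores as the first byte of each 16-byte receive queue entry.
 */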
#define VLAN_RQ_ALIGNMENT 16
#define VLAN_RXQ_BD_OFF 0
#define VLAN_FILTER_BD_OFF 8
#define VLAN_RX_BDS_OFF 16
/*
 * The final 8 bytes of the buffer list page hold a counter of frames
 * dropped because no buffer in the list was capable of holding the
 * frame. We must avoid overwriting that counter with buffer descriptors,
 * or the operating system will report garbage for this statistic.
 */
#define VLAN_RX_BDS_LEN (SPAPR_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF - 8)
#define VLAN_MAX_BUFS (VLAN_RX_BDS_LEN / 8)
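/*
 * With the usual 4 KiB SPAPR_TCE_PAGE_SIZE this allows for
 * (4096 - 16 - 8) / 8 = 509 buffer descriptors on the list page.
 */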
#define TYPE_VIO_SPAPR_VLAN_DEVICE "spapr-vlan"
#define VIO_SPAPR_VLAN_DEVICE(obj) \
OBJECT_CHECK(SpaprVioVlan, (obj), TYPE_VIO_SPAPR_VLAN_DEVICE)
#define RX_POOL_MAX_BDS 4096
#define RX_MAX_POOLS 5
typedef struct {
int32_t bufsize;
int32_t count;
vlan_bd_t bds[RX_POOL_MAX_BDS];
} RxBufPool;
typedef struct SpaprVioVlan {
SpaprVioDevice sdev;
NICConf nicconf;
NICState *nic;
MACAddr perm_mac;
bool isopen;
hwaddr buf_list;
uint32_t add_buf_ptr, use_buf_ptr, rx_bufs;
hwaddr rxq_ptr;
QEMUTimer *rxp_timer;
uint32_t compat_flags; /* Compatibility flags for migration */
RxBufPool *rx_pool[RX_MAX_POOLS]; /* Receive buffer descriptor pools */
} SpaprVioVlan;
static int spapr_vlan_can_receive(NetClientState *nc)
{
SpaprVioVlan *dev = qemu_get_nic_opaque(nc);
return (dev->isopen && dev->rx_bufs > 0);
}
/**
* The last 8 bytes of the receive buffer list page (that has been
* supplied by the guest with the H_REGISTER_LOGICAL_LAN call) contain
* a counter for frames that have been dropped because there was no
 * suitable receive buffer available. This function increments that
 * counter by one.
*/
static void spapr_vlan_record_dropped_rx_frame(SpaprVioVlan *dev)
{
uint64_t cnt;
    cnt = vio_ldq(&dev->sdev, dev->buf_list + SPAPR_TCE_PAGE_SIZE - 8);
    vio_stq(&dev->sdev, dev->buf_list + SPAPR_TCE_PAGE_SIZE - 8, cnt + 1);
}
/**
* Get buffer descriptor from one of our receive buffer pools
*/
static vlan_bd_t spapr_vlan_get_rx_bd_from_pool(SpaprVioVlan *dev,
size_t size)
{
vlan_bd_t bd;
int pool;
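    /*
     * The pools are sorted by ascending bufsize (see the qsort in
     * spapr_vlan_add_rxbuf_to_pool()), so the first pool that is large
     * enough is also the smallest suitable one. The "+ 8" accounts for
     * the 8-byte handle kept at the start of each receive buffer.
     */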
for (pool = 0; pool < RX_MAX_POOLS; pool++) {
if (dev->rx_pool[pool]->count > 0 &&
dev->rx_pool[pool]->bufsize >= size + 8) {
break;
}
}
if (pool == RX_MAX_POOLS) {
/* Failed to find a suitable buffer */
return 0;
}
trace_spapr_vlan_get_rx_bd_from_pool_found(pool,
dev->rx_pool[pool]->count,
dev->rx_bufs);
/* Remove the buffer from the pool */
dev->rx_pool[pool]->count--;
bd = dev->rx_pool[pool]->bds[dev->rx_pool[pool]->count];
dev->rx_pool[pool]->bds[dev->rx_pool[pool]->count] = 0;
return bd;
}
/**
* Get buffer descriptor from the receive buffer list page that has been
* supplied by the guest with the H_REGISTER_LOGICAL_LAN call
*/
static vlan_bd_t spapr_vlan_get_rx_bd_from_page(SpaprVioVlan *dev,
size_t size)
{
int buf_ptr = dev->use_buf_ptr;
vlan_bd_t bd;
do {
buf_ptr += 8;
if (buf_ptr >= VLAN_RX_BDS_LEN + VLAN_RX_BDS_OFF) {
buf_ptr = VLAN_RX_BDS_OFF;
}
bd = vio_ldq(&dev->sdev, dev->buf_list + buf_ptr);
trace_spapr_vlan_get_rx_bd_from_page(buf_ptr, (uint64_t)bd);
} while ((!(bd & VLAN_BD_VALID) || VLAN_BD_LEN(bd) < size + 8)
&& buf_ptr != dev->use_buf_ptr);
if (!(bd & VLAN_BD_VALID) || VLAN_BD_LEN(bd) < size + 8) {
/* Failed to find a suitable buffer */
return 0;
}
    /* Remove the buffer from the buffer list page */
dev->use_buf_ptr = buf_ptr;
vio_stq(&dev->sdev, dev->buf_list + dev->use_buf_ptr, 0);
trace_spapr_vlan_get_rx_bd_from_page_found(dev->use_buf_ptr, dev->rx_bufs);
return bd;
}
static ssize_t spapr_vlan_receive(NetClientState *nc, const uint8_t *buf,
size_t size)
{
SpaprVioVlan *dev = qemu_get_nic_opaque(nc);
SpaprVioDevice *sdev = VIO_SPAPR_DEVICE(dev);
vlan_bd_t rxq_bd = vio_ldq(sdev, dev->buf_list + VLAN_RXQ_BD_OFF);
vlan_bd_t bd;
uint64_t handle;
uint8_t control;
trace_spapr_vlan_receive(sdev->qdev.id, dev->rx_bufs);
if (!dev->isopen) {
return -1;
}
if (!dev->rx_bufs) {
spapr_vlan_record_dropped_rx_frame(dev);
return 0;
}
if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
bd = spapr_vlan_get_rx_bd_from_pool(dev, size);
} else {
bd = spapr_vlan_get_rx_bd_from_page(dev, size);
}
if (!bd) {
spapr_vlan_record_dropped_rx_frame(dev);
return 0;
}
dev->rx_bufs--;
/* Transfer the packet data */
if (spapr_vio_dma_write(sdev, VLAN_BD_ADDR(bd) + 8, buf, size) < 0) {
return -1;
}
trace_spapr_vlan_receive_dma_completed();
/* Update the receive queue */
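    /*
     * Each receive queue entry is 16 bytes:
     *   +0  control byte (VLAN_RXQC_* flags)
     *   +2  halfword 8: the offset of the frame data within the buffer
     *       (the frame was copied to buffer address + 8 above)
     *   +4  word: frame length
     *   +8  doubleword: the handle the guest stored in the first 8 bytes
     *       of the receive buffer
     */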
control = VLAN_RXQC_TOGGLE | VLAN_RXQC_VALID;
if (rxq_bd & VLAN_BD_TOGGLE) {
control ^= VLAN_RXQC_TOGGLE;
}
handle = vio_ldq(sdev, VLAN_BD_ADDR(bd));
vio_stq(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 8, handle);
vio_stl(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 4, size);
vio_sth(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 2, 8);
vio_stb(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr, control);
trace_spapr_vlan_receive_wrote(dev->rxq_ptr,
vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
dev->rxq_ptr),
vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
dev->rxq_ptr + 8));
dev->rxq_ptr += 16;
if (dev->rxq_ptr >= VLAN_BD_LEN(rxq_bd)) {
dev->rxq_ptr = 0;
vio_stq(sdev, dev->buf_list + VLAN_RXQ_BD_OFF, rxq_bd ^ VLAN_BD_TOGGLE);
}
if (sdev->signal_state & 1) {
spapr_vio_irq_pulse(sdev);
}
return size;
}
static NetClientInfo net_spapr_vlan_info = {
.type = NET_CLIENT_DRIVER_NIC,
.size = sizeof(NICState),
.can_receive = spapr_vlan_can_receive,
.receive = spapr_vlan_receive,
};
static void spapr_vlan_flush_rx_queue(void *opaque)
{
SpaprVioVlan *dev = opaque;
qemu_flush_queued_packets(qemu_get_queue(dev->nic));
}
static void spapr_vlan_reset_rx_pool(RxBufPool *rxp)
{
    /*
     * Use INT_MAX as bufsize so that unused pools are moved to the end
     * of the list during the qsort in spapr_vlan_add_rxbuf_to_pool() later.
     */
rxp->bufsize = INT_MAX;
rxp->count = 0;
memset(rxp->bds, 0, sizeof(rxp->bds));
}
static void spapr_vlan_reset(SpaprVioDevice *sdev)
{
SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
int i;
dev->buf_list = 0;
dev->rx_bufs = 0;
dev->isopen = 0;
if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
for (i = 0; i < RX_MAX_POOLS; i++) {
spapr_vlan_reset_rx_pool(dev->rx_pool[i]);
}
}
memcpy(&dev->nicconf.macaddr.a, &dev->perm_mac.a,
sizeof(dev->nicconf.macaddr.a));
qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a);
}
static void spapr_vlan_realize(SpaprVioDevice *sdev, Error **errp)
{
SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
qemu_macaddr_default_if_unset(&dev->nicconf.macaddr);
memcpy(&dev->perm_mac.a, &dev->nicconf.macaddr.a, sizeof(dev->perm_mac.a));
dev->nic = qemu_new_nic(&net_spapr_vlan_info, &dev->nicconf,
object_get_typename(OBJECT(sdev)), sdev->qdev.id, dev);
qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a);
dev->rxp_timer = timer_new_us(QEMU_CLOCK_VIRTUAL, spapr_vlan_flush_rx_queue,
dev);
}
static void spapr_vlan_instance_init(Object *obj)
{
SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(obj);
int i;
device_add_bootindex_property(obj, &dev->nicconf.bootindex,
"bootindex", "",
DEVICE(dev), NULL);
if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
for (i = 0; i < RX_MAX_POOLS; i++) {
dev->rx_pool[i] = g_new(RxBufPool, 1);
spapr_vlan_reset_rx_pool(dev->rx_pool[i]);
}
}
}
static void spapr_vlan_instance_finalize(Object *obj)
{
SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(obj);
int i;
if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
for (i = 0; i < RX_MAX_POOLS; i++) {
g_free(dev->rx_pool[i]);
dev->rx_pool[i] = NULL;
}
}
if (dev->rxp_timer) {
timer_del(dev->rxp_timer);
timer_free(dev->rxp_timer);
}
}
void spapr_vlan_create(SpaprVioBus *bus, NICInfo *nd)
{
DeviceState *dev;
dev = qdev_create(&bus->bus, "spapr-vlan");
qdev_set_nic_properties(dev, nd);
qdev_init_nofail(dev);
}
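/*
 * Note that with a modern command line the device can also be instantiated
 * directly, e.g. "-device spapr-vlan,netdev=net0"; this helper only backs
 * the legacy "-net nic" configuration path.
 */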
static int spapr_vlan_devnode(SpaprVioDevice *dev, void *fdt, int node_off)
{
SpaprVioVlan *vdev = VIO_SPAPR_VLAN_DEVICE(dev);
uint8_t padded_mac[8] = {0, 0};
int ret;
/* Some old phyp versions give the mac address in an 8-byte
* property. The kernel driver (before 3.10) has an insane workaround;
* rather than doing the obvious thing and checking the property
* length, it checks whether the first byte has 0b10 in the low
* bits. If a correct 6-byte property has a different first byte
* the kernel will get the wrong mac address, overrunning its
* buffer in the process (read only, thank goodness).
*
* Here we return a 6-byte address unless that would break a pre-3.10
* driver. In that case we return a padded 8-byte address to allow the old
* workaround to succeed. */
if ((vdev->nicconf.macaddr.a[0] & 0x3) == 0x2) {
ret = fdt_setprop(fdt, node_off, "local-mac-address",
&vdev->nicconf.macaddr, ETH_ALEN);
} else {
memcpy(&padded_mac[2], &vdev->nicconf.macaddr, ETH_ALEN);
ret = fdt_setprop(fdt, node_off, "local-mac-address",
padded_mac, sizeof(padded_mac));
}
if (ret < 0) {
return ret;
}
ret = fdt_setprop_cell(fdt, node_off, "ibm,mac-address-filters", 0);
if (ret < 0) {
return ret;
}
return 0;
}
static int check_bd(SpaprVioVlan *dev, vlan_bd_t bd,
target_ulong alignment)
{
if ((VLAN_BD_ADDR(bd) % alignment)
|| (VLAN_BD_LEN(bd) % alignment)) {
return -1;
}
if (!spapr_vio_dma_valid(&dev->sdev, VLAN_BD_ADDR(bd),
VLAN_BD_LEN(bd), DMA_DIRECTION_FROM_DEVICE)
|| !spapr_vio_dma_valid(&dev->sdev, VLAN_BD_ADDR(bd),
VLAN_BD_LEN(bd), DMA_DIRECTION_TO_DEVICE)) {
return -1;
}
return 0;
}
static target_ulong h_register_logical_lan(PowerPCCPU *cpu,
SpaprMachineState *spapr,
target_ulong opcode,
target_ulong *args)
{
target_ulong reg = args[0];
target_ulong buf_list = args[1];
target_ulong rec_queue = args[2];
target_ulong filter_list = args[3];
SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
vlan_bd_t filter_list_bd;
if (!dev) {
return H_PARAMETER;
}
if (dev->isopen) {
hcall_dprintf("H_REGISTER_LOGICAL_LAN called twice without "
"H_FREE_LOGICAL_LAN\n");
return H_RESOURCE;
}
if (check_bd(dev, VLAN_VALID_BD(buf_list, SPAPR_TCE_PAGE_SIZE),
SPAPR_TCE_PAGE_SIZE) < 0) {
hcall_dprintf("Bad buf_list 0x" TARGET_FMT_lx "\n", buf_list);
return H_PARAMETER;
}
filter_list_bd = VLAN_VALID_BD(filter_list, SPAPR_TCE_PAGE_SIZE);
if (check_bd(dev, filter_list_bd, SPAPR_TCE_PAGE_SIZE) < 0) {
hcall_dprintf("Bad filter_list 0x" TARGET_FMT_lx "\n", filter_list);
return H_PARAMETER;
}
if (!(rec_queue & VLAN_BD_VALID)
|| (check_bd(dev, rec_queue, VLAN_RQ_ALIGNMENT) < 0)) {
hcall_dprintf("Bad receive queue\n");
return H_PARAMETER;
}
dev->buf_list = buf_list;
sdev->signal_state = 0;
rec_queue &= ~VLAN_BD_TOGGLE;
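    /*
     * The buffer list page initialized below is laid out as follows
     * (cf. the VLAN_*_OFF defines at the top of this file):
     *   +0            receive queue buffer descriptor
     *   +8            filter list buffer descriptor
     *   +16 onwards   receive buffer descriptors
     *   last 8 bytes  dropped-frame counter
     */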
/* Initialize the buffer list */
vio_stq(sdev, buf_list, rec_queue);
vio_stq(sdev, buf_list + 8, filter_list_bd);
spapr_vio_dma_set(sdev, buf_list + VLAN_RX_BDS_OFF, 0,
SPAPR_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF);
dev->add_buf_ptr = VLAN_RX_BDS_OFF - 8;
dev->use_buf_ptr = VLAN_RX_BDS_OFF - 8;
dev->rx_bufs = 0;
dev->rxq_ptr = 0;
/* Initialize the receive queue */
spapr_vio_dma_set(sdev, VLAN_BD_ADDR(rec_queue), 0, VLAN_BD_LEN(rec_queue));
dev->isopen = 1;
qemu_flush_queued_packets(qemu_get_queue(dev->nic));
return H_SUCCESS;
}
static target_ulong h_free_logical_lan(PowerPCCPU *cpu,
SpaprMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
target_ulong reg = args[0];
SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
if (!dev) {
return H_PARAMETER;
}
if (!dev->isopen) {
hcall_dprintf("H_FREE_LOGICAL_LAN called without "
"H_REGISTER_LOGICAL_LAN\n");
return H_RESOURCE;
}
spapr_vlan_reset(sdev);
return H_SUCCESS;
}
/**
* Used for qsort, this function compares two RxBufPools by size.
*/
static int rx_pool_size_compare(const void *p1, const void *p2)
{
const RxBufPool *pool1 = *(RxBufPool **)p1;
const RxBufPool *pool2 = *(RxBufPool **)p2;
if (pool1->bufsize < pool2->bufsize) {
return -1;
}
return pool1->bufsize > pool2->bufsize;
}
/**
 * Search for a buffer pool with exactly matching size,
 * or return -1 if no matching pool has been found.
 */
static int spapr_vlan_get_rx_pool_id(SpaprVioVlan *dev, int size)
{
int pool;
for (pool = 0; pool < RX_MAX_POOLS; pool++) {
if (dev->rx_pool[pool]->bufsize == size) {
return pool;
}
}
return -1;
}
/**
 * Enqueue a receive buffer by adding it to one of our receive buffer pools
 */
static target_long spapr_vlan_add_rxbuf_to_pool(SpaprVioVlan *dev,
target_ulong buf)
{
int size = VLAN_BD_LEN(buf);
int pool;
pool = spapr_vlan_get_rx_pool_id(dev, size);
if (pool < 0) {
/*
* No matching pool found? Try to use a new one. If the guest used all
* pools before, but changed the size of one pool in the meantime, we might
* need to recycle that pool here (if it's empty already). Thus scan
* all buffer pools now, starting with the last (likely empty) one.
*/
for (pool = RX_MAX_POOLS - 1; pool >= 0 ; pool--) {
if (dev->rx_pool[pool]->count == 0) {
dev->rx_pool[pool]->bufsize = size;
/*
* Sort pools by size so that spapr_vlan_receive()
* can later find the smallest buffer pool easily.
*/
qsort(dev->rx_pool, RX_MAX_POOLS, sizeof(dev->rx_pool[0]),
rx_pool_size_compare);
pool = spapr_vlan_get_rx_pool_id(dev, size);
trace_spapr_vlan_add_rxbuf_to_pool_create(pool,
VLAN_BD_LEN(buf));
break;
}
}
}
/* Still no usable pool? Give up */
if (pool < 0 || dev->rx_pool[pool]->count >= RX_POOL_MAX_BDS) {
return H_RESOURCE;
}
trace_spapr_vlan_add_rxbuf_to_pool(pool, VLAN_BD_LEN(buf),
dev->rx_pool[pool]->count);
dev->rx_pool[pool]->bds[dev->rx_pool[pool]->count++] = buf;
return 0;
}
/**
 * This is the old way of enqueuing a receive buffer: add it to the buffer
 * list page that has been supplied by the guest (which is quite limited
 * in size).
 */
static target_long spapr_vlan_add_rxbuf_to_page(SpaprVioVlan *dev,
target_ulong buf)
{
vlan_bd_t bd;
if (dev->rx_bufs >= VLAN_MAX_BUFS) {
return H_RESOURCE;
}
do {
dev->add_buf_ptr += 8;
if (dev->add_buf_ptr >= VLAN_RX_BDS_LEN + VLAN_RX_BDS_OFF) {
dev->add_buf_ptr = VLAN_RX_BDS_OFF;
}
bd = vio_ldq(&dev->sdev, dev->buf_list + dev->add_buf_ptr);
} while (bd & VLAN_BD_VALID);
vio_stq(&dev->sdev, dev->buf_list + dev->add_buf_ptr, buf);
trace_spapr_vlan_add_rxbuf_to_page(dev->add_buf_ptr, dev->rx_bufs, buf);
return 0;
}
static target_ulong h_add_logical_lan_buffer(PowerPCCPU *cpu,
SpaprMachineState *spapr,
target_ulong opcode,
target_ulong *args)
{
target_ulong reg = args[0];
target_ulong buf = args[1];
SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
target_long ret;
trace_spapr_vlan_h_add_logical_lan_buffer(reg, buf);
if (!sdev) {
hcall_dprintf("Bad device\n");
return H_PARAMETER;
}
if ((check_bd(dev, buf, 4) < 0)
|| (VLAN_BD_LEN(buf) < 16)) {
hcall_dprintf("Bad buffer enqueued\n");
return H_PARAMETER;
}
if (!dev->isopen) {
return H_RESOURCE;
}
if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
ret = spapr_vlan_add_rxbuf_to_pool(dev, buf);
} else {
ret = spapr_vlan_add_rxbuf_to_page(dev, buf);
}
if (ret) {
return ret;
}
dev->rx_bufs++;
    /*
     * Give the guest some more time to add additional RX buffers before
     * we flush the receive queue, so that e.g. fragmented IP packets can
     * be passed to the guest in one go later (instead of passing single
     * fragments when only one receive buffer is available).
     */
timer_mod(dev->rxp_timer, qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) + 500);
return H_SUCCESS;
}
static target_ulong h_send_logical_lan(PowerPCCPU *cpu,
SpaprMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
target_ulong reg = args[0];
target_ulong *bufs = args + 1;
target_ulong continue_token = args[7];
SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
unsigned total_len;
uint8_t *lbuf, *p;
int i, nbufs;
int ret;
trace_spapr_vlan_h_send_logical_lan(reg, continue_token);
if (!sdev) {
return H_PARAMETER;
}
trace_spapr_vlan_h_send_logical_lan_rxbufs(dev->rx_bufs);
if (!dev->isopen) {
return H_DROPPED;
}
if (continue_token) {
return H_HARDWARE; /* FIXME actually handle this */
}
total_len = 0;
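    /*
     * H_SEND_LOGICAL_LAN passes up to six buffer descriptors in
     * args[1]..args[6]; scan them until the first invalid one.
     */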
for (i = 0; i < 6; i++) {
trace_spapr_vlan_h_send_logical_lan_buf_desc(bufs[i]);
if (!(bufs[i] & VLAN_BD_VALID)) {
break;
}
total_len += VLAN_BD_LEN(bufs[i]);
}
nbufs = i;
trace_spapr_vlan_h_send_logical_lan_total(nbufs, total_len);
if (total_len == 0) {
return H_SUCCESS;
}
if (total_len > MAX_PACKET_SIZE) {
/* Don't let the guest force too large an allocation */
return H_RESOURCE;
}
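    /*
     * total_len is bounded by MAX_PACKET_SIZE (64 KiB) at this point,
     * which keeps the alloca() below within a sane stack limit.
     */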
lbuf = alloca(total_len);
p = lbuf;
for (i = 0; i < nbufs; i++) {
ret = spapr_vio_dma_read(sdev, VLAN_BD_ADDR(bufs[i]),
p, VLAN_BD_LEN(bufs[i]));
if (ret < 0) {
return ret;
}
p += VLAN_BD_LEN(bufs[i]);
}
qemu_send_packet(qemu_get_queue(dev->nic), lbuf, total_len);
return H_SUCCESS;
}
static target_ulong h_multicast_ctrl(PowerPCCPU *cpu, SpaprMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
target_ulong reg = args[0];
SpaprVioDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
if (!dev) {
return H_PARAMETER;
}
return H_SUCCESS;
}
static target_ulong h_change_logical_lan_mac(PowerPCCPU *cpu,
SpaprMachineState *spapr,
target_ulong opcode,
target_ulong *args)
{
target_ulong reg = args[0];
target_ulong macaddr = args[1];
SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
    int i;
    if (!dev) {
        return H_PARAMETER;
    }
for (i = 0; i < ETH_ALEN; i++) {
dev->nicconf.macaddr.a[ETH_ALEN - i - 1] = macaddr & 0xff;
macaddr >>= 8;
}
qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a);
return H_SUCCESS;
}
static Property spapr_vlan_properties[] = {
DEFINE_SPAPR_PROPERTIES(SpaprVioVlan, sdev),
DEFINE_NIC_PROPERTIES(SpaprVioVlan, nicconf),
DEFINE_PROP_BIT("use-rx-buffer-pools", SpaprVioVlan,
compat_flags, SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT, true),
DEFINE_PROP_END_OF_LIST(),
};
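/*
 * Presumably, older pseries machine types turn "use-rx-buffer-pools" off
 * via their compat properties, since SPAPRVLAN_FLAG_RX_BUF_POOLS gates
 * both the pool code paths and the vmstate subsection below.
 */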
static bool spapr_vlan_rx_buffer_pools_needed(void *opaque)
{
SpaprVioVlan *dev = opaque;
return (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) != 0;
}
static const VMStateDescription vmstate_rx_buffer_pool = {
.name = "spapr_llan/rx_buffer_pool",
.version_id = 1,
.minimum_version_id = 1,
.needed = spapr_vlan_rx_buffer_pools_needed,
.fields = (VMStateField[]) {
VMSTATE_INT32(bufsize, RxBufPool),
VMSTATE_INT32(count, RxBufPool),
VMSTATE_UINT64_ARRAY(bds, RxBufPool, RX_POOL_MAX_BDS),
VMSTATE_END_OF_LIST()
}
};
static const VMStateDescription vmstate_rx_pools = {
.name = "spapr_llan/rx_pools",
.version_id = 1,
.minimum_version_id = 1,
.needed = spapr_vlan_rx_buffer_pools_needed,
.fields = (VMStateField[]) {
VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(rx_pool, SpaprVioVlan,
RX_MAX_POOLS, 1,
vmstate_rx_buffer_pool, RxBufPool),
VMSTATE_END_OF_LIST()
}
};
static const VMStateDescription vmstate_spapr_llan = {
.name = "spapr_llan",
.version_id = 1,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
VMSTATE_SPAPR_VIO(sdev, SpaprVioVlan),
/* LLAN state */
VMSTATE_BOOL(isopen, SpaprVioVlan),
VMSTATE_UINT64(buf_list, SpaprVioVlan),
VMSTATE_UINT32(add_buf_ptr, SpaprVioVlan),
VMSTATE_UINT32(use_buf_ptr, SpaprVioVlan),
VMSTATE_UINT32(rx_bufs, SpaprVioVlan),
VMSTATE_UINT64(rxq_ptr, SpaprVioVlan),
VMSTATE_END_OF_LIST()
},
.subsections = (const VMStateDescription * []) {
&vmstate_rx_pools,
NULL
}
};
static void spapr_vlan_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass);
k->realize = spapr_vlan_realize;
k->reset = spapr_vlan_reset;
k->devnode = spapr_vlan_devnode;
k->dt_name = "l-lan";
k->dt_type = "network";
k->dt_compatible = "IBM,l-lan";
k->signal_mask = 0x1;
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
dc->props = spapr_vlan_properties;
k->rtce_window_size = 0x10000000;
dc->vmsd = &vmstate_spapr_llan;
}
static const TypeInfo spapr_vlan_info = {
.name = TYPE_VIO_SPAPR_VLAN_DEVICE,
.parent = TYPE_VIO_SPAPR_DEVICE,
.instance_size = sizeof(SpaprVioVlan),
.class_init = spapr_vlan_class_init,
.instance_init = spapr_vlan_instance_init,
.instance_finalize = spapr_vlan_instance_finalize,
};
static void spapr_vlan_register_types(void)
{
spapr_register_hypercall(H_REGISTER_LOGICAL_LAN, h_register_logical_lan);
spapr_register_hypercall(H_FREE_LOGICAL_LAN, h_free_logical_lan);
spapr_register_hypercall(H_SEND_LOGICAL_LAN, h_send_logical_lan);
spapr_register_hypercall(H_ADD_LOGICAL_LAN_BUFFER,
h_add_logical_lan_buffer);
spapr_register_hypercall(H_MULTICAST_CTRL, h_multicast_ctrl);
spapr_register_hypercall(H_CHANGE_LOGICAL_LAN_MAC,
h_change_logical_lan_mac);
type_register_static(&spapr_vlan_info);
}
type_init(spapr_vlan_register_types)