This source file includes following definitions.
- marshal_virt_to_resize
- marshal_clone_to_rele
- ba_init
- find_free_range
- ba_alloc
- validate_alloc
- ba_free
- ba_clone
- ba_space
- cxlflash_ba_terminate
- init_vlun
- write_same16
- grow_lxt
- shrink_lxt
- _cxlflash_vlun_resize
- cxlflash_vlun_resize
- cxlflash_restore_luntable
- get_num_ports
- init_luntable
- cxlflash_disk_virtual_open
- clone_lxt
- cxlflash_disk_clone
1
2
3
4
5
6
7
8
9
10
11 #include <linux/interrupt.h>
12 #include <linux/pci.h>
13 #include <linux/syscalls.h>
14 #include <asm/unaligned.h>
15 #include <asm/bitsperlong.h>
16
17 #include <scsi/scsi_cmnd.h>
18 #include <scsi/scsi_host.h>
19 #include <uapi/scsi/cxlflash_ioctl.h>
20
21 #include "sislite.h"
22 #include "common.h"
23 #include "vlun.h"
24 #include "superpipe.h"
25
26
27
28
29
30
31 static void marshal_virt_to_resize(struct dk_cxlflash_uvirtual *virt,
32 struct dk_cxlflash_resize *resize)
33 {
34 resize->hdr = virt->hdr;
35 resize->context_id = virt->context_id;
36 resize->rsrc_handle = virt->rsrc_handle;
37 resize->req_size = virt->lun_size;
38 resize->last_lba = virt->last_lba;
39 }
40
41
42
43
44
45
46 static void marshal_clone_to_rele(struct dk_cxlflash_clone *clone,
47 struct dk_cxlflash_release *release)
48 {
49 release->hdr = clone->hdr;
50 release->context_id = clone->context_id_dst;
51 }
52
53
54
55
56
57
58
/*
 * ba_init() - initializes a block allocator
 * @ba_lun:	Block allocator to initialize.
 *
 * The allocator state is kept in a ba_lun_info hung off ba_lun_handle.
 * Convention used throughout: a SET bit in lun_alloc_map means the
 * corresponding allocation unit (AU) is FREE; a cleared bit means it is
 * allocated (or does not exist).
 *
 * Return: 0 on success, -errno on failure
 */
static int ba_init(struct ba_lun *ba_lun)
{
	struct ba_lun_info *bali = NULL;
	int lun_size_au = 0, i = 0;
	int last_word_underflow = 0;
	u64 *lam;

	pr_debug("%s: Initializing LUN: lun_id=%016llx "
		 "ba_lun->lsize=%lx ba_lun->au_size=%lX\n",
		 __func__, ba_lun->lun_id, ba_lun->lsize, ba_lun->au_size);

	/* Calculate bit map size */
	lun_size_au = ba_lun->lsize / ba_lun->au_size;
	if (lun_size_au == 0) {
		pr_debug("%s: Requested LUN size of 0!\n", __func__);
		return -EINVAL;
	}

	/* Allocate lun information container */
	bali = kzalloc(sizeof(struct ba_lun_info), GFP_KERNEL);
	if (unlikely(!bali)) {
		pr_err("%s: Failed to allocate lun_info lun_id=%016llx\n",
		       __func__, ba_lun->lun_id);
		return -ENOMEM;
	}

	bali->total_aus = lun_size_au;
	bali->lun_bmap_size = lun_size_au / BITS_PER_LONG;

	/* Round the bitmap word count up for a partial final word */
	if (lun_size_au % BITS_PER_LONG)
		bali->lun_bmap_size++;

	/* Allocate bitmap space */
	bali->lun_alloc_map = kzalloc((bali->lun_bmap_size * sizeof(u64)),
				      GFP_KERNEL);
	if (unlikely(!bali->lun_alloc_map)) {
		pr_err("%s: Failed to allocate lun allocation map: "
		       "lun_id=%016llx\n", __func__, ba_lun->lun_id);
		kfree(bali);
		return -ENOMEM;
	}

	/* Initialize the bit map size and set all bits as free (AU available) */
	bali->free_aun_cnt = lun_size_au;

	for (i = 0; i < bali->lun_bmap_size; i++)
		bali->lun_alloc_map[i] = 0xFFFFFFFFFFFFFFFFULL;

	/*
	 * If the last word is only partially used, clear the trailing pad
	 * bits so they are never handed out (cleared == not free).
	 */
	last_word_underflow = (bali->lun_bmap_size * BITS_PER_LONG);
	last_word_underflow -= bali->free_aun_cnt;
	if (last_word_underflow > 0) {
		lam = &bali->lun_alloc_map[bali->lun_bmap_size - 1];
		for (i = (HIBIT - last_word_underflow + 1);
		     i < BITS_PER_LONG;
		     i++)
			clear_bit(i, (ulong *)lam);
	}

	/* Initialize high elevator index, low/curr already at 0 from kzalloc */
	bali->free_high_idx = bali->lun_bmap_size;

	/* Allocate clone map; one reference counter per AU */
	bali->aun_clone_map = kzalloc((bali->total_aus * sizeof(u8)),
				      GFP_KERNEL);
	if (unlikely(!bali->aun_clone_map)) {
		pr_err("%s: Failed to allocate clone map: lun_id=%016llx\n",
		       __func__, ba_lun->lun_id);
		kfree(bali->lun_alloc_map);
		kfree(bali);
		return -ENOMEM;
	}

	/* Pass the allocated LUN info as a handle to the user */
	ba_lun->ba_lun_handle = bali;

	pr_debug("%s: Successfully initialized the LUN: "
		 "lun_id=%016llx bitmap size=%x, free_aun_cnt=%llx\n",
		 __func__, ba_lun->lun_id, bali->lun_bmap_size,
		 bali->free_aun_cnt);
	return 0;
}
141
142
143
144
145
146
147
148
149
150
/*
 * find_free_range() - locates a free bit within the block allocator
 * @low:	First word in block allocator [bitmap index] to examine.
 * @high:	Last word in block allocator [bitmap index] to examine.
 * @bali:	LUN information structure owning the block allocator to search.
 * @bit_word:	Passes back the word in the block allocator owning the free bit.
 *
 * On success the found bit is immediately claimed: it is cleared in the
 * map and free_aun_cnt is decremented, so the caller owns the AU.
 *
 * Return: The bit position of the free bit, -1 if none was found.
 * NOTE(review): bit_pos is a u64 initialized to -1 and returned through an
 * int — the -1 sentinel survives the truncation, but valid positions rely
 * on being < INT_MAX (always true here since positions are < BITS_PER_LONG).
 */
static int find_free_range(u32 low,
			   u32 high,
			   struct ba_lun_info *bali, int *bit_word)
{
	int i;
	u64 bit_pos = -1;
	ulong *lam, num_bits;

	for (i = low; i < high; i++)
		if (bali->lun_alloc_map[i] != 0) {
			/* A set bit marks a free AU; grab the first one */
			lam = (ulong *)&bali->lun_alloc_map[i];
			num_bits = (sizeof(*lam) * BITS_PER_BYTE);
			bit_pos = find_first_bit(lam, num_bits);

			pr_devel("%s: Found free bit %llu in LUN "
				 "map entry %016llx at bitmap index = %d\n",
				 __func__, bit_pos, bali->lun_alloc_map[i], i);

			*bit_word = i;
			bali->free_aun_cnt--;
			clear_bit(bit_pos, lam);
			break;
		}

	return bit_pos;
}
177
178
179
180
181
182
183
/*
 * ba_alloc() - allocates a block from the block allocator
 * @ba_lun:	Block allocator from which to allocate a block.
 *
 * Scans from the current cursor (free_curr_idx) up to free_high_idx and,
 * if nothing is found, wraps around to scan free_low_idx..free_curr_idx.
 *
 * Return: The allocated AU number, -1ULL on failure.
 */
static u64 ba_alloc(struct ba_lun *ba_lun)
{
	u64 bit_pos = -1;
	int bit_word = 0;
	struct ba_lun_info *bali = NULL;

	bali = ba_lun->ba_lun_handle;

	pr_debug("%s: Received block allocation request: "
		 "lun_id=%016llx free_aun_cnt=%llx\n",
		 __func__, ba_lun->lun_id, bali->free_aun_cnt);

	if (bali->free_aun_cnt == 0) {
		pr_debug("%s: No space left on LUN: lun_id=%016llx\n",
			 __func__, ba_lun->lun_id);
		return -1ULL;
	}

	/* Search to find a free entry, curr->high then low->curr */
	bit_pos = find_free_range(bali->free_curr_idx,
				  bali->free_high_idx, bali, &bit_word);
	if (bit_pos == -1) {
		bit_pos = find_free_range(bali->free_low_idx,
					  bali->free_curr_idx,
					  bali, &bit_word);
		if (bit_pos == -1) {
			pr_debug("%s: Could not find an allocation unit on LUN:"
				 " lun_id=%016llx\n", __func__, ba_lun->lun_id);
			return -1ULL;
		}
	}

	/*
	 * Update the free_curr_idx cursor: if the highest bit of the word
	 * was taken, the next search can skip to the following word.
	 */
	if (bit_pos == HIBIT)
		bali->free_curr_idx = bit_word + 1;
	else
		bali->free_curr_idx = bit_word;

	pr_debug("%s: Allocating AU number=%llx lun_id=%016llx "
		 "free_aun_cnt=%llx\n", __func__,
		 ((bit_word * BITS_PER_LONG) + bit_pos), ba_lun->lun_id,
		 bali->free_aun_cnt);

	return (u64) ((bit_word * BITS_PER_LONG) + bit_pos);
}
229
230
231
232
233
234
235
236
237 static int validate_alloc(struct ba_lun_info *bali, u64 aun)
238 {
239 int idx = 0, bit_pos = 0;
240
241 idx = aun / BITS_PER_LONG;
242 bit_pos = aun % BITS_PER_LONG;
243
244 if (test_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]))
245 return -1;
246
247 return 0;
248 }
249
250
251
252
253
254
255
256
/*
 * ba_free() - frees a block from the block allocator
 * @ba_lun:	Block allocator from which to free a block.
 * @to_free:	Block (AU number) to free.
 *
 * Cloned AUs are reference counted; freeing a cloned AU only drops one
 * reference. The AU is truly returned to the free pool (bit set in the
 * allocation map) when no clone references remain.
 *
 * Return: 0 on success, -1 on failure (AU was not allocated).
 */
static int ba_free(struct ba_lun *ba_lun, u64 to_free)
{
	int idx = 0, bit_pos = 0;
	struct ba_lun_info *bali = NULL;

	bali = ba_lun->ba_lun_handle;

	if (validate_alloc(bali, to_free)) {
		pr_debug("%s: AUN %llx is not allocated on lun_id=%016llx\n",
			 __func__, to_free, ba_lun->lun_id);
		return -1;
	}

	pr_debug("%s: Received a request to free AU=%llx lun_id=%016llx "
		 "free_aun_cnt=%llx\n", __func__, to_free, ba_lun->lun_id,
		 bali->free_aun_cnt);

	if (bali->aun_clone_map[to_free] > 0) {
		/* AU is cloned; drop one reference instead of freeing */
		pr_debug("%s: AUN %llx lun_id=%016llx cloned. Clone count=%x\n",
			 __func__, to_free, ba_lun->lun_id,
			 bali->aun_clone_map[to_free]);
		bali->aun_clone_map[to_free]--;
		return 0;
	}

	idx = to_free / BITS_PER_LONG;
	bit_pos = to_free % BITS_PER_LONG;

	/* Setting the bit marks the AU free again */
	set_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]);
	bali->free_aun_cnt++;

	/* Widen the tracked range of words known to contain free bits */
	if (idx < bali->free_low_idx)
		bali->free_low_idx = idx;
	else if (idx > bali->free_high_idx)
		bali->free_high_idx = idx;

	pr_debug("%s: Successfully freed AU bit_pos=%x bit map index=%x "
		 "lun_id=%016llx free_aun_cnt=%llx\n", __func__, bit_pos, idx,
		 ba_lun->lun_id, bali->free_aun_cnt);

	return 0;
}
299
300
301
302
303
304
305
306
307 static int ba_clone(struct ba_lun *ba_lun, u64 to_clone)
308 {
309 struct ba_lun_info *bali = ba_lun->ba_lun_handle;
310
311 if (validate_alloc(bali, to_clone)) {
312 pr_debug("%s: AUN=%llx not allocated on lun_id=%016llx\n",
313 __func__, to_clone, ba_lun->lun_id);
314 return -1;
315 }
316
317 pr_debug("%s: Received a request to clone AUN %llx on lun_id=%016llx\n",
318 __func__, to_clone, ba_lun->lun_id);
319
320 if (bali->aun_clone_map[to_clone] == MAX_AUN_CLONE_CNT) {
321 pr_debug("%s: AUN %llx on lun_id=%016llx hit max clones already\n",
322 __func__, to_clone, ba_lun->lun_id);
323 return -1;
324 }
325
326 bali->aun_clone_map[to_clone]++;
327
328 return 0;
329 }
330
331
332
333
334
335
336
337 static u64 ba_space(struct ba_lun *ba_lun)
338 {
339 struct ba_lun_info *bali = ba_lun->ba_lun_handle;
340
341 return bali->free_aun_cnt;
342 }
343
344
345
346
347
348
349
350 void cxlflash_ba_terminate(struct ba_lun *ba_lun)
351 {
352 struct ba_lun_info *bali = ba_lun->ba_lun_handle;
353
354 if (bali) {
355 kfree(bali->aun_clone_map);
356 kfree(bali->lun_alloc_map);
357 kfree(bali);
358 ba_lun->ba_lun_handle = NULL;
359 }
360 }
361
362
363
364
365
366
367
368 static int init_vlun(struct llun_info *lli)
369 {
370 int rc = 0;
371 struct glun_info *gli = lli->parent;
372 struct blka *blka = &gli->blka;
373
374 memset(blka, 0, sizeof(*blka));
375 mutex_init(&blka->mutex);
376
377
378 blka->ba_lun.lun_id = lli->lun_index;
379 blka->ba_lun.lsize = gli->max_lba + 1;
380 blka->ba_lun.lba_size = gli->blk_len;
381
382 blka->ba_lun.au_size = MC_CHUNK_SIZE;
383 blka->nchunk = blka->ba_lun.lsize / MC_CHUNK_SIZE;
384
385 rc = ba_init(&blka->ba_lun);
386 if (unlikely(rc))
387 pr_debug("%s: cannot init block_alloc, rc=%d\n", __func__, rc);
388
389 pr_debug("%s: returning rc=%d lli=%p\n", __func__, rc, lli);
390 return rc;
391 }
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419 static int write_same16(struct scsi_device *sdev,
420 u64 lba,
421 u32 nblks)
422 {
423 u8 *cmd_buf = NULL;
424 u8 *scsi_cmd = NULL;
425 int rc = 0;
426 int result = 0;
427 u64 offset = lba;
428 int left = nblks;
429 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
430 struct device *dev = &cfg->dev->dev;
431 const u32 s = ilog2(sdev->sector_size) - 9;
432 const u32 to = sdev->request_queue->rq_timeout;
433 const u32 ws_limit = blk_queue_get_max_sectors(sdev->request_queue,
434 REQ_OP_WRITE_SAME) >> s;
435
436 cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
437 scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
438 if (unlikely(!cmd_buf || !scsi_cmd)) {
439 rc = -ENOMEM;
440 goto out;
441 }
442
443 while (left > 0) {
444
445 scsi_cmd[0] = WRITE_SAME_16;
446 scsi_cmd[1] = cfg->ws_unmap ? 0x8 : 0;
447 put_unaligned_be64(offset, &scsi_cmd[2]);
448 put_unaligned_be32(ws_limit < left ? ws_limit : left,
449 &scsi_cmd[10]);
450
451
452 up_read(&cfg->ioctl_rwsem);
453 result = scsi_execute(sdev, scsi_cmd, DMA_TO_DEVICE, cmd_buf,
454 CMD_BUFSIZE, NULL, NULL, to,
455 CMD_RETRIES, 0, 0, NULL);
456 down_read(&cfg->ioctl_rwsem);
457 rc = check_state(cfg);
458 if (rc) {
459 dev_err(dev, "%s: Failed state result=%08x\n",
460 __func__, result);
461 rc = -ENODEV;
462 goto out;
463 }
464
465 if (result) {
466 dev_err_ratelimited(dev, "%s: command failed for "
467 "offset=%lld result=%08x\n",
468 __func__, offset, result);
469 rc = -EIO;
470 goto out;
471 }
472 left -= ws_limit;
473 offset += ws_limit;
474 }
475
476 out:
477 kfree(cmd_buf);
478 kfree(scsi_cmd);
479 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
480 return rc;
481 }
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
/*
 * grow_lxt() - expands the translation table associated with the specified RHTE
 * @afu:	AFU associated with the host.
 * @sdev:	SCSI device associated with LUN.
 * @ctxid:	Context ID of context owning the RHTE.
 * @rhndl:	Resource handle associated with the RHTE.
 * @rhte:	Resource handle entry (RHTE).
 * @new_size:	Number of translation entries associated with RHTE.
 *
 * By design, this routine employs a 'best attempt' allocation and will
 * silently cap the requested growth (delta) to the space available in
 * the block allocator. On exit, @new_size reflects the size actually
 * achieved.
 *
 * Return: 0 on success, -errno on failure
 */
static int grow_lxt(struct afu *afu,
		    struct scsi_device *sdev,
		    ctx_hndl_t ctxid,
		    res_hndl_t rhndl,
		    struct sisl_rht_entry *rhte,
		    u64 *new_size)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct sisl_lxt_entry *lxt = NULL, *lxt_old = NULL;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct blka *blka = &gli->blka;
	u32 av_size;
	u32 ngrps, ngrps_old;
	u64 aun;		/* chunk# allocated by block allocator */
	u64 delta = *new_size - rhte->lxt_cnt;
	u64 my_new_size;
	int i, rc = 0;

	/*
	 * Check available space up front, under the allocator mutex, so the
	 * per-chunk allocations below are expected to succeed.
	 * NOTE(review): av_size is u32, so '<= 0' is effectively '== 0' and
	 * the u64 free count from ba_space() is truncated — confirm counts
	 * cannot exceed 32 bits here.
	 */
	mutex_lock(&blka->mutex);
	av_size = ba_space(&blka->ba_lun);
	if (unlikely(av_size <= 0)) {
		dev_dbg(dev, "%s: ba_space error av_size=%d\n",
			__func__, av_size);
		mutex_unlock(&blka->mutex);
		rc = -ENOSPC;
		goto out;
	}

	/* Best attempt: cap growth to what is actually available */
	if (av_size < delta)
		delta = av_size;

	lxt_old = rhte->lxt_start;
	ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
	ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt + delta);

	if (ngrps != ngrps_old) {
		/* Reallocate the LXT in whole-group granularity */
		lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
			      GFP_KERNEL);
		if (unlikely(!lxt)) {
			mutex_unlock(&blka->mutex);
			rc = -ENOMEM;
			goto out;
		}

		/* Copy over the existing entries */
		memcpy(lxt, lxt_old, (sizeof(*lxt) * rhte->lxt_cnt));
	} else
		lxt = lxt_old;

	/* Nothing can fail from now on */
	my_new_size = rhte->lxt_cnt + delta;

	/* Populate the new entries from the block allocator */
	for (i = rhte->lxt_cnt; i < my_new_size; i++) {
		/*
		 * Due to the earlier ba_space() check, an allocation failure
		 * here is unexpected; it is only logged, not acted upon.
		 * NOTE(review): on failure, aun is -1ULL and a garbage
		 * rlba_base is still written below — confirm this cannot
		 * occur in practice (mutex held since the space check).
		 */
		aun = ba_alloc(&blka->ba_lun);
		if ((aun == -1ULL) || (aun >= blka->nchunk))
			dev_dbg(dev, "%s: ba_alloc error allocated chunk=%llu "
				"max=%llu\n", __func__, aun, blka->nchunk - 1);

		/* Encode chunk#, LUN index, permissions and port selection */
		lxt[i].rlba_base = ((aun << MC_CHUNK_SHIFT) |
				    (lli->lun_index << LXT_LUNIDX_SHIFT) |
				    (RHT_PERM_RW << LXT_PERM_SHIFT |
				     lli->port_sel));
	}

	mutex_unlock(&blka->mutex);

	/*
	 * The write barriers below order the updates so the AFU never
	 * observes a count that exceeds the published table: entries first,
	 * then the table pointer, then the count.
	 */
	dma_wmb(); /* Make LXT updates are visible */

	rhte->lxt_start = lxt;
	dma_wmb(); /* Make RHT entry's LXT table update visible */

	rhte->lxt_cnt = my_new_size;
	dma_wmb(); /* Make RHT entry's LXT table size update visible */

	/* Light-weight sync is sufficient when growing */
	rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
	if (unlikely(rc))
		rc = -EAGAIN;

	/* Free old LXT if it was reallocated */
	if (lxt != lxt_old)
		kfree(lxt_old);
	*new_size = my_new_size;
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
606
607
608
609
610
611
612
613
614
615
616
617
/*
 * shrink_lxt() - reduces the translation table associated with the specified RHTE
 * @afu:	AFU associated with the host.
 * @sdev:	SCSI device associated with LUN.
 * @rhndl:	Resource handle associated with the RHTE.
 * @rhte:	Resource handle entry (RHTE).
 * @ctxi:	Context owning resources (holds its mutex on entry).
 * @new_size:	Number of translation entries associated with RHTE.
 *
 * Return: 0 on success, -errno on failure
 */
static int shrink_lxt(struct afu *afu,
		      struct scsi_device *sdev,
		      res_hndl_t rhndl,
		      struct sisl_rht_entry *rhte,
		      struct ctx_info *ctxi,
		      u64 *new_size)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct sisl_lxt_entry *lxt, *lxt_old;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct blka *blka = &gli->blka;
	ctx_hndl_t ctxid = DECODE_CTXID(ctxi->ctxid);
	bool needs_ws = ctxi->rht_needs_ws[rhndl];
	bool needs_sync = !ctxi->err_recovery_active;
	u32 ngrps, ngrps_old;
	u64 aun;		/* chunk# released back to block allocator */
	u64 delta = rhte->lxt_cnt - *new_size;
	u64 my_new_size;
	int i, rc = 0;

	lxt_old = rhte->lxt_start;
	ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
	ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt - delta);

	if (ngrps != ngrps_old) {
		/* Reallocate in whole-group granularity (NULL when empty) */
		if (ngrps) {
			lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
				      GFP_KERNEL);
			if (unlikely(!lxt)) {
				rc = -ENOMEM;
				goto out;
			}

			/* Copy over the surviving entries */
			memcpy(lxt, lxt_old,
			       (sizeof(*lxt) * (rhte->lxt_cnt - delta)));
		} else
			lxt = NULL;
	} else
		lxt = lxt_old;

	/* Nothing can fail from now on */
	my_new_size = rhte->lxt_cnt - delta;

	/*
	 * Reverse of the grow sequence: publish the smaller count FIRST so
	 * the AFU stops referencing the entries being removed, then swap in
	 * the (possibly reallocated) table pointer.
	 */
	rhte->lxt_cnt = my_new_size;
	dma_wmb(); /* Make RHT entry's LXT table size update visible */

	rhte->lxt_start = lxt;
	dma_wmb(); /* Make RHT entry's LXT table update visible */

	/* Heavy-weight sync required when shrinking, unless recovering */
	if (needs_sync) {
		rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
		if (unlikely(rc))
			rc = -EAGAIN;
	}

	if (needs_ws) {
		/*
		 * Mark the context as unavailable and drop its mutex while
		 * the (slow) write-same commands are issued, so other users
		 * of the context are not blocked for the duration.
		 */
		ctxi->unavail = true;
		mutex_unlock(&ctxi->mutex);
	}

	/* Free LBAs allocated to freed chunks */
	mutex_lock(&blka->mutex);
	for (i = delta - 1; i >= 0; i--) {
		aun = lxt_old[my_new_size + i].rlba_base >> MC_CHUNK_SHIFT;
		if (needs_ws)
			write_same16(sdev, aun, MC_CHUNK_SIZE);
		ba_free(&blka->ba_lun, aun);
	}
	mutex_unlock(&blka->mutex);

	if (needs_ws) {
		/* Reacquire the context mutex and mark it available again */
		mutex_lock(&ctxi->mutex);
		ctxi->unavail = false;
	}

	/* Free old LXT if it was reallocated */
	if (lxt != lxt_old)
		kfree(lxt_old);
	*new_size = my_new_size;
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
/*
 * _cxlflash_vlun_resize() - changes the size of a virtual LUN
 * @sdev:	SCSI device associated with LUN owning virtual LUN.
 * @ctxi:	Context owning resources, or NULL to look it up here.
 * @resize:	Resize ioctl data structure.
 *
 * On successful return, the user is informed of the new size (in blocks)
 * of the virtual LUN via last_lba. Since the size is internally tracked
 * in chunks, the achieved size may be rounded up from what was requested.
 *
 * Return: 0 on success, -errno on failure
 */
int _cxlflash_vlun_resize(struct scsi_device *sdev,
			  struct ctx_info *ctxi,
			  struct dk_cxlflash_resize *resize)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct afu *afu = cfg->afu;
	bool put_ctx = false;	/* only release a context we looked up */

	res_hndl_t rhndl = resize->rsrc_handle;
	u64 new_size;
	u64 nsectors;
	u64 ctxid = DECODE_CTXID(resize->context_id),
	    rctxid = resize->context_id;

	struct sisl_rht_entry *rhte;

	int rc = 0;

	/*
	 * Convert the requested size from ioctl block units
	 * (CXLFLASH_BLOCK_SIZE) into device sectors, then round up to
	 * whole MC_CHUNK_SIZE chunks.
	 */
	nsectors = (resize->req_size * CXLFLASH_BLOCK_SIZE) / gli->blk_len;
	new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE);

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu req_size=%llu new_size=%llu\n",
		__func__, ctxid, resize->rsrc_handle, resize->req_size,
		new_size);

	if (unlikely(gli->mode != MODE_VIRTUAL)) {
		dev_dbg(dev, "%s: LUN mode does not support resize mode=%d\n",
			__func__, gli->mode);
		rc = -EINVAL;
		goto out;

	}

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%u\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	if (new_size > rhte->lxt_cnt)
		rc = grow_lxt(afu, sdev, ctxid, rhndl, rhte, &new_size);
	else if (new_size < rhte->lxt_cnt)
		rc = shrink_lxt(afu, sdev, rhndl, rhte, ctxi, &new_size);
	else {
		/*
		 * Size unchanged: still issue a heavy-weight sync so that
		 * outstanding AFU activity for this handle is quiesced before
		 * reporting success back to the user.
		 */
		rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
		if (unlikely(rc)) {
			rc = -EAGAIN;
			goto out;
		}
	}

	/* Report the achieved size back in blocks (last addressable LBA) */
	resize->hdr.return_flags = 0;
	resize->last_lba = (new_size * MC_CHUNK_SIZE * gli->blk_len);
	resize->last_lba /= CXLFLASH_BLOCK_SIZE;
	resize->last_lba--;

out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: resized to %llu returning rc=%d\n",
		__func__, resize->last_lba, rc);
	return rc;
}
821
/*
 * cxlflash_vlun_resize() - resize a virtual LUN on behalf of an ioctl
 * @sdev:	SCSI device associated with LUN owning virtual LUN.
 * @resize:	Resize ioctl data structure.
 *
 * Thin wrapper around _cxlflash_vlun_resize(); passing a NULL context
 * instructs it to look up (and release) the context itself.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_vlun_resize(struct scsi_device *sdev,
			 struct dk_cxlflash_resize *resize)
{
	return _cxlflash_vlun_resize(sdev, NULL, resize);
}
827
828
829
830
831
/*
 * cxlflash_restore_luntable() - restores the LUN table to the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Re-programs the per-port LUN table MMIO slots for every LUN that was
 * previously entered into the table (in_table), e.g. after the AFU's
 * MMIO state has been lost.
 */
void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
{
	struct llun_info *lli, *temp;
	u32 lind;
	int k;
	struct device *dev = &cfg->dev->dev;
	__be64 __iomem *fc_port_luns;

	mutex_lock(&global.mutex);

	/*
	 * NOTE(review): the _safe iterator is used although no entries are
	 * removed during the walk — list_for_each_entry() would suffice.
	 */
	list_for_each_entry_safe(lli, temp, &cfg->lluns, list) {
		if (!lli->in_table)
			continue;

		lind = lli->lun_index;
		dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind);

		/* Rewrite the slot on every port this LUN was selected on */
		for (k = 0; k < cfg->num_fc_ports; k++)
			if (lli->port_sel & (1 << k)) {
				fc_port_luns = get_fc_port_luns(cfg, k);
				writeq_be(lli->lun_id[k], &fc_port_luns[lind]);
				dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]);
			}
	}

	mutex_unlock(&global.mutex);
}
859
860
861
862
863
864
865
866 static inline u8 get_num_ports(u32 psm)
867 {
868 static const u8 bits[16] = { 0, 1, 1, 2, 1, 2, 2, 3,
869 1, 2, 2, 3, 2, 3, 3, 4 };
870
871 return bits[psm & 0xf];
872 }
873
874
875
876
877
878
879
880
881
882
883
884
/*
 * init_luntable() - enters a LUN into the per-port LUN tables
 * @cfg:	Internal structure associated with the host.
 * @lli:	Per-adapter LUN information structure.
 *
 * Multi-port (promoted) LUNs are assigned indices from the bottom of the
 * table, growing upward via promote_lun_index; single-port LUNs are
 * assigned from the top of the owning port's table, growing downward via
 * last_lun_index. The table is full when the two cursors meet.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
{
	u32 chan;
	u32 lind;
	u32 nports;
	int rc = 0;
	int k;
	struct device *dev = &cfg->dev->dev;
	__be64 __iomem *fc_port_luns;

	mutex_lock(&global.mutex);

	/* Already entered; nothing to do */
	if (lli->in_table)
		goto out;

	nports = get_num_ports(lli->port_sel);
	if (nports == 0 || nports > cfg->num_fc_ports) {
		WARN(1, "Unsupported port configuration nports=%u", nports);
		rc = -EIO;
		goto out;
	}

	if (nports > 1) {
		/*
		 * When LUN is visible from multiple ports, we will put
		 * it in the top half of the LUN table.
		 */
		for (k = 0; k < cfg->num_fc_ports; k++) {
			if (!(lli->port_sel & (1 << k)))
				continue;

			/* Cursors met: no free slot left on this port */
			if (cfg->promote_lun_index == cfg->last_lun_index[k]) {
				rc = -ENOSPC;
				goto out;
			}
		}

		lind = lli->lun_index = cfg->promote_lun_index;
		dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind);

		/* Program the same slot on each selected port */
		for (k = 0; k < cfg->num_fc_ports; k++) {
			if (!(lli->port_sel & (1 << k)))
				continue;

			fc_port_luns = get_fc_port_luns(cfg, k);
			writeq_be(lli->lun_id[k], &fc_port_luns[lind]);
			dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]);
		}

		cfg->promote_lun_index++;
	} else {
		/*
		 * When LUN is visible only from one port, we will put
		 * it in the bottom half of the LUN table.
		 */
		chan = PORTMASK2CHAN(lli->port_sel);
		if (cfg->promote_lun_index == cfg->last_lun_index[chan]) {
			rc = -ENOSPC;
			goto out;
		}

		lind = lli->lun_index = cfg->last_lun_index[chan];
		fc_port_luns = get_fc_port_luns(cfg, chan);
		writeq_be(lli->lun_id[chan], &fc_port_luns[lind]);
		cfg->last_lun_index[chan]--;
		dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n\t%d=%llx\n",
			__func__, lind, chan, lli->lun_id[chan]);
	}

	lli->in_table = true;
out:
	mutex_unlock(&global.mutex);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
960
961
962
963
964
965
966
967
968
969
970
971
972
/*
 * cxlflash_disk_virtual_open() - open a virtual disk of specified size
 * @sdev:	SCSI device associated with LUN owning virtual LUN.
 * @arg:	UVirtual ioctl data structure.
 *
 * On successful return, the user is informed of the resource handle
 * to be used to identify the virtual LUN and the size (in blocks) of
 * the virtual LUN in last LBA format. The size is achieved by reusing
 * the resize path (a virtual LUN starts empty and is grown to the
 * requested size).
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;

	struct dk_cxlflash_uvirtual *virt = (struct dk_cxlflash_uvirtual *)arg;
	struct dk_cxlflash_resize resize;

	u64 ctxid = DECODE_CTXID(virt->context_id),
	    rctxid = virt->context_id;
	u64 lun_size = virt->lun_size;
	u64 last_lba = 0;
	u64 rsrc_handle = -1;

	int rc = 0;

	struct ctx_info *ctxi = NULL;
	struct sisl_rht_entry *rhte = NULL;

	dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);

	/* First open on this LUN: set up its block allocator */
	mutex_lock(&gli->mutex);
	if (gli->mode == MODE_NONE) {
		rc = init_vlun(lli);
		if (rc) {
			dev_err(dev, "%s: init_vlun failed rc=%d\n",
				__func__, rc);
			rc = -ENOMEM;
			goto err0;
		}
	}

	rc = cxlflash_lun_attach(gli, MODE_VIRTUAL, true);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Failed attach to LUN (VIRTUAL)\n", __func__);
		goto err0;
	}
	mutex_unlock(&gli->mutex);

	rc = init_luntable(cfg, lli);
	if (rc) {
		dev_err(dev, "%s: init_luntable failed rc=%d\n", __func__, rc);
		goto err1;
	}

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_err(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto err1;
	}

	rhte = rhte_checkout(ctxi, lli);
	if (unlikely(!rhte)) {
		dev_err(dev, "%s: too many opens ctxid=%llu\n",
			__func__, ctxid);
		rc = -EMFILE;	/* too many opens  */
		goto err1;
	}

	/* Resource handle is the RHT slot index */
	rsrc_handle = (rhte - ctxi->rht_start);

	/* Populate RHT format 0 fields (virtual LUN) */
	rhte->nmask = MC_RHT_NMASK;
	rhte->fp = SISL_RHT_FP(0U, ctxi->rht_perms);

	/* Resize to requested size by growing from the initial empty LXT */
	marshal_virt_to_resize(virt, &resize);
	resize.rsrc_handle = rsrc_handle;
	rc = _cxlflash_vlun_resize(sdev, ctxi, &resize);
	if (rc) {
		dev_err(dev, "%s: resize failed rc=%d\n", __func__, rc);
		goto err2;
	}
	last_lba = resize.last_lba;

	if (virt->hdr.flags & DK_CXLFLASH_UVIRTUAL_NEED_WRITE_SAME)
		ctxi->rht_needs_ws[rsrc_handle] = true;

	virt->hdr.return_flags = 0;
	virt->last_lba = last_lba;
	virt->rsrc_handle = rsrc_handle;

	if (get_num_ports(lli->port_sel) > 1)
		virt->hdr.return_flags |= DK_CXLFLASH_ALL_PORTS_ACTIVE;
out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
		__func__, rsrc_handle, rc, last_lba);
	return rc;

err2:
	/* Undo the RHT slot checkout */
	rhte_checkin(ctxi, rhte);
err1:
	cxlflash_lun_detach(gli);
	goto out;
err0:
	/* Special case for failure in the middle of the attach */
	cxlflash_ba_terminate(&gli->blka.ba_lun);
	mutex_unlock(&gli->mutex);
	goto out;
}
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091 static int clone_lxt(struct afu *afu,
1092 struct blka *blka,
1093 ctx_hndl_t ctxid,
1094 res_hndl_t rhndl,
1095 struct sisl_rht_entry *rhte,
1096 struct sisl_rht_entry *rhte_src)
1097 {
1098 struct cxlflash_cfg *cfg = afu->parent;
1099 struct device *dev = &cfg->dev->dev;
1100 struct sisl_lxt_entry *lxt = NULL;
1101 bool locked = false;
1102 u32 ngrps;
1103 u64 aun;
1104 int j;
1105 int i = 0;
1106 int rc = 0;
1107
1108 ngrps = LXT_NUM_GROUPS(rhte_src->lxt_cnt);
1109
1110 if (ngrps) {
1111
1112 lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
1113 GFP_KERNEL);
1114 if (unlikely(!lxt)) {
1115 rc = -ENOMEM;
1116 goto out;
1117 }
1118
1119
1120 memcpy(lxt, rhte_src->lxt_start,
1121 (sizeof(*lxt) * rhte_src->lxt_cnt));
1122
1123
1124
1125
1126
1127
1128 mutex_lock(&blka->mutex);
1129 locked = true;
1130 for (i = 0; i < rhte_src->lxt_cnt; i++) {
1131 aun = (lxt[i].rlba_base >> MC_CHUNK_SHIFT);
1132 if (ba_clone(&blka->ba_lun, aun) == -1ULL) {
1133 rc = -EIO;
1134 goto err;
1135 }
1136 }
1137 }
1138
1139
1140
1141
1142
1143 dma_wmb();
1144
1145 rhte->lxt_start = lxt;
1146 dma_wmb();
1147
1148 rhte->lxt_cnt = rhte_src->lxt_cnt;
1149 dma_wmb();
1150
1151 rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
1152 if (unlikely(rc)) {
1153 rc = -EAGAIN;
1154 goto err2;
1155 }
1156
1157 out:
1158 if (locked)
1159 mutex_unlock(&blka->mutex);
1160 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1161 return rc;
1162 err2:
1163
1164 rhte->lxt_cnt = 0;
1165 dma_wmb();
1166 rhte->lxt_start = NULL;
1167 dma_wmb();
1168 err:
1169
1170 for (j = 0; j < i; j++) {
1171 aun = (lxt[j].rlba_base >> MC_CHUNK_SHIFT);
1172 ba_free(&blka->ba_lun, aun);
1173 }
1174 kfree(lxt);
1175 goto out;
1176 }
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
/*
 * cxlflash_disk_clone() - clone a context by making a snapshot copy
 * @sdev:	SCSI device associated with LUN owning virtual LUN.
 * @clone:	Clone ioctl data structure.
 *
 * This routine requires that the destination context be empty (no
 * resource handles in use) and copies every open resource handle from
 * the source context into it, cloning the translation tables. LUN
 * access entries missing in the destination are built on a sidecar
 * list and only spliced in once the whole clone has succeeded.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_disk_clone(struct scsi_device *sdev,
			struct dk_cxlflash_clone *clone)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct blka *blka = &gli->blka;
	struct afu *afu = cfg->afu;
	struct dk_cxlflash_release release = { { 0 }, 0 };

	struct ctx_info *ctxi_src = NULL,
			*ctxi_dst = NULL;
	struct lun_access *lun_access_src, *lun_access_dst;
	u32 perms;
	u64 ctxid_src = DECODE_CTXID(clone->context_id_src),
	    ctxid_dst = DECODE_CTXID(clone->context_id_dst),
	    rctxid_src = clone->context_id_src,
	    rctxid_dst = clone->context_id_dst;
	int i, j;
	int rc = 0;
	bool found;
	LIST_HEAD(sidecar);	/* new LUN access entries, staged */

	dev_dbg(dev, "%s: ctxid_src=%llu ctxid_dst=%llu\n",
		__func__, ctxid_src, ctxid_dst);

	/* Do not clone yourself */
	if (unlikely(rctxid_src == rctxid_dst)) {
		rc = -EINVAL;
		goto out;
	}

	if (unlikely(gli->mode != MODE_VIRTUAL)) {
		rc = -EINVAL;
		dev_dbg(dev, "%s: Only supported on virtual LUNs mode=%u\n",
			__func__, gli->mode);
		goto out;
	}

	ctxi_src = get_context(cfg, rctxid_src, lli, CTX_CTRL_CLONE);
	ctxi_dst = get_context(cfg, rctxid_dst, lli, 0);
	if (unlikely(!ctxi_src || !ctxi_dst)) {
		dev_dbg(dev, "%s: Bad context ctxid_src=%llu ctxid_dst=%llu\n",
			__func__, ctxid_src, ctxid_dst);
		rc = -EINVAL;
		goto out;
	}

	/* Verify there is no open resource handle in the destination */
	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
		if (ctxi_dst->rht_start[i].nmask != 0) {
			rc = -EINVAL;
			goto out;
		}

	/*
	 * Clone LUN access list. Entries the destination lacks are staged
	 * on the sidecar so they can be discarded cheaply on failure.
	 */
	list_for_each_entry(lun_access_src, &ctxi_src->luns, list) {
		found = false;
		list_for_each_entry(lun_access_dst, &ctxi_dst->luns, list)
			if (lun_access_dst->sdev == lun_access_src->sdev) {
				found = true;
				break;
			}

		if (!found) {
			lun_access_dst = kzalloc(sizeof(*lun_access_dst),
						 GFP_KERNEL);
			if (unlikely(!lun_access_dst)) {
				dev_err(dev, "%s: lun_access allocation fail\n",
					__func__);
				rc = -ENOMEM;
				goto out;
			}

			*lun_access_dst = *lun_access_src;
			list_add(&lun_access_dst->list, &sidecar);
		}
	}

	if (unlikely(!ctxi_src->rht_out)) {
		dev_dbg(dev, "%s: Nothing to clone\n", __func__);
		goto out_success;
	}

	/* User specified permission on attach; destination's perms apply */
	perms = ctxi_dst->rht_perms;

	/*
	 * Copy over checked-out RHT (and their associated LXT) entries.
	 * The walk stops early once the destination has caught up with
	 * the source's checked-out count.
	 */
	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
		if (ctxi_src->rht_out == ctxi_dst->rht_out)
			break;
		if (ctxi_src->rht_start[i].nmask == 0)
			continue;

		/* Consume a destination RHT entry at the same index */
		ctxi_dst->rht_out++;
		ctxi_dst->rht_start[i].nmask = ctxi_src->rht_start[i].nmask;
		ctxi_dst->rht_start[i].fp =
		    SISL_RHT_FP_CLONE(ctxi_src->rht_start[i].fp, perms);
		ctxi_dst->rht_lun[i] = ctxi_src->rht_lun[i];

		rc = clone_lxt(afu, blka, ctxid_dst, i,
			       &ctxi_dst->rht_start[i],
			       &ctxi_src->rht_start[i]);
		if (rc) {
			/* Release handles cloned so far, then this one */
			marshal_clone_to_rele(clone, &release);
			for (j = 0; j < i; j++) {
				release.rsrc_handle = j;
				_cxlflash_disk_release(sdev, ctxi_dst,
						       &release);
			}

			/* Put back the partially consumed RHT entry */
			rhte_checkin(ctxi_dst, &ctxi_dst->rht_start[i]);
			goto err;
		}

		/* Each cloned handle counts as an additional LUN attach */
		cxlflash_lun_attach(gli, gli->mode, false);
	}

out_success:
	list_splice(&sidecar, &ctxi_dst->luns);

	/* fall through */
out:
	if (ctxi_src)
		put_context(ctxi_src);
	if (ctxi_dst)
		put_context(ctxi_dst);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

err:
	/* Discard the staged LUN access entries */
	list_for_each_entry_safe(lun_access_src, lun_access_dst, &sidecar, list)
		kfree(lun_access_src);
	goto out;
}