Commit 550f84d

Cleanup ZNS code
1 parent 5188179 commit 550f84d

3 files changed: +86 -100 lines changed

README.md

Lines changed: 0 additions & 4 deletions
@@ -284,10 +284,6 @@ emulation.
 
 ### 4. Run FEMU as NVMe ZNS (Zoned-Namespace) SSDs (``ZNSSD`` mode) ###
 
-**Notes:** Currently only basic ZNS interface is supported and it can be used
-for development purposes. More features like proper latency emulation,
-controller-level zone mappings to flash chips are work-in-progress.
-
 ```Bash
 ./run-zns.sh
 ```

femu-scripts/run-zns.sh

Lines changed: 11 additions & 11 deletions
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Huaicheng Li <hcli@cmu.edu>
+# Huaicheng Li <huaicheng@vt.edu>
 # Run FEMU as Zoned-Namespace (ZNS) SSDs
 #
 
@@ -18,19 +18,19 @@ if [[ ! -e "$OSIMGF" ]]; then
     exit
 fi
 
-ssd_size=4096
-num_channels=2
-num_chips_per_channel=4
-read_latency=40000
-write_latency=200000
+SSD_SIZE_MB=4096
+NUM_CHANNELS=8
+NUM_CHIPS_PER_CHANNEL=4
+READ_LATENCY_NS=40000
+WRITE_LATENCY_NS=200000
 
 FEMU_OPTIONS="-device femu"
-FEMU_OPTIONS=${FEMU_OPTIONS}",devsz_mb=${ssd_size}"
+FEMU_OPTIONS=${FEMU_OPTIONS}",devsz_mb=${SSD_SIZE_MB}"
 FEMU_OPTIONS=${FEMU_OPTIONS}",namespaces=1"
-FEMU_OPTIONS=${FEMU_OPTIONS}",zns_num_ch=${num_channels}"
-FEMU_OPTIONS=${FEMU_OPTIONS}",zns_num_lun=${num_chips_per_channel}"
-FEMU_OPTIONS=${FEMU_OPTIONS}",zns_read=${read_latency}"
-FEMU_OPTIONS=${FEMU_OPTIONS}",zns_write=${write_latency}"
+FEMU_OPTIONS=${FEMU_OPTIONS}",zns_num_ch=${NUM_CHANNELS}"
+FEMU_OPTIONS=${FEMU_OPTIONS}",zns_num_lun=${NUM_CHIPS_PER_CHANNEL}"
+FEMU_OPTIONS=${FEMU_OPTIONS}",zns_read=${READ_LATENCY_NS}"
+FEMU_OPTIONS=${FEMU_OPTIONS}",zns_write=${WRITE_LATENCY_NS}"
 FEMU_OPTIONS=${FEMU_OPTIONS}",femu_mode=3"
 
 sudo x86_64-softmmu/qemu-system-x86_64 \
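
The devsz_mb and zns_* options assembled above are consumed by the ZNS device model in hw/femu/zns/zns.c below, which reads them back as n->zns_params.zns_num_ch, zns_num_lun, zns_read and zns_write. A minimal sketch of that mapping, using an illustrative struct name (FEMU's actual parameter type is not shown in this diff):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative mirror of the device options set in run-zns.sh. The field
 * names follow the n->zns_params accesses visible in zns.c below; the
 * struct name and exact layout are assumptions, not FEMU's definition. */
struct zns_device_params {
    uint64_t devsz_mb;      /* devsz_mb=4096    -> 4 GB backing image       */
    uint64_t zns_num_ch;    /* zns_num_ch=8     -> flash channels           */
    uint64_t zns_num_lun;   /* zns_num_lun=4    -> chips (LUNs) per channel */
    uint64_t zns_read;      /* zns_read=40000   -> NAND read latency, ns    */
    uint64_t zns_write;     /* zns_write=200000 -> NAND program latency, ns */
};

int main(void)
{
    struct zns_device_params p = { 4096, 8, 4, 40000, 200000 };

    /* 8 channels x 4 chips per channel = 32 flash units that can be busy at once. */
    printf("parallel flash units: %llu\n",
           (unsigned long long)(p.zns_num_ch * p.zns_num_lun));
    return 0;
}
```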

hw/femu/zns/zns.c

Lines changed: 75 additions & 85 deletions
@@ -8,8 +8,7 @@ static inline uint32_t zns_zone_idx(NvmeNamespace *ns, uint64_t slba)
 {
     FemuCtrl *n = ns->ctrl;
 
-    return (n->zone_size_log2 > 0 ? slba >> n->zone_size_log2 : slba /
-            n->zone_size);
+    return (n->zone_size_log2 > 0 ? slba >> n->zone_size_log2 : slba / n->zone_size);
 }
 
 static inline NvmeZone *zns_get_zone_by_slba(NvmeNamespace *ns, uint64_t slba)
@@ -167,8 +166,7 @@ static void zns_clear_zone(NvmeNamespace *ns, NvmeZone *zone)
 
     zone->w_ptr = zone->d.wp;
     state = zns_get_zone_state(zone);
-    if (zone->d.wp != zone->d.zslba ||
-        (zone->d.za & NVME_ZA_ZD_EXT_VALID)) {
+    if (zone->d.wp != zone->d.zslba || (zone->d.za & NVME_ZA_ZD_EXT_VALID)) {
         if (state != NVME_ZONE_STATE_CLOSED) {
             zns_set_zone_state(zone, NVME_ZONE_STATE_CLOSED);
         }
@@ -223,8 +221,7 @@ void zns_ns_cleanup(NvmeNamespace *ns)
     }
 }
 
-static void zns_assign_zone_state(NvmeNamespace *ns, NvmeZone *zone,
-                                  NvmeZoneState state)
+static void zns_assign_zone_state(NvmeNamespace *ns, NvmeZone *zone, NvmeZoneState state)
 {
     FemuCtrl *n = ns->ctrl;
 
@@ -314,8 +311,8 @@ static uint16_t zns_check_zone_state_for_write(NvmeZone *zone)
 }
 
 static uint16_t zns_check_zone_write(FemuCtrl *n, NvmeNamespace *ns,
-                                      NvmeZone *zone, uint64_t slba,
-                                      uint32_t nlb, bool append)
+                                     NvmeZone *zone, uint64_t slba,
+                                     uint32_t nlb, bool append)
 {
     uint16_t status;
 
@@ -366,8 +363,7 @@ static uint16_t zns_check_zone_state_for_read(NvmeZone *zone)
     return status;
 }
 
-static uint16_t zns_check_zone_read(NvmeNamespace *ns, uint64_t slba,
-                                    uint32_t nlb)
+static uint16_t zns_check_zone_read(NvmeNamespace *ns, uint64_t slba, uint32_t nlb)
 {
     FemuCtrl *n = ns->ctrl;
     NvmeZone *zone = zns_get_zone_by_slba(ns, slba);
@@ -432,8 +428,7 @@ static uint16_t zns_auto_open_zone(NvmeNamespace *ns, NvmeZone *zone)
     return status;
 }
 
-static void zns_finalize_zoned_write(NvmeNamespace *ns, NvmeRequest *req,
-                                     bool failed)
+static void zns_finalize_zoned_write(NvmeNamespace *ns, NvmeRequest *req, bool failed)
 {
     NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
     NvmeZone *zone;
@@ -510,16 +505,19 @@ static void advance_read_pointer(FemuCtrl *n)
     //printf("NUM CH: %"PRIu64"\n", wpp->ch);
     check_addr(wpp->ch, num_ch);
     wpp->ch++;
-    if (wpp->ch == num_ch) {
-        wpp->ch = 0;
-        check_addr(wpp->lun, num_lun);
-        wpp->lun++;
-        if(wpp->lun == num_lun) {
-            wpp->lun = 0;
 
-            assert(wpp->ch == 0);
-            assert(wpp->lun == 0);
-        }
+    if (wpp->ch != num_ch) {
+        return;
+    }
+
+    /* Wrap-up, wpp->ch == num_ch */
+    wpp->ch = 0;
+    check_addr(wpp->lun, num_lun);
+    wpp->lun++;
+    if (wpp->lun == num_lun) {
+        wpp->lun = 0;
+        assert(wpp->ch == 0);
+        assert(wpp->lun == 0);
     }
 }
 
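The refactor above flattens advance_read_pointer() with an early return: the striping pointer walks the channels first and only steps to the next LUN once it wraps past the last channel. A self-contained sketch of that round-robin advance, using made-up types rather than FEMU's real FemuCtrl/zns_ssd state:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative striping pointer: advance channel-first, then LUN (round robin). */
struct stripe_ptr {
    uint64_t ch;
    uint64_t lun;
};

static void advance(struct stripe_ptr *wpp, uint64_t num_ch, uint64_t num_lun)
{
    wpp->ch++;
    if (wpp->ch != num_ch) {
        return;                 /* still sweeping the channels */
    }

    /* Wrap-up: channel index restarts, move on to the next LUN. */
    wpp->ch = 0;
    wpp->lun++;
    if (wpp->lun == num_lun) {
        wpp->lun = 0;           /* full sweep done, start over */
        assert(wpp->ch == 0);
    }
}

int main(void)
{
    struct stripe_ptr wpp = { 0, 0 };

    /* With 2 channels and 2 LUNs the pointer visits:
     * (1,0) (0,1) (1,1) (0,0) (1,0) ... */
    for (int i = 0; i < 8; i++) {
        advance(&wpp, 2, 2);
        printf("(%llu,%llu) ", (unsigned long long)wpp.ch,
               (unsigned long long)wpp.lun);
    }
    printf("\n");
    return 0;
}
```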

@@ -543,8 +541,7 @@ static inline struct ppa lpn_to_ppa(FemuCtrl *n, NvmeNamespace *ns, uint64_t lpn
     return ppa;
 }
 
-static uint64_t zns_advance_status(FemuCtrl *n, struct nand_cmd *ncmd,
-                                   struct ppa *ppa)
+static uint64_t zns_advance_status(FemuCtrl *n, struct nand_cmd *ncmd, struct ppa *ppa)
 {
     int c = ncmd->cmd;
 
@@ -559,40 +556,38 @@ static uint64_t zns_advance_status(FemuCtrl *n, struct nand_cmd *ncmd,
     uint64_t read_delay = n->zns_params.zns_read;
     uint64_t write_delay = n->zns_params.zns_write;
     uint64_t erase_delay = 2000000;
-    //200 us for write
+
     switch (c) {
     case NAND_READ:
-        nand_stime = (fc->next_fc_avail_time < req_stime) ? req_stime : \
-                      fc->next_fc_avail_time;
-
-        fc->next_fc_avail_time = nand_stime + read_delay;
-        lat = fc->next_fc_avail_time - req_stime;
-
+        nand_stime = (fc->next_fc_avail_time < req_stime) ? req_stime : \
+                     fc->next_fc_avail_time;
+        fc->next_fc_avail_time = nand_stime + read_delay;
+        lat = fc->next_fc_avail_time - req_stime;
         break;
+
     case NAND_WRITE:
         nand_stime = (fc->next_fc_avail_time < req_stime) ? req_stime : \
                      fc->next_fc_avail_time;
         fc->next_fc_avail_time = nand_stime + write_delay;
         lat = fc->next_fc_avail_time - req_stime;
-
         break;
+
     case NAND_ERASE:
-        nand_stime = (fc->next_fc_avail_time < req_stime) ? req_stime : \
-                      fc->next_fc_avail_time;
-        fc->next_fc_avail_time = nand_stime + erase_delay;
-        lat = fc->next_fc_avail_time - req_stime;
+        nand_stime = (fc->next_fc_avail_time < req_stime) ? req_stime : \
+                     fc->next_fc_avail_time;
+        fc->next_fc_avail_time = nand_stime + erase_delay;
+        lat = fc->next_fc_avail_time - req_stime;
+        break;
 
-        break;
     default:
-        //lat = 0;
-        ;
+        /* To silent warnings */
+        ;
     }
+
     return lat;
 }
-//----------------------------------
 
-static uint64_t zns_advance_zone_wp(NvmeNamespace *ns, NvmeZone *zone,
-                                    uint32_t nlb)
+static uint64_t zns_advance_zone_wp(NvmeNamespace *ns, NvmeZone *zone, uint32_t nlb)
 {
     uint64_t result = zone->w_ptr;
     uint8_t zs;
623618
{
624619
NvmeNamespace *ns = req->ns;
625620
FemuCtrl *n = ns->ctrl;
621+
int ch, lun;
626622

627623
/* FIXME, We always assume reset SUCCESS */
628624
switch (zns_get_zone_state(zone)) {
@@ -647,18 +643,17 @@ static void zns_aio_zone_reset_cb(NvmeRequest *req, NvmeZone *zone)
     uint64_t num_lun = zns->num_lun;
     struct ppa ppa;
 
-    for (int ch = 0; ch < num_ch; ch++){
-        for (int lun = 0; lun < num_lun; lun++) {
-            ppa.g.ch = ch;
-            ppa.g.fc = lun;
-            ppa.g.blk = zns_zone_idx(ns, zone->d.zslba);
-
-            struct nand_cmd erase;
-            erase.cmd = NAND_ERASE;
-            erase.stime = 0;
-            zns_advance_status(n, &erase, &ppa);
-
-        }
+    for (ch = 0; ch < num_ch; ch++) {
+        for (lun = 0; lun < num_lun; lun++) {
+            ppa.g.ch = ch;
+            ppa.g.fc = lun;
+            ppa.g.blk = zns_zone_idx(ns, zone->d.zslba);
+
+            struct nand_cmd erase;
+            erase.cmd = NAND_ERASE;
+            erase.stime = 0;
+            zns_advance_status(n, &erase, &ppa);
+        }
     }
 }
 
@@ -847,8 +842,7 @@ static uint16_t zns_do_zone_op(NvmeNamespace *ns, NvmeZone *zone,
     } else {
         if (proc_mask & NVME_PROC_CLOSED_ZONES) {
             QTAILQ_FOREACH_SAFE(zone, &n->closed_zones, entry, next) {
-                status = zns_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
-                                            req);
+                status = zns_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, req);
                 if (status && status != NVME_NO_COMPLETE) {
                     goto out;
                 }
@@ -873,8 +867,7 @@ static uint16_t zns_do_zone_op(NvmeNamespace *ns, NvmeZone *zone,
         }
         if (proc_mask & NVME_PROC_FULL_ZONES) {
             QTAILQ_FOREACH_SAFE(zone, &n->full_zones, entry, next) {
-                status = zns_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
-                                            req);
+                status = zns_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, req);
                 if (status && status != NVME_NO_COMPLETE) {
                     goto out;
                 }
@@ -1156,6 +1149,7 @@ static inline bool nvme_csi_has_nvm_support(NvmeNamespace *ns)
     case NVME_CSI_ZONED:
         return true;
     }
+
     return false;
 }
 
@@ -1312,29 +1306,28 @@ static uint16_t zns_read(FemuCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
     data_offset = zns_l2b(ns, slba);
 
     backend_rw(n->mbe, &req->qsg, &data_offset, req->is_write);
-
-    uint64_t slpn = (slba)/4096;
-    uint64_t elpn = (slba + nlb - 1)/4096;
-
+
+    uint64_t slpn = (slba) / 4096;
+    uint64_t elpn = (slba + nlb - 1) / 4096;
     uint64_t lpn;
     struct ppa ppa;
     uint64_t sublat,maxlat=0;
 
     for (lpn = slpn; lpn <= elpn; lpn++) {
         ppa = lpn_to_ppa(n, ns, lpn);
-       advance_read_pointer(n);
+        advance_read_pointer(n);
 
-       struct nand_cmd read;
-       read.cmd = NAND_READ;
-       read.stime = req->stime;
+        struct nand_cmd read;
+        read.cmd = NAND_READ;
+        read.stime = req->stime;
 
-       sublat = zns_advance_status(n, &read, &ppa);
+        sublat = zns_advance_status(n, &read, &ppa);
         maxlat = (sublat > maxlat) ? sublat : maxlat;
     }
 
     req->reqlat = maxlat;
     req->expire_time += maxlat;
-
+
     return NVME_SUCCESS;
 
 err:
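
zns_read() above splits the request into logical pages (slba/4096 through (slba + nlb - 1)/4096), maps each page to a physical page address, charges it to a flash chip, and completes when the slowest page finishes, which is what the running maxlat captures. The sketch below shows that max-of-sublatencies pattern in isolation; the modulo striping stands in for FEMU's lpn_to_ppa()/advance_read_pointer() and is illustrative only:

```c
#include <stdint.h>
#include <stdio.h>

#define NUM_CHIPS 4

/* Per-chip "next free" times; a stand-in for FEMU's per-LUN state. */
static uint64_t next_avail_ns[NUM_CHIPS];

static uint64_t book_read(int chip, uint64_t now_ns, uint64_t delay_ns)
{
    uint64_t start = (next_avail_ns[chip] < now_ns) ? now_ns
                                                    : next_avail_ns[chip];

    next_avail_ns[chip] = start + delay_ns;
    return next_avail_ns[chip] - now_ns;
}

int main(void)
{
    const uint64_t read_delay_ns = 40000;
    uint64_t slpn = 0, elpn = 5;    /* a request spanning 6 logical pages */
    uint64_t maxlat = 0;

    /* Each page lands on some chip (plain modulo here, purely illustrative);
     * the request's latency is that of its slowest page, like maxlat above. */
    for (uint64_t lpn = slpn; lpn <= elpn; lpn++) {
        int chip = (int)(lpn % NUM_CHIPS);
        uint64_t sublat = book_read(chip, 0, read_delay_ns);

        maxlat = (sublat > maxlat) ? sublat : maxlat;
    }

    /* 6 pages over 4 chips: two chips serve two pages back to back, so the
     * request finishes after 2 * 40000 ns. */
    printf("request latency: %llu ns\n", (unsigned long long)maxlat);
    return 0;
}
```
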
@@ -1386,29 +1379,29 @@ static uint16_t zns_write(FemuCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
     if (status) {
         goto err;
     }
-
+
     backend_rw(n->mbe, &req->qsg, &data_offset, req->is_write);
     zns_finalize_zoned_write(ns, req, false);
-
-    uint64_t slpn = (slba)/4096;
-    uint64_t elpn = (slba + nlb - 1)/4096;
+
+    uint64_t slpn = (slba) / 4096;
+    uint64_t elpn = (slba + nlb - 1) / 4096;
 
     uint64_t lpn;
     struct ppa ppa;
     uint64_t sublat,maxlat=0;
 
     for (lpn = slpn; lpn <= elpn; lpn++) {
         ppa = lpn_to_ppa(n, ns, lpn);
-       advance_read_pointer(n);
+        advance_read_pointer(n);
 
-       struct nand_cmd write;
-       write.cmd = NAND_WRITE;
-       write.stime = req->stime;
+        struct nand_cmd write;
+        write.cmd = NAND_WRITE;
+        write.stime = req->stime;
 
-       sublat = zns_advance_status(n, &write, &ppa);
+        sublat = zns_advance_status(n, &write, &ppa);
         maxlat = (sublat > maxlat) ? sublat : maxlat;
     }
-
+
     req->reqlat = maxlat;
     req->expire_time += maxlat;
     return NVME_SUCCESS;
@@ -1483,23 +1476,21 @@ static void zns_init_ch(struct zns_ch *ch, uint8_t num_lun)
 static void zns_init_params(FemuCtrl *n)
 {
     struct zns_ssd *id_zns;
-
+    int i;
+
     id_zns = g_malloc0(sizeof(struct zns_ssd));
     id_zns->num_ch = n->zns_params.zns_num_ch;
     id_zns->num_lun = n->zns_params.zns_num_lun;
     id_zns->ch = g_malloc0(sizeof(struct zns_ch) * id_zns->num_ch);
-    for (int i =0; i < id_zns->num_ch; i++) {
+    for (i =0; i < id_zns->num_ch; i++) {
        zns_init_ch(&id_zns->ch[i], id_zns->num_lun);
     }
-
+
     id_zns->wp.ch = 0;
     id_zns->wp.lun = 0;
     n->zns = id_zns;
 }
 
-// ---------------------------------------
-
-
 static int zns_init_zone_cap(FemuCtrl *n)
 {
     n->zoned = true;
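
zns_init_params() above builds the emulated geometry that the latency model walks over: zns_num_ch channels, each holding zns_num_lun flash chips, plus the striping write pointer starting at (0, 0). A rough sketch of that shape, with illustrative struct names (FEMU's real struct zns_ssd / struct zns_ch layouts may differ in detail):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative geometry matching what zns_init_params() sets up. */
struct chip { uint64_t next_avail_ns; };
struct chan { struct chip *chips; };

struct zns_geo {
    uint64_t num_ch;                    /* <- zns_num_ch                */
    uint64_t num_lun;                   /* <- zns_num_lun               */
    struct chan *ch;                    /* one entry per channel        */
    struct { uint64_t ch, lun; } wp;    /* striping pointer, starts 0,0 */
};

static struct zns_geo *geo_alloc(uint64_t num_ch, uint64_t num_lun)
{
    struct zns_geo *g = calloc(1, sizeof(*g));

    g->num_ch = num_ch;
    g->num_lun = num_lun;
    g->ch = calloc(num_ch, sizeof(*g->ch));
    for (uint64_t i = 0; i < num_ch; i++) {
        g->ch[i].chips = calloc(num_lun, sizeof(*g->ch[i].chips));
    }
    /* wp.ch and wp.lun are already zero thanks to calloc(). */
    return g;
}

int main(void)
{
    /* Same geometry as run-zns.sh: 8 channels x 4 chips per channel. */
    struct zns_geo *g = geo_alloc(8, 4);

    printf("channels=%llu chips/channel=%llu wp=(%llu,%llu)\n",
           (unsigned long long)g->num_ch, (unsigned long long)g->num_lun,
           (unsigned long long)g->wp.ch, (unsigned long long)g->wp.lun);
    return 0;
}
```
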
@@ -1510,7 +1501,7 @@ static int zns_init_zone_cap(FemuCtrl *n)
     n->max_active_zones = 0;
     n->max_open_zones = 0;
     n->zd_extension_size = 0;
-
+
     return 0;
 }
 
@@ -1545,7 +1536,6 @@ static void zns_init(FemuCtrl *n, Error **errp)
     }
 
     zns_init_zone_identify(n, ns, 0);
-
     zns_init_params(n);
 }
 