diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs index 375a10f159b1..99220259da1b 100644 --- a/Documentation/ABI/testing/sysfs-driver-ufs +++ b/Documentation/ABI/testing/sysfs-driver-ufs @@ -1164,3 +1164,74 @@ Description: This node is used to set or display whether UFS WriteBooster is (if the platform supports UFSHCD_CAP_CLK_SCALING). For a platform that doesn't support UFSHCD_CAP_CLK_SCALING, we can disable/enable WriteBooster through this sysfs node. + +What: /sys/bus/platform/drivers/ufshcd/*/device_descriptor/hpb_version +Date: December 2020 +Contact: Daejun Park +Description: This entry shows the HPB specification version. + The full information about the descriptor can be found in the UFS + HPB (Host Performance Booster) Extension specification. + Example: version 1.2.3 = 0123h + The file is read only. + +What: /sys/bus/platform/drivers/ufshcd/*/device_descriptor/hpb_control +Date: December 2020 +Contact: Daejun Park +Description: This entry shows the HPB control mode. + 00h: Host control mode + 01h: Device control mode + The file is read only. + +What: /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_region_size +Date: December 2020 +Contact: Daejun Park +Description: This entry shows the bHPBRegionSize, which can be calculated + as follows (in bytes): + HPB Region size = 512B * 2^bHPBRegionSize + The file is read only. + +What: /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_number_lu +Date: December 2020 +Contact: Daejun Park +Description: This entry shows the maximum number of HPB LUs supported by + the device. + 00h: HPB is not supported by the device. + 01h ~ 20h: Maximum number of HPB LUs supported by the device + The file is read only. + +What: /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_subregion_size +Date: December 2020 +Contact: Daejun Park +Description: This entry shows the bHPBSubRegionSize, which can be + calculated as follows (in bytes) and shall be a multiple of the + logical block size: + HPB Sub-Region size = 512B x 2^bHPBSubRegionSize + bHPBSubRegionSize shall not exceed bHPBRegionSize. + The file is read only. + +What: /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_max_active_regions +Date: December 2020 +Contact: Daejun Park +Description: This entry shows the maximum number of active HPB regions that + are supported by the device. + The file is read only. + +What: /sys/class/scsi_device/*/device/unit_descriptor/hpb_lu_max_active_regions +Date: December 2020 +Contact: Daejun Park +Description: This entry shows the maximum number of HPB regions assigned to + the HPB logical unit. + The file is read only. + +What: /sys/class/scsi_device/*/device/unit_descriptor/hpb_pinned_region_start_offset +Date: December 2020 +Contact: Daejun Park +Description: This entry shows the start offset of the HPB pinned region. + The file is read only. + +What: /sys/class/scsi_device/*/device/unit_descriptor/hpb_number_pinned_regions +Date: December 2020 +Contact: Daejun Park +Description: This entry shows the number of HPB pinned regions assigned to + the HPB logical unit. + The file is read only.
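For illustration, here is a minimal user-space sketch (not part of the patch) that applies the descriptor encodings documented above: the nibble-packed version field ("version 1.2.3 = 0123h") and the power-of-two size formulas. The input values below are assumptions standing in for values read from the hpb_version, hpb_region_size, and hpb_subregion_size sysfs nodes.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t hpb_version = 0x0123;	/* assumed example: encodes version 1.2.3 */
	uint8_t region_size = 12;	/* assumed example bHPBRegionSize exponent */
	uint8_t subregion_size = 9;	/* assumed example bHPBSubRegionSize exponent */

	/* version nibbles: 0123h -> 1.2.3 */
	printf("HPB version %u.%u.%u\n", (hpb_version >> 8) & 0xff,
	       (hpb_version >> 4) & 0xf, hpb_version & 0xf);

	/* HPB Region size = 512B * 2^bHPBRegionSize */
	printf("region size: %llu bytes\n", 512ULL << region_size);

	/* HPB Sub-Region size = 512B * 2^bHPBSubRegionSize;
	 * bHPBSubRegionSize shall not exceed bHPBRegionSize
	 */
	printf("sub-region size: %llu bytes\n", 512ULL << subregion_size);

	return 0;
}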
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index d0d7ead99eaa..72c0bcb5caa9 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -181,3 +181,12 @@ config SCSI_UFS_CRYPTO Enabling this makes it possible for the kernel to use the crypto capabilities of the UFS device (if present) to perform crypto operations on data being transferred to/from the device. + +config SCSI_UFS_HPB + bool "Support UFS Host Performance Booster" + depends on SCSI_UFSHCD + help + The UFS HPB feature improves random read performance. It caches + the device's L2P (logical to physical) map in host DRAM. The driver + issues the HPB READ command, which piggybacks the physical page number + so that the device can bypass the FTL (flash translation layer) L2P + address translation. diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index 06f3a3fe4a44..7dd11cd3f6a6 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile @@ -8,6 +8,7 @@ ufshcd-core-y += ufshcd.o ufs-sysfs.o ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o +ufshcd-core-$(CONFIG_SCSI_UFS_HPB) += ufshpb.o obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c index acc54f530f2d..2546e7a1ac4f 100644 --- a/drivers/scsi/ufs/ufs-sysfs.c +++ b/drivers/scsi/ufs/ufs-sysfs.c @@ -368,6 +368,8 @@ UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2); UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1); UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4); UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1); +UFS_DEVICE_DESC_PARAM(hpb_version, _HPB_VER, 2); +UFS_DEVICE_DESC_PARAM(hpb_control, _HPB_CONTROL, 1); UFS_DEVICE_DESC_PARAM(ext_feature_sup, _EXT_UFS_FEATURE_SUP, 4); UFS_DEVICE_DESC_PARAM(wb_presv_us_en, _WB_PRESRV_USRSPC_EN, 1); UFS_DEVICE_DESC_PARAM(wb_type, _WB_TYPE, 1); @@ -400,6 +402,8 @@ static struct attribute *ufs_sysfs_device_descriptor[] = { &dev_attr_number_of_secure_wpa.attr, &dev_attr_psa_max_data_size.attr, &dev_attr_psa_state_timeout.attr, + &dev_attr_hpb_version.attr, + &dev_attr_hpb_control.attr, &dev_attr_ext_feature_sup.attr, &dev_attr_wb_presv_us_en.attr, &dev_attr_wb_type.attr, @@ -473,6 +477,10 @@ UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units, _ENM4_MAX_NUM_UNITS, 4); UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor, _ENM4_CAP_ADJ_FCTR, 2); +UFS_GEOMETRY_DESC_PARAM(hpb_region_size, _HPB_REGION_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(hpb_number_lu, _HPB_NUMBER_LU, 1); +UFS_GEOMETRY_DESC_PARAM(hpb_subregion_size, _HPB_SUBREGION_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(hpb_max_active_regions, _HPB_MAX_ACTIVE_REGS, 2); UFS_GEOMETRY_DESC_PARAM(wb_max_alloc_units, _WB_MAX_ALLOC_UNITS, 4); UFS_GEOMETRY_DESC_PARAM(wb_max_wb_luns, _WB_MAX_WB_LUNS, 1); UFS_GEOMETRY_DESC_PARAM(wb_buff_cap_adj, _WB_BUFF_CAP_ADJ, 1); @@ -510,6 +518,10 @@ static struct attribute *ufs_sysfs_geometry_descriptor[] = { &dev_attr_enh3_memory_capacity_adjustment_factor.attr, &dev_attr_enh4_memory_max_alloc_units.attr, &dev_attr_enh4_memory_capacity_adjustment_factor.attr, + &dev_attr_hpb_region_size.attr, + &dev_attr_hpb_number_lu.attr, + &dev_attr_hpb_subregion_size.attr, + &dev_attr_hpb_max_active_regions.attr, &dev_attr_wb_max_alloc_units.attr, &dev_attr_wb_max_wb_luns.attr, &dev_attr_wb_buff_cap_adj.attr, @@ -923,6 +935,9 @@
UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1); UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8); UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2); UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1); +UFS_UNIT_DESC_PARAM(hpb_lu_max_active_regions, _HPB_LU_MAX_ACTIVE_RGNS, 2); +UFS_UNIT_DESC_PARAM(hpb_pinned_region_start_offset, _HPB_PIN_RGN_START_OFF, 2); +UFS_UNIT_DESC_PARAM(hpb_number_pinned_regions, _HPB_NUM_PIN_RGNS, 2); UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4); @@ -940,6 +955,9 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = { &dev_attr_physical_memory_resourse_count.attr, &dev_attr_context_capabilities.attr, &dev_attr_large_unit_granularity.attr, + &dev_attr_hpb_lu_max_active_regions.attr, + &dev_attr_hpb_pinned_region_start_offset.attr, + &dev_attr_hpb_number_pinned_regions.attr, &dev_attr_wb_buf_alloc_units.attr, NULL, }; diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index bf1897a72532..039421cfc1f7 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -122,6 +122,7 @@ enum flag_idn { QUERY_FLAG_IDN_WB_EN = 0x0E, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN = 0x0F, QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8 = 0x10, + QUERY_FLAG_IDN_HPB_RESET = 0x11, }; /* Attribute idn for Query requests */ @@ -195,6 +196,9 @@ enum unit_desc_param { UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18, UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20, UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22, + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS = 0x23, + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF = 0x25, + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS = 0x27, UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS = 0x29, }; @@ -235,6 +239,8 @@ enum device_desc_param { DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25, DEVICE_DESC_PARAM_PSA_TMT = 0x29, DEVICE_DESC_PARAM_PRDCT_REV = 0x2A, + DEVICE_DESC_PARAM_HPB_VER = 0x40, + DEVICE_DESC_PARAM_HPB_CONTROL = 0x42, DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP = 0x4F, DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN = 0x53, DEVICE_DESC_PARAM_WB_TYPE = 0x54, @@ -283,6 +289,10 @@ enum geometry_desc_param { GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS = 0x3E, GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR = 0x42, GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE = 0x44, + GEOMETRY_DESC_PARAM_HPB_REGION_SIZE = 0x48, + GEOMETRY_DESC_PARAM_HPB_NUMBER_LU = 0x49, + GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE = 0x4A, + GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS = 0x4B, GEOMETRY_DESC_PARAM_WB_MAX_ALLOC_UNITS = 0x4F, GEOMETRY_DESC_PARAM_WB_MAX_WB_LUNS = 0x53, GEOMETRY_DESC_PARAM_WB_BUFF_CAP_ADJ = 0x54, @@ -327,8 +337,10 @@ enum { /* Possible values for dExtendedUFSFeaturesSupport */ enum { + UFS_DEV_HPB_SUPPORT = BIT(7), UFS_DEV_WRITE_BOOSTER_SUP = BIT(8), }; +#define UFS_DEV_HPB_SUPPORT_VERSION 0x310 #define POWER_DESC_MAX_ACTV_ICC_LVLS 16 @@ -460,6 +472,41 @@ struct utp_cmd_rsp { u8 sense_data[UFS_SENSE_SIZE]; }; +struct ufshpb_active_field { + __be16 active_rgn; + __be16 active_srgn; +}; +#define HPB_ACT_FIELD_SIZE 4 + +/** + * struct utp_hpb_rsp - Response UPIU structure + * @residual_transfer_count: Residual transfer count DW-3 + * @reserved1: Reserved double words DW-4 to DW-7 + * @sense_data_len: Sense data length DW-8 U16 + * @desc_type: Descriptor type of sense data + * @additional_len: Additional length of sense data + * @hpb_op: HPB operation type + * @reserved2: Reserved field + * @active_rgn_cnt: Active region count + * @inactive_rgn_cnt: Inactive region count + * @hpb_active_field: HPB regions and subregions recommended for activation + * @hpb_inactive_field: HPB regions to be inactivated + */ +struct utp_hpb_rsp { + __be32 residual_transfer_count; + __be32 reserved1[4]; + __be16 sense_data_len; + u8 desc_type; + u8 additional_len; + u8 hpb_op; + u8 reserved2; + u8 active_rgn_cnt; + u8 inactive_rgn_cnt; + struct ufshpb_active_field hpb_active_field[2]; + __be16 hpb_inactive_field[2]; +}; +#define UTP_HPB_RSP_SIZE 40 + /** * struct utp_upiu_rsp - general upiu response structure * @header: UPIU header structure DW-0 to DW-2 @@ -470,6 +517,7 @@ struct utp_upiu_rsp { struct utp_upiu_header header; union { struct utp_cmd_rsp sr; + struct utp_hpb_rsp hr; struct utp_upiu_query qr; }; }; @@ -546,6 +594,9 @@ struct ufs_dev_info { bool b_rpm_dev_flush_capable; u8 b_presrv_uspc_en; + + u8 b_ufs_feature_sup; + u32 d_ext_ufs_feature_sup; }; /* diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 36bcbb38f631..a7a38ae23d6f 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -23,6 +23,7 @@ #include "ufs-debugfs.h" #include "ufs_bsg.h" #include "ufshcd-crypto.h" +#include "ufshpb.h" #include <asm/unaligned.h> #include <linux/blkdev.h> @@ -2652,6 +2653,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) ufshcd_comp_scsi_upiu(hba, lrbp); + ufshpb_prep(hba, lrbp); + err = ufshcd_map_sg(hba, lrbp); if (err) { lrbp->cmd = NULL; @@ -4852,6 +4855,26 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth) return scsi_change_queue_depth(sdev, depth); } +static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev) +{ + /* skip well-known LU */ + if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) || !ufshpb_is_allowed(hba)) + return; + + ufshpb_destroy_lu(hba, sdev); +} + +static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev) +{ + /* skip well-known LU */ + if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) || + !(hba->dev_info.b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT) || + !ufshpb_is_allowed(hba)) + return; + + ufshpb_init_hpb_lu(hba, sdev); +} + /** * ufshcd_slave_configure - adjust SCSI device configurations * @sdev: pointer to SCSI device @@ -4861,6 +4884,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev) struct ufs_hba *hba = shost_priv(sdev->host); struct request_queue *q = sdev->request_queue; + ufshcd_hpb_configure(hba, sdev); + blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE) blk_queue_update_dma_alignment(q, PAGE_SIZE - 1); @@ -4882,6 +4907,9 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev) struct ufs_hba *hba; hba = shost_priv(sdev->host); + + ufshcd_hpb_destroy(hba, sdev); + /* Drop the reference as it won't be needed anymore */ if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) { unsigned long flags; @@ -4989,6 +5017,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) */ pm_runtime_get_noresume(hba->dev); } + + if (scsi_status == SAM_STAT_GOOD) + ufshpb_rsp_upiu(hba, lrbp); break; case UPIU_TRANSACTION_REJECT_UPIU: /* TODO: handle Reject UPIU Response */ @@ -6961,6 +6992,8 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) * Stop the host controller and complete the requests * cleared by h/w */ + ufshpb_reset_host(hba); + ufshcd_hba_stop(hba); spin_lock_irqsave(hba->host->host_lock, flags); @@ -7393,9 +7426,14 @@ static int ufs_get_device_desc(struct ufs_hba *hba) /* getting Specification Version in big endian format */ dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 | 
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1]; + dev_info->b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT]; model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; + if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION && + (dev_info->b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) + ufshpb_get_dev_info(hba, desc_buf); + err = ufshcd_read_string_desc(hba, model_index, &dev_info->model, SD_ASCII_STD); if (err < 0) { @@ -7624,6 +7662,10 @@ static int ufshcd_device_geo_params_init(struct ufs_hba *hba) else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0) hba->dev_info.max_lu_supported = 8; + if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >= + GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS) + ufshpb_get_geo_info(hba, desc_buf); + out: kfree(desc_buf); return err; @@ -7764,6 +7806,7 @@ static int ufshcd_add_lus(struct ufs_hba *hba) } ufs_bsg_probe(hba); + ufshpb_init(hba); scsi_scan_host(hba->host); pm_runtime_put_sync(hba->dev); @@ -7907,6 +7950,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async) /* Enable Auto-Hibernate if configured */ ufshcd_auto_hibern8_enable(hba); + ufshpb_reset(hba); out: spin_lock_irqsave(hba->host->host_lock, flags); if (ret) @@ -7956,6 +8000,9 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie) static const struct attribute_group *ufshcd_driver_groups[] = { &ufs_sysfs_unit_descriptor_group, &ufs_sysfs_lun_attributes_group, +#ifdef CONFIG_SCSI_UFS_HPB + &ufs_sysfs_hpb_stat_group, +#endif NULL, }; @@ -8672,6 +8719,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) req_link_state = UIC_LINK_OFF_STATE; } + ufshpb_suspend(hba); + /* * If we can't transition into any of the low power modes * just gate the clocks. @@ -8806,6 +8855,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) hba->clk_gating.is_suspended = false; hba->dev_info.b_rpm_dev_flush_capable = false; ufshcd_release(hba); + ufshpb_resume(hba); out: if (hba->dev_info.b_rpm_dev_flush_capable) { schedule_delayed_work(&hba->rpm_dev_flush_recheck_work, @@ -8910,6 +8960,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) /* Enable Auto-Hibernate if configured */ ufshcd_auto_hibern8_enable(hba); + ufshpb_resume(hba); + if (hba->dev_info.b_rpm_dev_flush_capable) { hba->dev_info.b_rpm_dev_flush_capable = false; cancel_delayed_work(&hba->rpm_dev_flush_recheck_work); @@ -9154,6 +9206,7 @@ EXPORT_SYMBOL(ufshcd_shutdown); void ufshcd_remove(struct ufs_hba *hba) { ufs_bsg_remove(hba); + ufshpb_remove(hba); ufs_sysfs_remove_nodes(hba->dev); blk_cleanup_queue(hba->tmf_queue); blk_mq_free_tag_set(&hba->tmf_tag_set); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index ee61f821f75d..c2c0b413a792 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -645,6 +645,25 @@ struct ufs_hba_variant_params { u32 wb_flush_threshold; }; +#ifdef CONFIG_SCSI_UFS_HPB +/** + * struct ufshpb_dev_info - UFSHPB device related info + * @num_lu: the number of user logical units, used to check whether all + * LUs finished initialization + * @rgn_size: device reported HPB region size + * @srgn_size: device reported HPB sub-region size + * @slave_conf_cnt: counter to check that all LUs finished initialization + * @hpb_disabled: flag to check if HPB is disabled + */ +struct ufshpb_dev_info { + int num_lu; + int rgn_size; + int srgn_size; + atomic_t slave_conf_cnt; + bool hpb_disabled; +}; +#endif + /** * struct ufs_hba - per adapter private structure * @mmio_base: UFSHCI base register address @@ -831,7 +850,9 @@ struct ufs_hba { struct 
device bsg_dev; struct request_queue *bsg_queue; struct delayed_work rpm_dev_flush_recheck_work; - +#ifdef CONFIG_SCSI_UFS_HPB + struct ufshpb_dev_info ufshpb_dev; +#endif #ifdef CONFIG_SCSI_UFS_CRYPTO union ufs_crypto_capabilities crypto_capabilities; union ufs_crypto_cap_entry *crypto_cap_array; diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c new file mode 100644 index 000000000000..3ea9f7079189 --- /dev/null +++ b/drivers/scsi/ufs/ufshpb.c @@ -0,0 +1,2090 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Universal Flash Storage Host Performance Booster + * + * Copyright (C) 2017-2020 Samsung Electronics Co., Ltd. + * + * Authors: + * Yongmyung Lee + * Jinyoung Choi + */ + +#include <asm/unaligned.h> +#include <linux/async.h> + +#include "ufshcd.h" +#include "ufshpb.h" +#include "../sd.h" + +#define WORK_PENDING 0 +#define RESET_PENDING 1 +#define TIMEOUT_WORK_PENDING 2 +#define ACTIVATION_THRSHLD 4 /* 4 IOs */ +#define EVICTION_THRSHLD (ACTIVATION_THRSHLD << 6) /* 256 IOs */ +#define READ_TO_MS 1000 +#define READ_TO_EXPIRIES 100 +#define POLLING_INTERVAL_MS 200 + +/* memory management */ +static struct kmem_cache *ufshpb_mctx_cache; +static mempool_t *ufshpb_mctx_pool; +static mempool_t *ufshpb_page_pool; +/* A cache size of 2MB can cache ppn in the 1GB range. */ +static unsigned int ufshpb_host_map_kbytes = 2048; +static int tot_active_srgn_pages; + +static struct workqueue_struct *ufshpb_wq; + +static enum UFSHPB_MODE ufshpb_mode; + +bool ufshpb_is_allowed(struct ufs_hba *hba) +{ + return !(hba->ufshpb_dev.hpb_disabled); +} + +static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn, + struct ufshpb_subregion *srgn) +{ + return rgn->rgn_state != HPB_RGN_INACTIVE && + srgn->srgn_state == HPB_SRGN_VALID; +} + +static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd) +{ + return req_op(cmd->request) == REQ_OP_READ; +} + +static bool ufshpb_is_write_or_discard_cmd(struct scsi_cmnd *cmd) +{ + return op_is_write(req_op(cmd->request)) || + op_is_discard(req_op(cmd->request)); +} + +static bool ufshpb_is_support_chunk(int transfer_len) +{ + return transfer_len <= HPB_MULTI_CHUNK_HIGH; +} + +static bool ufshpb_is_general_lun(int lun) +{ + return lun < UFS_UPIU_MAX_UNIT_NUM_ID; +} + +static bool +ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx) +{ + if (hpb->lu_pinned_end != PINNED_NOT_SET && + rgn_idx >= hpb->lu_pinned_start && + rgn_idx <= hpb->lu_pinned_end) + return true; + + return false; +} + +static void ufshpb_kick_map_work(struct ufshpb_lu *hpb) +{ + bool ret = true; + unsigned long flags; + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn)) + ret = false; + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + if (ret) + queue_work(ufshpb_wq, &hpb->map_work); +} + +static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp, + struct utp_hpb_rsp *rsp_field) +{ + if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN || + rsp_field->desc_type != DEV_DES_TYPE || + rsp_field->additional_len != DEV_ADDITIONAL_LEN || + rsp_field->hpb_op == HPB_RSP_NONE || + rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM || + rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM || + (!rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt)) + return false; + + if (!ufshpb_is_general_lun(lrbp->lun)) { + dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n", + lrbp->lun); + return false; + } + + return true; +} + +static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev) +{ + return sdev->hostdata; +} + 
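+/* + * Per-LU HPB state (hpb->hpb_state) as used throughout this file: the LU + * becomes HPB_PRESENT once initialization completes, moves to HPB_SUSPEND + * while the host is suspended, to HPB_RESET across host resets, and to + * HPB_FAILED when the LU is destroyed. + */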
+static int ufshpb_get_state(struct ufshpb_lu *hpb) +{ + return atomic_read(&hpb->hpb_state); +} + +static void ufshpb_set_state(struct ufshpb_lu *hpb, int state) +{ + atomic_set(&hpb->hpb_state, state); +} + +static void ufshpb_set_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx, + int srgn_idx, int srgn_offset, int cnt) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + int set_bit_len; + int bitmap_len = hpb->entries_per_srgn; + +next_srgn: + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + if ((srgn_offset + cnt) > bitmap_len) + set_bit_len = bitmap_len - srgn_offset; + else + set_bit_len = cnt; + + set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); + + if (rgn->rgn_state != HPB_RGN_INACTIVE && + srgn->srgn_state == HPB_SRGN_VALID) + bitmap_set(srgn->mctx->ppn_dirty, srgn_offset, set_bit_len); + + srgn_offset = 0; + if (++srgn_idx == hpb->srgns_per_rgn) { + srgn_idx = 0; + rgn_idx++; + } + + cnt -= set_bit_len; + if (cnt > 0) + goto next_srgn; + + WARN_ON(cnt < 0); +} + +static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx, + int srgn_idx, int srgn_offset, int cnt) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + int bitmap_len = hpb->entries_per_srgn; + int bit_len; + +next_srgn: + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + if (!ufshpb_is_valid_srgn(rgn, srgn)) + return true; + + /* + * If the region state is active, mctx must be allocated. + * In this case, check whether the region has been evicted or + * the mctx allocation failed. + */ + WARN_ON(!srgn->mctx); + + if ((srgn_offset + cnt) > bitmap_len) + bit_len = bitmap_len - srgn_offset; + else + bit_len = cnt; + + if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset, + srgn_offset) < bit_len + srgn_offset) + return true; + + srgn_offset = 0; + if (++srgn_idx == hpb->srgns_per_rgn) { + srgn_idx = 0; + rgn_idx++; + } + + cnt -= bit_len; + if (cnt > 0) + goto next_srgn; + + return false; +} + +static inline bool is_rgn_dirty(struct ufshpb_region *rgn) +{ + return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); +} + +static u64 ufshpb_get_ppn(struct ufshpb_lu *hpb, + struct ufshpb_map_ctx *mctx, int pos, int *error) +{ + u64 *ppn_table; + struct page *page; + int index, offset; + + index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE); + offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE); + + page = mctx->m_page[index]; + if (unlikely(!page)) { + *error = -ENOMEM; + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "error. cannot find page in mctx\n"); + return 0; + } + + ppn_table = page_address(page); + if (unlikely(!ppn_table)) { + *error = -ENOMEM; + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "error. 
cannot get ppn_table\n"); + return 0; + } + + return ppn_table[offset]; +} + +static void +ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx, + int *srgn_idx, int *offset) +{ + int rgn_offset; + + *rgn_idx = lpn >> hpb->entries_per_rgn_shift; + rgn_offset = lpn & hpb->entries_per_rgn_mask; + *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift; + *offset = rgn_offset & hpb->entries_per_srgn_mask; +} + +static void +ufshpb_set_hpb_read_to_upiu(struct ufshpb_lu *hpb, struct ufshcd_lrb *lrbp, + u32 lpn, u64 ppn, unsigned int transfer_len) +{ + unsigned char *cdb = lrbp->ucd_req_ptr->sc.cdb; + + cdb[0] = UFSHPB_READ; + + put_unaligned_be64(ppn, &cdb[6]); + cdb[14] = transfer_len; +} + +static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx, + int srgn_idx) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + list_del_init(&rgn->list_inact_rgn); + + if (list_empty(&srgn->list_act_srgn)) + list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn); +} + +/* + * This function will set up HPB read command using host-side L2P map data. + * In HPB v1.0, the maximum size of an HPB read command is 4KB. + */ +void ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + struct ufshpb_lu *hpb; + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + struct scsi_cmnd *cmd = lrbp->cmd; + u32 lpn; + u64 ppn; + unsigned long flags; + int transfer_len, rgn_idx, srgn_idx, srgn_offset; + int err = 0; + u64 reads; + + hpb = ufshpb_get_hpb_data(cmd->device); + if (!hpb) + return; + + if (ufshpb_get_state(hpb) != HPB_PRESENT) { + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "%s: ufshpb state is not PRESENT", __func__); + return; + } + + if (!ufshpb_is_write_or_discard_cmd(cmd) && + !ufshpb_is_read_cmd(cmd)) + return; + + transfer_len = sectors_to_logical(cmd->device, blk_rq_sectors(cmd->request)); + if (unlikely(!transfer_len)) + return; + + lpn = sectors_to_logical(cmd->device, blk_rq_pos(cmd->request)); + ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset); + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + /* If the command type is WRITE or DISCARD, set the bitmap as dirty */ + if (ufshpb_is_write_or_discard_cmd(cmd)) { + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + ufshpb_set_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset, + transfer_len); + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + if (ufshpb_mode == HPB_HOST_CONTROL) + atomic64_set(&rgn->reads, 0); + + return; + } + + if (ufshpb_mode == HPB_HOST_CONTROL) + reads = atomic64_inc_return(&rgn->reads); + + if (!ufshpb_is_support_chunk(transfer_len)) + return; + + if (ufshpb_mode == HPB_HOST_CONTROL) { + /* + * in host control mode, reads are the main source for + * activation trials. 
+ */ + if (reads == ACTIVATION_THRSHLD || + test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) { + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_update_active_info(hpb, rgn_idx, srgn_idx); + hpb->stats.rb_active_cnt++; + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, + "activate region %d-%d\n", rgn_idx, srgn_idx); + } + + /* keep those counters normalized */ + if (reads > hpb->entries_per_srgn && + !test_and_set_bit(WORK_PENDING, &hpb->work_data_bits)) + schedule_work(&hpb->ufshpb_normalization_work); + } + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset, + transfer_len)) { + hpb->stats.miss_cnt++; + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return; + } + + ppn = ufshpb_get_ppn(hpb, srgn->mctx, srgn_offset, &err); + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + if (unlikely(err)) { + /* + * In this case, the region state is active, + * but the ppn table is not allocated. + * The ppn table must always be allocated while + * the region is in active state. + */ + WARN_ON(true); + dev_err(hba->dev, "ufshpb_get_ppn failed. err %d\n", err); + return; + } + + ufshpb_set_hpb_read_to_upiu(hpb, lrbp, lpn, ppn, transfer_len); + + hpb->stats.hit_cnt++; +} + +static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb, + int rgn_idx, enum req_opf dir) +{ + struct ufshpb_req *rq; + struct request *req; + + rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL); + if (!rq) + return NULL; + + req = blk_get_request(hpb->sdev_ufs_lu->request_queue, + dir, 0); + if (IS_ERR(req)) + goto free_rq; + + rq->hpb = hpb; + rq->req = req; + rq->rgn_idx = rgn_idx; + + return rq; + +free_rq: + kmem_cache_free(hpb->map_req_cache, rq); + return NULL; +} + +static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq) +{ + blk_put_request(rq->req); + kmem_cache_free(hpb->map_req_cache, rq); +} + +static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_req *map_req; + struct bio *bio; + + map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_SCSI_IN); + if (!map_req) + return NULL; + + bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn); + if (!bio) { + ufshpb_put_req(hpb, map_req); + return NULL; + } + + map_req->bio = bio; + map_req->srgn_idx = srgn->srgn_idx; + map_req->mctx = srgn->mctx; + + return map_req; +} + +static void ufshpb_put_map_req(struct ufshpb_lu *hpb, + struct ufshpb_req *map_req) +{ + bio_put(map_req->bio); + ufshpb_put_req(hpb, map_req); +} + +static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_region *rgn; + + WARN_ON(!srgn->mctx); + bitmap_zero(srgn->mctx->ppn_dirty, hpb->entries_per_srgn); + rgn = hpb->rgn_tbl + srgn->rgn_idx; + clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); + return 0; +} + +static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + int srgn_idx; + + rgn = hpb->rgn_tbl + rgn_idx; + + for_each_sub_region(rgn, srgn_idx, srgn) + list_del_init(&srgn->list_act_srgn); + + if (list_empty(&rgn->list_inact_rgn)) + list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn); +} + +static void ufshpb_activate_subregion(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_region *rgn; + + /* + * If there is no mctx in the subregion + * after the HPB_READ_BUFFER I/O has completed, the region to which the + * subregion belongs was evicted. 
+ * Make sure the region is not evicted while I/O is in progress + */ + WARN_ON(!srgn->mctx); + + rgn = hpb->rgn_tbl + srgn->rgn_idx; + + if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "region %d subregion %d evicted\n", + srgn->rgn_idx, srgn->srgn_idx); + srgn->srgn_state = HPB_SRGN_INVALID; + return; + } + srgn->srgn_state = HPB_SRGN_VALID; +} + +static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error) +{ + struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data; + + ufshpb_put_req(umap_req->hpb, umap_req); +} + +static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error) +{ + struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data; + struct ufshpb_lu *hpb = map_req->hpb; + struct ufshpb_subregion *srgn; + unsigned long flags; + + srgn = hpb->rgn_tbl[map_req->rgn_idx].srgn_tbl + + map_req->srgn_idx; + + ufshpb_clear_dirty_bitmap(hpb, srgn); + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + ufshpb_activate_subregion(hpb, srgn); + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + ufshpb_put_map_req(map_req->hpb, map_req); +} + +static void ufshpb_set_write_buf_cmd(unsigned char *cdb, int rgn_idx) +{ + cdb[0] = UFSHPB_WRITE_BUFFER; + cdb[1] = UFSHPB_WRITE_BUFFER_ID; + put_unaligned_be16(rgn_idx, &cdb[2]); + cdb[9] = 0x00; +} + +static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx, + int srgn_idx, int srgn_mem_size) +{ + cdb[0] = UFSHPB_READ_BUFFER; + cdb[1] = UFSHPB_READ_BUFFER_ID; + + put_unaligned_be16(rgn_idx, &cdb[2]); + put_unaligned_be16(srgn_idx, &cdb[4]); + put_unaligned_be24(srgn_mem_size, &cdb[6]); + + cdb[9] = 0x00; +} + +static int ufshpb_execute_umap_req(struct ufshpb_lu *hpb, + struct ufshpb_req *umap_req) +{ + struct request_queue *q; + struct request *req; + struct scsi_request *rq; + + q = hpb->sdev_ufs_lu->request_queue; + req = umap_req->req; + req->timeout = 0; + req->end_io_data = (void *)umap_req; + rq = scsi_req(req); + ufshpb_set_write_buf_cmd(rq->cmd, umap_req->rgn_idx); + rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH; + + blk_execute_rq_nowait(q, NULL, req, 1, ufshpb_umap_req_compl_fn); + + hpb->stats.umap_req_cnt++; + return 0; +} + +static int ufshpb_execute_map_req(struct ufshpb_lu *hpb, + struct ufshpb_req *map_req) +{ + struct request_queue *q; + struct request *req; + struct scsi_request *rq; + int ret = 0; + int i; + + q = hpb->sdev_ufs_lu->request_queue; + for (i = 0; i < hpb->pages_per_srgn; i++) { + ret = bio_add_pc_page(q, map_req->bio, map_req->mctx->m_page[i], + PAGE_SIZE, 0); + if (ret != PAGE_SIZE) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "bio_add_pc_page fail %d - %d\n", + map_req->rgn_idx, map_req->srgn_idx); + return ret; + } + } + + req = map_req->req; + + blk_rq_append_bio(req, &map_req->bio); + + req->end_io_data = map_req; + + rq = scsi_req(req); + ufshpb_set_read_buf_cmd(rq->cmd, map_req->rgn_idx, + map_req->srgn_idx, hpb->srgn_mem_size); + rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH; + + blk_execute_rq_nowait(q, NULL, req, 1, ufshpb_map_req_compl_fn); + + hpb->stats.map_req_cnt++; + return 0; +} + +static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb) +{ + struct ufshpb_map_ctx *mctx; + int i, j; + + mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL); + if (!mctx) + return NULL; + + mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL); + if (!mctx->m_page) + goto release_mctx; + + mctx->ppn_dirty = bitmap_zalloc(hpb->entries_per_srgn, GFP_KERNEL); + if (!mctx->ppn_dirty) + goto release_m_page; + + 
for (i = 0; i < hpb->pages_per_srgn; i++) { + mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL); + if (!mctx->m_page[i]) { + for (j = 0; j < i; j++) + mempool_free(mctx->m_page[j], ufshpb_page_pool); + goto release_ppn_dirty; + } + clear_page(page_address(mctx->m_page[i])); + } + + return mctx; + +release_ppn_dirty: + bitmap_free(mctx->ppn_dirty); +release_m_page: + kmem_cache_free(hpb->m_page_cache, mctx->m_page); +release_mctx: + mempool_free(mctx, ufshpb_mctx_pool); + return NULL; +} + +static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb, + struct ufshpb_map_ctx *mctx) +{ + int i; + + for (i = 0; i < hpb->pages_per_srgn; i++) + mempool_free(mctx->m_page[i], ufshpb_page_pool); + + bitmap_free(mctx->ppn_dirty); + kmem_cache_free(hpb->m_page_cache, mctx->m_page); + mempool_free(mctx, ufshpb_mctx_pool); +} + +static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + struct ufshpb_subregion *srgn; + int srgn_idx; + + for_each_sub_region(rgn, srgn_idx, srgn) + if (srgn->srgn_state == HPB_SRGN_ISSUED) + return -EPERM; + + return 0; +} + +static void ufshpb_read_to_handler(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct ufshpb_lu *hpb; + struct victim_select_info *lru_info; + struct ufshpb_region *rgn, *next_rgn; + unsigned long flags; + LIST_HEAD(expired_list); + + hpb = container_of(dwork, struct ufshpb_lu, ufshpb_read_to_work); + + if (test_and_set_bit(TIMEOUT_WORK_PENDING, &hpb->work_data_bits)) + return; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + + lru_info = &hpb->lru_info; + + list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn, + list_lru_rgn) { + bool timedout = ktime_after(ktime_get(), rgn->read_timeout); + bool dirty, expired; + + if (!timedout) + continue; + + dirty = is_rgn_dirty(rgn); + expired = atomic_dec_and_test(&rgn->read_timeout_expiries); + + if (dirty || expired) + list_add(&rgn->list_expired_rgn, &expired_list); + else + rgn->read_timeout = ktime_add_ms(ktime_get(), + READ_TO_MS); + } + + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + list_for_each_entry_safe(rgn, next_rgn, &expired_list, + list_expired_rgn) { + list_del_init(&rgn->list_expired_rgn); + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_update_inactive_info(hpb, rgn->rgn_idx); + hpb->stats.rb_inactive_cnt++; + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + } + + clear_bit(TIMEOUT_WORK_PENDING, &hpb->work_data_bits); + + schedule_delayed_work(&hpb->ufshpb_read_to_work, + msecs_to_jiffies(POLLING_INTERVAL_MS)); +} + +static void ufshpb_add_lru_info(struct victim_select_info *lru_info, + struct ufshpb_region *rgn) +{ + rgn->rgn_state = HPB_RGN_ACTIVE; + list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn); + atomic_inc(&lru_info->active_cnt); + if (ufshpb_mode == HPB_HOST_CONTROL) { + rgn->read_timeout = ktime_add_ms(ktime_get(), READ_TO_MS); + atomic_set(&rgn->read_timeout_expiries, READ_TO_EXPIRIES); + } +} + +static void ufshpb_hit_lru_info(struct victim_select_info *lru_info, + struct ufshpb_region *rgn) +{ + list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn); +} + +static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb) +{ + struct victim_select_info *lru_info = &hpb->lru_info; + struct ufshpb_region *rgn, *victim_rgn = NULL; + + list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) { + WARN_ON(!rgn); + if (ufshpb_check_srgns_issue_state(hpb, rgn)) + continue; + + /* + * in host control mode, verify that the exiting region + * 
has fewer reads + */ + if (ufshpb_mode == HPB_HOST_CONTROL && + atomic64_read(&rgn->reads) > (EVICTION_THRSHLD >> 1)) + continue; + + victim_rgn = rgn; + break; + } + + return victim_rgn; +} + +static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info, + struct ufshpb_region *rgn) +{ + list_del_init(&rgn->list_lru_rgn); + rgn->rgn_state = HPB_RGN_INACTIVE; + atomic_dec(&lru_info->active_cnt); +} + +static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + if (srgn->srgn_state != HPB_SRGN_UNUSED) { + ufshpb_put_map_ctx(hpb, srgn->mctx); + srgn->srgn_state = HPB_SRGN_UNUSED; + srgn->mctx = NULL; + } +} + +static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + struct ufshpb_req *umap_req; + + umap_req = ufshpb_get_req(hpb, rgn->rgn_idx, REQ_OP_SCSI_OUT); + if (!umap_req) + return -ENOMEM; + + if (ufshpb_execute_umap_req(hpb, umap_req)) + goto free_umap_req; + + return 0; + +free_umap_req: + ufshpb_put_req(hpb, umap_req); + return -EAGAIN; +} + +static void __ufshpb_evict_region(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + struct victim_select_info *lru_info; + struct ufshpb_subregion *srgn; + int srgn_idx; + + + if (ufshpb_mode == HPB_HOST_CONTROL && + ufshpb_issue_umap_req(hpb, rgn)) + return; + + lru_info = &hpb->lru_info; + + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx); + + ufshpb_cleanup_lru_info(lru_info, rgn); + + if (ufshpb_mode == HPB_HOST_CONTROL) + atomic64_set(&rgn->reads, 0); + + for_each_sub_region(rgn, srgn_idx, srgn) + ufshpb_purge_active_subregion(hpb, srgn); +} + +static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + if (rgn->rgn_state == HPB_RGN_PINNED) { + dev_warn(&hpb->sdev_ufs_lu->sdev_dev, + "pinned region cannot drop-out. region %d\n", + rgn->rgn_idx); + goto out; + } + if (!list_empty(&rgn->list_lru_rgn)) { + if (ufshpb_check_srgns_issue_state(hpb, rgn)) { + ret = -EBUSY; + goto out; + } + + __ufshpb_evict_region(hpb, rgn); + } +out: + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return ret; +} + +static int ufshpb_issue_map_req(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_req *map_req; + unsigned long flags; + int ret; + int err = -EAGAIN; + bool alloc_required = false; + enum HPB_SRGN_STATE state = HPB_SRGN_INVALID; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + + if (ufshpb_get_state(hpb) != HPB_PRESENT) { + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "%s: ufshpb state is not PRESENT\n", __func__); + goto unlock_out; + } + + if ((rgn->rgn_state == HPB_RGN_INACTIVE) && + (srgn->srgn_state == HPB_SRGN_INVALID)) { + err = 0; + goto unlock_out; + } + + if (srgn->srgn_state == HPB_SRGN_UNUSED) + alloc_required = true; + + /* + * If the subregion is already in the ISSUED state, + * a specific event (e.g., GC or wear-leveling) occurred in + * the device and an HPB response for map loading was received. + * In this case, after finishing the HPB_READ_BUFFER, + * the next HPB_READ_BUFFER is performed again to obtain the latest + * map data. 
+ */ + if (srgn->srgn_state == HPB_SRGN_ISSUED) + goto unlock_out; + + srgn->srgn_state = HPB_SRGN_ISSUED; + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + if (alloc_required) { + WARN_ON(srgn->mctx); + srgn->mctx = ufshpb_get_map_ctx(hpb); + if (!srgn->mctx) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "get map_ctx failed. region %d - %d\n", + rgn->rgn_idx, srgn->srgn_idx); + state = HPB_SRGN_UNUSED; + goto change_srgn_state; + } + } + + map_req = ufshpb_get_map_req(hpb, srgn); + if (!map_req) + goto change_srgn_state; + + + ret = ufshpb_execute_map_req(hpb, map_req); + if (ret) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "%s: issue map_req failed: %d, region %d - %d\n", + __func__, ret, srgn->rgn_idx, srgn->srgn_idx); + goto free_map_req; + } + return 0; + +free_map_req: + ufshpb_put_map_req(hpb, map_req); +change_srgn_state: + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + srgn->srgn_state = state; +unlock_out: + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return err; +} + +static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) +{ + struct ufshpb_region *victim_rgn = NULL; + struct victim_select_info *lru_info = &hpb->lru_info; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + /* + * If region belongs to lru_list, just move the region + * to the front of lru list. because the state of the region + * is already active-state + */ + if (!list_empty(&rgn->list_lru_rgn)) { + ufshpb_hit_lru_info(lru_info, rgn); + goto out; + } + + if (rgn->rgn_state == HPB_RGN_INACTIVE) { + if (atomic_read(&lru_info->active_cnt) == + lru_info->max_lru_active_cnt) { + /* + * If the maximum number of active regions + * is exceeded, evict the least recently used region. + * This case may occur when the device responds + * to the eviction information late. + * It is okay to evict the least recently used region, + * because the device could detect this region + * by not issuing HPB_READ + */ + + /* + * in host control mode, verify that the entering + * region has enough reads + */ + if (ufshpb_mode == HPB_HOST_CONTROL && + atomic64_read(&rgn->reads) < EVICTION_THRSHLD) { + ret = -EACCES; + goto out; + } + + victim_rgn = ufshpb_victim_lru_info(hpb); + if (!victim_rgn) { + dev_warn(&hpb->sdev_ufs_lu->sdev_dev, + "cannot get victim region error\n"); + ret = -ENOMEM; + goto out; + } + + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, + "LRU full (%d), choose victim %d\n", + atomic_read(&lru_info->active_cnt), + victim_rgn->rgn_idx); + __ufshpb_evict_region(hpb, victim_rgn); + } + + /* + * When a region is added to lru_info list_head, + * it is guaranteed that the subregion has been + * assigned all mctx. If failed, try to receive mctx again + * without being added to lru_info list_head + */ + ufshpb_add_lru_info(lru_info, rgn); + } +out: + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return ret; +} + +static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb, + struct utp_hpb_rsp *rsp_field) +{ + int i, rgn_idx, srgn_idx; + + BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE); + /* + * If the active region and the inactive region are the same, + * we will inactivate this region. 
The device could check this (region inactivated) and + will respond with the proper active region information + */ + spin_lock(&hpb->rsp_list_lock); + for (i = 0; i < rsp_field->active_rgn_cnt; i++) { + struct ufshpb_region *rgn; + + rgn_idx = + be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn); + srgn_idx = + be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn); + + rgn = hpb->rgn_tbl + rgn_idx; + if (ufshpb_mode == HPB_HOST_CONTROL && + (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) { + /* + * in host control mode, subregion activation + * recommendations are only allowed for active regions. + * Also, ignore recommendations for dirty regions - the + * host will make decisions concerning those by itself + */ + continue; + } + + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, + "activate(%d) region %d - %d\n", i, rgn_idx, srgn_idx); + ufshpb_update_active_info(hpb, rgn_idx, srgn_idx); + hpb->stats.rb_active_cnt++; + } + + if (ufshpb_mode == HPB_HOST_CONTROL) { + /* + * in host control mode the device is not allowed to inactivate + * regions + */ + goto out_unlock; + } + + for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) { + rgn_idx = be16_to_cpu(rsp_field->hpb_inactive_field[i]); + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, + "inactivate(%d) region %d\n", i, rgn_idx); + ufshpb_update_inactive_info(hpb, rgn_idx); + hpb->stats.rb_inactive_cnt++; + } + +out_unlock: + spin_unlock(&hpb->rsp_list_lock); + + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n", + rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt); + + queue_work(ufshpb_wq, &hpb->map_work); +} + +/* + * This function will parse recommended active subregion information in sense + * data field of response UPIU with SAM_STAT_GOOD state. + */ +void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device); + struct utp_hpb_rsp *rsp_field; + int data_seg_len; + + if (!hpb) + return; + + if (ufshpb_get_state(hpb) != HPB_PRESENT) { + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "%s: ufshpb state is not PRESENT\n", __func__); + return; + } + + data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) + & MASK_RSP_UPIU_DATA_SEG_LEN; + + /* To flush the remaining rsp_list, we queue the map_work task */ + if (!data_seg_len) { + if (!ufshpb_is_general_lun(lrbp->lun)) + return; + + ufshpb_kick_map_work(hpb); + return; + } + + /* Check HPB_UPDATE_ALERT */ + if (!(lrbp->ucd_rsp_ptr->header.dword_2 & + UPIU_HEADER_DWORD(0, 2, 0, 0))) + return; + + rsp_field = &lrbp->ucd_rsp_ptr->hr; + BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE); + + if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field)) + return; + + hpb->stats.rb_noti_cnt++; + + switch (rsp_field->hpb_op) { + case HPB_RSP_NONE: + /* nothing to do */ + break; + case HPB_RSP_REQ_REGION_UPDATE: + WARN_ON(data_seg_len != DEV_DATA_SEG_LEN); + ufshpb_rsp_req_region_update(hpb, rsp_field); + break; + case HPB_RSP_DEV_RESET: + dev_warn(&hpb->sdev_ufs_lu->sdev_dev, + "UFS device lost HPB information during PM.\n"); + + if (ufshpb_mode == HPB_HOST_CONTROL) { + struct ufshpb_lu *h; + struct scsi_device *sdev; + + shost_for_each_device(sdev, hba->host) { + h = sdev->hostdata; + if (!h) + continue; + + if (test_and_set_bit(RESET_PENDING, + &h->work_data_bits)) + continue; + + schedule_work(&h->ufshpb_lun_reset_work); + } + } + + break; + default: + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "hpb_op is not available: %d\n", + rsp_field->hpb_op); + break; + } +} + +static void ufshpb_add_active_list(struct 
ufshpb_lu *hpb, + struct ufshpb_region *rgn, + struct ufshpb_subregion *srgn) +{ + if (!list_empty(&rgn->list_inact_rgn)) + return; + + if (!list_empty(&srgn->list_act_srgn)) { + list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn); + return; + } + + list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn); +} + +static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, + struct list_head *pending_list) +{ + struct ufshpb_subregion *srgn; + int srgn_idx; + + if (!list_empty(&rgn->list_inact_rgn)) + return; + + for_each_sub_region(rgn, srgn_idx, srgn) + if (!list_empty(&srgn->list_act_srgn)) + return; + + list_add_tail(&rgn->list_inact_rgn, pending_list); +} + +static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn, + struct ufshpb_subregion, + list_act_srgn))) { + if (ufshpb_get_state(hpb) == HPB_SUSPEND) + break; + + list_del_init(&srgn->list_act_srgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + rgn = hpb->rgn_tbl + srgn->rgn_idx; + ret = ufshpb_add_region(hpb, rgn); + if (ret) { + if (ret == -EACCES) { + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + continue; + } + goto add_region_failed; + } + + ret = ufshpb_issue_map_req(hpb, rgn, srgn); + if (ret) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "issue map_req failed. ret %d, region %d - %d\n", + ret, rgn->rgn_idx, srgn->srgn_idx); + goto active_failed; + } + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + } + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + return; + +add_region_failed: + if (ufshpb_mode == HPB_HOST_CONTROL) { + /* + * In host control mode, it is common that eviction trials + * fail, either because the entering region didn't have enough + * reads, or a poor-performing exiting region wasn't found. + * No need to re-insert those regions to the "to-be-activated" + * list. 
+ */ + return; + } + +active_failed: + dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n", + rgn->rgn_idx, srgn->srgn_idx); + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_add_active_list(hpb, rgn, srgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); +} + +static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn; + unsigned long flags; + int ret; + LIST_HEAD(pending_list); + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn, + struct ufshpb_region, + list_inact_rgn))) { + if (ufshpb_get_state(hpb) == HPB_SUSPEND) + break; + + list_del_init(&rgn->list_inact_rgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + ret = ufshpb_evict_region(hpb, rgn); + if (ret) { + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_add_pending_evict_list(hpb, rgn, &pending_list); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + } + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + } + + list_splice(&pending_list, &hpb->lh_inact_rgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); +} + +static void ufshpb_reset_work_handler(struct work_struct *work) +{ + struct ufshpb_lu *hpb; + struct victim_select_info *lru_info; + struct ufshpb_region *rgn, *next_rgn; + unsigned long flags; + + hpb = container_of(work, struct ufshpb_lu, ufshpb_lun_reset_work); + + lru_info = &hpb->lru_info; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + + list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn, + list_lru_rgn) + set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags); + + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + clear_bit(RESET_PENDING, &hpb->work_data_bits); +} + +static void ufshpb_normalization_work_handler(struct work_struct *work) +{ + struct ufshpb_lu *hpb; + int rgn_idx; + + hpb = container_of(work, struct ufshpb_lu, ufshpb_normalization_work); + + for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { + struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx; + u64 reads = atomic64_read(&rgn->reads); + + if (reads) + atomic64_set(&rgn->reads, reads >> 1); + + if (rgn->rgn_state != HPB_RGN_ACTIVE || + atomic64_read(&rgn->reads)) + continue; + + /* if region is active but has no reads - inactivate it */ + spin_lock(&hpb->rsp_list_lock); + ufshpb_update_inactive_info(hpb, rgn->rgn_idx); + spin_unlock(&hpb->rsp_list_lock); + } + + clear_bit(WORK_PENDING, &hpb->work_data_bits); +} + +static void ufshpb_map_work_handler(struct work_struct *work) +{ + struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work); + + if (ufshpb_get_state(hpb) != HPB_PRESENT) { + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "%s: ufshpb state is not PRESENT\n", __func__); + return; + } + + ufshpb_run_inactive_region_list(hpb); + ufshpb_run_active_subregion_list(hpb); +} + +/* + * this function doesn't need to hold any lock because it is only + * called at init time (rgn_state_lock, rsp_list_lock, etc..) 
+ */ +static int ufshpb_init_pinned_active_region(struct ufs_hba *hba, + struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + struct ufshpb_subregion *srgn; + int srgn_idx, i; + int err = 0; + + for_each_sub_region(rgn, srgn_idx, srgn) { + srgn->mctx = ufshpb_get_map_ctx(hpb); + srgn->srgn_state = HPB_SRGN_INVALID; + if (!srgn->mctx) { + err = -ENOMEM; + dev_err(hba->dev, + "alloc mctx for pinned region failed\n"); + goto release; + } + + list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn); + } + + rgn->rgn_state = HPB_RGN_PINNED; + return 0; + +release: + for (i = 0; i < srgn_idx; i++) { + srgn = rgn->srgn_tbl + i; + ufshpb_put_map_ctx(hpb, srgn->mctx); + } + return err; +} + +static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + int srgn_idx; + struct ufshpb_subregion *srgn; + + for_each_sub_region(rgn, srgn_idx, srgn) { + INIT_LIST_HEAD(&srgn->list_act_srgn); + + srgn->rgn_idx = rgn->rgn_idx; + srgn->srgn_idx = srgn_idx; + srgn->srgn_state = HPB_SRGN_UNUSED; + } +} + +static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, + int srgn_cnt) +{ + rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion), + GFP_KERNEL); + if (!rgn->srgn_tbl) + return -ENOMEM; + + rgn->srgn_cnt = srgn_cnt; + return 0; +} + +static void ufshpb_lu_parameter_init(struct ufs_hba *hba, + struct ufshpb_lu *hpb, + struct ufshpb_dev_info *hpb_dev_info, + struct ufshpb_lu_info *hpb_lu_info) +{ + u32 entries_per_rgn; + u64 rgn_mem_size, tmp; + + hpb->lu_pinned_start = hpb_lu_info->pinned_start; + hpb->lu_pinned_end = hpb_lu_info->num_pinned ? + (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1) + : PINNED_NOT_SET; + hpb->lru_info.max_lru_active_cnt = + hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned; + + rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT + * HPB_ENTRY_SIZE; + do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE); + hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size) + * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE; + + tmp = rgn_mem_size; + do_div(tmp, HPB_ENTRY_SIZE); + entries_per_rgn = (u32)tmp; + hpb->entries_per_rgn_shift = ilog2(entries_per_rgn); + hpb->entries_per_rgn_mask = entries_per_rgn - 1; + + hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE; + hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn); + hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1; + + tmp = rgn_mem_size; + do_div(tmp, hpb->srgn_mem_size); + hpb->srgns_per_rgn = (int)tmp; + + hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks, + entries_per_rgn); + hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks, + (hpb->srgn_mem_size / HPB_ENTRY_SIZE)); + + hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE); +} + +static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn_table, *rgn; + int rgn_idx, i; + int ret = 0; + + rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region), + GFP_KERNEL); + if (!rgn_table) + return -ENOMEM; + + hpb->rgn_tbl = rgn_table; + + for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { + int srgn_cnt = hpb->srgns_per_rgn; + + rgn = rgn_table + rgn_idx; + rgn->rgn_idx = rgn_idx; + + INIT_LIST_HEAD(&rgn->list_inact_rgn); + INIT_LIST_HEAD(&rgn->list_lru_rgn); + INIT_LIST_HEAD(&rgn->list_expired_rgn); + + if (rgn_idx == hpb->rgns_per_lu - 1) + srgn_cnt = ((hpb->srgns_per_lu - 1) % + hpb->srgns_per_rgn) + 1; + + ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt); + if 
(ret) + goto release_srgn_table; + ufshpb_init_subregion_tbl(hpb, rgn); + + if (ufshpb_is_pinned_region(hpb, rgn_idx)) { + ret = ufshpb_init_pinned_active_region(hba, hpb, rgn); + if (ret) + goto release_srgn_table; + } else { + rgn->rgn_state = HPB_RGN_INACTIVE; + } + + rgn->rgn_flags = 0; + } + + return 0; + +release_srgn_table: + for (i = 0; i < rgn_idx; i++) { + rgn = rgn_table + i; + if (rgn->srgn_tbl) + kvfree(rgn->srgn_tbl); + } + kvfree(rgn_table); + return ret; +} + +static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + int srgn_idx; + struct ufshpb_subregion *srgn; + + for_each_sub_region(rgn, srgn_idx, srgn) + if (srgn->srgn_state != HPB_SRGN_UNUSED) { + srgn->srgn_state = HPB_SRGN_UNUSED; + ufshpb_put_map_ctx(hpb, srgn->mctx); + } +} + +static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb) +{ + int rgn_idx; + + for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { + struct ufshpb_region *rgn; + + rgn = hpb->rgn_tbl + rgn_idx; + if (rgn->rgn_state != HPB_RGN_INACTIVE) { + rgn->rgn_state = HPB_RGN_INACTIVE; + + ufshpb_destroy_subregion_tbl(hpb, rgn); + } + + kvfree(rgn->srgn_tbl); + } + + kvfree(hpb->rgn_tbl); +} + +/* SYSFS functions */ +#define ufshpb_sysfs_attr_show_func(__name) \ +static ssize_t __name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct scsi_device *sdev = to_scsi_device(dev); \ + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \ + \ + return sysfs_emit(buf, "%llu\n", hpb->stats.__name); \ +} \ +\ +static DEVICE_ATTR_RO(__name) + +ufshpb_sysfs_attr_show_func(hit_cnt); +ufshpb_sysfs_attr_show_func(miss_cnt); +ufshpb_sysfs_attr_show_func(rb_noti_cnt); +ufshpb_sysfs_attr_show_func(rb_active_cnt); +ufshpb_sysfs_attr_show_func(rb_inactive_cnt); +ufshpb_sysfs_attr_show_func(map_req_cnt); +ufshpb_sysfs_attr_show_func(umap_req_cnt); + +static struct attribute *hpb_dev_attrs[] = { + &dev_attr_hit_cnt.attr, + &dev_attr_miss_cnt.attr, + &dev_attr_rb_noti_cnt.attr, + &dev_attr_rb_active_cnt.attr, + &dev_attr_rb_inactive_cnt.attr, + &dev_attr_map_req_cnt.attr, + &dev_attr_umap_req_cnt.attr, + NULL, +}; + +struct attribute_group ufs_sysfs_hpb_stat_group = { + .name = "hpb_sysfs", + .attrs = hpb_dev_attrs, +}; + +static void ufshpb_stat_init(struct ufshpb_lu *hpb) +{ + hpb->stats.hit_cnt = 0; + hpb->stats.miss_cnt = 0; + hpb->stats.rb_noti_cnt = 0; + hpb->stats.rb_active_cnt = 0; + hpb->stats.rb_inactive_cnt = 0; + hpb->stats.map_req_cnt = 0; + hpb->stats.umap_req_cnt = 0; +} + +static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb) +{ + int ret; + + spin_lock_init(&hpb->rgn_state_lock); + spin_lock_init(&hpb->rsp_list_lock); + + INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn); + INIT_LIST_HEAD(&hpb->lh_act_srgn); + INIT_LIST_HEAD(&hpb->lh_inact_rgn); + INIT_LIST_HEAD(&hpb->list_hpb_lu); + + INIT_WORK(&hpb->map_work, ufshpb_map_work_handler); + if (ufshpb_mode == HPB_HOST_CONTROL) { + INIT_WORK(&hpb->ufshpb_normalization_work, + ufshpb_normalization_work_handler); + INIT_WORK(&hpb->ufshpb_lun_reset_work, + ufshpb_reset_work_handler); + INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work, + ufshpb_read_to_handler); + } + + hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache", + sizeof(struct ufshpb_req), 0, 0, NULL); + if (!hpb->map_req_cache) { + dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail", + hpb->lun); + return -ENOMEM; + } + + hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache", + sizeof(struct page *) * hpb->pages_per_srgn, + 0, 0, NULL); + if 
+		dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail\n",
+			hpb->lun);
+		ret = -ENOMEM;
+		goto release_req_cache;
+	}
+
+	ret = ufshpb_alloc_region_tbl(hba, hpb);
+	if (ret)
+		goto release_m_page_cache;
+
+	ufshpb_stat_init(hpb);
+
+	if (ufshpb_mode == HPB_HOST_CONTROL)
+		schedule_delayed_work(&hpb->ufshpb_read_to_work,
+				      msecs_to_jiffies(POLLING_INTERVAL_MS));
+
+	return 0;
+
+release_m_page_cache:
+	kmem_cache_destroy(hpb->m_page_cache);
+release_req_cache:
+	kmem_cache_destroy(hpb->map_req_cache);
+	return ret;
+}
+
+static struct ufshpb_lu *ufshpb_alloc_hpb_lu(struct ufs_hba *hba, int lun,
+					     struct ufshpb_dev_info *hpb_dev_info,
+					     struct ufshpb_lu_info *hpb_lu_info)
+{
+	struct ufshpb_lu *hpb;
+	int ret;
+
+	hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
+	if (!hpb)
+		return NULL;
+
+	hpb->lun = lun;
+
+	ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
+
+	ret = ufshpb_lu_hpb_init(hba, hpb);
+	if (ret) {
+		dev_err(hba->dev, "hpb lu init failed. ret %d\n", ret);
+		goto release_hpb;
+	}
+
+	return hpb;
+
+release_hpb:
+	kfree(hpb);
+	return NULL;
+}
+
+static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
+{
+	struct ufshpb_region *rgn, *next_rgn;
+	struct ufshpb_subregion *srgn, *next_srgn;
+	unsigned long flags;
+
+	/*
+	 * If a device reset occurred, the remaining HPB region information
+	 * may be stale. Discard the HPB response lists left over from
+	 * before the reset to prevent unnecessary work.
+	 */
+	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+	list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
+				 list_inact_rgn)
+		list_del_init(&rgn->list_inact_rgn);
+
+	list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
+				 list_act_srgn)
+		list_del_init(&srgn->list_act_srgn);
+	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+}
+
+static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
+{
+	if (ufshpb_mode == HPB_HOST_CONTROL) {
+		cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
+		cancel_work_sync(&hpb->ufshpb_lun_reset_work);
+		cancel_work_sync(&hpb->ufshpb_normalization_work);
+	}
+	cancel_work_sync(&hpb->map_work);
+}
+
+static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
+{
+	int err = 0;
+	bool flag_res = true;
+	int try;
+
+	/* wait for the device to complete HPB reset query */
+	for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
+		dev_dbg(hba->dev,
+			"%s start flag reset polling %d times\n",
+			__func__, try);
+
+		/* Poll fHpbReset flag to be cleared */
+		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+					QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
+
+		if (err) {
+			dev_err(hba->dev,
+				"%s reading fHpbReset flag failed with error %d\n",
+				__func__, err);
+			return flag_res;
+		}
+
+		if (!flag_res)
+			goto out;
+
+		usleep_range(1000, 1100);
+	}
+	if (flag_res) {
+		dev_err(hba->dev,
+			"%s fHpbReset was not cleared by the device\n",
+			__func__);
+	}
+out:
+	return flag_res;
+}
+
+void ufshpb_reset(struct ufs_hba *hba)
+{
+	struct ufshpb_lu *hpb;
+	struct scsi_device *sdev;
+
+	shost_for_each_device(sdev, hba->host) {
+		hpb = ufshpb_get_hpb_data(sdev);
+		if (!hpb)
+			continue;
+
+		if (ufshpb_get_state(hpb) != HPB_RESET)
+			continue;
+
+		ufshpb_set_state(hpb, HPB_PRESENT);
+	}
+}
+
+void ufshpb_reset_host(struct ufs_hba *hba)
+{
+	struct ufshpb_lu *hpb;
+	struct scsi_device *sdev;
+
+	shost_for_each_device(sdev, hba->host) {
+		hpb = ufshpb_get_hpb_data(sdev);
+		if (!hpb)
+			continue;
+
+		if (ufshpb_get_state(hpb) != HPB_PRESENT)
+			continue;
+		ufshpb_set_state(hpb, HPB_RESET);
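+		/*
+		 * Quiesce HPB while the host is being reset: cancel the
+		 * map work (and the host-mode workers, if any), then drop
+		 * response lists that would otherwise be replayed against
+		 * stale region state.
+		 */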
+		ufshpb_cancel_jobs(hpb);
+		ufshpb_discard_rsp_lists(hpb);
+	}
+}
+
+void ufshpb_suspend(struct ufs_hba *hba)
+{
+	struct ufshpb_lu *hpb;
+	struct scsi_device *sdev;
+
+	shost_for_each_device(sdev, hba->host) {
+		hpb = ufshpb_get_hpb_data(sdev);
+		if (!hpb)
+			continue;
+
+		if (ufshpb_get_state(hpb) != HPB_PRESENT)
+			continue;
+		ufshpb_set_state(hpb, HPB_SUSPEND);
+		ufshpb_cancel_jobs(hpb);
+	}
+}
+
+void ufshpb_resume(struct ufs_hba *hba)
+{
+	struct ufshpb_lu *hpb;
+	struct scsi_device *sdev;
+
+	shost_for_each_device(sdev, hba->host) {
+		hpb = ufshpb_get_hpb_data(sdev);
+		if (!hpb)
+			continue;
+
+		if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
+		    (ufshpb_get_state(hpb) != HPB_SUSPEND))
+			continue;
+		ufshpb_set_state(hpb, HPB_PRESENT);
+		ufshpb_kick_map_work(hpb);
+		if (ufshpb_mode == HPB_HOST_CONTROL)
+			schedule_delayed_work(&hpb->ufshpb_read_to_work,
+				msecs_to_jiffies(POLLING_INTERVAL_MS));
+	}
+}
+
+static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
+			      struct ufshpb_lu_info *hpb_lu_info)
+{
+	u16 max_active_rgns;
+	u8 lu_enable;
+	int size;
+	int ret;
+	char desc_buf[QUERY_DESC_MAX_SIZE];
+
+	ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
+
+	pm_runtime_get_sync(hba->dev);
+	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+					    QUERY_DESC_IDN_UNIT, lun, 0,
+					    desc_buf, &size);
+	pm_runtime_put_sync(hba->dev);
+
+	if (ret) {
+		dev_err(hba->dev,
+			"%s: idn: %d lun: %d query request failed\n",
+			__func__, QUERY_DESC_IDN_UNIT, lun);
+		return ret;
+	}
+
+	lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
+	if (lu_enable != LU_ENABLED_HPB_FUNC)
+		return -ENODEV;
+
+	max_active_rgns = get_unaligned_be16(
+			desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
+	if (!max_active_rgns) {
+		dev_err(hba->dev,
+			"lun %d wrong number of max active regions\n", lun);
+		return -ENODEV;
+	}
+
+	hpb_lu_info->num_blocks = get_unaligned_be64(
+			desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
+	hpb_lu_info->pinned_start = get_unaligned_be16(
+			desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
+	hpb_lu_info->num_pinned = get_unaligned_be16(
+			desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
+	hpb_lu_info->max_active_rgns = max_active_rgns;
+
+	return 0;
+}
+
+void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+
+	if (!hpb)
+		return;
+
+	ufshpb_set_state(hpb, HPB_FAILED);
+
+	sdev = hpb->sdev_ufs_lu;
+	sdev->hostdata = NULL;
+
+	ufshpb_cancel_jobs(hpb);
+
+	ufshpb_destroy_region_tbl(hpb);
+
+	kmem_cache_destroy(hpb->map_req_cache);
+	kmem_cache_destroy(hpb->m_page_cache);
+
+	list_del_init(&hpb->list_hpb_lu);
+
+	kfree(hpb);
+}
+
+static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
+{
+	int pool_size;
+	struct ufshpb_lu *hpb;
+	struct scsi_device *sdev;
+	bool init_success;
+
+	if (tot_active_srgn_pages == 0) {
+		ufshpb_remove(hba);
+		return;
+	}
+
+	init_success = !ufshpb_check_hpb_reset_query(hba);
+
+	pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
+	if (pool_size > tot_active_srgn_pages) {
+		mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
+		mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
+	}
+
+	shost_for_each_device(sdev, hba->host) {
+		hpb = ufshpb_get_hpb_data(sdev);
+		if (!hpb)
+			continue;
+
+		if (init_success) {
+			ufshpb_set_state(hpb, HPB_PRESENT);
+			if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
+				queue_work(ufshpb_wq, &hpb->map_work);
+		} else {
+			dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
+			ufshpb_destroy_lu(hba, sdev);
+		}
+	}
+
+	if (!init_success)
+		ufshpb_remove(hba);
+}
+
+void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+	struct ufshpb_lu *hpb;
+	int ret;
+	struct ufshpb_lu_info hpb_lu_info = { 0 };
+	int lun = sdev->lun;
+
+	if (lun >= hba->dev_info.max_lu_supported)
+		goto out;
+
+	ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
+	if (ret)
+		goto out;
+
+	hpb = ufshpb_alloc_hpb_lu(hba, lun, &hba->ufshpb_dev,
+				  &hpb_lu_info);
+	if (!hpb)
+		goto out;
+
+	tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
+			hpb->srgns_per_rgn * hpb->pages_per_srgn;
+
+	hpb->sdev_ufs_lu = sdev;
+	sdev->hostdata = hpb;
+
+out:
+	/* All LUs are initialized */
+	if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt))
+		ufshpb_hpb_lu_prepared(hba);
+}
+
+static int ufshpb_init_mem_wq(void)
+{
+	int ret;
+	unsigned int pool_size;
+
+	ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
+					sizeof(struct ufshpb_map_ctx),
+					0, 0, NULL);
+	if (!ufshpb_mctx_cache) {
+		pr_err("ufshpb: cannot init mctx cache\n");
+		return -ENOMEM;
+	}
+
+	pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
+	pr_info("%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
+		__func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
+
+	ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
+						    ufshpb_mctx_cache);
+	if (!ufshpb_mctx_pool) {
+		pr_err("ufshpb: cannot init mctx pool\n");
+		ret = -ENOMEM;
+		goto release_mctx_cache;
+	}
+
+	ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
+	if (!ufshpb_page_pool) {
+		pr_err("ufshpb: cannot init page pool\n");
+		ret = -ENOMEM;
+		goto release_mctx_pool;
+	}
+
+	ufshpb_wq = alloc_workqueue("ufshpb-wq",
+				    WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+	if (!ufshpb_wq) {
+		pr_err("ufshpb: alloc workqueue failed\n");
+		ret = -ENOMEM;
+		goto release_page_pool;
+	}
+
+	return 0;
+
+release_page_pool:
+	mempool_destroy(ufshpb_page_pool);
+release_mctx_pool:
+	mempool_destroy(ufshpb_mctx_pool);
+release_mctx_cache:
+	kmem_cache_destroy(ufshpb_mctx_cache);
+	return ret;
+}
+
+void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
+{
+	struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
+	int hpb_device_max_active_rgns = 0;
+	int hpb_num_lu;
+
+	hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
+	if (hpb_num_lu == 0) {
+		dev_err(hba->dev, "No HPB LU supported\n");
+		hpb_dev_info->hpb_disabled = true;
+		return;
+	}
+
+	hpb_dev_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
+	hpb_dev_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
+	hpb_device_max_active_rgns =
+		get_unaligned_be16(geo_buf +
+			GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);
+
+	if (hpb_dev_info->rgn_size == 0 || hpb_dev_info->srgn_size == 0 ||
+	    hpb_device_max_active_rgns == 0) {
+		dev_err(hba->dev, "device does not support HPB\n");
+		hpb_dev_info->hpb_disabled = true;
+		return;
+	}
+}
+
+void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
+{
+	struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
+	int version;
+
+	ufshpb_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
+
+	version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
+	if (version != HPB_SUPPORT_VERSION) {
+		dev_err(hba->dev, "%s: HPB version 0x%x is not supported.\n",
+			__func__, version);
+		hpb_dev_info->hpb_disabled = true;
+		return;
+	}
+
+	/*
+	 * Get the number of user logical units, so that we can check
+	 * whether all scsi_devices have finished initialization.
+	 */
+	hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
+}
+
+void ufshpb_init(struct ufs_hba *hba)
+{
+	struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
+	int try;
+	int ret;
+
+	if (!ufshpb_is_allowed(hba))
+		return;
+
+	if (ufshpb_init_mem_wq()) {
+		hpb_dev_info->hpb_disabled = true;
+		return;
+	}
+
+	atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
+	tot_active_srgn_pages = 0;
+	/* issue HPB reset query */
+	for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
+		ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+					QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
+		if (!ret)
+			break;
+	}
+}
+
+void ufshpb_remove(struct ufs_hba *hba)
+{
+	mempool_destroy(ufshpb_page_pool);
+	mempool_destroy(ufshpb_mctx_pool);
+	kmem_cache_destroy(ufshpb_mctx_cache);
+
+	destroy_workqueue(ufshpb_wq);
+}
+
+module_param(ufshpb_host_map_kbytes, uint, 0644);
+MODULE_PARM_DESC(ufshpb_host_map_kbytes,
+	"ufshpb host mapping memory in kilobytes for the ufshpb memory pool");
diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h
new file mode 100644
index 000000000000..86c16a4127bd
--- /dev/null
+++ b/drivers/scsi/ufs/ufshpb.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Universal Flash Storage Host Performance Booster
+ *
+ * Copyright (C) 2017-2020 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *	Yongmyung Lee
+ *	Jinyoung Choi
+ */
+
+#ifndef _UFSHPB_H_
+#define _UFSHPB_H_
+
+/* hpb response UPIU macros */
+#define HPB_RSP_NONE				0x0
+#define HPB_RSP_REQ_REGION_UPDATE		0x1
+#define HPB_RSP_DEV_RESET			0x2
+#define MAX_ACTIVE_NUM				2
+#define MAX_INACTIVE_NUM			2
+#define DEV_DATA_SEG_LEN			0x14
+#define DEV_SENSE_SEG_LEN			0x12
+#define DEV_DES_TYPE				0x80
+#define DEV_ADDITIONAL_LEN			0x10
+
+/* hpb map & entries macros */
+#define HPB_RGN_SIZE_UNIT			512
+#define HPB_ENTRY_BLOCK_SIZE			4096
+#define HPB_ENTRY_SIZE				0x8
+#define PINNED_NOT_SET				U32_MAX
+
+/* hpb support chunk size */
+#define HPB_MULTI_CHUNK_HIGH			1
+
+/* hpb vendor-defined opcodes */
+#define UFSHPB_READ				0xF8
+#define UFSHPB_READ_BUFFER			0xF9
+#define UFSHPB_WRITE_BUFFER			0xFA
+#define UFSHPB_READ_BUFFER_ID			0x01
+#define UFSHPB_WRITE_BUFFER_ID			0x01
+#define HPB_READ_BUFFER_CMD_LENGTH		10
+#define HPB_WRITE_BUFFER_CMD_LENGTH		10
+#define LU_ENABLED_HPB_FUNC			0x02
+
+#define HPB_RESET_REQ_RETRIES			10
+
+#define HPB_SUPPORT_VERSION			0x100
+
+enum UFSHPB_MODE {
+	HPB_HOST_CONTROL,
+	HPB_DEVICE_CONTROL,
+};
+
+enum HPB_RGN_FLAGS {
+	RGN_FLAG_UPDATE = 0,
+	RGN_FLAG_DIRTY,
+};
+
+enum UFSHPB_STATE {
+	HPB_PRESENT = 1,
+	HPB_SUSPEND,
+	HPB_FAILED,
+	HPB_RESET,
+};
+
+enum HPB_RGN_STATE {
+	HPB_RGN_INACTIVE,
+	HPB_RGN_ACTIVE,
+	/* pinned regions are always active */
+	HPB_RGN_PINNED,
+};
+
+enum HPB_SRGN_STATE {
+	HPB_SRGN_UNUSED,
+	HPB_SRGN_INVALID,
+	HPB_SRGN_VALID,
+	HPB_SRGN_ISSUED,
+};
+
+/**
+ * struct ufshpb_lu_info - UFSHPB logical unit related info
+ * @num_blocks: the number of logical blocks
+ * @pinned_start: the start region number of the pinned region
+ * @num_pinned: the number of pinned regions
+ * @max_active_rgns: maximum number of active regions
+ */
+struct ufshpb_lu_info {
+	int num_blocks;
+	int pinned_start;
+	int num_pinned;
+	int max_active_rgns;
+};
+
+struct ufshpb_map_ctx {
+	struct page **m_page;
+	unsigned long *ppn_dirty;
+};
+
+struct ufshpb_subregion {
+	struct ufshpb_map_ctx *mctx;
+	enum HPB_SRGN_STATE srgn_state;
+	int rgn_idx;
+	int srgn_idx;
+
+	/* below information is used by rsp_list */
+	struct list_head list_act_srgn;
+};
+
+struct ufshpb_region {
+	struct ufshpb_subregion *srgn_tbl;
+	enum HPB_RGN_STATE rgn_state;
+	int rgn_idx;
+	int srgn_cnt;
+
+	/* below information is used by rsp_list */
+	struct list_head list_inact_rgn;
+
+	/* below information is used by lru */
+	struct list_head list_lru_rgn;
+	unsigned long rgn_flags;
+
+	/* region reads - for host mode */
+	atomic64_t reads;
+
+	/* region "cold" timer - for host mode */
+	ktime_t read_timeout;
+	atomic_t read_timeout_expiries;
+	struct list_head list_expired_rgn;
+};
+
+#define for_each_sub_region(rgn, i, srgn)				\
+	for ((i) = 0;							\
+	     ((i) < (rgn)->srgn_cnt) && ((srgn) = &(rgn)->srgn_tbl[i]);	\
+	     (i)++)
+
+/**
+ * struct ufshpb_req - UFSHPB READ BUFFER (for caching map) request structure
+ * @req: block layer request for READ BUFFER
+ * @bio: bio for holding map page
+ * @hpb: ufshpb_lu structure related to the L2P map
+ * @mctx: L2P map information
+ * @rgn_idx: target region index
+ * @srgn_idx: target sub-region index
+ */
+struct ufshpb_req {
+	struct request *req;
+	struct bio *bio;
+	struct ufshpb_lu *hpb;
+	struct ufshpb_map_ctx *mctx;
+
+	unsigned int rgn_idx;
+	unsigned int srgn_idx;
+};
+
+struct victim_select_info {
+	struct list_head lh_lru_rgn; /* LRU list of regions */
+	int max_lru_active_cnt; /* supported hpb #regions minus pinned #regions */
+	atomic_t active_cnt;
+};
+
+struct ufshpb_stats {
+	u64 hit_cnt;
+	u64 miss_cnt;
+	u64 rb_noti_cnt;
+	u64 rb_active_cnt;
+	u64 rb_inactive_cnt;
+	u64 map_req_cnt;
+	u64 umap_req_cnt;
+};
+
+struct ufshpb_lu {
+	int lun;
+	struct scsi_device *sdev_ufs_lu;
+
+	spinlock_t rgn_state_lock; /* protects rgn/srgn state */
+	struct ufshpb_region *rgn_tbl;
+
+	atomic_t hpb_state;
+
+	spinlock_t rsp_list_lock;
+	struct list_head lh_act_srgn; /* protected by rsp_list_lock */
+	struct list_head lh_inact_rgn; /* protected by rsp_list_lock */
+
+	/* cached L2P map management worker */
+	struct work_struct map_work;
+
+	/* for selecting victim */
+	struct victim_select_info lru_info;
+	struct work_struct ufshpb_normalization_work;
+	struct work_struct ufshpb_lun_reset_work;
+	struct delayed_work ufshpb_read_to_work;
+	unsigned long work_data_bits;
+
+	/* pinned region information */
+	u32 lu_pinned_start;
+	u32 lu_pinned_end;
+
+	/* HPB related configuration */
+	u32 rgns_per_lu;
+	u32 srgns_per_lu;
+	int srgns_per_rgn;
+	u32 srgn_mem_size;
+	u32 entries_per_rgn_mask;
+	u32 entries_per_rgn_shift;
+	u32 entries_per_srgn;
+	u32 entries_per_srgn_mask;
+	u32 entries_per_srgn_shift;
+	u32 pages_per_srgn;
+
+	struct ufshpb_stats stats;
+
+	struct kmem_cache *map_req_cache;
+	struct kmem_cache *m_page_cache;
+
+	struct list_head list_hpb_lu;
+};
+
+struct ufs_hba;
+struct ufshcd_lrb;
+
+#ifndef CONFIG_SCSI_UFS_HPB
+static inline void ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) {}
+static inline void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) {}
+static inline void ufshpb_resume(struct ufs_hba *hba) {}
+static inline void ufshpb_suspend(struct ufs_hba *hba) {}
+static inline void ufshpb_reset(struct ufs_hba *hba) {}
+static inline void ufshpb_reset_host(struct ufs_hba *hba) {}
+static inline void ufshpb_init(struct ufs_hba *hba) {}
+static inline void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev) {}
+static inline void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev) {}
+static inline void ufshpb_remove(struct ufs_hba *hba) {}
+static inline bool ufshpb_is_allowed(struct ufs_hba *hba) { return false; }
+static inline void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf) {}
+static inline void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) {}
+#else
+void ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
+void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
+void ufshpb_resume(struct ufs_hba *hba);
+void ufshpb_suspend(struct ufs_hba *hba);
+void ufshpb_reset(struct ufs_hba *hba);
+void ufshpb_reset_host(struct ufs_hba *hba);
+void ufshpb_init(struct ufs_hba *hba);
+void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev);
+void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev);
+void ufshpb_remove(struct ufs_hba *hba);
+bool ufshpb_is_allowed(struct ufs_hba *hba);
+void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf);
+void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf);
+extern struct attribute_group ufs_sysfs_hpb_stat_group;
+#endif
+
+#endif /* End of Header */
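
For reference, the geometry constants defined in ufshpb.h above translate into concrete sizes. The following standalone sketch is illustrative only and not part of the patch; the descriptor values bHPBRegionSize = 0x10 and bHPBSubRegionSize = 0x0D are made-up examples of what ufshpb_get_geo_info() might read from the geometry descriptor:

#include <stdint.h>
#include <stdio.h>

/* mirror the constants from ufshpb.h */
#define HPB_RGN_SIZE_UNIT	512	/* bytes */
#define HPB_ENTRY_BLOCK_SIZE	4096	/* one L2P entry maps 4 KiB */
#define HPB_ENTRY_SIZE		0x8	/* bytes per L2P entry */

int main(void)
{
	uint8_t rgn_size = 0x10;	/* hypothetical bHPBRegionSize */
	uint8_t srgn_size = 0x0D;	/* hypothetical bHPBSubRegionSize */

	/* HPB Region size = 512B * 2^bHPBRegionSize */
	uint64_t rgn_bytes = (uint64_t)HPB_RGN_SIZE_UNIT << rgn_size;
	uint64_t srgn_bytes = (uint64_t)HPB_RGN_SIZE_UNIT << srgn_size;

	/* entries_per_srgn and srgn_mem_size as kept in struct ufshpb_lu */
	uint64_t entries_per_srgn = srgn_bytes / HPB_ENTRY_BLOCK_SIZE;
	uint64_t srgn_mem_size = entries_per_srgn * HPB_ENTRY_SIZE;

	printf("region %llu MiB, sub-region %llu MiB, %llu entries/srgn, %llu B map per srgn\n",
	       (unsigned long long)(rgn_bytes >> 20),
	       (unsigned long long)(srgn_bytes >> 20),
	       (unsigned long long)entries_per_srgn,
	       (unsigned long long)srgn_mem_size);
	return 0;
}

With these example values each active sub-region pins 8 KiB (two 4 KiB pages) of host DRAM for its L2P entries; pages are the unit in which tot_active_srgn_pages and the mempools above are sized.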
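
The per-LU counters exported through ufs_sysfs_hpb_stat_group can be sampled from userspace once an LU reaches HPB_PRESENT. A minimal sketch, assuming the "hpb_sysfs" group is registered under the scsi_device sysfs directory; the SCSI address 0:0:0:0 and the exact attachment point are placeholders, not something these hunks show:

#include <stdio.h>

int main(void)
{
	/* placeholder path; the real H:C:T:L depends on the system */
	const char *path =
		"/sys/class/scsi_device/0:0:0:0/device/hpb_sysfs/hit_cnt";
	unsigned long long hit_cnt;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%llu", &hit_cnt) != 1) {
		fclose(f);
		fprintf(stderr, "failed to parse %s\n", path);
		return 1;
	}
	fclose(f);
	printf("HPB hit_cnt: %llu\n", hit_cnt);
	return 0;
}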