/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/mutex.h>
#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>

extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)
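/*
 * Illustrative sketch (helper name is an assumption, not part of the
 * driver): NVME_IO_TIMEOUT is expressed in jiffies, so a namespace
 * request queue could apply it with the standard block-layer helper
 * from <linux/blkdev.h> (pulled in via blk-mq.h).
 */
static inline void nvme_example_set_io_timeout(struct request_queue *q)
{
	blk_queue_rq_timeout(q, NVME_IO_TIMEOUT);
}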

enum {
	NVME_NS_LBA		= 0,
	NVME_NS_LIGHTNVM	= 1,
};

/* The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2000
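
/*
 * Illustrative sketch (helper name is an assumption; assumes
 * <linux/delay.h> for msleep()): a reset path honoring the quirk above
 * would sleep before it starts polling CSTS.RDY on the affected
 * PCI function.
 */
static inline void nvme_example_quirk_delay(struct pci_dev *pdev)
{
	if (pdev->vendor == 0x1c58 && pdev->device == 0x0003)
		msleep(NVME_QUIRK_DELAY_AMOUNT);
}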

/*
 * Represents an NVM Express device. Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct list_head node;
	struct nvme_queue **queues;
	struct request_queue *admin_q;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	int instance;
	unsigned queue_count;
	unsigned online_queues;
	unsigned max_qid;
	int q_depth;
	u32 db_stride;
	u32 ctrl_config;
	struct msix_entry *entry;
	struct nvme_bar __iomem *bar;
	struct list_head namespaces;
	struct kref kref;
	struct device *device;
	struct work_struct reset_work;
	struct work_struct probe_work;
	struct work_struct scan_work;
	struct mutex shutdown_lock;
	char name[12];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	bool subsystem;
	u32 max_hw_sectors;
	u32 stripe_size;
	u32 page_size;
	void __iomem *cmb;
	dma_addr_t cmb_dma_addr;
	u64 cmb_size;
	u32 cmbsz;
	u16 oncs;
	u16 abort_limit;
	u8 event_limit;
	u8 vwc;
};
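
/*
 * Illustrative sketch (helper name is an assumption): each queue pair
 * owns a submission and a completion doorbell, so queue qid's submission
 * doorbell sits at dbs[qid * 2 * db_stride] within the BAR-mapped
 * doorbell region.
 */
static inline u32 __iomem *nvme_example_sq_doorbell(struct nvme_dev *dev, u16 qid)
{
	return &dev->dbs[qid * 2 * dev->db_stride];
}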

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;
	struct nvme_dev *dev;
	struct request_queue *queue;
	struct gendisk *disk;
	struct kref kref;
	unsigned ns_id;
	int lba_shift;
	u16 ms;
	bool ext;
	u8 pi_type;
	int type;
	u64 mode_select_num_blocks;
	u32 mode_select_block_len;
};

/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries. You can't see it in this data structure because C doesn't let
 * me express that. Use nvme_alloc_iod to ensure there's enough space
 * allocated to store the PRP list.
 */
struct nvme_iod {
	unsigned long private;	/* For the use of the submitter of the I/O */
	int npages;		/* In the PRP list. 0 means small pool in use */
	int offset;		/* Of PRP list */
	int nents;		/* Used in scatterlist */
	int length;		/* Of data, in bytes */
	dma_addr_t first_dma;
	struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */
	struct scatterlist sg[0];
};
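
/*
 * Illustrative sketch (helper name is an assumption) of the size
 * computation the allocator has to make: the PRP-list page pointers and
 * the scatterlist live in memory trailing the struct, so the allocation
 * must leave room for both after sizeof(struct nvme_iod).
 */
static inline size_t nvme_example_iod_size(unsigned int nseg, unsigned int nprp_pages)
{
	return sizeof(struct nvme_iod) +
	       sizeof(__le64 *) * nprp_pages +
	       sizeof(struct scatterlist) * nseg;
}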

static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}
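
/*
 * Illustrative sketch (helper name is an assumption): read/write commands
 * carry a starting LBA in the namespace's own block size, so the 512-byte
 * sector from the block layer is converted with nvme_block_nr() before it
 * is placed in the command.
 */
static inline void nvme_example_set_slba(struct nvme_ns *ns, struct request *req,
					 struct nvme_command *cmd)
{
	cmd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
}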

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, void __user *ubuffer, unsigned bufflen,
		u32 *result, unsigned timeout);
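
/*
 * Illustrative sketch (helper name is an assumption): a caller builds an
 * nvme_command on the stack and submits it synchronously. A flush carries
 * no data, so buf/bufflen are NULL/0; error handling is elided.
 */
static inline int nvme_example_flush(struct nvme_ns *ns)
{
	struct nvme_command c = { };

	c.common.opcode = nvme_cmd_flush;
	c.common.nsid = cpu_to_le32(ns->ns_id);

	return nvme_submit_sync_cmd(ns->queue, &c, NULL, 0);
}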

int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid,
		struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
		dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
		dma_addr_t dma_addr, u32 *result);
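
/*
 * Illustrative sketch (helper name and exact error handling are
 * assumptions): Set Features reports the controller's answer through
 * *result, and NVME_FEAT_NUM_QUEUES packs the requested SQ/CQ counts
 * (0's based) into dword11. The identify helpers above allocate the id
 * buffer for the caller, who is expected to kfree() it.
 */
static inline int nvme_example_request_queues(struct nvme_dev *dev, unsigned count)
{
	u32 result = 0;
	u32 q_count = (count - 1) | ((count - 1) << 16);
	u32 nr_sq, nr_cq;
	int status;

	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0, &result);
	if (status)
		return status;

	/* dword0 of the completion packs the allocated SQ/CQ counts */
	nr_sq = (result & 0xffff) + 1;
	nr_cq = (result >> 16) + 1;
	return nr_sq < nr_cq ? nr_sq : nr_cq;
}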

struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);

#ifdef CONFIG_NVM
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
int nvme_nvm_register(struct request_queue *q, char *disk_name);
void nvme_nvm_unregister(struct request_queue *q, char *disk_name);
#else
static inline int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
	return 0;
}
static inline void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {};

static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	return 0;
}
#endif /* CONFIG_NVM */
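
/*
 * Illustrative sketch (helper name is an assumption): when a namespace is
 * set up, the LightNVM path can be tried first; the stubs above let the
 * same call site compile away cleanly when CONFIG_NVM is not set.
 */
static inline int nvme_example_register_ns(struct nvme_ns *ns,
					   struct nvme_id_ns *id,
					   char *disk_name)
{
	if (nvme_nvm_ns_supported(ns, id)) {
		int ret = nvme_nvm_register(ns->queue, disk_name);

		if (ret)
			return ret;
		ns->type = NVME_NS_LIGHTNVM;
	}
	return 0;
}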

#endif /* _NVME_H */