/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

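/*
 * Resolve the fabric-provided unpacked LUN to the se_lun/se_device mapped
 * for this session's node ACL, take a percpu lun_ref, set up
 * se_cmd->se_lun/se_dev and update per-deve and per-device statistics.
 * Falls back to the TPG virtual LUN 0 (forced write-protected) when no
 * MappedLUN=0 exists for the Initiator Port.
 */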
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = se_cmd->se_sess;
        struct se_node_acl *nacl = se_sess->se_node_acl;
        struct se_dev_entry *deve;
        sense_reason_t ret = TCM_NO_SENSE;

        rcu_read_lock();
        deve = target_nacl_find_deve(nacl, unpacked_lun);
        if (deve) {
                atomic_long_inc(&deve->total_cmds);

                if (se_cmd->data_direction == DMA_TO_DEVICE)
                        atomic_long_add(se_cmd->data_length,
                                        &deve->write_bytes);
                else if (se_cmd->data_direction == DMA_FROM_DEVICE)
                        atomic_long_add(se_cmd->data_length,
                                        &deve->read_bytes);

                se_lun = rcu_dereference(deve->se_lun);

                if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
                        se_lun = NULL;
                        goto out_unlock;
                }

                se_cmd->se_lun = rcu_dereference(deve->se_lun);
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
                se_cmd->lun_ref_active = true;

                if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
                    deve->lun_access_ro) {
                        pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
                                " Access for 0x%08llx\n",
                                se_cmd->se_tfo->get_fabric_name(),
                                unpacked_lun);
                        rcu_read_unlock();
                        ret = TCM_WRITE_PROTECTED;
                        goto ref_dev;
                }
        }
out_unlock:
        rcu_read_unlock();

        if (!se_lun) {
                /*
                 * Use the se_portal_group->tpg_virt_lun0 to allow for
                 * REPORT_LUNS, et al to be returned when no active
                 * MappedLUN=0 exists for this Initiator Port.
                 */
                if (unpacked_lun != 0) {
                        pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                                " Access for 0x%08llx\n",
                                se_cmd->se_tfo->get_fabric_name(),
                                unpacked_lun);
                        return TCM_NON_EXISTENT_LUN;
                }

                se_lun = se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->orig_fe_lun = 0;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

                percpu_ref_get(&se_lun->lun_ref);
                se_cmd->lun_ref_active = true;

                /*
                 * Force WRITE PROTECT for virtual LUN 0
                 */
                if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
                    (se_cmd->data_direction != DMA_NONE)) {
                        ret = TCM_WRITE_PROTECTED;
                        goto ref_dev;
                }
        }
        /*
         * RCU reference protected by percpu se_lun->lun_ref taken above that
         * must drop to zero (including initial reference) before this se_lun
         * pointer can be kfree_rcu() by the final se_lun->lun_group put via
         * target_core_fabric_configfs.c:target_fabric_port_release
         */
ref_dev:
        se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
        atomic_long_inc(&se_cmd->se_dev->num_cmds);

        if (se_cmd->data_direction == DMA_TO_DEVICE)
                atomic_long_add(se_cmd->data_length,
                                &se_cmd->se_dev->write_bytes);
        else if (se_cmd->data_direction == DMA_FROM_DEVICE)
                atomic_long_add(se_cmd->data_length,
                                &se_cmd->se_dev->read_bytes);

        return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

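/*
 * TMR variant of the LUN lookup: resolve @unpacked_lun, take a percpu
 * lun_ref, and add the se_tmr_req to the device's dev_tmr_list.
 */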
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
        struct se_dev_entry *deve;
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = se_cmd->se_sess;
        struct se_node_acl *nacl = se_sess->se_node_acl;
        struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
        unsigned long flags;

        rcu_read_lock();
        deve = target_nacl_find_deve(nacl, unpacked_lun);
        if (deve) {
                se_lun = rcu_dereference(deve->se_lun);

                if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
                        se_lun = NULL;
                        goto out_unlock;
                }

                se_cmd->se_lun = rcu_dereference(deve->se_lun);
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
                se_cmd->lun_ref_active = true;
        }
out_unlock:
        rcu_read_unlock();

        if (!se_lun) {
                pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                        " Access for 0x%08llx\n",
                        se_cmd->se_tfo->get_fabric_name(),
                        unpacked_lun);
                return -ENODEV;
        }
        se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
        se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

        spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
        list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
        spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

        return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

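/*
 * Return true if the se_dev_entry backing cmd->orig_fe_lun is mapped
 * read-only for this session's node ACL.
 */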
bool target_lun_is_rdonly(struct se_cmd *cmd)
{
        struct se_session *se_sess = cmd->se_sess;
        struct se_dev_entry *deve;
        bool ret;

        rcu_read_lock();
        deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
        ret = deve && deve->lun_access_ro;
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
        struct se_node_acl *nacl,
        u16 rtpi)
{
        struct se_dev_entry *deve;
        struct se_lun *lun;
        struct se_portal_group *tpg = nacl->se_tpg;

        rcu_read_lock();
        hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
                lun = rcu_dereference(deve->se_lun);
                if (!lun) {
                        pr_err("%s device entries device pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->get_fabric_name());
                        continue;
                }
                if (lun->lun_rtpi != rtpi)
                        continue;

                kref_get(&deve->pr_kref);
                rcu_read_unlock();

                return deve;
        }
        rcu_read_unlock();

        return NULL;
}

void core_free_device_list_for_node(
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        struct se_dev_entry *deve;

        mutex_lock(&nacl->lun_entry_mutex);
        hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
                struct se_lun *lun = rcu_dereference_check(deve->se_lun,
                                        lockdep_is_held(&nacl->lun_entry_mutex));
                core_disable_device_list_for_node(lun, deve, nacl, tpg);
        }
        mutex_unlock(&nacl->lun_entry_mutex);
}

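/*
 * Update the read-only flag of an existing se_dev_entry for @mapped_lun
 * under nacl->lun_entry_mutex, if such an entry exists.
 */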
void core_update_device_list_access(
        u64 mapped_lun,
        bool lun_access_ro,
        struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;

        mutex_lock(&nacl->lun_entry_mutex);
        deve = target_nacl_find_deve(nacl, mapped_lun);
        if (deve)
                deve->lun_access_ro = lun_access_ro;
        mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->device_list_lock held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
        struct se_dev_entry *deve;

        hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
                if (deve->mapped_lun == mapped_lun)
                        return deve;

        return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
        struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
                                                 pr_kref);
        complete(&deve->pr_comp);
}

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
                             bool skip_new)
{
        struct se_dev_entry *tmp;

        rcu_read_lock();
        hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
                if (skip_new && tmp == new)
                        continue;
                core_scsi3_ua_allocate(tmp, 0x3F,
                                       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
        }
        rcu_read_unlock();
}

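/*
 * Allocate and install a new se_dev_entry mapping @mapped_lun to @lun for
 * @nacl.  An existing demo-mode (dynamic) entry for the same mapped_lun is
 * replaced and freed via RCU, while an existing explicit MappedLUN is
 * rejected.  A REPORTED LUNS DATA HAS CHANGED UA is queued for the other
 * mapped LUNs.
 */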
int core_enable_device_list_for_node(
        struct se_lun *lun,
        struct se_lun_acl *lun_acl,
        u64 mapped_lun,
        bool lun_access_ro,
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        struct se_dev_entry *orig, *new;

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new) {
                pr_err("Unable to allocate se_dev_entry memory\n");
                return -ENOMEM;
        }

        atomic_set(&new->ua_count, 0);
        spin_lock_init(&new->ua_lock);
        INIT_LIST_HEAD(&new->ua_list);
        INIT_LIST_HEAD(&new->lun_link);

        new->mapped_lun = mapped_lun;
        kref_init(&new->pr_kref);
        init_completion(&new->pr_comp);

        new->lun_access_ro = lun_access_ro;
        new->creation_time = get_jiffies_64();
        new->attach_count++;

        mutex_lock(&nacl->lun_entry_mutex);
        orig = target_nacl_find_deve(nacl, mapped_lun);
        if (orig && orig->se_lun) {
                struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
                                        lockdep_is_held(&nacl->lun_entry_mutex));

                if (orig_lun != lun) {
                        pr_err("Existing orig->se_lun doesn't match new lun"
                                " for dynamic -> explicit NodeACL conversion:"
                                " %s\n", nacl->initiatorname);
                        mutex_unlock(&nacl->lun_entry_mutex);
                        kfree(new);
                        return -EINVAL;
                }
                if (orig->se_lun_acl != NULL) {
                        pr_warn_ratelimited("Detected existing explicit"
                                " se_lun_acl->se_lun_group reference for %s"
                                " mapped_lun: %llu, failing\n",
                                nacl->initiatorname, mapped_lun);
                        mutex_unlock(&nacl->lun_entry_mutex);
                        kfree(new);
                        return -EINVAL;
                }

                rcu_assign_pointer(new->se_lun, lun);
                rcu_assign_pointer(new->se_lun_acl, lun_acl);
                hlist_del_rcu(&orig->link);
                hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
                mutex_unlock(&nacl->lun_entry_mutex);

                spin_lock(&lun->lun_deve_lock);
                list_del(&orig->lun_link);
                list_add_tail(&new->lun_link, &lun->lun_deve_list);
                spin_unlock(&lun->lun_deve_lock);

                kref_put(&orig->pr_kref, target_pr_kref_release);
                wait_for_completion(&orig->pr_comp);

                target_luns_data_has_changed(nacl, new, true);
                kfree_rcu(orig, rcu_head);
                return 0;
        }

        rcu_assign_pointer(new->se_lun, lun);
        rcu_assign_pointer(new->se_lun_acl, lun_acl);
        hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
        mutex_unlock(&nacl->lun_entry_mutex);

        spin_lock(&lun->lun_deve_lock);
        list_add_tail(&new->lun_link, &lun->lun_deve_list);
        spin_unlock(&lun->lun_deve_lock);

        target_luns_data_has_changed(nacl, new, true);
        return 0;
}

/*
 * Called with se_node_acl->lun_entry_mutex held.
 */
void core_disable_device_list_for_node(
        struct se_lun *lun,
        struct se_dev_entry *orig,
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        /*
         * rcu_dereference_raw protected by se_lun->lun_group symlink
         * reference to se_device->dev_group.
         */
        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
        /*
         * If the MappedLUN entry is being disabled, the entry in
         * lun->lun_deve_list must be removed now before clearing the
         * struct se_dev_entry pointers below as logic in
         * core_alua_do_transition_tg_pt() depends on these being present.
         *
         * deve->se_lun_acl will be NULL for demo-mode created LUNs
         * that have not been explicitly converted to MappedLUNs ->
         * struct se_lun_acl, but we remove deve->lun_link from
         * lun->lun_deve_list. This also means that active UAs and
         * NodeACL context specific PR metadata for demo-mode
         * MappedLUN *deve will be released below..
         */
        spin_lock(&lun->lun_deve_lock);
        list_del(&orig->lun_link);
        spin_unlock(&lun->lun_deve_lock);
        /*
         * Disable struct se_dev_entry LUN ACL mapping
         */
        core_scsi3_ua_release_all(orig);

        hlist_del_rcu(&orig->link);
        clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
        orig->lun_access_ro = false;
        orig->creation_time = 0;
        orig->attach_count--;
        /*
         * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
         * or REGISTER_AND_MOVE PR operation to complete.
         */
        kref_put(&orig->pr_kref, target_pr_kref_release);
        wait_for_completion(&orig->pr_comp);

        rcu_assign_pointer(orig->se_lun, NULL);
        rcu_assign_pointer(orig->se_lun_acl, NULL);

        kfree_rcu(orig, rcu_head);

        core_scsi3_free_pr_reg_from_nacl(dev, nacl);
        target_luns_data_has_changed(nacl, NULL, false);
}

/* core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
        struct se_node_acl *nacl;
        struct se_dev_entry *deve;

        mutex_lock(&tpg->acl_node_mutex);
        list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

                mutex_lock(&nacl->lun_entry_mutex);
                hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
                        struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
                                        lockdep_is_held(&nacl->lun_entry_mutex));

                        if (lun != tmp_lun)
                                continue;

                        core_disable_device_list_for_node(lun, deve, nacl, tpg);
                }
                mutex_unlock(&nacl->lun_entry_mutex);
        }
        mutex_unlock(&tpg->acl_node_mutex);
}

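/*
 * Assign the next free 16-bit RELATIVE TARGET PORT IDENTIFIER to @lun,
 * skipping zero and any value already used by a LUN on @dev's dev_sep_list.
 */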
int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
        struct se_lun *tmp;

        spin_lock(&dev->se_port_lock);
        if (dev->export_count == 0x0000ffff) {
                pr_warn("Reached dev->dev_port_count =="
                        " 0x0000ffff\n");
                spin_unlock(&dev->se_port_lock);
                return -ENOSPC;
        }
again:
        /*
         * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
         * Here is the table from spc4r17 section 7.7.3.8.
         *
         * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
         *
         * Code           Description
         * 0h             Reserved
         * 1h             Relative port 1, historically known as port A
         * 2h             Relative port 2, historically known as port B
         * 3h to FFFFh    Relative port 3 through 65 535
         */
        lun->lun_rtpi = dev->dev_rpti_counter++;
        if (!lun->lun_rtpi)
                goto again;

        list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
                /*
                 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
                 * for 16-bit wrap..
                 */
                if (lun->lun_rtpi == tmp->lun_rtpi)
                        goto again;
        }
        spin_unlock(&dev->se_port_lock);

        return 0;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
        struct t10_vpd *vpd, *vpd_tmp;

        spin_lock(&dev->t10_wwn.t10_vpd_lock);
        list_for_each_entry_safe(vpd, vpd_tmp,
                        &dev->t10_wwn.t10_vpd_list, vpd_list) {
                list_del(&vpd->vpd_list);
                kfree(vpd);
        }
        spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
        u32 aligned_max_sectors;
        u32 alignment;
        /*
         * Limit max_sectors to a PAGE_SIZE aligned value for modern
         * transport_allocate_data_tasks() operation.
         */
        alignment = max(1ul, PAGE_SIZE / block_size);
        aligned_max_sectors = rounddown(max_sectors, alignment);

        if (max_sectors != aligned_max_sectors)
                pr_info("Rounding down aligned max_sectors from %u to %u\n",
                        max_sectors, aligned_max_sectors);

        return aligned_max_sectors;
}

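/*
 * Activate @lun for @dev within @tpg and, when demo-mode is enabled, add
 * the new LUN to the device lists of any dynamically generated node ACLs.
 */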
int core_dev_add_lun(
        struct se_portal_group *tpg,
        struct se_device *dev,
        struct se_lun *lun)
{
        int rc;

        rc = core_tpg_add_lun(tpg, lun, false, dev);
        if (rc < 0)
                return rc;

        pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
                " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
        /*
         * Update LUN maps for dynamically added initiators when
         * generate_node_acl is enabled.
         */
        if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
                struct se_node_acl *acl;

                mutex_lock(&tpg->acl_node_mutex);
                list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                        if (acl->dynamic_node_acl &&
                            (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
                             !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
                                core_tpg_add_node_to_devs(acl, tpg, lun);
                        }
                }
                mutex_unlock(&tpg->acl_node_mutex);
        }

        return 0;
}

/* core_dev_del_lun():
 *
 *
 */
void core_dev_del_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
                " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                tpg->se_tpg_tfo->get_fabric_name());

        core_tpg_remove_lun(tpg, lun);
}

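/*
 * Allocate a struct se_lun_acl for @nacl + @mapped_lun; the entry is bound
 * to an se_lun later via core_dev_add_initiator_node_lun_acl().
 */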
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *nacl,
        u64 mapped_lun,
        int *ret)
{
        struct se_lun_acl *lacl;

        if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
                pr_err("%s InitiatorName exceeds maximum size.\n",
                        tpg->se_tpg_tfo->get_fabric_name());
                *ret = -EOVERFLOW;
                return NULL;
        }
        lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
        if (!lacl) {
                pr_err("Unable to allocate memory for struct se_lun_acl.\n");
                *ret = -ENOMEM;
                return NULL;
        }

        lacl->mapped_lun = mapped_lun;
        lacl->se_lun_nacl = nacl;

        return lacl;
}

int core_dev_add_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun_acl *lacl,
        struct se_lun *lun,
        bool lun_access_ro)
{
        struct se_node_acl *nacl = lacl->se_lun_nacl;
        /*
         * rcu_dereference_raw protected by se_lun->lun_group symlink
         * reference to se_device->dev_group.
         */
        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

        if (!nacl)
                return -EINVAL;

        if (lun->lun_access_ro)
                lun_access_ro = true;

        lacl->se_lun = lun;

        if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
                        lun_access_ro, nacl, tpg) < 0)
                return -EINVAL;

        pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
                " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
                lun_access_ro ? "RO" : "RW",
                nacl->initiatorname);
        /*
         * Check to see if there are any existing persistent reservation APTPL
         * pre-registrations that need to be enabled for this LUN ACL..
         */
        core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
                                            lacl->mapped_lun);
        return 0;
}

int core_dev_del_initiator_node_lun_acl(
        struct se_lun *lun,
        struct se_lun_acl *lacl)
{
        struct se_portal_group *tpg = lun->lun_tpg;
        struct se_node_acl *nacl;
        struct se_dev_entry *deve;

        nacl = lacl->se_lun_nacl;
        if (!nacl)
                return -EINVAL;

        mutex_lock(&nacl->lun_entry_mutex);
        deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
        if (deve)
                core_disable_device_list_for_node(lun, deve, nacl, tpg);
        mutex_unlock(&nacl->lun_entry_mutex);

        pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
                " InitiatorNode: %s Mapped LUN: %llu\n",
                tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                nacl->initiatorname, lacl->mapped_lun);

        return 0;
}

void core_dev_free_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun_acl *lacl)
{
        pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
                " Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg),
                tpg->se_tpg_tfo->get_fabric_name(),
                lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

        kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
        struct t10_wwn *wwn = &dev->t10_wwn;
        char buf[17];
        int i, device_type;
        /*
         * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
         */
        for (i = 0; i < 8; i++)
                if (wwn->vendor[i] >= 0x20)
                        buf[i] = wwn->vendor[i];
                else
                        buf[i] = ' ';
        buf[i] = '\0';
        pr_debug("  Vendor: %s\n", buf);

        for (i = 0; i < 16; i++)
                if (wwn->model[i] >= 0x20)
                        buf[i] = wwn->model[i];
                else
                        buf[i] = ' ';
        buf[i] = '\0';
        pr_debug("  Model: %s\n", buf);

        for (i = 0; i < 4; i++)
                if (wwn->revision[i] >= 0x20)
                        buf[i] = wwn->revision[i];
                else
                        buf[i] = ' ';
        buf[i] = '\0';
        pr_debug("  Revision: %s\n", buf);

        device_type = dev->transport->get_device_type(dev);
        pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

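/*
 * Allocate a new se_device through the HBA backend and initialize its
 * lists, locks, default SCSI attributes and the internal xcopy_lun.
 */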
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
        struct se_device *dev;
        struct se_lun *xcopy_lun;

        dev = hba->backend->ops->alloc_device(hba, name);
        if (!dev)
                return NULL;

        dev->se_hba = hba;
        dev->transport = hba->backend->ops;
        dev->prot_length = sizeof(struct t10_pi_tuple);
        dev->hba_index = hba->hba_index;

        INIT_LIST_HEAD(&dev->dev_sep_list);
        INIT_LIST_HEAD(&dev->dev_tmr_list);
        INIT_LIST_HEAD(&dev->delayed_cmd_list);
        INIT_LIST_HEAD(&dev->state_list);
        INIT_LIST_HEAD(&dev->qf_cmd_list);
        spin_lock_init(&dev->execute_task_lock);
        spin_lock_init(&dev->delayed_cmd_lock);
        spin_lock_init(&dev->dev_reservation_lock);
        spin_lock_init(&dev->se_port_lock);
        spin_lock_init(&dev->se_tmr_lock);
        spin_lock_init(&dev->qf_cmd_lock);
        sema_init(&dev->caw_sem, 1);
        INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
        spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
        INIT_LIST_HEAD(&dev->t10_pr.registration_list);
        INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
        spin_lock_init(&dev->t10_pr.registration_lock);
        spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
        INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
        spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
        INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
        spin_lock_init(&dev->t10_alua.lba_map_lock);

        dev->t10_wwn.t10_dev = dev;
        dev->t10_alua.t10_dev = dev;

        dev->dev_attrib.da_dev = dev;
        dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
        dev->dev_attrib.emulate_dpo = 1;
        dev->dev_attrib.emulate_fua_write = 1;
        dev->dev_attrib.emulate_fua_read = 1;
        dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
        dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
        dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
        dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
        dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
        dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
        dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
        dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
        dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
        dev->dev_attrib.is_nonrot = DA_IS_NONROT;
        dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
        dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
        dev->dev_attrib.max_unmap_block_desc_count =
                DA_MAX_UNMAP_BLOCK_DESC_COUNT;
        dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
        dev->dev_attrib.unmap_granularity_alignment =
                                DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
        dev->dev_attrib.unmap_zeroes_data =
                                DA_UNMAP_ZEROES_DATA_DEFAULT;
        dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

        xcopy_lun = &dev->xcopy_lun;
        rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
        init_completion(&xcopy_lun->lun_ref_comp);
        init_completion(&xcopy_lun->lun_shutdown_comp);
        INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
        INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
        mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
        xcopy_lun->lun_tpg = &xcopy_pt_tpg;

        return dev;
}

/*
 * Check if the underlying struct block_device request_queue supports
 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
 * in ATA and we need to set TPE=1
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
                                       struct request_queue *q)
{
        int block_size = queue_logical_block_size(q);

        if (!blk_queue_discard(q))
                return false;

        attrib->max_unmap_lba_count =
                q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
        /*
         * Currently hardcoded to 1 in Linux/SCSI code..
         */
        attrib->max_unmap_block_desc_count = 1;
        attrib->unmap_granularity = q->limits.discard_granularity / block_size;
        attrib->unmap_granularity_alignment = q->limits.discard_alignment /
                                                                block_size;
        attrib->unmap_zeroes_data = (q->limits.max_write_zeroes_sectors);
        return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
        switch (dev->dev_attrib.block_size) {
        case 4096:
                return lb << 3;
        case 2048:
                return lb << 2;
        case 1024:
                return lb << 1;
        default:
                return lb;
        }
}
EXPORT_SYMBOL(target_to_linux_sector);

/**
 * target_find_device - find a se_device by its dev_index
 * @id: dev_index
 * @do_depend: true if caller needs target_depend_item to be done
 *
 * If do_depend is true, the caller must do a target_undepend_item
 * when finished using the device.
 *
 * If do_depend is false, the caller must be called in a configfs
 * callback or during removal.
 */
struct se_device *target_find_device(int id, bool do_depend)
{
        struct se_device *dev;

        mutex_lock(&device_mutex);
        dev = idr_find(&devices_idr, id);
        if (dev && do_depend && target_depend_item(&dev->dev_group.cg_item))
                dev = NULL;
        mutex_unlock(&device_mutex);
        return dev;
}
EXPORT_SYMBOL(target_find_device);

struct devices_idr_iter {
        int (*fn)(struct se_device *dev, void *data);
        void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
{
        struct devices_idr_iter *iter = data;
        struct se_device *dev = p;

        /*
         * We add the device early to the idr, so it can be used
         * by backend modules during configuration. We do not want
         * to allow other callers to access partially setup devices,
         * so we skip them here.
         */
        if (!(dev->dev_flags & DF_CONFIGURED))
                return 0;

        return iter->fn(dev, iter->data);
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
                           void *data)
{
        struct devices_idr_iter iter;
        int ret;

        iter.fn = fn;
        iter.data = data;

        mutex_lock(&device_mutex);
        ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
        mutex_unlock(&device_mutex);
        return ret;
}

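/*
 * Second stage of device setup: allocate a dev_index in devices_idr, let
 * the backend configure itself, set up ALUA and the tmr/qf work queues,
 * preload INQUIRY defaults for non-passthrough backends and finally mark
 * the device DF_CONFIGURED.
 */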
int target_configure_device(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;
        int ret, id;

        if (dev->dev_flags & DF_CONFIGURED) {
                pr_err("se_dev->se_dev_ptr already set for storage"
                        " object\n");
                return -EEXIST;
        }

        /*
         * Add early so modules like tcmu can use during its
         * configuration.
         */
        mutex_lock(&device_mutex);
        /*
         * Use cyclic to try and avoid collisions with devices
         * that were recently removed.
         */
        id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
        mutex_unlock(&device_mutex);
        if (id < 0) {
                ret = -ENOMEM;
                goto out;
        }
        dev->dev_index = id;

        ret = dev->transport->configure_device(dev);
        if (ret)
                goto out_free_index;
        /*
         * XXX: there is not much point to have two different values here..
         */
        dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
        dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

        /*
         * Align max_hw_sectors down to PAGE_SIZE I/O transfers
         */
        dev->dev_attrib.hw_max_sectors =
                se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
                                         dev->dev_attrib.hw_block_size);
        dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

        dev->creation_time = get_jiffies_64();

        ret = core_setup_alua(dev);
        if (ret)
                goto out_free_index;

        /*
         * Startup the struct se_device processing thread
         */
        dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
                                      dev->transport->name);
        if (!dev->tmr_wq) {
                pr_err("Unable to create tmr workqueue for %s\n",
                        dev->transport->name);
                ret = -ENOMEM;
                goto out_free_alua;
        }

        /*
         * Setup work_queue for QUEUE_FULL
         */
        INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

        /*
         * Preload the initial INQUIRY const values if we are doing
         * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
         * passthrough because this is being provided by the backend LLD.
         */
        if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
                strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
                strncpy(&dev->t10_wwn.model[0],
                        dev->transport->inquiry_prod, 16);
                strncpy(&dev->t10_wwn.revision[0],
                        dev->transport->inquiry_rev, 4);
        }

        scsi_dump_inquiry(dev);

        spin_lock(&hba->device_lock);
        hba->dev_count++;
        spin_unlock(&hba->device_lock);

        dev->dev_flags |= DF_CONFIGURED;

        return 0;

out_free_alua:
        core_alua_free_lu_gp_mem(dev);
out_free_index:
        mutex_lock(&device_mutex);
        idr_remove(&devices_idr, dev->dev_index);
        mutex_unlock(&device_mutex);
out:
        se_release_vpd_for_dev(dev);
        return ret;
}

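/*
 * Tear down an se_device: destroy its tmr workqueue, release its dev_index,
 * free ALUA/PR state and hand the device back to its backend driver.
 */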
void target_free_device(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;

        WARN_ON(!list_empty(&dev->dev_sep_list));

        if (dev->dev_flags & DF_CONFIGURED) {
                destroy_workqueue(dev->tmr_wq);

                dev->transport->destroy_device(dev);

                mutex_lock(&device_mutex);
                idr_remove(&devices_idr, dev->dev_index);
                mutex_unlock(&device_mutex);

                spin_lock(&hba->device_lock);
                hba->dev_count--;
                spin_unlock(&hba->device_lock);
        }

        core_alua_free_lu_gp_mem(dev);
        core_alua_set_lba_map(dev, NULL, 0, 0);
        core_scsi3_free_all_registrations(dev);
        se_release_vpd_for_dev(dev);

        if (dev->transport->free_prot)
                dev->transport->free_prot(dev);

        dev->transport->free_device(dev);
}

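/*
 * Create the global rd_mcp backed "virt_lun0" device that backs the
 * write-protected virtual LUN 0 used when a session has no MappedLUN=0.
 */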
int core_dev_setup_virtual_lun0(void)
{
        struct se_hba *hba;
        struct se_device *dev;
        char buf[] = "rd_pages=8,rd_nullio=1";
        int ret;

        hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
        if (IS_ERR(hba))
                return PTR_ERR(hba);

        dev = target_alloc_device(hba, "virt_lun0");
        if (!dev) {
                ret = -ENOMEM;
                goto out_free_hba;
        }

        hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

        ret = target_configure_device(dev);
        if (ret)
                goto out_free_se_dev;

        lun0_hba = hba;
        g_lun0_dev = dev;
        return 0;

out_free_se_dev:
        target_free_device(dev);
out_free_hba:
        core_delete_hba(hba);
        return ret;
}


void core_dev_release_virtual_lun0(void)
{
        struct se_hba *hba = lun0_hba;

        if (!hba)
                return;

        if (g_lun0_dev)
                target_free_device(g_lun0_dev);
        core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
        sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
        unsigned char *cdb = cmd->t_task_cdb;
        struct se_device *dev = cmd->se_dev;
        unsigned int size;

        /*
         * Clear a lun set in the cdb if the initiator talking to us spoke
         * an old standards version, as we can't assume the underlying device
         * won't choke up on it.
         */
        switch (cdb[0]) {
        case READ_10: /* SBC - RDProtect */
        case READ_12: /* SBC - RDProtect */
        case READ_16: /* SBC - RDProtect */
        case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
        case VERIFY: /* SBC - VRProtect */
        case VERIFY_16: /* SBC - VRProtect */
        case WRITE_VERIFY: /* SBC - VRProtect */
        case WRITE_VERIFY_12: /* SBC - VRProtect */
        case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
                break;
        default:
                cdb[1] &= 0x1f; /* clear logical unit number */
                break;
        }

        /*
         * For REPORT LUNS we always need to emulate the response, for everything
         * else, pass it up.
         */
        if (cdb[0] == REPORT_LUNS) {
                cmd->execute_cmd = spc_emulate_report_luns;
                return TCM_NO_SENSE;
        }

        /*
         * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
         * emulate the response, since tcmu does not have the information
         * required to process these commands.
         */
        if (!(dev->transport->transport_flags &
              TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
                if (cdb[0] == PERSISTENT_RESERVE_IN) {
                        cmd->execute_cmd = target_scsi3_emulate_pr_in;
                        size = get_unaligned_be16(&cdb[7]);
                        return target_cmd_size_check(cmd, size);
                }
                if (cdb[0] == PERSISTENT_RESERVE_OUT) {
                        cmd->execute_cmd = target_scsi3_emulate_pr_out;
                        size = get_unaligned_be32(&cdb[5]);
                        return target_cmd_size_check(cmd, size);
                }

                if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
                        cmd->execute_cmd = target_scsi2_reservation_release;
                        if (cdb[0] == RELEASE_10)
                                size = get_unaligned_be16(&cdb[7]);
                        else
                                size = cmd->data_length;
                        return target_cmd_size_check(cmd, size);
                }
                if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
                        cmd->execute_cmd = target_scsi2_reservation_reserve;
                        if (cdb[0] == RESERVE_10)
                                size = get_unaligned_be16(&cdb[7]);
                        else
                                size = cmd->data_length;
                        return target_cmd_size_check(cmd, size);
                }
        }

        /* Set DATA_CDB flag for ops that should have it */
        switch (cdb[0]) {
        case READ_6:
        case READ_10:
        case READ_12:
        case READ_16:
        case WRITE_6:
        case WRITE_10:
        case WRITE_12:
        case WRITE_16:
        case WRITE_VERIFY:
        case WRITE_VERIFY_12:
        case WRITE_VERIFY_16:
        case COMPARE_AND_WRITE:
        case XDWRITEREAD_10:
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                break;
        case VARIABLE_LENGTH_CMD:
                switch (get_unaligned_be16(&cdb[8])) {
                case READ_32:
                case WRITE_32:
                case WRITE_VERIFY_32:
                case XDWRITEREAD_32:
                        cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                        break;
                }
        }

        cmd->execute_cmd = exec_cmd;

        return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);