tcmu / 04c13c57-3ce2-4b12-82d4-eb87cfe1d0f4/upstream/1.6.0
Import upstream version 1.6.0 (Debian Janitor, 1 year, 3 months ago)
19 changed files with 318 additions and 210 deletions.
00 cmake_minimum_required (VERSION 2.8 FATAL_ERROR)
11 project (tcmu-runner C)
2 set(VERSION 1.5.4)
2 set(VERSION 1.6.0)
33 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Wall -Wdeclaration-after-statement -std=c99")
44
55 include(GNUInstallDirs)
431431 * the first command sent to us so clear
432432 * lock state to avoid later blacklist errors.
433433 */
434 pthread_mutex_lock(&rdev->state_lock);
434 pthread_mutex_lock(&rdev->rdev_lock);
435435 if (rdev->lock_state == TCMUR_DEV_LOCK_WRITE_LOCKED) {
436436 tcmu_dev_dbg(dev, "Dropping lock\n");
437437 rdev->lock_state = TCMUR_DEV_LOCK_UNLOCKED;
438438 }
439 pthread_mutex_unlock(&rdev->state_lock);
439 pthread_mutex_unlock(&rdev->rdev_lock);
440440 }
441441 }
442442
559559 if (!lock_is_required(dev))
560560 return ret;
561561
562 pthread_mutex_lock(&rdev->state_lock);
562 pthread_mutex_lock(&rdev->rdev_lock);
563563 if (rdev->lock_state == TCMUR_DEV_LOCK_WRITE_LOCKED) {
564564 /* For both read/write cases in this state is good */
565565 goto done;
616616 }
617617
618618 done:
619 pthread_mutex_unlock(&rdev->state_lock);
619 pthread_mutex_unlock(&rdev->rdev_lock);
620620 return ret;
621621 }
622622
2626 #include "libtcmu_common.h"
2727 #include "libtcmu_priv.h"
2828 #include "be_byteshift.h"
29
30 __thread int __tcmu_is_ework_thread = 0;
2931
3032 int tcmu_cdb_get_length(uint8_t *cdb)
3133 {
6466
6567 switch (tcmu_cdb_get_length(cdb)) {
6668 case 6:
67 val = be16toh(*((uint16_t *)&cdb[2]));
69 val = get_unaligned_be16(&cdb[2]);
6870 return ((cdb[1] & 0x1f) << 16) | val;
6971 case 10:
70 return be32toh(*((u_int32_t *)&cdb[2]));
72 return get_unaligned_be32(&cdb[2]);
7173 case 12:
72 return be32toh(*((u_int32_t *)&cdb[2]));
74 return get_unaligned_be32(&cdb[2]);
7375 case 16:
74 return be64toh(*((u_int64_t *)&cdb[2]));
76 return get_unaligned_be64(&cdb[2]);
7577 default:
76 assert_perror(EINVAL);
78 assert(0);
7779 return 0; /* not reached */
7880 }
7981 }
8486 case 6:
8587 return cdb[4];
8688 case 10:
87 return be16toh(*((uint16_t *)&cdb[7]));
89 return get_unaligned_be16(&cdb[7]);
8890 case 12:
89 return be32toh(*((u_int32_t *)&cdb[6]));
91 return get_unaligned_be32(&cdb[6]);
9092 case 16:
91 return be32toh(*((u_int32_t *)&cdb[10]));
93 return get_unaligned_be32(&cdb[10]);
9294 default:
93 assert_perror(EINVAL);
95 assert(0);
9496 return 0; /* not reached */
9597 }
9698 }
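The casts removed above, e.g. be32toh(*((uint32_t *)&cdb[2])), read a multi-byte value through a pointer that is only byte-aligned: undefined behavior in C, and a bus fault on strict-alignment CPUs. (The GNU-specific assert_perror(EINVAL) also gives way to a plain assert(0); both abort unconditionally.) A minimal sketch of the safe pattern the new helpers rely on, assuming a glibc-style <endian.h>; the READ(10) CDB bytes are illustrative:

    #include <endian.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* SAFE: memcpy into a correctly aligned local, then byte-swap.
     * Compilers lower this to a single load where that is legal. */
    static uint32_t read_be32_safe(const uint8_t *buf)
    {
        uint32_t val;

        memcpy(&val, &buf[2], sizeof(val));
        return be32toh(val);
    }

    int main(void)
    {
        /* READ(10) CDB with LBA 0x01020304 in bytes 2..5 */
        uint8_t cdb[10] = { 0x28, 0, 0x01, 0x02, 0x03, 0x04, 0, 0, 8, 0 };

        printf("lba=0x%08" PRIx32 "\n", read_be32_safe(cdb));
        return 0;
    }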
77
88 #ifndef _TCMU_BE_BYTESHIFT_H
99 #define _TCMU_BE_BYTESHIFT_H
10
11 #include <endian.h>
12 #include <stdint.h>
13 #include <string.h>
14
1015
1116 static inline void __put_unaligned_be32(uint32_t val, uint8_t *p)
1217 {
4752 return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
4853 }
4954
50 static inline uint16_t get_unaligned_be32(const void *p)
55 static inline uint32_t get_unaligned_be32(const void *p)
5156 {
5257 return __get_unaligned_be32(p);
5358 }
5459
60 static inline uint64_t get_unaligned_be64(const void *p)
61 {
62 uint64_t val;
63 memcpy(&val, p, sizeof(val));
64 return be64toh(val);
65 }
66
67 static inline void put_unaligned_be64(uint64_t val, void *p)
68 {
69 val = htobe64(val);
70 memcpy(p, &val, sizeof(val));
71 }
72
5573 #endif
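Besides adding the 64-bit accessors, this hunk fixes a latent truncation bug: get_unaligned_be32 was declared to return uint16_t, silently discarding the upper 16 bits at every call site. A round-trip sketch of the new 64-bit pair, assuming be_byteshift.h is on the include path; the misaligned offset is deliberate:

    #include <assert.h>
    #include <stdint.h>
    #include "be_byteshift.h"

    int main(void)
    {
        uint8_t buf[9];
        uint64_t v = 0x0102030405060708ULL;

        put_unaligned_be64(v, &buf[1]);    /* deliberately misaligned */
        assert(buf[1] == 0x01 && buf[8] == 0x08);  /* big-endian layout */
        assert(get_unaligned_be64(&buf[1]) == v);
        return 0;
    }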
1212 case $ID in
1313 fedora|rhel|centos)
1414 # for generic
15 $SUDO yum install -y cmake make gcc libnl3 glib2 zlib kmod
16 $SUDO yum install -y libnl3-devel glib2-devel zlib-devel kmod-devel gperftools-devel
15 $SUDO yum install -y cmake make gcc
16 $SUDO yum install -y libnl3
17 $SUDO yum install -y libnl3-devel
18 $SUDO yum install -y glib2
19 $SUDO yum install -y glib2-devel
20 $SUDO yum install -y zlib
21 $SUDO yum install -y zlib-devel
22 $SUDO yum install -y kmod
23 $SUDO yum install -y kmod-devel
24 $SUDO yum install -y gperftools-devel
1725
1826 # for glusterfs
19 $SUDO yum install -y glusterfs-api glusterfs-api-devel
27 $SUDO yum install -y glusterfs-api
28 $SUDO yum install -y glusterfs-api-devel
2029 # for ceph
21 $SUDO yum install -y librados2 librados2-devel librbd1
30 $SUDO yum install -y librados2
31 $SUDO yum install -y librados2-devel
32 $SUDO yum install -y librbd1
2233 yum search librbd-devel | grep -q "N/S matched" && LIBRBD=librbd || LIBRBD=librbd1
2334 $SUDO yum install -y $LIBRBD-devel
2435 ;;
2738 $SUDO apt update
2839
2940 # for generic
30 $SUDO apt install -y cmake make gcc zlib1g kmod
31 $SUDO apt install -y libnl-3-dev libnl-genl-3-dev libglib2.0-0 libkmod-dev libgoogle-perftools-dev
41 $SUDO apt install -y cmake make gcc
42 $SUDO apt install -y zlib1g kmod
43 $SUDO apt install -y libnl-3-dev
44 $SUDO apt install -y libnl-genl-3-dev
45 $SUDO apt install -y libglib2.0-0
46 $SUDO apt install -y libkmod-dev
47 $SUDO apt install -y libgoogle-perftools-dev
3248
3349 # for glusterfs
3450 $SUDO apt install -y libglusterfs-dev
3551
3652 # for ceph
37 $SUDO apt install -y librados2 librbd-dev
53 $SUDO apt install -y librados2
54 $SUDO apt install -y librbd-dev
3855 ;;
3956 sles|opensuse-tumbleweed)
4057 # for generic
41 $SUDO zypper install -y cmake make gcc libnl3-200 glib2 zlib kmod
42 $SUDO zypper install -y libnl3-devel glib2-devel zlib-devel libkmod-devel gperftools-devel
58 $SUDO zypper install -y cmake make gcc
59 $SUDO zypper install -y libnl3-200
60 $SUDO zypper install -y glib2
61 $SUDO zypper install -y zlib
62 $SUDO zypper install -y kmod
63 $SUDO zypper install -y libnl3-devel
64 $SUDO zypper install -y glib2-devel
65 $SUDO zypper install -y zlib-devel
66 $SUDO zypper install -y libkmod-devel
67 $SUDO zypper install -y gperftools-devel
4368
4469 #for glusterfs
45 $SUDO zypper install -y glusterfs-devel glusterfs
70 $SUDO zypper install -y glusterfs
71 $SUDO zypper install -y glusterfs-devel
4672 #for ceph
47 $SUDO zypper install -y librbd-devel librados-devel librados2
73 $SUDO zypper install -y librbd-devel
74 $SUDO zypper install -y librados-devel
75 $SUDO zypper install -y librados2
4876 ;;
4977 *)
5078 echo "TODO: distro not supported for now!"
842842 void tcmu_set_thread_name(const char *prefix, struct tcmu_device *dev)
843843 {
844844 const char *uio = dev ? tcmu_dev_get_uio_name(dev) : NULL;
845 char cname[TCMU_THREAD_NAME_LEN];
846845 char *pname;
847
848 if (pthread_getname_np(pthread_self(), cname, TCMU_THREAD_NAME_LEN))
849 return;
850846
851847 /*
852848 * If we are trying to set the pthread name in the
853849 * event work thread, we must ignore it.
854850 */
855 if (!strcmp(cname, "ework-thread")) {
851 if (__tcmu_is_ework_thread) {
856852 tcmu_dev_warn(dev, "Do not set name for event work thread in the callback fn\n");
857853 return;
858854 }
859855
860856 if (!prefix) {
861857 tcmu_dev_err(dev, "Failed to set name for thread %lu\n",
862 pthread_self());
858 (long unsigned int)pthread_self());
863859 return;
864860 }
865861
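The old detection fetched the current thread's name with pthread_getname_np and compared it against "ework-thread"; the replacement is a thread-local flag, set once when the event work thread starts, that costs a plain load to test and cannot fail. A self-contained sketch of the pattern, with illustrative names:

    #include <pthread.h>
    #include <stdio.h>

    static __thread int is_worker;   /* zero-initialized in every thread */

    static void do_callback(void)
    {
        if (is_worker) {
            /* cheap: no syscall, no 16-byte name buffer to read */
            printf("skipping: called from worker thread\n");
            return;
        }
        printf("running callback\n");
    }

    static void *worker(void *arg)
    {
        (void)arg;
        is_worker = 1;    /* set once; visible only to this thread */
        do_callback();    /* skipped */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        pthread_join(t, NULL);
        do_callback();    /* runs: main thread's flag is still 0 */
        return 0;
    }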
10221018 bool tcmu_dev_get_unmap_enabled(struct tcmu_device *dev)
10231019 {
10241020 return dev->unmap_enabled;
1021 }
1022
1023 void tcmu_dev_set_write_protect_enabled(struct tcmu_device *dev, bool enabled)
1024 {
1025 dev->write_protect_enabled = enabled;
1026 }
1027
1028 bool tcmu_dev_get_write_protect_enabled(struct tcmu_device *dev)
1029 {
1030 return dev->write_protect_enabled;
10251031 }
10261032
10271033 int tcmu_dev_get_fd(struct tcmu_device *dev)
10631069 /* get length of map from file */
10641070 ssize_t size;
10651071 char *size_name;
1066
1072
10671073 if (asprintf(&size_name, sizefmt, dev->dev_name) == -1) {
10681074 tcmu_err("cannot construct device map size filename\n");
10691075 goto err_free;
1313 #define __LIBTCMU_COMMON_H
1414
1515 #include <stdbool.h>
16 #include <pthread.h>
1617
1718 #ifdef __cplusplus
1819 extern "C" {
136137 bool tcmu_dev_get_solid_state_media(struct tcmu_device *dev);
137138 void tcmu_dev_set_unmap_enabled(struct tcmu_device *dev, bool enabled);
138139 bool tcmu_dev_get_unmap_enabled(struct tcmu_device *dev);
140 void tcmu_dev_set_write_protect_enabled(struct tcmu_device *dev, bool enabled);
141 bool tcmu_dev_get_write_protect_enabled(struct tcmu_device *dev);
139142 struct tcmulib_handler *tcmu_dev_get_handler(struct tcmu_device *dev);
140143 void tcmu_dev_flush_ring(struct tcmu_device *dev);
141144 bool tcmu_dev_oooc_supported(struct tcmu_device* dev);
192195 */
193196 void tcmu_thread_cancel(pthread_t thread);
194197
198 extern __thread int __tcmu_is_ework_thread;
199
195200 #ifdef __cplusplus
196201 }
197202 #endif
201201
202202 do {
203203 len = read(fd, buf, count);
204 } while (errno == EAGAIN);
204 } while (len < 0 && errno == EAGAIN);
205205
206206 errno = save;
207207 return len;
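read() leaves errno untouched on success, so the old loop could spin on a stale EAGAIN left over from an earlier call even though the read itself succeeded. A sketch of the corrected idiom; retrying on EINTR as well is an assumption here, not part of this patch:

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    static ssize_t read_retry(int fd, void *buf, size_t count)
    {
        ssize_t len;

        do {
            len = read(fd, buf, count);
            /* errno is only meaningful when read() returned -1 */
        } while (len < 0 && (errno == EAGAIN || errno == EINTR));

        return len;
    }

    int main(void)
    {
        char buf[64];
        ssize_t n = read_retry(0, buf, sizeof(buf));   /* stdin */

        printf("read %zd bytes\n", n);
        return n < 0;
    }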
402402 int fd = (intptr_t) data;
403403 char *buf, *msg;
404404 int count, ret, written = 0, r, pid = 0;
405 char pname[TCMU_THREAD_NAME_LEN];
406405
407406 if (fd == -1)
408407 return -1;
411410 if (pid <= 0)
412411 return -1;
413412
414 if (pthread_getname_np(pthread_self(), pname, TCMU_THREAD_NAME_LEN))
415 return -1;
416
417413 /*
418414 * format: timestamp pid [loglevel] msg
419415 */
420 ret = asprintf(&msg, "%s %d:%s [%s] %s", timestamp, pid, pname,
416 ret = asprintf(&msg, "%s %d [%s] %s", timestamp, pid,
421417 loglevel_string(pri), str);
422418 if (ret < 0)
423419 return -1;
5454 unsigned int write_cache_enabled:1;
5555 unsigned int solid_state_media:1;
5656 unsigned int unmap_enabled:1;
57 unsigned int write_protect_enabled:1;
5758
5859 char dev_name[16]; /* e.g. "uio14" */
5960 char tcm_hba_name[16]; /* e.g. "user_8" */
614614 struct tcmur_device *rdev = tcmu_dev_get_private(dev);
615615 bool is_open = false;
616616
617 pthread_mutex_lock(&rdev->state_lock);
617 pthread_mutex_lock(&rdev->rdev_lock);
618618 /* check if this was already called due to thread cancelation */
619619 if (rdev->flags & TCMUR_DEV_FLAG_STOPPED) {
620 pthread_mutex_unlock(&rdev->state_lock);
620 pthread_mutex_unlock(&rdev->rdev_lock);
621621 return;
622622 }
623623 rdev->flags |= TCMUR_DEV_FLAG_STOPPING;
624 pthread_mutex_unlock(&rdev->state_lock);
624 pthread_mutex_unlock(&rdev->rdev_lock);
625625
626626 /*
627627 * The lock thread can fire off the recovery thread, so make sure
632632
633633 tcmu_release_dev_lock(dev);
634634
635 pthread_mutex_lock(&rdev->state_lock);
635 pthread_mutex_lock(&rdev->rdev_lock);
636636 if (rdev->flags & TCMUR_DEV_FLAG_IS_OPEN) {
637637 rdev->flags &= ~TCMUR_DEV_FLAG_IS_OPEN;
638638 is_open = true;
639639 }
640 pthread_mutex_unlock(&rdev->state_lock);
640 pthread_mutex_unlock(&rdev->rdev_lock);
641641
642642 if (is_open)
643643 rhandler->close(dev);
644644
645 pthread_mutex_lock(&rdev->state_lock);
645 pthread_mutex_lock(&rdev->rdev_lock);
646646 rdev->flags |= TCMUR_DEV_FLAG_STOPPED;
647 pthread_mutex_unlock(&rdev->state_lock);
647 pthread_mutex_unlock(&rdev->rdev_lock);
648648
649649 tcmu_dev_dbg(dev, "cmdproc cleanup done\n");
650650 }
656656
657657 ret = clock_gettime(CLOCK_MONOTONIC_COARSE, time);
658658 if (!ret) {
659 tcmu_dev_dbg(dev, "Current time %lu secs.\n", time->tv_sec);
659 tcmu_dev_dbg(dev, "Current time %"PRIdMAX" secs.\n", (intmax_t)time->tv_sec);
660660 return 0;
661661 }
662662
680680
681681 memset(tmo, 0, sizeof(*tmo));
682682
683 pthread_spin_lock(&rdev->lock);
683 pthread_spin_lock(&rdev->cmds_list_lock);
684684 list_for_each(&rdev->cmds_list, tcmur_cmd, cmds_list_entry) {
685685 if (tcmur_cmd->timed_out)
686686 continue;
699699 tmo->tv_sec = 0;
700700 }
701701
702 tcmu_dev_dbg(dev, "Next cmd id %hu timeout in %lu secs. Current time %lu. Start time %lu\n",
703 tcmur_cmd->lib_cmd->cmd_id, tmo->tv_sec,
704 curr_time->tv_sec, tcmur_cmd->start_time.tv_sec);
702 tcmu_dev_dbg(dev, "Next cmd id %hu timeout in %"PRIdMAX" secs. Current time %"PRIdMAX". Start time %"PRIdMAX"\n",
703 tcmur_cmd->lib_cmd->cmd_id, (intmax_t)tmo->tv_sec,
704 (intmax_t)curr_time->tv_sec, (intmax_t)tcmur_cmd->start_time.tv_sec);
705705 break;
706706 }
707 pthread_spin_unlock(&rdev->lock);
707 pthread_spin_unlock(&rdev->cmds_list_lock);
708708
709709 return has_timeout;
710710 }
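tv_sec has type time_t, which may be 32 or 64 bits wide depending on the ABI, so the old "%lu" format was only accidentally correct. Casting to intmax_t and printing with PRIdMAX is portable wherever C99 <inttypes.h> exists. A minimal sketch:

    #include <inttypes.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        /* time_t may be 32- or 64-bit and signed; intmax_t holds both */
        printf("now: %" PRIdMAX " secs\n", (intmax_t)now.tv_sec);
        return 0;
    }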
727727 if (tcmur_get_time(dev, &curr_time))
728728 return;
729729
730 pthread_spin_lock(&rdev->lock);
730 pthread_spin_lock(&rdev->cmds_list_lock);
731731 list_for_each(&rdev->cmds_list, tcmur_cmd, cmds_list_entry) {
732732 if (tcmur_cmd->timed_out)
733733 continue;
757757 */
758758 tcmu_notify_cmd_timed_out(dev);
759759 }
760 pthread_spin_unlock(&rdev->lock);
760 pthread_spin_unlock(&rdev->cmds_list_lock);
761761 }
762762
763763 static void tcmur_tcmulib_cmd_start(struct tcmu_device *dev,
774774 if (rdev->cmd_time_out) {
775775 tcmur_cmd->start_time.tv_sec = curr_time->tv_sec;
776776
777 pthread_spin_lock(&rdev->lock);
777 pthread_spin_lock(&rdev->cmds_list_lock);
778778 list_add_tail(&rdev->cmds_list, &tcmur_cmd->cmds_list_entry);
779 pthread_spin_unlock(&rdev->lock);
779 pthread_spin_unlock(&rdev->cmds_list_lock);
780780 }
781781 }
782782
871871 * requests that LIO has completed. We only need to wait for replies
872872 * for outstanding requests so throttle the cmdproc thread now.
873873 */
874 pthread_mutex_lock(&rdev->state_lock);
874 pthread_mutex_lock(&rdev->rdev_lock);
875875 if (rdev->flags & TCMUR_DEV_FLAG_STOPPING)
876876 dev_stopping = true;
877 pthread_mutex_unlock(&rdev->state_lock);
877 pthread_mutex_unlock(&rdev->rdev_lock);
878878 }
879879
880880 /*
10191019 tcmu_dev_dbg(dev, "Got block_size %d, size in bytes %"PRId64"\n",
10201020 block_size, dev_size);
10211021
1022 ret = pthread_spin_init(&rdev->lock, 0);
1022 ret = pthread_spin_init(&rdev->cmds_list_lock, 0);
10231023 if (ret) {
10241024 ret = -ret;
10251025 goto free_rdev;
10371037 goto cleanup_caw_lock;
10381038 }
10391039
1040 ret = pthread_mutex_init(&rdev->state_lock, NULL);
1040 ret = pthread_mutex_init(&rdev->rdev_lock, NULL);
10411041 if (ret) {
10421042 ret = -ret;
10431043 goto cleanup_format_lock;
10451045
10461046 ret = setup_io_work_queue(dev);
10471047 if (ret < 0)
1048 goto cleanup_state_lock;
1048 goto cleanup_rdev_lock;
10491049
10501050 ret = setup_aio_tracking(rdev);
10511051 if (ret < 0)
10871087 cleanup_aio_tracking(rdev);
10881088 cleanup_io_work_queue:
10891089 cleanup_io_work_queue(dev, true);
1090 cleanup_state_lock:
1091 pthread_mutex_destroy(&rdev->state_lock);
1090 cleanup_rdev_lock:
1091 pthread_mutex_destroy(&rdev->rdev_lock);
10921092 cleanup_format_lock:
10931093 pthread_mutex_destroy(&rdev->format_lock);
10941094 cleanup_caw_lock:
10951095 pthread_mutex_destroy(&rdev->caw_lock);
10961096 cleanup_dev_lock:
1097 pthread_spin_destroy(&rdev->lock);
1097 pthread_spin_destroy(&rdev->cmds_list_lock);
10981098 free_rdev:
10991099 free(rdev);
11001100 return ret;
11051105 struct tcmur_device *rdev = tcmu_dev_get_private(dev);
11061106 int ret;
11071107
1108 pthread_mutex_lock(&rdev->state_lock);
1108 pthread_mutex_lock(&rdev->rdev_lock);
11091109 rdev->flags |= TCMUR_DEV_FLAG_STOPPING;
1110 pthread_mutex_unlock(&rdev->state_lock);
1110 pthread_mutex_unlock(&rdev->rdev_lock);
11111111
11121112 /*
11131113 * The order of cleaning up worker threads and calling ->removed()
11291129
11301130 tcmur_destroy_work(rdev->event_work);
11311131
1132 ret = pthread_mutex_destroy(&rdev->state_lock);
1132 ret = pthread_mutex_destroy(&rdev->rdev_lock);
11331133 if (ret != 0)
11341134 tcmu_err("could not cleanup state lock %d\n", ret);
11351135
11411141 if (ret != 0)
11421142 tcmu_err("could not cleanup caw lock %d\n", ret);
11431143
1144 ret = pthread_spin_destroy(&rdev->lock);
1144 ret = pthread_spin_destroy(&rdev->cmds_list_lock);
11451145 if (ret != 0)
11461146 tcmu_err("could not cleanup mailbox lock %d\n", ret);
11471147
13971397 g_unix_signal_add(SIGTERM, handle_sig, loop) <= 0 ||
13981398 g_unix_signal_add(SIGHUP, handle_sighup, loop) <= 0) {
13991399 tcmu_err("couldn't setup signal handlers\n");
1400 goto unwatch_cfg;
1400 goto loop_unref;
14011401 }
14021402
14031403 /* Set up event for libtcmu */
14231423
14241424 tcmu_crit("Exiting...\n");
14251425 g_bus_unown_name(reg_id);
1426 g_main_loop_unref(loop);
14271426 g_source_remove(watch_id);
14281427 g_io_channel_shutdown(libtcmu_gio, TRUE, NULL);
14291428 g_io_channel_unref (libtcmu_gio);
1430 g_object_unref(manager);
1429 if (manager)
1430 g_object_unref(manager);
14311431
14321432 ret = 0;
14331433
1434 unwatch_cfg:
1434 loop_unref:
1435 g_main_loop_unref(loop);
14351436 if (watching_cfg)
14361437 tcmu_unwatch_config(tcmu_cfg);
14371438 tcmulib_close(tcmulib_context);
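Two unwind problems are fixed above: a signal-handler setup failure previously jumped to a label that skipped g_main_loop_unref (leaking the loop), and manager was unreffed unconditionally even though it can still be NULL if the bus was never acquired. A generic sketch of the goto-unwind discipline, with illustrative resources standing in for the GLib objects:

    #include <stdio.h>
    #include <stdlib.h>

    static int setup(void)
    {
        char *a = NULL, *b = NULL;
        int ret = -1;

        a = malloc(16);
        if (!a)
            goto out;
        b = malloc(16);
        if (!b)
            goto free_a;         /* frees exactly what is live here */

        printf("setup ok\n");
        ret = 0;

        free(b);                 /* success path, then shared unwind */
    free_a:
        free(a);                 /* reached by success and failure alike */
    out:
        return ret;
    }

    int main(void)
    {
        return setup();
    }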
rbd.c (+117, -72)
108108 size_t iov_cnt;
109109 };
110110
111 static pthread_mutex_t blacklist_caches_lock = PTHREAD_MUTEX_INITIALIZER;
112 static darray(char *) blacklist_caches;
111 static pthread_mutex_t blocklist_caches_lock = PTHREAD_MUTEX_INITIALIZER;
112 static darray(char *) blocklist_caches;
113113
114114 #ifdef LIBRADOS_SUPPORTS_SERVICES
115115
215215 }
216216
217217 ret = tcmu_rbd_report_event(dev);
218 if (ret < 0)
219 tcmu_dev_err(dev, "Could not update status. (Err %d)\n", ret);
220218
221219 free_meta_buf:
222220 free(metadata_buf);
237235 }
238236
239237 #ifdef RBD_LOCK_ACQUIRE_SUPPORT
240 static void tcmu_rbd_service_status_update(struct tcmu_device *dev,
238 static int tcmu_rbd_service_status_update(struct tcmu_device *dev,
241239 bool has_lock)
242240 {
241 return 0;
243242 }
244243 #endif /* RBD_LOCK_ACQUIRE_SUPPORT */
245244
246245 #endif /* LIBRADOS_SUPPORTS_SERVICES */
247246
248247 #if defined LIBRADOS_SUPPORTS_GETADDRS || defined RBD_LOCK_ACQUIRE_SUPPORT
249 static void tcmu_rbd_rm_stale_entry_from_blacklist(struct tcmu_device *dev, char *addrs)
250 {
251 struct tcmu_rbd_state *state = tcmur_dev_get_private(dev);
248
249 static int tcmu_rbd_remove_blocklist(struct tcmu_device *dev, char *addr)
250 {
251 struct tcmu_rbd_state *state = tcmur_dev_get_private(dev);
252 char *cmd;
253 int ret;
254
255 ret = asprintf(&cmd,
256 "{\"prefix\": \"osd blocklist\","
257 "\"blocklistop\": \"rm\","
258 "\"addr\": \"%s\"}",
259 addr);
260 if (ret < 0) {
261 tcmu_dev_warn(dev, "Could not allocate blocklist rm command. (Err %d)\n",
262 ret);
263 return -1;
264 }
265 ret = rados_mon_command(state->cluster, (const char**)&cmd, 1, NULL, 0,
266 NULL, NULL, NULL, NULL);
267 free(cmd);
268 if (ret < 0) {
269 tcmu_dev_dbg(dev, "Could not rm blocklist entry '%s'. (Err %d)\n",
270 addr, ret);
271 return 1;
272 }
273 return 0;
274 }
275
276 /* Old style */
277 static int tcmu_rbd_remove_blacklist(struct tcmu_device *dev, char *addr)
278 {
279 struct tcmu_rbd_state *state = tcmur_dev_get_private(dev);
280 char *cmd;
281 int ret;
282
283 ret = asprintf(&cmd,
284 "{\"prefix\": \"osd blacklist\","
285 "\"blacklistop\": \"rm\","
286 "\"addr\": \"%s\"}",
287 addr);
288 if (ret < 0) {
289 tcmu_dev_warn(dev, "Could not allocate blacklist rm command. (Err %d)\n",
290 ret);
291 return -1;
292 }
293 ret = rados_mon_command(state->cluster, (const char**)&cmd, 1, NULL, 0,
294 NULL, NULL, NULL, NULL);
295 free(cmd);
296 if (ret < 0) {
297 tcmu_dev_err(dev, "Could not rm blacklist entry '%s'. (Err %d)\n",
298 addr, ret);
299 return 1;
300 }
301 return 0;
302 }
303
304 static void tcmu_rbd_rm_stale_entry_from_blocklist(struct tcmu_device *dev, char *addrs)
305 {
252306 const char *p, *q, *end;
253 char *cmd, *addr;
307 char *addr;
254308 int ret;
255309
256310 /*
302356 p = NULL;
303357 }
304358
305 ret = asprintf(&cmd,
306 "{\"prefix\": \"osd blacklist\","
307 "\"blacklistop\": \"rm\","
308 "\"addr\": \"%s\"}",
309 addr);
359 ret = tcmu_rbd_remove_blocklist(dev, addr);
360 if (ret > 0)
361 ret = tcmu_rbd_remove_blacklist(dev, addr);
310362 free(addr);
311 if (ret < 0) {
312 tcmu_dev_warn(dev, "Could not allocate command. (Err %d)\n",
313 ret);
363 if (ret)
314364 return;
315 }
316 ret = rados_mon_command(state->cluster, (const char**)&cmd, 1, NULL, 0,
317 NULL, NULL, NULL, NULL);
318 free(cmd);
319 if (ret < 0) {
320 tcmu_dev_err(dev, "Could not rm blacklist entry '%s'. (Err %d)\n",
321 addr, ret);
322 return;
323 }
324 }
325 }
326
327 static int tcmu_rbd_rm_stale_entries_from_blacklist(struct tcmu_device *dev)
365 }
366 }
367
368 static int tcmu_rbd_rm_stale_entries_from_blocklist(struct tcmu_device *dev)
328369 {
329370 char **entry, *tmp_entry;
330371 int ret = 0;
331372 int i;
332373
333 pthread_mutex_lock(&blacklist_caches_lock);
334 if (darray_empty(blacklist_caches))
374 pthread_mutex_lock(&blocklist_caches_lock);
375 if (darray_empty(blocklist_caches))
335376 goto unlock;
336377
337 /* Try to remove all the stale blacklist entities */
338 darray_foreach(entry, blacklist_caches) {
339 tcmu_dev_info(dev, "removing addrs: {%s}\n", *entry);
340 tcmu_rbd_rm_stale_entry_from_blacklist(dev, *entry);
378 /* Try to remove all the stale blocklist entities */
379 darray_foreach(entry, blocklist_caches) {
380 tcmu_dev_info(dev, "removing blocklist entry: {%s}\n", *entry);
381 tcmu_rbd_rm_stale_entry_from_blocklist(dev, *entry);
341382 }
342383
343384 unlock:
344 for (i = darray_size(blacklist_caches) - 1; i >= 0; i--) {
345 tmp_entry = darray_item(blacklist_caches, i);
346 darray_remove(blacklist_caches, i);
385 for (i = darray_size(blocklist_caches) - 1; i >= 0; i--) {
386 tmp_entry = darray_item(blocklist_caches, i);
387 darray_remove(blocklist_caches, i);
347388 free(tmp_entry);
348389 }
349390
350 pthread_mutex_unlock(&blacklist_caches_lock);
391 pthread_mutex_unlock(&blocklist_caches_lock);
351392 return ret;
352393 }
353394 #endif // LIBRADOS_SUPPORTS_GETADDRS || RBD_LOCK_ACQUIRE_SUPPORT
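Newer Ceph releases renamed the monitor command from "osd blacklist" to "osd blocklist". The removal path now tries the new spelling first; a positive return from the helper means the monitor rejected the command, so the old spelling is retried for older clusters. A sketch of that dispatch, with an illustrative stand-in for rados_mon_command:

    #include <stdio.h>

    /* returns 0 on success, 1 if the monitor rejected the command,
     * -1 on local failure (mirroring the helpers in the diff) */
    static int try_cmd(const char *op, const char *addr)
    {
        printf("mon command: osd %s rm %s\n", op, addr);
        return 0;   /* pretend the cluster understood it */
    }

    static int remove_entry(const char *addr)
    {
        int ret = try_cmd("blocklist", addr);    /* new spelling first */

        if (ret > 0)
            ret = try_cmd("blacklist", addr);    /* fall back for old clusters */
        return ret;
    }

    int main(void)
    {
        return remove_entry("10.0.0.1:0/123456"); /* illustrative addr */
    }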
640681 * Returns:
641682 * 0 = client is not owner.
642683 * 1 = client is owner.
643 * -ESHUTDOWN/-EBLACKLISTED(-108) = client is blacklisted.
684 * -ESHUTDOWN/-EBLOCKLISTED(-108) = client is blocklisted.
644685 * -ETIMEDOUT = rados osd op timeout has expired.
645686 * -EIO = misc error.
646687 */
652693 ret = rbd_is_exclusive_lock_owner(state->image, &is_owner);
653694 if (ret < 0) {
654695 if (ret == -ESHUTDOWN) {
655 tcmu_dev_dbg(dev, "Client is blacklisted. Could not check lock ownership.\n");
696 tcmu_dev_dbg(dev, "Client is blocklisted. Could not check lock ownership.\n");
656697 } else {
657698 tcmu_dev_err(dev, "Could not check lock ownership. Error: %s.\n",
658699 strerror(-ret));
911952 ret = rbd_lock_get_owners(state->image, &lock_mode, owners1,
912953 &num_owners1);
913954 if ((!ret && !num_owners1) || ret < 0) {
914 tcmu_dev_warn(dev, "Could not get lock owners to store blacklist entry %d!\n",
955 tcmu_dev_warn(dev, "Could not get lock owners to store blocklist entry %d!\n",
915956 ret);
916957 } else {
917958 int is_owner;
920961 ret = rbd_is_exclusive_lock_owner(state->image, &is_owner);
921962 if (ret) {
922963 rbd_lock_get_owners_cleanup(owners1, num_owners1);
923 tcmu_dev_warn(dev, "Could not check lock owners to store blacklist entry %d!\n",
964 tcmu_dev_warn(dev, "Could not check lock owners to store blocklist entry %d!\n",
924965 ret);
925966 goto no_owner;
926967 }
929970 ret = rbd_lock_get_owners(state->image, &lock_mode, owners2,
930971 &num_owners2);
931972 if ((!ret && !num_owners2) || ret < 0) {
932 tcmu_dev_warn(dev, "Could not get lock owners to store blacklist entry %d!\n",
973 tcmu_dev_warn(dev, "Could not get lock owners to store blocklist entry %d!\n",
933974 ret);
934 /* Only we didn't lose the lock during the above check will we store the blacklist list */
975 /* Only if we didn't lose the lock during the above check will we store the blocklist entry */
935976 } else if (!strcmp(owners1[0], owners2[0]) && is_owner) {
936977 state->addrs = strdup(owners1[0]); // ignore the errors
937978 }
943984 #endif
944985
945986 set_lock_tag:
946 tcmu_dev_warn(dev, "Acquired exclusive lock.\n");
987 tcmu_dev_info(dev, "Acquired exclusive lock.\n");
947988 if (tag != TCMU_INVALID_LOCK_TAG)
948989 ret = tcmu_rbd_set_lock_tag(dev, tag);
949990
10241065 char *config, *dev_cfg_dup;
10251066 struct tcmu_rbd_state *state;
10261067 uint32_t max_blocks, unmap_gran;
1027 int ret;
10281068 char buf[128];
1069 char *savedptr = NULL;
1070 int ret;
10291071
10301072 state = calloc(1, sizeof(*state));
10311073 if (!state)
10511093 }
10521094 config += 1; /* get past '/' */
10531095
1054 pool = strtok(config, "/");
1096 pool = strtok_r(config, "/", &savedptr);
10551097 if (!pool) {
10561098 tcmu_dev_err(dev, "Could not get pool name\n");
10571099 ret = -EINVAL;
10641106 goto free_config;
10651107 }
10661108
1067 name = strtok(NULL, ";");
1109 name = strtok_r(NULL, ";", &savedptr);
10681110 if (!name) {
10691111 tcmu_dev_err(dev, "Could not get image name\n");
10701112 ret = -EINVAL;
10791121 }
10801122
10811123 /* The next options are optional */
1082 next_opt = strtok(NULL, ";");
1124 next_opt = strtok_r(NULL, ";", &savedptr);
10831125 while (next_opt) {
10841126 if (!strncmp(next_opt, "osd_op_timeout=", 15)) {
10851127 state->osd_op_timeout = strdup(next_opt + 15);
11041146 goto free_config;
11051147 }
11061148 }
1107 next_opt = strtok(NULL, ";");
1149 next_opt = strtok_r(NULL, ";", &savedptr);
11081150 }
11091151
11101152 ret = tcmu_rbd_image_open(dev);
11521194 tcmu_dev_set_write_cache_enabled(dev, 0);
11531195
11541196 #if defined LIBRADOS_SUPPORTS_GETADDRS || defined RBD_LOCK_ACQUIRE_SUPPORT
1155 tcmu_rbd_rm_stale_entries_from_blacklist(dev);
1197 tcmu_rbd_rm_stale_entries_from_blocklist(dev);
11561198 #endif
11571199
11581200 #ifdef LIBRADOS_SUPPORTS_GETADDRS
11591201 /* Get current entry address for the image */
11601202 ret = rados_getaddrs(state->cluster, &state->addrs);
1203 if (ret < 0) {
1204 tcmu_dev_err(dev, "Could not get address. (Err %d)\n", ret);
1205 goto stop_image;
1206 }
11611207 tcmu_dev_info(dev, "address: {%s}\n", state->addrs);
1162 if (ret < 0)
1163 return ret;
11641208 #endif
11651209
11661210 free(dev_cfg_dup);
11831227
11841228 /*
11851229 * Since we are closing the device, but current device maybe
1186 * already blacklisted by other tcmu nodes. Let's just save
1187 * the entity addrs into the blacklist_caches, and let any
1230 * already blocklisted by other tcmu nodes. Let's just save
1231 * the entity addrs into the blocklist_caches, and let any
11881232 * other new device help remove it.
11891233 */
11901234 if (state->addrs) {
1191 pthread_mutex_lock(&blacklist_caches_lock);
1192 darray_append(blacklist_caches, state->addrs);
1193 pthread_mutex_unlock(&blacklist_caches_lock);
1235 pthread_mutex_lock(&blocklist_caches_lock);
1236 darray_append(blocklist_caches, state->addrs);
1237 pthread_mutex_unlock(&blocklist_caches_lock);
1238 tcmu_dev_info(dev, "appended blocklist entry: {%s}\n", state->addrs);
11941239 state->addrs = NULL;
11951240 }
11961241
11971242 tcmu_rbd_state_free(state);
11981243 }
11991244
1200 static int tcmu_rbd_handle_blacklisted_cmd(struct tcmu_device *dev)
1245 static int tcmu_rbd_handle_blocklisted_cmd(struct tcmu_device *dev)
12011246 {
12021247 tcmu_notify_lock_lost(dev);
12031248 /*
12041249 * This will happen during failback normally, because
1205 * running IO is failed due to librbd's immediate blacklisting
1250 * running IO is failed due to librbd's immediate blocklisting
12061251 * during lock acquisition on a higher priority path.
12071252 */
12081253 return TCMU_STS_BUSY;
12271272 * that end up reaching it after the initiator's failover/recovery
12281273 * timeout. For implicit and explicit FO, we will just disable
12291274 * the iscsi port, and let the initiator switch paths which will
1230 * result in us getting blacklisted, so fail with a retryable
1275 * result in us getting blocklisted, so fail with a retryable
12311276 * error.
12321277 */
12331278 return TCMU_STS_TIMEOUT;
13161361 if (ret == -ETIMEDOUT) {
13171362 tcmu_r = tcmu_rbd_handle_timedout_cmd(dev);
13181363 } else if (ret == -ESHUTDOWN || ret == -EROFS) {
1319 tcmu_r = tcmu_rbd_handle_blacklisted_cmd(dev);
1364 tcmu_r = tcmu_rbd_handle_blocklisted_cmd(dev);
13201365 } else if (ret == -EILSEQ && aio_cb->type == RBD_AIO_TYPE_CAW) {
13211366 cmp_offset = aio_cb->caw.miscompare_offset - aio_cb->caw.offset;
13221367 tcmu_dev_dbg(dev, "CAW miscompare at offset %u.\n", cmp_offset);
16371682 case TCMULIB_CFG_DEV_SIZE:
16381683 /*
16391684 * Apps will already have resized on the ceph side, so no
1640 * need to double check and have to also handle unblacklisting
1685 * need to double check and have to also handle unblocklisting
16411686 * the client from this context.
16421687 */
16431688 return 0;
16501695
16511696 static int tcmu_rbd_init(void)
16521697 {
1653 darray_init(blacklist_caches);
1698 darray_init(blocklist_caches);
16541699 return 0;
16551700 }
16561701
16591704 char **entry;
16601705
16611706 tcmu_info("destroying the rbd handler\n");
1662 pthread_mutex_lock(&blacklist_caches_lock);
1663 if (darray_empty(blacklist_caches))
1707 pthread_mutex_lock(&blocklist_caches_lock);
1708 if (darray_empty(blocklist_caches))
16641709 goto unlock;
16651710
1666 /* Try to remove all the stale blacklist entities */
1667 darray_foreach(entry, blacklist_caches)
1711 /* Try to remove all the stale blocklist entities */
1712 darray_foreach(entry, blocklist_caches)
16681713 free(*entry);
16691714
1670 darray_free(blacklist_caches);
1715 darray_free(blocklist_caches);
16711716
16721717 unlock:
1673 pthread_mutex_unlock(&blacklist_caches_lock);
1718 pthread_mutex_unlock(&blocklist_caches_lock);
16741719 }
16751720
16761721 /*
799799 orig_buf[0] = used_len - 1;
800800 }
801801
802 if (tcmu_dev_get_write_protect_enabled(dev)) {
803 if (sense_ten) {
804 orig_buf[3] |= 0x80;
805 } else {
806 orig_buf[2] |= 0x80;
807 }
808 }
809
802810 tcmu_memcpy_into_iovec(iovec, iov_cnt, orig_buf, alloc_len);
803811 free(orig_buf);
804812 return TCMU_STS_OK;
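With the new flag, MODE SENSE reports the medium as write-protected by setting bit 7 of the device-specific parameter byte: byte 3 of the MODE SENSE(10) header, byte 2 of the MODE SENSE(6) header, which is what the sense_ten branch above selects. A condensed sketch; the buffers are illustrative:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static void set_wp_bit(uint8_t *hdr, bool sense_ten, bool write_protected)
    {
        if (!write_protected)
            return;
        if (sense_ten)
            hdr[3] |= 0x80;   /* MODE SENSE(10): device-specific parameter */
        else
            hdr[2] |= 0x80;   /* MODE SENSE(6): device-specific parameter */
    }

    int main(void)
    {
        uint8_t hdr6[4] = {0}, hdr10[8] = {0};

        set_wp_bit(hdr6, false, true);
        set_wp_bit(hdr10, true, true);
        printf("WP bits: sense6 byte2=0x%02x, sense10 byte3=0x%02x\n",
               hdr6[2], hdr10[3]);
        return 0;
    }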
2828 static struct list_head tpg_recovery_list = LIST_HEAD_INIT(tpg_recovery_list);
2929 /*
3030 * Locking ordering:
31 * rdev->state_lock
31 * rdev->rdev_lock
3232 * tpg_recovery_lock
3333 */
3434 static pthread_mutex_t tpg_recovery_lock = PTHREAD_MUTEX_INITIALIZER;
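The updated comment pins the lock hierarchy after the rename: rdev_lock is always taken before tpg_recovery_lock. Honoring one global order everywhere is what rules out ABBA deadlocks between the device-state and recovery-list paths. An illustrative sketch:

    #include <pthread.h>

    static pthread_mutex_t rdev_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t tpg_recovery_lock = PTHREAD_MUTEX_INITIALIZER;

    static void queue_recovery(void)
    {
        pthread_mutex_lock(&rdev_lock);          /* outer lock always first */
        pthread_mutex_lock(&tpg_recovery_lock);  /* inner lock second */
        /* ... move the device onto the recovery list ... */
        pthread_mutex_unlock(&tpg_recovery_lock);
        pthread_mutex_unlock(&rdev_lock);
    }

    int main(void)
    {
        queue_recovery();
        return 0;
    }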
3434 Summary: A daemon that handles the userspace side of the LIO TCM-User backstore
3535 Group: System Environment/Daemons
3636 License: ASL 2.0 or LGPLv2+
37 Version: 1.5.4
37 Version: 1.6.0
3838 URL: https://github.com/open-iscsi/tcmu-runner
3939
4040 #%define _RC
41 Release: %{?_RC:%{_RC}}%{dist}
41 Release: %{?_RC:%{_RC}}%{?dist}
4242 BuildRoot: %(mktemp -udp %{_tmppath}/%{name}-%{version}%{?_RC:-%{_RC}})
4343 Source: %{name}-%{version}%{?_RC:-%{_RC}}.tar.gz
4444 ExclusiveOS: Linux
3939 struct tcmur_cmd *tcmur_cmd = cmd->hm_private;
4040 struct timespec curr_time;
4141
42 pthread_cleanup_push(_cleanup_spin_lock, (void *)&rdev->lock);
43 pthread_spin_lock(&rdev->lock);
42 pthread_cleanup_push(_cleanup_spin_lock, (void *)&rdev->cmds_list_lock);
43 pthread_spin_lock(&rdev->cmds_list_lock);
4444
4545 if (tcmur_cmd->timed_out) {
4646 if (tcmur_get_time(dev, &curr_time)) {
5959
6060 tcmulib_command_complete(dev, cmd, rc);
6161
62 pthread_spin_unlock(&rdev->lock);
62 pthread_spin_unlock(&rdev->cmds_list_lock);
6363 pthread_cleanup_pop(0);
6464 }
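The completion path runs with cancellation enabled, so the spinlock is wrapped in pthread_cleanup_push: if the cmdproc thread is cancelled inside the critical section, the handler releases the lock; pthread_cleanup_pop(0) skips the handler on the normal path because the code unlocks explicitly. A sketch of the idiom, mirroring _cleanup_spin_lock in the diff:

    #include <pthread.h>

    static void unlock_on_cancel(void *arg)
    {
        pthread_spin_unlock((pthread_spinlock_t *)arg);
    }

    static void complete_cmd(pthread_spinlock_t *lock)
    {
        pthread_cleanup_push(unlock_on_cancel, lock);
        pthread_spin_lock(lock);

        /* ... complete the command; a cancellation point here would
         * otherwise leave the spinlock held forever ... */

        pthread_spin_unlock(lock);
        pthread_cleanup_pop(0);   /* 0: handler not run on normal exit */
    }

    int main(void)
    {
        pthread_spinlock_t lock;

        pthread_spin_init(&lock, 0);
        complete_cmd(&lock);
        pthread_spin_destroy(&lock);
        return 0;
    }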
6565
23282328 {
23292329 struct tcmur_device *rdev = tcmu_dev_get_private(dev);
23302330
2331 pthread_mutex_lock(&rdev->state_lock);
2331 pthread_mutex_lock(&rdev->rdev_lock);
23322332 rdev->pending_uas |= (1 << ua);
2333 pthread_mutex_unlock(&rdev->state_lock);
2333 pthread_mutex_unlock(&rdev->rdev_lock);
23342334 }
23352335
23362336 /*
23472347 /* The kernel will handle REPORT_LUNS */
23482348 return TCMU_STS_NOT_HANDLED;
23492349 }
2350 pthread_mutex_lock(&rdev->state_lock);
2350 pthread_mutex_lock(&rdev->rdev_lock);
23512351
23522352 if (!rdev->pending_uas) {
23532353 ret = TCMU_STS_NOT_HANDLED;
23632363 rdev->pending_uas &= ~(1 << ua);
23642364
23652365 unlock:
2366 pthread_mutex_unlock(&rdev->state_lock);
2366 pthread_mutex_unlock(&rdev->rdev_lock);
23672367 return ret;
23682368 }
23692369
2828 struct tcmur_device *rdev = tcmu_dev_get_private(dev);
2929 int in_recov = false;
3030
31 pthread_mutex_lock(&rdev->state_lock);
31 pthread_mutex_lock(&rdev->rdev_lock);
3232 if (rdev->flags & TCMUR_DEV_FLAG_IN_RECOVERY)
3333 in_recov = true;
34 pthread_mutex_unlock(&rdev->state_lock);
34 pthread_mutex_unlock(&rdev->rdev_lock);
3535 return in_recov;
3636 }
3737
4545 int ret, attempt = 0;
4646 bool needs_close = false;
4747
48 pthread_mutex_lock(&rdev->state_lock);
48 pthread_mutex_lock(&rdev->rdev_lock);
4949 if (rdev->flags & TCMUR_DEV_FLAG_STOPPING) {
5050 ret = 0;
5151 goto done;
5252 }
53 pthread_mutex_unlock(&rdev->state_lock);
53 pthread_mutex_unlock(&rdev->rdev_lock);
5454
5555 /*
5656 * There are no SCSI commands running but there may be
5959 */
6060 tcmur_flush_work(rdev->event_work);
6161
62 pthread_mutex_lock(&rdev->state_lock);
62 pthread_mutex_lock(&rdev->rdev_lock);
6363 if (rdev->flags & TCMUR_DEV_FLAG_IS_OPEN)
6464 needs_close = true;
6565 rdev->flags &= ~TCMUR_DEV_FLAG_IS_OPEN;
66 pthread_mutex_unlock(&rdev->state_lock);
66 pthread_mutex_unlock(&rdev->rdev_lock);
6767
6868 if (pthread_self() != rdev->cmdproc_thread)
6969 /*
7676 tcmu_dev_dbg(dev, "Waiting for outstanding commands to complete\n");
7777 ret = aio_wait_for_empty_queue(rdev);
7878 if (ret) {
79 pthread_mutex_lock(&rdev->state_lock);
79 pthread_mutex_lock(&rdev->rdev_lock);
8080 goto done;
8181 }
8282
8585 rhandler->close(dev);
8686 }
8787
88 pthread_mutex_lock(&rdev->state_lock);
88 pthread_mutex_lock(&rdev->rdev_lock);
8989 ret = -EIO;
9090 while (ret != 0 && !(rdev->flags & TCMUR_DEV_FLAG_STOPPING) &&
9191 (retries < 0 || attempt <= retries)) {
92 pthread_mutex_unlock(&rdev->state_lock);
92 pthread_mutex_unlock(&rdev->rdev_lock);
9393
9494 tcmu_dev_dbg(dev, "Opening device. Attempt %d\n", attempt);
9595 ret = rhandler->open(dev, true);
9898 sleep(1);
9999 }
100100
101 pthread_mutex_lock(&rdev->state_lock);
101 pthread_mutex_lock(&rdev->rdev_lock);
102102 if (!ret) {
103103 rdev->flags |= TCMUR_DEV_FLAG_IS_OPEN;
104104 rdev->lock_lost = false;
108108
109109 done:
110110 rdev->flags &= ~TCMUR_DEV_FLAG_IN_RECOVERY;
111 pthread_mutex_unlock(&rdev->state_lock);
111 pthread_mutex_unlock(&rdev->rdev_lock);
112112
113113 return ret;
114114 }
122122 {
123123 struct tcmur_device *rdev = tcmu_dev_get_private(dev);
124124
125 pthread_mutex_lock(&rdev->state_lock);
125 pthread_mutex_lock(&rdev->rdev_lock);
126126 if (rdev->flags & TCMUR_DEV_FLAG_IN_RECOVERY) {
127 pthread_mutex_unlock(&rdev->state_lock);
127 pthread_mutex_unlock(&rdev->rdev_lock);
128128 return -EBUSY;
129129 }
130130 rdev->flags |= TCMUR_DEV_FLAG_IN_RECOVERY;
131 pthread_mutex_unlock(&rdev->state_lock);
131 pthread_mutex_unlock(&rdev->rdev_lock);
132132
133133 return __tcmu_reopen_dev(dev, retries);
134134 }
143143 * handlers to fail/complete normally to avoid a segfault.
144144 */
145145 tcmu_dev_dbg(dev, "Waiting on recovery thread\n");
146 pthread_mutex_lock(&rdev->state_lock);
146 pthread_mutex_lock(&rdev->rdev_lock);
147147 while (rdev->flags & TCMUR_DEV_FLAG_IN_RECOVERY) {
148 pthread_mutex_unlock(&rdev->state_lock);
148 pthread_mutex_unlock(&rdev->rdev_lock);
149149 sleep(1);
150 pthread_mutex_lock(&rdev->state_lock);
151 }
152 pthread_mutex_unlock(&rdev->state_lock);
150 pthread_mutex_lock(&rdev->rdev_lock);
151 }
152 pthread_mutex_unlock(&rdev->rdev_lock);
153153 tcmu_dev_dbg(dev, "Recovery thread wait done\n");
154154 }
155155
166166 */
167167 sleep(1);
168168
169 pthread_mutex_lock(&rdev->state_lock);
169 pthread_mutex_lock(&rdev->rdev_lock);
170170 ret = rhandler->report_event(dev);
171171 if (ret)
172172 tcmu_dev_err(dev, "Could not report events. Error %d.\n", ret);
173 pthread_mutex_unlock(&rdev->state_lock);
173 pthread_mutex_unlock(&rdev->rdev_lock);
174174 }
175175
176176 static void tcmu_report_event(struct tcmu_device *dev)
235235 struct tcmur_device *rdev = tcmu_dev_get_private(dev);
236236 bool report;
237237
238 pthread_mutex_lock(&rdev->state_lock);
238 pthread_mutex_lock(&rdev->rdev_lock);
239239 report =__tcmu_notify_conn_lost(dev);
240 pthread_mutex_unlock(&rdev->state_lock);
240 pthread_mutex_unlock(&rdev->rdev_lock);
241241
242242 if (report)
243243 tcmu_report_event(dev);
266266 {
267267 struct tcmur_device *rdev = tcmu_dev_get_private(dev);
268268
269 pthread_mutex_lock(&rdev->state_lock);
269 pthread_mutex_lock(&rdev->rdev_lock);
270270 tcmu_dev_warn(dev, "Async lock drop. Old state %d\n", rdev->lock_state);
271271 /*
272272 * We could be getting stale IO completions. If we are trying to
275275 if (rdev->lock_state != TCMUR_DEV_LOCK_WRITE_LOCKING) {
276276 __tcmu_notify_lock_lost(dev);
277277 }
278 pthread_mutex_unlock(&rdev->state_lock);
278 pthread_mutex_unlock(&rdev->rdev_lock);
279279 }
280280
281281 void tcmu_release_dev_lock(struct tcmu_device *dev)
284284 struct tcmur_device *rdev = tcmu_dev_get_private(dev);
285285 int ret;
286286
287 pthread_mutex_lock(&rdev->state_lock);
287 pthread_mutex_lock(&rdev->rdev_lock);
288288 if (rdev->lock_state != TCMUR_DEV_LOCK_WRITE_LOCKED) {
289 pthread_mutex_unlock(&rdev->state_lock);
289 pthread_mutex_unlock(&rdev->rdev_lock);
290290 return;
291291 }
292292
293293 if (!(rdev->flags & TCMUR_DEV_FLAG_IS_OPEN)) {
294294 tcmu_dev_dbg(dev, "Device is closed so unlock is not needed\n");
295295 rdev->lock_state = TCMUR_DEV_LOCK_UNLOCKED;
296 pthread_mutex_unlock(&rdev->state_lock);
296 pthread_mutex_unlock(&rdev->rdev_lock);
297297 return;
298298 }
299299
300 pthread_mutex_unlock(&rdev->state_lock);
300 pthread_mutex_unlock(&rdev->rdev_lock);
301301
302302 ret = rhandler->unlock(dev);
303303 if (ret != TCMU_STS_OK)
308308 * to unlocked to prevent new IO from executing in case the lock
309309 * is in a state where it cannot be fenced.
310310 */
311 pthread_mutex_lock(&rdev->state_lock);
311 pthread_mutex_lock(&rdev->rdev_lock);
312312 rdev->lock_state = TCMUR_DEV_LOCK_UNLOCKED;
313 pthread_mutex_unlock(&rdev->state_lock);
313 pthread_mutex_unlock(&rdev->rdev_lock);
314314 }
315315
316316 int tcmu_get_lock_tag(struct tcmu_device *dev, uint16_t *tag)
322322 if (rdev->failover_type != TCMUR_DEV_FAILOVER_EXPLICIT)
323323 return 0;
324324
325 pthread_mutex_lock(&rdev->state_lock);
325 pthread_mutex_lock(&rdev->rdev_lock);
326326 if (!(rdev->flags & TCMUR_DEV_FLAG_IS_OPEN)) {
327327 /*
328328 * Return tmp error until the recovery thread is able to
329329 * start up.
330330 */
331 pthread_mutex_unlock(&rdev->state_lock);
331 pthread_mutex_unlock(&rdev->rdev_lock);
332332 return TCMU_STS_BUSY;
333333 }
334 pthread_mutex_unlock(&rdev->state_lock);
334 pthread_mutex_unlock(&rdev->rdev_lock);
335335
336336 retry:
337337 ret = rhandler->get_lock_tag(dev, tag);
414414 */
415415
416416 reopen = false;
417 pthread_mutex_lock(&rdev->state_lock);
417 pthread_mutex_lock(&rdev->rdev_lock);
418418 if (rdev->lock_lost || !(rdev->flags & TCMUR_DEV_FLAG_IS_OPEN))
419419 reopen = true;
420 pthread_mutex_unlock(&rdev->state_lock);
420 pthread_mutex_unlock(&rdev->rdev_lock);
421421
422422 retry:
423423 tcmu_dev_dbg(dev, "lock call state %d retries %d. tag %hu reopen %d\n",
435435 }
436436 }
437437
438 pthread_mutex_lock(&rdev->state_lock);
438 pthread_mutex_lock(&rdev->rdev_lock);
439439 if (rdev->lock_state == TCMUR_DEV_LOCK_READ_LOCKING) {
440 pthread_mutex_unlock(&rdev->state_lock);
440 pthread_mutex_unlock(&rdev->rdev_lock);
441441 ret = TCMU_STS_OK;
442442 goto done;
443443 }
444 pthread_mutex_unlock(&rdev->state_lock);
444 pthread_mutex_unlock(&rdev->rdev_lock);
445445
446446 ret = rhandler->lock(dev, tag);
447447 if (ret == TCMU_STS_FENCED) {
475475 tcmu_dev_flush_ring(dev);
476476
477477 /* TODO: set UA based on bgly's patches */
478 pthread_mutex_lock(&rdev->state_lock);
478 pthread_mutex_lock(&rdev->rdev_lock);
479479 if (ret != TCMU_STS_OK) {
480480 rdev->lock_state = TCMUR_DEV_LOCK_UNLOCKED;
481481 tcmu_dev_info(dev, "Lock acquisition unsuccessful\n");
497497
498498 tcmu_cfgfs_dev_exec_action(dev, "block_dev", 0);
499499
500 pthread_mutex_unlock(&rdev->state_lock);
500 pthread_mutex_unlock(&rdev->rdev_lock);
501501
502502 return ret;
503503 }
511511 if (!rhandler->get_lock_state)
512512 return;
513513
514 pthread_mutex_lock(&rdev->state_lock);
514 pthread_mutex_lock(&rdev->rdev_lock);
515515 if (!(rdev->flags & TCMUR_DEV_FLAG_IS_OPEN)) {
516516 tcmu_dev_dbg(dev, "device closed.\n");
517517 state = TCMUR_DEV_LOCK_UNKNOWN;
518518 goto check_state;
519519 }
520 pthread_mutex_unlock(&rdev->state_lock);
520 pthread_mutex_unlock(&rdev->rdev_lock);
521521
522522 state = rhandler->get_lock_state(dev);
523 pthread_mutex_lock(&rdev->state_lock);
523 pthread_mutex_lock(&rdev->rdev_lock);
524524 check_state:
525525 if (rdev->lock_state == TCMUR_DEV_LOCK_WRITE_LOCKED &&
526526 state != TCMUR_DEV_LOCK_WRITE_LOCKED) {
527527 tcmu_dev_dbg(dev, "Updated out of sync lock state.\n");
528528 __tcmu_notify_lock_lost(dev);
529529 }
530 pthread_mutex_unlock(&rdev->state_lock);
530 pthread_mutex_unlock(&rdev->rdev_lock);
531531 }
532532
533533 void tcmur_dev_set_private(struct tcmu_device *dev, void *private)
548548 {
549549 struct tcmur_device *rdev = tcmu_dev_get_private(dev);
550550
551 pthread_mutex_lock(&rdev->state_lock);
551 pthread_mutex_lock(&rdev->rdev_lock);
552552 rdev->cmd_timed_out_cnt++;
553553 __tcmu_notify_conn_lost(dev);
554 pthread_mutex_unlock(&rdev->state_lock);
554 pthread_mutex_unlock(&rdev->rdev_lock);
555555
556556 tcmu_report_event(dev);
557557 }
4747
4848 pthread_t cmdproc_thread;
4949
50 /* General lock for the members from "flags" to "pending_uas" */
51 pthread_mutex_t rdev_lock;
52
5053 /* TCMUR_DEV flags */
5154 uint32_t flags;
5255 uint8_t failover_type;
6265 bool lock_lost;
6366 uint8_t lock_state;
6467
65 /* General lock for lock state, thread, dev state, etc */
66 pthread_mutex_t state_lock;
6768 int pending_uas;
6869
6970 /*
7475 struct tcmu_io_queue work_queue;
7576 struct tcmu_track_aio track_queue;
7677
77 pthread_spinlock_t lock; /* protects concurrent updates to mailbox */
7878 pthread_mutex_t caw_lock; /* for atomic CAW operation */
7979
8080 uint32_t format_progress;
8181 pthread_mutex_t format_lock; /* for atomic format operations */
8282
8383 int cmd_time_out;
84
85 pthread_spinlock_t cmds_list_lock; /* protects cmds_list */
8486 struct list_head cmds_list;
8587 };
8688
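The renames make each lock's coverage explicit: rdev_lock (a mutex) protects the block of fields from flags down to pending_uas, while cmds_list_lock (a spinlock, suited to the short critical sections on the per-command I/O path) protects only cmds_list. A condensed sketch of that mapping; fields are abbreviated from the struct above:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    struct tcmur_device_sketch {
        pthread_mutex_t rdev_lock;      /* guards flags .. pending_uas */
        uint32_t flags;
        uint8_t lock_state;
        int pending_uas;

        /* guards only cmds_list; a spinlock keeps the hot
         * per-command sections cheap */
        pthread_spinlock_t cmds_list_lock;
        /* struct list_head cmds_list; */
    };

    int main(void)
    {
        printf("sketch size: %zu bytes\n",
               sizeof(struct tcmur_device_sketch));
        return 0;
    }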
4040
4141 static void __tcmur_flush_work(struct tcmur_work *work)
4242 {
43 char pname[TCMU_THREAD_NAME_LEN];
44
45 if (pthread_getname_np(pthread_self(), pname, TCMU_THREAD_NAME_LEN))
46 return;
47
4843 /*
4944 * The event work thread may need to do a handler reopen
5045 * call and try to flush itself. Just ignore.
5146 */
52 if (!strcmp(pname, "ework-thread"))
47 if (__tcmu_is_ework_thread)
5348 return;
5449
5550 /*
7873 struct private *p = data;
7974
8075 tcmu_set_thread_name("ework-thread", NULL);
76 __tcmu_is_ework_thread = 1;
8177
8278 p->work_fn(p->data);
8379