Gentoo Archives: gentoo-commits

From: Patrick McLean <chutzpah@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] repo/gentoo:master commit in: sys-cluster/ceph/, sys-cluster/ceph/files/
Date: Wed, 02 Aug 2017 21:04:33
Message-Id: 1501707862.36236559c3205a224ed43e6d6ac4dccc4b3a4c29.chutzpah@gentoo
1 commit: 36236559c3205a224ed43e6d6ac4dccc4b3a4c29
2 Author: Patrick McLean <chutzpah <AT> gentoo <DOT> org>
3 AuthorDate: Wed Aug 2 21:03:55 2017 +0000
4 Commit: Patrick McLean <chutzpah <AT> gentoo <DOT> org>
5 CommitDate: Wed Aug 2 21:04:22 2017 +0000
6 URL: https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=36236559
7
8 sys-cluster/ceph: Revision bump to 10.2.9, pull in fixes from upstream
9
10 Also update the openrc dependency to make sure we are using a version
11 that supports process supervision properly.
12
13 Package-Manager: Portage-2.3.6, Repoman-2.3.3
14
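For reference, the openrc requirement mentioned above is expressed in the new
ebuild's RDEPEND as a blocker atom. A minimal excerpt of that block (taken from
the diff below, with the other runtime dependencies elided); the "!<" atom
prevents ceph from being installed alongside any sys-apps/openrc older than
0.26.3, which this commit treats as the first version with working supervision
support:

    # excerpt from ceph-10.2.9-r3.ebuild -- other runtime dependencies elided
    RDEPEND="${COMMON_DEPEND}
        !<sys-apps/openrc-0.26.3
        "
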
15 sys-cluster/ceph/ceph-10.2.9-r3.ebuild | 308 +++++++++++++++++++++
16 ...2.9-filestore_fix_infinit_loops_in_fiemap.patch | 82 ++++++
17 ....2.9-librados_Fix_deadlock_in_watch_flush.patch | 61 ++++
18 ..._error_codes_from_is_exclusive_lock_owner.patch | 28 ++
19 ...ecifies_clone_ver_but_transaction_include.patch | 39 +++
20 ...t_attempt_to_load_key_if_auth_is_disabled.patch | 39 +++
21 ...ck_sdata_op_ordering_lock_with_sdata_lock.patch | 32 +++
22 7 files changed, 589 insertions(+)
23
24 diff --git a/sys-cluster/ceph/ceph-10.2.9-r3.ebuild b/sys-cluster/ceph/ceph-10.2.9-r3.ebuild
25 new file mode 100644
26 index 00000000000..d74301f3760
27 --- /dev/null
28 +++ b/sys-cluster/ceph/ceph-10.2.9-r3.ebuild
29 @@ -0,0 +1,308 @@
30 +# Copyright 1999-2017 Gentoo Foundation
31 +# Distributed under the terms of the GNU General Public License v2
32 +
33 +EAPI=6
34 +PYTHON_COMPAT=( python{2_7,3_{4,5,6}} )
35 +
36 +inherit check-reqs autotools eutils python-r1 udev user \
37 + readme.gentoo-r1 systemd versionator flag-o-matic
38 +
39 +if [[ ${PV} == *9999* ]]; then
40 + inherit git-r3
41 + EGIT_REPO_URI="https://github.com/ceph/ceph.git"
42 + SRC_URI=""
43 +else
44 + SRC_URI="https://download.ceph.com/tarballs/${P}.tar.gz"
45 + KEYWORDS="~amd64 ~arm64 ~x86"
46 +fi
47 +
48 +DESCRIPTION="Ceph distributed filesystem"
49 +HOMEPAGE="https://ceph.com/"
50 +
51 +LICENSE="LGPL-2.1"
52 +SLOT="0"
53 +
54 +IUSE="babeltrace cephfs cryptopp debug fuse gtk jemalloc ldap +libaio"
55 +IUSE+=" libatomic lttng +nss +radosgw static-libs +tcmalloc test xfs zfs"
56 +
57 +# unbundling code commented out pending bugs 584056 and 584058
58 +#>=dev-libs/jerasure-2.0.0-r1
59 +#>=dev-libs/gf-complete-2.0.0
60 +COMMON_DEPEND="
61 + app-arch/snappy:=
62 + sys-libs/zlib:=
63 + app-arch/lz4:=
64 + app-arch/bzip2:=
65 + app-arch/zstd:=
66 + dev-libs/boost:=[threads]
67 + dev-libs/libaio:=
68 + dev-libs/leveldb:=[snappy]
69 + nss? ( dev-libs/nss:= )
70 + libatomic? ( dev-libs/libatomic_ops:= )
71 + cryptopp? ( dev-libs/crypto++:= )
72 + sys-apps/keyutils
73 + sys-apps/util-linux
74 + dev-libs/libxml2:=
75 + radosgw? ( dev-libs/fcgi:= )
76 + ldap? ( net-nds/openldap:= )
77 + babeltrace? ( dev-util/babeltrace )
78 + fuse? ( sys-fs/fuse:0= )
79 + xfs? ( sys-fs/xfsprogs:= )
80 + zfs? ( sys-fs/zfs:= )
81 + gtk? (
82 + x11-libs/gtk+:2=
83 + dev-cpp/gtkmm:2.4
84 + gnome-base/librsvg:=
85 + )
86 + radosgw? (
87 + dev-libs/fcgi:=
88 + dev-libs/expat:=
89 + net-misc/curl:=
90 + )
91 + jemalloc? ( dev-libs/jemalloc:= )
92 + !jemalloc? ( =dev-util/google-perftools-2.4*:= )
93 + lttng? ( dev-util/lttng-ust:= )
94 + ${PYTHON_DEPS}
95 + "
96 +DEPEND="${COMMON_DEPEND}
97 + dev-python/cython[${PYTHON_USEDEP}]
98 + app-arch/cpio
99 + virtual/pkgconfig
100 + dev-python/sphinx
101 + test? (
102 + sys-fs/btrfs-progs
103 + sys-apps/grep[pcre]
104 + dev-python/tox[${PYTHON_USEDEP}]
105 + dev-python/virtualenv[${PYTHON_USEDEP}]
106 + )"
107 +RDEPEND="${COMMON_DEPEND}
108 + sys-apps/hdparm
109 + sys-block/parted
110 + sys-fs/cryptsetup
111 + sys-apps/gptfdisk
112 + !<sys-apps/openrc-0.26.3
113 + dev-python/flask[${PYTHON_USEDEP}]
114 + dev-python/requests[${PYTHON_USEDEP}]
115 + "
116 +REQUIRED_USE="
117 + $(python_gen_useflags 'python2*')
118 + ${PYTHON_REQUIRED_USE}
119 + ^^ ( nss cryptopp )
120 + ?? ( jemalloc tcmalloc )
121 + "
122 +
123 +# work around bug in ceph compilation (rgw/ceph_dencoder-rgw_dencoder.o... undefined reference to `vtable for RGWZoneGroup')
124 +REQUIRED_USE+=" radosgw"
125 +
126 +#RESTRICT="test? ( userpriv )"
127 +
128 +# distribution tarball does not include everything needed for tests
129 +RESTRICT+=" test"
130 +
131 +STRIP_MASK="/usr/lib*/rados-classes/*"
132 +
133 +UNBUNDLE_LIBS=(
134 + src/erasure-code/jerasure/jerasure
135 + src/erasure-code/jerasure/gf-complete
136 +)
137 +
138 +PATCHES=(
139 + "${FILESDIR}/ceph-10.2.0-dont-use-virtualenvs.patch"
140 + #"${FILESDIR}/ceph-10.2.1-unbundle-jerasure.patch"
141 + "${FILESDIR}/${PN}-10.2.1-libzfs.patch"
142 + "${FILESDIR}/${PN}-10.2.3-build-without-openldap.patch"
143 + "${FILESDIR}/${PN}-10.2.5-Make-RBD-Python-bindings-compatible-with-Python-3.patch"
144 + "${FILESDIR}/${PN}-10.2.5-Make-CephFS-bindings-and-tests-compatible-with-Python-3.patch"
145 + "${FILESDIR}/${PN}-10.2.7-fix-compilation-with-zstd.patch"
146 + # pull in some bugfixes from upstream
147 + "${FILESDIR}/${PN}-10.2.9-libradosstriper_fix_format_injection_vulnerability.patch"
148 + "${FILESDIR}/${PN}-10.2.9-rbd-nbd_relax_size_check_for_newer_kernel_versions.patch"
149 + "${FILESDIR}/${PN}-10.2.9-filestore_fix_infinit_loops_in_fiemap.patch"
150 + "${FILESDIR}/${PN}-10.2.9-librados_Fix_deadlock_in_watch_flush.patch"
151 + "${FILESDIR}/${PN}-10.2.9-librbd_filter_expected_error_codes_from_is_exclusive_lock_owner.patch"
152 + "${FILESDIR}/${PN}-10.2.9-osd-scrub_to_specifies_clone_ver_but_transaction_include.patch"
153 + "${FILESDIR}/${PN}-10.2.9-rbd-do_not_attempt_to_load_key_if_auth_is_disabled.patch"
154 + "${FILESDIR}/${PN}-10.2.9-unlock_sdata_op_ordering_lock_with_sdata_lock.patch"
155 +
156 +)
157 +
158 +check-reqs_export_vars() {
159 + if use debug; then
160 + CHECKREQS_DISK_BUILD="23G"
161 + CHECKREQS_DISK_USR="7G"
162 + elif use amd64; then
163 + CHECKREQS_DISK_BUILD="12G"
164 + CHECKREQS_DISK_USR="450M"
165 + else
166 + CHECKREQS_DISK_BUILD="1400M"
167 + CHECKREQS_DISK_USR="450M"
168 + fi
169 +
170 + export CHECKREQS_DISK_BUILD CHECKREQS_DISK_USR
171 +}
172 +
173 +user_setup() {
174 + enewgroup ceph ${CEPH_GID}
175 + enewuser ceph "${CEPH_UID:--1}" -1 /var/lib/ceph ceph
176 +}
177 +
178 +emake_python_bindings() {
179 + local action="${1}" params binding module
180 + shift
181 + params=("${@}")
182 +
183 + __emake_python_bindings_do_impl() {
184 + ceph_run_econf "${EPYTHON}"
185 + emake "${params[@]}" PYTHON="${EPYTHON}" "${binding}-pybind-${action}"
186 +
187 + # these don't work and aren't needed on python3
188 + if [[ ${EBUILD_PHASE} == install ]]; then
189 + for module in "${S}"/src/pybind/*.py; do
190 + module_basename="$(basename "${module}")"
191 + if [[ ${module_basename} == ceph_volume_client.py ]] && ! use cephfs; then
192 + continue
193 + elif [[ ! -e "${ED}/$(python_get_sitedir)/${module_basename}" ]]; then
194 + python_domodule ${module}
195 + fi
196 + done
197 + fi
198 + }
199 +
200 + pushd "${S}/src"
201 + for binding in rados rbd $(use cephfs && echo cephfs); do
202 + python_foreach_impl __emake_python_bindings_do_impl
203 + done
204 + popd
205 +
206 + unset __emake_python_bindings_do_impl
207 +}
208 +
209 +pkg_pretend() {
210 + check-reqs_export_vars
211 + check-reqs_pkg_pretend
212 +}
213 +
214 +pkg_setup() {
215 + python_setup
216 + check-reqs_export_vars
217 + check-reqs_pkg_setup
218 + user_setup
219 +}
220 +
221 +src_prepare() {
222 + default
223 +
224 + # remove tests that need root access
225 + rm src/test/cli/ceph-authtool/cap*.t
226 +
227 + #rm -rf "${UNBUNDLE_LIBS[@]}"
228 +
229 + append-flags -fPIC
230 + eautoreconf
231 +}
232 +
233 +src_configure() {
234 + ECONFARGS=(
235 + --without-hadoop
236 + --includedir=/usr/include
237 + $(use_with cephfs)
238 + $(use_with debug)
239 + $(use_with fuse)
240 + $(use_with libaio)
241 + $(use_with libatomic libatomic-ops)
242 + $(use_with nss)
243 + $(use_with cryptopp)
244 + $(use_with radosgw)
245 + $(use_with gtk gtk2)
246 + $(use_enable static-libs static)
247 + $(use_with jemalloc)
248 + $(use_with xfs libxfs)
249 + $(use_with zfs libzfs)
250 + $(use_with lttng )
251 + $(use_with babeltrace)
252 + $(use_with ldap openldap)
253 + $(use jemalloc || usex tcmalloc " --with-tcmalloc" " --with-tcmalloc-minimal")
254 + --with-mon
255 + --with-eventfd
256 + --with-cython
257 + --without-kinetic
258 + --without-librocksdb
259 + --with-systemdsystemunitdir="$(systemd_get_systemunitdir)"
260 + )
261 +
262 + # we can only use python2.7 for building at the moment
263 + ceph_run_econf "python2*"
264 +}
265 +
266 +ceph_run_econf() {
267 + [[ -z ${ECONFARGS} ]] && die "called ${FUNCNAME[0]} with ECONFARGS unset"
268 + [[ -z ${1} ]] && die "called ${FUNCNAME[0]} without passing python implementation"
269 +
270 + pushd "${S}" >/dev/null || die
271 + #
272 + # This generates a QA warning about running econf in src_compile
273 + # and src_install. Unfortunately the only other way to do this would
274 + # involve building all of ceph once for each python implementation, which
275 + # wastes a _lot_ of CPU time and disk space. This hack will no longer
276 + # be needed with >=ceph-11.2.
277 + #
278 + python_setup "${1}"
279 + econf "${ECONFARGS[@]}"
280 +
281 + popd >/dev/null || die
282 +}
283 +
284 +src_compile() {
285 + emake
286 + emake_python_bindings all
287 +
288 + use test && emake check-local
289 +}
290 +
291 +src_test() {
292 + make check || die "make check failed"
293 +}
294 +
295 +src_install() {
296 + default
297 + emake_python_bindings install-exec "DESTDIR=\"${D}\""
298 +
299 + prune_libtool_files --all
300 +
301 + exeinto /usr/$(get_libdir)/ceph
302 + newexe src/init-ceph ceph_init.sh
303 +
304 + insinto /etc/logrotate.d/
305 + newins "${FILESDIR}"/ceph.logrotate ${PN}
306 +
307 + keepdir /var/lib/${PN}{,/tmp} /var/log/${PN}/stat
308 +
309 + fowners -R ceph:ceph /var/lib/ceph /var/log/ceph
310 +
311 + newinitd "${FILESDIR}/rbdmap.initd" rbdmap
312 + newinitd "${FILESDIR}/${PN}.initd-r5" ${PN}
313 + newconfd "${FILESDIR}/${PN}.confd-r3" ${PN}
314 +
315 + insinto /etc/sysctl.d
316 + newins "${FILESDIR}"/sysctld 90-${PN}.conf
317 +
318 + use tcmalloc && newenvd "${FILESDIR}"/envd-tcmalloc 99${PN}-tcmalloc
319 +
320 + systemd_install_serviced "${FILESDIR}/ceph-mds_at.service.conf" "ceph-mds@.service"
321 + systemd_install_serviced "${FILESDIR}/ceph-osd_at.service.conf" "ceph-osd@.service"
322 +
323 + udev_dorules udev/*.rules
324 +
325 + readme.gentoo_create_doc
326 +
327 + python_setup 'python2*'
328 + python_fix_shebang "${ED}"/usr/{,s}bin/
329 +
330 + # python_fix_shebang apparently is not idempotent
331 + sed -i -r 's:(/usr/lib/python-exec/python[0-9]\.[0-9]/python)[0-9]\.[0-9]:\1:' \
332 + "${ED}"/usr/{sbin/ceph-disk,bin/ceph-detect-init} || die "sed failed"
333 +}
334 +
335 +pkg_postinst() {
336 + readme.gentoo_print_elog
337 +}
338
339 diff --git a/sys-cluster/ceph/files/ceph-10.2.9-filestore_fix_infinit_loops_in_fiemap.patch b/sys-cluster/ceph/files/ceph-10.2.9-filestore_fix_infinit_loops_in_fiemap.patch
340 new file mode 100644
341 index 00000000000..e67113e51cc
342 --- /dev/null
343 +++ b/sys-cluster/ceph/files/ceph-10.2.9-filestore_fix_infinit_loops_in_fiemap.patch
344 @@ -0,0 +1,82 @@
345 +From b52bfe6b443f0ff88c8614441752102058063699 Mon Sep 17 00:00:00 2001
346 +From: Ning Yao <yaoning@×××××××××××.com>
347 +Date: Thu, 6 Apr 2017 11:12:04 +0000
348 +Subject: [PATCH] os/filestore: fix infinit loops in fiemap()
349 +
350 +since fiemap retrieves extents based on offset --> len,
351 +we should consider the last extent retrieved once len == 0,
352 +even though it is not flagged as the last fiemap extent
353 +
354 +Signed-off-by: Ning Yao <yaoning@×××××××××××.com>
355 +(cherry picked from commit 36f6b668a8910d76847674086cbc86910c78faee)
356 +---
357 + src/os/filestore/FileStore.cc | 13 +++++--------
358 + src/test/objectstore/store_test.cc | 21 +++++++++++++++++++++
359 + 2 files changed, 26 insertions(+), 8 deletions(-)
360 +
361 +diff --git a/src/os/filestore/FileStore.cc b/src/os/filestore/FileStore.cc
362 +index c47b0d0d2eae..95f48cdf4960 100644
363 +--- a/src/os/filestore/FileStore.cc
364 ++++ b/src/os/filestore/FileStore.cc
365 +@@ -3102,17 +3102,14 @@ int FileStore::_do_fiemap(int fd, uint64_t offset, size_t len,
366 + i++;
367 + last = extent++;
368 + }
369 +- const bool is_last = last->fe_flags & FIEMAP_EXTENT_LAST;
370 ++ uint64_t xoffset = last->fe_logical + last->fe_length - offset;
371 ++ offset = last->fe_logical + last->fe_length;
372 ++ len -= xoffset;
373 ++ const bool is_last = (last->fe_flags & FIEMAP_EXTENT_LAST) || (len == 0);
374 ++ free(fiemap);
375 + if (!is_last) {
376 +- uint64_t xoffset = last->fe_logical + last->fe_length - offset;
377 +- offset = last->fe_logical + last->fe_length;
378 +- len -= xoffset;
379 +- free(fiemap); /* fix clang warn: use-after-free */
380 + goto more;
381 + }
382 +- else {
383 +- free(fiemap);
384 +- }
385 +
386 + return r;
387 + }
388 +diff --git a/src/test/objectstore/store_test.cc b/src/test/objectstore/store_test.cc
389 +index 5ab011ad17d8..4cada7e2e435 100644
390 +--- a/src/test/objectstore/store_test.cc
391 ++++ b/src/test/objectstore/store_test.cc
392 +@@ -279,6 +279,7 @@ TEST_P(StoreTest, FiemapHoles) {
393 + ASSERT_EQ(r, 0);
394 + }
395 + {
396 ++ //fiemap test from 0 to SKIP_STEP * (MAX_EXTENTS - 1) + 3
397 + bufferlist bl;
398 + store->fiemap(cid, oid, 0, SKIP_STEP * (MAX_EXTENTS - 1) + 3, bl);
399 + map<uint64_t,uint64_t> m, e;
400 +@@ -295,6 +296,26 @@ TEST_P(StoreTest, FiemapHoles) {
401 + ASSERT_TRUE((m.size() == 1 &&
402 + m[0] > SKIP_STEP * (MAX_EXTENTS - 1)) ||
403 + (m.size() == MAX_EXTENTS && extents_exist));
404 ++
405 ++ // fiemap test from SKIP_STEP to SKIP_STEP * (MAX_EXTENTS - 2) + 3
406 ++ // reset bufferlist and map
407 ++ bl.clear();
408 ++ m.clear();
409 ++ e.clear();
410 ++ store->fiemap(cid, oid, SKIP_STEP, SKIP_STEP * (MAX_EXTENTS - 2) + 3, bl);
411 ++ p = bl.begin();
412 ++ ::decode(m, p);
413 ++ cout << " got " << m << std::endl;
414 ++ ASSERT_TRUE(!m.empty());
415 ++ ASSERT_GE(m[SKIP_STEP], 3u);
416 ++ extents_exist = true;
417 ++ if (m.size() == (MAX_EXTENTS - 2)) {
418 ++ for (uint64_t i = 1; i < MAX_EXTENTS - 1; i++)
419 ++ extents_exist = extents_exist && m.count(SKIP_STEP*i);
420 ++ }
421 ++ ASSERT_TRUE((m.size() == 1 &&
422 ++ m[SKIP_STEP] > SKIP_STEP * (MAX_EXTENTS - 2)) ||
423 ++ (m.size() == (MAX_EXTENTS - 1) && extents_exist));
424 + }
425 + {
426 + ObjectStore::Transaction t;
427
428 diff --git a/sys-cluster/ceph/files/ceph-10.2.9-librados_Fix_deadlock_in_watch_flush.patch b/sys-cluster/ceph/files/ceph-10.2.9-librados_Fix_deadlock_in_watch_flush.patch
429 new file mode 100644
430 index 00000000000..77ab14b4295
431 --- /dev/null
432 +++ b/sys-cluster/ceph/files/ceph-10.2.9-librados_Fix_deadlock_in_watch_flush.patch
433 @@ -0,0 +1,61 @@
434 +From 2fb04c40804f646b4cdd3a55ec8a9e9df95b9360 Mon Sep 17 00:00:00 2001
435 +From: Xiaoxi Chen <xiaoxchen@××××.com>
436 +Date: Sat, 10 Sep 2016 00:23:55 +0800
437 +Subject: [PATCH] Librados: Fix deadlock in watch_flush
438 +
439 +In the previous code, watch_flush() waited on a condition while
440 +holding the "lock". That condition is only signalled by the
441 +finisher thread, but in some cases, when the finisher queue is
442 +not empty, a queued context needs to take the "lock", causing a deadlock.
443 +
444 +To avoid a race condition from concurrent calls to shutdown(), add
445 +a shutdown_lock to ensure only one caller is in the shutdown function.
446 +
447 +Signed-off-by: Xiaoxi Chen <xiaoxchen@××××.com>
448 +---
449 + src/librados/RadosClient.cc | 4 ++++
450 + src/librados/RadosClient.h | 1 +
451 + 2 files changed, 5 insertions(+)
452 +
453 +diff --git a/src/librados/RadosClient.cc b/src/librados/RadosClient.cc
454 +index 8c5e8ed803fa..230e8b53ecd9 100644
455 +--- a/src/librados/RadosClient.cc
456 ++++ b/src/librados/RadosClient.cc
457 +@@ -72,6 +72,7 @@ librados::RadosClient::RadosClient(CephContext *cct_)
458 + instance_id(0),
459 + objecter(NULL),
460 + lock("librados::RadosClient::lock"),
461 ++ shutdown_lock("librados::RadosClient::shutdown_lock"),
462 + timer(cct, lock),
463 + refcnt(1),
464 + log_last_version(0), log_cb(NULL), log_cb_arg(NULL),
465 +@@ -324,6 +325,7 @@ int librados::RadosClient::connect()
466 +
467 + void librados::RadosClient::shutdown()
468 + {
469 ++ Mutex::Locker l(shutdown_lock);
470 + lock.Lock();
471 + if (state == DISCONNECTED) {
472 + lock.Unlock();
473 +@@ -338,7 +340,9 @@ void librados::RadosClient::shutdown()
474 + if (state == CONNECTED) {
475 + if (need_objecter) {
476 + // make sure watch callbacks are flushed
477 ++ lock.Unlock();
478 + watch_flush();
479 ++ lock.Lock();
480 + }
481 + finisher.wait_for_empty();
482 + finisher.stop();
483 +diff --git a/src/librados/RadosClient.h b/src/librados/RadosClient.h
484 +index f495ba5966c2..a8ef2070ddcb 100644
485 +--- a/src/librados/RadosClient.h
486 ++++ b/src/librados/RadosClient.h
487 +@@ -62,6 +62,7 @@ class librados::RadosClient : public Dispatcher
488 + Objecter *objecter;
489 +
490 + Mutex lock;
491 ++ Mutex shutdown_lock;
492 + Cond cond;
493 + SafeTimer timer;
494 + int refcnt;
495
496 diff --git a/sys-cluster/ceph/files/ceph-10.2.9-librbd_filter_expected_error_codes_from_is_exclusive_lock_owner.patch b/sys-cluster/ceph/files/ceph-10.2.9-librbd_filter_expected_error_codes_from_is_exclusive_lock_owner.patch
497 new file mode 100644
498 index 00000000000..6800285318f
499 --- /dev/null
500 +++ b/sys-cluster/ceph/files/ceph-10.2.9-librbd_filter_expected_error_codes_from_is_exclusive_lock_owner.patch
501 @@ -0,0 +1,28 @@
502 +From 787ba33e5dba285dff874955a8f0d7aabd3f87fe Mon Sep 17 00:00:00 2001
503 +From: Jason Dillaman <dillaman@××××××.com>
504 +Date: Mon, 5 Jun 2017 08:17:05 -0400
505 +Subject: [PATCH] librbd: filter expected error codes from
506 + is_exclusive_lock_owner
507 +
508 +Fixes: http://tracker.ceph.com/issues/20182
509 +Signed-off-by: Jason Dillaman <dillaman@××××××.com>
510 +(cherry picked from commit d4daaf54e6bc42cd4fb2111ea20b2042941b0c31)
511 +---
512 + src/librbd/internal.cc | 4 +++-
513 + 1 file changed, 3 insertions(+), 1 deletion(-)
514 +
515 +diff --git a/src/librbd/internal.cc b/src/librbd/internal.cc
516 +index 9fecb1e1688f..6021be078090 100644
517 +--- a/src/librbd/internal.cc
518 ++++ b/src/librbd/internal.cc
519 +@@ -2110,7 +2110,9 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
520 + // might have been blacklisted by peer -- ensure we still own
521 + // the lock by pinging the OSD
522 + int r = ictx->exclusive_lock->assert_header_locked();
523 +- if (r < 0) {
524 ++ if (r == -EBUSY || r == -ENOENT) {
525 ++ return 0;
526 ++ } else if (r < 0) {
527 + return r;
528 + }
529 +
530
531 diff --git a/sys-cluster/ceph/files/ceph-10.2.9-osd-scrub_to_specifies_clone_ver_but_transaction_include.patch b/sys-cluster/ceph/files/ceph-10.2.9-osd-scrub_to_specifies_clone_ver_but_transaction_include.patch
532 new file mode 100644
533 index 00000000000..8bf14c2944b
534 --- /dev/null
535 +++ b/sys-cluster/ceph/files/ceph-10.2.9-osd-scrub_to_specifies_clone_ver_but_transaction_include.patch
536 @@ -0,0 +1,39 @@
537 +From 153f77544118613e19d5e88c030c3901234cf950 Mon Sep 17 00:00:00 2001
538 +From: David Zafman <dzafman@××××××.com>
539 +Date: Tue, 18 Jul 2017 15:08:14 -0700
540 +Subject: [PATCH] osd: scrub_to specifies clone ver, but transaction include
541 + head write ver
542 +
543 +Fixes: http://tracker.ceph.com/issues/20041
544 +
545 +Signed-off-by: David Zafman <dzafman@××××××.com>
546 +(cherry picked from commit fd598a0d23d61c645633ae774c3404a43d035e3c)
547 +
548 +Conflicts:
549 + src/osd/ReplicatedPG.cc (trivial)
550 +---
551 + src/osd/ReplicatedPG.cc | 4 ++--
552 + 1 file changed, 2 insertions(+), 2 deletions(-)
553 +
554 +diff --git a/src/osd/ReplicatedPG.cc b/src/osd/ReplicatedPG.cc
555 +index 4b4dc34c602a..4d80ad1770e1 100644
556 +--- a/src/osd/ReplicatedPG.cc
557 ++++ b/src/osd/ReplicatedPG.cc
558 +@@ -8318,7 +8318,7 @@ void ReplicatedPG::op_applied(const eversion_t &applied_version)
559 + last_update_applied = applied_version;
560 + if (is_primary()) {
561 + if (scrubber.active) {
562 +- if (last_update_applied == scrubber.subset_last_update) {
563 ++ if (last_update_applied >= scrubber.subset_last_update) {
564 + requeue_scrub();
565 + }
566 + } else {
567 +@@ -8326,7 +8326,7 @@ void ReplicatedPG::op_applied(const eversion_t &applied_version)
568 + }
569 + } else {
570 + if (scrubber.active_rep_scrub) {
571 +- if (last_update_applied == static_cast<MOSDRepScrub*>(
572 ++ if (last_update_applied >= static_cast<MOSDRepScrub*>(
573 + scrubber.active_rep_scrub->get_req())->scrub_to) {
574 + osd->op_wq.queue(
575 + make_pair(
576
577 diff --git a/sys-cluster/ceph/files/ceph-10.2.9-rbd-do_not_attempt_to_load_key_if_auth_is_disabled.patch b/sys-cluster/ceph/files/ceph-10.2.9-rbd-do_not_attempt_to_load_key_if_auth_is_disabled.patch
578 new file mode 100644
579 index 00000000000..4b03e335bc5
580 --- /dev/null
581 +++ b/sys-cluster/ceph/files/ceph-10.2.9-rbd-do_not_attempt_to_load_key_if_auth_is_disabled.patch
582 @@ -0,0 +1,39 @@
583 +From 0cd7df3649d7486d444a61cab89c48a89ddd3e8d Mon Sep 17 00:00:00 2001
584 +From: Jason Dillaman <dillaman@××××××.com>
585 +Date: Thu, 29 Jun 2017 14:54:40 -0400
586 +Subject: [PATCH] rbd: do not attempt to load key if auth is disabled
587 +
588 +Fixes: http://tracker.ceph.com/issues/19035
589 +Signed-off-by: Jason Dillaman <dillaman@××××××.com>
590 +(cherry picked from commit 8b9c8df6d7f0b75c5451953bb322bc1f9afb6299)
591 +---
592 + src/krbd.cc | 16 +++++++++-------
593 + 1 file changed, 9 insertions(+), 7 deletions(-)
594 +
595 +diff --git a/src/krbd.cc b/src/krbd.cc
596 +index a0e546fa7f6f..2bb6b4270abd 100644
597 +--- a/src/krbd.cc
598 ++++ b/src/krbd.cc
599 +@@ -129,13 +129,15 @@ static int build_map_buf(CephContext *cct, const char *pool, const char *image,
600 + oss << " name=" << cct->_conf->name.get_id();
601 +
602 + KeyRing keyring;
603 +- r = keyring.from_ceph_context(cct);
604 +- if (r == -ENOENT && !(cct->_conf->keyfile.length() ||
605 +- cct->_conf->key.length()))
606 +- r = 0;
607 +- if (r < 0) {
608 +- cerr << "rbd: failed to get secret" << std::endl;
609 +- return r;
610 ++ if (cct->_conf->auth_client_required != "none") {
611 ++ r = keyring.from_ceph_context(cct);
612 ++ if (r == -ENOENT && !(cct->_conf->keyfile.length() ||
613 ++ cct->_conf->key.length()))
614 ++ r = 0;
615 ++ if (r < 0) {
616 ++ cerr << "rbd: failed to get secret" << std::endl;
617 ++ return r;
618 ++ }
619 + }
620 +
621 + CryptoKey secret;
622
623 diff --git a/sys-cluster/ceph/files/ceph-10.2.9-unlock_sdata_op_ordering_lock_with_sdata_lock.patch b/sys-cluster/ceph/files/ceph-10.2.9-unlock_sdata_op_ordering_lock_with_sdata_lock.patch
624 new file mode 100644
625 index 00000000000..60f46ab36a4
626 --- /dev/null
627 +++ b/sys-cluster/ceph/files/ceph-10.2.9-unlock_sdata_op_ordering_lock_with_sdata_lock.patch
628 @@ -0,0 +1,32 @@
629 +From 3fa277b479d69699bf5a6875cd4a5efcf9ae0788 Mon Sep 17 00:00:00 2001
630 +From: Alexey Sheplyakov <asheplyakov@××××××××.com>
631 +Date: Tue, 27 Jun 2017 16:07:01 +0400
632 +Subject: [PATCH] jewel: osd: unlock sdata_op_ordering_lock with sdata_lock
633 + hold to avoid missing wakeup signal
634 +
635 +Based on commit bc683385819146f3f6f096ceec97e1226a3cd237. The OSD code has
636 +been refactored a lot since Jewel, hence cherry-picking that patch introduces
637 +a lot of unrelated changes, and is much more difficult than reusing the idea.
638 +
639 +Fixes: http://tracker.ceph.com/issues/20428
640 +
641 +Signed-off-by: Alexey Sheplyakov <asheplyakov@××××××××.com>
642 +---
643 + src/osd/OSD.cc | 2 +-
644 + 1 file changed, 1 insertion(+), 1 deletion(-)
645 +
646 +diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc
647 +index f5cfda3b686a..38a2711f6f92 100644
648 +--- a/src/osd/OSD.cc
649 ++++ b/src/osd/OSD.cc
650 +@@ -8727,9 +8727,9 @@ void OSD::ShardedOpWQ::_process(uint32_t thread_index, heartbeat_handle_d *hb )
651 + assert(NULL != sdata);
652 + sdata->sdata_op_ordering_lock.Lock();
653 + if (sdata->pqueue->empty()) {
654 +- sdata->sdata_op_ordering_lock.Unlock();
655 + osd->cct->get_heartbeat_map()->reset_timeout(hb, 4, 0);
656 + sdata->sdata_lock.Lock();
657 ++ sdata->sdata_op_ordering_lock.Unlock();
658 + sdata->sdata_cond.WaitInterval(osd->cct, sdata->sdata_lock, utime_t(2, 0));
659 + sdata->sdata_lock.Unlock();
660 + sdata->sdata_op_ordering_lock.Lock();