From: Alexey Shvetsov <alexxy@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/sci:master commit in: sys-cluster/lustre/, sys-cluster/lustre/files/
Date: Wed, 02 Apr 2014 12:29:28
Message-Id: 1396441738.a7d037ca556e8c39196b91cba9b18b20864d3b46.alexxy@gentoo
1 commit: a7d037ca556e8c39196b91cba9b18b20864d3b46
2 Author: Alexey Shvetsov <alexxy <AT> gentoo <DOT> org>
3 AuthorDate: Wed Apr 2 12:28:58 2014 +0000
4 Commit: Alexey Shvetsov <alexxy <AT> gentoo <DOT> org>
5 CommitDate: Wed Apr 2 12:28:58 2014 +0000
6 URL: http://git.overlays.gentoo.org/gitweb/?p=proj/sci.git;a=commit;h=a7d037ca
7
8 Lustre with patches up to kernel v3.13
9
10 Package-Manager: portage-2.2.10
11 RepoMan-Options: --force
12
13 ---
14 sys-cluster/lustre/ChangeLog | 15 +
15 .../0001-LU-3319-procfs-fix-symlink-handling.patch | 2 +-
16 ...cfs-Move-NRS-TBF-proc-handling-to-seq_fil.patch | 2 +-
17 ...cfs-update-zfs-proc-handling-to-seq_files.patch | 2 +-
18 ...rocfs-move-mgs-proc-handling-to-seq_files.patch | 2 +-
19 ...rocfs-move-osp-proc-handling-to-seq_files.patch | 2 +-
20 ...rocfs-move-lod-proc-handling-to-seq_files.patch | 2 +-
21 ...cfs-move-mdt-mds-proc-handling-to-seq_fil.patch | 2 +-
22 ...cfs-move-mdd-ofd-proc-handling-to-seq_fil.patch | 2 +-
23 ...cfs-update-ldiskfs-proc-handling-to-seq_f.patch | 3 +-
24 ...m-Backport-shrinker-changes-from-upstream.patch | 757 +++++++++++++++++++++
25 .../lustre/files/lustre-readline6.3_fix.patch | 13 +
26 sys-cluster/lustre/lustre-9999.ebuild | 2 +
27 13 files changed, 797 insertions(+), 9 deletions(-)
28
29 diff --git a/sys-cluster/lustre/ChangeLog b/sys-cluster/lustre/ChangeLog
30 index 6b3ecb1..0ed9678 100644
31 --- a/sys-cluster/lustre/ChangeLog
32 +++ b/sys-cluster/lustre/ChangeLog
33 @@ -3,6 +3,21 @@
34 # $Header: $
35
36 02 Apr 2014; Alexey Shvetsov <alexxy@g.o>
37 + +files/0010-LU-4416-mm-Backport-shrinker-changes-from-upstream.patch,
38 + +files/lustre-readline6.3_fix.patch,
39 + files/0001-LU-3319-procfs-fix-symlink-handling.patch,
40 + files/0002-LU-3319-procfs-Move-NRS-TBF-proc-handling-to-seq_fil.patch,
41 + files/0003-LU-3319-procfs-update-zfs-proc-handling-to-seq_files.patch,
42 + files/0004-LU-3319-procfs-move-mgs-proc-handling-to-seq_files.patch,
43 + files/0005-LU-3319-procfs-move-osp-proc-handling-to-seq_files.patch,
44 + files/0006-LU-3319-procfs-move-lod-proc-handling-to-seq_files.patch,
45 + files/0007-LU-3319-procfs-move-mdt-mds-proc-handling-to-seq_fil.patch,
46 + files/0008-LU-3319-procfs-move-mdd-ofd-proc-handling-to-seq_fil.patch,
47 + files/0009-LU-3319-procfs-update-ldiskfs-proc-handling-to-seq_f.patch,
48 + lustre-9999.ebuild:
49 + Lustre with patches up to kernel v3.13
50 +
51 + 02 Apr 2014; Alexey Shvetsov <alexxy@g.o>
52 +files/0001-LU-3319-procfs-fix-symlink-handling.patch,
53 +files/0002-LU-3319-procfs-Move-NRS-TBF-proc-handling-to-seq_fil.patch,
54 +files/0003-LU-3319-procfs-update-zfs-proc-handling-to-seq_files.patch,
55
56 diff --git a/sys-cluster/lustre/files/0001-LU-3319-procfs-fix-symlink-handling.patch b/sys-cluster/lustre/files/0001-LU-3319-procfs-fix-symlink-handling.patch
57 index d0b6e9c..4583601 100644
58 --- a/sys-cluster/lustre/files/0001-LU-3319-procfs-fix-symlink-handling.patch
59 +++ b/sys-cluster/lustre/files/0001-LU-3319-procfs-fix-symlink-handling.patch
60 @@ -1,7 +1,7 @@
61 From 4c418e7208a62c7bb7d61c1f97cf300675215bd4 Mon Sep 17 00:00:00 2001
62 From: James Simmons <uja.ornl@×××××.com>
63 Date: Fri, 28 Mar 2014 11:19:07 -0400
64 -Subject: [PATCH 1/9] LU-3319 procfs: fix symlink handling
65 +Subject: [PATCH 01/10] LU-3319 procfs: fix symlink handling
66
67 While working on symlink handling for seq files I noticed a
68 long outstanding bug. Code was developed to link osc obds
69
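The symlink handling described here is plain procfs linking. For reference only, a minimal sketch of that kernel API follows; it is not taken from the patch, and all demo_* names are hypothetical:

#include <linux/module.h>
#include <linux/proc_fs.h>

static struct proc_dir_entry *demo_dir;

static int __init demo_init(void)
{
	demo_dir = proc_mkdir("demo", NULL);
	if (demo_dir == NULL)
		return -ENOMEM;

	/* /proc/demo/alias -> target, resolved relative to /proc/demo */
	if (proc_symlink("alias", demo_dir, "target") == NULL) {
		remove_proc_entry("demo", NULL);
		return -ENOMEM;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("alias", demo_dir);
	remove_proc_entry("demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");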
70 diff --git a/sys-cluster/lustre/files/0002-LU-3319-procfs-Move-NRS-TBF-proc-handling-to-seq_fil.patch b/sys-cluster/lustre/files/0002-LU-3319-procfs-Move-NRS-TBF-proc-handling-to-seq_fil.patch
71 index d7da497..5c67013 100644
72 --- a/sys-cluster/lustre/files/0002-LU-3319-procfs-Move-NRS-TBF-proc-handling-to-seq_fil.patch
73 +++ b/sys-cluster/lustre/files/0002-LU-3319-procfs-Move-NRS-TBF-proc-handling-to-seq_fil.patch
74 @@ -1,7 +1,7 @@
75 From 7dbddd98e60ab73580ea52c8b53274da2283d624 Mon Sep 17 00:00:00 2001
76 From: James Simmons <uja.ornl@×××××.com>
77 Date: Tue, 25 Feb 2014 12:54:05 -0500
78 -Subject: [PATCH 2/9] LU-3319 procfs: Move NRS TBF proc handling to seq_files
79 +Subject: [PATCH 02/10] LU-3319 procfs: Move NRS TBF proc handling to seq_files
80
81 With newer kernels moving their proc file system handling
82 to seq_files this patch migrates the proc handling for NRS
83
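The migration pattern used throughout this series is the standard kernel one: a show() callback rendered through single_open() replaces the old read-a-page proc handler. Below is a minimal sketch of that pattern under the 3.x file_operations-based procfs API this overlay targets; the demo_* names are illustrative, not Lustre code:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	/* seq_printf() replaces snprintf() into a borrowed page */
	seq_printf(m, "value: %d\n", 42);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	return proc_create("demo", 0444, NULL, &demo_fops) ? 0 : -ENOMEM;
}

module_init(demo_init);
MODULE_LICENSE("GPL");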
84 diff --git a/sys-cluster/lustre/files/0003-LU-3319-procfs-update-zfs-proc-handling-to-seq_files.patch b/sys-cluster/lustre/files/0003-LU-3319-procfs-update-zfs-proc-handling-to-seq_files.patch
85 index b06c8da..90f1770 100644
86 --- a/sys-cluster/lustre/files/0003-LU-3319-procfs-update-zfs-proc-handling-to-seq_files.patch
87 +++ b/sys-cluster/lustre/files/0003-LU-3319-procfs-update-zfs-proc-handling-to-seq_files.patch
88 @@ -1,7 +1,7 @@
89 From abe827ebe8722336c55affd8388dacfbb38b49f1 Mon Sep 17 00:00:00 2001
90 From: James Simmons <uja.ornl@×××××.com>
91 Date: Tue, 18 Feb 2014 18:44:22 -0500
92 -Subject: [PATCH 3/9] LU-3319 procfs: update zfs proc handling to seq_files
93 +Subject: [PATCH 03/10] LU-3319 procfs: update zfs proc handling to seq_files
94
95 Migrate all zfs proc handling to using strictly seq_files.
96
97
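Files that print a single record fit single_open() as sketched above; proc files that enumerate many records supply a full seq_operations iterator instead, which is what the larger conversions in this series amount to. Again a hedged sketch with hypothetical demo_* names, not code from the patch; the file_operations wiring is the same as before except that seq_open()/seq_release() replace single_open()/single_release():

#include <linux/kernel.h>
#include <linux/seq_file.h>

static int demo_vals[] = { 1, 2, 3 };

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	if (*pos >= ARRAY_SIZE(demo_vals))
		return NULL;	/* iteration done: triggers ->stop() */
	return &demo_vals[*pos];
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return demo_start(m, pos);
}

static void demo_stop(struct seq_file *m, void *v)
{
	/* drop any locks taken in ->start() */
}

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", *(int *)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start = demo_start,
	.next  = demo_next,
	.stop  = demo_stop,
	.show  = demo_show,
};

static int demo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &demo_seq_ops);
}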
98 diff --git a/sys-cluster/lustre/files/0004-LU-3319-procfs-move-mgs-proc-handling-to-seq_files.patch b/sys-cluster/lustre/files/0004-LU-3319-procfs-move-mgs-proc-handling-to-seq_files.patch
99 index c7d415a..c5712a1 100644
100 --- a/sys-cluster/lustre/files/0004-LU-3319-procfs-move-mgs-proc-handling-to-seq_files.patch
101 +++ b/sys-cluster/lustre/files/0004-LU-3319-procfs-move-mgs-proc-handling-to-seq_files.patch
102 @@ -1,7 +1,7 @@
103 From 00e2e7c431e38433b919735890481e4bb5707cd4 Mon Sep 17 00:00:00 2001
104 From: James Simmons <uja.ornl@×××××.com>
105 Date: Tue, 4 Mar 2014 10:45:30 -0500
106 -Subject: [PATCH 4/9] LU-3319 procfs: move mgs proc handling to seq_files
107 +Subject: [PATCH 04/10] LU-3319 procfs: move mgs proc handling to seq_files
108
109 With 3.10 linux kernel and above proc handling now only
110 uses struct seq_files. This patch migrates the mgs
111
112 diff --git a/sys-cluster/lustre/files/0005-LU-3319-procfs-move-osp-proc-handling-to-seq_files.patch b/sys-cluster/lustre/files/0005-LU-3319-procfs-move-osp-proc-handling-to-seq_files.patch
113 index e439041..5b94e9a 100644
114 --- a/sys-cluster/lustre/files/0005-LU-3319-procfs-move-osp-proc-handling-to-seq_files.patch
115 +++ b/sys-cluster/lustre/files/0005-LU-3319-procfs-move-osp-proc-handling-to-seq_files.patch
116 @@ -1,7 +1,7 @@
117 From ab793e8472447314f6428025175f80afc26339ac Mon Sep 17 00:00:00 2001
118 From: James Simmons <uja.ornl@×××××.com>
119 Date: Wed, 26 Mar 2014 19:59:18 -0400
120 -Subject: [PATCH 5/9] LU-3319 procfs: move osp proc handling to seq_files
121 +Subject: [PATCH 05/10] LU-3319 procfs: move osp proc handling to seq_files
122
123 With 3.10 linux kernel and above proc handling now only
124 uses struct seq_files. This patch migrates the osp
125
126 diff --git a/sys-cluster/lustre/files/0006-LU-3319-procfs-move-lod-proc-handling-to-seq_files.patch b/sys-cluster/lustre/files/0006-LU-3319-procfs-move-lod-proc-handling-to-seq_files.patch
127 index d9bcc89..67c955b 100644
128 --- a/sys-cluster/lustre/files/0006-LU-3319-procfs-move-lod-proc-handling-to-seq_files.patch
129 +++ b/sys-cluster/lustre/files/0006-LU-3319-procfs-move-lod-proc-handling-to-seq_files.patch
130 @@ -1,7 +1,7 @@
131 From 1394aacb441c3ba07b24a4b465f2496af8eb3c73 Mon Sep 17 00:00:00 2001
132 From: James Simmons <uja.ornl@×××××.com>
133 Date: Wed, 26 Mar 2014 19:57:44 -0400
134 -Subject: [PATCH 6/9] LU-3319 procfs: move lod proc handling to seq_files
135 +Subject: [PATCH 06/10] LU-3319 procfs: move lod proc handling to seq_files
136
137 With 3.10 linux kernel and above proc handling now only
138 uses struct seq_files. This patch migrates the lod
139
140 diff --git a/sys-cluster/lustre/files/0007-LU-3319-procfs-move-mdt-mds-proc-handling-to-seq_fil.patch b/sys-cluster/lustre/files/0007-LU-3319-procfs-move-mdt-mds-proc-handling-to-seq_fil.patch
141 index 5c1c9cd..2205759 100644
142 --- a/sys-cluster/lustre/files/0007-LU-3319-procfs-move-mdt-mds-proc-handling-to-seq_fil.patch
143 +++ b/sys-cluster/lustre/files/0007-LU-3319-procfs-move-mdt-mds-proc-handling-to-seq_fil.patch
144 @@ -1,7 +1,7 @@
145 From 039e3e44e07b59923d94bea976173d9e3cbd0ba9 Mon Sep 17 00:00:00 2001
146 From: James Simmons <uja.ornl@×××××.com>
147 Date: Tue, 25 Mar 2014 11:10:33 -0400
148 -Subject: [PATCH 7/9] LU-3319 procfs: move mdt/mds proc handling to seq_files
149 +Subject: [PATCH 07/10] LU-3319 procfs: move mdt/mds proc handling to seq_files
150
151 With 3.10 linux kernel and above proc handling now only
152 uses struct seq_files. This patch migrates the mdt/mds
153
154 diff --git a/sys-cluster/lustre/files/0008-LU-3319-procfs-move-mdd-ofd-proc-handling-to-seq_fil.patch b/sys-cluster/lustre/files/0008-LU-3319-procfs-move-mdd-ofd-proc-handling-to-seq_fil.patch
155 index 6c6106b..37dd14c 100644
156 --- a/sys-cluster/lustre/files/0008-LU-3319-procfs-move-mdd-ofd-proc-handling-to-seq_fil.patch
157 +++ b/sys-cluster/lustre/files/0008-LU-3319-procfs-move-mdd-ofd-proc-handling-to-seq_fil.patch
158 @@ -1,7 +1,7 @@
159 From c6c3f4063e244cea365940adc0fd1a1c35d0b525 Mon Sep 17 00:00:00 2001
160 From: James Simmons <uja.ornl@×××××.com>
161 Date: Wed, 26 Mar 2014 20:01:52 -0400
162 -Subject: [PATCH 8/9] LU-3319 procfs: move mdd/ofd proc handling to seq_files
163 +Subject: [PATCH 08/10] LU-3319 procfs: move mdd/ofd proc handling to seq_files
164
165 With 3.10 linux kernel and above proc handling now only
166 uses struct seq_files. This patch migrates the mdd/ofd
167
168 diff --git a/sys-cluster/lustre/files/0009-LU-3319-procfs-update-ldiskfs-proc-handling-to-seq_f.patch b/sys-cluster/lustre/files/0009-LU-3319-procfs-update-ldiskfs-proc-handling-to-seq_f.patch
169 index cc93d19..50f9f1c 100644
170 --- a/sys-cluster/lustre/files/0009-LU-3319-procfs-update-ldiskfs-proc-handling-to-seq_f.patch
171 +++ b/sys-cluster/lustre/files/0009-LU-3319-procfs-update-ldiskfs-proc-handling-to-seq_f.patch
172 @@ -1,7 +1,8 @@
173 From 671de4e9a7ec811db225a763b88e48379d5a4daf Mon Sep 17 00:00:00 2001
174 From: James Simmons <uja.ornl@×××××.com>
175 Date: Tue, 25 Mar 2014 12:37:41 -0400
176 -Subject: [PATCH 9/9] LU-3319 procfs: update ldiskfs proc handling to seq_files
177 +Subject: [PATCH 09/10] LU-3319 procfs: update ldiskfs proc handling to
178 + seq_files
179
180 Migrate all ldiskfs proc handling to using strictly
181 seq_files. Also include a fix with newer gcc complaining
182
183 diff --git a/sys-cluster/lustre/files/0010-LU-4416-mm-Backport-shrinker-changes-from-upstream.patch b/sys-cluster/lustre/files/0010-LU-4416-mm-Backport-shrinker-changes-from-upstream.patch
184 new file mode 100644
185 index 0000000..e508fb4
186 --- /dev/null
187 +++ b/sys-cluster/lustre/files/0010-LU-4416-mm-Backport-shrinker-changes-from-upstream.patch
188 @@ -0,0 +1,757 @@
189 +From 3027cd3a234dc4a6531844b885e63b93a2fb6c67 Mon Sep 17 00:00:00 2001
190 +From: yangsheng <yang.sheng@×××××.com>
191 +Date: Fri, 28 Feb 2014 20:30:18 +0800
192 +Subject: [PATCH 10/10] LU-4416 mm: Backport shrinker changes from upstream
193 +
194 +Convert shrinker to new count/scan API.
195 +--ptlrpc shrinker
196 +--lu_object shrinker
197 +--ldlm pool shrinker
198 +
199 +Signed-off-by: Peng Tao <tao.peng@×××.com>
200 +Signed-off-by: Andreas Dilger <andreas.dilger@×××××.com>
201 +Signed-off-by: yang sheng <yang.sheng@×××××.com>
202 +Change-Id: Idbd7cd3b7488202e5e8f6fdf757ae6d20e28d642
203 +---
204 + libcfs/autoconf/lustre-libcfs.m4 | 22 ++++
205 + libcfs/include/libcfs/linux/linux-mem.h | 34 ++++-
206 + libcfs/include/libcfs/posix/libcfs.h | 13 +-
207 + lustre/ldlm/ldlm_pool.c | 222 ++++++++++++++++++++------------
208 + lustre/obdclass/lu_object.c | 134 +++++++++++--------
209 + lustre/ptlrpc/sec_bulk.c | 80 +++++++++---
210 + 6 files changed, 342 insertions(+), 163 deletions(-)
211 +
212 +diff --git a/libcfs/autoconf/lustre-libcfs.m4 b/libcfs/autoconf/lustre-libcfs.m4
213 +index cb86497..93360ce 100644
214 +--- a/libcfs/autoconf/lustre-libcfs.m4
215 ++++ b/libcfs/autoconf/lustre-libcfs.m4
216 +@@ -288,6 +288,26 @@ fi
217 + ])
218 +
219 + #
220 ++# FC19 3.12 kernel struct shrinker change
221 ++#
222 ++AC_DEFUN([LIBCFS_SHRINKER_COUNT],
223 ++[AC_MSG_CHECKING([shrinker has count_objects])
224 ++LB_LINUX_TRY_COMPILE([
225 ++ #include <linux/mmzone.h>
226 ++ #include <linux/shrinker.h>
227 ++],[
228 ++ ((struct shrinker*)0)->count_objects(NULL, NULL);
229 ++],[
230 ++ AC_MSG_RESULT(yes)
231 ++ AC_DEFINE(HAVE_SHRINKER_COUNT, 1,
232 + [shrinker has count_objects member])
233 ++],[
234 ++ AC_MSG_RESULT(no)
235 ++],[
236 ++])
237 ++])
238 ++
239 ++#
240 + # LIBCFS_PROG_LINUX
241 + #
242 + # LNet linux kernel checks
243 +@@ -319,6 +339,8 @@ LIBCFS_HAVE_CRC32
244 + LIBCFS_ENABLE_CRC32_ACCEL
245 + # 3.10
246 + LIBCFS_ENABLE_CRC32C_ACCEL
247 ++# 3.12
248 ++LIBCFS_SHRINKER_COUNT
249 + ])
250 +
251 + #
252 +diff --git a/libcfs/include/libcfs/linux/linux-mem.h b/libcfs/include/libcfs/linux/linux-mem.h
253 +index 6109645..ce20cb8 100644
254 +--- a/libcfs/include/libcfs/linux/linux-mem.h
255 ++++ b/libcfs/include/libcfs/linux/linux-mem.h
256 +@@ -108,13 +108,16 @@ extern void *cfs_mem_cache_cpt_alloc(struct kmem_cache *cachep,
257 + /*
258 + * Shrinker
259 + */
260 +-
261 + #ifdef HAVE_SHRINK_CONTROL
262 + # define SHRINKER_ARGS(sc, nr_to_scan, gfp_mask) \
263 + struct shrinker *shrinker, \
264 + struct shrink_control *sc
265 + # define shrink_param(sc, var) ((sc)->var)
266 + #else
267 ++struct shrink_control {
268 ++ gfp_t gfp_mask;
269 ++ unsigned long nr_to_scan;
270 ++};
271 + # ifdef HAVE_SHRINKER_WANT_SHRINK_PTR
272 + # define SHRINKER_ARGS(sc, nr_to_scan, gfp_mask) \
273 + struct shrinker *shrinker, \
274 +@@ -123,13 +126,31 @@ extern void *cfs_mem_cache_cpt_alloc(struct kmem_cache *cachep,
275 + # define SHRINKER_ARGS(sc, nr_to_scan, gfp_mask) \
276 + int nr_to_scan, gfp_t gfp_mask
277 + # endif
278 ++ /* avoid conflict with spl mm_compat.h */
279 ++# define HAVE_SHRINK_CONTROL_STRUCT 1
280 + # define shrink_param(sc, var) (var)
281 + #endif
282 +
283 +-typedef int (*shrinker_t)(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask));
284 ++#ifdef HAVE_SHRINKER_COUNT
285 ++struct shrinker_var {
286 ++ unsigned long (*count)(struct shrinker *,
287 ++ struct shrink_control *sc);
288 ++ unsigned long (*scan)(struct shrinker *,
289 ++ struct shrink_control *sc);
290 ++};
291 ++# define DEF_SHRINKER_VAR(name, shrink, count_obj, scan_obj) \
292 ++ struct shrinker_var name = { .count = count_obj, .scan = scan_obj }
293 ++#else
294 ++struct shrinker_var {
295 ++ int (*shrink)(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask));
296 ++};
297 ++# define DEF_SHRINKER_VAR(name, shrinker, count, scan) \
298 ++ struct shrinker_var name = { .shrink = shrinker }
299 ++# define SHRINK_STOP (~0UL)
300 ++#endif
301 +
302 + static inline
303 +-struct shrinker *set_shrinker(int seek, shrinker_t func)
304 ++struct shrinker *set_shrinker(int seek, struct shrinker_var *var)
305 + {
306 + struct shrinker *s;
307 +
308 +@@ -137,7 +158,12 @@ struct shrinker *set_shrinker(int seek, shrinker_t func)
309 + if (s == NULL)
310 + return (NULL);
311 +
312 +- s->shrink = func;
313 ++#ifdef HAVE_SHRINKER_COUNT
314 ++ s->count_objects = var->count;
315 ++ s->scan_objects = var->scan;
316 ++#else
317 ++ s->shrink = var->shrink;
318 ++#endif
319 + s->seeks = seek;
320 +
321 + register_shrinker(s);
322 +diff --git a/libcfs/include/libcfs/posix/libcfs.h b/libcfs/include/libcfs/posix/libcfs.h
323 +index 180d18e..38cebbb 100644
324 +--- a/libcfs/include/libcfs/posix/libcfs.h
325 ++++ b/libcfs/include/libcfs/posix/libcfs.h
326 +@@ -278,12 +278,19 @@ struct shrinker {
327 + #endif
328 + };
329 +
330 +-#define DEFAULT_SEEKS (0)
331 ++struct shrinker_var {
332 ++#ifndef __INTEL_COMPILER
333 ++ ;
334 ++#endif
335 ++};
336 ++
337 ++#define DEF_SHRINKER_VAR(name, shrink, count, scan) \
338 ++ struct shrinker_var name = {};
339 +
340 +-typedef int (*shrinker_t)(int, unsigned int);
341 ++#define DEFAULT_SEEKS (0)
342 +
343 + static inline
344 +-struct shrinker *set_shrinker(int seeks, shrinker_t shrink)
345 ++struct shrinker *set_shrinker(int seeks, struct shrinker_var *var)
346 + {
347 + return (struct shrinker *)0xdeadbea1; /* Cannot return NULL here */
348 + }
349 +diff --git a/lustre/ldlm/ldlm_pool.c b/lustre/ldlm/ldlm_pool.c
350 +index 6cf50f2..b3eaf1c 100644
351 +--- a/lustre/ldlm/ldlm_pool.c
352 ++++ b/lustre/ldlm/ldlm_pool.c
353 +@@ -531,7 +531,7 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
354 + int nr, unsigned int gfp_mask)
355 + {
356 + struct ldlm_namespace *ns;
357 +- int canceled = 0, unused;
358 ++ int unused;
359 +
360 + ns = ldlm_pl2ns(pl);
361 +
362 +@@ -550,17 +550,14 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
363 + unused = ns->ns_nr_unused;
364 + spin_unlock(&ns->ns_lock);
365 +
366 +- if (nr) {
367 +- canceled = ldlm_cancel_lru(ns, nr, LCF_ASYNC,
368 +- LDLM_CANCEL_SHRINK);
369 +- }
370 + #ifdef __KERNEL__
371 +- /*
372 +- * Return the number of potentially reclaimable locks.
373 +- */
374 +- return ((unused - canceled) / 100) * sysctl_vfs_cache_pressure;
375 ++ if (nr == 0)
376 ++ return (unused / 100) * sysctl_vfs_cache_pressure;
377 ++ else
378 ++ return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
379 + #else
380 +- return unused - canceled;
381 ++ return unused - (nr ? ldlm_cancel_lru(ns, nr, LCF_ASYNC,
382 ++ LDLM_CANCEL_SHRINK) : 0);
383 + #endif
384 + }
385 +
386 +@@ -1045,41 +1042,36 @@ static struct shrinker *ldlm_pools_cli_shrinker;
387 + static struct completion ldlm_pools_comp;
388 +
389 + /*
390 +- * Cancel \a nr locks from all namespaces (if possible). Returns number of
391 +- * cached locks after shrink is finished. All namespaces are asked to
392 +- * cancel approximately equal amount of locks to keep balancing.
393 +- */
394 +-static int ldlm_pools_shrink(ldlm_side_t client, int nr,
395 +- unsigned int gfp_mask)
396 ++* count locks from all namespaces (if possible). Returns number of
397 ++* cached locks.
398 ++*/
399 ++static unsigned long ldlm_pools_count(ldlm_side_t client, unsigned int gfp_mask)
400 + {
401 +- unsigned int total = 0, cached = 0;
402 +- int nr_ns;
403 +- struct ldlm_namespace *ns;
404 ++ int total = 0, nr_ns;
405 ++ struct ldlm_namespace *ns;
406 + struct ldlm_namespace *ns_old = NULL; /* loop detection */
407 +- void *cookie;
408 ++ void *cookie;
409 +
410 +- if (client == LDLM_NAMESPACE_CLIENT && nr != 0 &&
411 +- !(gfp_mask & __GFP_FS))
412 +- return -1;
413 ++ if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
414 ++ return 0;
415 +
416 +- CDEBUG(D_DLMTRACE, "Request to shrink %d %s locks from all pools\n",
417 +- nr, client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
418 ++ CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
419 ++ client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
420 +
421 +- cookie = cl_env_reenter();
422 ++ cookie = cl_env_reenter();
423 +
424 +- /*
425 +- * Find out how many resources we may release.
426 +- */
427 ++ /*
428 ++ * Find out how many resources we may release.
429 ++ */
430 + for (nr_ns = ldlm_namespace_nr_read(client);
431 +- nr_ns > 0; nr_ns--)
432 +- {
433 ++ nr_ns > 0; nr_ns--) {
434 + mutex_lock(ldlm_namespace_lock(client));
435 +- if (cfs_list_empty(ldlm_namespace_list(client))) {
436 ++ if (list_empty(ldlm_namespace_list(client))) {
437 + mutex_unlock(ldlm_namespace_lock(client));
438 +- cl_env_reexit(cookie);
439 +- return 0;
440 +- }
441 +- ns = ldlm_namespace_first_locked(client);
442 ++ cl_env_reexit(cookie);
443 ++ return 0;
444 ++ }
445 ++ ns = ldlm_namespace_first_locked(client);
446 +
447 + if (ns == ns_old) {
448 + mutex_unlock(ldlm_namespace_lock(client));
449 +@@ -1095,57 +1087,117 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
450 + if (ns_old == NULL)
451 + ns_old = ns;
452 +
453 +- ldlm_namespace_get(ns);
454 +- ldlm_namespace_move_to_active_locked(ns, client);
455 ++ ldlm_namespace_get(ns);
456 ++ ldlm_namespace_move_to_active_locked(ns, client);
457 + mutex_unlock(ldlm_namespace_lock(client));
458 +- total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
459 +- ldlm_namespace_put(ns);
460 +- }
461 ++ total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
462 ++ ldlm_namespace_put(ns);
463 ++ }
464 +
465 +- if (nr == 0 || total == 0) {
466 +- cl_env_reexit(cookie);
467 +- return total;
468 +- }
469 ++ cl_env_reexit(cookie);
470 ++ return total;
471 ++}
472 +
473 +- /*
474 +- * Shrink at least ldlm_namespace_nr(client) namespaces.
475 +- */
476 +- for (nr_ns = ldlm_namespace_nr_read(client) - nr_ns;
477 +- nr_ns > 0; nr_ns--)
478 +- {
479 +- __u64 cancel;
480 +- unsigned int nr_locks;
481 ++static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr,
482 ++ unsigned int gfp_mask)
483 ++{
484 ++ unsigned long freed = 0;
485 ++ int tmp, nr_ns;
486 ++ struct ldlm_namespace *ns;
487 ++ void *cookie;
488 +
489 +- /*
490 +- * Do not call shrink under ldlm_namespace_lock(client)
491 +- */
492 ++ if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
493 ++ return -1;
494 ++
495 ++ cookie = cl_env_reenter();
496 ++
497 ++ /*
498 ++ * Shrink at least ldlm_namespace_nr_read(client) namespaces.
499 ++ */
500 ++ for (tmp = nr_ns = ldlm_namespace_nr_read(client);
501 ++ tmp > 0; tmp--) {
502 ++ int cancel, nr_locks;
503 ++
504 ++ /*
505 ++ * Do not call shrink under ldlm_namespace_lock(client)
506 ++ */
507 + mutex_lock(ldlm_namespace_lock(client));
508 +- if (cfs_list_empty(ldlm_namespace_list(client))) {
509 ++ if (list_empty(ldlm_namespace_list(client))) {
510 + mutex_unlock(ldlm_namespace_lock(client));
511 +- /*
512 +- * If list is empty, we can't return any @cached > 0,
513 +- * that probably would cause needless shrinker
514 +- * call.
515 +- */
516 +- cached = 0;
517 +- break;
518 +- }
519 +- ns = ldlm_namespace_first_locked(client);
520 +- ldlm_namespace_get(ns);
521 +- ldlm_namespace_move_to_active_locked(ns, client);
522 ++ break;
523 ++ }
524 ++ ns = ldlm_namespace_first_locked(client);
525 ++ ldlm_namespace_get(ns);
526 ++ ldlm_namespace_move_to_active_locked(ns, client);
527 + mutex_unlock(ldlm_namespace_lock(client));
528 +
529 +- nr_locks = ldlm_pool_granted(&ns->ns_pool);
530 +- cancel = (__u64)nr_locks * nr;
531 +- do_div(cancel, total);
532 +- ldlm_pool_shrink(&ns->ns_pool, 1 + cancel, gfp_mask);
533 +- cached += ldlm_pool_granted(&ns->ns_pool);
534 +- ldlm_namespace_put(ns);
535 +- }
536 +- cl_env_reexit(cookie);
537 +- /* we only decrease the SLV in server pools shrinker, return -1 to
538 +- * kernel to avoid needless loop. LU-1128 */
539 +- return (client == LDLM_NAMESPACE_SERVER) ? -1 : cached;
540 ++ nr_locks = ldlm_pool_granted(&ns->ns_pool);
541 ++ /*
542 ++ * We used to shrink proportionally but with the new shrinker API,
543 ++ * we lost the total number of freeable locks.
544 ++ */
545 ++ cancel = 1 + min_t(int, nr_locks, nr / nr_ns);
546 ++ freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
547 ++ ldlm_namespace_put(ns);
548 ++ }
549 ++ cl_env_reexit(cookie);
550 ++ /*
551 ++ * we only decrease the SLV in server pools shrinker, return
552 ++ * SHRINK_STOP to kernel to avoid needless loop. LU-1128
553 ++ */
554 ++ return (client == LDLM_NAMESPACE_SERVER) ? SHRINK_STOP : freed;
555 ++}
556 ++
557 ++#ifdef HAVE_SHRINKER_COUNT
558 ++static unsigned long ldlm_pools_srv_count(struct shrinker *s,
559 ++ struct shrink_control *sc)
560 ++{
561 ++ return ldlm_pools_count(LDLM_NAMESPACE_SERVER, sc->gfp_mask);
562 ++}
563 ++
564 ++static unsigned long ldlm_pools_srv_scan(struct shrinker *s,
565 ++ struct shrink_control *sc)
566 ++{
567 ++ return ldlm_pools_scan(LDLM_NAMESPACE_SERVER, sc->nr_to_scan,
568 ++ sc->gfp_mask);
569 ++}
570 ++
571 ++static unsigned long ldlm_pools_cli_count(struct shrinker *s, struct shrink_control *sc)
572 ++{
573 ++ return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
574 ++}
575 ++
576 ++static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
577 ++ struct shrink_control *sc)
578 ++{
579 ++ return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
580 ++ sc->gfp_mask);
581 ++}
582 ++
583 ++#else
584 ++/*
585 ++ * Cancel \a nr locks from all namespaces (if possible). Returns number of
586 ++ * cached locks after shrink is finished. All namespaces are asked to
587 ++ * cancel approximately equal amount of locks to keep balancing.
588 ++ */
589 ++static int ldlm_pools_shrink(ldlm_side_t client, int nr,
590 ++ unsigned int gfp_mask)
591 ++{
592 ++ unsigned int total = 0;
593 ++
594 ++ if (client == LDLM_NAMESPACE_CLIENT && nr != 0 &&
595 ++ !(gfp_mask & __GFP_FS))
596 ++ return -1;
597 ++
598 ++ CDEBUG(D_DLMTRACE, "Request to shrink %d %s locks from all pools\n",
599 ++ nr, client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
600 ++
601 ++ total = ldlm_pools_count(client, gfp_mask);
602 ++
603 ++ if (nr == 0 || total == 0)
604 ++ return total;
605 ++
606 ++ return ldlm_pools_scan(client, nr, gfp_mask);
607 + }
608 +
609 + static int ldlm_pools_srv_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
610 +@@ -1162,6 +1214,8 @@ static int ldlm_pools_cli_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
611 + shrink_param(sc, gfp_mask));
612 + }
613 +
614 ++#endif /* HAVE_SHRINKER_COUNT */
615 ++
616 + int ldlm_pools_recalc(ldlm_side_t client)
617 + {
618 + __u32 nr_l = 0, nr_p = 0, l;
619 +@@ -1418,16 +1472,18 @@ static void ldlm_pools_thread_stop(void)
620 + int ldlm_pools_init(void)
621 + {
622 + int rc;
623 ++ DEF_SHRINKER_VAR(shsvar, ldlm_pools_srv_shrink,
624 ++ ldlm_pools_srv_count, ldlm_pools_srv_scan);
625 ++ DEF_SHRINKER_VAR(shcvar, ldlm_pools_cli_shrink,
626 ++ ldlm_pools_cli_count, ldlm_pools_cli_scan);
627 + ENTRY;
628 +
629 + rc = ldlm_pools_thread_start();
630 + if (rc == 0) {
631 + ldlm_pools_srv_shrinker =
632 +- set_shrinker(DEFAULT_SEEKS,
633 +- ldlm_pools_srv_shrink);
634 ++ set_shrinker(DEFAULT_SEEKS, &shsvar);
635 + ldlm_pools_cli_shrinker =
636 +- set_shrinker(DEFAULT_SEEKS,
637 +- ldlm_pools_cli_shrink);
638 ++ set_shrinker(DEFAULT_SEEKS, &shcvar);
639 + }
640 + RETURN(rc);
641 + }
642 +diff --git a/lustre/obdclass/lu_object.c b/lustre/obdclass/lu_object.c
643 +index 1304e95..0850b2a 100644
644 +--- a/lustre/obdclass/lu_object.c
645 ++++ b/lustre/obdclass/lu_object.c
646 +@@ -1884,6 +1884,69 @@ static void lu_site_stats_get(cfs_hash_t *hs,
647 +
648 + #ifdef __KERNEL__
649 +
650 ++static unsigned long lu_cache_shrink_count(struct shrinker *sk,
651 ++ struct shrink_control *sc)
652 ++{
653 ++ lu_site_stats_t stats;
654 ++ struct lu_site *s;
655 ++ struct lu_site *tmp;
656 ++ unsigned long cached = 0;
657 ++
658 ++ if (!(sc->gfp_mask & __GFP_FS))
659 ++ return 0;
660 ++
661 ++ mutex_lock(&lu_sites_guard);
662 ++ list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
663 ++ memset(&stats, 0, sizeof(stats));
664 ++ lu_site_stats_get(s->ls_obj_hash, &stats, 0);
665 ++ cached += stats.lss_total - stats.lss_busy;
666 ++ }
667 ++ mutex_unlock(&lu_sites_guard);
668 ++
669 ++ cached = (cached / 100) * sysctl_vfs_cache_pressure;
670 ++ CDEBUG(D_INODE, "%ld objects cached\n", cached);
671 ++ return cached;
672 ++}
673 ++
674 ++static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
675 ++ struct shrink_control *sc)
676 ++{
677 ++ struct lu_site *s;
678 ++ struct lu_site *tmp;
679 ++ unsigned long remain = sc->nr_to_scan, freed = 0;
680 ++ LIST_HEAD(splice);
681 ++
682 ++ if (!(sc->gfp_mask & __GFP_FS))
683 ++ /* We must not take the lu_sites_guard lock when
684 ++ * __GFP_FS is *not* set because of the deadlock
685 ++ * possibility detailed above. Additionally,
686 ++ * since we cannot determine the number of
687 ++ * objects in the cache without taking this
688 ++ * lock, we're in a particularly tough spot. As
689 ++ * a result, we'll just lie and say our cache is
690 ++ * empty. This _should_ be ok, as we can't
691 ++ * reclaim objects when __GFP_FS is *not* set
692 ++ * anyways.
693 ++ */
694 ++ return SHRINK_STOP;
695 ++
696 ++ mutex_lock(&lu_sites_guard);
697 ++ list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
698 ++ freed = lu_site_purge(&lu_shrink_env, s, remain);
699 ++ remain -= freed;
700 ++ /*
701 ++ * Move just shrunk site to the tail of site list to
702 ++ * assure shrinking fairness.
703 ++ */
704 ++ list_move_tail(&s->ls_linkage, &splice);
705 ++ }
706 ++ list_splice(&splice, lu_sites.prev);
707 ++ mutex_unlock(&lu_sites_guard);
708 ++
709 ++ return sc->nr_to_scan - remain;
710 ++}
711 ++
712 ++#ifndef HAVE_SHRINKER_COUNT
713 + /*
714 + * There exists a potential lock inversion deadlock scenario when using
715 + * Lustre on top of ZFS. This occurs between one of ZFS's
716 +@@ -1904,59 +1967,29 @@ static void lu_site_stats_get(cfs_hash_t *hs,
717 + */
718 + static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
719 + {
720 +- lu_site_stats_t stats;
721 +- struct lu_site *s;
722 +- struct lu_site *tmp;
723 + int cached = 0;
724 +- int remain = shrink_param(sc, nr_to_scan);
725 +- CFS_LIST_HEAD(splice);
726 +-
727 +- if (!(shrink_param(sc, gfp_mask) & __GFP_FS)) {
728 +- if (remain != 0)
729 +- return -1;
730 +- else
731 +- /* We must not take the lu_sites_guard lock when
732 +- * __GFP_FS is *not* set because of the deadlock
733 +- * possibility detailed above. Additionally,
734 +- * since we cannot determine the number of
735 +- * objects in the cache without taking this
736 +- * lock, we're in a particularly tough spot. As
737 +- * a result, we'll just lie and say our cache is
738 +- * empty. This _should_ be ok, as we can't
739 +- * reclaim objects when __GFP_FS is *not* set
740 +- * anyways.
741 +- */
742 +- return 0;
743 +- }
744 ++ struct shrink_control scv = {
745 ++ .nr_to_scan = shrink_param(sc, nr_to_scan),
746 ++ .gfp_mask = shrink_param(sc, gfp_mask)
747 ++ };
748 ++#if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL)
749 ++ struct shrinker* shrinker = NULL;
750 ++#endif
751 +
752 +- CDEBUG(D_INODE, "Shrink %d objects\n", remain);
753 +
754 +- mutex_lock(&lu_sites_guard);
755 +- cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
756 +- if (shrink_param(sc, nr_to_scan) != 0) {
757 +- remain = lu_site_purge(&lu_shrink_env, s, remain);
758 +- /*
759 +- * Move just shrunk site to the tail of site list to
760 +- * assure shrinking fairness.
761 +- */
762 +- cfs_list_move_tail(&s->ls_linkage, &splice);
763 +- }
764 ++ CDEBUG(D_INODE, "Shrink %lu objects\n", scv.nr_to_scan);
765 +
766 +- memset(&stats, 0, sizeof(stats));
767 +- lu_site_stats_get(s->ls_obj_hash, &stats, 0);
768 +- cached += stats.lss_total - stats.lss_busy;
769 +- if (shrink_param(sc, nr_to_scan) && remain <= 0)
770 +- break;
771 +- }
772 +- cfs_list_splice(&splice, lu_sites.prev);
773 +- mutex_unlock(&lu_sites_guard);
774 ++ lu_cache_shrink_scan(shrinker, &scv);
775 +
776 +- cached = (cached / 100) * sysctl_vfs_cache_pressure;
777 +- if (shrink_param(sc, nr_to_scan) == 0)
778 +- CDEBUG(D_INODE, "%d objects cached\n", cached);
779 +- return cached;
780 ++ cached = lu_cache_shrink_count(shrinker, &scv);
781 ++ if (scv.nr_to_scan == 0)
782 ++ CDEBUG(D_INODE, "%d objects cached\n", cached);
783 ++ return cached;
784 + }
785 +
786 ++#endif /* HAVE_SHRINKER_COUNT */
787 ++
788 ++
789 + /*
790 + * Debugging stuff.
791 + */
792 +@@ -2005,11 +2038,6 @@ void lu_context_keys_dump(void)
793 + }
794 + }
795 + EXPORT_SYMBOL(lu_context_keys_dump);
796 +-#else /* !__KERNEL__ */
797 +-static int lu_cache_shrink(int nr, unsigned int gfp_mask)
798 +-{
799 +- return 0;
800 +-}
801 + #endif /* __KERNEL__ */
802 +
803 + /**
804 +@@ -2018,6 +2046,8 @@ static int lu_cache_shrink(int nr, unsigned int gfp_mask)
805 + int lu_global_init(void)
806 + {
807 + int result;
808 ++ DEF_SHRINKER_VAR(shvar, lu_cache_shrink,
809 ++ lu_cache_shrink_count, lu_cache_shrink_scan);
810 +
811 + CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
812 +
813 +@@ -2046,7 +2076,7 @@ int lu_global_init(void)
814 + * inode, one for ea. Unfortunately setting this high value results in
815 + * lu_object/inode cache consuming all the memory.
816 + */
817 +- lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink);
818 ++ lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, &shvar);
819 + if (lu_site_shrinker == NULL)
820 + return -ENOMEM;
821 +
822 +diff --git a/lustre/ptlrpc/sec_bulk.c b/lustre/ptlrpc/sec_bulk.c
823 +index 0601486..9d970f20 100644
824 +--- a/lustre/ptlrpc/sec_bulk.c
825 ++++ b/lustre/ptlrpc/sec_bulk.c
826 +@@ -232,30 +232,46 @@ static void enc_pools_release_free_pages(long npages)
827 + }
828 +
829 + /*
830 +- * could be called frequently for query (@nr_to_scan == 0).
831 + * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
832 + */
833 +-static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
834 ++static unsigned long enc_pools_shrink_count(struct shrinker *s,
835 ++ struct shrink_control *sc)
836 + {
837 +- if (unlikely(shrink_param(sc, nr_to_scan) != 0)) {
838 ++ /*
839 ++ * if no pool access for a long time, we consider it's fully idle.
840 ++ * a little race here is fine.
841 ++ */
842 ++ if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
843 ++ CACHE_QUIESCENT_PERIOD)) {
844 + spin_lock(&page_pools.epp_lock);
845 +- shrink_param(sc, nr_to_scan) = min_t(unsigned long,
846 +- shrink_param(sc, nr_to_scan),
847 +- page_pools.epp_free_pages -
848 +- PTLRPC_MAX_BRW_PAGES);
849 +- if (shrink_param(sc, nr_to_scan) > 0) {
850 +- enc_pools_release_free_pages(shrink_param(sc,
851 +- nr_to_scan));
852 +- CDEBUG(D_SEC, "released %ld pages, %ld left\n",
853 +- (long)shrink_param(sc, nr_to_scan),
854 +- page_pools.epp_free_pages);
855 +-
856 +- page_pools.epp_st_shrinks++;
857 +- page_pools.epp_last_shrink = cfs_time_current_sec();
858 +- }
859 ++ page_pools.epp_idle_idx = IDLE_IDX_MAX;
860 + spin_unlock(&page_pools.epp_lock);
861 + }
862 +
863 ++ LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
864 ++ return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
865 ++ (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
866 ++}
867 ++
868 ++/*
869 ++ * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
870 ++ */
871 ++static unsigned long enc_pools_shrink_scan(struct shrinker *s,
872 ++ struct shrink_control *sc)
873 ++{
874 ++ spin_lock(&page_pools.epp_lock);
875 ++ sc->nr_to_scan = min_t(unsigned long, sc->nr_to_scan,
876 ++ page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES);
877 ++ if (sc->nr_to_scan > 0) {
878 ++ enc_pools_release_free_pages(sc->nr_to_scan);
879 ++ CDEBUG(D_SEC, "released %ld pages, %ld left\n",
880 ++ (long)sc->nr_to_scan, page_pools.epp_free_pages);
881 ++
882 ++ page_pools.epp_st_shrinks++;
883 ++ page_pools.epp_last_shrink = cfs_time_current_sec();
884 ++ }
885 ++ spin_unlock(&page_pools.epp_lock);
886 ++
887 + /*
888 + * if no pool access for a long time, we consider it's fully idle.
889 + * a little race here is fine.
890 +@@ -268,10 +284,31 @@ static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
891 + }
892 +
893 + LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
894 +- return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
895 +- (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
896 ++ return sc->nr_to_scan;
897 ++}
898 ++
899 ++#ifndef HAVE_SHRINKER_COUNT
900 ++/*
901 ++ * could be called frequently for query (@nr_to_scan == 0).
902 ++ * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
903 ++ */
904 ++static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
905 ++{
906 ++ struct shrink_control scv = {
907 ++ .nr_to_scan = shrink_param(sc, nr_to_scan),
908 ++ .gfp_mask = shrink_param(sc, gfp_mask)
909 ++ };
910 ++#if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL)
911 ++ struct shrinker* shrinker = NULL;
912 ++#endif
913 ++
914 ++ enc_pools_shrink_scan(shrinker, &scv);
915 ++
916 ++ return enc_pools_shrink_count(shrinker, &scv);
917 + }
918 +
919 ++#endif /* HAVE_SHRINKER_COUNT */
920 ++
921 + static inline
922 + int npages_to_npools(unsigned long npages)
923 + {
924 +@@ -706,6 +743,8 @@ static inline void enc_pools_free(void)
925 +
926 + int sptlrpc_enc_pool_init(void)
927 + {
928 ++ DEF_SHRINKER_VAR(shvar, enc_pools_shrink,
929 ++ enc_pools_shrink_count, enc_pools_shrink_scan);
930 + /*
931 + * maximum capacity is 1/8 of total physical memory.
932 + * is the 1/8 a good number?
933 +@@ -741,8 +780,7 @@ int sptlrpc_enc_pool_init(void)
934 + if (page_pools.epp_pools == NULL)
935 + return -ENOMEM;
936 +
937 +- pools_shrinker = set_shrinker(pools_shrinker_seeks,
938 +- enc_pools_shrink);
939 ++ pools_shrinker = set_shrinker(pools_shrinker_seeks, &shvar);
940 + if (pools_shrinker == NULL) {
941 + enc_pools_free();
942 + return -ENOMEM;
943 +--
944 +1.9.1
945 +
946
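The compat scheme of this LU-4416 backport boils down to: implement one count/scan pair, then either wire it straight into struct shrinker on kernels >= 3.12 (detected by the LIBCFS_SHRINKER_COUNT probe the patch adds) or wrap the pair in a single legacy shrink() callback. A condensed sketch of the same idea follows; it assumes the shrink_control-era legacy signature, the patch's SHRINK_STOP fallback on old kernels, and uses hypothetical demo_* names rather than the ldlm/lu_object/sec_bulk callbacks above:

#include <linux/kernel.h>
#include <linux/shrinker.h>

static unsigned long demo_cached;	/* stand-in for a real cache */

static unsigned long demo_count(struct shrinker *s,
				struct shrink_control *sc)
{
	/* count callback: report only, never reclaim or sleep */
	return demo_cached;
}

static unsigned long demo_scan(struct shrinker *s,
			       struct shrink_control *sc)
{
	unsigned long freed = min(demo_cached, sc->nr_to_scan);

	demo_cached -= freed;
	return freed ? freed : SHRINK_STOP;
}

#ifdef HAVE_SHRINKER_COUNT	/* defined by the autoconf probe above */
static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
};
#else
/* pre-3.12: one callback both queries (nr_to_scan == 0) and reclaims,
 * then reports what is left in the cache */
static int demo_shrink(struct shrinker *s, struct shrink_control *sc)
{
	if (sc->nr_to_scan)
		demo_scan(s, sc);
	return demo_count(s, sc);
}

static struct shrinker demo_shrinker = {
	.shrink	= demo_shrink,
	.seeks	= DEFAULT_SEEKS,
};
#endif

/* register_shrinker(&demo_shrinker) at init,
 * unregister_shrinker(&demo_shrinker) at teardown */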
947 diff --git a/sys-cluster/lustre/files/lustre-readline6.3_fix.patch b/sys-cluster/lustre/files/lustre-readline6.3_fix.patch
948 new file mode 100644
949 index 0000000..50384d6
950 --- /dev/null
951 +++ b/sys-cluster/lustre/files/lustre-readline6.3_fix.patch
952 @@ -0,0 +1,13 @@
953 +diff --git a/libcfs/libcfs/util/parser.c b/libcfs/libcfs/util/parser.c
954 +index f395fa3..3fc0373 100644
955 +--- a/libcfs/libcfs/util/parser.c
956 ++++ b/libcfs/libcfs/util/parser.c
957 +@@ -308,7 +308,7 @@ int init_input()
958 + rl_deprep_term_function = (rl_voidfunc_t *)noop_fn;
959 + }
960 +
961 +- rl_attempted_completion_function = (CPPFunction *)command_completion;
962 ++ rl_attempted_completion_function = (rl_completion_func_t *)command_completion;
963 + rl_completion_entry_function = (void *)command_generator;
964 + #endif
965 + return interactive;
966
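readline 6.3 dropped the legacy CPPFunction typedef, which is why the cast in parser.c moves to rl_completion_func_t. A self-contained userspace illustration of the typed interface; the demo_* names are hypothetical, not lustre's parser.c, and it builds with cc demo.c -lreadline:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <readline/readline.h>

static const char *demo_cmds[] = { "help", "quit", "status", NULL };

/* entry generator: called repeatedly, state == 0 on the first call */
static char *demo_generator(const char *text, int state)
{
	static int idx;
	static size_t len;

	if (!state) {
		idx = 0;
		len = strlen(text);
	}
	while (demo_cmds[idx]) {
		const char *c = demo_cmds[idx++];
		if (!strncmp(c, text, len))
			return strdup(c);	/* readline frees it */
	}
	return NULL;
}

/* exactly matches rl_completion_func_t, so no cast is needed */
static char **demo_completion(const char *text, int start, int end)
{
	(void)start;
	(void)end;
	return rl_completion_matches(text, demo_generator);
}

int main(void)
{
	rl_attempted_completion_function = demo_completion;

	char *line = readline("demo> ");
	printf("got: %s\n", line ? line : "(eof)");
	free(line);
	return 0;
}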
967 diff --git a/sys-cluster/lustre/lustre-9999.ebuild b/sys-cluster/lustre/lustre-9999.ebuild
968 index c4de667..50587cc 100644
969 --- a/sys-cluster/lustre/lustre-9999.ebuild
970 +++ b/sys-cluster/lustre/lustre-9999.ebuild
971 @@ -42,6 +42,8 @@ PATCHES=(
972 "${FILESDIR}/0007-LU-3319-procfs-move-mdt-mds-proc-handling-to-seq_fil.patch"
973 "${FILESDIR}/0008-LU-3319-procfs-move-mdd-ofd-proc-handling-to-seq_fil.patch"
974 "${FILESDIR}/0009-LU-3319-procfs-update-ldiskfs-proc-handling-to-seq_f.patch"
975 + "${FILESDIR}/0010-LU-4416-mm-Backport-shrinker-changes-from-upstream.patch"
976 + "${FILESDIR}/lustre-readline6.3_fix.patch"
977 )
978
979 pkg_setup() {