Author: mpagano
Date: 2014-06-02 19:45:19 +0000 (Mon, 02 Jun 2014)
New Revision: 2802

Added:
genpatches-2.6/trunk/3.14/1004_linux-3.14.5.patch
Modified:
genpatches-2.6/trunk/3.14/0000_README
Log:
Linux patch 3.14.5

Modified: genpatches-2.6/trunk/3.14/0000_README
===================================================================
--- genpatches-2.6/trunk/3.14/0000_README	2014-05-31 20:17:24 UTC (rev 2801)
+++ genpatches-2.6/trunk/3.14/0000_README	2014-06-02 19:45:19 UTC (rev 2802)
@@ -58,6 +58,10 @@
 From: http://www.kernel.org
 Desc: Linux 3.14.4
 
+Patch: 1004_linux-3.14.5.patch
+From: http://www.kernel.org
+Desc: Linux 3.14.5
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

Added: genpatches-2.6/trunk/3.14/1004_linux-3.14.5.patch
===================================================================
--- genpatches-2.6/trunk/3.14/1004_linux-3.14.5.patch	(rev 0)
+++ genpatches-2.6/trunk/3.14/1004_linux-3.14.5.patch	2014-06-02 19:45:19 UTC (rev 2802)
@@ -0,0 +1,5450 @@
+diff --git a/Makefile b/Makefile
+index d7c07fd8c944..fa77b0bed2a2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,8 +1,8 @@
+ VERSION = 3
+ PATCHLEVEL = 14
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+-NAME = Shuffling Zombie Juror
++NAME = Remembering Coco
+ 
+ # *DOCUMENTATION*
+ # To see a list of typical targets execute "make help"
+diff --git a/arch/parisc/include/asm/shmparam.h b/arch/parisc/include/asm/shmparam.h
+index 628ddc22faa8..afe1300ab667 100644
+--- a/arch/parisc/include/asm/shmparam.h
++++ b/arch/parisc/include/asm/shmparam.h
+@@ -1,8 +1,7 @@
+ #ifndef _ASMPARISC_SHMPARAM_H
+ #define _ASMPARISC_SHMPARAM_H
+ 
+-#define __ARCH_FORCE_SHMLBA 1
+-
+-#define SHMLBA 0x00400000 /* attach addr needs to be 4 Mb aligned */
++#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
++#define SHM_COLOUR 0x00400000 /* shared mappings colouring */
+ 
+ #endif /* _ASMPARISC_SHMPARAM_H */
+diff --git a/arch/parisc/include/uapi/asm/resource.h b/arch/parisc/include/uapi/asm/resource.h |
64 |
+index 8b06343b62ed..090483c47dbb 100644 |
65 |
+--- a/arch/parisc/include/uapi/asm/resource.h |
66 |
++++ b/arch/parisc/include/uapi/asm/resource.h |
67 |
+@@ -1,7 +1,6 @@ |
68 |
+ #ifndef _ASM_PARISC_RESOURCE_H |
69 |
+ #define _ASM_PARISC_RESOURCE_H |
70 |
+ |
71 |
+-#define _STK_LIM_MAX 10 * _STK_LIM |
72 |
+ #include <asm-generic/resource.h> |
73 |
+ |
74 |
+ #endif |
75 |
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c |
76 |
+index a6ffc775a9f8..f6448c7c62b5 100644 |
77 |
+--- a/arch/parisc/kernel/cache.c |
78 |
++++ b/arch/parisc/kernel/cache.c |
79 |
+@@ -323,7 +323,8 @@ void flush_dcache_page(struct page *page) |
80 |
+ * specifically accesses it, of course) */ |
81 |
+ |
82 |
+ flush_tlb_page(mpnt, addr); |
83 |
+- if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) { |
84 |
++ if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1)) |
85 |
++ != (addr & (SHM_COLOUR - 1))) { |
86 |
+ __flush_cache_page(mpnt, addr, page_to_phys(page)); |
87 |
+ if (old_addr) |
88 |
+ printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)"); |
89 |
+diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c |
90 |
+index b7cadc4a06cd..31ffa9b55322 100644 |
91 |
+--- a/arch/parisc/kernel/sys_parisc.c |
92 |
++++ b/arch/parisc/kernel/sys_parisc.c |
93 |
+@@ -45,7 +45,7 @@ |
94 |
+ |
95 |
+ static int get_offset(unsigned int last_mmap) |
96 |
+ { |
97 |
+- return (last_mmap & (SHMLBA-1)) >> PAGE_SHIFT; |
98 |
++ return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT; |
99 |
+ } |
100 |
+ |
101 |
+ static unsigned long shared_align_offset(unsigned int last_mmap, |
102 |
+@@ -57,8 +57,8 @@ static unsigned long shared_align_offset(unsigned int last_mmap, |
103 |
+ static inline unsigned long COLOR_ALIGN(unsigned long addr, |
104 |
+ unsigned int last_mmap, unsigned long pgoff) |
105 |
+ { |
106 |
+- unsigned long base = (addr+SHMLBA-1) & ~(SHMLBA-1); |
107 |
+- unsigned long off = (SHMLBA-1) & |
108 |
++ unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1); |
109 |
++ unsigned long off = (SHM_COLOUR-1) & |
110 |
+ (shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT); |
111 |
+ |
112 |
+ return base + off; |
113 |
+@@ -101,7 +101,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, |
114 |
+ if (flags & MAP_FIXED) { |
115 |
+ if ((flags & MAP_SHARED) && last_mmap && |
116 |
+ (addr - shared_align_offset(last_mmap, pgoff)) |
117 |
+- & (SHMLBA - 1)) |
118 |
++ & (SHM_COLOUR - 1)) |
119 |
+ return -EINVAL; |
120 |
+ goto found_addr; |
121 |
+ } |
122 |
+@@ -122,7 +122,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, |
123 |
+ info.length = len; |
124 |
+ info.low_limit = mm->mmap_legacy_base; |
125 |
+ info.high_limit = mmap_upper_limit(); |
126 |
+- info.align_mask = last_mmap ? (PAGE_MASK & (SHMLBA - 1)) : 0; |
127 |
++ info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0; |
128 |
+ info.align_offset = shared_align_offset(last_mmap, pgoff); |
129 |
+ addr = vm_unmapped_area(&info); |
130 |
+ |
131 |
+@@ -161,7 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, |
132 |
+ if (flags & MAP_FIXED) { |
133 |
+ if ((flags & MAP_SHARED) && last_mmap && |
134 |
+ (addr - shared_align_offset(last_mmap, pgoff)) |
135 |
+- & (SHMLBA - 1)) |
136 |
++ & (SHM_COLOUR - 1)) |
137 |
+ return -EINVAL; |
138 |
+ goto found_addr; |
139 |
+ } |
140 |
+@@ -182,7 +182,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, |
141 |
+ info.length = len; |
142 |
+ info.low_limit = PAGE_SIZE; |
143 |
+ info.high_limit = mm->mmap_base; |
144 |
+- info.align_mask = last_mmap ? (PAGE_MASK & (SHMLBA - 1)) : 0; |
145 |
++ info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0; |
146 |
+ info.align_offset = shared_align_offset(last_mmap, pgoff); |
147 |
+ addr = vm_unmapped_area(&info); |
148 |
+ if (!(addr & ~PAGE_MASK)) |
149 |
+diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S |
150 |
+index 80e5dd248934..83ead0ea127d 100644 |
151 |
+--- a/arch/parisc/kernel/syscall_table.S |
152 |
++++ b/arch/parisc/kernel/syscall_table.S |
153 |
+@@ -392,7 +392,7 @@ |
154 |
+ ENTRY_COMP(vmsplice) |
155 |
+ ENTRY_COMP(move_pages) /* 295 */ |
156 |
+ ENTRY_SAME(getcpu) |
157 |
+- ENTRY_SAME(epoll_pwait) |
158 |
++ ENTRY_COMP(epoll_pwait) |
159 |
+ ENTRY_COMP(statfs64) |
160 |
+ ENTRY_COMP(fstatfs64) |
161 |
+ ENTRY_COMP(kexec_load) /* 300 */ |
162 |
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c |
163 |
+index a778ee27518a..8e08c6712eb8 100644 |
164 |
+--- a/arch/s390/net/bpf_jit_comp.c |
165 |
++++ b/arch/s390/net/bpf_jit_comp.c |
166 |
+@@ -811,7 +811,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize, |
167 |
+ return NULL; |
168 |
+ memset(header, 0, sz); |
169 |
+ header->pages = sz / PAGE_SIZE; |
170 |
+- hole = sz - (bpfsize + sizeof(*header)); |
171 |
++ hole = min(sz - (bpfsize + sizeof(*header)), PAGE_SIZE - sizeof(*header)); |
172 |
+ /* Insert random number of illegal instructions before BPF code |
173 |
+ * and make sure the first instruction starts at an even address. |
174 |
+ */ |
175 |
+diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h |
176 |
+index c8b051933b1b..b39e194f6c8d 100644 |
177 |
+--- a/arch/x86/include/asm/preempt.h |
178 |
++++ b/arch/x86/include/asm/preempt.h |
179 |
+@@ -5,6 +5,18 @@ |
180 |
+ #include <asm/percpu.h> |
181 |
+ #include <linux/thread_info.h> |
182 |
+ |
183 |
++#ifdef CONFIG_X86_32 |
184 |
++/* |
185 |
++ * i386's current_thread_info() depends on ESP and for interrupt/exception |
186 |
++ * stacks this doesn't yield the actual task thread_info. |
187 |
++ * |
188 |
++ * We hard rely on the fact that all the TIF_NEED_RESCHED bits are |
189 |
++ * the same, therefore use the slightly more expensive version below. |
190 |
++ */ |
191 |
++#undef tif_need_resched |
192 |
++#define tif_need_resched() test_tsk_thread_flag(current, TIF_NEED_RESCHED) |
193 |
++#endif |
194 |
++ |
195 |
+ DECLARE_PER_CPU(int, __preempt_count); |
196 |
+ |
197 |
+ /* |
198 |
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
199 |
+index 2b8578432d5b..ee0c3b554a38 100644 |
200 |
+--- a/arch/x86/kvm/x86.c |
201 |
++++ b/arch/x86/kvm/x86.c |
202 |
+@@ -1109,7 +1109,6 @@ static inline u64 get_kernel_ns(void) |
203 |
+ { |
204 |
+ struct timespec ts; |
205 |
+ |
206 |
+- WARN_ON(preemptible()); |
207 |
+ ktime_get_ts(&ts); |
208 |
+ monotonic_to_bootbased(&ts); |
209 |
+ return timespec_to_ns(&ts); |
210 |
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c |
211 |
+index 4ed75dd81d05..af2d4317b218 100644 |
212 |
+--- a/arch/x86/net/bpf_jit_comp.c |
213 |
++++ b/arch/x86/net/bpf_jit_comp.c |
214 |
+@@ -171,7 +171,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen, |
215 |
+ memset(header, 0xcc, sz); /* fill whole space with int3 instructions */ |
216 |
+ |
217 |
+ header->pages = sz / PAGE_SIZE; |
218 |
+- hole = sz - (proglen + sizeof(*header)); |
219 |
++ hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header)); |
220 |
+ |
221 |
+ /* insert a random number of int3 instructions before BPF code */ |
222 |
+ *image_ptr = &header->image[prandom_u32() % hole]; |
223 |
+diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c |
224 |
+index 581521c843a5..c570feab9fa2 100644 |
225 |
+--- a/arch/x86/xen/spinlock.c |
226 |
++++ b/arch/x86/xen/spinlock.c |
227 |
+@@ -274,7 +274,7 @@ void __init xen_init_spinlocks(void) |
228 |
+ printk(KERN_DEBUG "xen: PV spinlocks disabled\n"); |
229 |
+ return; |
230 |
+ } |
231 |
+- |
232 |
++ printk(KERN_DEBUG "xen: PV spinlocks enabled\n"); |
233 |
+ pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning); |
234 |
+ pv_lock_ops.unlock_kick = xen_unlock_kick; |
235 |
+ } |
236 |
+@@ -290,6 +290,9 @@ static __init int xen_init_spinlocks_jump(void) |
237 |
+ if (!xen_pvspin) |
238 |
+ return 0; |
239 |
+ |
240 |
++ if (!xen_domain()) |
241 |
++ return 0; |
242 |
++ |
243 |
+ static_key_slow_inc(¶virt_ticketlocks_enabled); |
244 |
+ return 0; |
245 |
+ } |
246 |
+diff --git a/block/blk-core.c b/block/blk-core.c |
247 |
+index bfe16d5af9f9..e45b321cf6a0 100644 |
248 |
+--- a/block/blk-core.c |
249 |
++++ b/block/blk-core.c |
250 |
+@@ -2353,7 +2353,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) |
251 |
+ if (!req->bio) |
252 |
+ return false; |
253 |
+ |
254 |
+- trace_block_rq_complete(req->q, req); |
255 |
++ trace_block_rq_complete(req->q, req, nr_bytes); |
256 |
+ |
257 |
+ /* |
258 |
+ * For fs requests, rq is just carrier of independent bio's |
259 |
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c |
260 |
+index 34898d53395b..7f2d09fbb10b 100644 |
261 |
+--- a/drivers/block/rbd.c |
262 |
++++ b/drivers/block/rbd.c |
263 |
+@@ -2253,7 +2253,7 @@ out_partial: |
264 |
+ rbd_obj_request_put(obj_request); |
265 |
+ out_unwind: |
266 |
+ for_each_obj_request_safe(img_request, obj_request, next_obj_request) |
267 |
+- rbd_obj_request_put(obj_request); |
268 |
++ rbd_img_obj_request_del(img_request, obj_request); |
269 |
+ |
270 |
+ return -ENOMEM; |
271 |
+ } |
272 |
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h |
273 |
+index df77e20e3c3d..697f2150a997 100644 |
274 |
+--- a/drivers/gpu/drm/i915/i915_drv.h |
275 |
++++ b/drivers/gpu/drm/i915/i915_drv.h |
276 |
+@@ -734,6 +734,7 @@ enum intel_sbi_destination { |
277 |
+ #define QUIRK_PIPEA_FORCE (1<<0) |
278 |
+ #define QUIRK_LVDS_SSC_DISABLE (1<<1) |
279 |
+ #define QUIRK_INVERT_BRIGHTNESS (1<<2) |
280 |
++#define QUIRK_NO_PCH_PWM_ENABLE (1<<3) |
281 |
+ |
282 |
+ struct intel_fbdev; |
283 |
+ struct intel_fbc_work; |
284 |
+diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h |
285 |
+index 6e580c98dede..783ae08200e8 100644 |
286 |
+--- a/drivers/gpu/drm/i915/i915_trace.h |
287 |
++++ b/drivers/gpu/drm/i915/i915_trace.h |
288 |
+@@ -238,14 +238,16 @@ TRACE_EVENT(i915_gem_evict_vm, |
289 |
+ TP_ARGS(vm), |
290 |
+ |
291 |
+ TP_STRUCT__entry( |
292 |
++ __field(u32, dev) |
293 |
+ __field(struct i915_address_space *, vm) |
294 |
+ ), |
295 |
+ |
296 |
+ TP_fast_assign( |
297 |
++ __entry->dev = vm->dev->primary->index; |
298 |
+ __entry->vm = vm; |
299 |
+ ), |
300 |
+ |
301 |
+- TP_printk("dev=%d, vm=%p", __entry->vm->dev->primary->index, __entry->vm) |
302 |
++ TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm) |
303 |
+ ); |
304 |
+ |
305 |
+ TRACE_EVENT(i915_gem_ring_sync_to, |
306 |
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c |
307 |
+index 9b8a7c7ea7fc..963639d9049b 100644 |
308 |
+--- a/drivers/gpu/drm/i915/intel_display.c |
309 |
++++ b/drivers/gpu/drm/i915/intel_display.c |
310 |
+@@ -10771,6 +10771,17 @@ static void quirk_invert_brightness(struct drm_device *dev) |
311 |
+ DRM_INFO("applying inverted panel brightness quirk\n"); |
312 |
+ } |
313 |
+ |
314 |
++/* |
315 |
++ * Some machines (Dell XPS13) suffer broken backlight controls if |
316 |
++ * BLM_PCH_PWM_ENABLE is set. |
317 |
++ */ |
318 |
++static void quirk_no_pcm_pwm_enable(struct drm_device *dev) |
319 |
++{ |
320 |
++ struct drm_i915_private *dev_priv = dev->dev_private; |
321 |
++ dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE; |
322 |
++ DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n"); |
323 |
++} |
324 |
++ |
325 |
+ struct intel_quirk { |
326 |
+ int device; |
327 |
+ int subsystem_vendor; |
328 |
+@@ -10839,6 +10850,11 @@ static struct intel_quirk intel_quirks[] = { |
329 |
+ |
330 |
+ /* Acer Aspire 4736Z */ |
331 |
+ { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, |
332 |
++ |
333 |
++ /* Dell XPS13 HD Sandy Bridge */ |
334 |
++ { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable }, |
335 |
++ /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */ |
336 |
++ { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable }, |
337 |
+ }; |
338 |
+ |
339 |
+ static void intel_init_quirks(struct drm_device *dev) |
340 |
+diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c |
341 |
+index 079ea38f14d9..9f1d7a9300e8 100644 |
342 |
+--- a/drivers/gpu/drm/i915/intel_panel.c |
343 |
++++ b/drivers/gpu/drm/i915/intel_panel.c |
344 |
+@@ -671,6 +671,10 @@ static void pch_enable_backlight(struct intel_connector *connector) |
345 |
+ pch_ctl2 = panel->backlight.max << 16; |
346 |
+ I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2); |
347 |
+ |
348 |
++ /* XXX: transitional */ |
349 |
++ if (dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE) |
350 |
++ return; |
351 |
++ |
352 |
+ pch_ctl1 = 0; |
353 |
+ if (panel->backlight.active_low_pwm) |
354 |
+ pch_ctl1 |= BLM_PCH_POLARITY; |
355 |
+diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c |
356 |
+index 22cf0f4ba248..99faac555d3b 100644 |
357 |
+--- a/drivers/gpu/drm/i915/intel_tv.c |
358 |
++++ b/drivers/gpu/drm/i915/intel_tv.c |
359 |
+@@ -1536,9 +1536,14 @@ static int tv_is_present_in_vbt(struct drm_device *dev) |
360 |
+ /* |
361 |
+ * If the device type is not TV, continue. |
362 |
+ */ |
363 |
+- if (p_child->old.device_type != DEVICE_TYPE_INT_TV && |
364 |
+- p_child->old.device_type != DEVICE_TYPE_TV) |
365 |
++ switch (p_child->old.device_type) { |
366 |
++ case DEVICE_TYPE_INT_TV: |
367 |
++ case DEVICE_TYPE_TV: |
368 |
++ case DEVICE_TYPE_TV_SVIDEO_COMPOSITE: |
369 |
++ break; |
370 |
++ default: |
371 |
+ continue; |
372 |
++ } |
373 |
+ /* Only when the addin_offset is non-zero, it is regarded |
374 |
+ * as present. |
375 |
+ */ |
376 |
+diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c |
377 |
+index c7e7e6590c2b..c82c1d6a965a 100644 |
378 |
+--- a/drivers/gpu/drm/qxl/qxl_ttm.c |
379 |
++++ b/drivers/gpu/drm/qxl/qxl_ttm.c |
380 |
+@@ -433,6 +433,7 @@ static int qxl_sync_obj_flush(void *sync_obj) |
381 |
+ |
382 |
+ static void qxl_sync_obj_unref(void **sync_obj) |
383 |
+ { |
384 |
++ *sync_obj = NULL; |
385 |
+ } |
386 |
+ |
387 |
+ static void *qxl_sync_obj_ref(void *sync_obj) |
388 |
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c |
389 |
+index bbb17841a9e5..986f9e7364ca 100644 |
390 |
+--- a/drivers/gpu/drm/radeon/cik.c |
391 |
++++ b/drivers/gpu/drm/radeon/cik.c |
392 |
+@@ -1095,7 +1095,7 @@ static const u32 spectre_golden_registers[] = |
393 |
+ 0x8a14, 0xf000003f, 0x00000007, |
394 |
+ 0x8b24, 0xffffffff, 0x00ffffff, |
395 |
+ 0x28350, 0x3f3f3fff, 0x00000082, |
396 |
+- 0x28355, 0x0000003f, 0x00000000, |
397 |
++ 0x28354, 0x0000003f, 0x00000000, |
398 |
+ 0x3e78, 0x00000001, 0x00000002, |
399 |
+ 0x913c, 0xffff03df, 0x00000004, |
400 |
+ 0xc768, 0x00000008, 0x00000008, |
401 |
+@@ -6521,8 +6521,8 @@ void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer) |
402 |
+ buffer[count++] = cpu_to_le32(0x00000000); |
403 |
+ break; |
404 |
+ case CHIP_HAWAII: |
405 |
+- buffer[count++] = 0x3a00161a; |
406 |
+- buffer[count++] = 0x0000002e; |
407 |
++ buffer[count++] = cpu_to_le32(0x3a00161a); |
408 |
++ buffer[count++] = cpu_to_le32(0x0000002e); |
409 |
+ break; |
410 |
+ default: |
411 |
+ buffer[count++] = cpu_to_le32(0x00000000); |
412 |
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c |
413 |
+index fbd8b930f2be..6e6f82c53fef 100644 |
414 |
+--- a/drivers/gpu/drm/radeon/radeon_display.c |
415 |
++++ b/drivers/gpu/drm/radeon/radeon_display.c |
416 |
+@@ -792,6 +792,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) |
417 |
+ if (radeon_connector->edid) { |
418 |
+ drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); |
419 |
+ ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); |
420 |
++ drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid); |
421 |
+ return ret; |
422 |
+ } |
423 |
+ drm_mode_connector_update_edid_property(&radeon_connector->base, NULL); |
424 |
+diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c |
425 |
+index 15e44a7281ab..8391fca52a58 100644 |
426 |
+--- a/drivers/gpu/drm/radeon/radeon_ring.c |
427 |
++++ b/drivers/gpu/drm/radeon/radeon_ring.c |
428 |
+@@ -257,6 +257,7 @@ int radeon_ib_ring_tests(struct radeon_device *rdev) |
429 |
+ r = radeon_ib_test(rdev, i, ring); |
430 |
+ if (r) { |
431 |
+ ring->ready = false; |
432 |
++ rdev->needs_reset = false; |
433 |
+ |
434 |
+ if (i == RADEON_RING_TYPE_GFX_INDEX) { |
435 |
+ /* oh, oh, that's really bad */ |
436 |
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c |
437 |
+index 1e80152674b5..4fbcfdb02360 100644 |
438 |
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c |
439 |
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c |
440 |
+@@ -117,10 +117,10 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) |
441 |
+ (void) vmw_context_binding_state_kill |
442 |
+ (&container_of(res, struct vmw_user_context, res)->cbs); |
443 |
+ (void) vmw_gb_context_destroy(res); |
444 |
++ mutex_unlock(&dev_priv->binding_mutex); |
445 |
+ if (dev_priv->pinned_bo != NULL && |
446 |
+ !dev_priv->query_cid_valid) |
447 |
+ __vmw_execbuf_release_pinned_bo(dev_priv, NULL); |
448 |
+- mutex_unlock(&dev_priv->binding_mutex); |
449 |
+ mutex_unlock(&dev_priv->cmdbuf_mutex); |
450 |
+ return; |
451 |
+ } |
452 |
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |
453 |
+index efb575a7996c..a3480c13eb1b 100644 |
454 |
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |
455 |
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |
456 |
+@@ -1214,14 +1214,36 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, |
457 |
+ SVGA3dCmdSurfaceDMA dma; |
458 |
+ } *cmd; |
459 |
+ int ret; |
460 |
++ SVGA3dCmdSurfaceDMASuffix *suffix; |
461 |
++ uint32_t bo_size; |
462 |
+ |
463 |
+ cmd = container_of(header, struct vmw_dma_cmd, header); |
464 |
++ suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma + |
465 |
++ header->size - sizeof(*suffix)); |
466 |
++ |
467 |
++ /* Make sure device and verifier stays in sync. */ |
468 |
++ if (unlikely(suffix->suffixSize != sizeof(*suffix))) { |
469 |
++ DRM_ERROR("Invalid DMA suffix size.\n"); |
470 |
++ return -EINVAL; |
471 |
++ } |
472 |
++ |
473 |
+ ret = vmw_translate_guest_ptr(dev_priv, sw_context, |
474 |
+ &cmd->dma.guest.ptr, |
475 |
+ &vmw_bo); |
476 |
+ if (unlikely(ret != 0)) |
477 |
+ return ret; |
478 |
+ |
479 |
++ /* Make sure DMA doesn't cross BO boundaries. */ |
480 |
++ bo_size = vmw_bo->base.num_pages * PAGE_SIZE; |
481 |
++ if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) { |
482 |
++ DRM_ERROR("Invalid DMA offset.\n"); |
483 |
++ return -EINVAL; |
484 |
++ } |
485 |
++ |
486 |
++ bo_size -= cmd->dma.guest.ptr.offset; |
487 |
++ if (unlikely(suffix->maximumOffset > bo_size)) |
488 |
++ suffix->maximumOffset = bo_size; |
489 |
++ |
490 |
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
491 |
+ user_surface_converter, &cmd->dma.host.sid, |
492 |
+ NULL); |
493 |
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c |
494 |
+index ed5ce2a41bbf..021b5227e783 100644 |
495 |
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c |
496 |
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c |
497 |
+@@ -147,7 +147,7 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var, |
498 |
+ } |
499 |
+ |
500 |
+ if (!vmw_kms_validate_mode_vram(vmw_priv, |
501 |
+- info->fix.line_length, |
502 |
++ var->xres * var->bits_per_pixel/8, |
503 |
+ var->yoffset + var->yres)) { |
504 |
+ DRM_ERROR("Requested geom can not fit in framebuffer\n"); |
505 |
+ return -EINVAL; |
506 |
+@@ -162,6 +162,8 @@ static int vmw_fb_set_par(struct fb_info *info) |
507 |
+ struct vmw_private *vmw_priv = par->vmw_priv; |
508 |
+ int ret; |
509 |
+ |
510 |
++ info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8; |
511 |
++ |
512 |
+ ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres, |
513 |
+ info->fix.line_length, |
514 |
+ par->bpp, par->depth); |
515 |
+@@ -177,6 +179,7 @@ static int vmw_fb_set_par(struct fb_info *info) |
516 |
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset); |
517 |
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres); |
518 |
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres); |
519 |
++ vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length); |
520 |
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); |
521 |
+ } |
522 |
+ |
523 |
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c |
524 |
+index cc32a6f96c64..8a5384ce0352 100644 |
525 |
+--- a/drivers/hid/hid-core.c |
526 |
++++ b/drivers/hid/hid-core.c |
527 |
+@@ -718,6 +718,9 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item) |
528 |
+ case HID_MAIN_ITEM_TAG_END_COLLECTION: |
529 |
+ break; |
530 |
+ case HID_MAIN_ITEM_TAG_INPUT: |
531 |
++ /* ignore constant inputs, they will be ignored by hid-input */ |
532 |
++ if (data & HID_MAIN_ITEM_CONSTANT) |
533 |
++ break; |
534 |
+ for (i = 0; i < parser->local.usage_index; i++) |
535 |
+ hid_scan_input_usage(parser, parser->local.usage[i]); |
536 |
+ break; |
537 |
+@@ -1780,8 +1783,6 @@ static const struct hid_device_id hid_have_special_driver[] = { |
538 |
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) }, |
539 |
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) }, |
540 |
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) }, |
541 |
+- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2) }, |
542 |
+- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2) }, |
543 |
+ { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, |
544 |
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, |
545 |
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) }, |
546 |
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h |
547 |
+index 22f28d6b33a8..6e12cd0317f6 100644 |
548 |
+--- a/drivers/hid/hid-ids.h |
549 |
++++ b/drivers/hid/hid-ids.h |
550 |
+@@ -624,8 +624,6 @@ |
551 |
+ #define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713 |
552 |
+ #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730 |
553 |
+ #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c |
554 |
+-#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7 |
555 |
+-#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9 |
556 |
+ |
557 |
+ #define USB_VENDOR_ID_MOJO 0x8282 |
558 |
+ #define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 |
559 |
+@@ -830,6 +828,7 @@ |
560 |
+ #define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10 |
561 |
+ #define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 |
562 |
+ #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 |
563 |
++#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 |
564 |
+ |
565 |
+ #define USB_VENDOR_ID_THINGM 0x27b8 |
566 |
+ #define USB_DEVICE_ID_BLINK1 0x01ed |
567 |
+diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c |
568 |
+index 404a3a8a82f1..c6ef6eed3091 100644 |
569 |
+--- a/drivers/hid/hid-microsoft.c |
570 |
++++ b/drivers/hid/hid-microsoft.c |
571 |
+@@ -208,10 +208,6 @@ static const struct hid_device_id ms_devices[] = { |
572 |
+ .driver_data = MS_NOGET }, |
573 |
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500), |
574 |
+ .driver_data = MS_DUPLICATE_USAGES }, |
575 |
+- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2), |
576 |
+- .driver_data = 0 }, |
577 |
+- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), |
578 |
+- .driver_data = 0 }, |
579 |
+ |
580 |
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT), |
581 |
+ .driver_data = MS_PRESENTER }, |
582 |
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c |
583 |
+index dbd83878ff99..8e4ddb369883 100644 |
584 |
+--- a/drivers/hid/usbhid/hid-quirks.c |
585 |
++++ b/drivers/hid/usbhid/hid-quirks.c |
586 |
+@@ -119,6 +119,7 @@ static const struct hid_blacklist { |
587 |
+ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS }, |
588 |
+ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS }, |
589 |
+ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS }, |
590 |
++ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS }, |
591 |
+ |
592 |
+ { 0, 0 } |
593 |
+ }; |
594 |
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c |
595 |
+index 4a6ca1cb2e78..56e24c072b62 100644 |
596 |
+--- a/drivers/md/raid1.c |
597 |
++++ b/drivers/md/raid1.c |
598 |
+@@ -97,6 +97,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) |
599 |
+ struct pool_info *pi = data; |
600 |
+ struct r1bio *r1_bio; |
601 |
+ struct bio *bio; |
602 |
++ int need_pages; |
603 |
+ int i, j; |
604 |
+ |
605 |
+ r1_bio = r1bio_pool_alloc(gfp_flags, pi); |
606 |
+@@ -119,15 +120,15 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) |
607 |
+ * RESYNC_PAGES for each bio. |
608 |
+ */ |
609 |
+ if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) |
610 |
+- j = pi->raid_disks; |
611 |
++ need_pages = pi->raid_disks; |
612 |
+ else |
613 |
+- j = 1; |
614 |
+- while(j--) { |
615 |
++ need_pages = 1; |
616 |
++ for (j = 0; j < need_pages; j++) { |
617 |
+ bio = r1_bio->bios[j]; |
618 |
+ bio->bi_vcnt = RESYNC_PAGES; |
619 |
+ |
620 |
+ if (bio_alloc_pages(bio, gfp_flags)) |
621 |
+- goto out_free_bio; |
622 |
++ goto out_free_pages; |
623 |
+ } |
624 |
+ /* If not user-requests, copy the page pointers to all bios */ |
625 |
+ if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) { |
626 |
+@@ -141,6 +142,14 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) |
627 |
+ |
628 |
+ return r1_bio; |
629 |
+ |
630 |
++out_free_pages: |
631 |
++ while (--j >= 0) { |
632 |
++ struct bio_vec *bv; |
633 |
++ |
634 |
++ bio_for_each_segment_all(bv, r1_bio->bios[j], i) |
635 |
++ __free_page(bv->bv_page); |
636 |
++ } |
637 |
++ |
638 |
+ out_free_bio: |
639 |
+ while (++j < pi->raid_disks) |
640 |
+ bio_put(r1_bio->bios[j]); |
641 |
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c |
642 |
+index e5628fc725c3..91ec8cd12478 100644 |
643 |
+--- a/drivers/net/bonding/bond_main.c |
644 |
++++ b/drivers/net/bonding/bond_main.c |
645 |
+@@ -4536,6 +4536,7 @@ static int __init bonding_init(void) |
646 |
+ out: |
647 |
+ return res; |
648 |
+ err: |
649 |
++ bond_destroy_debugfs(); |
650 |
+ bond_netlink_fini(); |
651 |
+ err_link: |
652 |
+ unregister_pernet_subsys(&bond_net_ops); |
653 |
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c |
654 |
+index 298c26509095..a937a37ae89f 100644 |
655 |
+--- a/drivers/net/bonding/bond_options.c |
656 |
++++ b/drivers/net/bonding/bond_options.c |
657 |
+@@ -70,6 +70,7 @@ static struct bond_opt_value bond_fail_over_mac_tbl[] = { |
658 |
+ static struct bond_opt_value bond_intmax_tbl[] = { |
659 |
+ { "off", 0, BOND_VALFLAG_DEFAULT}, |
660 |
+ { "maxval", INT_MAX, BOND_VALFLAG_MAX}, |
661 |
++ { NULL, -1, 0} |
662 |
+ }; |
663 |
+ |
664 |
+ static struct bond_opt_value bond_lacp_rate_tbl[] = { |
665 |
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c |
666 |
+index 70a225c8df5c..a210766279d3 100644 |
667 |
+--- a/drivers/net/ethernet/broadcom/tg3.c |
668 |
++++ b/drivers/net/ethernet/broadcom/tg3.c |
669 |
+@@ -12294,7 +12294,9 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e |
670 |
+ if (tg3_flag(tp, MAX_RXPEND_64) && |
671 |
+ tp->rx_pending > 63) |
672 |
+ tp->rx_pending = 63; |
673 |
+- tp->rx_jumbo_pending = ering->rx_jumbo_pending; |
674 |
++ |
675 |
++ if (tg3_flag(tp, JUMBO_RING_ENABLE)) |
676 |
++ tp->rx_jumbo_pending = ering->rx_jumbo_pending; |
677 |
+ |
678 |
+ for (i = 0; i < tp->irq_max; i++) |
679 |
+ tp->napi[i].tx_pending = ering->tx_pending; |
680 |
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c |
681 |
+index 70e95324a97d..c2cd8d31bcad 100644 |
682 |
+--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c |
683 |
++++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c |
684 |
+@@ -66,7 +66,6 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv, |
685 |
+ |
686 |
+ cq->ring = ring; |
687 |
+ cq->is_tx = mode; |
688 |
+- spin_lock_init(&cq->lock); |
689 |
+ |
690 |
+ /* Allocate HW buffers on provided NUMA node. |
691 |
+ * dev->numa_node is used in mtt range allocation flow. |
692 |
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c |
693 |
+index 84a96f70dfb5..2f83f3489fdb 100644 |
694 |
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c |
695 |
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c |
696 |
+@@ -1315,15 +1315,11 @@ static void mlx4_en_netpoll(struct net_device *dev) |
697 |
+ { |
698 |
+ struct mlx4_en_priv *priv = netdev_priv(dev); |
699 |
+ struct mlx4_en_cq *cq; |
700 |
+- unsigned long flags; |
701 |
+ int i; |
702 |
+ |
703 |
+ for (i = 0; i < priv->rx_ring_num; i++) { |
704 |
+ cq = priv->rx_cq[i]; |
705 |
+- spin_lock_irqsave(&cq->lock, flags); |
706 |
+- napi_synchronize(&cq->napi); |
707 |
+- mlx4_en_process_rx_cq(dev, cq, 0); |
708 |
+- spin_unlock_irqrestore(&cq->lock, flags); |
709 |
++ napi_schedule(&cq->napi); |
710 |
+ } |
711 |
+ } |
712 |
+ #endif |
713 |
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c |
714 |
+index d413e60071d4..95c316bb7a42 100644 |
715 |
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c |
716 |
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c |
717 |
+@@ -2399,7 +2399,8 @@ slave_start: |
718 |
+ * No return code for this call, just warn the user in case of PCI |
719 |
+ * express device capabilities are under-satisfied by the bus. |
720 |
+ */ |
721 |
+- mlx4_check_pcie_caps(dev); |
722 |
++ if (!mlx4_is_slave(dev)) |
723 |
++ mlx4_check_pcie_caps(dev); |
724 |
+ |
725 |
+ /* In master functions, the communication channel must be initialized |
726 |
+ * after obtaining its address from fw */ |
727 |
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h |
728 |
+index b57e8c87a34e..72db9bedd765 100644 |
729 |
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h |
730 |
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h |
731 |
+@@ -309,7 +309,6 @@ struct mlx4_en_cq { |
732 |
+ struct mlx4_cq mcq; |
733 |
+ struct mlx4_hwq_resources wqres; |
734 |
+ int ring; |
735 |
+- spinlock_t lock; |
736 |
+ struct net_device *dev; |
737 |
+ struct napi_struct napi; |
738 |
+ int size; |
739 |
+diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c |
740 |
+index 79226b19e3c4..cb3fb9dba8fb 100644 |
741 |
+--- a/drivers/net/ethernet/sfc/nic.c |
742 |
++++ b/drivers/net/ethernet/sfc/nic.c |
743 |
+@@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx) |
744 |
+ efx->net_dev->rx_cpu_rmap = NULL; |
745 |
+ #endif |
746 |
+ |
747 |
+- /* Disable MSI/MSI-X interrupts */ |
748 |
+- efx_for_each_channel(channel, efx) |
749 |
+- free_irq(channel->irq, &efx->msi_context[channel->channel]); |
750 |
+- |
751 |
+- /* Disable legacy interrupt */ |
752 |
+- if (efx->legacy_irq) |
753 |
++ if (EFX_INT_MODE_USE_MSI(efx)) { |
754 |
++ /* Disable MSI/MSI-X interrupts */ |
755 |
++ efx_for_each_channel(channel, efx) |
756 |
++ free_irq(channel->irq, |
757 |
++ &efx->msi_context[channel->channel]); |
758 |
++ } else { |
759 |
++ /* Disable legacy interrupt */ |
760 |
+ free_irq(efx->legacy_irq, efx); |
761 |
++ } |
762 |
+ } |
763 |
+ |
764 |
+ /* Register dump */ |
765 |
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c |
766 |
+index 1831fb7cd017..20bb66944c4a 100644 |
767 |
+--- a/drivers/net/macvlan.c |
768 |
++++ b/drivers/net/macvlan.c |
769 |
+@@ -263,11 +263,9 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) |
770 |
+ const struct macvlan_dev *vlan = netdev_priv(dev); |
771 |
+ const struct macvlan_port *port = vlan->port; |
772 |
+ const struct macvlan_dev *dest; |
773 |
+- __u8 ip_summed = skb->ip_summed; |
774 |
+ |
775 |
+ if (vlan->mode == MACVLAN_MODE_BRIDGE) { |
776 |
+ const struct ethhdr *eth = (void *)skb->data; |
777 |
+- skb->ip_summed = CHECKSUM_UNNECESSARY; |
778 |
+ |
779 |
+ /* send to other bridge ports directly */ |
780 |
+ if (is_multicast_ether_addr(eth->h_dest)) { |
781 |
+@@ -285,7 +283,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) |
782 |
+ } |
783 |
+ |
784 |
+ xmit_world: |
785 |
+- skb->ip_summed = ip_summed; |
786 |
+ skb->dev = vlan->lowerdev; |
787 |
+ return dev_queue_xmit(skb); |
788 |
+ } |
789 |
+@@ -461,8 +458,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change) |
790 |
+ struct macvlan_dev *vlan = netdev_priv(dev); |
791 |
+ struct net_device *lowerdev = vlan->lowerdev; |
792 |
+ |
793 |
+- if (change & IFF_ALLMULTI) |
794 |
+- dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); |
795 |
++ if (dev->flags & IFF_UP) { |
796 |
++ if (change & IFF_ALLMULTI) |
797 |
++ dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); |
798 |
++ } |
799 |
+ } |
800 |
+ |
801 |
+ static void macvlan_set_mac_lists(struct net_device *dev) |
802 |
+@@ -518,6 +517,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key; |
803 |
+ #define MACVLAN_STATE_MASK \ |
804 |
+ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) |
805 |
+ |
806 |
++static int macvlan_get_nest_level(struct net_device *dev) |
807 |
++{ |
808 |
++ return ((struct macvlan_dev *)netdev_priv(dev))->nest_level; |
809 |
++} |
810 |
++ |
811 |
+ static void macvlan_set_lockdep_class_one(struct net_device *dev, |
812 |
+ struct netdev_queue *txq, |
813 |
+ void *_unused) |
814 |
+@@ -528,8 +532,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev, |
815 |
+ |
816 |
+ static void macvlan_set_lockdep_class(struct net_device *dev) |
817 |
+ { |
818 |
+- lockdep_set_class(&dev->addr_list_lock, |
819 |
+- &macvlan_netdev_addr_lock_key); |
820 |
++ lockdep_set_class_and_subclass(&dev->addr_list_lock, |
821 |
++ &macvlan_netdev_addr_lock_key, |
822 |
++ macvlan_get_nest_level(dev)); |
823 |
+ netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL); |
824 |
+ } |
825 |
+ |
826 |
+@@ -731,6 +736,7 @@ static const struct net_device_ops macvlan_netdev_ops = { |
827 |
+ .ndo_fdb_add = macvlan_fdb_add, |
828 |
+ .ndo_fdb_del = macvlan_fdb_del, |
829 |
+ .ndo_fdb_dump = ndo_dflt_fdb_dump, |
830 |
++ .ndo_get_lock_subclass = macvlan_get_nest_level, |
831 |
+ }; |
832 |
+ |
833 |
+ void macvlan_common_setup(struct net_device *dev) |
834 |
+@@ -859,6 +865,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, |
835 |
+ vlan->dev = dev; |
836 |
+ vlan->port = port; |
837 |
+ vlan->set_features = MACVLAN_FEATURES; |
838 |
++ vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1; |
839 |
+ |
840 |
+ vlan->mode = MACVLAN_MODE_VEPA; |
841 |
+ if (data && data[IFLA_MACVLAN_MODE]) |
842 |
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c |
843 |
+index ff111a89e17f..3381c4f91a8c 100644 |
844 |
+--- a/drivers/net/macvtap.c |
845 |
++++ b/drivers/net/macvtap.c |
846 |
+@@ -322,6 +322,15 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb) |
847 |
+ segs = nskb; |
848 |
+ } |
849 |
+ } else { |
850 |
++ /* If we receive a partial checksum and the tap side |
851 |
++ * doesn't support checksum offload, compute the checksum. |
852 |
++ * Note: it doesn't matter which checksum feature to |
853 |
++ * check, we either support them all or none. |
854 |
++ */ |
855 |
++ if (skb->ip_summed == CHECKSUM_PARTIAL && |
856 |
++ !(features & NETIF_F_ALL_CSUM) && |
857 |
++ skb_checksum_help(skb)) |
858 |
++ goto drop; |
859 |
+ skb_queue_tail(&q->sk.sk_receive_queue, skb); |
860 |
+ } |
861 |
+ |
862 |
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c |
863 |
+index 2f6989b1e0dc..365375408904 100644 |
864 |
+--- a/drivers/net/phy/phy_device.c |
865 |
++++ b/drivers/net/phy/phy_device.c |
866 |
+@@ -613,8 +613,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, |
867 |
+ err = phy_init_hw(phydev); |
868 |
+ if (err) |
869 |
+ phy_detach(phydev); |
870 |
+- |
871 |
+- phy_resume(phydev); |
872 |
++ else |
873 |
++ phy_resume(phydev); |
874 |
+ |
875 |
+ return err; |
876 |
+ } |
877 |
+diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c |
878 |
+index cc70ecfc7062..ad4a94e9ff57 100644 |
879 |
+--- a/drivers/net/slip/slip.c |
880 |
++++ b/drivers/net/slip/slip.c |
881 |
+@@ -429,13 +429,13 @@ static void slip_write_wakeup(struct tty_struct *tty) |
882 |
+ if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) |
883 |
+ return; |
884 |
+ |
885 |
+- spin_lock(&sl->lock); |
886 |
++ spin_lock_bh(&sl->lock); |
887 |
+ if (sl->xleft <= 0) { |
888 |
+ /* Now serial buffer is almost free & we can start |
889 |
+ * transmission of another packet */ |
890 |
+ sl->dev->stats.tx_packets++; |
891 |
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); |
892 |
+- spin_unlock(&sl->lock); |
893 |
++ spin_unlock_bh(&sl->lock); |
894 |
+ sl_unlock(sl); |
895 |
+ return; |
896 |
+ } |
897 |
+@@ -443,7 +443,7 @@ static void slip_write_wakeup(struct tty_struct *tty) |
898 |
+ actual = tty->ops->write(tty, sl->xhead, sl->xleft); |
899 |
+ sl->xleft -= actual; |
900 |
+ sl->xhead += actual; |
901 |
+- spin_unlock(&sl->lock); |
902 |
++ spin_unlock_bh(&sl->lock); |
903 |
+ } |
904 |
+ |
905 |
+ static void sl_tx_timeout(struct net_device *dev) |
906 |
+diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c |
907 |
+index c9f3281506af..2e025ddcef21 100644 |
908 |
+--- a/drivers/net/usb/cdc_mbim.c |
909 |
++++ b/drivers/net/usb/cdc_mbim.c |
910 |
+@@ -120,6 +120,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf) |
911 |
+ cdc_ncm_unbind(dev, intf); |
912 |
+ } |
913 |
+ |
914 |
++/* verify that the ethernet protocol is IPv4 or IPv6 */ |
915 |
++static bool is_ip_proto(__be16 proto) |
916 |
++{ |
917 |
++ switch (proto) { |
918 |
++ case htons(ETH_P_IP): |
919 |
++ case htons(ETH_P_IPV6): |
920 |
++ return true; |
921 |
++ } |
922 |
++ return false; |
923 |
++} |
924 |
+ |
925 |
+ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) |
926 |
+ { |
927 |
+@@ -128,6 +138,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb |
928 |
+ struct cdc_ncm_ctx *ctx = info->ctx; |
929 |
+ __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN); |
930 |
+ u16 tci = 0; |
931 |
++ bool is_ip; |
932 |
+ u8 *c; |
933 |
+ |
934 |
+ if (!ctx) |
935 |
+@@ -137,25 +148,32 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb |
936 |
+ if (skb->len <= ETH_HLEN) |
937 |
+ goto error; |
938 |
+ |
939 |
++ /* Some applications using e.g. packet sockets will |
940 |
++ * bypass the VLAN acceleration and create tagged |
941 |
++ * ethernet frames directly. We primarily look for |
942 |
++ * the accelerated out-of-band tag, but fall back if |
943 |
++ * required |
944 |
++ */ |
945 |
++ skb_reset_mac_header(skb); |
946 |
++ if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN && |
947 |
++ __vlan_get_tag(skb, &tci) == 0) { |
948 |
++ is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto); |
949 |
++ skb_pull(skb, VLAN_ETH_HLEN); |
950 |
++ } else { |
951 |
++ is_ip = is_ip_proto(eth_hdr(skb)->h_proto); |
952 |
++ skb_pull(skb, ETH_HLEN); |
953 |
++ } |
954 |
++ |
955 |
+ /* mapping VLANs to MBIM sessions: |
956 |
+ * no tag => IPS session <0> |
957 |
+ * 1 - 255 => IPS session <vlanid> |
958 |
+ * 256 - 511 => DSS session <vlanid - 256> |
959 |
+ * 512 - 4095 => unsupported, drop |
960 |
+ */ |
961 |
+- vlan_get_tag(skb, &tci); |
962 |
+- |
963 |
+ switch (tci & 0x0f00) { |
964 |
+ case 0x0000: /* VLAN ID 0 - 255 */ |
965 |
+- /* verify that datagram is IPv4 or IPv6 */ |
966 |
+- skb_reset_mac_header(skb); |
967 |
+- switch (eth_hdr(skb)->h_proto) { |
968 |
+- case htons(ETH_P_IP): |
969 |
+- case htons(ETH_P_IPV6): |
970 |
+- break; |
971 |
+- default: |
972 |
++ if (!is_ip) |
973 |
+ goto error; |
974 |
+- } |
975 |
+ c = (u8 *)&sign; |
976 |
+ c[3] = tci; |
977 |
+ break; |
978 |
+@@ -169,7 +187,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb |
979 |
+ "unsupported tci=0x%04x\n", tci); |
980 |
+ goto error; |
981 |
+ } |
982 |
+- skb_pull(skb, ETH_HLEN); |
983 |
+ } |
984 |
+ |
985 |
+ spin_lock_bh(&ctx->mtx); |
986 |
+@@ -204,17 +221,23 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci) |
987 |
+ return; |
988 |
+ |
989 |
+ /* need to send the NA on the VLAN dev, if any */ |
990 |
+- if (tci) |
991 |
++ rcu_read_lock(); |
992 |
++ if (tci) { |
993 |
+ netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q), |
994 |
+ tci); |
995 |
+- else |
996 |
++ if (!netdev) { |
997 |
++ rcu_read_unlock(); |
998 |
++ return; |
999 |
++ } |
1000 |
++ } else { |
1001 |
+ netdev = dev->net; |
1002 |
+- if (!netdev) |
1003 |
+- return; |
1004 |
++ } |
1005 |
++ dev_hold(netdev); |
1006 |
++ rcu_read_unlock(); |
1007 |
+ |
1008 |
+ in6_dev = in6_dev_get(netdev); |
1009 |
+ if (!in6_dev) |
1010 |
+- return; |
1011 |
++ goto out; |
1012 |
+ is_router = !!in6_dev->cnf.forwarding; |
1013 |
+ in6_dev_put(in6_dev); |
1014 |
+ |
1015 |
+@@ -224,6 +247,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci) |
1016 |
+ true /* solicited */, |
1017 |
+ false /* override */, |
1018 |
+ true /* inc_opt */); |
1019 |
++out: |
1020 |
++ dev_put(netdev); |
1021 |
+ } |
1022 |
+ |
1023 |
+ static bool is_neigh_solicit(u8 *buf, size_t len) |
1024 |
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c |
1025 |
+index d350d2795e10..75d7d9dbbe35 100644 |
1026 |
+--- a/drivers/net/usb/cdc_ncm.c |
1027 |
++++ b/drivers/net/usb/cdc_ncm.c |
1028 |
+@@ -768,7 +768,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) |
1029 |
+ skb_out->len > CDC_NCM_MIN_TX_PKT) |
1030 |
+ memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, |
1031 |
+ ctx->tx_max - skb_out->len); |
1032 |
+- else if ((skb_out->len % dev->maxpacket) == 0) |
1033 |
++ else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0) |
1034 |
+ *skb_put(skb_out, 1) = 0; /* force short packet */ |
1035 |
+ |
1036 |
+ /* set final frame length */ |
1037 |
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c |
1038 |
+index 313cb6cd4848..48c4902c0d62 100644 |
1039 |
+--- a/drivers/net/usb/qmi_wwan.c |
1040 |
++++ b/drivers/net/usb/qmi_wwan.c |
1041 |
+@@ -662,6 +662,22 @@ static const struct usb_device_id products[] = { |
1042 |
+ {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, |
1043 |
+ {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ |
1044 |
+ {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ |
1045 |
++ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ |
1046 |
++ {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ |
1047 |
++ {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ |
1048 |
++ {QMI_FIXED_INTF(0x16d8, 0x6280, 0)}, /* CMOTech CHU-628 */ |
1049 |
++ {QMI_FIXED_INTF(0x16d8, 0x7001, 0)}, /* CMOTech CHU-720S */ |
1050 |
++ {QMI_FIXED_INTF(0x16d8, 0x7002, 0)}, /* CMOTech 7002 */ |
1051 |
++ {QMI_FIXED_INTF(0x16d8, 0x7003, 4)}, /* CMOTech CHU-629K */ |
1052 |
++ {QMI_FIXED_INTF(0x16d8, 0x7004, 3)}, /* CMOTech 7004 */ |
1053 |
++ {QMI_FIXED_INTF(0x16d8, 0x7006, 5)}, /* CMOTech CGU-629 */ |
1054 |
++ {QMI_FIXED_INTF(0x16d8, 0x700a, 4)}, /* CMOTech CHU-629S */ |
1055 |
++ {QMI_FIXED_INTF(0x16d8, 0x7211, 0)}, /* CMOTech CHU-720I */ |
1056 |
++ {QMI_FIXED_INTF(0x16d8, 0x7212, 0)}, /* CMOTech 7212 */ |
1057 |
++ {QMI_FIXED_INTF(0x16d8, 0x7213, 0)}, /* CMOTech 7213 */ |
1058 |
++ {QMI_FIXED_INTF(0x16d8, 0x7251, 1)}, /* CMOTech 7251 */ |
1059 |
++ {QMI_FIXED_INTF(0x16d8, 0x7252, 1)}, /* CMOTech 7252 */ |
1060 |
++ {QMI_FIXED_INTF(0x16d8, 0x7253, 1)}, /* CMOTech 7253 */ |
1061 |
+ {QMI_FIXED_INTF(0x19d2, 0x0002, 1)}, |
1062 |
+ {QMI_FIXED_INTF(0x19d2, 0x0012, 1)}, |
1063 |
+ {QMI_FIXED_INTF(0x19d2, 0x0017, 3)}, |
1064 |
+@@ -723,16 +739,28 @@ static const struct usb_device_id products[] = { |
1065 |
+ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ |
1066 |
+ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ |
1067 |
+ {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ |
1068 |
++ {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC73xx */ |
1069 |
++ {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC73xx */ |
1070 |
++ {QMI_FIXED_INTF(0x1199, 0x68c0, 11)}, /* Sierra Wireless MC73xx */ |
1071 |
+ {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ |
1072 |
++ {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ |
1073 |
++ {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ |
1074 |
+ {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */ |
1075 |
+ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ |
1076 |
++ {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ |
1077 |
+ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ |
1078 |
+ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ |
1079 |
+ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ |
1080 |
+ {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ |
1081 |
+ {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */ |
1082 |
++ {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */ |
1083 |
+ {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ |
1084 |
+ {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ |
1085 |
++ {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ |
1086 |
++ {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ |
1087 |
++ {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ |
1088 |
++ {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ |
1089 |
++ {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ |
1090 |
+ |
1091 |
+ /* 4. Gobi 1000 devices */ |
1092 |
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ |
1093 |
+diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h |
1094 |
+index 398f3d2c0a6c..a76e98eb8372 100644 |
1095 |
+--- a/drivers/net/wireless/ti/wl18xx/event.h |
1096 |
++++ b/drivers/net/wireless/ti/wl18xx/event.h |
1097 |
+@@ -68,6 +68,26 @@ struct wl18xx_event_mailbox { |
1098 |
+ |
1099 |
+ /* bitmap of inactive stations (by HLID) */ |
1100 |
+ __le32 inactive_sta_bitmap; |
1101 |
++ |
1102 |
++ /* rx BA win size indicated by RX_BA_WIN_SIZE_CHANGE_EVENT_ID */ |
1103 |
++ u8 rx_ba_role_id; |
1104 |
++ u8 rx_ba_link_id; |
1105 |
++ u8 rx_ba_win_size; |
1106 |
++ u8 padding; |
1107 |
++ |
1108 |
++ /* smart config */ |
1109 |
++ u8 sc_ssid_len; |
1110 |
++ u8 sc_pwd_len; |
1111 |
++ u8 sc_token_len; |
1112 |
++ u8 padding1; |
1113 |
++ u8 sc_ssid[32]; |
1114 |
++ u8 sc_pwd[32]; |
1115 |
++ u8 sc_token[32]; |
1116 |
++ |
1117 |
++ /* smart config sync channel */ |
1118 |
++ u8 sc_sync_channel; |
1119 |
++ u8 sc_sync_band; |
1120 |
++ u8 padding2[2]; |
1121 |
+ } __packed; |
1122 |
+ |
1123 |
+ int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event, |
1124 |
+diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c |
1125 |
+index dfffd0f37916..a70692779a16 100644 |
1126 |
+--- a/drivers/scsi/megaraid/megaraid_mm.c |
1127 |
++++ b/drivers/scsi/megaraid/megaraid_mm.c |
1128 |
+@@ -486,6 +486,8 @@ mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc) |
1129 |
+ |
1130 |
+ pthru32->dataxferaddr = kioc->buf_paddr; |
1131 |
+ if (kioc->data_dir & UIOC_WR) { |
1132 |
++ if (pthru32->dataxferlen > kioc->xferlen) |
1133 |
++ return -EINVAL; |
1134 |
+ if (copy_from_user(kioc->buf_vaddr, kioc->user_data, |
1135 |
+ pthru32->dataxferlen)) { |
1136 |
+ return (-EFAULT); |
1137 |
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c |
1138 |
+index 307a81137607..4109530e92a0 100644 |
1139 |
+--- a/drivers/scsi/scsi_scan.c |
1140 |
++++ b/drivers/scsi/scsi_scan.c |
1141 |
+@@ -320,6 +320,7 @@ static void scsi_target_destroy(struct scsi_target *starget) |
1142 |
+ struct Scsi_Host *shost = dev_to_shost(dev->parent); |
1143 |
+ unsigned long flags; |
1144 |
+ |
1145 |
++ starget->state = STARGET_DEL; |
1146 |
+ transport_destroy_device(dev); |
1147 |
+ spin_lock_irqsave(shost->host_lock, flags); |
1148 |
+ if (shost->hostt->target_destroy) |
1149 |
+@@ -371,6 +372,37 @@ static struct scsi_target *__scsi_find_target(struct device *parent, |
1150 |
+ } |
1151 |
+ |
1152 |
+ /** |
1153 |
++ * scsi_target_reap_ref_release - remove target from visibility |
1154 |
++ * @kref: the reap_ref in the target being released |
1155 |
++ * |
1156 |
++ * Called on last put of reap_ref, which is the indication that no device |
1157 |
++ * under this target is visible anymore, so render the target invisible in |
1158 |
++ * sysfs. Note: we have to be in user context here because the target reaps |
1159 |
++ * should be done in places where the scsi device visibility is being removed. |
1160 |
++ */ |
1161 |
++static void scsi_target_reap_ref_release(struct kref *kref) |
1162 |
++{ |
1163 |
++ struct scsi_target *starget |
1164 |
++ = container_of(kref, struct scsi_target, reap_ref); |
1165 |
++ |
1166 |
++ /* |
1167 |
++ * if we get here and the target is still in the CREATED state that |
1168 |
++ * means it was allocated but never made visible (because a scan |
1169 |
++ * turned up no LUNs), so don't call device_del() on it. |
1170 |
++ */ |
1171 |
++ if (starget->state != STARGET_CREATED) { |
1172 |
++ transport_remove_device(&starget->dev); |
1173 |
++ device_del(&starget->dev); |
1174 |
++ } |
1175 |
++ scsi_target_destroy(starget); |
1176 |
++} |
1177 |
++ |
1178 |
++static void scsi_target_reap_ref_put(struct scsi_target *starget) |
1179 |
++{ |
1180 |
++ kref_put(&starget->reap_ref, scsi_target_reap_ref_release); |
1181 |
++} |
1182 |
++ |
1183 |
++/** |
1184 |
+ * scsi_alloc_target - allocate a new or find an existing target |
1185 |
+ * @parent: parent of the target (need not be a scsi host) |
1186 |
+ * @channel: target channel number (zero if no channels) |
1187 |
+@@ -392,7 +424,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent, |
1188 |
+ + shost->transportt->target_size; |
1189 |
+ struct scsi_target *starget; |
1190 |
+ struct scsi_target *found_target; |
1191 |
+- int error; |
1192 |
++ int error, ref_got; |
1193 |
+ |
1194 |
+ starget = kzalloc(size, GFP_KERNEL); |
1195 |
+ if (!starget) { |
1196 |
+@@ -401,7 +433,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent, |
1197 |
+ } |
1198 |
+ dev = &starget->dev; |
1199 |
+ device_initialize(dev); |
1200 |
+- starget->reap_ref = 1; |
1201 |
++ kref_init(&starget->reap_ref); |
1202 |
+ dev->parent = get_device(parent); |
1203 |
+ dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id); |
1204 |
+ dev->bus = &scsi_bus_type; |
1205 |
+@@ -441,29 +473,36 @@ static struct scsi_target *scsi_alloc_target(struct device *parent, |
1206 |
+ return starget; |
1207 |
+ |
1208 |
+ found: |
1209 |
+- found_target->reap_ref++; |
1210 |
++ /* |
1211 |
++ * release routine already fired if kref is zero, so if we can still |
1212 |
++ * take the reference, the target must be alive. If we can't, it must |
1213 |
++ * be dying and we need to wait for a new target |
1214 |
++ */ |
1215 |
++ ref_got = kref_get_unless_zero(&found_target->reap_ref); |
1216 |
++ |
1217 |
+ spin_unlock_irqrestore(shost->host_lock, flags); |
1218 |
+- if (found_target->state != STARGET_DEL) { |
1219 |
++ if (ref_got) { |
1220 |
+ put_device(dev); |
1221 |
+ return found_target; |
1222 |
+ } |
1223 |
+- /* Unfortunately, we found a dying target; need to |
1224 |
+- * wait until it's dead before we can get a new one */ |
1225 |
++ /* |
1226 |
++ * Unfortunately, we found a dying target; need to wait until it's |
1227 |
++ * dead before we can get a new one. There is an anomaly here. We |
1228 |
++ * *should* call scsi_target_reap() to balance the kref_get() of the |
1229 |
++ * reap_ref above. However, since the target being released, it's |
1230 |
++ * already invisible and the reap_ref is irrelevant. If we call |
1231 |
++ * scsi_target_reap() we might spuriously do another device_del() on |
1232 |
++ * an already invisible target. |
1233 |
++ */ |
1234 |
+ put_device(&found_target->dev); |
1235 |
+- flush_scheduled_work(); |
1236 |
++ /* |
1237 |
++ * length of time is irrelevant here, we just want to yield the CPU |
1238 |
++ * for a tick to avoid busy waiting for the target to die. |
1239 |
++ */ |
1240 |
++ msleep(1); |
1241 |
+ goto retry; |
1242 |
+ } |
1243 |
+ |
1244 |
+-static void scsi_target_reap_usercontext(struct work_struct *work) |
1245 |
+-{ |
1246 |
+- struct scsi_target *starget = |
1247 |
+- container_of(work, struct scsi_target, ew.work); |
1248 |
+- |
1249 |
+- transport_remove_device(&starget->dev); |
1250 |
+- device_del(&starget->dev); |
1251 |
+- scsi_target_destroy(starget); |
1252 |
+-} |
1253 |
+- |
1254 |
+ /** |
1255 |
+ * scsi_target_reap - check to see if target is in use and destroy if not |
1256 |
+ * @starget: target to be checked |
1257 |
+@@ -474,28 +513,13 @@ static void scsi_target_reap_usercontext(struct work_struct *work) |
1258 |
+ */ |
1259 |
+ void scsi_target_reap(struct scsi_target *starget) |
1260 |
+ { |
1261 |
+- struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); |
1262 |
+- unsigned long flags; |
1263 |
+- enum scsi_target_state state; |
1264 |
+- int empty = 0; |
1265 |
+- |
1266 |
+- spin_lock_irqsave(shost->host_lock, flags); |
1267 |
+- state = starget->state; |
1268 |
+- if (--starget->reap_ref == 0 && list_empty(&starget->devices)) { |
1269 |
+- empty = 1; |
1270 |
+- starget->state = STARGET_DEL; |
1271 |
+- } |
1272 |
+- spin_unlock_irqrestore(shost->host_lock, flags); |
1273 |
+- |
1274 |
+- if (!empty) |
1275 |
+- return; |
1276 |
+- |
1277 |
+- BUG_ON(state == STARGET_DEL); |
1278 |
+- if (state == STARGET_CREATED) |
1279 |
+- scsi_target_destroy(starget); |
1280 |
+- else |
1281 |
+- execute_in_process_context(scsi_target_reap_usercontext, |
1282 |
+- &starget->ew); |
1283 |
++ /* |
1284 |
++ * serious problem if this triggers: STARGET_DEL is only set if |
1285 |
++ * the reap_ref drops to zero, so we're trying to do another final put |
1286 |
++ * on an already released kref |
1287 |
++ */ |
1288 |
++ BUG_ON(starget->state == STARGET_DEL); |
1289 |
++ scsi_target_reap_ref_put(starget); |
1290 |
+ } |
1291 |
+ |
1292 |
+ /** |
1293 |
+@@ -1532,6 +1556,10 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel, |
1294 |
+ } |
1295 |
+ mutex_unlock(&shost->scan_mutex); |
1296 |
+ scsi_autopm_put_target(starget); |
1297 |
++ /* |
1298 |
++ * paired with scsi_alloc_target(). Target will be destroyed unless |
1299 |
++ * scsi_probe_and_add_lun made an underlying device visible |
1300 |
++ */ |
1301 |
+ scsi_target_reap(starget); |
1302 |
+ put_device(&starget->dev); |
1303 |
+ |
1304 |
+@@ -1612,8 +1640,10 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel, |
1305 |
+ |
1306 |
+ out_reap: |
1307 |
+ scsi_autopm_put_target(starget); |
1308 |
+- /* now determine if the target has any children at all |
1309 |
+- * and if not, nuke it */ |
1310 |
++ /* |
1311 |
++ * paired with scsi_alloc_target(): determine if the target has |
1312 |
++ * any children at all and if not, nuke it |
1313 |
++ */ |
1314 |
+ scsi_target_reap(starget); |
1315 |
+ |
1316 |
+ put_device(&starget->dev); |
1317 |
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c |
1318 |
+index 9117d0bf408e..665acbf83693 100644 |
1319 |
+--- a/drivers/scsi/scsi_sysfs.c |
1320 |
++++ b/drivers/scsi/scsi_sysfs.c |
1321 |
+@@ -383,17 +383,14 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work) |
1322 |
+ { |
1323 |
+ struct scsi_device *sdev; |
1324 |
+ struct device *parent; |
1325 |
+- struct scsi_target *starget; |
1326 |
+ struct list_head *this, *tmp; |
1327 |
+ unsigned long flags; |
1328 |
+ |
1329 |
+ sdev = container_of(work, struct scsi_device, ew.work); |
1330 |
+ |
1331 |
+ parent = sdev->sdev_gendev.parent; |
1332 |
+- starget = to_scsi_target(parent); |
1333 |
+ |
1334 |
+ spin_lock_irqsave(sdev->host->host_lock, flags); |
1335 |
+- starget->reap_ref++; |
1336 |
+ list_del(&sdev->siblings); |
1337 |
+ list_del(&sdev->same_target_siblings); |
1338 |
+ list_del(&sdev->starved_entry); |
1339 |
+@@ -413,8 +410,6 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work) |
1340 |
+ /* NULL queue means the device can't be used */ |
1341 |
+ sdev->request_queue = NULL; |
1342 |
+ |
1343 |
+- scsi_target_reap(scsi_target(sdev)); |
1344 |
+- |
1345 |
+ kfree(sdev->inquiry); |
1346 |
+ kfree(sdev); |
1347 |
+ |
1348 |
+@@ -1071,6 +1066,13 @@ void __scsi_remove_device(struct scsi_device *sdev) |
1349 |
+ sdev->host->hostt->slave_destroy(sdev); |
1350 |
+ transport_destroy_device(dev); |
1351 |
+ |
1352 |
++ /* |
1353 |
++ * Paired with the kref_get() in scsi_sysfs_initialize(). We have |
1354 |
++ * removed sysfs visibility from the device, so make the target |
1355 |
++ * invisible if this was the last device underneath it. |
1356 |
++ */ |
1357 |
++ scsi_target_reap(scsi_target(sdev)); |
1358 |
++ |
1359 |
+ put_device(dev); |
1360 |
+ } |
1361 |
+ |
1362 |
+@@ -1133,7 +1135,7 @@ void scsi_remove_target(struct device *dev) |
1363 |
+ continue; |
1364 |
+ if (starget->dev.parent == dev || &starget->dev == dev) { |
1365 |
+ /* assuming new targets arrive at the end */ |
1366 |
+- starget->reap_ref++; |
1367 |
++ kref_get(&starget->reap_ref); |
1368 |
+ spin_unlock_irqrestore(shost->host_lock, flags); |
1369 |
+ if (last) |
1370 |
+ scsi_target_reap(last); |
1371 |
+@@ -1217,6 +1219,12 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev) |
1372 |
+ list_add_tail(&sdev->same_target_siblings, &starget->devices); |
1373 |
+ list_add_tail(&sdev->siblings, &shost->__devices); |
1374 |
+ spin_unlock_irqrestore(shost->host_lock, flags); |
1375 |
++ /* |
1376 |
++ * device can now only be removed via __scsi_remove_device() so hold |
1377 |
++ * the target. Target will be held in CREATED state until something |
1378 |
++ * beneath it becomes visible (in which case it moves to RUNNING) |
1379 |
++ */ |
1380 |
++ kref_get(&starget->reap_ref); |
1381 |
+ } |
1382 |
+ |
1383 |
+ int scsi_is_sdev_device(const struct device *dev) |
1384 |
+diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c |
1385 |
+index 77f035158d6c..db8434d3def9 100644 |
1386 |
+--- a/drivers/tty/serial/omap-serial.c |
1387 |
++++ b/drivers/tty/serial/omap-serial.c |
1388 |
+@@ -225,14 +225,19 @@ static inline void serial_omap_enable_wakeirq(struct uart_omap_port *up, |
1389 |
+ if (enable) |
1390 |
+ enable_irq(up->wakeirq); |
1391 |
+ else |
1392 |
+- disable_irq(up->wakeirq); |
1393 |
++ disable_irq_nosync(up->wakeirq); |
1394 |
+ } |
1395 |
+ |
1396 |
+ static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable) |
1397 |
+ { |
1398 |
+ struct omap_uart_port_info *pdata = dev_get_platdata(up->dev); |
1399 |
+ |
1400 |
++ if (enable == up->wakeups_enabled) |
1401 |
++ return; |
1402 |
++ |
1403 |
+ serial_omap_enable_wakeirq(up, enable); |
1404 |
++ up->wakeups_enabled = enable; |
1405 |
++ |
1406 |
+ if (!pdata || !pdata->enable_wakeup) |
1407 |
+ return; |
1408 |
+ |
1409 |
+@@ -1488,6 +1493,11 @@ static int serial_omap_suspend(struct device *dev) |
1410 |
+ uart_suspend_port(&serial_omap_reg, &up->port); |
1411 |
+ flush_work(&up->qos_work); |
1412 |
+ |
1413 |
++ if (device_may_wakeup(dev)) |
1414 |
++ serial_omap_enable_wakeup(up, true); |
1415 |
++ else |
1416 |
++ serial_omap_enable_wakeup(up, false); |
1417 |
++ |
1418 |
+ return 0; |
1419 |
+ } |
1420 |
+ |
1421 |
+@@ -1495,6 +1505,9 @@ static int serial_omap_resume(struct device *dev) |
1422 |
+ { |
1423 |
+ struct uart_omap_port *up = dev_get_drvdata(dev); |
1424 |
+ |
1425 |
++ if (device_may_wakeup(dev)) |
1426 |
++ serial_omap_enable_wakeup(up, false); |
1427 |
++ |
1428 |
+ uart_resume_port(&serial_omap_reg, &up->port); |
1429 |
+ |
1430 |
+ return 0; |
1431 |
+@@ -1870,17 +1883,7 @@ static int serial_omap_runtime_suspend(struct device *dev) |
1432 |
+ |
1433 |
+ up->context_loss_cnt = serial_omap_get_context_loss_count(up); |
1434 |
+ |
1435 |
+- if (device_may_wakeup(dev)) { |
1436 |
+- if (!up->wakeups_enabled) { |
1437 |
+- serial_omap_enable_wakeup(up, true); |
1438 |
+- up->wakeups_enabled = true; |
1439 |
+- } |
1440 |
+- } else { |
1441 |
+- if (up->wakeups_enabled) { |
1442 |
+- serial_omap_enable_wakeup(up, false); |
1443 |
+- up->wakeups_enabled = false; |
1444 |
+- } |
1445 |
+- } |
1446 |
++ serial_omap_enable_wakeup(up, true); |
1447 |
+ |
1448 |
+ up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE; |
1449 |
+ schedule_work(&up->qos_work); |
1450 |
+@@ -1894,6 +1897,8 @@ static int serial_omap_runtime_resume(struct device *dev) |
1451 |
+ |
1452 |
+ int loss_cnt = serial_omap_get_context_loss_count(up); |
1453 |
+ |
1454 |
++ serial_omap_enable_wakeup(up, false); |
1455 |
++ |
1456 |
+ if (loss_cnt < 0) { |
1457 |
+ dev_dbg(dev, "serial_omap_get_context_loss_count failed : %d\n", |
1458 |
+ loss_cnt); |
1459 |
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c |
1460 |
+index 900f7ff805ee..7783acabe443 100644 |
1461 |
+--- a/drivers/usb/class/cdc-acm.c |
1462 |
++++ b/drivers/usb/class/cdc-acm.c |
1463 |
+@@ -1652,13 +1652,27 @@ static const struct usb_device_id acm_ids[] = { |
1464 |
+ }, |
1465 |
+ /* Motorola H24 HSPA module: */ |
1466 |
+ { USB_DEVICE(0x22b8, 0x2d91) }, /* modem */ |
1467 |
+- { USB_DEVICE(0x22b8, 0x2d92) }, /* modem + diagnostics */ |
1468 |
+- { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port */ |
1469 |
+- { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics */ |
1470 |
+- { USB_DEVICE(0x22b8, 0x2d96) }, /* modem + NMEA */ |
1471 |
+- { USB_DEVICE(0x22b8, 0x2d97) }, /* modem + diagnostics + NMEA */ |
1472 |
+- { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port + NMEA */ |
1473 |
+- { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */ |
1474 |
++ { USB_DEVICE(0x22b8, 0x2d92), /* modem + diagnostics */ |
1475 |
++ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */ |
1476 |
++ }, |
1477 |
++ { USB_DEVICE(0x22b8, 0x2d93), /* modem + AT port */ |
1478 |
++ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */ |
1479 |
++ }, |
1480 |
++ { USB_DEVICE(0x22b8, 0x2d95), /* modem + AT port + diagnostics */ |
1481 |
++ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */ |
1482 |
++ }, |
1483 |
++ { USB_DEVICE(0x22b8, 0x2d96), /* modem + NMEA */ |
1484 |
++ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */ |
1485 |
++ }, |
1486 |
++ { USB_DEVICE(0x22b8, 0x2d97), /* modem + diagnostics + NMEA */ |
1487 |
++ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */ |
1488 |
++ }, |
1489 |
++ { USB_DEVICE(0x22b8, 0x2d99), /* modem + AT port + NMEA */ |
1490 |
++ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */ |
1491 |
++ }, |
1492 |
++ { USB_DEVICE(0x22b8, 0x2d9a), /* modem + AT port + diagnostics + NMEA */ |
1493 |
++ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */ |
1494 |
++ }, |
1495 |
+ |
1496 |
+ { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */ |
1497 |
+ .driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on |
1498 |
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c |
1499 |
+index 95fa1217afdd..762e4a5f5ae9 100644 |
1500 |
+--- a/drivers/usb/serial/cp210x.c |
1501 |
++++ b/drivers/usb/serial/cp210x.c |
1502 |
+@@ -104,6 +104,7 @@ static const struct usb_device_id id_table[] = { |
1503 |
+ { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */ |
1504 |
+ { USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */ |
1505 |
+ { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */ |
1506 |
++ { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */ |
1507 |
+ { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */ |
1508 |
+ { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ |
1509 |
+ { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ |
1510 |
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c |
1511 |
+index 44ab12986805..7c6e1dedeb06 100644 |
1512 |
+--- a/drivers/usb/serial/ftdi_sio.c |
1513 |
++++ b/drivers/usb/serial/ftdi_sio.c |
1514 |
+@@ -909,6 +909,39 @@ static const struct usb_device_id id_table_combined[] = { |
1515 |
+ { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) }, |
1516 |
+ /* Cressi Devices */ |
1517 |
+ { USB_DEVICE(FTDI_VID, FTDI_CRESSI_PID) }, |
1518 |
++ /* Brainboxes Devices */ |
1519 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_001_PID) }, |
1520 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_012_PID) }, |
1521 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_023_PID) }, |
1522 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_034_PID) }, |
1523 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_101_PID) }, |
1524 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_1_PID) }, |
1525 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_2_PID) }, |
1526 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_3_PID) }, |
1527 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_4_PID) }, |
1528 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_5_PID) }, |
1529 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_6_PID) }, |
1530 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_7_PID) }, |
1531 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_8_PID) }, |
1532 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_257_PID) }, |
1533 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_1_PID) }, |
1534 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_2_PID) }, |
1535 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_3_PID) }, |
1536 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_4_PID) }, |
1537 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_313_PID) }, |
1538 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_324_PID) }, |
1539 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_1_PID) }, |
1540 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_2_PID) }, |
1541 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_357_PID) }, |
1542 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_1_PID) }, |
1543 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_2_PID) }, |
1544 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_3_PID) }, |
1545 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_701_1_PID) }, |
1546 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_701_2_PID) }, |
1547 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_1_PID) }, |
1548 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) }, |
1549 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) }, |
1550 |
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) }, |
1551 |
+ { } /* Terminating entry */ |
1552 |
+ }; |
1553 |
+ |
1554 |
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h |
1555 |
+index e599fbfcde5f..993c93df6874 100644 |
1556 |
+--- a/drivers/usb/serial/ftdi_sio_ids.h |
1557 |
++++ b/drivers/usb/serial/ftdi_sio_ids.h |
1558 |
+@@ -1326,3 +1326,40 @@ |
1559 |
+ * Manufacturer: Cressi |
1560 |
+ */ |
1561 |
+ #define FTDI_CRESSI_PID 0x87d0 |
1562 |
++ |
1563 |
++/* |
1564 |
++ * Brainboxes devices |
1565 |
++ */ |
1566 |
++#define BRAINBOXES_VID 0x05d1 |
1567 |
++#define BRAINBOXES_VX_001_PID 0x1001 /* VX-001 ExpressCard 1 Port RS232 */ |
1568 |
++#define BRAINBOXES_VX_012_PID 0x1002 /* VX-012 ExpressCard 2 Port RS232 */ |
1569 |
++#define BRAINBOXES_VX_023_PID 0x1003 /* VX-023 ExpressCard 1 Port RS422/485 */ |
1570 |
++#define BRAINBOXES_VX_034_PID 0x1004 /* VX-034 ExpressCard 2 Port RS422/485 */ |
1571 |
++#define BRAINBOXES_US_101_PID 0x1011 /* US-101 1xRS232 */ |
1572 |
++#define BRAINBOXES_US_324_PID 0x1013 /* US-324 1xRS422/485 1Mbaud */ |
1573 |
++#define BRAINBOXES_US_606_1_PID 0x2001 /* US-606 6 Port RS232 Serial Port 1 and 2 */ |
1574 |
++#define BRAINBOXES_US_606_2_PID 0x2002 /* US-606 6 Port RS232 Serial Port 3 and 4 */ |
1575 |
++#define BRAINBOXES_US_606_3_PID	0x2003 /* US-606 6 Port RS232 Serial Port 5 and 6 */ |
1576 |
++#define BRAINBOXES_US_701_1_PID 0x2011 /* US-701 4xRS232 1Mbaud Port 1 and 2 */ |
1577 |
++#define BRAINBOXES_US_701_2_PID 0x2012 /* US-701 4xRS422 1Mbaud Port 3 and 4 */ |
1578 |
++#define BRAINBOXES_US_279_1_PID 0x2021 /* US-279 8xRS422 1Mbaud Port 1 and 2 */ |
1579 |
++#define BRAINBOXES_US_279_2_PID 0x2022 /* US-279 8xRS422 1Mbaud Port 3 and 4 */ |
1580 |
++#define BRAINBOXES_US_279_3_PID 0x2023 /* US-279 8xRS422 1Mbaud Port 5 and 6 */ |
1581 |
++#define BRAINBOXES_US_279_4_PID 0x2024 /* US-279 8xRS422 1Mbaud Port 7 and 8 */ |
1582 |
++#define BRAINBOXES_US_346_1_PID 0x3011 /* US-346 4xRS422/485 1Mbaud Port 1 and 2 */ |
1583 |
++#define BRAINBOXES_US_346_2_PID 0x3012 /* US-346 4xRS422/485 1Mbaud Port 3 and 4 */ |
1584 |
++#define BRAINBOXES_US_257_PID 0x5001 /* US-257 2xRS232 1Mbaud */ |
1585 |
++#define BRAINBOXES_US_313_PID 0x6001 /* US-313 2xRS422/485 1Mbaud */ |
1586 |
++#define BRAINBOXES_US_357_PID 0x7001 /* US_357 1xRS232/422/485 */ |
1587 |
++#define BRAINBOXES_US_842_1_PID 0x8001 /* US-842 8xRS422/485 1Mbaud Port 1 and 2 */ |
1588 |
++#define BRAINBOXES_US_842_2_PID 0x8002 /* US-842 8xRS422/485 1Mbaud Port 3 and 4 */ |
1589 |
++#define BRAINBOXES_US_842_3_PID 0x8003 /* US-842 8xRS422/485 1Mbaud Port 5 and 6 */ |
1590 |
++#define BRAINBOXES_US_842_4_PID 0x8004 /* US-842 8xRS422/485 1Mbaud Port 7 and 8 */ |
1591 |
++#define BRAINBOXES_US_160_1_PID 0x9001 /* US-160 16xRS232 1Mbaud Port 1 and 2 */ |
1592 |
++#define BRAINBOXES_US_160_2_PID 0x9002 /* US-160 16xRS232 1Mbaud Port 3 and 4 */ |
1593 |
++#define BRAINBOXES_US_160_3_PID 0x9003 /* US-160 16xRS232 1Mbaud Port 5 and 6 */ |
1594 |
++#define BRAINBOXES_US_160_4_PID 0x9004 /* US-160 16xRS232 1Mbaud Port 7 and 8 */ |
1595 |
++#define BRAINBOXES_US_160_5_PID 0x9005 /* US-160 16xRS232 1Mbaud Port 9 and 10 */ |
1596 |
++#define BRAINBOXES_US_160_6_PID 0x9006 /* US-160 16xRS232 1Mbaud Port 11 and 12 */ |
1597 |
++#define BRAINBOXES_US_160_7_PID 0x9007 /* US-160 16xRS232 1Mbaud Port 13 and 14 */ |
1598 |
++#define BRAINBOXES_US_160_8_PID 0x9008 /* US-160 16xRS232 1Mbaud Port 15 and 16 */ |
1599 |
+diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c |
1600 |
+index a2db5be9c305..df90dae53eb9 100644 |
1601 |
+--- a/drivers/usb/serial/io_ti.c |
1602 |
++++ b/drivers/usb/serial/io_ti.c |
1603 |
+@@ -28,6 +28,7 @@ |
1604 |
+ #include <linux/spinlock.h> |
1605 |
+ #include <linux/mutex.h> |
1606 |
+ #include <linux/serial.h> |
1607 |
++#include <linux/swab.h> |
1608 |
+ #include <linux/kfifo.h> |
1609 |
+ #include <linux/ioctl.h> |
1610 |
+ #include <linux/firmware.h> |
1611 |
+@@ -280,7 +281,7 @@ static int read_download_mem(struct usb_device *dev, int start_address, |
1612 |
+ { |
1613 |
+ int status = 0; |
1614 |
+ __u8 read_length; |
1615 |
+- __be16 be_start_address; |
1616 |
++ u16 be_start_address; |
1617 |
+ |
1618 |
+ dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, length); |
1619 |
+ |
1620 |
+@@ -296,10 +297,14 @@ static int read_download_mem(struct usb_device *dev, int start_address, |
1621 |
+ if (read_length > 1) { |
1622 |
+ dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, read_length); |
1623 |
+ } |
1624 |
+- be_start_address = cpu_to_be16(start_address); |
1625 |
++ /* |
1626 |
++ * NOTE: Must use swab as wIndex is sent in little-endian |
1627 |
++ * byte order regardless of host byte order. |
1628 |
++ */ |
1629 |
++ be_start_address = swab16((u16)start_address); |
1630 |
+ status = ti_vread_sync(dev, UMPC_MEMORY_READ, |
1631 |
+ (__u16)address_type, |
1632 |
+- (__force __u16)be_start_address, |
1633 |
++ be_start_address, |
1634 |
+ buffer, read_length); |
1635 |
+ |
1636 |
+ if (status) { |
1637 |
+@@ -394,7 +399,7 @@ static int write_i2c_mem(struct edgeport_serial *serial, |
1638 |
+ struct device *dev = &serial->serial->dev->dev; |
1639 |
+ int status = 0; |
1640 |
+ int write_length; |
1641 |
+- __be16 be_start_address; |
1642 |
++ u16 be_start_address; |
1643 |
+ |
1644 |
+ /* We can only send a maximum of 1 aligned byte page at a time */ |
1645 |
+ |
1646 |
+@@ -409,11 +414,16 @@ static int write_i2c_mem(struct edgeport_serial *serial, |
1647 |
+ __func__, start_address, write_length); |
1648 |
+ usb_serial_debug_data(dev, __func__, write_length, buffer); |
1649 |
+ |
1650 |
+- /* Write first page */ |
1651 |
+- be_start_address = cpu_to_be16(start_address); |
1652 |
++ /* |
1653 |
++ * Write first page. |
1654 |
++ * |
1655 |
++ * NOTE: Must use swab as wIndex is sent in little-endian byte order |
1656 |
++ * regardless of host byte order. |
1657 |
++ */ |
1658 |
++ be_start_address = swab16((u16)start_address); |
1659 |
+ status = ti_vsend_sync(serial->serial->dev, |
1660 |
+ UMPC_MEMORY_WRITE, (__u16)address_type, |
1661 |
+- (__force __u16)be_start_address, |
1662 |
++ be_start_address, |
1663 |
+ buffer, write_length); |
1664 |
+ if (status) { |
1665 |
+ dev_dbg(dev, "%s - ERROR %d\n", __func__, status); |
1666 |
+@@ -436,11 +446,16 @@ static int write_i2c_mem(struct edgeport_serial *serial, |
1667 |
+ __func__, start_address, write_length); |
1668 |
+ usb_serial_debug_data(dev, __func__, write_length, buffer); |
1669 |
+ |
1670 |
+- /* Write next page */ |
1671 |
+- be_start_address = cpu_to_be16(start_address); |
1672 |
++ /* |
1673 |
++ * Write next page. |
1674 |
++ * |
1675 |
++ * NOTE: Must use swab as wIndex is sent in little-endian byte |
1676 |
++ * order regardless of host byte order. |
1677 |
++ */ |
1678 |
++ be_start_address = swab16((u16)start_address); |
1679 |
+ status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE, |
1680 |
+ (__u16)address_type, |
1681 |
+- (__force __u16)be_start_address, |
1682 |
++ be_start_address, |
1683 |
+ buffer, write_length); |
1684 |
+ if (status) { |
1685 |
+ dev_err(dev, "%s - ERROR %d\n", __func__, status); |
1686 |
+@@ -585,8 +600,8 @@ static int get_descriptor_addr(struct edgeport_serial *serial, |
1687 |
+ if (rom_desc->Type == desc_type) |
1688 |
+ return start_address; |
1689 |
+ |
1690 |
+- start_address = start_address + sizeof(struct ti_i2c_desc) |
1691 |
+- + rom_desc->Size; |
1692 |
++ start_address = start_address + sizeof(struct ti_i2c_desc) + |
1693 |
++ le16_to_cpu(rom_desc->Size); |
1694 |
+ |
1695 |
+ } while ((start_address < TI_MAX_I2C_SIZE) && rom_desc->Type); |
1696 |
+ |
1697 |
+@@ -599,7 +614,7 @@ static int valid_csum(struct ti_i2c_desc *rom_desc, __u8 *buffer) |
1698 |
+ __u16 i; |
1699 |
+ __u8 cs = 0; |
1700 |
+ |
1701 |
+- for (i = 0; i < rom_desc->Size; i++) |
1702 |
++ for (i = 0; i < le16_to_cpu(rom_desc->Size); i++) |
1703 |
+ cs = (__u8)(cs + buffer[i]); |
1704 |
+ |
1705 |
+ if (cs != rom_desc->CheckSum) { |
1706 |
+@@ -650,7 +665,7 @@ static int check_i2c_image(struct edgeport_serial *serial) |
1707 |
+ break; |
1708 |
+ |
1709 |
+ if ((start_address + sizeof(struct ti_i2c_desc) + |
1710 |
+- rom_desc->Size) > TI_MAX_I2C_SIZE) { |
1711 |
++ le16_to_cpu(rom_desc->Size)) > TI_MAX_I2C_SIZE) { |
1712 |
+ status = -ENODEV; |
1713 |
+ dev_dbg(dev, "%s - structure too big, erroring out.\n", __func__); |
1714 |
+ break; |
1715 |
+@@ -665,7 +680,8 @@ static int check_i2c_image(struct edgeport_serial *serial) |
1716 |
+ /* Read the descriptor data */ |
1717 |
+ status = read_rom(serial, start_address + |
1718 |
+ sizeof(struct ti_i2c_desc), |
1719 |
+- rom_desc->Size, buffer); |
1720 |
++ le16_to_cpu(rom_desc->Size), |
1721 |
++ buffer); |
1722 |
+ if (status) |
1723 |
+ break; |
1724 |
+ |
1725 |
+@@ -674,7 +690,7 @@ static int check_i2c_image(struct edgeport_serial *serial) |
1726 |
+ break; |
1727 |
+ } |
1728 |
+ start_address = start_address + sizeof(struct ti_i2c_desc) + |
1729 |
+- rom_desc->Size; |
1730 |
++ le16_to_cpu(rom_desc->Size); |
1731 |
+ |
1732 |
+ } while ((rom_desc->Type != I2C_DESC_TYPE_ION) && |
1733 |
+ (start_address < TI_MAX_I2C_SIZE)); |
1734 |
+@@ -712,7 +728,7 @@ static int get_manuf_info(struct edgeport_serial *serial, __u8 *buffer) |
1735 |
+ |
1736 |
+ /* Read the descriptor data */ |
1737 |
+ status = read_rom(serial, start_address+sizeof(struct ti_i2c_desc), |
1738 |
+- rom_desc->Size, buffer); |
1739 |
++ le16_to_cpu(rom_desc->Size), buffer); |
1740 |
+ if (status) |
1741 |
+ goto exit; |
1742 |
+ |
1743 |
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
1744 |
+index 68fc9fe65936..f213ee978516 100644 |
1745 |
+--- a/drivers/usb/serial/option.c |
1746 |
++++ b/drivers/usb/serial/option.c |
1747 |
+@@ -234,8 +234,31 @@ static void option_instat_callback(struct urb *urb); |
1748 |
+ #define QUALCOMM_VENDOR_ID 0x05C6 |
1749 |
+ |
1750 |
+ #define CMOTECH_VENDOR_ID 0x16d8 |
1751 |
+-#define CMOTECH_PRODUCT_6008 0x6008 |
1752 |
+-#define CMOTECH_PRODUCT_6280 0x6280 |
1753 |
++#define CMOTECH_PRODUCT_6001 0x6001 |
1754 |
++#define CMOTECH_PRODUCT_CMU_300 0x6002 |
1755 |
++#define CMOTECH_PRODUCT_6003 0x6003 |
1756 |
++#define CMOTECH_PRODUCT_6004 0x6004 |
1757 |
++#define CMOTECH_PRODUCT_6005 0x6005 |
1758 |
++#define CMOTECH_PRODUCT_CGU_628A 0x6006 |
1759 |
++#define CMOTECH_PRODUCT_CHE_628S 0x6007 |
1760 |
++#define CMOTECH_PRODUCT_CMU_301 0x6008 |
1761 |
++#define CMOTECH_PRODUCT_CHU_628 0x6280 |
1762 |
++#define CMOTECH_PRODUCT_CHU_628S 0x6281 |
1763 |
++#define CMOTECH_PRODUCT_CDU_680 0x6803 |
1764 |
++#define CMOTECH_PRODUCT_CDU_685A 0x6804 |
1765 |
++#define CMOTECH_PRODUCT_CHU_720S 0x7001 |
1766 |
++#define CMOTECH_PRODUCT_7002 0x7002 |
1767 |
++#define CMOTECH_PRODUCT_CHU_629K 0x7003 |
1768 |
++#define CMOTECH_PRODUCT_7004 0x7004 |
1769 |
++#define CMOTECH_PRODUCT_7005 0x7005 |
1770 |
++#define CMOTECH_PRODUCT_CGU_629 0x7006 |
1771 |
++#define CMOTECH_PRODUCT_CHU_629S 0x700a |
1772 |
++#define CMOTECH_PRODUCT_CHU_720I 0x7211 |
1773 |
++#define CMOTECH_PRODUCT_7212 0x7212 |
1774 |
++#define CMOTECH_PRODUCT_7213 0x7213 |
1775 |
++#define CMOTECH_PRODUCT_7251 0x7251 |
1776 |
++#define CMOTECH_PRODUCT_7252 0x7252 |
1777 |
++#define CMOTECH_PRODUCT_7253 0x7253 |
1778 |
+ |
1779 |
+ #define TELIT_VENDOR_ID 0x1bc7 |
1780 |
+ #define TELIT_PRODUCT_UC864E 0x1003 |
1781 |
+@@ -243,6 +266,7 @@ static void option_instat_callback(struct urb *urb); |
1782 |
+ #define TELIT_PRODUCT_CC864_DUAL 0x1005 |
1783 |
+ #define TELIT_PRODUCT_CC864_SINGLE 0x1006 |
1784 |
+ #define TELIT_PRODUCT_DE910_DUAL 0x1010 |
1785 |
++#define TELIT_PRODUCT_UE910_V2 0x1012 |
1786 |
+ #define TELIT_PRODUCT_LE920 0x1200 |
1787 |
+ |
1788 |
+ /* ZTE PRODUCTS */ |
1789 |
+@@ -286,6 +310,7 @@ static void option_instat_callback(struct urb *urb); |
1790 |
+ #define ALCATEL_PRODUCT_X060S_X200 0x0000 |
1791 |
+ #define ALCATEL_PRODUCT_X220_X500D 0x0017 |
1792 |
+ #define ALCATEL_PRODUCT_L100V 0x011e |
1793 |
++#define ALCATEL_PRODUCT_L800MA 0x0203 |
1794 |
+ |
1795 |
+ #define PIRELLI_VENDOR_ID 0x1266 |
1796 |
+ #define PIRELLI_PRODUCT_C100_1 0x1002 |
1797 |
+@@ -348,6 +373,7 @@ static void option_instat_callback(struct urb *urb); |
1798 |
+ #define OLIVETTI_PRODUCT_OLICARD100 0xc000 |
1799 |
+ #define OLIVETTI_PRODUCT_OLICARD145 0xc003 |
1800 |
+ #define OLIVETTI_PRODUCT_OLICARD200 0xc005 |
1801 |
++#define OLIVETTI_PRODUCT_OLICARD500 0xc00b |
1802 |
+ |
1803 |
+ /* Celot products */ |
1804 |
+ #define CELOT_VENDOR_ID 0x211f |
1805 |
+@@ -501,6 +527,10 @@ static const struct option_blacklist_info huawei_cdc12_blacklist = { |
1806 |
+ .reserved = BIT(1) | BIT(2), |
1807 |
+ }; |
1808 |
+ |
1809 |
++static const struct option_blacklist_info net_intf0_blacklist = { |
1810 |
++ .reserved = BIT(0), |
1811 |
++}; |
1812 |
++ |
1813 |
+ static const struct option_blacklist_info net_intf1_blacklist = { |
1814 |
+ .reserved = BIT(1), |
1815 |
+ }; |
1816 |
+@@ -1034,13 +1064,53 @@ static const struct usb_device_id option_ids[] = { |
1817 |
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ |
1818 |
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ |
1819 |
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ |
1820 |
+- { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */ |
1821 |
+- { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) }, |
1822 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, |
1823 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, |
1824 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), |
1825 |
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, |
1826 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) }, |
1827 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) }, |
1828 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) }, |
1829 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S), |
1830 |
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, |
1831 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301), |
1832 |
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, |
1833 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628), |
1834 |
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, |
1835 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) }, |
1836 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) }, |
1837 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) }, |
1838 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S), |
1839 |
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, |
1840 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002), |
1841 |
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, |
1842 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K), |
1843 |
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
1844 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004), |
1845 |
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, |
1846 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) }, |
1847 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629), |
1848 |
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, |
1849 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S), |
1850 |
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
1851 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I), |
1852 |
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, |
1853 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212), |
1854 |
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, |
1855 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213), |
1856 |
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, |
1857 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251), |
1858 |
++ .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, |
1859 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252), |
1860 |
++ .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, |
1861 |
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253), |
1862 |
++ .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, |
1863 |
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, |
1864 |
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) }, |
1865 |
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) }, |
1866 |
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, |
1867 |
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, |
1868 |
++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) }, |
1869 |
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), |
1870 |
+ .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, |
1871 |
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ |
1872 |
+@@ -1498,6 +1568,8 @@ static const struct usb_device_id option_ids[] = { |
1873 |
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, |
1874 |
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V), |
1875 |
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
1876 |
++ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA), |
1877 |
++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, |
1878 |
+ { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, |
1879 |
+ { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, |
1880 |
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), |
1881 |
+@@ -1543,6 +1615,9 @@ static const struct usb_device_id option_ids[] = { |
1882 |
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200), |
1883 |
+ .driver_info = (kernel_ulong_t)&net_intf6_blacklist |
1884 |
+ }, |
1885 |
++ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500), |
1886 |
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist |
1887 |
++ }, |
1888 |
+ { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ |
1889 |
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ |
1890 |
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, |
1891 |
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c |
1892 |
+index 968a40201e5f..7ed681a714a5 100644 |
1893 |
+--- a/drivers/usb/serial/qcserial.c |
1894 |
++++ b/drivers/usb/serial/qcserial.c |
1895 |
+@@ -136,9 +136,18 @@ static const struct usb_device_id id_table[] = { |
1896 |
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 0)}, /* Sierra Wireless MC7710 Device Management */ |
1897 |
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 2)}, /* Sierra Wireless MC7710 NMEA */ |
1898 |
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 3)}, /* Sierra Wireless MC7710 Modem */ |
1899 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 0)}, /* Sierra Wireless MC73xx Device Management */ |
1900 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 2)}, /* Sierra Wireless MC73xx NMEA */ |
1901 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 3)}, /* Sierra Wireless MC73xx Modem */ |
1902 |
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */ |
1903 |
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */ |
1904 |
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */ |
1905 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 0)}, /* Sierra Wireless EM7355 Device Management */ |
1906 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 2)}, /* Sierra Wireless EM7355 NMEA */ |
1907 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 3)}, /* Sierra Wireless EM7355 Modem */ |
1908 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 0)}, /* Sierra Wireless MC7305/MC7355 Device Management */ |
1909 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 2)}, /* Sierra Wireless MC7305/MC7355 NMEA */ |
1910 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 3)}, /* Sierra Wireless MC7305/MC7355 Modem */ |
1911 |
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */ |
1912 |
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */ |
1913 |
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */ |
1914 |
+diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c |
1915 |
+index a9eb6221a815..6b192e602ce0 100644 |
1916 |
+--- a/drivers/usb/serial/sierra.c |
1917 |
++++ b/drivers/usb/serial/sierra.c |
1918 |
+@@ -291,7 +291,6 @@ static const struct usb_device_id id_table[] = { |
1919 |
+ { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */ |
1920 |
+ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist |
1921 |
+ }, |
1922 |
+- { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */ |
1923 |
+ |
1924 |
+ { } |
1925 |
+ }; |
1926 |
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c |
1927 |
+index 7c9dc28640bb..b169b0f9b3a2 100644 |
1928 |
+--- a/drivers/usb/serial/usb-serial.c |
1929 |
++++ b/drivers/usb/serial/usb-serial.c |
1930 |
+@@ -1348,10 +1348,12 @@ static int usb_serial_register(struct usb_serial_driver *driver) |
1931 |
+ static void usb_serial_deregister(struct usb_serial_driver *device) |
1932 |
+ { |
1933 |
+ pr_info("USB Serial deregistering driver %s\n", device->description); |
1934 |
++ |
1935 |
+ mutex_lock(&table_lock); |
1936 |
+ list_del(&device->driver_list); |
1937 |
+- usb_serial_bus_deregister(device); |
1938 |
+ mutex_unlock(&table_lock); |
1939 |
++ |
1940 |
++ usb_serial_bus_deregister(device); |
1941 |
+ } |
1942 |
+ |
1943 |
+ /** |
1944 |
+diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c |
1945 |
+index 640fe0173236..b078440e822f 100644 |
1946 |
+--- a/drivers/usb/serial/usb_wwan.c |
1947 |
++++ b/drivers/usb/serial/usb_wwan.c |
1948 |
+@@ -466,6 +466,9 @@ int usb_wwan_port_probe(struct usb_serial_port *port) |
1949 |
+ int err; |
1950 |
+ int i; |
1951 |
+ |
1952 |
++ if (!port->bulk_in_size || !port->bulk_out_size) |
1953 |
++ return -ENODEV; |
1954 |
++ |
1955 |
+ portdata = kzalloc(sizeof(*portdata), GFP_KERNEL); |
1956 |
+ if (!portdata) |
1957 |
+ return -ENOMEM; |
1958 |
+@@ -473,9 +476,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port) |
1959 |
+ init_usb_anchor(&portdata->delayed); |
1960 |
+ |
1961 |
+ for (i = 0; i < N_IN_URB; i++) { |
1962 |
+- if (!port->bulk_in_size) |
1963 |
+- break; |
1964 |
+- |
1965 |
+ buffer = (u8 *)__get_free_page(GFP_KERNEL); |
1966 |
+ if (!buffer) |
1967 |
+ goto bail_out_error; |
1968 |
+@@ -489,9 +489,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port) |
1969 |
+ } |
1970 |
+ |
1971 |
+ for (i = 0; i < N_OUT_URB; i++) { |
1972 |
+- if (!port->bulk_out_size) |
1973 |
+- break; |
1974 |
+- |
1975 |
+ buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL); |
1976 |
+ if (!buffer) |
1977 |
+ goto bail_out_error2; |
1978 |
+diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c |
1979 |
+index 1de2a191b395..640b3cf1a338 100644 |
1980 |
+--- a/drivers/xen/events/events_fifo.c |
1981 |
++++ b/drivers/xen/events/events_fifo.c |
1982 |
+@@ -66,7 +66,22 @@ static DEFINE_PER_CPU(struct evtchn_fifo_queue, cpu_queue); |
1983 |
+ static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly; |
1984 |
+ static unsigned event_array_pages __read_mostly; |
1985 |
+ |
1986 |
++/* |
1987 |
++ * sync_set_bit() and friends must be unsigned long aligned on non-x86 |
1988 |
++ * platforms. |
1989 |
++ */ |
1990 |
++#if !defined(CONFIG_X86) && BITS_PER_LONG > 32 |
1991 |
++ |
1992 |
++#define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL) |
1993 |
++#define EVTCHN_FIFO_BIT(b, w) \ |
1994 |
++ (((unsigned long)w & 0x4UL) ? (EVTCHN_FIFO_ ##b + 32) : EVTCHN_FIFO_ ##b) |
1995 |
++ |
1996 |
++#else |
1997 |
++ |
1998 |
+ #define BM(w) ((unsigned long *)(w)) |
1999 |
++#define EVTCHN_FIFO_BIT(b, w) EVTCHN_FIFO_ ##b |
2000 |
++ |
2001 |
++#endif |
2002 |
+ |
2003 |
+ static inline event_word_t *event_word_from_port(unsigned port) |
2004 |
+ { |
2005 |
+@@ -161,33 +176,38 @@ static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu) |
2006 |
+ static void evtchn_fifo_clear_pending(unsigned port) |
2007 |
+ { |
2008 |
+ event_word_t *word = event_word_from_port(port); |
2009 |
+- sync_clear_bit(EVTCHN_FIFO_PENDING, BM(word)); |
2010 |
++ sync_clear_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); |
2011 |
+ } |
2012 |
+ |
2013 |
+ static void evtchn_fifo_set_pending(unsigned port) |
2014 |
+ { |
2015 |
+ event_word_t *word = event_word_from_port(port); |
2016 |
+- sync_set_bit(EVTCHN_FIFO_PENDING, BM(word)); |
2017 |
++ sync_set_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); |
2018 |
+ } |
2019 |
+ |
2020 |
+ static bool evtchn_fifo_is_pending(unsigned port) |
2021 |
+ { |
2022 |
+ event_word_t *word = event_word_from_port(port); |
2023 |
+- return sync_test_bit(EVTCHN_FIFO_PENDING, BM(word)); |
2024 |
++ return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); |
2025 |
+ } |
2026 |
+ |
2027 |
+ static bool evtchn_fifo_test_and_set_mask(unsigned port) |
2028 |
+ { |
2029 |
+ event_word_t *word = event_word_from_port(port); |
2030 |
+- return sync_test_and_set_bit(EVTCHN_FIFO_MASKED, BM(word)); |
2031 |
++ return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); |
2032 |
+ } |
2033 |
+ |
2034 |
+ static void evtchn_fifo_mask(unsigned port) |
2035 |
+ { |
2036 |
+ event_word_t *word = event_word_from_port(port); |
2037 |
+- sync_set_bit(EVTCHN_FIFO_MASKED, BM(word)); |
2038 |
++ sync_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); |
2039 |
+ } |
2040 |
+ |
2041 |
++static bool evtchn_fifo_is_masked(unsigned port) |
2042 |
++{ |
2043 |
++ event_word_t *word = event_word_from_port(port); |
2044 |
++ return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); |
2045 |
++} |
2046 |
+ /* |
2047 |
+ * Clear MASKED, spinning if BUSY is set. |
2048 |
+ */ |
2049 |
+@@ -211,7 +231,7 @@ static void evtchn_fifo_unmask(unsigned port) |
2050 |
+ BUG_ON(!irqs_disabled()); |
2051 |
+ |
2052 |
+ clear_masked(word); |
2053 |
+- if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))) { |
2054 |
++ if (evtchn_fifo_is_pending(port)) { |
2055 |
+ struct evtchn_unmask unmask = { .port = port }; |
2056 |
+ (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask); |
2057 |
+ } |
2058 |
+@@ -247,7 +267,7 @@ static void handle_irq_for_port(unsigned port) |
2059 |
+ |
2060 |
+ static void consume_one_event(unsigned cpu, |
2061 |
+ struct evtchn_fifo_control_block *control_block, |
2062 |
+- unsigned priority, uint32_t *ready) |
2063 |
++ unsigned priority, unsigned long *ready) |
2064 |
+ { |
2065 |
+ struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); |
2066 |
+ uint32_t head; |
2067 |
+@@ -277,10 +297,9 @@ static void consume_one_event(unsigned cpu, |
2068 |
+ * copy of the ready word. |
2069 |
+ */ |
2070 |
+ if (head == 0) |
2071 |
+- clear_bit(priority, BM(ready)); |
2072 |
++ clear_bit(priority, ready); |
2073 |
+ |
2074 |
+- if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word)) |
2075 |
+- && !sync_test_bit(EVTCHN_FIFO_MASKED, BM(word))) |
2076 |
++ if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) |
2077 |
+ handle_irq_for_port(port); |
2078 |
+ |
2079 |
+ q->head[priority] = head; |
2080 |
+@@ -289,7 +308,7 @@ static void consume_one_event(unsigned cpu, |
2081 |
+ static void evtchn_fifo_handle_events(unsigned cpu) |
2082 |
+ { |
2083 |
+ struct evtchn_fifo_control_block *control_block; |
2084 |
+- uint32_t ready; |
2085 |
++ unsigned long ready; |
2086 |
+ unsigned q; |
2087 |
+ |
2088 |
+ control_block = per_cpu(cpu_control_block, cpu); |
2089 |
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c |
2090 |
+index 849f6132b327..7c6b73c72210 100644 |
2091 |
+--- a/fs/cifs/cifsfs.c |
2092 |
++++ b/fs/cifs/cifsfs.c |
2093 |
+@@ -253,6 +253,11 @@ cifs_alloc_inode(struct super_block *sb) |
2094 |
+ cifs_set_oplock_level(cifs_inode, 0); |
2095 |
+ cifs_inode->delete_pending = false; |
2096 |
+ cifs_inode->invalid_mapping = false; |
2097 |
++ clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cifs_inode->flags); |
2098 |
++ clear_bit(CIFS_INODE_PENDING_WRITERS, &cifs_inode->flags); |
2099 |
++ clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cifs_inode->flags); |
2100 |
++ spin_lock_init(&cifs_inode->writers_lock); |
2101 |
++ cifs_inode->writers = 0; |
2102 |
+ cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */ |
2103 |
+ cifs_inode->server_eof = 0; |
2104 |
+ cifs_inode->uniqueid = 0; |
2105 |
+@@ -731,19 +736,26 @@ static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, |
2106 |
+ unsigned long nr_segs, loff_t pos) |
2107 |
+ { |
2108 |
+ struct inode *inode = file_inode(iocb->ki_filp); |
2109 |
++ struct cifsInodeInfo *cinode = CIFS_I(inode); |
2110 |
+ ssize_t written; |
2111 |
+ int rc; |
2112 |
+ |
2113 |
++ written = cifs_get_writer(cinode); |
2114 |
++ if (written) |
2115 |
++ return written; |
2116 |
++ |
2117 |
+ written = generic_file_aio_write(iocb, iov, nr_segs, pos); |
2118 |
+ |
2119 |
+ if (CIFS_CACHE_WRITE(CIFS_I(inode))) |
2120 |
+- return written; |
2121 |
++ goto out; |
2122 |
+ |
2123 |
+ rc = filemap_fdatawrite(inode->i_mapping); |
2124 |
+ if (rc) |
2125 |
+ cifs_dbg(FYI, "cifs_file_aio_write: %d rc on %p inode\n", |
2126 |
+ rc, inode); |
2127 |
+ |
2128 |
++out: |
2129 |
++ cifs_put_writer(cinode); |
2130 |
+ return written; |
2131 |
+ } |
2132 |
+ |
2133 |
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h |
2134 |
+index c0f3718b77a8..30f6e9251a4a 100644 |
2135 |
+--- a/fs/cifs/cifsglob.h |
2136 |
++++ b/fs/cifs/cifsglob.h |
2137 |
+@@ -228,6 +228,8 @@ struct smb_version_operations { |
2138 |
+ /* verify the message */ |
2139 |
+ int (*check_message)(char *, unsigned int); |
2140 |
+ bool (*is_oplock_break)(char *, struct TCP_Server_Info *); |
2141 |
++ void (*downgrade_oplock)(struct TCP_Server_Info *, |
2142 |
++ struct cifsInodeInfo *, bool); |
2143 |
+ /* process transaction2 response */ |
2144 |
+ bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *, |
2145 |
+ char *, int); |
2146 |
+@@ -1113,6 +1115,12 @@ struct cifsInodeInfo { |
2147 |
+ unsigned int epoch; /* used to track lease state changes */ |
2148 |
+ bool delete_pending; /* DELETE_ON_CLOSE is set */ |
2149 |
+ bool invalid_mapping; /* pagecache is invalid */ |
2150 |
++ unsigned long flags; |
2151 |
++#define CIFS_INODE_PENDING_OPLOCK_BREAK (0) /* oplock break in progress */ |
2152 |
++#define CIFS_INODE_PENDING_WRITERS (1) /* Writes in progress */ |
2153 |
++#define CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2 (2) /* Downgrade oplock to L2 */ |
2154 |
++ spinlock_t writers_lock; |
2155 |
++ unsigned int writers; /* Number of writers on this inode */ |
2156 |
+ unsigned long time; /* jiffies of last update of inode */ |
2157 |
+ u64 server_eof; /* current file size on server -- protected by i_lock */ |
2158 |
+ u64 uniqueid; /* server inode number */ |
2159 |
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h |
2160 |
+index acc4ee8ed075..ca7980a1e303 100644 |
2161 |
+--- a/fs/cifs/cifsproto.h |
2162 |
++++ b/fs/cifs/cifsproto.h |
2163 |
+@@ -127,6 +127,9 @@ extern u64 cifs_UnixTimeToNT(struct timespec); |
2164 |
+ extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, |
2165 |
+ int offset); |
2166 |
+ extern void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock); |
2167 |
++extern int cifs_get_writer(struct cifsInodeInfo *cinode); |
2168 |
++extern void cifs_put_writer(struct cifsInodeInfo *cinode); |
2169 |
++extern void cifs_done_oplock_break(struct cifsInodeInfo *cinode); |
2170 |
+ extern int cifs_unlock_range(struct cifsFileInfo *cfile, |
2171 |
+ struct file_lock *flock, const unsigned int xid); |
2172 |
+ extern int cifs_push_mandatory_locks(struct cifsFileInfo *cfile); |
2173 |
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c |
2174 |
+index 834fce759d80..87c4dd072cde 100644 |
2175 |
+--- a/fs/cifs/file.c |
2176 |
++++ b/fs/cifs/file.c |
2177 |
+@@ -2608,12 +2608,20 @@ cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov, |
2178 |
+ struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); |
2179 |
+ ssize_t written; |
2180 |
+ |
2181 |
++ written = cifs_get_writer(cinode); |
2182 |
++ if (written) |
2183 |
++ return written; |
2184 |
++ |
2185 |
+ if (CIFS_CACHE_WRITE(cinode)) { |
2186 |
+ if (cap_unix(tcon->ses) && |
2187 |
+ (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) |
2188 |
+- && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) |
2189 |
+- return generic_file_aio_write(iocb, iov, nr_segs, pos); |
2190 |
+- return cifs_writev(iocb, iov, nr_segs, pos); |
2191 |
++ && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) { |
2192 |
++ written = generic_file_aio_write( |
2193 |
++ iocb, iov, nr_segs, pos); |
2194 |
++ goto out; |
2195 |
++ } |
2196 |
++ written = cifs_writev(iocb, iov, nr_segs, pos); |
2197 |
++ goto out; |
2198 |
+ } |
2199 |
+ /* |
2200 |
+ * For non-oplocked files in strict cache mode we need to write the data |
2201 |
+@@ -2633,6 +2641,8 @@ cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov, |
2202 |
+ inode); |
2203 |
+ cinode->oplock = 0; |
2204 |
+ } |
2205 |
++out: |
2206 |
++ cifs_put_writer(cinode); |
2207 |
+ return written; |
2208 |
+ } |
2209 |
+ |
2210 |
+@@ -3644,6 +3654,13 @@ static int cifs_launder_page(struct page *page) |
2211 |
+ return rc; |
2212 |
+ } |
2213 |
+ |
2214 |
++static int |
2215 |
++cifs_pending_writers_wait(void *unused) |
2216 |
++{ |
2217 |
++ schedule(); |
2218 |
++ return 0; |
2219 |
++} |
2220 |
++ |
2221 |
+ void cifs_oplock_break(struct work_struct *work) |
2222 |
+ { |
2223 |
+ struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo, |
2224 |
+@@ -3651,8 +3668,15 @@ void cifs_oplock_break(struct work_struct *work) |
2225 |
+ struct inode *inode = cfile->dentry->d_inode; |
2226 |
+ struct cifsInodeInfo *cinode = CIFS_I(inode); |
2227 |
+ struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); |
2228 |
++ struct TCP_Server_Info *server = tcon->ses->server; |
2229 |
+ int rc = 0; |
2230 |
+ |
2231 |
++ wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS, |
2232 |
++ cifs_pending_writers_wait, TASK_UNINTERRUPTIBLE); |
2233 |
++ |
2234 |
++ server->ops->downgrade_oplock(server, cinode, |
2235 |
++ test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags)); |
2236 |
++ |
2237 |
+ if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) && |
2238 |
+ cifs_has_mand_locks(cinode)) { |
2239 |
+ cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n", |
2240 |
+@@ -3689,6 +3713,7 @@ void cifs_oplock_break(struct work_struct *work) |
2241 |
+ cinode); |
2242 |
+ cifs_dbg(FYI, "Oplock release rc = %d\n", rc); |
2243 |
+ } |
2244 |
++ cifs_done_oplock_break(cinode); |
2245 |
+ } |
2246 |
+ |
2247 |
+ /* |
2248 |
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c |
2249 |
+index 2f9f3790679d..3b0c62e622da 100644 |
2250 |
+--- a/fs/cifs/misc.c |
2251 |
++++ b/fs/cifs/misc.c |
2252 |
+@@ -466,8 +466,22 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) |
2253 |
+ cifs_dbg(FYI, "file id match, oplock break\n"); |
2254 |
+ pCifsInode = CIFS_I(netfile->dentry->d_inode); |
2255 |
+ |
2256 |
+- cifs_set_oplock_level(pCifsInode, |
2257 |
+- pSMB->OplockLevel ? OPLOCK_READ : 0); |
2258 |
++ set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, |
2259 |
++ &pCifsInode->flags); |
2260 |
++ |
2261 |
++ /* |
2262 |
++ * Set flag if the server downgrades the oplock |
2263 |
++ * to L2 else clear. |
2264 |
++ */ |
2265 |
++ if (pSMB->OplockLevel) |
2266 |
++ set_bit( |
2267 |
++ CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, |
2268 |
++ &pCifsInode->flags); |
2269 |
++ else |
2270 |
++ clear_bit( |
2271 |
++ CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, |
2272 |
++ &pCifsInode->flags); |
2273 |
++ |
2274 |
+ queue_work(cifsiod_wq, |
2275 |
+ &netfile->oplock_break); |
2276 |
+ netfile->oplock_break_cancelled = false; |
2277 |
+@@ -551,6 +565,62 @@ void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock) |
2278 |
+ cinode->oplock = 0; |
2279 |
+ } |
2280 |
+ |
2281 |
++static int |
2282 |
++cifs_oplock_break_wait(void *unused) |
2283 |
++{ |
2284 |
++ schedule(); |
2285 |
++ return signal_pending(current) ? -ERESTARTSYS : 0; |
2286 |
++} |
2287 |
++ |
2288 |
++/* |
2289 |
++ * We wait for oplock breaks to be processed before we attempt to perform |
2290 |
++ * writes. |
2291 |
++ */ |
2292 |
++int cifs_get_writer(struct cifsInodeInfo *cinode) |
2293 |
++{ |
2294 |
++ int rc; |
2295 |
++ |
2296 |
++start: |
2297 |
++ rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK, |
2298 |
++ cifs_oplock_break_wait, TASK_KILLABLE); |
2299 |
++ if (rc) |
2300 |
++ return rc; |
2301 |
++ |
2302 |
++ spin_lock(&cinode->writers_lock); |
2303 |
++ if (!cinode->writers) |
2304 |
++ set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags); |
2305 |
++ cinode->writers++; |
2306 |
++ /* Check to see if we have started servicing an oplock break */ |
2307 |
++ if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) { |
2308 |
++ cinode->writers--; |
2309 |
++ if (cinode->writers == 0) { |
2310 |
++ clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags); |
2311 |
++ wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS); |
2312 |
++ } |
2313 |
++ spin_unlock(&cinode->writers_lock); |
2314 |
++ goto start; |
2315 |
++ } |
2316 |
++ spin_unlock(&cinode->writers_lock); |
2317 |
++ return 0; |
2318 |
++} |
2319 |
++ |
2320 |
++void cifs_put_writer(struct cifsInodeInfo *cinode) |
2321 |
++{ |
2322 |
++ spin_lock(&cinode->writers_lock); |
2323 |
++ cinode->writers--; |
2324 |
++ if (cinode->writers == 0) { |
2325 |
++ clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags); |
2326 |
++ wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS); |
2327 |
++ } |
2328 |
++ spin_unlock(&cinode->writers_lock); |
2329 |
++} |
2330 |
++ |
2331 |
++void cifs_done_oplock_break(struct cifsInodeInfo *cinode) |
2332 |
++{ |
2333 |
++ clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags); |
2334 |
++ wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK); |
2335 |
++} |
2336 |
++ |
2337 |
+ bool |
2338 |
+ backup_cred(struct cifs_sb_info *cifs_sb) |
2339 |
+ { |
2340 |
+diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c |
2341 |
+index 526fb89f9230..d1fdfa848703 100644 |
2342 |
+--- a/fs/cifs/smb1ops.c |
2343 |
++++ b/fs/cifs/smb1ops.c |
2344 |
+@@ -372,6 +372,16 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr) |
2345 |
+ return 0; |
2346 |
+ } |
2347 |
+ |
2348 |
++static void |
2349 |
++cifs_downgrade_oplock(struct TCP_Server_Info *server, |
2350 |
++ struct cifsInodeInfo *cinode, bool set_level2) |
2351 |
++{ |
2352 |
++ if (set_level2) |
2353 |
++ cifs_set_oplock_level(cinode, OPLOCK_READ); |
2354 |
++ else |
2355 |
++ cifs_set_oplock_level(cinode, 0); |
2356 |
++} |
2357 |
++ |
2358 |
+ static bool |
2359 |
+ cifs_check_trans2(struct mid_q_entry *mid, struct TCP_Server_Info *server, |
2360 |
+ char *buf, int malformed) |
2361 |
+@@ -1019,6 +1029,7 @@ struct smb_version_operations smb1_operations = { |
2362 |
+ .clear_stats = cifs_clear_stats, |
2363 |
+ .print_stats = cifs_print_stats, |
2364 |
+ .is_oplock_break = is_valid_oplock_break, |
2365 |
++ .downgrade_oplock = cifs_downgrade_oplock, |
2366 |
+ .check_trans2 = cifs_check_trans2, |
2367 |
+ .need_neg = cifs_need_neg, |
2368 |
+ .negotiate = cifs_negotiate, |
2369 |
+diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c |
2370 |
+index fb3966265b6e..b8021fde987d 100644 |
2371 |
+--- a/fs/cifs/smb2misc.c |
2372 |
++++ b/fs/cifs/smb2misc.c |
2373 |
+@@ -575,9 +575,21 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) |
2374 |
+ else |
2375 |
+ cfile->oplock_break_cancelled = false; |
2376 |
+ |
2377 |
+- server->ops->set_oplock_level(cinode, |
2378 |
+- rsp->OplockLevel ? SMB2_OPLOCK_LEVEL_II : 0, |
2379 |
+- 0, NULL); |
2380 |
++ set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, |
2381 |
++ &cinode->flags); |
2382 |
++ |
2383 |
++ /* |
2384 |
++ * Set flag if the server downgrades the oplock |
2385 |
++ * to L2 else clear. |
2386 |
++ */ |
2387 |
++ if (rsp->OplockLevel) |
2388 |
++ set_bit( |
2389 |
++ CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, |
2390 |
++ &cinode->flags); |
2391 |
++ else |
2392 |
++ clear_bit( |
2393 |
++ CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, |
2394 |
++ &cinode->flags); |
2395 |
+ |
2396 |
+ queue_work(cifsiod_wq, &cfile->oplock_break); |
2397 |
+ |
2398 |
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c |
2399 |
+index 192f51a12cf1..35ddc3ed119d 100644 |
2400 |
+--- a/fs/cifs/smb2ops.c |
2401 |
++++ b/fs/cifs/smb2ops.c |
2402 |
+@@ -905,6 +905,17 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, |
2403 |
+ } |
2404 |
+ |
2405 |
+ static void |
2406 |
++smb2_downgrade_oplock(struct TCP_Server_Info *server, |
2407 |
++ struct cifsInodeInfo *cinode, bool set_level2) |
2408 |
++{ |
2409 |
++ if (set_level2) |
2410 |
++ server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II, |
2411 |
++ 0, NULL); |
2412 |
++ else |
2413 |
++ server->ops->set_oplock_level(cinode, 0, 0, NULL); |
2414 |
++} |
2415 |
++ |
2416 |
++static void |
2417 |
+ smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, |
2418 |
+ unsigned int epoch, bool *purge_cache) |
2419 |
+ { |
2420 |
+@@ -1110,6 +1121,7 @@ struct smb_version_operations smb20_operations = { |
2421 |
+ .clear_stats = smb2_clear_stats, |
2422 |
+ .print_stats = smb2_print_stats, |
2423 |
+ .is_oplock_break = smb2_is_valid_oplock_break, |
2424 |
++ .downgrade_oplock = smb2_downgrade_oplock, |
2425 |
+ .need_neg = smb2_need_neg, |
2426 |
+ .negotiate = smb2_negotiate, |
2427 |
+ .negotiate_wsize = smb2_negotiate_wsize, |
2428 |
+@@ -1184,6 +1196,7 @@ struct smb_version_operations smb21_operations = { |
2429 |
+ .clear_stats = smb2_clear_stats, |
2430 |
+ .print_stats = smb2_print_stats, |
2431 |
+ .is_oplock_break = smb2_is_valid_oplock_break, |
2432 |
++ .downgrade_oplock = smb2_downgrade_oplock, |
2433 |
+ .need_neg = smb2_need_neg, |
2434 |
+ .negotiate = smb2_negotiate, |
2435 |
+ .negotiate_wsize = smb2_negotiate_wsize, |
2436 |
+@@ -1259,6 +1272,7 @@ struct smb_version_operations smb30_operations = { |
2437 |
+ .print_stats = smb2_print_stats, |
2438 |
+ .dump_share_caps = smb2_dump_share_caps, |
2439 |
+ .is_oplock_break = smb2_is_valid_oplock_break, |
2440 |
++ .downgrade_oplock = smb2_downgrade_oplock, |
2441 |
+ .need_neg = smb2_need_neg, |
2442 |
+ .negotiate = smb2_negotiate, |
2443 |
+ .negotiate_wsize = smb2_negotiate_wsize, |
2444 |
+diff --git a/fs/file_table.c b/fs/file_table.c |
2445 |
+index 5b24008ea4f6..79ecae62209a 100644 |
2446 |
+--- a/fs/file_table.c |
2447 |
++++ b/fs/file_table.c |
2448 |
+@@ -209,10 +209,10 @@ static void drop_file_write_access(struct file *file) |
2449 |
+ struct dentry *dentry = file->f_path.dentry; |
2450 |
+ struct inode *inode = dentry->d_inode; |
2451 |
+ |
2452 |
+- put_write_access(inode); |
2453 |
+- |
2454 |
+ if (special_file(inode->i_mode)) |
2455 |
+ return; |
2456 |
++ |
2457 |
++ put_write_access(inode); |
2458 |
+ if (file_check_writeable(file) != 0) |
2459 |
+ return; |
2460 |
+ __mnt_drop_write(mnt); |
2461 |
+diff --git a/fs/open.c b/fs/open.c |
2462 |
+index b9ed8b25c108..2ed7325f713e 100644 |
2463 |
+--- a/fs/open.c |
2464 |
++++ b/fs/open.c |
2465 |
+@@ -641,23 +641,12 @@ out: |
2466 |
+ static inline int __get_file_write_access(struct inode *inode, |
2467 |
+ struct vfsmount *mnt) |
2468 |
+ { |
2469 |
+- int error; |
2470 |
+- error = get_write_access(inode); |
2471 |
++ int error = get_write_access(inode); |
2472 |
+ if (error) |
2473 |
+ return error; |
2474 |
+- /* |
2475 |
+- * Do not take mount writer counts on |
2476 |
+- * special files since no writes to |
2477 |
+- * the mount itself will occur. |
2478 |
+- */ |
2479 |
+- if (!special_file(inode->i_mode)) { |
2480 |
+- /* |
2481 |
+- * Balanced in __fput() |
2482 |
+- */ |
2483 |
+- error = __mnt_want_write(mnt); |
2484 |
+- if (error) |
2485 |
+- put_write_access(inode); |
2486 |
+- } |
2487 |
++ error = __mnt_want_write(mnt); |
2488 |
++ if (error) |
2489 |
++ put_write_access(inode); |
2490 |
+ return error; |
2491 |
+ } |
2492 |
+ |
2493 |
+@@ -690,12 +679,11 @@ static int do_dentry_open(struct file *f, |
2494 |
+ |
2495 |
+ path_get(&f->f_path); |
2496 |
+ inode = f->f_inode = f->f_path.dentry->d_inode; |
2497 |
+- if (f->f_mode & FMODE_WRITE) { |
2498 |
++ if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) { |
2499 |
+ error = __get_file_write_access(inode, f->f_path.mnt); |
2500 |
+ if (error) |
2501 |
+ goto cleanup_file; |
2502 |
+- if (!special_file(inode->i_mode)) |
2503 |
+- file_take_write(f); |
2504 |
++ file_take_write(f); |
2505 |
+ } |
2506 |
+ |
2507 |
+ f->f_mapping = inode->i_mapping; |
2508 |
+@@ -742,7 +730,6 @@ static int do_dentry_open(struct file *f, |
2509 |
+ cleanup_all: |
2510 |
+ fops_put(f->f_op); |
2511 |
+ if (f->f_mode & FMODE_WRITE) { |
2512 |
+- put_write_access(inode); |
2513 |
+ if (!special_file(inode->i_mode)) { |
2514 |
+ /* |
2515 |
+ * We don't consider this a real |
2516 |
+@@ -750,6 +737,7 @@ cleanup_all: |
2517 |
+ * because it all happenend right |
2518 |
+ * here, so just reset the state. |
2519 |
+ */ |
2520 |
++ put_write_access(inode); |
2521 |
+ file_reset_write(f); |
2522 |
+ __mnt_drop_write(f->f_path.mnt); |
2523 |
+ } |
2524 |
+diff --git a/fs/super.c b/fs/super.c |
2525 |
+index 80d5cf2ca765..7624267b2043 100644 |
2526 |
+--- a/fs/super.c |
2527 |
++++ b/fs/super.c |
2528 |
+@@ -802,7 +802,10 @@ void emergency_remount(void) |
2529 |
+ |
2530 |
+ static DEFINE_IDA(unnamed_dev_ida); |
2531 |
+ static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */ |
2532 |
+-static int unnamed_dev_start = 0; /* don't bother trying below it */ |
2533 |
++/* Many userspace utilities consider an FSID of 0 invalid. |
2534 |
++ * Always return at least 1 from get_anon_bdev. |
2535 |
++ */ |
2536 |
++static int unnamed_dev_start = 1; |
2537 |
+ |
2538 |
+ int get_anon_bdev(dev_t *p) |
2539 |
+ { |
2540 |
+diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h |
2541 |
+index 34c7bdc06014..38a743705cc6 100644 |
2542 |
+--- a/include/asm-generic/pgtable.h |
2543 |
++++ b/include/asm-generic/pgtable.h |
2544 |
+@@ -680,24 +680,35 @@ static inline int pmd_numa(pmd_t pmd) |
2545 |
+ #ifndef pte_mknonnuma |
2546 |
+ static inline pte_t pte_mknonnuma(pte_t pte) |
2547 |
+ { |
2548 |
+- pte = pte_clear_flags(pte, _PAGE_NUMA); |
2549 |
+- return pte_set_flags(pte, _PAGE_PRESENT|_PAGE_ACCESSED); |
2550 |
++ pteval_t val = pte_val(pte); |
2551 |
++ |
2552 |
++ val &= ~_PAGE_NUMA; |
2553 |
++ val |= (_PAGE_PRESENT|_PAGE_ACCESSED); |
2554 |
++ return __pte(val); |
2555 |
+ } |
2556 |
+ #endif |
2557 |
+ |
2558 |
+ #ifndef pmd_mknonnuma |
2559 |
+ static inline pmd_t pmd_mknonnuma(pmd_t pmd) |
2560 |
+ { |
2561 |
+- pmd = pmd_clear_flags(pmd, _PAGE_NUMA); |
2562 |
+- return pmd_set_flags(pmd, _PAGE_PRESENT|_PAGE_ACCESSED); |
2563 |
++ pmdval_t val = pmd_val(pmd); |
2564 |
++ |
2565 |
++ val &= ~_PAGE_NUMA; |
2566 |
++ val |= (_PAGE_PRESENT|_PAGE_ACCESSED); |
2567 |
++ |
2568 |
++ return __pmd(val); |
2569 |
+ } |
2570 |
+ #endif |
2571 |
+ |
2572 |
+ #ifndef pte_mknuma |
2573 |
+ static inline pte_t pte_mknuma(pte_t pte) |
2574 |
+ { |
2575 |
+- pte = pte_set_flags(pte, _PAGE_NUMA); |
2576 |
+- return pte_clear_flags(pte, _PAGE_PRESENT); |
2577 |
++ pteval_t val = pte_val(pte); |
2578 |
++ |
2579 |
++ val &= ~_PAGE_PRESENT; |
2580 |
++ val |= _PAGE_NUMA; |
2581 |
++ |
2582 |
++ return __pte(val); |
2583 |
+ } |
2584 |
+ #endif |
2585 |
+ |
2586 |
+@@ -716,8 +727,12 @@ static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, |
2587 |
+ #ifndef pmd_mknuma |
2588 |
+ static inline pmd_t pmd_mknuma(pmd_t pmd) |
2589 |
+ { |
2590 |
+- pmd = pmd_set_flags(pmd, _PAGE_NUMA); |
2591 |
+- return pmd_clear_flags(pmd, _PAGE_PRESENT); |
2592 |
++ pmdval_t val = pmd_val(pmd); |
2593 |
++ |
2594 |
++ val &= ~_PAGE_PRESENT; |
2595 |
++ val |= _PAGE_NUMA; |
2596 |
++ |
2597 |
++ return __pmd(val); |
2598 |
+ } |
2599 |
+ #endif |
2600 |
+ |
2601 |
+diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h |
2602 |
+index 7c8b20b120ea..a9a53b12397b 100644 |
2603 |
+--- a/include/linux/if_macvlan.h |
2604 |
++++ b/include/linux/if_macvlan.h |
2605 |
+@@ -56,6 +56,7 @@ struct macvlan_dev { |
2606 |
+ int numqueues; |
2607 |
+ netdev_features_t tap_features; |
2608 |
+ int minor; |
2609 |
++ int nest_level; |
2610 |
+ }; |
2611 |
+ |
2612 |
+ static inline void macvlan_count_rx(const struct macvlan_dev *vlan, |
2613 |
+diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h |
2614 |
+index bbedfb56bd66..72ba6f5cbc8d 100644 |
2615 |
+--- a/include/linux/if_vlan.h |
2616 |
++++ b/include/linux/if_vlan.h |
2617 |
+@@ -73,7 +73,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) |
2618 |
+ /* found in socket.c */ |
2619 |
+ extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); |
2620 |
+ |
2621 |
+-static inline int is_vlan_dev(struct net_device *dev) |
2622 |
++static inline bool is_vlan_dev(struct net_device *dev) |
2623 |
+ { |
2624 |
+ return dev->priv_flags & IFF_802_1Q_VLAN; |
2625 |
+ } |
2626 |
+@@ -158,6 +158,7 @@ struct vlan_dev_priv { |
2627 |
+ #ifdef CONFIG_NET_POLL_CONTROLLER |
2628 |
+ struct netpoll *netpoll; |
2629 |
+ #endif |
2630 |
++ unsigned int nest_level; |
2631 |
+ }; |
2632 |
+ |
2633 |
+ static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev) |
2634 |
+diff --git a/include/linux/net.h b/include/linux/net.h |
2635 |
+index 94734a6259a4..17d83393afcc 100644 |
2636 |
+--- a/include/linux/net.h |
2637 |
++++ b/include/linux/net.h |
2638 |
+@@ -248,24 +248,17 @@ do { \ |
2639 |
+ bool __net_get_random_once(void *buf, int nbytes, bool *done, |
2640 |
+ struct static_key *done_key); |
2641 |
+ |
2642 |
+-#ifdef HAVE_JUMP_LABEL |
2643 |
+-#define ___NET_RANDOM_STATIC_KEY_INIT ((struct static_key) \ |
2644 |
+- { .enabled = ATOMIC_INIT(0), .entries = (void *)1 }) |
2645 |
+-#else /* !HAVE_JUMP_LABEL */ |
2646 |
+-#define ___NET_RANDOM_STATIC_KEY_INIT STATIC_KEY_INIT_FALSE |
2647 |
+-#endif /* HAVE_JUMP_LABEL */ |
2648 |
+- |
2649 |
+ #define net_get_random_once(buf, nbytes) \ |
2650 |
+ ({ \ |
2651 |
+ bool ___ret = false; \ |
2652 |
+ static bool ___done = false; \ |
2653 |
+- static struct static_key ___done_key = \ |
2654 |
+- ___NET_RANDOM_STATIC_KEY_INIT; \ |
2655 |
+- if (!static_key_true(&___done_key)) \ |
2656 |
++ static struct static_key ___once_key = \ |
2657 |
++ STATIC_KEY_INIT_TRUE; \ |
2658 |
++ if (static_key_true(&___once_key)) \ |
2659 |
+ ___ret = __net_get_random_once(buf, \ |
2660 |
+ nbytes, \ |
2661 |
+ &___done, \ |
2662 |
+- &___done_key); \ |
2663 |
++ &___once_key); \ |
2664 |
+ ___ret; \ |
2665 |
+ }) |
2666 |
+ |
2667 |
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h |
2668 |
+index daafd9561cbc..911718fa92ed 100644 |
2669 |
+--- a/include/linux/netdevice.h |
2670 |
++++ b/include/linux/netdevice.h |
2671 |
+@@ -1145,6 +1145,7 @@ struct net_device_ops { |
2672 |
+ netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb, |
2673 |
+ struct net_device *dev, |
2674 |
+ void *priv); |
2675 |
++ int (*ndo_get_lock_subclass)(struct net_device *dev); |
2676 |
+ }; |
2677 |
+ |
2678 |
+ /* |
2679 |
+@@ -2861,7 +2862,12 @@ static inline void netif_addr_lock(struct net_device *dev) |
2680 |
+ |
2681 |
+ static inline void netif_addr_lock_nested(struct net_device *dev) |
2682 |
+ { |
2683 |
+- spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING); |
2684 |
++ int subclass = SINGLE_DEPTH_NESTING; |
2685 |
++ |
2686 |
++ if (dev->netdev_ops->ndo_get_lock_subclass) |
2687 |
++ subclass = dev->netdev_ops->ndo_get_lock_subclass(dev); |
2688 |
++ |
2689 |
++ spin_lock_nested(&dev->addr_list_lock, subclass); |
2690 |
+ } |
2691 |
+ |
2692 |
+ static inline void netif_addr_lock_bh(struct net_device *dev) |
2693 |
+@@ -2988,6 +2994,14 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev, |
2694 |
+ priv; \ |
2695 |
+ priv = netdev_lower_get_next_private_rcu(dev, &(iter))) |
2696 |
+ |
2697 |
++void *netdev_lower_get_next(struct net_device *dev, |
2698 |
++ struct list_head **iter); |
2699 |
++#define netdev_for_each_lower_dev(dev, ldev, iter) \ |
2700 |
++ for (iter = &(dev)->adj_list.lower, \ |
2701 |
++ ldev = netdev_lower_get_next(dev, &(iter)); \ |
2702 |
++ ldev; \ |
2703 |
++ ldev = netdev_lower_get_next(dev, &(iter))) |
2704 |
++ |
2705 |
+ void *netdev_adjacent_get_private(struct list_head *adj_list); |
2706 |
+ void *netdev_lower_get_first_private_rcu(struct net_device *dev); |
2707 |
+ struct net_device *netdev_master_upper_dev_get(struct net_device *dev); |
2708 |
+@@ -3003,6 +3017,8 @@ void netdev_upper_dev_unlink(struct net_device *dev, |
2709 |
+ void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); |
2710 |
+ void *netdev_lower_dev_get_private(struct net_device *dev, |
2711 |
+ struct net_device *lower_dev); |
2712 |
++int dev_get_nest_level(struct net_device *dev, |
2713 |
++ bool (*type_check)(struct net_device *dev)); |
2714 |
+ int skb_checksum_help(struct sk_buff *skb); |
2715 |
+ struct sk_buff *__skb_gso_segment(struct sk_buff *skb, |
2716 |
+ netdev_features_t features, bool tx_path); |
2717 |
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h |
2718 |
+index de83b4eb1642..1841b58cf173 100644 |
2719 |
+--- a/include/linux/preempt.h |
2720 |
++++ b/include/linux/preempt.h |
2721 |
+@@ -15,6 +15,8 @@ |
2722 |
+ */ |
2723 |
+ #define PREEMPT_NEED_RESCHED 0x80000000 |
2724 |
+ |
2725 |
++#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) |
2726 |
++ |
2727 |
+ #include <asm/preempt.h> |
2728 |
+ |
2729 |
+ #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) |
2730 |
+diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h |
2731 |
+index 8e3e66ac0a52..953937ea5233 100644 |
2732 |
+--- a/include/linux/rtnetlink.h |
2733 |
++++ b/include/linux/rtnetlink.h |
2734 |
+@@ -4,6 +4,7 @@ |
2735 |
+ |
2736 |
+ #include <linux/mutex.h> |
2737 |
+ #include <linux/netdevice.h> |
2738 |
++#include <linux/wait.h> |
2739 |
+ #include <uapi/linux/rtnetlink.h> |
2740 |
+ |
2741 |
+ extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); |
2742 |
+@@ -22,6 +23,10 @@ extern void rtnl_lock(void); |
2743 |
+ extern void rtnl_unlock(void); |
2744 |
+ extern int rtnl_trylock(void); |
2745 |
+ extern int rtnl_is_locked(void); |
2746 |
++ |
2747 |
++extern wait_queue_head_t netdev_unregistering_wq; |
2748 |
++extern struct mutex net_mutex; |
2749 |
++ |
2750 |
+ #ifdef CONFIG_PROVE_LOCKING |
2751 |
+ extern int lockdep_rtnl_is_held(void); |
2752 |
+ #else |
2753 |
+diff --git a/include/linux/sched.h b/include/linux/sched.h |
2754 |
+index a781dec1cd0b..ccd0c6f24f2c 100644 |
2755 |
+--- a/include/linux/sched.h |
2756 |
++++ b/include/linux/sched.h |
2757 |
+@@ -1688,6 +1688,24 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk) |
2758 |
+ } |
2759 |
+ |
2760 |
+ |
2761 |
++static int pid_alive(const struct task_struct *p); |
2762 |
++static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) |
2763 |
++{ |
2764 |
++ pid_t pid = 0; |
2765 |
++ |
2766 |
++ rcu_read_lock(); |
2767 |
++ if (pid_alive(tsk)) |
2768 |
++ pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); |
2769 |
++ rcu_read_unlock(); |
2770 |
++ |
2771 |
++ return pid; |
2772 |
++} |
2773 |
++ |
2774 |
++static inline pid_t task_ppid_nr(const struct task_struct *tsk) |
2775 |
++{ |
2776 |
++ return task_ppid_nr_ns(tsk, &init_pid_ns); |
2777 |
++} |
2778 |
++ |
2779 |
+ static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, |
2780 |
+ struct pid_namespace *ns) |
2781 |
+ { |
2782 |
+@@ -1727,7 +1745,7 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk) |
2783 |
+ * |
2784 |
+ * Return: 1 if the process is alive. 0 otherwise. |
2785 |
+ */ |
2786 |
+-static inline int pid_alive(struct task_struct *p) |
2787 |
++static inline int pid_alive(const struct task_struct *p) |
2788 |
+ { |
2789 |
+ return p->pids[PIDTYPE_PID].pid != NULL; |
2790 |
+ } |
2791 |
+diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h |
2792 |
+index 54f91d35e5fd..302ab805b0bb 100644 |
2793 |
+--- a/include/linux/sock_diag.h |
2794 |
++++ b/include/linux/sock_diag.h |
2795 |
+@@ -23,7 +23,7 @@ int sock_diag_check_cookie(void *sk, __u32 *cookie); |
2796 |
+ void sock_diag_save_cookie(void *sk, __u32 *cookie); |
2797 |
+ |
2798 |
+ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr); |
2799 |
+-int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk, |
2800 |
++int sock_diag_put_filterinfo(struct sock *sk, |
2801 |
+ struct sk_buff *skb, int attrtype); |
2802 |
+ |
2803 |
+ #endif |
2804 |
+diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h |
2805 |
+index fddbe2023a5d..a629e4b23217 100644 |
2806 |
+--- a/include/linux/thread_info.h |
2807 |
++++ b/include/linux/thread_info.h |
2808 |
+@@ -118,8 +118,6 @@ static inline __deprecated void set_need_resched(void) |
2809 |
+ */ |
2810 |
+ } |
2811 |
+ |
2812 |
+-#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) |
2813 |
+- |
2814 |
+ #if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK |
2815 |
+ /* |
2816 |
+ * An arch can define its own version of set_restore_sigmask() to get the |
2817 |
+diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h |
2818 |
+index 7d64d3609ec9..428277869400 100644 |
2819 |
+--- a/include/net/af_vsock.h |
2820 |
++++ b/include/net/af_vsock.h |
2821 |
+@@ -155,7 +155,11 @@ struct vsock_transport { |
2822 |
+ |
2823 |
+ /**** CORE ****/ |
2824 |
+ |
2825 |
+-int vsock_core_init(const struct vsock_transport *t); |
2826 |
++int __vsock_core_init(const struct vsock_transport *t, struct module *owner); |
2827 |
++static inline int vsock_core_init(const struct vsock_transport *t) |
2828 |
++{ |
2829 |
++ return __vsock_core_init(t, THIS_MODULE); |
2830 |
++} |
2831 |
+ void vsock_core_exit(void); |
2832 |
+ |
2833 |
+ /**** UTILS ****/ |
2834 |
+diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h |
2835 |
+index 017badb1aec7..2e74c6cfa612 100644 |
2836 |
+--- a/include/net/ip6_route.h |
2837 |
++++ b/include/net/ip6_route.h |
2838 |
+@@ -32,6 +32,11 @@ struct route_info { |
2839 |
+ #define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010 |
2840 |
+ #define RT6_LOOKUP_F_SRCPREF_COA 0x00000020 |
2841 |
+ |
2842 |
++/* We do not (yet ?) support IPv6 jumbograms (RFC 2675) |
2843 |
++ * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header |
2844 |
++ */ |
2845 |
++#define IP6_MAX_MTU (0xFFFF + sizeof(struct ipv6hdr)) |
2846 |
++ |
2847 |
+ /* |
2848 |
+ * rt6_srcprefs2flags() and rt6_flags2srcprefs() translate |
2849 |
+ * between IPV6_ADDR_PREFERENCES socket option values |
2850 |
+diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h |
2851 |
+index 956b175523ff..55d15049ab2f 100644 |
2852 |
+--- a/include/net/netfilter/nf_conntrack_extend.h |
2853 |
++++ b/include/net/netfilter/nf_conntrack_extend.h |
2854 |
+@@ -47,8 +47,8 @@ enum nf_ct_ext_id { |
2855 |
+ /* Extensions: optional stuff which isn't permanently in struct. */ |
2856 |
+ struct nf_ct_ext { |
2857 |
+ struct rcu_head rcu; |
2858 |
+- u8 offset[NF_CT_EXT_NUM]; |
2859 |
+- u8 len; |
2860 |
++ u16 offset[NF_CT_EXT_NUM]; |
2861 |
++ u16 len; |
2862 |
+ char data[0]; |
2863 |
+ }; |
2864 |
+ |
2865 |
+diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h |
2866 |
+index cf2b7ae2b9d8..a75fc8e27cd6 100644 |
2867 |
+--- a/include/net/netfilter/nf_tables_core.h |
2868 |
++++ b/include/net/netfilter/nf_tables_core.h |
2869 |
+@@ -13,6 +13,16 @@ struct nft_cmp_fast_expr { |
2870 |
+ u8 len; |
2871 |
+ }; |
2872 |
+ |
2873 |
++/* Calculate the mask for the nft_cmp_fast expression. On big endian the |
2874 |
++ * mask needs to include the *upper* bytes when interpreting that data as |
2875 |
++ * something smaller than the full u32, therefore a cpu_to_le32 is done. |
2876 |
++ */ |
2877 |
++static inline u32 nft_cmp_fast_mask(unsigned int len) |
2878 |
++{ |
2879 |
++ return cpu_to_le32(~0U >> (FIELD_SIZEOF(struct nft_cmp_fast_expr, |
2880 |
++ data) * BITS_PER_BYTE - len)); |
2881 |
++} |
2882 |
++ |
2883 |
+ extern const struct nft_expr_ops nft_cmp_fast_ops; |
2884 |
+ |
2885 |
+ int nft_cmp_module_init(void); |
2886 |
+diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h |
2887 |
+index 6ee76c804893..0dfcc92600e8 100644 |
2888 |
+--- a/include/net/sctp/structs.h |
2889 |
++++ b/include/net/sctp/structs.h |
2890 |
+@@ -1241,6 +1241,7 @@ struct sctp_endpoint { |
2891 |
+ /* SCTP-AUTH: endpoint shared keys */ |
2892 |
+ struct list_head endpoint_shared_keys; |
2893 |
+ __u16 active_key_id; |
2894 |
++ __u8 auth_enable; |
2895 |
+ }; |
2896 |
+ |
2897 |
+ /* Recover the outter endpoint structure. */ |
2898 |
+@@ -1269,7 +1270,8 @@ struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *, |
2899 |
+ int sctp_has_association(struct net *net, const union sctp_addr *laddr, |
2900 |
+ const union sctp_addr *paddr); |
2901 |
+ |
2902 |
+-int sctp_verify_init(struct net *net, const struct sctp_association *asoc, |
2903 |
++int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep, |
2904 |
++ const struct sctp_association *asoc, |
2905 |
+ sctp_cid_t, sctp_init_chunk_t *peer_init, |
2906 |
+ struct sctp_chunk *chunk, struct sctp_chunk **err_chunk); |
2907 |
+ int sctp_process_init(struct sctp_association *, struct sctp_chunk *chunk, |
2908 |
+@@ -1653,6 +1655,17 @@ struct sctp_association { |
2909 |
+ /* This is the last advertised value of rwnd over a SACK chunk. */ |
2910 |
+ __u32 a_rwnd; |
2911 |
+ |
2912 |
++ /* Number of bytes by which the rwnd has slopped. The rwnd is allowed |
2913 |
++ * to slop over a maximum of the association's frag_point. |
2914 |
++ */ |
2915 |
++ __u32 rwnd_over; |
2916 |
++ |
2917 |
++ /* Keeps treack of rwnd pressure. This happens when we have |
2918 |
++ * a window, but not recevie buffer (i.e small packets). This one |
2919 |
++ * is releases slowly (1 PMTU at a time ). |
2920 |
++ */ |
2921 |
++ __u32 rwnd_press; |
2922 |
++ |
2923 |
+ /* This is the sndbuf size in use for the association. |
2924 |
+ * This corresponds to the sndbuf size for the association, |
2925 |
+ * as specified in the sk->sndbuf. |
2926 |
+@@ -1881,7 +1894,8 @@ void sctp_assoc_update(struct sctp_association *old, |
2927 |
+ __u32 sctp_association_get_next_tsn(struct sctp_association *); |
2928 |
+ |
2929 |
+ void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *); |
2930 |
+-void sctp_assoc_rwnd_update(struct sctp_association *, bool); |
2931 |
++void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int); |
2932 |
++void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int); |
2933 |
+ void sctp_assoc_set_primary(struct sctp_association *, |
2934 |
+ struct sctp_transport *); |
2935 |
+ void sctp_assoc_del_nonprimary_peers(struct sctp_association *, |
2936 |
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h |
2937 |
+index d65fbec2533d..b4f1effc9216 100644 |
2938 |
+--- a/include/scsi/scsi_device.h |
2939 |
++++ b/include/scsi/scsi_device.h |
2940 |
+@@ -257,7 +257,7 @@ struct scsi_target { |
2941 |
+ struct list_head siblings; |
2942 |
+ struct list_head devices; |
2943 |
+ struct device dev; |
2944 |
+- unsigned int reap_ref; /* protected by the host lock */ |
2945 |
++ struct kref reap_ref; /* last put renders target invisible */ |
2946 |
+ unsigned int channel; |
2947 |
+ unsigned int id; /* target id ... replace |
2948 |
+ * scsi_device.id eventually */ |
2949 |
+@@ -284,7 +284,6 @@ struct scsi_target { |
2950 |
+ #define SCSI_DEFAULT_TARGET_BLOCKED 3 |
2951 |
+ |
2952 |
+ char scsi_level; |
2953 |
+- struct execute_work ew; |
2954 |
+ enum scsi_target_state state; |
2955 |
+ void *hostdata; /* available to low-level driver */ |
2956 |
+ unsigned long starget_data[0]; /* for the transport */ |
2957 |
+diff --git a/include/trace/events/block.h b/include/trace/events/block.h |
2958 |
+index e76ae19a8d6f..e8a5eca1dbe5 100644 |
2959 |
+--- a/include/trace/events/block.h |
2960 |
++++ b/include/trace/events/block.h |
2961 |
+@@ -132,6 +132,7 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue, |
2962 |
+ * block_rq_complete - block IO operation completed by device driver |
2963 |
+ * @q: queue containing the block operation request |
2964 |
+ * @rq: block operations request |
2965 |
++ * @nr_bytes: number of completed bytes |
2966 |
+ * |
2967 |
+ * The block_rq_complete tracepoint event indicates that some portion |
2968 |
+ * of operation request has been completed by the device driver. If |
2969 |
+@@ -139,11 +140,37 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue, |
2970 |
+ * do for the request. If @rq->bio is non-NULL then there is |
2971 |
+ * additional work required to complete the request. |
2972 |
+ */ |
2973 |
+-DEFINE_EVENT(block_rq_with_error, block_rq_complete, |
2974 |
++TRACE_EVENT(block_rq_complete, |
2975 |
+ |
2976 |
+- TP_PROTO(struct request_queue *q, struct request *rq), |
2977 |
++ TP_PROTO(struct request_queue *q, struct request *rq, |
2978 |
++ unsigned int nr_bytes), |
2979 |
+ |
2980 |
+- TP_ARGS(q, rq) |
2981 |
++ TP_ARGS(q, rq, nr_bytes), |
2982 |
++ |
2983 |
++ TP_STRUCT__entry( |
2984 |
++ __field( dev_t, dev ) |
2985 |
++ __field( sector_t, sector ) |
2986 |
++ __field( unsigned int, nr_sector ) |
2987 |
++ __field( int, errors ) |
2988 |
++ __array( char, rwbs, RWBS_LEN ) |
2989 |
++ __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) |
2990 |
++ ), |
2991 |
++ |
2992 |
++ TP_fast_assign( |
2993 |
++ __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; |
2994 |
++ __entry->sector = blk_rq_pos(rq); |
2995 |
++ __entry->nr_sector = nr_bytes >> 9; |
2996 |
++ __entry->errors = rq->errors; |
2997 |
++ |
2998 |
++ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes); |
2999 |
++ blk_dump_cmd(__get_str(cmd), rq); |
3000 |
++ ), |
3001 |
++ |
3002 |
++ TP_printk("%d,%d %s (%s) %llu + %u [%d]", |
3003 |
++ MAJOR(__entry->dev), MINOR(__entry->dev), |
3004 |
++ __entry->rwbs, __get_str(cmd), |
3005 |
++ (unsigned long long)__entry->sector, |
3006 |
++ __entry->nr_sector, __entry->errors) |
3007 |
+ ); |
3008 |
+ |
3009 |
+ DECLARE_EVENT_CLASS(block_rq, |
3010 |
+diff --git a/init/Kconfig b/init/Kconfig |
3011 |
+index d56cb03c1b49..93c5ef0c5210 100644 |
3012 |
+--- a/init/Kconfig |
3013 |
++++ b/init/Kconfig |
3014 |
+@@ -1621,6 +1621,18 @@ config MMAP_ALLOW_UNINITIALIZED |
3015 |
+ |
3016 |
+ See Documentation/nommu-mmap.txt for more information. |
3017 |
+ |
3018 |
++config SYSTEM_TRUSTED_KEYRING |
3019 |
++ bool "Provide system-wide ring of trusted keys" |
3020 |
++ depends on KEYS |
3021 |
++ help |
3022 |
++ Provide a system keyring to which trusted keys can be added. Keys in |
3023 |
++ the keyring are considered to be trusted. Keys may be added at will |
3024 |
++ by the kernel from compiled-in data and from hardware key stores, but |
3025 |
++ userspace may only add extra keys if those keys can be verified by |
3026 |
++ keys already in the keyring. |
3027 |
++ |
3028 |
++ Keys in this keyring are used by module signature checking. |
3029 |
++ |
3030 |
+ config PROFILING |
3031 |
+ bool "Profiling support" |
3032 |
+ help |
3033 |
+@@ -1656,18 +1668,6 @@ config BASE_SMALL |
3034 |
+ default 0 if BASE_FULL |
3035 |
+ default 1 if !BASE_FULL |
3036 |
+ |
3037 |
+-config SYSTEM_TRUSTED_KEYRING |
3038 |
+- bool "Provide system-wide ring of trusted keys" |
3039 |
+- depends on KEYS |
3040 |
+- help |
3041 |
+- Provide a system keyring to which trusted keys can be added. Keys in |
3042 |
+- the keyring are considered to be trusted. Keys may be added at will |
3043 |
+- by the kernel from compiled-in data and from hardware key stores, but |
3044 |
+- userspace may only add extra keys if those keys can be verified by |
3045 |
+- keys already in the keyring. |
3046 |
+- |
3047 |
+- Keys in this keyring are used by module signature checking. |
3048 |
+- |
3049 |
+ menuconfig MODULES |
3050 |
+ bool "Enable loadable module support" |
3051 |
+ option modules |
3052 |
+diff --git a/kernel/audit.c b/kernel/audit.c |
3053 |
+index 95a20f3f52f1..d5f31c17813a 100644 |
3054 |
+--- a/kernel/audit.c |
3055 |
++++ b/kernel/audit.c |
3056 |
+@@ -1829,10 +1829,10 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk) |
3057 |
+ spin_unlock_irq(&tsk->sighand->siglock); |
3058 |
+ |
3059 |
+ audit_log_format(ab, |
3060 |
+- " ppid=%ld pid=%d auid=%u uid=%u gid=%u" |
3061 |
++ " ppid=%d pid=%d auid=%u uid=%u gid=%u" |
3062 |
+ " euid=%u suid=%u fsuid=%u" |
3063 |
+ " egid=%u sgid=%u fsgid=%u tty=%s ses=%u", |
3064 |
+- sys_getppid(), |
3065 |
++ task_ppid_nr(tsk), |
3066 |
+ tsk->pid, |
3067 |
+ from_kuid(&init_user_ns, audit_get_loginuid(tsk)), |
3068 |
+ from_kuid(&init_user_ns, cred->uid), |
3069 |
+diff --git a/kernel/auditsc.c b/kernel/auditsc.c |
3070 |
+index 7aef2f4b6c64..3b29605ea1b2 100644 |
3071 |
+--- a/kernel/auditsc.c |
3072 |
++++ b/kernel/auditsc.c |
3073 |
+@@ -459,7 +459,7 @@ static int audit_filter_rules(struct task_struct *tsk, |
3074 |
+ case AUDIT_PPID: |
3075 |
+ if (ctx) { |
3076 |
+ if (!ctx->ppid) |
3077 |
+- ctx->ppid = sys_getppid(); |
3078 |
++ ctx->ppid = task_ppid_nr(tsk); |
3079 |
+ result = audit_comparator(ctx->ppid, f->op, f->val); |
3080 |
+ } |
3081 |
+ break; |
3082 |
+diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c |
3083 |
+index 20b2fe37d105..0de9d7f5045c 100644 |
3084 |
+--- a/kernel/time/tick-common.c |
3085 |
++++ b/kernel/time/tick-common.c |
3086 |
+@@ -276,7 +276,7 @@ static bool tick_check_preferred(struct clock_event_device *curdev, |
3087 |
+ bool tick_check_replacement(struct clock_event_device *curdev, |
3088 |
+ struct clock_event_device *newdev) |
3089 |
+ { |
3090 |
+- if (tick_check_percpu(curdev, newdev, smp_processor_id())) |
3091 |
++ if (!tick_check_percpu(curdev, newdev, smp_processor_id())) |
3092 |
+ return false; |
3093 |
+ |
3094 |
+ return tick_check_preferred(curdev, newdev); |
3095 |
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c |
3096 |
+index 9f8af69c67ec..6558b7ac112d 100644 |
3097 |
+--- a/kernel/time/tick-sched.c |
3098 |
++++ b/kernel/time/tick-sched.c |
3099 |
+@@ -84,6 +84,9 @@ static void tick_do_update_jiffies64(ktime_t now) |
3100 |
+ |
3101 |
+ /* Keep the tick_next_period variable up to date */ |
3102 |
+ tick_next_period = ktime_add(last_jiffies_update, tick_period); |
3103 |
++ } else { |
3104 |
++ write_sequnlock(&jiffies_lock); |
3105 |
++ return; |
3106 |
+ } |
3107 |
+ write_sequnlock(&jiffies_lock); |
3108 |
+ update_wall_time(); |
3109 |
+@@ -967,7 +970,7 @@ static void tick_nohz_switch_to_nohz(void) |
3110 |
+ struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); |
3111 |
+ ktime_t next; |
3112 |
+ |
3113 |
+- if (!tick_nohz_active) |
3114 |
++ if (!tick_nohz_enabled) |
3115 |
+ return; |
3116 |
+ |
3117 |
+ local_irq_disable(); |
3118 |
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c |
3119 |
+index b418cb0d7242..4f3a3c03eadb 100644 |
3120 |
+--- a/kernel/trace/blktrace.c |
3121 |
++++ b/kernel/trace/blktrace.c |
3122 |
+@@ -702,6 +702,7 @@ void blk_trace_shutdown(struct request_queue *q) |
3123 |
+ * blk_add_trace_rq - Add a trace for a request oriented action |
3124 |
+ * @q: queue the io is for |
3125 |
+ * @rq: the source request |
3126 |
++ * @nr_bytes: number of completed bytes |
3127 |
+ * @what: the action |
3128 |
+ * |
3129 |
+ * Description: |
3130 |
+@@ -709,7 +710,7 @@ void blk_trace_shutdown(struct request_queue *q) |
3131 |
+ * |
3132 |
+ **/ |
3133 |
+ static void blk_add_trace_rq(struct request_queue *q, struct request *rq, |
3134 |
+- u32 what) |
3135 |
++ unsigned int nr_bytes, u32 what) |
3136 |
+ { |
3137 |
+ struct blk_trace *bt = q->blk_trace; |
3138 |
+ |
3139 |
+@@ -718,11 +719,11 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq, |
3140 |
+ |
3141 |
+ if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { |
3142 |
+ what |= BLK_TC_ACT(BLK_TC_PC); |
3143 |
+- __blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags, |
3144 |
++ __blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags, |
3145 |
+ what, rq->errors, rq->cmd_len, rq->cmd); |
3146 |
+ } else { |
3147 |
+ what |= BLK_TC_ACT(BLK_TC_FS); |
3148 |
+- __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), |
3149 |
++ __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes, |
3150 |
+ rq->cmd_flags, what, rq->errors, 0, NULL); |
3151 |
+ } |
3152 |
+ } |
3153 |
+@@ -730,33 +731,34 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq, |
3154 |
+ static void blk_add_trace_rq_abort(void *ignore, |
3155 |
+ struct request_queue *q, struct request *rq) |
3156 |
+ { |
3157 |
+- blk_add_trace_rq(q, rq, BLK_TA_ABORT); |
3158 |
++ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT); |
3159 |
+ } |
3160 |
+ |
3161 |
+ static void blk_add_trace_rq_insert(void *ignore, |
3162 |
+ struct request_queue *q, struct request *rq) |
3163 |
+ { |
3164 |
+- blk_add_trace_rq(q, rq, BLK_TA_INSERT); |
3165 |
++ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT); |
3166 |
+ } |
3167 |
+ |
3168 |
+ static void blk_add_trace_rq_issue(void *ignore, |
3169 |
+ struct request_queue *q, struct request *rq) |
3170 |
+ { |
3171 |
+- blk_add_trace_rq(q, rq, BLK_TA_ISSUE); |
3172 |
++ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ISSUE); |
3173 |
+ } |
3174 |
+ |
3175 |
+ static void blk_add_trace_rq_requeue(void *ignore, |
3176 |
+ struct request_queue *q, |
3177 |
+ struct request *rq) |
3178 |
+ { |
3179 |
+- blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); |
3180 |
++ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_REQUEUE); |
3181 |
+ } |
3182 |
+ |
3183 |
+ static void blk_add_trace_rq_complete(void *ignore, |
3184 |
+ struct request_queue *q, |
3185 |
+- struct request *rq) |
3186 |
++ struct request *rq, |
3187 |
++ unsigned int nr_bytes) |
3188 |
+ { |
3189 |
+- blk_add_trace_rq(q, rq, BLK_TA_COMPLETE); |
3190 |
++ blk_add_trace_rq(q, rq, nr_bytes, BLK_TA_COMPLETE); |
3191 |
+ } |
3192 |
+ |
3193 |
+ /** |
3194 |
+diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c |
3195 |
+index 8efbb69b04f0..6d6a789e579e 100644 |
3196 |
+--- a/kernel/trace/trace_events_trigger.c |
3197 |
++++ b/kernel/trace/trace_events_trigger.c |
3198 |
+@@ -77,7 +77,7 @@ event_triggers_call(struct ftrace_event_file *file, void *rec) |
3199 |
+ data->ops->func(data); |
3200 |
+ continue; |
3201 |
+ } |
3202 |
+- filter = rcu_dereference(data->filter); |
3203 |
++ filter = rcu_dereference_sched(data->filter); |
3204 |
+ if (filter && !filter_match_preds(filter, rec)) |
3205 |
+ continue; |
3206 |
+ if (data->cmd_ops->post_trigger) { |
3207 |
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c |
3208 |
+index 79e52d93860b..bd0c9b133b54 100644 |
3209 |
+--- a/kernel/trace/trace_uprobe.c |
3210 |
++++ b/kernel/trace/trace_uprobe.c |
3211 |
+@@ -728,9 +728,15 @@ static int uprobe_buffer_enable(void) |
3212 |
+ |
3213 |
+ static void uprobe_buffer_disable(void) |
3214 |
+ { |
3215 |
++ int cpu; |
3216 |
++ |
3217 |
+ BUG_ON(!mutex_is_locked(&event_mutex)); |
3218 |
+ |
3219 |
+ if (--uprobe_buffer_refcnt == 0) { |
3220 |
++ for_each_possible_cpu(cpu) |
3221 |
++ free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, |
3222 |
++ cpu)->buf); |
3223 |
++ |
3224 |
+ free_percpu(uprobe_cpu_buffer); |
3225 |
+ uprobe_cpu_buffer = NULL; |
3226 |
+ } |
3227 |
+diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c |
3228 |
+index 031cc5655a51..63630aef3bd3 100644 |
3229 |
+--- a/kernel/tracepoint.c |
3230 |
++++ b/kernel/tracepoint.c |
3231 |
+@@ -641,6 +641,9 @@ static int tracepoint_module_coming(struct module *mod) |
3232 |
+ struct tp_module *tp_mod, *iter; |
3233 |
+ int ret = 0; |
3234 |
+ |
3235 |
++ if (!mod->num_tracepoints) |
3236 |
++ return 0; |
3237 |
++ |
3238 |
+ /* |
3239 |
+ * We skip modules that taint the kernel, especially those with different |
3240 |
+ * module headers (for forced load), to make sure we don't cause a crash. |
3241 |
+@@ -684,6 +687,9 @@ static int tracepoint_module_going(struct module *mod) |
3242 |
+ { |
3243 |
+ struct tp_module *pos; |
3244 |
+ |
3245 |
++ if (!mod->num_tracepoints) |
3246 |
++ return 0; |
3247 |
++ |
3248 |
+ mutex_lock(&tracepoints_mutex); |
3249 |
+ tracepoint_update_probe_range(mod->tracepoints_ptrs, |
3250 |
+ mod->tracepoints_ptrs + mod->num_tracepoints); |
3251 |
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c |
3252 |
+index 1546655a2d78..1c42d0c36d0b 100644 |
3253 |
+--- a/mm/huge_memory.c |
3254 |
++++ b/mm/huge_memory.c |
3255 |
+@@ -1611,16 +1611,23 @@ pmd_t *page_check_address_pmd(struct page *page, |
3256 |
+ enum page_check_address_pmd_flag flag, |
3257 |
+ spinlock_t **ptl) |
3258 |
+ { |
3259 |
++ pgd_t *pgd; |
3260 |
++ pud_t *pud; |
3261 |
+ pmd_t *pmd; |
3262 |
+ |
3263 |
+ if (address & ~HPAGE_PMD_MASK) |
3264 |
+ return NULL; |
3265 |
+ |
3266 |
+- pmd = mm_find_pmd(mm, address); |
3267 |
+- if (!pmd) |
3268 |
++ pgd = pgd_offset(mm, address); |
3269 |
++ if (!pgd_present(*pgd)) |
3270 |
+ return NULL; |
3271 |
++ pud = pud_offset(pgd, address); |
3272 |
++ if (!pud_present(*pud)) |
3273 |
++ return NULL; |
3274 |
++ pmd = pmd_offset(pud, address); |
3275 |
++ |
3276 |
+ *ptl = pmd_lock(mm, pmd); |
3277 |
+- if (pmd_none(*pmd)) |
3278 |
++ if (!pmd_present(*pmd)) |
3279 |
+ goto unlock; |
3280 |
+ if (pmd_page(*pmd) != page) |
3281 |
+ goto unlock; |
3282 |
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c |
3283 |
+index 2de3c845f03a..06a9bc0a3120 100644 |
3284 |
+--- a/mm/hugetlb.c |
3285 |
++++ b/mm/hugetlb.c |
3286 |
+@@ -1134,6 +1134,7 @@ static void return_unused_surplus_pages(struct hstate *h, |
3287 |
+ while (nr_pages--) { |
3288 |
+ if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1)) |
3289 |
+ break; |
3290 |
++ cond_resched_lock(&hugetlb_lock); |
3291 |
+ } |
3292 |
+ } |
3293 |
+ |
3294 |
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c |
3295 |
+index 175273f38cb1..44ebd5c2cd4a 100644 |
3296 |
+--- a/net/8021q/vlan.c |
3297 |
++++ b/net/8021q/vlan.c |
3298 |
+@@ -169,6 +169,7 @@ int register_vlan_dev(struct net_device *dev) |
3299 |
+ if (err < 0) |
3300 |
+ goto out_uninit_mvrp; |
3301 |
+ |
3302 |
++ vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1; |
3303 |
+ err = register_netdevice(dev); |
3304 |
+ if (err < 0) |
3305 |
+ goto out_uninit_mvrp; |
3306 |
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c |
3307 |
+index 27bfe2f8e2de..cc0d21895420 100644 |
3308 |
+--- a/net/8021q/vlan_dev.c |
3309 |
++++ b/net/8021q/vlan_dev.c |
3310 |
+@@ -524,6 +524,11 @@ static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass) |
3311 |
+ netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass); |
3312 |
+ } |
3313 |
+ |
3314 |
++static int vlan_dev_get_lock_subclass(struct net_device *dev) |
3315 |
++{ |
3316 |
++ return vlan_dev_priv(dev)->nest_level; |
3317 |
++} |
3318 |
++ |
3319 |
+ static const struct header_ops vlan_header_ops = { |
3320 |
+ .create = vlan_dev_hard_header, |
3321 |
+ .rebuild = vlan_dev_rebuild_header, |
3322 |
+@@ -559,7 +564,7 @@ static const struct net_device_ops vlan_netdev_ops; |
3323 |
+ static int vlan_dev_init(struct net_device *dev) |
3324 |
+ { |
3325 |
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; |
3326 |
+- int subclass = 0, i; |
3327 |
++ int i; |
3328 |
+ |
3329 |
+ netif_carrier_off(dev); |
3330 |
+ |
3331 |
+@@ -608,10 +613,7 @@ static int vlan_dev_init(struct net_device *dev) |
3332 |
+ |
3333 |
+ SET_NETDEV_DEVTYPE(dev, &vlan_type); |
3334 |
+ |
3335 |
+- if (is_vlan_dev(real_dev)) |
3336 |
+- subclass = 1; |
3337 |
+- |
3338 |
+- vlan_dev_set_lockdep_class(dev, subclass); |
3339 |
++ vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev)); |
3340 |
+ |
3341 |
+ vlan_dev_priv(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats); |
3342 |
+ if (!vlan_dev_priv(dev)->vlan_pcpu_stats) |
3343 |
+@@ -791,6 +793,7 @@ static const struct net_device_ops vlan_netdev_ops = { |
3344 |
+ .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup, |
3345 |
+ #endif |
3346 |
+ .ndo_fix_features = vlan_dev_fix_features, |
3347 |
++ .ndo_get_lock_subclass = vlan_dev_get_lock_subclass, |
3348 |
+ }; |
3349 |
+ |
3350 |
+ void vlan_setup(struct net_device *dev) |
3351 |
+diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c |
3352 |
+index 8323bced8e5b..d074d06ce094 100644 |
3353 |
+--- a/net/batman-adv/bat_iv_ogm.c |
3354 |
++++ b/net/batman-adv/bat_iv_ogm.c |
3355 |
+@@ -1545,6 +1545,8 @@ out_neigh: |
3356 |
+ if ((orig_neigh_node) && (!is_single_hop_neigh)) |
3357 |
+ batadv_orig_node_free_ref(orig_neigh_node); |
3358 |
+ out: |
3359 |
++ if (router_ifinfo) |
3360 |
++ batadv_neigh_ifinfo_free_ref(router_ifinfo); |
3361 |
+ if (router) |
3362 |
+ batadv_neigh_node_free_ref(router); |
3363 |
+ if (router_router) |
3364 |
+diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c |
3365 |
+index edee50411892..bd8219a7dddf 100644 |
3366 |
+--- a/net/batman-adv/distributed-arp-table.c |
3367 |
++++ b/net/batman-adv/distributed-arp-table.c |
3368 |
+@@ -940,8 +940,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv, |
3369 |
+ * additional DAT answer may trigger kernel warnings about |
3370 |
+ * a packet coming from the wrong port. |
3371 |
+ */ |
3372 |
+- if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, |
3373 |
+- BATADV_NO_FLAGS)) { |
3374 |
++ if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, vid)) { |
3375 |
+ ret = true; |
3376 |
+ goto out; |
3377 |
+ } |
3378 |
+diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c |
3379 |
+index 88df9b1d552d..cc1cfd60c094 100644 |
3380 |
+--- a/net/batman-adv/fragmentation.c |
3381 |
++++ b/net/batman-adv/fragmentation.c |
3382 |
+@@ -418,12 +418,13 @@ bool batadv_frag_send_packet(struct sk_buff *skb, |
3383 |
+ struct batadv_neigh_node *neigh_node) |
3384 |
+ { |
3385 |
+ struct batadv_priv *bat_priv; |
3386 |
+- struct batadv_hard_iface *primary_if; |
3387 |
++ struct batadv_hard_iface *primary_if = NULL; |
3388 |
+ struct batadv_frag_packet frag_header; |
3389 |
+ struct sk_buff *skb_fragment; |
3390 |
+ unsigned mtu = neigh_node->if_incoming->net_dev->mtu; |
3391 |
+ unsigned header_size = sizeof(frag_header); |
3392 |
+ unsigned max_fragment_size, max_packet_size; |
3393 |
++ bool ret = false; |
3394 |
+ |
3395 |
+ /* To avoid merge and refragmentation at next-hops we never send |
3396 |
+ * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE |
3397 |
+@@ -483,7 +484,11 @@ bool batadv_frag_send_packet(struct sk_buff *skb, |
3398 |
+ skb->len + ETH_HLEN); |
3399 |
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); |
3400 |
+ |
3401 |
+- return true; |
3402 |
++ ret = true; |
3403 |
++ |
3404 |
+ out_err: |
3405 |
+- return false; |
3406 |
++ if (primary_if) |
3407 |
++ batadv_hardif_free_ref(primary_if); |
3408 |
++ |
3409 |
++ return ret; |
3410 |
+ } |
3411 |
+diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c |
3412 |
+index 55cf2260d295..36b9ae61f5e8 100644 |
3413 |
+--- a/net/batman-adv/gateway_client.c |
3414 |
++++ b/net/batman-adv/gateway_client.c |
3415 |
+@@ -42,8 +42,10 @@ |
3416 |
+ |
3417 |
+ static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node) |
3418 |
+ { |
3419 |
+- if (atomic_dec_and_test(&gw_node->refcount)) |
3420 |
++ if (atomic_dec_and_test(&gw_node->refcount)) { |
3421 |
++ batadv_orig_node_free_ref(gw_node->orig_node); |
3422 |
+ kfree_rcu(gw_node, rcu); |
3423 |
++ } |
3424 |
+ } |
3425 |
+ |
3426 |
+ static struct batadv_gw_node * |
3427 |
+@@ -408,9 +410,14 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv, |
3428 |
+ if (gateway->bandwidth_down == 0) |
3429 |
+ return; |
3430 |
+ |
3431 |
++ if (!atomic_inc_not_zero(&orig_node->refcount)) |
3432 |
++ return; |
3433 |
++ |
3434 |
+ gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC); |
3435 |
+- if (!gw_node) |
3436 |
++ if (!gw_node) { |
3437 |
++ batadv_orig_node_free_ref(orig_node); |
3438 |
+ return; |
3439 |
++ } |
3440 |
+ |
3441 |
+ INIT_HLIST_NODE(&gw_node->list); |
3442 |
+ gw_node->orig_node = orig_node; |
3443 |
+diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c |
3444 |
+index 853941629dc1..abf612d7d0ae 100644 |
3445 |
+--- a/net/batman-adv/originator.c |
3446 |
++++ b/net/batman-adv/originator.c |
3447 |
+@@ -500,12 +500,17 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node, |
3448 |
+ static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu) |
3449 |
+ { |
3450 |
+ struct batadv_orig_ifinfo *orig_ifinfo; |
3451 |
++ struct batadv_neigh_node *router; |
3452 |
+ |
3453 |
+ orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu); |
3454 |
+ |
3455 |
+ if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT) |
3456 |
+ batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing); |
3457 |
+ |
3458 |
++ /* this is the last reference to this object */ |
3459 |
++ router = rcu_dereference_protected(orig_ifinfo->router, true); |
3460 |
++ if (router) |
3461 |
++ batadv_neigh_node_free_ref_now(router); |
3462 |
+ kfree(orig_ifinfo); |
3463 |
+ } |
3464 |
+ |
3465 |
+@@ -697,6 +702,47 @@ free_orig_node: |
3466 |
+ } |
3467 |
+ |
3468 |
+ /** |
3469 |
++ * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor |
3470 |
++ * @bat_priv: the bat priv with all the soft interface information |
3471 |
++ * @neigh: orig node which is to be checked |
3472 |
++ */ |
3473 |
++static void |
3474 |
++batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv, |
3475 |
++ struct batadv_neigh_node *neigh) |
3476 |
++{ |
3477 |
++ struct batadv_neigh_ifinfo *neigh_ifinfo; |
3478 |
++ struct batadv_hard_iface *if_outgoing; |
3479 |
++ struct hlist_node *node_tmp; |
3480 |
++ |
3481 |
++ spin_lock_bh(&neigh->ifinfo_lock); |
3482 |
++ |
3483 |
++ /* for all ifinfo objects for this neighinator */ |
3484 |
++ hlist_for_each_entry_safe(neigh_ifinfo, node_tmp, |
3485 |
++ &neigh->ifinfo_list, list) { |
3486 |
++ if_outgoing = neigh_ifinfo->if_outgoing; |
3487 |
++ |
3488 |
++ /* always keep the default interface */ |
3489 |
++ if (if_outgoing == BATADV_IF_DEFAULT) |
3490 |
++ continue; |
3491 |
++ |
3492 |
++ /* don't purge if the interface is not (going) down */ |
3493 |
++ if ((if_outgoing->if_status != BATADV_IF_INACTIVE) && |
3494 |
++ (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) && |
3495 |
++ (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED)) |
3496 |
++ continue; |
3497 |
++ |
3498 |
++ batadv_dbg(BATADV_DBG_BATMAN, bat_priv, |
3499 |
++ "neighbor/ifinfo purge: neighbor %pM, iface: %s\n", |
3500 |
++ neigh->addr, if_outgoing->net_dev->name); |
3501 |
++ |
3502 |
++ hlist_del_rcu(&neigh_ifinfo->list); |
3503 |
++ batadv_neigh_ifinfo_free_ref(neigh_ifinfo); |
3504 |
++ } |
3505 |
++ |
3506 |
++ spin_unlock_bh(&neigh->ifinfo_lock); |
3507 |
++} |
3508 |
++ |
3509 |
++/** |
3510 |
+ * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator |
3511 |
+ * @bat_priv: the bat priv with all the soft interface information |
3512 |
+ * @orig_node: orig node which is to be checked |
3513 |
+@@ -795,6 +841,11 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv, |
3514 |
+ |
3515 |
+ hlist_del_rcu(&neigh_node->list); |
3516 |
+ batadv_neigh_node_free_ref(neigh_node); |
3517 |
++ } else { |
3518 |
++ /* only necessary if not the whole neighbor is to be |
3519 |
++ * deleted, but some interface has been removed. |
3520 |
++ */ |
3521 |
++ batadv_purge_neigh_ifinfo(bat_priv, neigh_node); |
3522 |
+ } |
3523 |
+ } |
3524 |
+ |
3525 |
+@@ -852,7 +903,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv, |
3526 |
+ { |
3527 |
+ struct batadv_neigh_node *best_neigh_node; |
3528 |
+ struct batadv_hard_iface *hard_iface; |
3529 |
+- bool changed; |
3530 |
++ bool changed_ifinfo, changed_neigh; |
3531 |
+ |
3532 |
+ if (batadv_has_timed_out(orig_node->last_seen, |
3533 |
+ 2 * BATADV_PURGE_TIMEOUT)) { |
3534 |
+@@ -862,10 +913,10 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv, |
3535 |
+ jiffies_to_msecs(orig_node->last_seen)); |
3536 |
+ return true; |
3537 |
+ } |
3538 |
+- changed = batadv_purge_orig_ifinfo(bat_priv, orig_node); |
3539 |
+- changed = changed || batadv_purge_orig_neighbors(bat_priv, orig_node); |
3540 |
++ changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node); |
3541 |
++ changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node); |
3542 |
+ |
3543 |
+- if (!changed) |
3544 |
++ if (!changed_ifinfo && !changed_neigh) |
3545 |
+ return false; |
3546 |
+ |
3547 |
+ /* first for NULL ... */ |
3548 |
+@@ -1023,7 +1074,8 @@ int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset) |
3549 |
+ bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface); |
3550 |
+ |
3551 |
+ out: |
3552 |
+- batadv_hardif_free_ref(hard_iface); |
3553 |
++ if (hard_iface) |
3554 |
++ batadv_hardif_free_ref(hard_iface); |
3555 |
+ return 0; |
3556 |
+ } |
3557 |
+ |
3558 |
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c |
3559 |
+index d0cca3c65f01..7985deaff52f 100644 |
3560 |
+--- a/net/bridge/br_input.c |
3561 |
++++ b/net/bridge/br_input.c |
3562 |
+@@ -73,7 +73,7 @@ int br_handle_frame_finish(struct sk_buff *skb) |
3563 |
+ goto drop; |
3564 |
+ |
3565 |
+ if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid)) |
3566 |
+- goto drop; |
3567 |
++ goto out; |
3568 |
+ |
3569 |
+ /* insert into forwarding database after filtering to avoid spoofing */ |
3570 |
+ br = p->br; |
3571 |
+diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c |
3572 |
+index e74b6d530cb6..e8844d975b32 100644 |
3573 |
+--- a/net/bridge/br_netlink.c |
3574 |
++++ b/net/bridge/br_netlink.c |
3575 |
+@@ -445,6 +445,20 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[]) |
3576 |
+ return 0; |
3577 |
+ } |
3578 |
+ |
3579 |
++static int br_dev_newlink(struct net *src_net, struct net_device *dev, |
3580 |
++ struct nlattr *tb[], struct nlattr *data[]) |
3581 |
++{ |
3582 |
++ struct net_bridge *br = netdev_priv(dev); |
3583 |
++ |
3584 |
++ if (tb[IFLA_ADDRESS]) { |
3585 |
++ spin_lock_bh(&br->lock); |
3586 |
++ br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS])); |
3587 |
++ spin_unlock_bh(&br->lock); |
3588 |
++ } |
3589 |
++ |
3590 |
++ return register_netdevice(dev); |
3591 |
++} |
3592 |
++ |
3593 |
+ static size_t br_get_link_af_size(const struct net_device *dev) |
3594 |
+ { |
3595 |
+ struct net_port_vlans *pv; |
3596 |
+@@ -473,6 +487,7 @@ struct rtnl_link_ops br_link_ops __read_mostly = { |
3597 |
+ .priv_size = sizeof(struct net_bridge), |
3598 |
+ .setup = br_dev_setup, |
3599 |
+ .validate = br_validate, |
3600 |
++ .newlink = br_dev_newlink, |
3601 |
+ .dellink = br_dev_delete, |
3602 |
+ }; |
3603 |
+ |
3604 |
+diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c |
3605 |
+index f23c74b3a953..ba7757b7737d 100644 |
3606 |
+--- a/net/bridge/br_vlan.c |
3607 |
++++ b/net/bridge/br_vlan.c |
3608 |
+@@ -170,7 +170,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, |
3609 |
+ * rejected. |
3610 |
+ */ |
3611 |
+ if (!v) |
3612 |
+- return false; |
3613 |
++ goto drop; |
3614 |
+ |
3615 |
+ /* If vlan tx offload is disabled on bridge device and frame was |
3616 |
+ * sent from vlan device on the bridge device, it does not have |
3617 |
+@@ -193,7 +193,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, |
3618 |
+ * vlan untagged or priority-tagged traffic belongs to. |
3619 |
+ */ |
3620 |
+ if (pvid == VLAN_N_VID) |
3621 |
+- return false; |
3622 |
++ goto drop; |
3623 |
+ |
3624 |
+ /* PVID is set on this port. Any untagged or priority-tagged |
3625 |
+ * ingress frame is considered to belong to this vlan. |
3626 |
+@@ -216,7 +216,8 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, |
3627 |
+ /* Frame had a valid vlan tag. See if vlan is allowed */ |
3628 |
+ if (test_bit(*vid, v->vlan_bitmap)) |
3629 |
+ return true; |
3630 |
+- |
3631 |
++drop: |
3632 |
++ kfree_skb(skb); |
3633 |
+ return false; |
3634 |
+ } |
3635 |
+ |
3636 |
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c |
3637 |
+index 0e474b13463b..1059ed3bc255 100644 |
3638 |
+--- a/net/bridge/netfilter/ebtables.c |
3639 |
++++ b/net/bridge/netfilter/ebtables.c |
3640 |
+@@ -1044,10 +1044,9 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl, |
3641 |
+ if (repl->num_counters && |
3642 |
+ copy_to_user(repl->counters, counterstmp, |
3643 |
+ repl->num_counters * sizeof(struct ebt_counter))) { |
3644 |
+- ret = -EFAULT; |
3645 |
++ /* Silent error, can't fail, new table is already in place */ |
3646 |
++ net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n"); |
3647 |
+ } |
3648 |
+- else |
3649 |
+- ret = 0; |
3650 |
+ |
3651 |
+ /* decrease module count and free resources */ |
3652 |
+ EBT_ENTRY_ITERATE(table->entries, table->entries_size, |
3653 |
+diff --git a/net/core/dev.c b/net/core/dev.c |
3654 |
+index 45fa2f11f84d..fccc195e0fc8 100644 |
3655 |
+--- a/net/core/dev.c |
3656 |
++++ b/net/core/dev.c |
3657 |
+@@ -2289,7 +2289,7 @@ EXPORT_SYMBOL(skb_checksum_help); |
3658 |
+ __be16 skb_network_protocol(struct sk_buff *skb, int *depth) |
3659 |
+ { |
3660 |
+ __be16 type = skb->protocol; |
3661 |
+- int vlan_depth = ETH_HLEN; |
3662 |
++ int vlan_depth = skb->mac_len; |
3663 |
+ |
3664 |
+ /* Tunnel gso handlers can set protocol to ethernet. */ |
3665 |
+ if (type == htons(ETH_P_TEB)) { |
3666 |
+@@ -3944,6 +3944,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff |
3667 |
+ } |
3668 |
+ NAPI_GRO_CB(skb)->count = 1; |
3669 |
+ NAPI_GRO_CB(skb)->age = jiffies; |
3670 |
++ NAPI_GRO_CB(skb)->last = skb; |
3671 |
+ skb_shinfo(skb)->gso_size = skb_gro_len(skb); |
3672 |
+ skb->next = napi->gro_list; |
3673 |
+ napi->gro_list = skb; |
3674 |
+@@ -4050,6 +4051,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) |
3675 |
+ skb->vlan_tci = 0; |
3676 |
+ skb->dev = napi->dev; |
3677 |
+ skb->skb_iif = 0; |
3678 |
++ skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); |
3679 |
+ |
3680 |
+ napi->skb = skb; |
3681 |
+ } |
3682 |
+@@ -4605,6 +4607,32 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev, |
3683 |
+ EXPORT_SYMBOL(netdev_lower_get_next_private_rcu); |
3684 |
+ |
3685 |
+ /** |
3686 |
++ * netdev_lower_get_next - Get the next device from the lower neighbour |
3687 |
++ * list |
3688 |
++ * @dev: device |
3689 |
++ * @iter: list_head ** of the current position |
3690 |
++ * |
3691 |
++ * Gets the next netdev_adjacent from the dev's lower neighbour |
3692 |
++ * list, starting from iter position. The caller must hold RTNL lock or |
3693 |
++ * its own locking that guarantees that the neighbour lower |
3694 |
++ * list will remain unchainged. |
3695 |
++ */ |
3696 |
++void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter) |
3697 |
++{ |
3698 |
++ struct netdev_adjacent *lower; |
3699 |
++ |
3700 |
++ lower = list_entry((*iter)->next, struct netdev_adjacent, list); |
3701 |
++ |
3702 |
++ if (&lower->list == &dev->adj_list.lower) |
3703 |
++ return NULL; |
3704 |
++ |
3705 |
++ *iter = &lower->list; |
3706 |
++ |
3707 |
++ return lower->dev; |
3708 |
++} |
3709 |
++EXPORT_SYMBOL(netdev_lower_get_next); |
3710 |
++ |
3711 |
++/** |
3712 |
+ * netdev_lower_get_first_private_rcu - Get the first ->private from the |
3713 |
+ * lower neighbour list, RCU |
3714 |
+ * variant |
3715 |
+@@ -5054,6 +5082,30 @@ void *netdev_lower_dev_get_private(struct net_device *dev, |
3716 |
+ } |
3717 |
+ EXPORT_SYMBOL(netdev_lower_dev_get_private); |
3718 |
+ |
3719 |
++ |
3720 |
++int dev_get_nest_level(struct net_device *dev, |
3721 |
++ bool (*type_check)(struct net_device *dev)) |
3722 |
++{ |
3723 |
++ struct net_device *lower = NULL; |
3724 |
++ struct list_head *iter; |
3725 |
++ int max_nest = -1; |
3726 |
++ int nest; |
3727 |
++ |
3728 |
++ ASSERT_RTNL(); |
3729 |
++ |
3730 |
++ netdev_for_each_lower_dev(dev, lower, iter) { |
3731 |
++ nest = dev_get_nest_level(lower, type_check); |
3732 |
++ if (max_nest < nest) |
3733 |
++ max_nest = nest; |
3734 |
++ } |
3735 |
++ |
3736 |
++ if (type_check(dev)) |
3737 |
++ max_nest++; |
3738 |
++ |
3739 |
++ return max_nest; |
3740 |
++} |
3741 |
++EXPORT_SYMBOL(dev_get_nest_level); |
3742 |
++ |
3743 |
+ static void dev_change_rx_flags(struct net_device *dev, int flags) |
3744 |
+ { |
3745 |
+ const struct net_device_ops *ops = dev->netdev_ops; |
3746 |
+@@ -5523,7 +5575,7 @@ static int dev_new_index(struct net *net) |
3747 |
+ |
3748 |
+ /* Delayed registration/unregisteration */ |
3749 |
+ static LIST_HEAD(net_todo_list); |
3750 |
+-static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); |
3751 |
++DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); |
3752 |
+ |
3753 |
+ static void net_set_todo(struct net_device *dev) |
3754 |
+ { |
3755 |
+diff --git a/net/core/filter.c b/net/core/filter.c |
3756 |
+index ad30d626a5bd..ebce437678fc 100644 |
3757 |
+--- a/net/core/filter.c |
3758 |
++++ b/net/core/filter.c |
3759 |
+@@ -355,6 +355,8 @@ load_b: |
3760 |
+ |
3761 |
+ if (skb_is_nonlinear(skb)) |
3762 |
+ return 0; |
3763 |
++ if (skb->len < sizeof(struct nlattr)) |
3764 |
++ return 0; |
3765 |
+ if (A > skb->len - sizeof(struct nlattr)) |
3766 |
+ return 0; |
3767 |
+ |
3768 |
+@@ -371,11 +373,13 @@ load_b: |
3769 |
+ |
3770 |
+ if (skb_is_nonlinear(skb)) |
3771 |
+ return 0; |
3772 |
++ if (skb->len < sizeof(struct nlattr)) |
3773 |
++ return 0; |
3774 |
+ if (A > skb->len - sizeof(struct nlattr)) |
3775 |
+ return 0; |
3776 |
+ |
3777 |
+ nla = (struct nlattr *)&skb->data[A]; |
3778 |
+- if (nla->nla_len > A - skb->len) |
3779 |
++ if (nla->nla_len > skb->len - A) |
3780 |
+ return 0; |
3781 |
+ |
3782 |
+ nla = nla_find_nested(nla, X); |
3783 |
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c |
3784 |
+index e16129019c66..7d95f69635c6 100644 |
3785 |
+--- a/net/core/neighbour.c |
3786 |
++++ b/net/core/neighbour.c |
3787 |
+@@ -1247,8 +1247,8 @@ void __neigh_set_probe_once(struct neighbour *neigh) |
3788 |
+ neigh->updated = jiffies; |
3789 |
+ if (!(neigh->nud_state & NUD_FAILED)) |
3790 |
+ return; |
3791 |
+- neigh->nud_state = NUD_PROBE; |
3792 |
+- atomic_set(&neigh->probes, NEIGH_VAR(neigh->parms, UCAST_PROBES)); |
3793 |
++ neigh->nud_state = NUD_INCOMPLETE; |
3794 |
++ atomic_set(&neigh->probes, neigh_max_probes(neigh)); |
3795 |
+ neigh_add_timer(neigh, |
3796 |
+ jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME)); |
3797 |
+ } |
3798 |
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c |
3799 |
+index 81d3a9a08453..7c8ffd974961 100644 |
3800 |
+--- a/net/core/net_namespace.c |
3801 |
++++ b/net/core/net_namespace.c |
3802 |
+@@ -24,7 +24,7 @@ |
3803 |
+ |
3804 |
+ static LIST_HEAD(pernet_list); |
3805 |
+ static struct list_head *first_device = &pernet_list; |
3806 |
+-static DEFINE_MUTEX(net_mutex); |
3807 |
++DEFINE_MUTEX(net_mutex); |
3808 |
+ |
3809 |
+ LIST_HEAD(net_namespace_list); |
3810 |
+ EXPORT_SYMBOL_GPL(net_namespace_list); |
3811 |
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c |
3812 |
+index 120eecc0f5a4..83b9d6ae5119 100644 |
3813 |
+--- a/net/core/rtnetlink.c |
3814 |
++++ b/net/core/rtnetlink.c |
3815 |
+@@ -353,15 +353,46 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops) |
3816 |
+ } |
3817 |
+ EXPORT_SYMBOL_GPL(__rtnl_link_unregister); |
3818 |
+ |
3819 |
++/* Return with the rtnl_lock held when there are no network |
3820 |
++ * devices unregistering in any network namespace. |
3821 |
++ */ |
3822 |
++static void rtnl_lock_unregistering_all(void) |
3823 |
++{ |
3824 |
++ struct net *net; |
3825 |
++ bool unregistering; |
3826 |
++ DEFINE_WAIT(wait); |
3827 |
++ |
3828 |
++ for (;;) { |
3829 |
++ prepare_to_wait(&netdev_unregistering_wq, &wait, |
3830 |
++ TASK_UNINTERRUPTIBLE); |
3831 |
++ unregistering = false; |
3832 |
++ rtnl_lock(); |
3833 |
++ for_each_net(net) { |
3834 |
++ if (net->dev_unreg_count > 0) { |
3835 |
++ unregistering = true; |
3836 |
++ break; |
3837 |
++ } |
3838 |
++ } |
3839 |
++ if (!unregistering) |
3840 |
++ break; |
3841 |
++ __rtnl_unlock(); |
3842 |
++ schedule(); |
3843 |
++ } |
3844 |
++ finish_wait(&netdev_unregistering_wq, &wait); |
3845 |
++} |
3846 |
++ |
3847 |
+ /** |
3848 |
+ * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink. |
3849 |
+ * @ops: struct rtnl_link_ops * to unregister |
3850 |
+ */ |
3851 |
+ void rtnl_link_unregister(struct rtnl_link_ops *ops) |
3852 |
+ { |
3853 |
+- rtnl_lock(); |
3854 |
++ /* Close the race with cleanup_net() */ |
3855 |
++ mutex_lock(&net_mutex); |
3856 |
++ rtnl_lock_unregistering_all(); |
3857 |
+ __rtnl_link_unregister(ops); |
3858 |
+ rtnl_unlock(); |
3859 |
++ mutex_unlock(&net_mutex); |
3860 |
+ } |
3861 |
+ EXPORT_SYMBOL_GPL(rtnl_link_unregister); |
3862 |
+ |
3863 |
+@@ -774,7 +805,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev, |
3864 |
+ return 0; |
3865 |
+ } |
3866 |
+ |
3867 |
+-static size_t rtnl_port_size(const struct net_device *dev) |
3868 |
++static size_t rtnl_port_size(const struct net_device *dev, |
3869 |
++ u32 ext_filter_mask) |
3870 |
+ { |
3871 |
+ size_t port_size = nla_total_size(4) /* PORT_VF */ |
3872 |
+ + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */ |
3873 |
+@@ -790,7 +822,8 @@ static size_t rtnl_port_size(const struct net_device *dev) |
3874 |
+ size_t port_self_size = nla_total_size(sizeof(struct nlattr)) |
3875 |
+ + port_size; |
3876 |
+ |
3877 |
+- if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent) |
3878 |
++ if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent || |
3879 |
++ !(ext_filter_mask & RTEXT_FILTER_VF)) |
3880 |
+ return 0; |
3881 |
+ if (dev_num_vf(dev->dev.parent)) |
3882 |
+ return port_self_size + vf_ports_size + |
3883 |
+@@ -825,7 +858,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, |
3884 |
+ + nla_total_size(ext_filter_mask |
3885 |
+ & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */ |
3886 |
+ + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */ |
3887 |
+- + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */ |
3888 |
++ + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */ |
3889 |
+ + rtnl_link_get_size(dev) /* IFLA_LINKINFO */ |
3890 |
+ + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */ |
3891 |
+ + nla_total_size(MAX_PHYS_PORT_ID_LEN); /* IFLA_PHYS_PORT_ID */ |
3892 |
+@@ -887,11 +920,13 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev) |
3893 |
+ return 0; |
3894 |
+ } |
3895 |
+ |
3896 |
+-static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev) |
3897 |
++static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev, |
3898 |
++ u32 ext_filter_mask) |
3899 |
+ { |
3900 |
+ int err; |
3901 |
+ |
3902 |
+- if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent) |
3903 |
++ if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent || |
3904 |
++ !(ext_filter_mask & RTEXT_FILTER_VF)) |
3905 |
+ return 0; |
3906 |
+ |
3907 |
+ err = rtnl_port_self_fill(skb, dev); |
3908 |
+@@ -1076,7 +1111,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, |
3909 |
+ nla_nest_end(skb, vfinfo); |
3910 |
+ } |
3911 |
+ |
3912 |
+- if (rtnl_port_fill(skb, dev)) |
3913 |
++ if (rtnl_port_fill(skb, dev, ext_filter_mask)) |
3914 |
+ goto nla_put_failure; |
3915 |
+ |
3916 |
+ if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) { |
3917 |
+@@ -1130,6 +1165,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) |
3918 |
+ struct hlist_head *head; |
3919 |
+ struct nlattr *tb[IFLA_MAX+1]; |
3920 |
+ u32 ext_filter_mask = 0; |
3921 |
++ int err; |
3922 |
+ |
3923 |
+ s_h = cb->args[0]; |
3924 |
+ s_idx = cb->args[1]; |
3925 |
+@@ -1150,11 +1186,17 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) |
3926 |
+ hlist_for_each_entry_rcu(dev, head, index_hlist) { |
3927 |
+ if (idx < s_idx) |
3928 |
+ goto cont; |
3929 |
+- if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, |
3930 |
+- NETLINK_CB(cb->skb).portid, |
3931 |
+- cb->nlh->nlmsg_seq, 0, |
3932 |
+- NLM_F_MULTI, |
3933 |
+- ext_filter_mask) <= 0) |
3934 |
++ err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, |
3935 |
++ NETLINK_CB(cb->skb).portid, |
3936 |
++ cb->nlh->nlmsg_seq, 0, |
3937 |
++ NLM_F_MULTI, |
3938 |
++ ext_filter_mask); |
3939 |
++ /* If we ran out of room on the first message, |
3940 |
++ * we're in trouble |
3941 |
++ */ |
3942 |
++ WARN_ON((err == -EMSGSIZE) && (skb->len == 0)); |
3943 |
++ |
3944 |
++ if (err <= 0) |
3945 |
+ goto out; |
3946 |
+ |
3947 |
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb)); |
3948 |
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c |
3949 |
+index 90b96a11b974..e5ae776ee9b4 100644 |
3950 |
+--- a/net/core/skbuff.c |
3951 |
++++ b/net/core/skbuff.c |
3952 |
+@@ -3076,7 +3076,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) |
3953 |
+ if (unlikely(p->len + len >= 65536)) |
3954 |
+ return -E2BIG; |
3955 |
+ |
3956 |
+- lp = NAPI_GRO_CB(p)->last ?: p; |
3957 |
++ lp = NAPI_GRO_CB(p)->last; |
3958 |
+ pinfo = skb_shinfo(lp); |
3959 |
+ |
3960 |
+ if (headlen <= offset) { |
3961 |
+@@ -3192,7 +3192,7 @@ merge: |
3962 |
+ |
3963 |
+ __skb_pull(skb, offset); |
3964 |
+ |
3965 |
+- if (!NAPI_GRO_CB(p)->last) |
3966 |
++ if (NAPI_GRO_CB(p)->last == p) |
3967 |
+ skb_shinfo(p)->frag_list = skb; |
3968 |
+ else |
3969 |
+ NAPI_GRO_CB(p)->last->next = skb; |
3970 |
+@@ -3951,12 +3951,14 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet); |
3971 |
+ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) |
3972 |
+ { |
3973 |
+ const struct skb_shared_info *shinfo = skb_shinfo(skb); |
3974 |
+- unsigned int hdr_len; |
3975 |
+ |
3976 |
+ if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) |
3977 |
+- hdr_len = tcp_hdrlen(skb); |
3978 |
+- else |
3979 |
+- hdr_len = sizeof(struct udphdr); |
3980 |
+- return hdr_len + shinfo->gso_size; |
3981 |
++ return tcp_hdrlen(skb) + shinfo->gso_size; |
3982 |
++ |
3983 |
++ /* UFO sets gso_size to the size of the fragmentation |
3984 |
++ * payload, i.e. the size of the L4 (UDP) header is already |
3985 |
++ * accounted for. |
3986 |
++ */ |
3987 |
++ return shinfo->gso_size; |
3988 |
+ } |
3989 |
+ EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); |
3990 |
+diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c |
3991 |
+index a0e9cf6379de..6a7fae228634 100644 |
3992 |
+--- a/net/core/sock_diag.c |
3993 |
++++ b/net/core/sock_diag.c |
3994 |
+@@ -49,7 +49,7 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype) |
3995 |
+ } |
3996 |
+ EXPORT_SYMBOL_GPL(sock_diag_put_meminfo); |
3997 |
+ |
3998 |
+-int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk, |
3999 |
++int sock_diag_put_filterinfo(struct sock *sk, |
4000 |
+ struct sk_buff *skb, int attrtype) |
4001 |
+ { |
4002 |
+ struct nlattr *attr; |
4003 |
+@@ -57,7 +57,7 @@ int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk, |
4004 |
+ unsigned int len; |
4005 |
+ int err = 0; |
4006 |
+ |
4007 |
+- if (!ns_capable(user_ns, CAP_NET_ADMIN)) { |
4008 |
++ if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { |
4009 |
+ nla_reserve(skb, attrtype, 0); |
4010 |
+ return 0; |
4011 |
+ } |
4012 |
+diff --git a/net/core/utils.c b/net/core/utils.c |
4013 |
+index 2f737bf90b3f..eed34338736c 100644 |
4014 |
+--- a/net/core/utils.c |
4015 |
++++ b/net/core/utils.c |
4016 |
+@@ -348,8 +348,8 @@ static void __net_random_once_deferred(struct work_struct *w) |
4017 |
+ { |
4018 |
+ struct __net_random_once_work *work = |
4019 |
+ container_of(w, struct __net_random_once_work, work); |
4020 |
+- if (!static_key_enabled(work->key)) |
4021 |
+- static_key_slow_inc(work->key); |
4022 |
++ BUG_ON(!static_key_enabled(work->key)); |
4023 |
++ static_key_slow_dec(work->key); |
4024 |
+ kfree(work); |
4025 |
+ } |
4026 |
+ |
4027 |
+@@ -367,7 +367,7 @@ static void __net_random_once_disable_jump(struct static_key *key) |
4028 |
+ } |
4029 |
+ |
4030 |
+ bool __net_get_random_once(void *buf, int nbytes, bool *done, |
4031 |
+- struct static_key *done_key) |
4032 |
++ struct static_key *once_key) |
4033 |
+ { |
4034 |
+ static DEFINE_SPINLOCK(lock); |
4035 |
+ unsigned long flags; |
4036 |
+@@ -382,7 +382,7 @@ bool __net_get_random_once(void *buf, int nbytes, bool *done, |
4037 |
+ *done = true; |
4038 |
+ spin_unlock_irqrestore(&lock, flags); |
4039 |
+ |
4040 |
+- __net_random_once_disable_jump(done_key); |
4041 |
++ __net_random_once_disable_jump(once_key); |
4042 |
+ |
4043 |
+ return true; |
4044 |
+ } |
4045 |
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c |
4046 |
+index b53f0bf84dca..9d43468722ed 100644 |
4047 |
+--- a/net/ipv4/fib_semantics.c |
4048 |
++++ b/net/ipv4/fib_semantics.c |
4049 |
+@@ -820,13 +820,13 @@ struct fib_info *fib_create_info(struct fib_config *cfg) |
4050 |
+ fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); |
4051 |
+ if (fi == NULL) |
4052 |
+ goto failure; |
4053 |
++ fib_info_cnt++; |
4054 |
+ if (cfg->fc_mx) { |
4055 |
+ fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); |
4056 |
+ if (!fi->fib_metrics) |
4057 |
+ goto failure; |
4058 |
+ } else |
4059 |
+ fi->fib_metrics = (u32 *) dst_default_metrics; |
4060 |
+- fib_info_cnt++; |
4061 |
+ |
4062 |
+ fi->fib_net = hold_net(net); |
4063 |
+ fi->fib_protocol = cfg->fc_protocol; |
4064 |
+diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c |
4065 |
+index f3869c186d97..1c6bd4359cbd 100644 |
4066 |
+--- a/net/ipv4/ip_forward.c |
4067 |
++++ b/net/ipv4/ip_forward.c |
4068 |
+@@ -42,12 +42,12 @@ |
4069 |
+ static bool ip_may_fragment(const struct sk_buff *skb) |
4070 |
+ { |
4071 |
+ return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) || |
4072 |
+- !skb->local_df; |
4073 |
++ skb->local_df; |
4074 |
+ } |
4075 |
+ |
4076 |
+ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) |
4077 |
+ { |
4078 |
+- if (skb->len <= mtu || skb->local_df) |
4079 |
++ if (skb->len <= mtu) |
4080 |
+ return false; |
4081 |
+ |
4082 |
+ if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) |
4083 |
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c |
4084 |
+index ec4f762efda5..94213c891565 100644 |
4085 |
+--- a/net/ipv4/ip_gre.c |
4086 |
++++ b/net/ipv4/ip_gre.c |
4087 |
+@@ -463,6 +463,7 @@ static const struct net_device_ops ipgre_netdev_ops = { |
4088 |
+ static void ipgre_tunnel_setup(struct net_device *dev) |
4089 |
+ { |
4090 |
+ dev->netdev_ops = &ipgre_netdev_ops; |
4091 |
++ dev->type = ARPHRD_IPGRE; |
4092 |
+ ip_tunnel_setup(dev, ipgre_net_id); |
4093 |
+ } |
4094 |
+ |
4095 |
+@@ -501,7 +502,6 @@ static int ipgre_tunnel_init(struct net_device *dev) |
4096 |
+ memcpy(dev->dev_addr, &iph->saddr, 4); |
4097 |
+ memcpy(dev->broadcast, &iph->daddr, 4); |
4098 |
+ |
4099 |
+- dev->type = ARPHRD_IPGRE; |
4100 |
+ dev->flags = IFF_NOARP; |
4101 |
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; |
4102 |
+ dev->addr_len = 4; |
4103 |
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c |
4104 |
+index a82a22d8f77f..0c3a5d17b4a9 100644 |
4105 |
+--- a/net/ipv4/ip_tunnel.c |
4106 |
++++ b/net/ipv4/ip_tunnel.c |
4107 |
+@@ -438,6 +438,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, |
4108 |
+ tunnel->i_seqno = ntohl(tpi->seq) + 1; |
4109 |
+ } |
4110 |
+ |
4111 |
++ skb_reset_network_header(skb); |
4112 |
++ |
4113 |
+ err = IP_ECN_decapsulate(iph, skb); |
4114 |
+ if (unlikely(err)) { |
4115 |
+ if (log_ecn_error) |
4116 |
+@@ -534,9 +536,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, |
4117 |
+ unsigned int max_headroom; /* The extra header space needed */ |
4118 |
+ __be32 dst; |
4119 |
+ int err; |
4120 |
+- bool connected = true; |
4121 |
++ bool connected; |
4122 |
+ |
4123 |
+ inner_iph = (const struct iphdr *)skb_inner_network_header(skb); |
4124 |
++ connected = (tunnel->parms.iph.daddr != 0); |
4125 |
+ |
4126 |
+ dst = tnl_params->daddr; |
4127 |
+ if (dst == 0) { |
4128 |
+@@ -872,6 +875,7 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, |
4129 |
+ */ |
4130 |
+ if (!IS_ERR(itn->fb_tunnel_dev)) { |
4131 |
+ itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL; |
4132 |
++ itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev); |
4133 |
+ ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev)); |
4134 |
+ } |
4135 |
+ rtnl_unlock(); |
4136 |
+diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c |
4137 |
+index 48eafae51769..e4a8f76c8995 100644 |
4138 |
+--- a/net/ipv4/ip_vti.c |
4139 |
++++ b/net/ipv4/ip_vti.c |
4140 |
+@@ -207,6 +207,7 @@ static const struct net_device_ops vti_netdev_ops = { |
4141 |
+ static void vti_tunnel_setup(struct net_device *dev) |
4142 |
+ { |
4143 |
+ dev->netdev_ops = &vti_netdev_ops; |
4144 |
++ dev->type = ARPHRD_TUNNEL; |
4145 |
+ ip_tunnel_setup(dev, vti_net_id); |
4146 |
+ } |
4147 |
+ |
4148 |
+@@ -218,7 +219,6 @@ static int vti_tunnel_init(struct net_device *dev) |
4149 |
+ memcpy(dev->dev_addr, &iph->saddr, 4); |
4150 |
+ memcpy(dev->broadcast, &iph->daddr, 4); |
4151 |
+ |
4152 |
+- dev->type = ARPHRD_TUNNEL; |
4153 |
+ dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); |
4154 |
+ dev->mtu = ETH_DATA_LEN; |
4155 |
+ dev->flags = IFF_NOARP; |
4156 |
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c |
4157 |
+index 59da7cde0724..f95b6f93814b 100644 |
4158 |
+--- a/net/ipv4/netfilter/arp_tables.c |
4159 |
++++ b/net/ipv4/netfilter/arp_tables.c |
4160 |
+@@ -1044,8 +1044,10 @@ static int __do_replace(struct net *net, const char *name, |
4161 |
+ |
4162 |
+ xt_free_table_info(oldinfo); |
4163 |
+ if (copy_to_user(counters_ptr, counters, |
4164 |
+- sizeof(struct xt_counters) * num_counters) != 0) |
4165 |
+- ret = -EFAULT; |
4166 |
++ sizeof(struct xt_counters) * num_counters) != 0) { |
4167 |
++ /* Silent error, can't fail, new table is already in place */ |
4168 |
++ net_warn_ratelimited("arptables: counters copy to user failed while replacing table\n"); |
4169 |
++ } |
4170 |
+ vfree(counters); |
4171 |
+ xt_table_unlock(t); |
4172 |
+ return ret; |
4173 |
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c |
4174 |
+index 718dfbd30cbe..99e810f84671 100644 |
4175 |
+--- a/net/ipv4/netfilter/ip_tables.c |
4176 |
++++ b/net/ipv4/netfilter/ip_tables.c |
4177 |
+@@ -1231,8 +1231,10 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, |
4178 |
+ |
4179 |
+ xt_free_table_info(oldinfo); |
4180 |
+ if (copy_to_user(counters_ptr, counters, |
4181 |
+- sizeof(struct xt_counters) * num_counters) != 0) |
4182 |
+- ret = -EFAULT; |
4183 |
++ sizeof(struct xt_counters) * num_counters) != 0) { |
4184 |
++ /* Silent error, can't fail, new table is already in place */ |
4185 |
++ net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n"); |
4186 |
++ } |
4187 |
+ vfree(counters); |
4188 |
+ xt_table_unlock(t); |
4189 |
+ return ret; |
4190 |
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c |
4191 |
+index 2d11c094296e..e21934b06d4c 100644 |
4192 |
+--- a/net/ipv4/ping.c |
4193 |
++++ b/net/ipv4/ping.c |
4194 |
+@@ -252,26 +252,33 @@ int ping_init_sock(struct sock *sk) |
4195 |
+ { |
4196 |
+ struct net *net = sock_net(sk); |
4197 |
+ kgid_t group = current_egid(); |
4198 |
+- struct group_info *group_info = get_current_groups(); |
4199 |
+- int i, j, count = group_info->ngroups; |
4200 |
++ struct group_info *group_info; |
4201 |
++ int i, j, count; |
4202 |
+ kgid_t low, high; |
4203 |
++ int ret = 0; |
4204 |
+ |
4205 |
+ inet_get_ping_group_range_net(net, &low, &high); |
4206 |
+ if (gid_lte(low, group) && gid_lte(group, high)) |
4207 |
+ return 0; |
4208 |
+ |
4209 |
++ group_info = get_current_groups(); |
4210 |
++ count = group_info->ngroups; |
4211 |
+ for (i = 0; i < group_info->nblocks; i++) { |
4212 |
+ int cp_count = min_t(int, NGROUPS_PER_BLOCK, count); |
4213 |
+ for (j = 0; j < cp_count; j++) { |
4214 |
+ kgid_t gid = group_info->blocks[i][j]; |
4215 |
+ if (gid_lte(low, gid) && gid_lte(gid, high)) |
4216 |
+- return 0; |
4217 |
++ goto out_release_group; |
4218 |
+ } |
4219 |
+ |
4220 |
+ count -= cp_count; |
4221 |
+ } |
4222 |
+ |
4223 |
+- return -EACCES; |
4224 |
++ ret = -EACCES; |
4225 |
++ |
4226 |
++out_release_group: |
4227 |
++ put_group_info(group_info); |
4228 |
++ return ret; |
4229 |
+ } |
4230 |
+ EXPORT_SYMBOL_GPL(ping_init_sock); |
4231 |
+ |
4232 |
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c |
4233 |
+index 4c011ec69ed4..134437309b1e 100644 |
4234 |
+--- a/net/ipv4/route.c |
4235 |
++++ b/net/ipv4/route.c |
4236 |
+@@ -1526,7 +1526,7 @@ static int __mkroute_input(struct sk_buff *skb, |
4237 |
+ struct in_device *out_dev; |
4238 |
+ unsigned int flags = 0; |
4239 |
+ bool do_cache; |
4240 |
+- u32 itag; |
4241 |
++ u32 itag = 0; |
4242 |
+ |
4243 |
+ /* get a working reference to the output device */ |
4244 |
+ out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res)); |
4245 |
+@@ -2364,7 +2364,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, |
4246 |
+ } |
4247 |
+ } else |
4248 |
+ #endif |
4249 |
+- if (nla_put_u32(skb, RTA_IIF, rt->rt_iif)) |
4250 |
++ if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex)) |
4251 |
+ goto nla_put_failure; |
4252 |
+ } |
4253 |
+ |
4254 |
+diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c |
4255 |
+index 828e4c3ffbaf..121a9a22dc98 100644 |
4256 |
+--- a/net/ipv4/tcp_cubic.c |
4257 |
++++ b/net/ipv4/tcp_cubic.c |
4258 |
+@@ -409,7 +409,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) |
4259 |
+ ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT; |
4260 |
+ ratio += cnt; |
4261 |
+ |
4262 |
+- ca->delayed_ack = min(ratio, ACK_RATIO_LIMIT); |
4263 |
++ ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT); |
4264 |
+ } |
4265 |
+ |
4266 |
+ /* Some calls are for duplicates without timetamps */ |
4267 |
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c |
4268 |
+index 075602fc6b6a..1e55f5eba185 100644 |
4269 |
+--- a/net/ipv6/ip6_fib.c |
4270 |
++++ b/net/ipv6/ip6_fib.c |
4271 |
+@@ -1418,7 +1418,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w) |
4272 |
+ |
4273 |
+ if (w->skip) { |
4274 |
+ w->skip--; |
4275 |
+- continue; |
4276 |
++ goto skip; |
4277 |
+ } |
4278 |
+ |
4279 |
+ err = w->func(w); |
4280 |
+@@ -1428,6 +1428,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w) |
4281 |
+ w->count++; |
4282 |
+ continue; |
4283 |
+ } |
4284 |
++skip: |
4285 |
+ w->state = FWS_U; |
4286 |
+ case FWS_U: |
4287 |
+ if (fn == w->root) |
4288 |
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c |
4289 |
+index f3ffb43f59c0..2465d18e8a26 100644 |
4290 |
+--- a/net/ipv6/ip6_gre.c |
4291 |
++++ b/net/ipv6/ip6_gre.c |
4292 |
+@@ -1566,6 +1566,15 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[], |
4293 |
+ return 0; |
4294 |
+ } |
4295 |
+ |
4296 |
++static void ip6gre_dellink(struct net_device *dev, struct list_head *head) |
4297 |
++{ |
4298 |
++ struct net *net = dev_net(dev); |
4299 |
++ struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); |
4300 |
++ |
4301 |
++ if (dev != ign->fb_tunnel_dev) |
4302 |
++ unregister_netdevice_queue(dev, head); |
4303 |
++} |
4304 |
++ |
4305 |
+ static size_t ip6gre_get_size(const struct net_device *dev) |
4306 |
+ { |
4307 |
+ return |
4308 |
+@@ -1643,6 +1652,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = { |
4309 |
+ .validate = ip6gre_tunnel_validate, |
4310 |
+ .newlink = ip6gre_newlink, |
4311 |
+ .changelink = ip6gre_changelink, |
4312 |
++ .dellink = ip6gre_dellink, |
4313 |
+ .get_size = ip6gre_get_size, |
4314 |
+ .fill_info = ip6gre_fill_info, |
4315 |
+ }; |
4316 |
+diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c |
4317 |
+index 59f95affceb0..b2f091566f88 100644 |
4318 |
+--- a/net/ipv6/ip6_offload.c |
4319 |
++++ b/net/ipv6/ip6_offload.c |
4320 |
+@@ -196,7 +196,6 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, |
4321 |
+ unsigned int off; |
4322 |
+ u16 flush = 1; |
4323 |
+ int proto; |
4324 |
+- __wsum csum; |
4325 |
+ |
4326 |
+ off = skb_gro_offset(skb); |
4327 |
+ hlen = off + sizeof(*iph); |
4328 |
+@@ -264,13 +263,10 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, |
4329 |
+ |
4330 |
+ NAPI_GRO_CB(skb)->flush |= flush; |
4331 |
+ |
4332 |
+- csum = skb->csum; |
4333 |
+- skb_postpull_rcsum(skb, iph, skb_network_header_len(skb)); |
4334 |
++ skb_gro_postpull_rcsum(skb, iph, nlen); |
4335 |
+ |
4336 |
+ pp = ops->callbacks.gro_receive(head, skb); |
4337 |
+ |
4338 |
+- skb->csum = csum; |
4339 |
+- |
4340 |
+ out_unlock: |
4341 |
+ rcu_read_unlock(); |
4342 |
+ |
4343 |
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c |
4344 |
+index 3702d179506d..a62b610307ec 100644 |
4345 |
+--- a/net/ipv6/ip6_output.c |
4346 |
++++ b/net/ipv6/ip6_output.c |
4347 |
+@@ -344,12 +344,16 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst) |
4348 |
+ |
4349 |
+ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu) |
4350 |
+ { |
4351 |
+- if (skb->len <= mtu || skb->local_df) |
4352 |
++ if (skb->len <= mtu) |
4353 |
+ return false; |
4354 |
+ |
4355 |
++ /* ipv6 conntrack defrag sets max_frag_size + local_df */ |
4356 |
+ if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu) |
4357 |
+ return true; |
4358 |
+ |
4359 |
++ if (skb->local_df) |
4360 |
++ return false; |
4361 |
++ |
4362 |
+ if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) |
4363 |
+ return false; |
4364 |
+ |
4365 |
+@@ -1225,7 +1229,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, |
4366 |
+ unsigned int maxnonfragsize, headersize; |
4367 |
+ |
4368 |
+ headersize = sizeof(struct ipv6hdr) + |
4369 |
+- (opt ? opt->tot_len : 0) + |
4370 |
++ (opt ? opt->opt_flen + opt->opt_nflen : 0) + |
4371 |
+ (dst_allfrag(&rt->dst) ? |
4372 |
+ sizeof(struct frag_hdr) : 0) + |
4373 |
+ rt->rt6i_nfheader_len; |
4374 |
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c |
4375 |
+index 5db8d310f9c0..0e51f68ab163 100644 |
4376 |
+--- a/net/ipv6/ip6_tunnel.c |
4377 |
++++ b/net/ipv6/ip6_tunnel.c |
4378 |
+@@ -1564,7 +1564,7 @@ static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[]) |
4379 |
+ { |
4380 |
+ u8 proto; |
4381 |
+ |
4382 |
+- if (!data) |
4383 |
++ if (!data || !data[IFLA_IPTUN_PROTO]) |
4384 |
+ return 0; |
4385 |
+ |
4386 |
+ proto = nla_get_u8(data[IFLA_IPTUN_PROTO]); |
4387 |
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c |
4388 |
+index 710238f58aa9..e080fbbbc0e5 100644 |
4389 |
+--- a/net/ipv6/netfilter/ip6_tables.c |
4390 |
++++ b/net/ipv6/netfilter/ip6_tables.c |
4391 |
+@@ -1241,8 +1241,10 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, |
4392 |
+ |
4393 |
+ xt_free_table_info(oldinfo); |
4394 |
+ if (copy_to_user(counters_ptr, counters, |
4395 |
+- sizeof(struct xt_counters) * num_counters) != 0) |
4396 |
+- ret = -EFAULT; |
4397 |
++ sizeof(struct xt_counters) * num_counters) != 0) { |
4398 |
++ /* Silent error, can't fail, new table is already in place */ |
4399 |
++ net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n"); |
4400 |
++ } |
4401 |
+ vfree(counters); |
4402 |
+ xt_table_unlock(t); |
4403 |
+ return ret; |
4404 |
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c |
4405 |
+index fba54a407bb2..7cc1102e298c 100644 |
4406 |
+--- a/net/ipv6/route.c |
4407 |
++++ b/net/ipv6/route.c |
4408 |
+@@ -1342,7 +1342,7 @@ static unsigned int ip6_mtu(const struct dst_entry *dst) |
4409 |
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); |
4410 |
+ |
4411 |
+ if (mtu) |
4412 |
+- return mtu; |
4413 |
++ goto out; |
4414 |
+ |
4415 |
+ mtu = IPV6_MIN_MTU; |
4416 |
+ |
4417 |
+@@ -1352,7 +1352,8 @@ static unsigned int ip6_mtu(const struct dst_entry *dst) |
4418 |
+ mtu = idev->cnf.mtu6; |
4419 |
+ rcu_read_unlock(); |
4420 |
+ |
4421 |
+- return mtu; |
4422 |
++out: |
4423 |
++ return min_t(unsigned int, mtu, IP6_MAX_MTU); |
4424 |
+ } |
4425 |
+ |
4426 |
+ static struct dst_entry *icmp6_dst_gc_list; |
4427 |
+diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c |
4428 |
+index 0d78132ff18a..8517d3cd1aed 100644 |
4429 |
+--- a/net/ipv6/tcpv6_offload.c |
4430 |
++++ b/net/ipv6/tcpv6_offload.c |
4431 |
+@@ -42,7 +42,7 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head, |
4432 |
+ if (NAPI_GRO_CB(skb)->flush) |
4433 |
+ goto skip_csum; |
4434 |
+ |
4435 |
+- wsum = skb->csum; |
4436 |
++ wsum = NAPI_GRO_CB(skb)->csum; |
4437 |
+ |
4438 |
+ switch (skb->ip_summed) { |
4439 |
+ case CHECKSUM_NONE: |
4440 |
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c |
4441 |
+index 5990919356a5..ec6606325cda 100644 |
4442 |
+--- a/net/l2tp/l2tp_ppp.c |
4443 |
++++ b/net/l2tp/l2tp_ppp.c |
4444 |
+@@ -756,9 +756,9 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, |
4445 |
+ session->deref = pppol2tp_session_sock_put; |
4446 |
+ |
4447 |
+ /* If PMTU discovery was enabled, use the MTU that was discovered */ |
4448 |
+- dst = sk_dst_get(sk); |
4449 |
++ dst = sk_dst_get(tunnel->sock); |
4450 |
+ if (dst != NULL) { |
4451 |
+- u32 pmtu = dst_mtu(__sk_dst_get(sk)); |
4452 |
++ u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock)); |
4453 |
+ if (pmtu != 0) |
4454 |
+ session->mtu = session->mru = pmtu - |
4455 |
+ PPPOL2TP_HEADER_OVERHEAD; |
4456 |
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c |
4457 |
+index adce01e8bb57..c68e5e0628df 100644 |
4458 |
+--- a/net/netfilter/nf_tables_api.c |
4459 |
++++ b/net/netfilter/nf_tables_api.c |
4460 |
+@@ -1934,7 +1934,8 @@ static const struct nft_set_ops *nft_select_set_ops(const struct nlattr * const |
4461 |
+ |
4462 |
+ static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = { |
4463 |
+ [NFTA_SET_TABLE] = { .type = NLA_STRING }, |
4464 |
+- [NFTA_SET_NAME] = { .type = NLA_STRING }, |
4465 |
++ [NFTA_SET_NAME] = { .type = NLA_STRING, |
4466 |
++ .len = IFNAMSIZ - 1 }, |
4467 |
+ [NFTA_SET_FLAGS] = { .type = NLA_U32 }, |
4468 |
+ [NFTA_SET_KEY_TYPE] = { .type = NLA_U32 }, |
4469 |
+ [NFTA_SET_KEY_LEN] = { .type = NLA_U32 }, |
4470 |
+diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c |
4471 |
+index 90998a6ff8b9..804105391b9a 100644 |
4472 |
+--- a/net/netfilter/nf_tables_core.c |
4473 |
++++ b/net/netfilter/nf_tables_core.c |
4474 |
+@@ -25,9 +25,8 @@ static void nft_cmp_fast_eval(const struct nft_expr *expr, |
4475 |
+ struct nft_data data[NFT_REG_MAX + 1]) |
4476 |
+ { |
4477 |
+ const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr); |
4478 |
+- u32 mask; |
4479 |
++ u32 mask = nft_cmp_fast_mask(priv->len); |
4480 |
+ |
4481 |
+- mask = ~0U >> (sizeof(priv->data) * BITS_PER_BYTE - priv->len); |
4482 |
+ if ((data[priv->sreg].data[0] & mask) == priv->data) |
4483 |
+ return; |
4484 |
+ data[NFT_REG_VERDICT].verdict = NFT_BREAK; |
4485 |
+diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c |
4486 |
+index 954925db414d..e2b3f51c81f1 100644 |
4487 |
+--- a/net/netfilter/nft_cmp.c |
4488 |
++++ b/net/netfilter/nft_cmp.c |
4489 |
+@@ -128,7 +128,7 @@ static int nft_cmp_fast_init(const struct nft_ctx *ctx, |
4490 |
+ BUG_ON(err < 0); |
4491 |
+ desc.len *= BITS_PER_BYTE; |
4492 |
+ |
4493 |
+- mask = ~0U >> (sizeof(priv->data) * BITS_PER_BYTE - desc.len); |
4494 |
++ mask = nft_cmp_fast_mask(desc.len); |
4495 |
+ priv->data = data.data[0] & mask; |
4496 |
+ priv->len = desc.len; |
4497 |
+ return 0; |
4498 |
+diff --git a/net/packet/diag.c b/net/packet/diag.c |
4499 |
+index 533ce4ff108a..435ff99ba8c7 100644 |
4500 |
+--- a/net/packet/diag.c |
4501 |
++++ b/net/packet/diag.c |
4502 |
+@@ -172,7 +172,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, |
4503 |
+ goto out_nlmsg_trim; |
4504 |
+ |
4505 |
+ if ((req->pdiag_show & PACKET_SHOW_FILTER) && |
4506 |
+- sock_diag_put_filterinfo(user_ns, sk, skb, PACKET_DIAG_FILTER)) |
4507 |
++ sock_diag_put_filterinfo(sk, skb, PACKET_DIAG_FILTER)) |
4508 |
+ goto out_nlmsg_trim; |
4509 |
+ |
4510 |
+ return nlmsg_end(skb, nlh); |
4511 |
+diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c |
4512 |
+index eed8404443d8..f435a88d899a 100644 |
4513 |
+--- a/net/sched/cls_tcindex.c |
4514 |
++++ b/net/sched/cls_tcindex.c |
4515 |
+@@ -188,6 +188,12 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = { |
4516 |
+ [TCA_TCINDEX_CLASSID] = { .type = NLA_U32 }, |
4517 |
+ }; |
4518 |
+ |
4519 |
++static void tcindex_filter_result_init(struct tcindex_filter_result *r) |
4520 |
++{ |
4521 |
++ memset(r, 0, sizeof(*r)); |
4522 |
++ tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); |
4523 |
++} |
4524 |
++ |
4525 |
+ static int |
4526 |
+ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, |
4527 |
+ u32 handle, struct tcindex_data *p, |
4528 |
+@@ -207,15 +213,11 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, |
4529 |
+ return err; |
4530 |
+ |
4531 |
+ memcpy(&cp, p, sizeof(cp)); |
4532 |
+- memset(&new_filter_result, 0, sizeof(new_filter_result)); |
4533 |
+- tcf_exts_init(&new_filter_result.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); |
4534 |
++ tcindex_filter_result_init(&new_filter_result); |
4535 |
+ |
4536 |
++ tcindex_filter_result_init(&cr); |
4537 |
+ if (old_r) |
4538 |
+- memcpy(&cr, r, sizeof(cr)); |
4539 |
+- else { |
4540 |
+- memset(&cr, 0, sizeof(cr)); |
4541 |
+- tcf_exts_init(&cr.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); |
4542 |
+- } |
4543 |
++ cr.res = r->res; |
4544 |
+ |
4545 |
+ if (tb[TCA_TCINDEX_HASH]) |
4546 |
+ cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); |
4547 |
+@@ -267,9 +269,14 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, |
4548 |
+ err = -ENOMEM; |
4549 |
+ if (!cp.perfect && !cp.h) { |
4550 |
+ if (valid_perfect_hash(&cp)) { |
4551 |
++ int i; |
4552 |
++ |
4553 |
+ cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL); |
4554 |
+ if (!cp.perfect) |
4555 |
+ goto errout; |
4556 |
++ for (i = 0; i < cp.hash; i++) |
4557 |
++ tcf_exts_init(&cp.perfect[i].exts, TCA_TCINDEX_ACT, |
4558 |
++ TCA_TCINDEX_POLICE); |
4559 |
+ balloc = 1; |
4560 |
+ } else { |
4561 |
+ cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL); |
4562 |
+@@ -295,14 +302,17 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, |
4563 |
+ tcf_bind_filter(tp, &cr.res, base); |
4564 |
+ } |
4565 |
+ |
4566 |
+- tcf_exts_change(tp, &cr.exts, &e); |
4567 |
++ if (old_r) |
4568 |
++ tcf_exts_change(tp, &r->exts, &e); |
4569 |
++ else |
4570 |
++ tcf_exts_change(tp, &cr.exts, &e); |
4571 |
+ |
4572 |
+ tcf_tree_lock(tp); |
4573 |
+ if (old_r && old_r != r) |
4574 |
+- memset(old_r, 0, sizeof(*old_r)); |
4575 |
++ tcindex_filter_result_init(old_r); |
4576 |
+ |
4577 |
+ memcpy(p, &cp, sizeof(cp)); |
4578 |
+- memcpy(r, &cr, sizeof(cr)); |
4579 |
++ r->res = cr.res; |
4580 |
+ |
4581 |
+ if (r == &new_filter_result) { |
4582 |
+ struct tcindex_filter **fp; |
4583 |
+diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c |
4584 |
+index 647680b1c625..039977830783 100644 |
4585 |
+--- a/net/sched/sch_hhf.c |
4586 |
++++ b/net/sched/sch_hhf.c |
4587 |
+@@ -553,11 +553,6 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt) |
4588 |
+ if (err < 0) |
4589 |
+ return err; |
4590 |
+ |
4591 |
+- sch_tree_lock(sch); |
4592 |
+- |
4593 |
+- if (tb[TCA_HHF_BACKLOG_LIMIT]) |
4594 |
+- sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]); |
4595 |
+- |
4596 |
+ if (tb[TCA_HHF_QUANTUM]) |
4597 |
+ new_quantum = nla_get_u32(tb[TCA_HHF_QUANTUM]); |
4598 |
+ |
4599 |
+@@ -567,6 +562,12 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt) |
4600 |
+ non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight; |
4601 |
+ if (non_hh_quantum > INT_MAX) |
4602 |
+ return -EINVAL; |
4603 |
++ |
4604 |
++ sch_tree_lock(sch); |
4605 |
++ |
4606 |
++ if (tb[TCA_HHF_BACKLOG_LIMIT]) |
4607 |
++ sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]); |
4608 |
++ |
4609 |
+ q->quantum = new_quantum; |
4610 |
+ q->hhf_non_hh_weight = new_hhf_non_hh_weight; |
4611 |
+ |
4612 |
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c |
4613 |
+index ee13d28d39d1..878e17aafbe5 100644 |
4614 |
+--- a/net/sctp/associola.c |
4615 |
++++ b/net/sctp/associola.c |
4616 |
+@@ -1396,35 +1396,44 @@ static inline bool sctp_peer_needs_update(struct sctp_association *asoc) |
4617 |
+ return false; |
4618 |
+ } |
4619 |
+ |
4620 |
+-/* Update asoc's rwnd for the approximated state in the buffer, |
4621 |
+- * and check whether SACK needs to be sent. |
4622 |
+- */ |
4623 |
+-void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer) |
4624 |
++/* Increase asoc's rwnd by len and send any window update SACK if needed. */ |
4625 |
++void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len) |
4626 |
+ { |
4627 |
+- int rx_count; |
4628 |
+ struct sctp_chunk *sack; |
4629 |
+ struct timer_list *timer; |
4630 |
+ |
4631 |
+- if (asoc->ep->rcvbuf_policy) |
4632 |
+- rx_count = atomic_read(&asoc->rmem_alloc); |
4633 |
+- else |
4634 |
+- rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); |
4635 |
++ if (asoc->rwnd_over) { |
4636 |
++ if (asoc->rwnd_over >= len) { |
4637 |
++ asoc->rwnd_over -= len; |
4638 |
++ } else { |
4639 |
++ asoc->rwnd += (len - asoc->rwnd_over); |
4640 |
++ asoc->rwnd_over = 0; |
4641 |
++ } |
4642 |
++ } else { |
4643 |
++ asoc->rwnd += len; |
4644 |
++ } |
4645 |
+ |
4646 |
+- if ((asoc->base.sk->sk_rcvbuf - rx_count) > 0) |
4647 |
+- asoc->rwnd = (asoc->base.sk->sk_rcvbuf - rx_count) >> 1; |
4648 |
+- else |
4649 |
+- asoc->rwnd = 0; |
4650 |
++ /* If we had window pressure, start recovering it |
4651 |
++ * once our rwnd had reached the accumulated pressure |
4652 |
++ * threshold. The idea is to recover slowly, but up |
4653 |
++ * to the initial advertised window. |
4654 |
++ */ |
4655 |
++ if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) { |
4656 |
++ int change = min(asoc->pathmtu, asoc->rwnd_press); |
4657 |
++ asoc->rwnd += change; |
4658 |
++ asoc->rwnd_press -= change; |
4659 |
++ } |
4660 |
+ |
4661 |
+- pr_debug("%s: asoc:%p rwnd=%u, rx_count=%d, sk_rcvbuf=%d\n", |
4662 |
+- __func__, asoc, asoc->rwnd, rx_count, |
4663 |
+- asoc->base.sk->sk_rcvbuf); |
4664 |
++ pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n", |
4665 |
++ __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, |
4666 |
++ asoc->a_rwnd); |
4667 |
+ |
4668 |
+ /* Send a window update SACK if the rwnd has increased by at least the |
4669 |
+ * minimum of the association's PMTU and half of the receive buffer. |
4670 |
+ * The algorithm used is similar to the one described in |
4671 |
+ * Section 4.2.3.3 of RFC 1122. |
4672 |
+ */ |
4673 |
+- if (update_peer && sctp_peer_needs_update(asoc)) { |
4674 |
++ if (sctp_peer_needs_update(asoc)) { |
4675 |
+ asoc->a_rwnd = asoc->rwnd; |
4676 |
+ |
4677 |
+ pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u " |
4678 |
+@@ -1446,6 +1455,45 @@ void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer) |
4679 |
+ } |
4680 |
+ } |
4681 |
+ |
4682 |
++/* Decrease asoc's rwnd by len. */ |
4683 |
++void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len) |
4684 |
++{ |
4685 |
++ int rx_count; |
4686 |
++ int over = 0; |
4687 |
++ |
4688 |
++ if (unlikely(!asoc->rwnd || asoc->rwnd_over)) |
4689 |
++ pr_debug("%s: association:%p has asoc->rwnd:%u, " |
4690 |
++ "asoc->rwnd_over:%u!\n", __func__, asoc, |
4691 |
++ asoc->rwnd, asoc->rwnd_over); |
4692 |
++ |
4693 |
++ if (asoc->ep->rcvbuf_policy) |
4694 |
++ rx_count = atomic_read(&asoc->rmem_alloc); |
4695 |
++ else |
4696 |
++ rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); |
4697 |
++ |
4698 |
++ /* If we've reached or overflowed our receive buffer, announce |
4699 |
++ * a 0 rwnd if rwnd would still be positive. Store the |
4700 |
++ * the potential pressure overflow so that the window can be restored |
4701 |
++ * back to original value. |
4702 |
++ */ |
4703 |
++ if (rx_count >= asoc->base.sk->sk_rcvbuf) |
4704 |
++ over = 1; |
4705 |
++ |
4706 |
++ if (asoc->rwnd >= len) { |
4707 |
++ asoc->rwnd -= len; |
4708 |
++ if (over) { |
4709 |
++ asoc->rwnd_press += asoc->rwnd; |
4710 |
++ asoc->rwnd = 0; |
4711 |
++ } |
4712 |
++ } else { |
4713 |
++ asoc->rwnd_over = len - asoc->rwnd; |
4714 |
++ asoc->rwnd = 0; |
4715 |
++ } |
4716 |
++ |
4717 |
++ pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n", |
4718 |
++ __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, |
4719 |
++ asoc->rwnd_press); |
4720 |
++} |
4721 |
+ |
4722 |
+ /* Build the bind address list for the association based on info from the |
4723 |
+ * local endpoint and the remote peer. |
4724 |
+diff --git a/net/sctp/auth.c b/net/sctp/auth.c |
4725 |
+index 683c7d1b1306..0e8529113dc5 100644 |
4726 |
+--- a/net/sctp/auth.c |
4727 |
++++ b/net/sctp/auth.c |
4728 |
+@@ -386,14 +386,13 @@ nomem: |
4729 |
+ */ |
4730 |
+ int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp) |
4731 |
+ { |
4732 |
+- struct net *net = sock_net(asoc->base.sk); |
4733 |
+ struct sctp_auth_bytes *secret; |
4734 |
+ struct sctp_shared_key *ep_key; |
4735 |
+ |
4736 |
+ /* If we don't support AUTH, or peer is not capable |
4737 |
+ * we don't need to do anything. |
4738 |
+ */ |
4739 |
+- if (!net->sctp.auth_enable || !asoc->peer.auth_capable) |
4740 |
++ if (!asoc->ep->auth_enable || !asoc->peer.auth_capable) |
4741 |
+ return 0; |
4742 |
+ |
4743 |
+ /* If the key_id is non-zero and we couldn't find an |
4744 |
+@@ -440,16 +439,16 @@ struct sctp_shared_key *sctp_auth_get_shkey( |
4745 |
+ */ |
4746 |
+ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp) |
4747 |
+ { |
4748 |
+- struct net *net = sock_net(ep->base.sk); |
4749 |
+ struct crypto_hash *tfm = NULL; |
4750 |
+ __u16 id; |
4751 |
+ |
4752 |
+- /* if the transforms are already allocted, we are done */ |
4753 |
+- if (!net->sctp.auth_enable) { |
4754 |
++ /* If AUTH extension is disabled, we are done */ |
4755 |
++ if (!ep->auth_enable) { |
4756 |
+ ep->auth_hmacs = NULL; |
4757 |
+ return 0; |
4758 |
+ } |
4759 |
+ |
4760 |
++ /* If the transforms are already allocated, we are done */ |
4761 |
+ if (ep->auth_hmacs) |
4762 |
+ return 0; |
4763 |
+ |
4764 |
+@@ -665,12 +664,10 @@ static int __sctp_auth_cid(sctp_cid_t chunk, struct sctp_chunks_param *param) |
4765 |
+ /* Check if peer requested that this chunk is authenticated */ |
4766 |
+ int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc) |
4767 |
+ { |
4768 |
+- struct net *net; |
4769 |
+ if (!asoc) |
4770 |
+ return 0; |
4771 |
+ |
4772 |
+- net = sock_net(asoc->base.sk); |
4773 |
+- if (!net->sctp.auth_enable || !asoc->peer.auth_capable) |
4774 |
++ if (!asoc->ep->auth_enable || !asoc->peer.auth_capable) |
4775 |
+ return 0; |
4776 |
+ |
4777 |
+ return __sctp_auth_cid(chunk, asoc->peer.peer_chunks); |
4778 |
+@@ -679,12 +676,10 @@ int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc) |
4779 |
+ /* Check if we requested that peer authenticate this chunk. */ |
4780 |
+ int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc) |
4781 |
+ { |
4782 |
+- struct net *net; |
4783 |
+ if (!asoc) |
4784 |
+ return 0; |
4785 |
+ |
4786 |
+- net = sock_net(asoc->base.sk); |
4787 |
+- if (!net->sctp.auth_enable) |
4788 |
++ if (!asoc->ep->auth_enable) |
4789 |
+ return 0; |
4790 |
+ |
4791 |
+ return __sctp_auth_cid(chunk, |
4792 |
+diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c |
4793 |
+index 8e5fdea05216..3d9f429858dc 100644 |
4794 |
+--- a/net/sctp/endpointola.c |
4795 |
++++ b/net/sctp/endpointola.c |
4796 |
+@@ -68,7 +68,8 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, |
4797 |
+ if (!ep->digest) |
4798 |
+ return NULL; |
4799 |
+ |
4800 |
+- if (net->sctp.auth_enable) { |
4801 |
++ ep->auth_enable = net->sctp.auth_enable; |
4802 |
++ if (ep->auth_enable) { |
4803 |
+ /* Allocate space for HMACS and CHUNKS authentication |
4804 |
+ * variables. There are arrays that we encode directly |
4805 |
+ * into parameters to make the rest of the operations easier. |
4806 |
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c |
4807 |
+index 4e1d0fcb028e..a62a215dd22e 100644 |
4808 |
+--- a/net/sctp/protocol.c |
4809 |
++++ b/net/sctp/protocol.c |
4810 |
+@@ -491,8 +491,13 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, |
4811 |
+ continue; |
4812 |
+ if ((laddr->state == SCTP_ADDR_SRC) && |
4813 |
+ (AF_INET == laddr->a.sa.sa_family)) { |
4814 |
+- fl4->saddr = laddr->a.v4.sin_addr.s_addr; |
4815 |
+ fl4->fl4_sport = laddr->a.v4.sin_port; |
4816 |
++ flowi4_update_output(fl4, |
4817 |
++ asoc->base.sk->sk_bound_dev_if, |
4818 |
++ RT_CONN_FLAGS(asoc->base.sk), |
4819 |
++ daddr->v4.sin_addr.s_addr, |
4820 |
++ laddr->a.v4.sin_addr.s_addr); |
4821 |
++ |
4822 |
+ rt = ip_route_output_key(sock_net(sk), fl4); |
4823 |
+ if (!IS_ERR(rt)) { |
4824 |
+ dst = &rt->dst; |
4825 |
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c |
4826 |
+index 3a1767ef3201..fee5552ddf92 100644 |
4827 |
+--- a/net/sctp/sm_make_chunk.c |
4828 |
++++ b/net/sctp/sm_make_chunk.c |
4829 |
+@@ -219,6 +219,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, |
4830 |
+ gfp_t gfp, int vparam_len) |
4831 |
+ { |
4832 |
+ struct net *net = sock_net(asoc->base.sk); |
4833 |
++ struct sctp_endpoint *ep = asoc->ep; |
4834 |
+ sctp_inithdr_t init; |
4835 |
+ union sctp_params addrs; |
4836 |
+ size_t chunksize; |
4837 |
+@@ -278,7 +279,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, |
4838 |
+ chunksize += vparam_len; |
4839 |
+ |
4840 |
+ /* Account for AUTH related parameters */ |
4841 |
+- if (net->sctp.auth_enable) { |
4842 |
++ if (ep->auth_enable) { |
4843 |
+ /* Add random parameter length*/ |
4844 |
+ chunksize += sizeof(asoc->c.auth_random); |
4845 |
+ |
4846 |
+@@ -363,7 +364,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, |
4847 |
+ } |
4848 |
+ |
4849 |
+ /* Add SCTP-AUTH chunks to the parameter list */ |
4850 |
+- if (net->sctp.auth_enable) { |
4851 |
++ if (ep->auth_enable) { |
4852 |
+ sctp_addto_chunk(retval, sizeof(asoc->c.auth_random), |
4853 |
+ asoc->c.auth_random); |
4854 |
+ if (auth_hmacs) |
4855 |
+@@ -2010,7 +2011,7 @@ static void sctp_process_ext_param(struct sctp_association *asoc, |
4856 |
+ /* if the peer reports AUTH, assume that he |
4857 |
+ * supports AUTH. |
4858 |
+ */ |
4859 |
+- if (net->sctp.auth_enable) |
4860 |
++ if (asoc->ep->auth_enable) |
4861 |
+ asoc->peer.auth_capable = 1; |
4862 |
+ break; |
4863 |
+ case SCTP_CID_ASCONF: |
4864 |
+@@ -2102,6 +2103,7 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc, |
4865 |
+ * SCTP_IERROR_NO_ERROR - continue with the chunk |
4866 |
+ */ |
4867 |
+ static sctp_ierror_t sctp_verify_param(struct net *net, |
4868 |
++ const struct sctp_endpoint *ep, |
4869 |
+ const struct sctp_association *asoc, |
4870 |
+ union sctp_params param, |
4871 |
+ sctp_cid_t cid, |
4872 |
+@@ -2152,7 +2154,7 @@ static sctp_ierror_t sctp_verify_param(struct net *net, |
4873 |
+ goto fallthrough; |
4874 |
+ |
4875 |
+ case SCTP_PARAM_RANDOM: |
4876 |
+- if (!net->sctp.auth_enable) |
4877 |
++ if (!ep->auth_enable) |
4878 |
+ goto fallthrough; |
4879 |
+ |
4880 |
+ /* SCTP-AUTH: Secion 6.1 |
4881 |
+@@ -2169,7 +2171,7 @@ static sctp_ierror_t sctp_verify_param(struct net *net, |
4882 |
+ break; |
4883 |
+ |
4884 |
+ case SCTP_PARAM_CHUNKS: |
4885 |
+- if (!net->sctp.auth_enable) |
4886 |
++ if (!ep->auth_enable) |
4887 |
+ goto fallthrough; |
4888 |
+ |
4889 |
+ /* SCTP-AUTH: Section 3.2 |
4890 |
+@@ -2185,7 +2187,7 @@ static sctp_ierror_t sctp_verify_param(struct net *net, |
4891 |
+ break; |
4892 |
+ |
4893 |
+ case SCTP_PARAM_HMAC_ALGO: |
4894 |
+- if (!net->sctp.auth_enable) |
4895 |
++ if (!ep->auth_enable) |
4896 |
+ goto fallthrough; |
4897 |
+ |
4898 |
+ hmacs = (struct sctp_hmac_algo_param *)param.p; |
4899 |
+@@ -2220,10 +2222,9 @@ fallthrough: |
4900 |
+ } |
4901 |
+ |
4902 |
+ /* Verify the INIT packet before we process it. */ |
4903 |
+-int sctp_verify_init(struct net *net, const struct sctp_association *asoc, |
4904 |
+- sctp_cid_t cid, |
4905 |
+- sctp_init_chunk_t *peer_init, |
4906 |
+- struct sctp_chunk *chunk, |
4907 |
++int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep, |
4908 |
++ const struct sctp_association *asoc, sctp_cid_t cid, |
4909 |
++ sctp_init_chunk_t *peer_init, struct sctp_chunk *chunk, |
4910 |
+ struct sctp_chunk **errp) |
4911 |
+ { |
4912 |
+ union sctp_params param; |
4913 |
+@@ -2264,8 +2265,8 @@ int sctp_verify_init(struct net *net, const struct sctp_association *asoc, |
4914 |
+ |
4915 |
+ /* Verify all the variable length parameters */ |
4916 |
+ sctp_walk_params(param, peer_init, init_hdr.params) { |
4917 |
+- |
4918 |
+- result = sctp_verify_param(net, asoc, param, cid, chunk, errp); |
4919 |
++ result = sctp_verify_param(net, ep, asoc, param, cid, |
4920 |
++ chunk, errp); |
4921 |
+ switch (result) { |
4922 |
+ case SCTP_IERROR_ABORT: |
4923 |
+ case SCTP_IERROR_NOMEM: |
4924 |
+@@ -2497,6 +2498,7 @@ static int sctp_process_param(struct sctp_association *asoc, |
4925 |
+ struct sctp_af *af; |
4926 |
+ union sctp_addr_param *addr_param; |
4927 |
+ struct sctp_transport *t; |
4928 |
++ struct sctp_endpoint *ep = asoc->ep; |
4929 |
+ |
4930 |
+ /* We maintain all INIT parameters in network byte order all the |
4931 |
+ * time. This allows us to not worry about whether the parameters |
4932 |
+@@ -2636,7 +2638,7 @@ do_addr_param: |
4933 |
+ goto fall_through; |
4934 |
+ |
4935 |
+ case SCTP_PARAM_RANDOM: |
4936 |
+- if (!net->sctp.auth_enable) |
4937 |
++ if (!ep->auth_enable) |
4938 |
+ goto fall_through; |
4939 |
+ |
4940 |
+ /* Save peer's random parameter */ |
4941 |
+@@ -2649,7 +2651,7 @@ do_addr_param: |
4942 |
+ break; |
4943 |
+ |
4944 |
+ case SCTP_PARAM_HMAC_ALGO: |
4945 |
+- if (!net->sctp.auth_enable) |
4946 |
++ if (!ep->auth_enable) |
4947 |
+ goto fall_through; |
4948 |
+ |
4949 |
+ /* Save peer's HMAC list */ |
4950 |
+@@ -2665,7 +2667,7 @@ do_addr_param: |
4951 |
+ break; |
4952 |
+ |
4953 |
+ case SCTP_PARAM_CHUNKS: |
4954 |
+- if (!net->sctp.auth_enable) |
4955 |
++ if (!ep->auth_enable) |
4956 |
+ goto fall_through; |
4957 |
+ |
4958 |
+ asoc->peer.peer_chunks = kmemdup(param.p, |
4959 |
+diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c |
4960 |
+index 5d6883ff00c3..fef2acdf4a2e 100644 |
4961 |
+--- a/net/sctp/sm_sideeffect.c |
4962 |
++++ b/net/sctp/sm_sideeffect.c |
4963 |
+@@ -496,11 +496,10 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands, |
4964 |
+ |
4965 |
+ /* If the transport error count is greater than the pf_retrans |
4966 |
+ * threshold, and less than pathmaxrtx, and if the current state |
4967 |
+- * is not SCTP_UNCONFIRMED, then mark this transport as Partially |
4968 |
+- * Failed, see SCTP Quick Failover Draft, section 5.1 |
4969 |
++ * is SCTP_ACTIVE, then mark this transport as Partially Failed, |
4970 |
++ * see SCTP Quick Failover Draft, section 5.1 |
4971 |
+ */ |
4972 |
+- if ((transport->state != SCTP_PF) && |
4973 |
+- (transport->state != SCTP_UNCONFIRMED) && |
4974 |
++ if ((transport->state == SCTP_ACTIVE) && |
4975 |
+ (asoc->pf_retrans < transport->pathmaxrxt) && |
4976 |
+ (transport->error_count > asoc->pf_retrans)) { |
4977 |
+ |
4978 |
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c |
4979 |
+index 01e002430c85..5170a1ff95a1 100644 |
4980 |
+--- a/net/sctp/sm_statefuns.c |
4981 |
++++ b/net/sctp/sm_statefuns.c |
4982 |
+@@ -357,7 +357,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(struct net *net, |
4983 |
+ |
4984 |
+ /* Verify the INIT chunk before processing it. */ |
4985 |
+ err_chunk = NULL; |
4986 |
+- if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type, |
4987 |
++ if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type, |
4988 |
+ (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, |
4989 |
+ &err_chunk)) { |
4990 |
+ /* This chunk contains fatal error. It is to be discarded. |
4991 |
+@@ -524,7 +524,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net, |
4992 |
+ |
4993 |
+ /* Verify the INIT chunk before processing it. */ |
4994 |
+ err_chunk = NULL; |
4995 |
+- if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type, |
4996 |
++ if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type, |
4997 |
+ (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, |
4998 |
+ &err_chunk)) { |
4999 |
+ |
5000 |
+@@ -1430,7 +1430,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init( |
5001 |
+ |
5002 |
+ /* Verify the INIT chunk before processing it. */ |
5003 |
+ err_chunk = NULL; |
5004 |
+- if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type, |
5005 |
++ if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type, |
5006 |
+ (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, |
5007 |
+ &err_chunk)) { |
5008 |
+ /* This chunk contains fatal error. It is to be discarded. |
5009 |
+@@ -6178,7 +6178,7 @@ static int sctp_eat_data(const struct sctp_association *asoc, |
5010 |
+ * PMTU. In cases, such as loopback, this might be a rather |
5011 |
+ * large spill over. |
5012 |
+ */ |
5013 |
+- if ((!chunk->data_accepted) && (!asoc->rwnd || |
5014 |
++ if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over || |
5015 |
+ (datalen > asoc->rwnd + asoc->frag_point))) { |
5016 |
+ |
5017 |
+ /* If this is the next TSN, consider reneging to make |
5018 |
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c |
5019 |
+index 981aaf8b6ace..604a6acdf92e 100644 |
5020 |
+--- a/net/sctp/socket.c |
5021 |
++++ b/net/sctp/socket.c |
5022 |
+@@ -2115,6 +2115,12 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, |
5023 |
+ sctp_skb_pull(skb, copied); |
5024 |
+ skb_queue_head(&sk->sk_receive_queue, skb); |
5025 |
+ |
5026 |
++ /* When only partial message is copied to the user, increase |
5027 |
++ * rwnd by that amount. If all the data in the skb is read, |
5028 |
++ * rwnd is updated when the event is freed. |
5029 |
++ */ |
5030 |
++ if (!sctp_ulpevent_is_notification(event)) |
5031 |
++ sctp_assoc_rwnd_increase(event->asoc, copied); |
5032 |
+ goto out; |
5033 |
+ } else if ((event->msg_flags & MSG_NOTIFICATION) || |
5034 |
+ (event->msg_flags & MSG_EOR)) |
5035 |
+@@ -3315,10 +3321,10 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk, |
5036 |
+ char __user *optval, |
5037 |
+ unsigned int optlen) |
5038 |
+ { |
5039 |
+- struct net *net = sock_net(sk); |
5040 |
++ struct sctp_endpoint *ep = sctp_sk(sk)->ep; |
5041 |
+ struct sctp_authchunk val; |
5042 |
+ |
5043 |
+- if (!net->sctp.auth_enable) |
5044 |
++ if (!ep->auth_enable) |
5045 |
+ return -EACCES; |
5046 |
+ |
5047 |
+ if (optlen != sizeof(struct sctp_authchunk)) |
5048 |
+@@ -3335,7 +3341,7 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk, |
5049 |
+ } |
5050 |
+ |
5051 |
+ /* add this chunk id to the endpoint */ |
5052 |
+- return sctp_auth_ep_add_chunkid(sctp_sk(sk)->ep, val.sauth_chunk); |
5053 |
++ return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk); |
5054 |
+ } |
5055 |
+ |
5056 |
+ /* |
5057 |
+@@ -3348,12 +3354,12 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk, |
5058 |
+ char __user *optval, |
5059 |
+ unsigned int optlen) |
5060 |
+ { |
5061 |
+- struct net *net = sock_net(sk); |
5062 |
++ struct sctp_endpoint *ep = sctp_sk(sk)->ep; |
5063 |
+ struct sctp_hmacalgo *hmacs; |
5064 |
+ u32 idents; |
5065 |
+ int err; |
5066 |
+ |
5067 |
+- if (!net->sctp.auth_enable) |
5068 |
++ if (!ep->auth_enable) |
5069 |
+ return -EACCES; |
5070 |
+ |
5071 |
+ if (optlen < sizeof(struct sctp_hmacalgo)) |
5072 |
+@@ -3370,7 +3376,7 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk, |
5073 |
+ goto out; |
5074 |
+ } |
5075 |
+ |
5076 |
+- err = sctp_auth_ep_set_hmacs(sctp_sk(sk)->ep, hmacs); |
5077 |
++ err = sctp_auth_ep_set_hmacs(ep, hmacs); |
5078 |
+ out: |
5079 |
+ kfree(hmacs); |
5080 |
+ return err; |
5081 |
+@@ -3386,12 +3392,12 @@ static int sctp_setsockopt_auth_key(struct sock *sk, |
5082 |
+ char __user *optval, |
5083 |
+ unsigned int optlen) |
5084 |
+ { |
5085 |
+- struct net *net = sock_net(sk); |
5086 |
++ struct sctp_endpoint *ep = sctp_sk(sk)->ep; |
5087 |
+ struct sctp_authkey *authkey; |
5088 |
+ struct sctp_association *asoc; |
5089 |
+ int ret; |
5090 |
+ |
5091 |
+- if (!net->sctp.auth_enable) |
5092 |
++ if (!ep->auth_enable) |
5093 |
+ return -EACCES; |
5094 |
+ |
5095 |
+ if (optlen <= sizeof(struct sctp_authkey)) |
5096 |
+@@ -3412,7 +3418,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk, |
5097 |
+ goto out; |
5098 |
+ } |
5099 |
+ |
5100 |
+- ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey); |
5101 |
++ ret = sctp_auth_set_key(ep, asoc, authkey); |
5102 |
+ out: |
5103 |
+ kzfree(authkey); |
5104 |
+ return ret; |
5105 |
+@@ -3428,11 +3434,11 @@ static int sctp_setsockopt_active_key(struct sock *sk, |
5106 |
+ char __user *optval, |
5107 |
+ unsigned int optlen) |
5108 |
+ { |
5109 |
+- struct net *net = sock_net(sk); |
5110 |
++ struct sctp_endpoint *ep = sctp_sk(sk)->ep; |
5111 |
+ struct sctp_authkeyid val; |
5112 |
+ struct sctp_association *asoc; |
5113 |
+ |
5114 |
+- if (!net->sctp.auth_enable) |
5115 |
++ if (!ep->auth_enable) |
5116 |
+ return -EACCES; |
5117 |
+ |
5118 |
+ if (optlen != sizeof(struct sctp_authkeyid)) |
5119 |
+@@ -3444,8 +3450,7 @@ static int sctp_setsockopt_active_key(struct sock *sk, |
5120 |
+ if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) |
5121 |
+ return -EINVAL; |
5122 |
+ |
5123 |
+- return sctp_auth_set_active_key(sctp_sk(sk)->ep, asoc, |
5124 |
+- val.scact_keynumber); |
5125 |
++ return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); |
5126 |
+ } |
5127 |
+ |
5128 |
+ /* |
5129 |
+@@ -3457,11 +3462,11 @@ static int sctp_setsockopt_del_key(struct sock *sk, |
5130 |
+ char __user *optval, |
5131 |
+ unsigned int optlen) |
5132 |
+ { |
5133 |
+- struct net *net = sock_net(sk); |
5134 |
++ struct sctp_endpoint *ep = sctp_sk(sk)->ep; |
5135 |
+ struct sctp_authkeyid val; |
5136 |
+ struct sctp_association *asoc; |
5137 |
+ |
5138 |
+- if (!net->sctp.auth_enable) |
5139 |
++ if (!ep->auth_enable) |
5140 |
+ return -EACCES; |
5141 |
+ |
5142 |
+ if (optlen != sizeof(struct sctp_authkeyid)) |
5143 |
+@@ -3473,8 +3478,7 @@ static int sctp_setsockopt_del_key(struct sock *sk, |
5144 |
+ if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) |
5145 |
+ return -EINVAL; |
5146 |
+ |
5147 |
+- return sctp_auth_del_key_id(sctp_sk(sk)->ep, asoc, |
5148 |
+- val.scact_keynumber); |
5149 |
++ return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); |
5150 |
+ |
5151 |
+ } |
5152 |
+ |
5153 |
+@@ -5381,16 +5385,16 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len, |
5154 |
+ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, |
5155 |
+ char __user *optval, int __user *optlen) |
5156 |
+ { |
5157 |
+- struct net *net = sock_net(sk); |
5158 |
++ struct sctp_endpoint *ep = sctp_sk(sk)->ep; |
5159 |
+ struct sctp_hmacalgo __user *p = (void __user *)optval; |
5160 |
+ struct sctp_hmac_algo_param *hmacs; |
5161 |
+ __u16 data_len = 0; |
5162 |
+ u32 num_idents; |
5163 |
+ |
5164 |
+- if (!net->sctp.auth_enable) |
5165 |
++ if (!ep->auth_enable) |
5166 |
+ return -EACCES; |
5167 |
+ |
5168 |
+- hmacs = sctp_sk(sk)->ep->auth_hmacs_list; |
5169 |
++ hmacs = ep->auth_hmacs_list; |
5170 |
+ data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t); |
5171 |
+ |
5172 |
+ if (len < sizeof(struct sctp_hmacalgo) + data_len) |
5173 |
+@@ -5411,11 +5415,11 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, |
5174 |
+ static int sctp_getsockopt_active_key(struct sock *sk, int len, |
5175 |
+ char __user *optval, int __user *optlen) |
5176 |
+ { |
5177 |
+- struct net *net = sock_net(sk); |
5178 |
++ struct sctp_endpoint *ep = sctp_sk(sk)->ep; |
5179 |
+ struct sctp_authkeyid val; |
5180 |
+ struct sctp_association *asoc; |
+ |
+- if (!net->sctp.auth_enable) |
++ if (!ep->auth_enable) |
+ return -EACCES; |
+ |
+ if (len < sizeof(struct sctp_authkeyid)) |
+@@ -5430,7 +5434,7 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len, |
+ if (asoc) |
+ val.scact_keynumber = asoc->active_key_id; |
+ else |
+- val.scact_keynumber = sctp_sk(sk)->ep->active_key_id; |
++ val.scact_keynumber = ep->active_key_id; |
+ |
+ len = sizeof(struct sctp_authkeyid); |
+ if (put_user(len, optlen)) |
+@@ -5444,7 +5448,7 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len, |
+ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, |
+ char __user *optval, int __user *optlen) |
+ { |
+- struct net *net = sock_net(sk); |
++ struct sctp_endpoint *ep = sctp_sk(sk)->ep; |
+ struct sctp_authchunks __user *p = (void __user *)optval; |
+ struct sctp_authchunks val; |
+ struct sctp_association *asoc; |
+@@ -5452,7 +5456,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, |
+ u32 num_chunks = 0; |
+ char __user *to; |
+ |
+- if (!net->sctp.auth_enable) |
++ if (!ep->auth_enable) |
+ return -EACCES; |
+ |
+ if (len < sizeof(struct sctp_authchunks)) |
+@@ -5489,7 +5493,7 @@ num: |
+ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, |
+ char __user *optval, int __user *optlen) |
+ { |
+- struct net *net = sock_net(sk); |
++ struct sctp_endpoint *ep = sctp_sk(sk)->ep; |
+ struct sctp_authchunks __user *p = (void __user *)optval; |
+ struct sctp_authchunks val; |
+ struct sctp_association *asoc; |
+@@ -5497,7 +5501,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, |
+ u32 num_chunks = 0; |
+ char __user *to; |
+ |
+- if (!net->sctp.auth_enable) |
++ if (!ep->auth_enable) |
+ return -EACCES; |
+ |
+ if (len < sizeof(struct sctp_authchunks)) |
+@@ -5514,7 +5518,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, |
+ if (asoc) |
+ ch = (struct sctp_chunks_param *)asoc->c.auth_chunks; |
+ else |
+- ch = sctp_sk(sk)->ep->auth_chunk_list; |
++ ch = ep->auth_chunk_list; |
+ |
+ if (!ch) |
+ goto num; |
+@@ -6593,6 +6597,46 @@ static void __sctp_write_space(struct sctp_association *asoc) |
+ } |
+ } |
+ |
++static void sctp_wake_up_waiters(struct sock *sk, |
++ struct sctp_association *asoc) |
++{ |
++ struct sctp_association *tmp = asoc; |
++ |
++ /* We do accounting for the sndbuf space per association, |
++ * so we only need to wake our own association. |
++ */ |
++ if (asoc->ep->sndbuf_policy) |
++ return __sctp_write_space(asoc); |
++ |
++ /* If association goes down and is just flushing its |
++ * outq, then just normally notify others. |
++ */ |
++ if (asoc->base.dead) |
++ return sctp_write_space(sk); |
++ |
++ /* Accounting for the sndbuf space is per socket, so we |
++ * need to wake up others, try to be fair and in case of |
++ * other associations, let them have a go first instead |
++ * of just doing a sctp_write_space() call. |
++ * |
++ * Note that we reach sctp_wake_up_waiters() only when |
++ * associations free up queued chunks, thus we are under |
++ * lock and the list of associations on a socket is |
++ * guaranteed not to change. |
++ */ |
++ for (tmp = list_next_entry(tmp, asocs); 1; |
++ tmp = list_next_entry(tmp, asocs)) { |
++ /* Manually skip the head element. */ |
++ if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs)) |
++ continue; |
++ /* Wake up association. */ |
++ __sctp_write_space(tmp); |
++ /* We've reached the end. */ |
++ if (tmp == asoc) |
++ break; |
++ } |
++} |
++ |
+ /* Do accounting for the sndbuf space. |
+ * Decrement the used sndbuf space of the corresponding association by the |
+ * data size which was just transmitted(freed). |
+@@ -6620,7 +6664,7 @@ static void sctp_wfree(struct sk_buff *skb) |
+ sk_mem_uncharge(sk, skb->truesize); |
+ |
+ sock_wfree(skb); |
+- __sctp_write_space(asoc); |
++ sctp_wake_up_waiters(sk, asoc); |
+ |
+ sctp_association_put(asoc); |
+ } |
+diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c |
+index 35c8923b5554..c82fdc1eab7c 100644 |
+--- a/net/sctp/sysctl.c |
++++ b/net/sctp/sysctl.c |
+@@ -64,6 +64,9 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write, |
+ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, |
+ void __user *buffer, size_t *lenp, |
+ loff_t *ppos); |
++static int proc_sctp_do_auth(struct ctl_table *ctl, int write, |
++ void __user *buffer, size_t *lenp, |
++ loff_t *ppos); |
+ |
+ static struct ctl_table sctp_table[] = { |
+ { |
+@@ -266,7 +269,7 @@ static struct ctl_table sctp_net_table[] = { |
+ .data = &init_net.sctp.auth_enable, |
+ .maxlen = sizeof(int), |
+ .mode = 0644, |
+- .proc_handler = proc_dointvec, |
++ .proc_handler = proc_sctp_do_auth, |
+ }, |
+ { |
+ .procname = "addr_scope_policy", |
+@@ -400,6 +403,37 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, |
+ return ret; |
+ } |
+ |
++static int proc_sctp_do_auth(struct ctl_table *ctl, int write, |
++ void __user *buffer, size_t *lenp, |
++ loff_t *ppos) |
++{ |
++ struct net *net = current->nsproxy->net_ns; |
++ struct ctl_table tbl; |
++ int new_value, ret; |
++ |
++ memset(&tbl, 0, sizeof(struct ctl_table)); |
++ tbl.maxlen = sizeof(unsigned int); |
++ |
++ if (write) |
++ tbl.data = &new_value; |
++ else |
++ tbl.data = &net->sctp.auth_enable; |
++ |
++ ret = proc_dointvec(&tbl, write, buffer, lenp, ppos); |
++ |
++ if (write) { |
++ struct sock *sk = net->sctp.ctl_sock; |
++ |
++ net->sctp.auth_enable = new_value; |
++ /* Update the value in the control socket */ |
++ lock_sock(sk); |
++ sctp_sk(sk)->ep->auth_enable = new_value; |
++ release_sock(sk); |
++ } |
++ |
++ return ret; |
++} |
++ |
+ int sctp_sysctl_net_register(struct net *net) |
+ { |
+ struct ctl_table *table = sctp_net_table; |
+diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c |
+index 8d198ae03606..85c64658bd0b 100644 |
+--- a/net/sctp/ulpevent.c |
++++ b/net/sctp/ulpevent.c |
+@@ -989,7 +989,7 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event, |
+ skb = sctp_event2skb(event); |
+ /* Set the owner and charge rwnd for bytes received. */ |
+ sctp_ulpevent_set_owner(event, asoc); |
+- sctp_assoc_rwnd_update(asoc, false); |
++ sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb)); |
+ |
+ if (!skb->data_len) |
+ return; |
+@@ -1011,7 +1011,6 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event) |
+ { |
+ struct sk_buff *skb, *frag; |
+ unsigned int len; |
+- struct sctp_association *asoc; |
+ |
+ /* Current stack structures assume that the rcv buffer is |
+ * per socket. For UDP style sockets this is not true as |
+@@ -1036,11 +1035,8 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event) |
+ } |
+ |
+ done: |
+- asoc = event->asoc; |
+- sctp_association_hold(asoc); |
++ sctp_assoc_rwnd_increase(event->asoc, len); |
+ sctp_ulpevent_release_owner(event); |
+- sctp_assoc_rwnd_update(asoc, true); |
+- sctp_association_put(asoc); |
+ } |
+ |
+ static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event) |
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c |
+index 5adfd94c5b85..85d232bed87d 100644 |
+--- a/net/vmw_vsock/af_vsock.c |
++++ b/net/vmw_vsock/af_vsock.c |
+@@ -1925,9 +1925,23 @@ static struct miscdevice vsock_device = { |
+ .fops = &vsock_device_ops, |
+ }; |
+ |
+-static int __vsock_core_init(void) |
++int __vsock_core_init(const struct vsock_transport *t, struct module *owner) |
+ { |
+- int err; |
++ int err = mutex_lock_interruptible(&vsock_register_mutex); |
++ |
++ if (err) |
++ return err; |
++ |
++ if (transport) { |
++ err = -EBUSY; |
++ goto err_busy; |
++ } |
++ |
++ /* Transport must be the owner of the protocol so that it can't |
++ * unload while there are open sockets. |
++ */ |
++ vsock_proto.owner = owner; |
++ transport = t; |
+ |
+ vsock_init_tables(); |
+ |
+@@ -1951,36 +1965,19 @@ static int __vsock_core_init(void) |
+ goto err_unregister_proto; |
+ } |
+ |
++ mutex_unlock(&vsock_register_mutex); |
+ return 0; |
+ |
+ err_unregister_proto: |
+ proto_unregister(&vsock_proto); |
+ err_misc_deregister: |
+ misc_deregister(&vsock_device); |
+- return err; |
+-} |
+- |
+-int vsock_core_init(const struct vsock_transport *t) |
+-{ |
+- int retval = mutex_lock_interruptible(&vsock_register_mutex); |
+- if (retval) |
+- return retval; |
+- |
+- if (transport) { |
+- retval = -EBUSY; |
+- goto out; |
+- } |
+- |
+- transport = t; |
+- retval = __vsock_core_init(); |
+- if (retval) |
+- transport = NULL; |
+- |
+-out: |
++ transport = NULL; |
++err_busy: |
+ mutex_unlock(&vsock_register_mutex); |
+- return retval; |
++ return err; |
+ } |
+-EXPORT_SYMBOL_GPL(vsock_core_init); |
++EXPORT_SYMBOL_GPL(__vsock_core_init); |
+ |
+ void vsock_core_exit(void) |
+ { |
+@@ -2000,5 +1997,5 @@ EXPORT_SYMBOL_GPL(vsock_core_exit); |
+ |
+ MODULE_AUTHOR("VMware, Inc."); |
+ MODULE_DESCRIPTION("VMware Virtual Socket Family"); |
+-MODULE_VERSION("1.0.0.0-k"); |
++MODULE_VERSION("1.0.1.0-k"); |
+ MODULE_LICENSE("GPL v2"); |
+diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c |
+index 1587ea392ad6..30e8e0c3f117 100644 |
+--- a/tools/lib/traceevent/event-parse.c |
++++ b/tools/lib/traceevent/event-parse.c |
+@@ -4321,6 +4321,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event |
+ format, len_arg, arg); |
+ trace_seq_terminate(&p); |
+ trace_seq_puts(s, p.buffer); |
++ trace_seq_destroy(&p); |
+ arg = arg->next; |
+ break; |
+ default: |