Linux: 3.18.43
DarkLord1731 authored and lzzy12 committed Mar 23, 2018
1 parent 3c96eef commit b75393c
Showing 82 changed files with 762 additions and 538 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 18
SUBLEVEL = 42
SUBLEVEL = 43
EXTRAVERSION =
NAME = Shuffling Zombie Juror

11 changes: 9 additions & 2 deletions arch/arc/include/asm/uaccess.h
@@ -83,7 +83,10 @@
"2: ;nop\n" \
" .section .fixup, \"ax\"\n" \
" .align 4\n" \
"3: mov %0, %3\n" \
"3: # return -EFAULT\n" \
" mov %0, %3\n" \
" # zero out dst ptr\n" \
" mov %1, 0\n" \
" j 2b\n" \
" .previous\n" \
" .section __ex_table, \"a\"\n" \
@@ -101,7 +104,11 @@
"2: ;nop\n" \
" .section .fixup, \"ax\"\n" \
" .align 4\n" \
"3: mov %0, %3\n" \
"3: # return -EFAULT\n" \
" mov %0, %3\n" \
" # zero out dst ptr\n" \
" mov %1, 0\n" \
" mov %R1, 0\n" \
" j 2b\n" \
" .previous\n" \
" .section __ex_table, \"a\"\n" \
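The fixup code added above zeroes the destination before returning -EFAULT; the later avr32, blackfin, frv, ia64, and m32r hunks in this commit apply the same rule in C. The common idea: if a copy from userspace faults partway through, copy_from_user() must clear the uncopied tail of the kernel buffer so stale kernel memory is never exposed through a short read. A minimal userspace sketch of that pattern, where raw_copy() and its fault_at parameter are hypothetical stand-ins for the arch copy primitive rather than kernel APIs:

#include <stdio.h>
#include <string.h>

/* Stand-in for the raw arch copy primitive; returns bytes NOT copied.
 * "fault_at" is hypothetical and only simulates a partial fault. */
static size_t raw_copy(void *dst, const void *src, size_t n, size_t fault_at)
{
	size_t done = n < fault_at ? n : fault_at;

	memcpy(dst, src, done);
	return n - done;
}

/* Mirrors the fixed copy_from_user(): clear whatever was not copied. */
static size_t copy_from_user_sketch(void *to, const void *from, size_t n,
				    size_t fault_at)
{
	size_t res = raw_copy(to, from, n, fault_at);

	if (res)
		memset((char *)to + (n - res), 0, res);
	return res;
}

int main(void)
{
	char src[8] = "ABCDEFG";
	unsigned char dst[8];
	size_t i, missed;

	memset(dst, 0xAA, sizeof(dst));		/* pretend dst holds stale data */
	missed = copy_from_user_sketch(dst, src, sizeof(dst), 4);

	printf("missed=%zu dst=", missed);
	for (i = 0; i < sizeof(dst); i++)
		printf("%02x ", dst[i]);	/* tail prints as 00, not aa */
	printf("\n");
	return 0;
}

Callers that ignore the return value still see a fully initialized buffer, which is exactly what the added memset()/"mov %1, 0" instructions guarantee.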
2 changes: 1 addition & 1 deletion arch/arm/boot/dts/kirkwood-ib62x0.dts
@@ -113,7 +113,7 @@

partition@e0000 {
label = "u-boot environment";
reg = <0xe0000 0x100000>;
reg = <0xe0000 0x20000>;
};

partition@100000 {
2 changes: 0 additions & 2 deletions arch/arm/kvm/arm.c
@@ -153,8 +153,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
{
int i;

kvm_free_stage2_pgd(kvm);

for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (kvm->vcpus[i]) {
kvm_arch_vcpu_free(kvm->vcpus[i]);
1 change: 1 addition & 0 deletions arch/arm/kvm/mmu.c
@@ -1493,6 +1493,7 @@ void kvm_arch_memslots_updated(struct kvm *kvm)

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
kvm_free_stage2_pgd(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
2 changes: 1 addition & 1 deletion arch/arm/mach-imx/pm-imx6.c
@@ -293,7 +293,7 @@ int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
val |= 0x3 << BP_CLPCR_STBY_COUNT;
val |= BM_CLPCR_VSTBY;
val |= BM_CLPCR_SBYOS;
if (cpu_is_imx6sl())
if (cpu_is_imx6sl() || cpu_is_imx6sx())
val |= BM_CLPCR_BYPASS_PMIC_READY;
if (cpu_is_imx6sl() || cpu_is_imx6sx())
val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
12 changes: 12 additions & 0 deletions arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -724,8 +724,20 @@ static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
* display serial interface controller
*/

static struct omap_hwmod_class_sysconfig omap3xxx_dsi_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap3xxx_dsi_hwmod_class = {
.name = "dsi",
.sysc = &omap3xxx_dsi_sysc,
};

static struct omap_hwmod_irq_info omap3xxx_dsi1_irqs[] = {
8 changes: 4 additions & 4 deletions arch/arm/plat-orion/gpio.c
@@ -505,9 +505,9 @@ static void orion_gpio_unmask_irq(struct irq_data *d)
u32 mask = d->mask;

irq_gc_lock(gc);
reg_val = irq_reg_readl(gc->reg_base + ct->regs.mask);
reg_val = irq_reg_readl(gc, ct->regs.mask);
reg_val |= mask;
irq_reg_writel(reg_val, gc->reg_base + ct->regs.mask);
irq_reg_writel(gc, reg_val, ct->regs.mask);
irq_gc_unlock(gc);
}

@@ -519,9 +519,9 @@ static void orion_gpio_mask_irq(struct irq_data *d)
u32 reg_val;

irq_gc_lock(gc);
reg_val = irq_reg_readl(gc->reg_base + ct->regs.mask);
reg_val = irq_reg_readl(gc, ct->regs.mask);
reg_val &= ~mask;
irq_reg_writel(reg_val, gc->reg_base + ct->regs.mask);
irq_reg_writel(gc, reg_val, ct->regs.mask);
irq_gc_unlock(gc);
}

2 changes: 1 addition & 1 deletion arch/arm64/crypto/aes-glue.c
@@ -205,7 +205,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
err = blkcipher_walk_done(desc, &walk,
walk.nbytes % AES_BLOCK_SIZE);
}
if (nbytes) {
if (walk.nbytes % AES_BLOCK_SIZE) {
u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
u8 __aligned(8) tail[AES_BLOCK_SIZE];
10 changes: 10 additions & 0 deletions arch/arm64/include/asm/spinlock.h
@@ -231,4 +231,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()

/*
* Accesses appearing in program order before a spin_lock() operation
* can be reordered with accesses inside the critical section, by virtue
* of arch_spin_lock being constructed using acquire semantics.
*
* In cases where this is problematic (e.g. try_to_wake_up), an
* smp_mb__before_spinlock() can restore the required ordering.
*/
#define smp_mb__before_spinlock() smp_mb()

#endif /* __ASM_SPINLOCK_H */
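The comment block added above describes the hazard in prose; a hedged C11 userspace analogue may make it more concrete. The toy spin_lock_acquire() below is not the kernel's arch_spin_lock, and the names are invented for illustration: with acquire-only locking, a store issued before the lock can sink into the critical section, while a full fence in the position of smp_mb__before_spinlock() prevents that.

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag lock = ATOMIC_FLAG_INIT;
static _Atomic int cond;
static _Atomic int task_state;

/* Toy lock with acquire-only semantics: later accesses cannot move up,
 * but stores issued before the lock may still sink into the section. */
static void spin_lock_acquire(void)
{
	while (atomic_flag_test_and_set_explicit(&lock, memory_order_acquire))
		;
}

static void spin_unlock_release(void)
{
	atomic_flag_clear_explicit(&lock, memory_order_release);
}

/* Waker side of the try_to_wake_up()-style pattern the comment mentions. */
static void wake_up_sketch(void)
{
	atomic_store_explicit(&cond, 1, memory_order_relaxed);

	/* stand-in for smp_mb__before_spinlock(): order the cond store
	 * before anything read under the lock */
	atomic_thread_fence(memory_order_seq_cst);

	spin_lock_acquire();
	int state = atomic_load_explicit(&task_state, memory_order_relaxed);
	(void)state;	/* decide whether to wake based on "state" ... */
	spin_unlock_release();
}

int main(void)
{
	atomic_store(&task_state, 1);
	wake_up_sketch();
	printf("cond=%d\n", atomic_load(&cond));
	return 0;
}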
11 changes: 10 additions & 1 deletion arch/avr32/include/asm/uaccess.h
@@ -74,7 +74,7 @@ extern __kernel_size_t __copy_user(void *to, const void *from,

extern __kernel_size_t copy_to_user(void __user *to, const void *from,
__kernel_size_t n);
extern __kernel_size_t copy_from_user(void *to, const void __user *from,
extern __kernel_size_t ___copy_from_user(void *to, const void __user *from,
__kernel_size_t n);

static inline __kernel_size_t __copy_to_user(void __user *to, const void *from,
@@ -88,6 +88,15 @@ static inline __kernel_size_t __copy_from_user(void *to,
{
return __copy_user(to, (const void __force *)from, n);
}
static inline __kernel_size_t copy_from_user(void *to,
const void __user *from,
__kernel_size_t n)
{
size_t res = ___copy_from_user(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
2 changes: 1 addition & 1 deletion arch/avr32/kernel/avr32_ksyms.c
@@ -36,7 +36,7 @@ EXPORT_SYMBOL(copy_page);
/*
* Userspace access stuff.
*/
EXPORT_SYMBOL(copy_from_user);
EXPORT_SYMBOL(___copy_from_user);
EXPORT_SYMBOL(copy_to_user);
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(strncpy_from_user);
8 changes: 4 additions & 4 deletions arch/avr32/lib/copy_user.S
@@ -23,13 +23,13 @@
*/
.text
.align 1
.global copy_from_user
.type copy_from_user, @function
copy_from_user:
.global ___copy_from_user
.type ___copy_from_user, @function
___copy_from_user:
branch_if_kernel r8, __copy_user
ret_if_privileged r8, r11, r10, r10
rjmp __copy_user
.size copy_from_user, . - copy_from_user
.size ___copy_from_user, . - ___copy_from_user

.global copy_to_user
.type copy_to_user, @function
9 changes: 5 additions & 4 deletions arch/blackfin/include/asm/uaccess.h
@@ -177,11 +177,12 @@ static inline int bad_user_access_length(void)
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
if (likely(access_ok(VERIFY_READ, from, n))) {
memcpy(to, (const void __force *)from, n);
else
return n;
return 0;
return 0;
}
memset(to, 0, n);
return n;
}

static inline unsigned long __must_check
12 changes: 9 additions & 3 deletions arch/frv/include/asm/uaccess.h
@@ -263,19 +263,25 @@ do { \
extern long __memset_user(void *dst, unsigned long count);
extern long __memcpy_user(void *dst, const void *src, unsigned long count);

#define clear_user(dst,count) __memset_user(____force(dst), (count))
#define __clear_user(dst,count) __memset_user(____force(dst), (count))
#define __copy_from_user_inatomic(to, from, n) __memcpy_user((to), ____force(from), (n))
#define __copy_to_user_inatomic(to, from, n) __memcpy_user(____force(to), (from), (n))

#else

#define clear_user(dst,count) (memset(____force(dst), 0, (count)), 0)
#define __clear_user(dst,count) (memset(____force(dst), 0, (count)), 0)
#define __copy_from_user_inatomic(to, from, n) (memcpy((to), ____force(from), (n)), 0)
#define __copy_to_user_inatomic(to, from, n) (memcpy(____force(to), (from), (n)), 0)

#endif

#define __clear_user clear_user
static inline unsigned long __must_check
clear_user(void __user *to, unsigned long n)
{
if (likely(__access_ok(to, n)))
n = __clear_user(to, n);
return n;
}

static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
36 changes: 19 additions & 17 deletions arch/hexagon/include/asm/cacheflush.h
@@ -21,10 +21,7 @@
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

#include <linux/cache.h>
#include <linux/mm.h>
#include <asm/string.h>
#include <asm-generic/cacheflush.h>
#include <linux/mm_types.h>

/* Cache flushing:
*
@@ -41,6 +38,20 @@
#define LINESIZE 32
#define LINEBITS 5

#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)

/*
* Flush Dcache range through current map.
*/
Expand All @@ -49,7 +60,6 @@ extern void flush_dcache_range(unsigned long start, unsigned long end);
/*
* Flush Icache range through current map.
*/
#undef flush_icache_range
extern void flush_icache_range(unsigned long start, unsigned long end);

/*
@@ -79,19 +89,11 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
/* generic_ptrace_pokedata doesn't wind up here, does it? */
}

#undef copy_to_user_page
static inline void copy_to_user_page(struct vm_area_struct *vma,
struct page *page,
unsigned long vaddr,
void *dst, void *src, int len)
{
memcpy(dst, src, len);
if (vma->vm_flags & VM_EXEC) {
flush_icache_range((unsigned long) dst,
(unsigned long) dst + len);
}
}
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, void *src, int len);

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)

extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end);
extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end);
5 changes: 0 additions & 5 deletions arch/hexagon/include/asm/io.h
@@ -24,14 +24,9 @@
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <asm/string.h>
#include <asm/mem-layout.h>
#include <asm/iomap.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
* We don't have PCI yet.
3 changes: 2 additions & 1 deletion arch/hexagon/include/asm/uaccess.h
@@ -102,7 +102,8 @@ static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
{
long res = __strnlen_user(src, n);

/* return from strnlen can't be zero -- that would be rubbish. */
if (unlikely(!res))
return -EFAULT;

if (res > n) {
copy_from_user(dst, src, n);
1 change: 1 addition & 0 deletions arch/hexagon/kernel/setup.c
@@ -19,6 +19,7 @@
*/

#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/mm.h>
10 changes: 10 additions & 0 deletions arch/hexagon/mm/cache.c
@@ -127,3 +127,13 @@ void flush_cache_all_hexagon(void)
local_irq_restore(flags);
mb();
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, void *src, int len)
{
memcpy(dst, src, len);
if (vma->vm_flags & VM_EXEC) {
flush_icache_range((unsigned long) dst,
(unsigned long) dst + len);
}
}
1 change: 1 addition & 0 deletions arch/hexagon/mm/ioremap.c
@@ -20,6 +20,7 @@

#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
20 changes: 9 additions & 11 deletions arch/ia64/include/asm/uaccess.h
@@ -262,17 +262,15 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
__cu_len; \
})

#define copy_from_user(to, from, n) \
({ \
void *__cu_to = (to); \
const void __user *__cu_from = (from); \
long __cu_len = (n); \
\
__chk_user_ptr(__cu_from); \
if (__access_ok(__cu_from, __cu_len, get_fs())) \
__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
__cu_len; \
})
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (likely(__access_ok(from, n, get_fs())))
n = __copy_user((__force void __user *) to, from, n);
else
memset(to, 0, n);
return n;
}

#define __copy_in_user(to, from, size) __copy_user((to), (from), (size))

2 changes: 1 addition & 1 deletion arch/m32r/include/asm/uaccess.h
@@ -215,7 +215,7 @@ extern int fixup_exception(struct pt_regs *regs);
#define __get_user_nocheck(x,ptr,size) \
({ \
long __gu_err = 0; \
unsigned long __gu_val; \
unsigned long __gu_val = 0; \
might_fault(); \
__get_user_size(__gu_val,(ptr),(size),__gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
[Diffs for the remaining changed files in this commit were not loaded.]
