path: root/arch/s390/include/asm/tlbflush.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
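	/* PTLB purges all TLB entries of the issuing CPU only */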
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	unsigned long opt;

	opt = IDTE_PTOA;
	if (MACHINE_HAS_TLB_GUEST)
		opt |= IDTE_GUEST_ASCE;
	/*
	 * Global TLB flush for the mm: opcode 0xb98e is IDTE (INVALIDATE
	 * DAT TABLE ENTRY); the PTOA option purges, on all CPUs, the TLB
	 * entries created from translations using the specified ASCE.
	 */
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (opt), "a" (asce) : "cc");
}

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
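	/* CSP (COMPARE AND SWAP AND PURGE) purges the TLBs of all CPUs */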
	unsigned int dummy = 0;

	csp(&dummy, 0, 0);
}

/*
 * Flush TLB entries for a specific mm on all CPUs (if a gmap is in use
 * this may involve multiple ASCEs!).
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	unsigned long gmap_asce;

	/*
	 * If the machine has IDTE we prefer a per-mm flush on all CPUs
	 * over a local flush, even if the mm only ran on the local CPU.
	 */
	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	/* Reset TLB flush mask */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	barrier();
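	/*
	 * gmap_asce is 0 if no guest address space is attached, the gmap
	 * ASCE if exactly one is attached, and -1UL if a per-ASCE flush is
	 * not possible (e.g. several gmaps), which forces a full flush.
	 */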
	gmap_asce = READ_ONCE(mm->context.gmap_asce);
	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
		if (gmap_asce)
			__tlb_flush_idte(gmap_asce);
		__tlb_flush_idte(mm->context.asce);
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}

/*
 * Flush TLB entries for kernel page tables (init_mm) on all CPUs.
 */
static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(init_mm.context.asce);
	else
		__tlb_flush_global();
}

/*
 * Flush the mm only if a flush was deferred: the pte helpers set
 * mm->context.flush_mm when they skip a direct TLB flush.
 */
static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	spin_lock(&mm->context.lock);
	if (mm->context.flush_mm) {
		mm->context.flush_mm = 0;
		__tlb_flush_mm(mm);
	}
	spin_unlock(&mm->context.lock);
}

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
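
/*
 * Illustrative sketch (not part of this header) of the deferred-flush
 * pattern described above:
 *
 *	ptep_set_wrprotect(mm, addr, ptep);	// may skip the direct TLB flush
 *	...
 *	flush_tlb_range(vma, start, end);	// performs any deferred flush
 */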
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */
