path: root/drivers/parisc/iommu-helpers.h
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/prefetch.h>

/**
 * iommu_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir.
 * @ioc: The I/O Controller.
 * @startsg: The scatter/gather list of coalesced chunks.
 * @nents: The number of entries in the scatter/gather list.
 * @hint: The DMA Hint.
 * @iommu_io_pdir_entry: IOMMU-specific callback that writes a single
 * I/O Pdir entry.
 *
 * This function inserts the coalesced scatter/gather list chunks into the
 * I/O Controller's I/O Pdir.
 */
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
		unsigned long hint,
		void (*iommu_io_pdir_entry)(u64 *, space_t, unsigned long,
					    unsigned long))
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	unsigned int n_mappings = 0;
	unsigned long dma_offset = 0, dma_len = 0;
	u64 *pdirp = NULL;

	/* Horrible hack.  For efficiency's sake, dma_sg starts one
	 * entry below the true start (it is immediately incremented
	 * in the loop). */
	dma_sg--;

	while (nents-- > 0) {
		unsigned long vaddr;
		long size;

		DBG_RUN_SG(" %d : %08lx/%05x %p/%05x\n", nents,
			   (unsigned long)sg_dma_address(startsg),
			   sg_dma_len(startsg),
			   sg_virt(startsg), startsg->length);


		/*
		** Look for the start of a new DMA stream
		*/
		
		if (sg_dma_address(startsg) & PIDE_FLAG) {
			u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;

			BUG_ON(pdirp && (dma_len != sg_dma_len(dma_sg)));

			dma_sg++;

			dma_len = sg_dma_len(startsg);
			sg_dma_len(startsg) = 0;
			dma_offset = (unsigned long) pide & ~IOVP_MASK;
			n_mappings++;
#if defined(ZX1_SUPPORT)
			/* Pluto IOMMU IO Virt Address is not zero based */
			sg_dma_address(dma_sg) = pide | ioc->ibase;
#else
			/* SBA, ccio, and dino are zero based.
			 * Trying to save a few CPU cycles for most users.
			 */
			sg_dma_address(dma_sg) = pide;
#endif
			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
			prefetchw(pdirp);
		}

		BUG_ON(pdirp == NULL);

		vaddr = (unsigned long)sg_virt(startsg);
		sg_dma_len(dma_sg) += startsg->length;
		size = startsg->length + dma_offset;
		dma_offset = 0;
#ifdef IOMMU_MAP_STATS
		ioc->msg_pages += startsg->length >> IOVP_SHIFT;
#endif
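		/* write one pdir entry per IO page covered by this chunk */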
		do {
			iommu_io_pdir_entry(pdirp, KERNEL_SPACE, 
					    vaddr, hint);
			vaddr += IOVP_SIZE;
			size -= IOVP_SIZE;
			pdirp++;
		} while (unlikely(size > 0));
		startsg++;
	}
	return n_mappings;
}
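
/*
** A minimal sketch of an iommu_io_pdir_entry() callback of the shape
** iommu_fill_pdir() expects.  The entry layout below (valid bit in the
** MSB, physical page address, hint bits in the low bits) is purely
** illustrative; the real encodings live in the SBA and CCIO drivers.
*/
static inline void
example_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
		      unsigned long hints)
{
	/* translate the kernel virtual address into a physical one */
	u64 pa = virt_to_phys((void *)vba);

	/* hypothetical layout: bit 63 = valid, low bits = hint flags */
	*pdir_ptr = (1ULL << 63) | (pa & ~(u64)(IOVP_SIZE - 1)) | hints;
	(void)sid;	/* the space id is unused in this sketch */
}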


/*
** First pass is to walk the SG list and determine where the breaks are
** in the DMA stream.  Allocates PDIR entries but does not fill them.
** Returns the number of DMA chunks.
**
** Doing the fill separately from the coalescing/allocation keeps the
** code simpler.  A future enhancement could make one pass through
** the sglist do both.
*/
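
/*
** Worked example (assuming 4k pages): a buffer spanning
** [0x1000..0x1fff] and one spanning [0x2000..0x27ff] are virtually
** contiguous and meet at a page boundary, so the loop below coalesces
** them into a single 0x1800-byte DMA chunk.  A following buffer at
** 0x3100 is not contiguous, and even one at 0x2800 would start a new
** chunk, because that junction is not page-aligned.
*/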

static inline unsigned int
iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
		struct scatterlist *startsg, int nents,
		int (*iommu_alloc_range)(struct ioc *, struct device *, size_t))
{
	struct scatterlist *contig_sg;	   /* contig chunk head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	unsigned int n_mappings = 0;
	unsigned int max_seg_size = min(dma_get_max_seg_size(dev),
					(unsigned)DMA_CHUNK_SIZE);
	unsigned int max_seg_boundary = dma_get_seg_boundary(dev) + 1;
	if (max_seg_boundary)	/* check if the addition above didn't overflow */
		max_seg_size = min(max_seg_size, max_seg_boundary);

	while (nents > 0) {

		/*
		** Prepare for first/next DMA stream
		*/
		contig_sg = startsg;
		dma_len = startsg->length;
		dma_offset = startsg->offset;

		/* PARANOID: clear entries */
		sg_dma_address(startsg) = 0;
		sg_dma_len(startsg) = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while (--nents > 0) {
			unsigned long prev_end, sg_start;

			prev_end = (unsigned long)sg_virt(startsg) +
							startsg->length;

			startsg++;
			sg_start = (unsigned long)sg_virt(startsg);

			/* PARANOID: clear entries */
			sg_dma_address(startsg) = 0;
			sg_dma_len(startsg) = 0;

			/*
			** First make sure current dma stream won't
			** exceed max_seg_size if we coalesce the
			** next entry.
			*/   
			if (unlikely(ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) >
				     max_seg_size))
				break;

			/*
			** Next see if we can append the next chunk: it
			** must start exactly where the previous entry
			** ended, and the junction must fall on a page
			** boundary.
			*/
			if (unlikely((prev_end != sg_start) ||
				((prev_end | sg_start) & ~PAGE_MASK)))
				break;

			dma_len += startsg->length;
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		sg_dma_len(contig_sg) = dma_len;
		dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
		sg_dma_address(contig_sg) =
			PIDE_FLAG
			| (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT)
			| dma_offset;
		n_mappings++;
	}

	return n_mappings;
}
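
/*
** How the two helpers fit together, loosely following the map_sg
** paths in the SBA and CCIO drivers: pass 1 coalesces the list and
** allocates pdir space, pass 2 fills in the pdir entries.  The
** my_alloc_range() and my_io_pdir_entry() callbacks are hypothetical
** stand-ins for the driver-specific implementations.
*/
static int my_alloc_range(struct ioc *ioc, struct device *dev, size_t size);
static void my_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
			     unsigned long hints);

static inline int
example_map_sg(struct ioc *ioc, struct device *dev,
	       struct scatterlist *sglist, int nents, unsigned long hint)
{
	int coalesced, filled;

	/* pass 1: find the DMA stream breaks and allocate pdir ranges */
	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents,
					  my_alloc_range);

	/* pass 2: write a pdir entry for every page of every chunk */
	filled = iommu_fill_pdir(ioc, sglist, nents, hint,
				 my_io_pdir_entry);

	/* both passes must agree on the number of DMA chunks */
	BUG_ON(coalesced != filled);

	return filled;
}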
