/*-
 * Copyright (c) 2007-2009 Kip Macy <kmacy@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_SYS_BUF_RING_H_
#define	_SYS_BUF_RING_H_

#include <machine/cpu.h>

#if defined(INVARIANTS) && !defined(DEBUG_BUFRING)
#define DEBUG_BUFRING 1
#endif

#ifdef DEBUG_BUFRING
#include <sys/lock.h>
#include <sys/mutex.h>
#endif

struct buf_ring {
	volatile uint32_t	br_prod_head;
	volatile uint32_t	br_prod_tail;
	int			br_prod_size;
	int			br_prod_mask;
	uint64_t		br_drops;
	volatile uint32_t	br_cons_head __aligned(CACHE_LINE_SIZE);
	volatile uint32_t	br_cons_tail;
	int			br_cons_size;
	int			br_cons_mask;
#ifdef DEBUG_BUFRING
	struct mtx		*br_lock;
#endif
	void			*br_ring[0] __aligned(CACHE_LINE_SIZE);
};
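
/*
 * br_ring is a variable-length array: the structure is allocated with
 * storage for the ring entries appended, as buf_ring_alloc() below does
 * for you.  A minimal sketch of the sizing arithmetic (M_DEVBUF stands
 * in for the caller's malloc type; `count' must be a power of 2 so that
 * the index masks work):
 *
 *	br = malloc(sizeof(struct buf_ring) + count * sizeof(void *),
 *	    M_DEVBUF, M_WAITOK | M_ZERO);
 *	br->br_prod_size = br->br_cons_size = count;
 *	br->br_prod_mask = br->br_cons_mask = count - 1;
 */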

/*
 * Multi-producer safe lock-free ring buffer enqueue.
 */
static __inline int
buf_ring_enqueue(struct buf_ring *br, void *buf)
{
	uint32_t prod_head, prod_next;
	uint32_t cons_tail;
#ifdef DEBUG_BUFRING
	int i;

	for (i = br->br_cons_head; i != br->br_prod_head;
	     i = ((i + 1) & br->br_cons_mask))
		if (br->br_ring[i] == buf)
			panic("buf=%p already enqueued at %d prod=%d cons=%d",
			    buf, i, br->br_prod_tail, br->br_cons_tail);
#endif
	critical_enter();
	do {
		prod_head = br->br_prod_head;
		cons_tail = br->br_cons_tail;

		prod_next = (prod_head + 1) & br->br_prod_mask;

		if (prod_next == cons_tail) {
			br->br_drops++;
			critical_exit();
			return (ENOBUFS);
		}
	} while (!atomic_cmpset_int(&br->br_prod_head, prod_head, prod_next));
#ifdef DEBUG_BUFRING
	if (br->br_ring[prod_head] != NULL)
		panic("dangling value in enqueue");
#endif
	br->br_ring[prod_head] = buf;

	/*
	 * The full memory barrier also ensures that the store of buf into
	 * br_ring[prod_head] cannot be reordered after the store to
	 * br_prod_tail below.
	 */
	mb();

	/*
	 * If there are other enqueues in progress
	 * that preceded us, we need to wait for them
	 * to complete.
	 */
	while (br->br_prod_tail != prod_head)
		cpu_spinwait();
	br->br_prod_tail = prod_next;
	critical_exit();
	return (0);
}
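
/*
 * Example producer usage (an illustrative sketch; `sc' and `m' stand in
 * for a hypothetical softc and mbuf).  On ENOBUFS the ring was full and
 * the caller still owns the buffer:
 *
 *	error = buf_ring_enqueue(sc->sc_br, m);
 *	if (error == ENOBUFS)
 *		m_freem(m);
 */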

/*
 * Multi-consumer safe dequeue.
 */
static __inline void *
buf_ring_dequeue_mc(struct buf_ring *br)
{
	uint32_t cons_head, cons_next;
	uint32_t prod_tail;
	void *buf;
	int success;

	critical_enter();
	do {
		cons_head = br->br_cons_head;
		prod_tail = br->br_prod_tail;

		cons_next = (cons_head + 1) & br->br_cons_mask;

		if (cons_head == prod_tail) {
			critical_exit();
			return (NULL);
		}

		success = atomic_cmpset_int(&br->br_cons_head, cons_head,
		    cons_next);
	} while (success == 0);

	buf = br->br_ring[cons_head];
#ifdef DEBUG_BUFRING
	br->br_ring[cons_head] = NULL;
#endif

	/*
	 * The full memory barrier also ensures that the load of
	 * br_ring[cons_head] cannot be reordered after the store to
	 * br_cons_tail below.
	 */
	mb();

	/*
	 * If there are other dequeues in progress
	 * that preceded us, we need to wait for them
	 * to complete.
	 */
	while (br->br_cons_tail != cons_head)
		cpu_spinwait();

	br->br_cons_tail = cons_next;
	critical_exit();

	return (buf);
}
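
/*
 * Example multi-consumer usage (an illustrative sketch; any number of
 * threads may call this concurrently, and process_buf() is a
 * hypothetical handler):
 *
 *	while ((buf = buf_ring_dequeue_mc(br)) != NULL)
 *		process_buf(buf);
 */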

/*
 * Single-consumer dequeue.
 * Use where dequeue is protected by a lock,
 * e.g. a network driver's tx queue lock.
 */
static __inline void *
buf_ring_dequeue_sc(struct buf_ring *br)
{
	uint32_t cons_head, cons_next;
#ifdef PREFETCH_DEFINED
	uint32_t cons_next_next;
#endif
	uint32_t prod_tail;
	void *buf;

	cons_head = br->br_cons_head;
	prod_tail = br->br_prod_tail;

	cons_next = (cons_head + 1) & br->br_cons_mask;
#ifdef PREFETCH_DEFINED
	cons_next_next = (cons_head + 2) & br->br_cons_mask;
#endif

	if (cons_head == prod_tail)
		return (NULL);

#ifdef PREFETCH_DEFINED
	if (cons_next != prod_tail) {
		prefetch(br->br_ring[cons_next]);
		if (cons_next_next != prod_tail)
			prefetch(br->br_ring[cons_next_next]);
	}
#endif
	br->br_cons_head = cons_next;
	buf = br->br_ring[cons_head];

#ifdef DEBUG_BUFRING
	br->br_ring[cons_head] = NULL;
	if ((br->br_lock != NULL) && !mtx_owned(br->br_lock))
		panic("lock not held on single consumer dequeue");
	if (br->br_cons_tail != cons_head)
		panic("inconsistent list cons_tail=%d cons_head=%d",
		    br->br_cons_tail, cons_head);
#endif
	br->br_cons_tail = cons_next;
	return (buf);
}
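
/*
 * Example single-consumer usage (an illustrative sketch; `txq', its
 * mutex and drv_encap() are hypothetical driver state -- the lock
 * serializes consumers only, producers may still enqueue concurrently):
 *
 *	mtx_lock(&txq->mtx);
 *	while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
 *		(void)drv_encap(txq, m);
 *	mtx_unlock(&txq->mtx);
 */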

/*
 * Single-consumer advance after a peek.
 * Use where it is protected by a lock,
 * e.g. a network driver's tx queue lock.
 */
static __inline void
buf_ring_advance_sc(struct buf_ring *br)
{
	uint32_t cons_head, cons_next;
	uint32_t prod_tail;

	cons_head = br->br_cons_head;
	prod_tail = br->br_prod_tail;

	cons_next = (cons_head + 1) & br->br_cons_mask;
	if (cons_head == prod_tail)
		return;
	br->br_cons_head = cons_next;
#ifdef DEBUG_BUFRING
	br->br_ring[cons_head] = NULL;
#endif
	br->br_cons_tail = cons_next;
}

/*
 * Used to return a buffer (most likely already there)
 * to the top of the ring.  The caller should *not*
 * have used any dequeue to pull it out of the ring
 * but instead should have used the peek() function.
 * This is normally used where the transmit queue
 * of a driver is full, and an mbuf must be returned.
 * Most likely what is in the ring-buffer is what
 * is being put back (since it was not removed), but
 * sometimes the lower transmit function may have
 * done a pullup or other function that will have
 * changed it.  As an optimization we always put it
 * back (since jhb says the store is probably cheaper);
 * if we have to do a multi-queue version we will need
 * the compare and an atomic.
 */
static __inline void
buf_ring_putback_sc(struct buf_ring *br, void *new)
{
	KASSERT(br->br_cons_head != br->br_prod_tail,
	    ("buf_ring has nothing to put back"));
	br->br_ring[br->br_cons_head] = new;
}

/*
 * Return a pointer to the first entry in the ring
 * without modifying it, or NULL if the ring is empty.
 * Race-prone if not protected by a lock.
 */
static __inline void *
buf_ring_peek(struct buf_ring *br)
{

#ifdef DEBUG_BUFRING
	if ((br->br_lock != NULL) && !mtx_owned(br->br_lock))
		panic("lock not held on single consumer dequeue");
#endif
	/*
	 * It is believed safe to omit a memory barrier here: we control
	 * the consumer index, and br_prod_tail is at worst a lagging
	 * indicator, so in the worst case we return NULL immediately
	 * after a buffer has been enqueued.
	 */
	if (br->br_cons_head == br->br_prod_tail)
		return (NULL);

	return (br->br_ring[br->br_cons_head]);
}
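
/*
 * peek, putback and advance combine into the usual transmit-loop
 * pattern (an illustrative sketch; `txq' and drv_encap() are
 * hypothetical, and the consumer lock must be held throughout --
 * drv_encap() may replace the mbuf, e.g. after a pullup, which is why
 * the possibly-changed pointer is put back on failure):
 *
 *	while ((m = buf_ring_peek(txq->br)) != NULL) {
 *		if (drv_encap(txq, &m) != 0) {
 *			if (m != NULL)
 *				buf_ring_putback_sc(txq->br, m);
 *			break;
 *		}
 *		buf_ring_advance_sc(txq->br);
 *	}
 */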

static __inline int
buf_ring_full(struct buf_ring *br)
{

	return (((br->br_prod_head + 1) & br->br_prod_mask) == br->br_cons_tail);
}

static __inline int
buf_ring_empty(struct buf_ring *br)
{

	return (br->br_cons_head == br->br_prod_tail);
}

static __inline int
buf_ring_count(struct buf_ring *br)
{

	return ((br->br_prod_size + br->br_prod_tail - br->br_cons_tail)
	    & br->br_prod_mask);
}
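
/*
 * Adding br_prod_size keeps the subtraction non-negative when the
 * producer index has wrapped around.  A worked example: with a ring of
 * size 8, prod_tail == 2 and cons_tail == 6, the count is
 * (8 + 2 - 6) & 7 == 4 (the entries at indices 6, 7, 0 and 1).
 */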

struct buf_ring *buf_ring_alloc(int count, struct malloc_type *type, int flags,
    struct mtx *);
void buf_ring_free(struct buf_ring *br, struct malloc_type *type);
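
/*
 * Example allocation and teardown (an illustrative sketch; the mtx
 * argument is only used for DEBUG_BUFRING assertions and is typically
 * the consumer lock, and M_DEVBUF stands in for the caller's malloc
 * type):
 *
 *	br = buf_ring_alloc(4096, M_DEVBUF, M_WAITOK, &sc->tx_mtx);
 *	...
 *	buf_ring_free(br, M_DEVBUF);
 */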

#endif