path: root/drivers/net/wwan/iosm/iosm_ipc_task_queue.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include "iosm_ipc_imem.h"
#include "iosm_ipc_task_queue.h"

/* Actual tasklet function, called whenever the tasklet is scheduled.
 * Invokes the registered callback for each element in the message queue.
 */
static void ipc_task_queue_handler(unsigned long data)
{
	struct ipc_task_queue *ipc_task = (struct ipc_task_queue *)data;
	unsigned int q_rpos = ipc_task->q_rpos;

	/* Loop over the input queue contents. */
	while (q_rpos != ipc_task->q_wpos) {
		/* Get the current first queue element. */
		struct ipc_task_queue_args *args = &ipc_task->args[q_rpos];

		/* Process the input message. */
		if (args->func)
			args->response = args->func(args->ipc_imem, args->arg,
						    args->msg, args->size);

		/* Signal completion for synchronous calls */
		if (args->completion)
			complete(args->completion);

		/* Free message if copy was allocated. */
		if (args->is_copy)
			kfree(args->msg);

		/* Invalidate the queue element. Technically,
		 * spin_lock_irqsave is not required here: the element
		 * has already been processed, and the producer cannot
		 * reuse this slot until q_rpos is advanced below.
		 */
		args->completion = NULL;
		args->func = NULL;
		args->msg = NULL;
		args->size = 0;
		args->is_copy = false;

		/* Calculate the new read index and update the shared
		 * read pointer.
		 */
		q_rpos = (q_rpos + 1) % IPC_THREAD_QUEUE_SIZE;
		ipc_task->q_rpos = q_rpos;
	}
}
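
/* Design note (editorial summary, not part of the original sources): the
 * queue is a fixed-size ring of IPC_THREAD_QUEUE_SIZE slots. q_wpos is only
 * advanced by producers while holding q_lock; q_rpos is only advanced by the
 * tasklet handler above. One slot is always left unused, so that
 * q_rpos == q_wpos unambiguously means "empty" and
 * (q_wpos + 1) % IPC_THREAD_QUEUE_SIZE == q_rpos means "full".
 */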

/* Free allocated message copies and complete pending waiters left in the
 * queue during deallocation.
 */
static void ipc_task_queue_cleanup(struct ipc_task_queue *ipc_task)
{
	unsigned int q_rpos = ipc_task->q_rpos;

	while (q_rpos != ipc_task->q_wpos) {
		struct ipc_task_queue_args *args = &ipc_task->args[q_rpos];

		if (args->completion)
			complete(args->completion);

		if (args->is_copy)
			kfree(args->msg);

		q_rpos = (q_rpos + 1) % IPC_THREAD_QUEUE_SIZE;
		ipc_task->q_rpos = q_rpos;
	}
}

/* Add a message to the queue and trigger the ipc_task. */
static int
ipc_task_queue_add_task(struct iosm_imem *ipc_imem,
			int arg, void *msg,
			int (*func)(struct iosm_imem *ipc_imem, int arg,
				    void *msg, size_t size),
			size_t size, bool is_copy, bool wait)
{
	struct tasklet_struct *ipc_tasklet = ipc_imem->ipc_task->ipc_tasklet;
	struct ipc_task_queue *ipc_task = &ipc_imem->ipc_task->ipc_queue;
	struct completion completion;
	unsigned int pos, nextpos;
	unsigned long flags;
	int result = -EIO;

	init_completion(&completion);

	/* The queue may be filled from either interrupt or thread
	 * context, therefore protect the queue operation with a spinlock.
	 */
	spin_lock_irqsave(&ipc_task->q_lock, flags);

	pos = ipc_task->q_wpos;
	nextpos = (pos + 1) % IPC_THREAD_QUEUE_SIZE;

	/* Add the element only if the queue is not full. */
	if (nextpos != ipc_task->q_rpos) {
		/* Get the reference to the queue element and save the passed
		 * values.
		 */
		ipc_task->args[pos].arg = arg;
		ipc_task->args[pos].msg = msg;
		ipc_task->args[pos].func = func;
		ipc_task->args[pos].ipc_imem = ipc_imem;
		ipc_task->args[pos].size = size;
		ipc_task->args[pos].is_copy = is_copy;
		ipc_task->args[pos].completion = wait ? &completion : NULL;
		ipc_task->args[pos].response = -1;

		/* Apply a write barrier so that the args[pos] fields above
		 * are visible before ipc_task->q_wpos is updated.
		 */
		smp_wmb();

		/* Publish the new write position; the element is now
		 * visible to the consumer.
		 */
		ipc_task->q_wpos = nextpos;
		result = 0;
	}

	spin_unlock_irqrestore(&ipc_task->q_lock, flags);

	if (result == 0) {
		tasklet_schedule(ipc_tasklet);

		if (wait) {
			wait_for_completion(&completion);
			result = ipc_task->args[pos].response;
		}
	} else {
		dev_err(ipc_imem->ipc_task->dev, "queue is full");
	}

	return result;
}

int ipc_task_queue_send_task(struct iosm_imem *imem,
			     int (*func)(struct iosm_imem *ipc_imem, int arg,
					 void *msg, size_t size),
			     int arg, void *msg, size_t size, bool wait)
{
	bool is_copy = false;
	void *copy = msg;
	int ret = -ENOMEM;

	if (size > 0) {
		copy = kmemdup(msg, size, GFP_ATOMIC);
		if (!copy)
			goto out;

		is_copy = true;
	}

	ret = ipc_task_queue_add_task(imem, arg, copy, func,
				      size, is_copy, wait);
	if (ret < 0) {
		dev_err(imem->ipc_task->dev,
			"add task failed for %ps %d, %p, %zu, %d", func, arg,
			copy, size, is_copy);
		if (is_copy)
			kfree(copy);
		goto out;
	}

	ret = 0;
out:
	return ret;
}
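
/* Illustrative sketch only (not part of the upstream driver): a hypothetical
 * caller of ipc_task_queue_send_task(). The names example_event_cb and
 * example_trigger_event are invented for this example; the real callers live
 * elsewhere in the iosm driver.
 */
static int example_event_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
			    size_t size)
{
	/* Runs in tasklet (softirq) context via ipc_task_queue_handler();
	 * the return value is only seen by callers that passed wait = true.
	 */
	return 0;
}

static int __maybe_unused example_trigger_event(struct iosm_imem *ipc_imem)
{
	int event = 1;
	int ret;

	/* Fire-and-forget: size > 0, so the argument is duplicated with
	 * kmemdup() and the local variable may safely go out of scope.
	 */
	ret = ipc_task_queue_send_task(ipc_imem, example_event_cb, 0,
				       &event, sizeof(event), false);
	if (ret < 0)
		return ret;

	/* Synchronous variant: size == 0, so msg is passed through without
	 * a copy and the call blocks until the callback has run; the return
	 * value is the callback's return value.
	 */
	return ipc_task_queue_send_task(ipc_imem, example_event_cb, 0,
					NULL, 0, true);
}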

int ipc_task_init(struct ipc_task *ipc_task)
{
	struct ipc_task_queue *ipc_queue = &ipc_task->ipc_queue;

	ipc_task->ipc_tasklet = kzalloc(sizeof(*ipc_task->ipc_tasklet),
					GFP_KERNEL);

	if (!ipc_task->ipc_tasklet)
		return -ENOMEM;

	/* Initialize the spinlock needed to protect the message queue of the
	 * ipc_task
	 */
	spin_lock_init(&ipc_queue->q_lock);

	tasklet_init(ipc_task->ipc_tasklet, ipc_task_queue_handler,
		     (unsigned long)ipc_queue);
	return 0;
}

void ipc_task_deinit(struct ipc_task *ipc_task)
{
	tasklet_kill(ipc_task->ipc_tasklet);

	kfree(ipc_task->ipc_tasklet);
	/* This will free/complete any outstanding messages,
	 * without calling the actual handler
	 */
	ipc_task_queue_cleanup(&ipc_task->ipc_queue);
}
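
/* Illustrative sketch only (not part of the upstream driver): how a
 * hypothetical owner might pair ipc_task_init() with ipc_task_deinit().
 * example_task_setup/example_task_teardown are invented names; the real
 * init/deinit call sites live elsewhere in the iosm driver.
 */
static int __maybe_unused example_task_setup(struct ipc_task *ipc_task,
					     struct device *dev)
{
	int ret;

	/* The owner is assumed to have allocated ipc_task and to provide
	 * the struct device used for dev_err() reporting.
	 */
	ipc_task->dev = dev;

	ret = ipc_task_init(ipc_task);
	if (ret < 0)
		dev_err(dev, "failed to initialize ipc_task queue");

	return ret;
}

static void __maybe_unused example_task_teardown(struct ipc_task *ipc_task)
{
	/* Kills the tasklet and completes/frees anything still queued. */
	ipc_task_deinit(ipc_task);
}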