-
Notifications
You must be signed in to change notification settings - Fork 4
/
pcieuni_ioctl_dma.c
346 lines (301 loc) · 11.9 KB
/
pcieuni_ioctl_dma.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
/**
* @file pcieuni_ioctl_dma.c
* @brief Implementation of DMA related IOCTL handlers
*/
#include "pcieuni_fnc.h"
#include <gpcieuni/pcieuni_buffer.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/types.h>
#ifdef PCIEUNI_DEBUG
static atomic_t dma_request_counter = ATOMIC_INIT(0);
#endif
/**
 * @brief Initiates DMA read from device
 *
 * Programs the device's DMA registers (board offset, bus address of the driver buffer, transfer
 * size) and starts the transfer. The DMA transfer size and offset are taken from the target buffer
 * structure; the target buffer is expected to be already synched for device access
 * (dma_sync_single_for_device). In case the device is busy this function will block in
 * pcieuni_dma_reserve(). Once the DMA transfer is initiated the function returns immediately —
 * completion is signalled by the device IRQ and collected in pcieuni_wait_dma_read().
 * @note This function may block.
 *
 * @param dev          Target device structure
 * @param targetBuffer Target buffer (supplies dma_offset, dma_size and dma_handle)
 *
 * @retval 0      Success
 * @retval -EBUSY Cannot initiate DMA because target device is busy
 * @retval -EINTR Operation was interrupted
 * @retval -EIO   Failed to write to device registers
 */
int pcieuni_start_dma_read(pcieuni_dev* dev, pcieuni_buffer* targetBuffer) {
  struct module_dev* mdev = pcieuni_get_mdev(dev);
  int retVal = 0;

#ifdef PCIEUNI_DEBUG
  atomic_inc(&dma_request_counter);
  PDEBUG(dev->name, "pcieuni_start_dma_read(offset=0x%lx, maxSize=0x%lx) number %i\n", targetBuffer->dma_offset,
      targetBuffer->dma_size, atomic_read(&dma_request_counter));
#endif /*PCIEUNI_DEBUG*/

  // reserve device registers IO (may block; fails with -EBUSY/-EINTR)
  retVal = pcieuni_dma_reserve(mdev, targetBuffer);
  if(retVal) return retVal;

  // write DMA source address (board-side offset) to device register
  retVal = pcieuni_register_write32(dev, dev->memmory_base2, DMA_BOARD_ADDRESS, targetBuffer->dma_offset, false);
  if(retVal) goto cleanup_releaseDevice;

  // write DMA destination address to device register.
  // NOTE(review): only the low 32 bits of dma_handle are programmed — assumes the DMA buffers
  // are allocated under a 32-bit DMA mask; confirm against the buffer allocation code.
  retVal = pcieuni_register_write32(
      dev, dev->memmory_base2, DMA_CPU_ADDRESS, (u32)(targetBuffer->dma_handle & 0xFFFFFFFF), true);
  if(retVal) goto cleanup_releaseDevice;

  // timestamp the DMA start (do_gettimeofday() was removed in 5.x kernels)
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
  do_gettimeofday(&(mdev->dma_start_time));
#else
  ktime_get_real_ts64(&(mdev->dma_start_time));
#endif

  // Setup env for irq handler: clear the wait flag and publish the in-flight buffer
  // BEFORE the size-register write below, which starts the transfer.
  mdev->waitFlag = 0;
  mdev->dma_buffer = targetBuffer;

  // write DMA size and start DMA
  retVal = pcieuni_register_write32(dev, dev->memmory_base2, DMA_SIZE_ADDRESS, targetBuffer->dma_size, false);
  if(retVal) goto cleanup_releaseDevice;

  PDEBUG(dev->name, "pcieuni_start_dma_read(): DMA started, offset=0x%lx, size=0x%lx \n", targetBuffer->dma_offset,
      targetBuffer->dma_size);

cleanup_releaseDevice:
  if(retVal) {
    // release device registers IO (failure path only; success keeps the reservation
    // until the transfer completes)
    pcieuni_dma_release(mdev);
  }
  return retVal;
}
/**
 * @brief Waits until DMA read to target buffer is finished
 *
 * Sleeps until the IRQ handler clears the buffer's BUFFER_STATE_WAITING bit, giving up after a
 * one-second timeout. On timeout or wait error the device-register reservation is released and an
 * error code is returned.
 * @note This function may block.
 *
 * @param mdev   Target device
 * @param buffer Target driver buffer
 *
 * @retval 0      Success
 * @retval -EIO   Timed out while waiting for end of DMA IRQ
 * @retval -EINTR Interrupted while waiting for end of DMA IRQ
 */
int pcieuni_wait_dma_read(module_dev* mdev, pcieuni_buffer* buffer) {
  const ulong waitJiffies = HZ / 1; // Timeout in 1 second
  int waitRc;

  PDEBUG(
      mdev->parent_dev->name, "pcieuni_wait_dma_read(offset=0x%lx, size=0x%lx)", buffer->dma_offset, buffer->dma_size);

  for(;;) {
    // Done as soon as the IRQ handler has cleared the waiting bit
    if(!test_bit(BUFFER_STATE_WAITING, &buffer->state)) break;

    PDEBUG(mdev->parent_dev->name, "pcieuni_wait_dma_read(offset=0x%lx, size=0x%lx): Waiting... \n", buffer->dma_offset,
        buffer->dma_size);
    waitRc = wait_event_timeout(mdev->waitDMA, !test_bit(BUFFER_STATE_WAITING, &buffer->state), waitJiffies);

    if(waitRc == 0) {
      printk(KERN_ERR "pcieuni(%s): error waiting for DMA to buffer (offset=0x%lx, size=0x%lx): TIMEOUT!\n",
          mdev->parent_dev->name, buffer->dma_offset, buffer->dma_size);
      // assuming we missed the interrupt
      pcieuni_dma_release(mdev);
      return -EIO;
    }
    if(waitRc < 0) {
      printk(KERN_ERR "pcieuni(%s): error waiting for DMA to buffer (offset=0x%lx, size=0x%lx): errno=%d!\n",
          mdev->parent_dev->name, buffer->dma_offset, buffer->dma_size, waitRc);
      // assuming we missed the interrupt
      pcieuni_dma_release(mdev);
      return -EINTR;
    }
  }

  PDEBUG(mdev->parent_dev->name, "pcieuni_wait_dma_read(offset=0x%lx, size=0x%lx): Done!", buffer->dma_offset,
      buffer->dma_size);

  // timestamp the DMA completion
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
  do_gettimeofday(&(mdev->dma_stop_time));
#else
  ktime_get_real_ts64(&(mdev->dma_stop_time));
#endif
  return 0;
}
/**
 * @brief Reads from board memory via DMA using driver allocated buffers
 *
 * Implements a two-stage pipeline over the pool of driver DMA buffers: in each loop iteration the
 * next chunk's DMA is started into a fresh buffer (nextBuffer) before waiting for the previous
 * chunk (prevBuffer) to complete and copying it to user space. On error the loop keeps running
 * until the still-in-flight buffer has been waited for and returned to the pool, so no buffer is
 * leaked or left mid-DMA.
 *
 * @param dev        Target device
 * @param devOffset  DMA offset to read from
 * @param dataSize   Size of data to be read
 * @param userBuffer Target user-space buffer
 *
 * @retval 0       Success
 * @retval -EFAULT Failed to copy data to userspace (or DMA BAR not mapped)
 * @retval -ENOMEM Failed to get target driver buffer
 * @retval -EBUSY  Cannot initiate DMA because target device is busy
 * @retval -EINTR  Operation was interrupted
 * @retval -EIO    Failed to write to device registers
 * @retval -EIO    Timed out while waiting for end of DMA IRQ from device
 */
int pcieuni_dma_read(pcieuni_dev* dev, unsigned long devOffset, unsigned long dataSize, void* userBuffer) {
  int retVal = 0;
  unsigned long dmaSize =
      PCIEUNI_DMA_SYZE * DIV_ROUND_UP(dataSize, PCIEUNI_DMA_SYZE); // round up total read-size to page boundary
  unsigned long dataReq = 0;                                       // Total size of data that was requested from device
  unsigned long dataRead = 0;                                      // Total size of data read from device
  pcieuni_buffer* prevBuffer = 0;                                  // buffer used for read in previous loop
  pcieuni_buffer* nextBuffer = 0;                                  // buffer to read to in this loop
  struct module_dev* mdev = pcieuni_get_mdev(dev);

  PDEBUG(dev->name, "pcieuni_dma_read(devOffset=0x%lx, dataSize=0x%lx)\n", devOffset, dataSize);

  if(!dev->memmory_base2) {
    PDEBUG(dev->name, "pcieuni_dma_read: ERROR: DMA BAR not mapped!\n");
    return -EFAULT;
  }

  // Loop until data is read. The loop also runs one extra pass after an error so the
  // in-flight prevBuffer is drained (prevBuffer becomes an ERR_PTR only after that).
  for(; !IS_ERR(prevBuffer) && (dataRead < dmaSize);) {
    if(retVal) {
      // an earlier stage failed: stop requesting new chunks, propagate the error marker
      nextBuffer = ERR_PTR(retVal);
    }
    else {
      // if there is more data to be requested from device
      if(dataReq < dmaSize) {
        // Find and reserve target buffer
        nextBuffer = pcieuni_bufferList_get_free(&mdev->dmaBuffers);
        if(!IS_ERR(nextBuffer)) {
          // prepare buffer to accept DMA data from device
          dma_sync_single_for_device(
              &dev->pcieuni_pci_dev->dev, nextBuffer->dma_handle, (size_t)nextBuffer->size, DMA_FROM_DEVICE);

          // request read of next data chunk
          nextBuffer->dma_size = min(dmaSize - dataReq, nextBuffer->size);
          nextBuffer->dma_offset = devOffset + dataReq;
          retVal = pcieuni_start_dma_read(dev, nextBuffer);
          if(retVal) {
            // make buffer available for next DMA request
            dma_sync_single_for_cpu(
                &dev->pcieuni_pci_dev->dev, nextBuffer->dma_handle, (size_t)nextBuffer->size, DMA_FROM_DEVICE);
            pcieuni_bufferList_set_free(&mdev->dmaBuffers, nextBuffer);
            nextBuffer = ERR_PTR(retVal);
          }
        }
        if(!IS_ERR(nextBuffer)) {
          dataReq += nextBuffer->dma_size; // add to total data requested
        }
      }
      else {
        // everything requested already; last pass just drains prevBuffer
        nextBuffer = 0;
      }
    }

    // if data read was requested for prevBuffer
    if(prevBuffer) {
      // wait until data read is completed (device irq)
      retVal = pcieuni_wait_dma_read(mdev, prevBuffer);
      if(!retVal) {
        // copy data to proper offset in the target user-space buffer
        dma_sync_single_for_cpu(
            &dev->pcieuni_pci_dev->dev, prevBuffer->dma_handle, (size_t)prevBuffer->size, DMA_FROM_DEVICE);
        // note: the copy is clamped to dataSize, since dmaSize was rounded up
        if(copy_to_user(
               userBuffer + dataRead, (void*)prevBuffer->kaddr, min(prevBuffer->dma_size, dataSize - dataRead))) {
          retVal = -EFAULT;
        }
        else {
          // add to total data read
          dataRead += prevBuffer->dma_size;
        }
      }
      // mark buffer available
      pcieuni_bufferList_set_free(&mdev->dmaBuffers, prevBuffer);
    }

    // pipeline shift: this pass's buffer is waited for in the next pass
    prevBuffer = nextBuffer;
  }

  if(IS_ERR(prevBuffer)) {
    // prefer the error from the drain stage; otherwise report the start-stage error
    retVal = retVal ? retVal : PTR_ERR(prevBuffer);
  }

  PDEBUG(
      dev->name, "pcieuni_dma_read(devOffset=0x%lx, dataSize=0x%lx): Return code(%i)\n", devOffset, dataSize, retVal);
  return retVal;
}
/**
 * @brief IOCTL handler for DMA related commands
 *
 * Dispatches PCIEUNI_GET_DMA_TIME and PCIEUNI_READ_DMA. Argument access is validated with
 * access_ok() and all command handling is serialized on dev->dev_mut.
 *
 * @param filp           Device file
 * @param cmd_p          Pointer to the IOCTL command
 * @param arg_p          Pointer to the IOCTL argument (a user-space address)
 * @param pcieuni_cdev_m Character-device bookkeeping structure (unused here)
 *
 * @retval 0            success
 * @retval -EFAULT      device not present, user buffer not accessible, or copy failed
 * @retval -ERESTARTSYS interrupted while acquiring the device mutex
 * @retval -ENOTTY      unknown IOCTL command
 * @retval <0           error code propagated from pcieuni_dma_read()
 */
long pcieuni_ioctl_dma(struct file* filp, unsigned int* cmd_p, unsigned long* arg_p, pcieuni_cdev* pcieuni_cdev_m) {
  unsigned int cmd;
  unsigned long arg;
  int retval = 0;
  int err = 0;
  int size_time;
  int io_dma_size;
  device_ioctrl_time time_data;
  device_ioctrl_dma dma_data;
  module_dev* module_dev_pp;
  pcieuni_dev* dev = filp->private_data;

  module_dev_pp = pcieuni_get_mdev(dev);
  cmd = *cmd_p;
  arg = *arg_p;
  size_time = sizeof(device_ioctrl_time);
  io_dma_size = sizeof(device_ioctrl_dma);

  if(!dev->dev_sts) {
    printk(KERN_DEBUG "pcieuni: no device %d\n", dev->dev_num);
    retval = -EFAULT;
    return retval;
  }
  PDEBUG(dev->name, "pcieuni_ioctl_dma(nr=%d )", _IOC_NR(cmd));

  /*
   * the direction is a bitmask, and VERIFY_WRITE catches R/W
   * transfers. `Type' is user-oriented, while
   * access_ok is kernel-oriented, so the concept of "read" and
   * "write" is reversed
   */
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
  if(_IOC_DIR(cmd) & _IOC_READ)
    err = !access_ok(VERIFY_WRITE, (void __user*)arg, _IOC_SIZE(cmd));
  else if(_IOC_DIR(cmd) & _IOC_WRITE)
    err = !access_ok(VERIFY_READ, (void __user*)arg, _IOC_SIZE(cmd));
#else
  err = !access_ok((void __user*)arg, _IOC_SIZE(cmd));
#endif
  if(err) return -EFAULT;

  if(mutex_lock_interruptible(&dev->dev_mut)) return -ERESTARTSYS;

  switch(cmd) {
    case PCIEUNI_GET_DMA_TIME:
      retval = 0;
      // The slot/board numbers are folded into the reported timestamps; this mutates the
      // stored times, so repeated calls keep shifting them (pre-existing behavior).
      module_dev_pp->dma_start_time.tv_sec += (long)dev->slot_num;
      module_dev_pp->dma_stop_time.tv_sec += (long)dev->slot_num;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
      module_dev_pp->dma_start_time.tv_usec += (long)dev->brd_num;
      module_dev_pp->dma_stop_time.tv_usec += (long)dev->brd_num;
      time_data.start_time = module_dev_pp->dma_start_time;
      time_data.stop_time = module_dev_pp->dma_stop_time;
#else
      // timespec64 stores nanoseconds; the ioctl ABI reports microseconds
      module_dev_pp->dma_start_time.tv_nsec += (long)dev->brd_num * NSEC_PER_USEC;
      module_dev_pp->dma_stop_time.tv_nsec += (long)dev->brd_num * NSEC_PER_USEC;
      time_data.start_time.tv_sec = module_dev_pp->dma_start_time.tv_sec;
      time_data.stop_time.tv_sec = module_dev_pp->dma_stop_time.tv_sec;
      time_data.start_time.tv_usec = module_dev_pp->dma_start_time.tv_nsec / NSEC_PER_USEC;
      time_data.stop_time.tv_usec = module_dev_pp->dma_stop_time.tv_nsec / NSEC_PER_USEC;
#endif
      if(copy_to_user((device_ioctrl_time*)arg, &time_data, (size_t)size_time)) {
        retval = -EIO;
        mutex_unlock(&dev->dev_mut);
        return retval;
      }
      break;

    case PCIEUNI_READ_DMA: {
      // Copy DMA transfer arguments into workqeue-data structure
      if(copy_from_user(&dma_data, (void*)arg, io_dma_size)) {
        mutex_unlock(&dev->dev_mut);
        return -EFAULT;
      }
      // The DMA payload is written back over the same user buffer that carried the request
      retval = pcieuni_dma_read(dev, dma_data.dma_offset, dma_data.dma_size, (void*)arg);
      break;
    }

    default:
      // BUGFIX: previously returned -ENOTTY directly here without releasing dev->dev_mut,
      // deadlocking every subsequent IOCTL. Fall through to the common unlock path instead.
      retval = -ENOTTY;
      break;
  }
  mutex_unlock(&dev->dev_mut);
  return retval;
}