
Linux/drivers/staging/sep/sep_main.c

/*
 *
 *  sep_main.c - Security Processor Driver main group of functions
 *
 *  Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
 *  Contributions(c) 2009-2011 Discretix. All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the Free
 *  Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc., 59
 *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 *  CONTACTS:
 *
 *  Mark Allyn          mark.a.allyn@intel.com
 *  Jayant Mangalampalli jayant.mangalampalli@intel.com
 *
 *  CHANGES:
 *
 *  2009.06.26  Initial publish
 *  2010.09.14  Upgrade to Medfield
 *  2011.01.21  Move to sep_main.c to allow for sep_crypto.c
 *  2011.02.22  Enable kernel crypto operation
 *
 *  Please note that this driver is based on information in the Discretix
 *  CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
 *  Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
 *  Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
 *  Overview and Integration Guide.
 */
/* #define DEBUG */
/* #define SEP_PERF_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/kdev_t.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <asm/current.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/async.h>
#include <linux/crypto.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/hash.h>

#include "sep_driver_hw_defs.h"
#include "sep_driver_config.h"
#include "sep_driver_api.h"
#include "sep_dev.h"
#include "sep_crypto.h"

#define CREATE_TRACE_POINTS
#include "sep_trace_events.h"

/*
 * Let's not spend cycles iterating over the message
 * area contents if debugging is not enabled
 */
#ifdef DEBUG
#define sep_dump_message(sep)   _sep_dump_message(sep)
#else
#define sep_dump_message(sep)
#endif

/*
 * Currently, there is only one SEP device per platform;
 * in the event future platforms have more than one SEP
 * device, this will become a linked list
 */

struct sep_device *sep_dev;

/**
 * sep_queue_status_remove - Removes transaction from status queue
 * @sep: SEP device
 * @queue_elem: pointer to the queue element to remove
 *
 * This function removes information about a transaction from the queue.
 */
void sep_queue_status_remove(struct sep_device *sep,
                                      struct sep_queue_info **queue_elem)
{
        unsigned long lck_flags;

        dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n",
                current->pid);

        if (!queue_elem || !(*queue_elem)) {
                dev_dbg(&sep->pdev->dev, "PID%d %s null\n",
                                        current->pid, __func__);
                return;
        }

        spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
        list_del(&(*queue_elem)->list);
        sep->sep_queue_num--;
        spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

        kfree(*queue_elem);
        *queue_elem = NULL;

        dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n",
                current->pid);
        return;
}

/**
 * sep_queue_status_add - Adds transaction to status queue
 * @sep: SEP device
 * @opcode: transaction opcode
 * @size: input data size
 * @pid: pid of current process
 * @name: current process name
 * @name_len: length of name (current process)
 *
 * This function adds information about a started transaction to the
 * status queue.
 */
struct sep_queue_info *sep_queue_status_add(
                                                struct sep_device *sep,
                                                u32 opcode,
                                                u32 size,
                                                u32 pid,
                                                u8 *name, size_t name_len)
{
        unsigned long lck_flags;
        struct sep_queue_info *my_elem = NULL;

        my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL);

        if (!my_elem)
                return NULL;

        dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid);

        my_elem->data.opcode = opcode;
        my_elem->data.size = size;
        my_elem->data.pid = pid;

        if (name_len > TASK_COMM_LEN)
                name_len = TASK_COMM_LEN;

        memcpy(&my_elem->data.name, name, name_len);

        spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);

        list_add_tail(&my_elem->list, &sep->sep_queue_status);
        sep->sep_queue_num++;

        spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

        return my_elem;
}
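
/*
 * Usage sketch (illustrative only, not driver code): every element
 * returned by sep_queue_status_add() must eventually be handed back
 * to sep_queue_status_remove(), which unlinks it, frees it, and NULLs
 * the caller's pointer:
 *
 *      struct sep_queue_info *elem;
 *
 *      elem = sep_queue_status_add(sep, opcode, size, current->pid,
 *                                  current->comm, strlen(current->comm));
 *      if (!elem)
 *              return -ENOMEM;
 *      // ... run the transaction ...
 *      sep_queue_status_remove(sep, &elem);    // elem is now NULL
 */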

/**
 *      sep_allocate_dmatables_region - Allocates buffer for the MLLI/DMA tables
 *      @sep: SEP device
 *      @dmatables_region: Destination pointer for the buffer
 *      @dma_ctx: DMA context for the transaction
 *      @table_count: Number of MLLI/DMA tables to create (unused by the
 *       current implementation, which always grows the region by a
 *       fixed amount)
 *
 *      The buffer created will not work as-is for DMA operations;
 *      it needs to be copied over to the appropriate place in the
 *      shared area.
 */
static int sep_allocate_dmatables_region(struct sep_device *sep,
                                         void **dmatables_region,
                                         struct sep_dma_context *dma_ctx,
                                         const u32 table_count)
{
        const size_t new_len =
                SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;

        void *tmp_region = NULL;

        dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n",
                                current->pid, dma_ctx);
        dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n",
                                current->pid, dmatables_region);

        if (!dma_ctx || !dmatables_region) {
                dev_warn(&sep->pdev->dev,
                        "[PID%d] dma context/region uninitialized\n",
                        current->pid);
                return -EINVAL;
        }

        dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08zX\n",
                                current->pid, new_len);
        dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
                                dma_ctx->dmatables_len);
        tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
        if (!tmp_region)
                return -ENOMEM;

        /* Were there any previous tables that need to be preserved? */
        if (*dmatables_region) {
                memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len);
                kfree(*dmatables_region);
                *dmatables_region = NULL;
        }

        *dmatables_region = tmp_region;

        dma_ctx->dmatables_len += new_len;

        return 0;
}
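
/*
 * Usage sketch (illustrative only, not driver code): the region grows
 * by a fixed amount on every call, and earlier tables are preserved by
 * the memcpy() above. Assuming a zero-initialized sep_dma_context:
 *
 *      void *region = NULL;
 *      int err;
 *
 *      err = sep_allocate_dmatables_region(sep, &region, dma_ctx, 1);
 *      // region points to a zeroed area; dma_ctx->dmatables_len grew
 *      err = sep_allocate_dmatables_region(sep, &region, dma_ctx, 1);
 *      // old tables were copied into the new, larger buffer
 *      kfree(region);
 */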

/**
 *      sep_wait_transaction - Used for synchronizing transactions
 *      @sep: SEP device
 */
int sep_wait_transaction(struct sep_device *sep)
{
        int error = 0;
        DEFINE_WAIT(wait);

        if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
                                &sep->in_use_flags)) {
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] no transactions, returning\n",
                                current->pid);
                goto end_function_setpid;
        }

        /*
         * Looping is needed even for exclusive waitq entries:
         * due to process wakeup latencies, a previous process
         * might have already created another transaction.
         */
        for (;;) {
                /*
                 * Exclusive waitq entry, so that only one process is
                 * woken up from the queue at a time.
                 */
                prepare_to_wait_exclusive(&sep->event_transactions,
                                          &wait,
                                          TASK_INTERRUPTIBLE);
                if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
                                          &sep->in_use_flags)) {
                        dev_dbg(&sep->pdev->dev,
                                "[PID%d] no transactions, breaking\n",
                                        current->pid);
                        break;
                }
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] transactions ongoing, sleeping\n",
                                current->pid);
                schedule();
                dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid);

                if (signal_pending(current)) {
                        dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n",
                                                        current->pid);
                        error = -EINTR;
                        goto end_function;
                }
        }
end_function_setpid:
        /*
         * The pid_doing_transaction field indicates that this process
         * now owns the facilities to perform a transaction with
         * the SEP. While this process is performing a transaction,
         * no other process that has the SEP device open can perform
         * any transactions. This method allows more than one process
         * to have the device open at any given time, which provides
         * finer granularity for device utilization by multiple
         * processes.
         */
        /* Only one process is able to progress here at a time */
        sep->pid_doing_transaction = current->pid;

end_function:
        finish_wait(&sep->event_transactions, &wait);

        return error;
}
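
/*
 * Transaction lifecycle sketch (illustrative only): every path that
 * acquires the device with sep_wait_transaction() must eventually
 * release it through sep_end_transaction_handler() below, which clears
 * SEP_TRANSACTION_STARTED_LOCK_BIT and wakes the next waiter:
 *
 *      error = sep_wait_transaction(sep);
 *      if (error)
 *              return error;   // -EINTR: we never became the owner
 *      // ... build DMA tables, send the command, poll for the reply ...
 *      sep_end_transaction_handler(sep, &dma_ctx, call_status,
 *                                  &my_queue_elem);
 */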

/**
 * sep_check_transaction_owner - Checks if current process owns transaction
 * @sep: SEP device
 */
static inline int sep_check_transaction_owner(struct sep_device *sep)
{
        dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n",
                current->pid,
                sep->pid_doing_transaction);

        if ((sep->pid_doing_transaction == 0) ||
                (current->pid != sep->pid_doing_transaction)) {
                return -EACCES;
        }

        /* We own the transaction */
        return 0;
}

#ifdef DEBUG

/**
 * _sep_dump_message - dump the message that is pending
 * @sep: SEP device
 *
 * The dump is only compiled in when DEBUG is set; it also
 * follows the kernel's debug print enabling.
 */
static void _sep_dump_message(struct sep_device *sep)
{
        int count;

        u32 *p = sep->shared_addr;

        for (count = 0; count < 10 * 4; count += 4)
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] Word %d of the message is %x\n",
                                current->pid, count/4, *p++);
}

#endif

/**
 * sep_map_and_alloc_shared_area - allocate shared block
 * @sep: security processor
 *
 * Allocates sep->shared_size bytes of DMA-coherent memory for the
 * shared area and records its virtual and bus addresses.
 */
static int sep_map_and_alloc_shared_area(struct sep_device *sep)
{
        sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
                sep->shared_size,
                &sep->shared_bus, GFP_KERNEL);

        if (!sep->shared_addr) {
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] shared memory dma_alloc_coherent failed\n",
                                current->pid);
                return -ENOMEM;
        }
        dev_dbg(&sep->pdev->dev,
                "[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
                                current->pid,
                                sep->shared_size, sep->shared_addr,
                                (unsigned long long)sep->shared_bus);
        return 0;
}

/**
 * sep_unmap_and_free_shared_area - free shared block
 * @sep: security processor
 */
static void sep_unmap_and_free_shared_area(struct sep_device *sep)
{
        dma_free_coherent(&sep->pdev->dev, sep->shared_size,
                                sep->shared_addr, sep->shared_bus);
}

#ifdef DEBUG

/**
 * sep_shared_bus_to_virt - convert bus/virt addresses
 * @sep: pointer to struct sep_device
 * @bus_address: address to convert
 *
 * Returns the virtual address inside the shared area that corresponds
 * to the given bus address.
 */
static void *sep_shared_bus_to_virt(struct sep_device *sep,
                                                dma_addr_t bus_address)
{
        return sep->shared_addr + (bus_address - sep->shared_bus);
}

#endif
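
/*
 * Worked example (illustrative values): if the shared area was
 * allocated with shared_addr == 0xffff880012340000 and bus address
 * shared_bus == 0x12340000, then a SEP-reported bus address of
 * 0x12340100 converts back as:
 *
 *      sep_shared_bus_to_virt(sep, 0x12340100)
 *              == 0xffff880012340000 + (0x12340100 - 0x12340000)
 *              == 0xffff880012340100
 */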

/**
 * sep_open - device open method
 * @inode: inode of SEP device
 * @filp: file handle to SEP device
 *
 * Open method for the SEP device. Called when userspace opens
 * the SEP device node.
 *
 * Returns zero on success, otherwise an error code.
 */
static int sep_open(struct inode *inode, struct file *filp)
{
        struct sep_device *sep;
        struct sep_private_data *priv;

        dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid);

        if (filp->f_flags & O_NONBLOCK)
                return -ENOTSUPP;

        /*
         * Get the SEP device structure and use it for the
         * private_data field in filp for other methods
         */

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        sep = sep_dev;
        priv->device = sep;
        filp->private_data = priv;

        dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n",
                                        current->pid, priv);

        /* Anyone can open; locking takes place at transaction level */
        return 0;
}
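
/*
 * Userspace usage sketch (illustrative; the exact device node name
 * depends on how the char device is registered on the platform):
 *
 *      int fd = open("/dev/sep", O_RDWR);      // O_NONBLOCK is rejected
 *      if (fd < 0)
 *              perror("open sep");
 *      // Many processes may hold the device open concurrently;
 *      // mutual exclusion happens per transaction, not per open.
 */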

/**
 * sep_free_dma_table_data_handler - free DMA table
 * @sep: pointer to struct sep_device
 * @dma_ctx: dma context
 *
 * Handles the request to free the DMA table for synchronic actions
 */
int sep_free_dma_table_data_handler(struct sep_device *sep,
                                           struct sep_dma_context **dma_ctx)
{
        int count;
        int dcb_counter;
        /* Pointer to the current dma_resource struct */
        struct sep_dma_resource *dma;

        dev_dbg(&sep->pdev->dev,
                "[PID%d] sep_free_dma_table_data_handler\n",
                        current->pid);

        if (!dma_ctx || !(*dma_ctx)) {
                /* No context or context already freed */
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] no DMA context or context already freed\n",
                                current->pid);

                return 0;
        }

        dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
                                        current->pid,
                                        (*dma_ctx)->nr_dcb_creat);

        for (dcb_counter = 0;
             dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) {
                dma = &(*dma_ctx)->dma_res_arr[dcb_counter];

                /* Unmap and free input map array */
                if (dma->in_map_array) {
                        for (count = 0; count < dma->in_num_pages; count++) {
                                dma_unmap_page(&sep->pdev->dev,
                                        dma->in_map_array[count].dma_addr,
                                        dma->in_map_array[count].size,
                                        DMA_TO_DEVICE);
                        }
                        kfree(dma->in_map_array);
                }

                /*
                 * Output is handled differently. If
                 * this was a secure DMA into restricted memory,
                 * then we skip this step altogether as restricted
                 * memory is not available to the OS at all.
                 */
                if (!(*dma_ctx)->secure_dma && dma->out_map_array) {

                        for (count = 0; count < dma->out_num_pages; count++) {
                                dma_unmap_page(&sep->pdev->dev,
                                        dma->out_map_array[count].dma_addr,
                                        dma->out_map_array[count].size,
                                        DMA_FROM_DEVICE);
                        }
                        kfree(dma->out_map_array);
                }

                /* Free page cache for input */
                if (dma->in_page_array) {
                        for (count = 0; count < dma->in_num_pages; count++) {
                                flush_dcache_page(dma->in_page_array[count]);
                                page_cache_release(dma->in_page_array[count]);
                        }
                        kfree(dma->in_page_array);
                }

                /* Again, we do this only for non-secure DMA */
                if (!(*dma_ctx)->secure_dma && dma->out_page_array) {

                        for (count = 0; count < dma->out_num_pages; count++) {
                                if (!PageReserved(dma->out_page_array[count]))
                                        SetPageDirty(dma->out_page_array[count]);

                                flush_dcache_page(dma->out_page_array[count]);
                                page_cache_release(dma->out_page_array[count]);
                        }
                        kfree(dma->out_page_array);
                }

                /*
                 * Note that here we use the map_num_entries counts
                 * because we don't have a page array; the page array
                 * is generated only in sep_lock_user_pages(), which is
                 * not called for kernel crypto. Kernel crypto is the
                 * only user of the scatter-gather (sg) lists.
                 */
                if (dma->src_sg) {
                        dma_unmap_sg(&sep->pdev->dev, dma->src_sg,
                                dma->in_map_num_entries, DMA_TO_DEVICE);
                        dma->src_sg = NULL;
                }

                if (dma->dst_sg) {
                        /* Unmap the output sg with the output entry count */
                        dma_unmap_sg(&sep->pdev->dev, dma->dst_sg,
                                dma->out_map_num_entries, DMA_FROM_DEVICE);
                        dma->dst_sg = NULL;
                }

                /* Reset all the values */
                dma->in_page_array = NULL;
                dma->out_page_array = NULL;
                dma->in_num_pages = 0;
                dma->out_num_pages = 0;
                dma->in_map_array = NULL;
                dma->out_map_array = NULL;
                dma->in_map_num_entries = 0;
                dma->out_map_num_entries = 0;
        }

        (*dma_ctx)->nr_dcb_creat = 0;
        (*dma_ctx)->num_lli_tables_created = 0;

        kfree(*dma_ctx);
        *dma_ctx = NULL;

        dev_dbg(&sep->pdev->dev,
                "[PID%d] sep_free_dma_table_data_handler end\n",
                        current->pid);

        return 0;
}

/**
 * sep_end_transaction_handler - end transaction
 * @sep: pointer to struct sep_device
 * @dma_ctx: DMA context
 * @call_status: Call status
 * @my_queue_elem: pointer to the status queue element for this transaction
 *
 * This API handles the end transaction request.
 */
static int sep_end_transaction_handler(struct sep_device *sep,
                                       struct sep_dma_context **dma_ctx,
                                       struct sep_call_status *call_status,
                                       struct sep_queue_info **my_queue_elem)
{
        dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid);

        /*
         * Extraneous transaction clearing would mess up PM
         * device usage counters and SEP would get suspended
         * just before we send a command to SEP in the next
         * transaction
         */
        if (sep_check_transaction_owner(sep)) {
                dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n",
                                                current->pid);
                return 0;
        }

        /* Update queue status */
        sep_queue_status_remove(sep, my_queue_elem);

        /* Check that all the DMA resources were freed */
        if (dma_ctx)
                sep_free_dma_table_data_handler(sep, dma_ctx);

        /* Reset call status for next transaction */
        if (call_status)
                call_status->status = 0;

        /* Clear the message area to avoid next transaction reading
         * sensitive results from previous transaction */
        memset(sep->shared_addr, 0,
               SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

        /* start suspend delay */
#ifdef SEP_ENABLE_RUNTIME_PM
        if (sep->in_use) {
                sep->in_use = 0;
                pm_runtime_mark_last_busy(&sep->pdev->dev);
                pm_runtime_put_autosuspend(&sep->pdev->dev);
        }
#endif

        clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
        sep->pid_doing_transaction = 0;

        /* Now it's safe for the next process to proceed */
        dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n",
                                        current->pid);
        clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
        wake_up(&sep->event_transactions);

        return 0;
}


/**
 * sep_release - close a SEP device
 * @inode: inode of SEP device
 * @filp: file handle being closed
 *
 * Called on the final close of a SEP device.
 */
static int sep_release(struct inode *inode, struct file *filp)
{
        struct sep_private_data * const private_data = filp->private_data;
        struct sep_call_status *call_status = &private_data->call_status;
        struct sep_device *sep = private_data->device;
        struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
        struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;

        dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid);

        sep_end_transaction_handler(sep, dma_ctx, call_status,
                my_queue_elem);

        kfree(filp->private_data);

        return 0;
}

/**
 * sep_mmap - maps the shared area to user space
 * @filp: pointer to struct file
 * @vma: pointer to vm_area_struct
 *
 * Called on an mmap of our space via the normal SEP device
 */
static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct sep_private_data * const private_data = filp->private_data;
        struct sep_call_status *call_status = &private_data->call_status;
        struct sep_device *sep = private_data->device;
        struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
        dma_addr_t bus_addr;
        unsigned long error = 0;

        dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid);

        /* Set the transaction busy (own the device) */
        /*
         * A problem for multithreaded applications is that here we're
         * possibly going to sleep while holding a write lock on
         * current->mm->mmap_sem, which will cause a deadlock for an
         * ongoing transaction trying to create DMA tables
         */
        error = sep_wait_transaction(sep);
        if (error)
                /* Interrupted by signal, don't clear transaction */
                goto end_function;

        /* Clear the message area to avoid next transaction reading
         * sensitive results from previous transaction */
        memset(sep->shared_addr, 0,
               SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

        /*
         * Check that the size of the mapped range matches the size of
         * the message shared area
         */
        if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
                error = -EINVAL;
                goto end_function_with_error;
        }

        dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n",
                                        current->pid, sep->shared_addr);

        /* Get bus address */
        bus_addr = sep->shared_bus;

        if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
                vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
                dev_dbg(&sep->pdev->dev, "[PID%d] remap_pfn_range failed\n",
                                                current->pid);
                error = -EAGAIN;
                goto end_function_with_error;
        }

        /* Update call status */
        set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status);

        goto end_function;

end_function_with_error:
        /* Clear our transaction */
        sep_end_transaction_handler(sep, NULL, call_status,
                my_queue_elem);

end_function:
        return error;
}
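
/*
 * Userspace usage sketch (illustrative only): mapping the message
 * shared area. The requested length must not exceed
 * SEP_DRIVER_MMMAP_AREA_SIZE, and a successful mmap() takes
 * transaction ownership until the transaction is ended:
 *
 *      void *msg = mmap(NULL, length, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, 0);
 *      if (msg == MAP_FAILED)
 *              perror("mmap sep");
 */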

/**
 * sep_poll - poll handler
 * @filp:       pointer to struct file
 * @wait:       pointer to poll_table
 *
 * Called by the OS when the kernel is asked to do a poll on
 * a SEP file handle.
 */
static unsigned int sep_poll(struct file *filp, poll_table *wait)
{
        struct sep_private_data * const private_data = filp->private_data;
        struct sep_call_status *call_status = &private_data->call_status;
        struct sep_device *sep = private_data->device;
        u32 mask = 0;
        u32 retval = 0;
        u32 retval2 = 0;
        unsigned long lock_irq_flag;

        /* Am I the process that owns the transaction? */
        if (sep_check_transaction_owner(sep)) {
                dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n",
                                                current->pid);
                mask = POLLERR;
                goto end_function;
        }

        /* Check if send command or send_reply were activated previously */
        if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
                          &call_status->status)) {
                dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n",
                                                current->pid);
                mask = POLLERR;
                goto end_function;
        }

        /* Add the event to the polling wait table */
        dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n",
                                        current->pid);

        poll_wait(filp, &sep->event_interrupt, wait);

        dev_dbg(&sep->pdev->dev,
                "[PID%d] poll: send_ct is %lx reply ct is %lx\n",
                        current->pid, sep->send_ct, sep->reply_ct);

        /* Check if an error occurred during poll */
        retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
        if ((retval2 != 0x0) && (retval2 != 0x8)) {
                dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n",
                                                current->pid, retval2);
                mask |= POLLERR;
                goto end_function;
        }

        spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);

        if (sep->send_ct == sep->reply_ct) {
                spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
                retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] poll: data ready check (GPR2)  %x\n",
                                current->pid, retval);

                /* Check if printf request */
                if ((retval >> 30) & 0x1) {
                        dev_dbg(&sep->pdev->dev,
                                "[PID%d] poll: SEP printf request\n",
                                        current->pid);
                        goto end_function;
                }

                /* Check if this is a SEP reply or a request */
                if (retval >> 31) {
                        dev_dbg(&sep->pdev->dev,
                                "[PID%d] poll: SEP request\n",
                                        current->pid);
                } else {
                        dev_dbg(&sep->pdev->dev,
                                "[PID%d] poll: normal return\n",
                                        current->pid);
                        sep_dump_message(sep);
                        dev_dbg(&sep->pdev->dev,
                                "[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
                                        current->pid);
                        mask |= POLLIN | POLLRDNORM;
                }
                set_bit(SEP_LEGACY_POLL_DONE_OFFSET, &call_status->status);
        } else {
                spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] poll; no reply; returning mask of 0\n",
                                current->pid);
                mask = 0;
        }

end_function:
        return mask;
}
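
/*
 * Userspace usage sketch (illustrative only): after the send-message
 * step has completed, wait for the SEP reply. POLLIN | POLLRDNORM
 * means a normal reply is in the message area; POLLERR signals an
 * ownership or protocol problem, or a SEP-side error:
 *
 *      struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *      if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *              ; // read the reply from the mmap'ed message area
 */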

/**
 * sep_time_address - address in SEP memory of time
 * @sep: SEP device we want the address from
 *
 * Return the address of the two dwords in memory used for time
 * setting.
 */
static u32 *sep_time_address(struct sep_device *sep)
{
        return sep->shared_addr +
                SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
}

/**
 * sep_set_time - set the SEP time
 * @sep: the SEP we are setting the time for
 *
 * Calculates time and sets it at the predefined address.
 * Called with the SEP mutex held.
 */
static unsigned long sep_set_time(struct sep_device *sep)
{
        struct timeval time;
        u32 *time_addr; /* Address of time as seen by the kernel */

        do_gettimeofday(&time);

        /* Set value in the SYSTEM MEMORY offset */
        time_addr = sep_time_address(sep);

        time_addr[0] = SEP_TIME_VAL_TOKEN;
        time_addr[1] = time.tv_sec;

        dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n",
                                        current->pid, time.tv_sec);
        dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n",
                                        current->pid, time_addr);
        dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n",
                                        current->pid, sep->shared_addr);

        return time.tv_sec;
}

/**
 * sep_send_command_handler - kick off a command
 * @sep: SEP being signalled
 *
 * This function raises an interrupt to the SEP that signals that it
 * has a new command from the host.
 *
 * Note that this function does fall under the ioctl lock
 */
int sep_send_command_handler(struct sep_device *sep)
{
        unsigned long lock_irq_flag;
        u32 *msg_pool;
        int error = 0;

        /* Basic sanity check; set msg pool to start of shared area */
        msg_pool = (u32 *)sep->shared_addr;
        msg_pool += 2;

        /* Look for start msg token */
        if (*msg_pool != SEP_START_MSG_TOKEN) {
                dev_warn(&sep->pdev->dev, "start message token not present\n");
                error = -EPROTO;
                goto end_function;
        }

        /* Do we have a reasonable size? */
        msg_pool += 1;
        if ((*msg_pool < 2) ||
                (*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) {

                dev_warn(&sep->pdev->dev, "invalid message size\n");
                error = -EPROTO;
                goto end_function;
        }

        /* Does the command look reasonable? */
        msg_pool += 1;
        if (*msg_pool < 2) {
                dev_warn(&sep->pdev->dev, "invalid message opcode\n");
                error = -EPROTO;
                goto end_function;
        }

#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
        dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n",
                                        current->pid,
                                        sep->pdev->dev.power.runtime_status);
        sep->in_use = 1; /* device is about to be used */
        pm_runtime_get_sync(&sep->pdev->dev);
#endif

        if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) {
                error = -EPROTO;
                goto end_function;
        }
        sep->in_use = 1; /* device is about to be used */
        sep_set_time(sep);

        sep_dump_message(sep);

        /* Update counter */
        spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
        sep->send_ct++;
        spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);

        dev_dbg(&sep->pdev->dev,
                "[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
                        current->pid, sep->send_ct, sep->reply_ct);

        /* Send interrupt to SEP */
        sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);

end_function:
        return error;
}
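
/*
 * Message header layout implied by the checks above (derived from this
 * function, not from a formal spec): the message area is treated as an
 * array of 32-bit words, where
 *
 *      word 2: SEP_START_MSG_TOKEN
 *      word 3: message size (>= 2 and
 *              <= SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)
 *      word 4: opcode (>= 2)
 *
 * Only after these sanity checks pass does the driver time-stamp the
 * area, bump send_ct and ring GPR0 to interrupt the SEP.
 */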

/**
 *      sep_crypto_dma - DMA-map a scatterlist
 *      @sep: pointer to struct sep_device
 *      @sg: pointer to struct scatterlist
 *      @dma_maps: pointer to place a pointer to array of dma maps
 *       This is filled in; anything previously there will be lost
 *       The structure for dma maps is sep_dma_map
 *      @direction: DMA direction (to or from the device)
 *
 *      Returns the number of dma maps on success; negative on error
 *
 *      This creates the dma table from the scatterlist.
 *      It is used only for kernel crypto, as it works with the
 *      scatterlist representation of data buffers
 */
static int sep_crypto_dma(
        struct sep_device *sep,
        struct scatterlist *sg,
        struct sep_dma_map **dma_maps,
        enum dma_data_direction direction)
{
        struct scatterlist *temp_sg;

        u32 count_segment;
        u32 count_mapped;
        struct sep_dma_map *sep_dma;
        int ct1;

        if (sg->length == 0)
                return 0;

        /* Count the segments */
        temp_sg = sg;
        count_segment = 0;
        while (temp_sg) {
                count_segment += 1;
                temp_sg = scatterwalk_sg_next(temp_sg);
        }
        dev_dbg(&sep->pdev->dev,
                "There are (hex) %x segments in sg\n", count_segment);

        /* DMA map segments */
        count_mapped = dma_map_sg(&sep->pdev->dev, sg,
                count_segment, direction);

        dev_dbg(&sep->pdev->dev,
                "There are (hex) %x maps in sg\n", count_mapped);

        if (count_mapped == 0) {
                dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n");
                return -ENOMEM;
        }

        sep_dma = kmalloc(sizeof(struct sep_dma_map) *
                count_mapped, GFP_ATOMIC);

        if (sep_dma == NULL) {
                dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n");
                return -ENOMEM;
        }

        for_each_sg(sg, temp_sg, count_mapped, ct1) {
                sep_dma[ct1].dma_addr = sg_dma_address(temp_sg);
                sep_dma[ct1].size = sg_dma_len(temp_sg);
                dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n",
                        ct1, (unsigned long)sep_dma[ct1].dma_addr,
                        (unsigned long)sep_dma[ct1].size);
        }

        *dma_maps = sep_dma;
        return count_mapped;
}
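
/*
 * Usage sketch (illustrative only): mapping a single kernel buffer for
 * transfer to the SEP. sep_crypto_dma() counts the scatterlist
 * segments, maps them with dma_map_sg() and returns a freshly
 * allocated sep_dma_map array that the caller must kfree():
 *
 *      struct scatterlist sg;
 *      struct sep_dma_map *maps;
 *      int nents;
 *
 *      sg_init_one(&sg, buffer, len);
 *      nents = sep_crypto_dma(sep, &sg, &maps, DMA_TO_DEVICE);
 *      if (nents < 0)
 *              return nents;
 *      // ... use maps[0..nents - 1].dma_addr and .size ...
 *      kfree(maps);
 */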

/**
 *      sep_crypto_lli - build an LLI table from a scatterlist
 *      @sep: pointer to struct sep_device
 *      @sg: pointer to struct scatterlist
 *      @maps: pointer to place a pointer to array of dma maps
 *       This is filled in; anything previously there will be lost
 *       The structure for dma maps is sep_dma_map
 *      @llis: pointer to place a pointer to array of lli entries
 *       This is filled in; anything previously there will be lost
 *       The structure for lli entries is sep_lli_entry
 *      @data_size: total data size
 *      @direction: DMA direction (to or from the device)
 *
 *      Returns the number of dma maps on success; negative on error
 *
 *      This creates the LLI table from the scatterlist.
 *      It is only used for kernel crypto, as it works exclusively
 *      with the scatterlist (struct scatterlist) representation of
 *      data buffers
 */
static int sep_crypto_lli(
        struct sep_device *sep,
        struct scatterlist *sg,
        struct sep_dma_map **maps,
        struct sep_lli_entry **llis,
        u32 data_size,
        enum dma_data_direction direction)
{
        int ct1;
        struct sep_lli_entry *sep_lli;
        struct sep_dma_map *sep_map;

        int nbr_ents;

        nbr_ents = sep_crypto_dma(sep, sg, maps, direction);
        if (nbr_ents <= 0) {
                dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n",
                        nbr_ents);
                return nbr_ents;
        }

        sep_map = *maps;

        sep_lli = kmalloc(sizeof(struct sep_lli_entry) * nbr_ents, GFP_ATOMIC);

        if (sep_lli == NULL) {
                dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n");

                kfree(*maps);
                *maps = NULL;
                return -ENOMEM;
        }

        for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) {
                sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr;

                /* Maximum for page is total data size */
                if (sep_map[ct1].size > data_size)
                        sep_map[ct1].size = data_size;

                sep_lli[ct1].block_size = (u32)sep_map[ct1].size;
        }

        *llis = sep_lli;
        return nbr_ents;
}

/**
 *      sep_lock_kernel_pages - map kernel pages for DMA
 *      @sep: pointer to struct sep_device
 *      @kernel_virt_addr: address of data buffer in kernel (unused here;
 *       the scatterlists in @dma_ctx describe the buffers instead)
 *      @data_size: size of data
 *      @lli_array_ptr: lli array
 *      @in_out_flag: input into device or output from device
 *      @dma_ctx: DMA context for the transaction
 *
 *      This function locks all the physical pages of the kernel virtual
 *      buffer and constructs a basic LLI array, where each entry holds
 *      the physical page address and the size of the application data
 *      held in that page.
 *      This function is used only during kernel crypto module calls from
 *      within the kernel (when ioctl is not used).
 *
 *      This is used only for kernel crypto. Kernel pages
 *      are handled differently as they are done via
 *      scatter-gather lists (struct scatterlist)
 */
static int sep_lock_kernel_pages(struct sep_device *sep,
        unsigned long kernel_virt_addr,
        u32 data_size,
        struct sep_lli_entry **lli_array_ptr,
        int in_out_flag,
        struct sep_dma_context *dma_ctx)
{
        /* int, not u32: sep_crypto_lli() may return a negative errno */
        int num_pages;
        struct scatterlist *sg;

        /* Array of lli */
        struct sep_lli_entry *lli_array;
        /* Map array */
        struct sep_dma_map *map_array;

        enum dma_data_direction direction;

        lli_array = NULL;
        map_array = NULL;

        if (in_out_flag == SEP_DRIVER_IN_FLAG) {
                direction = DMA_TO_DEVICE;
                sg = dma_ctx->src_sg;
        } else {
                direction = DMA_FROM_DEVICE;
                sg = dma_ctx->dst_sg;
        }

        num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array,
                data_size, direction);

        if (num_pages <= 0) {
                dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n",
                        num_pages);
                return -ENOMEM;
        }

        /* Put mapped kernel sg into kernel resource array */

        /* Set output params according to the in_out flag */
        if (in_out_flag == SEP_DRIVER_IN_FLAG) {
                *lli_array_ptr = lli_array;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
                                                                num_pages;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
                                                                NULL;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
                                                                map_array;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
                                                                num_pages;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg =
                        dma_ctx->src_sg;
        } else {
                *lli_array_ptr = lli_array;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
                                                                num_pages;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
                                                                NULL;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
                                                                map_array;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
                                        out_map_num_entries = num_pages;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg =
                        dma_ctx->dst_sg;
        }

        return 0;
}
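
/*
 * Usage sketch (illustrative only; the scatterlists are assumed to
 * have been prepared by the caller, e.g. sep_crypto.c): the kernel
 * crypto path points dma_ctx->src_sg / dma_ctx->dst_sg at the
 * request's scatterlists before building the LLI arrays. Note that
 * kernel_virt_addr is not referenced on this path:
 *
 *      dma_ctx->src_sg = src_sg;
 *      error = sep_lock_kernel_pages(sep, 0, data_size, &lli_in,
 *                                    SEP_DRIVER_IN_FLAG, dma_ctx);
 */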

/**
 * sep_lock_user_pages - lock and map user pages for DMA
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user memory data buffer
 * @data_size: size of data buffer
 * @lli_array_ptr: lli array
 * @in_out_flag: input or output to device
 * @dma_ctx: DMA context for the transaction
 *
 * This function locks all the physical pages of the application
 * virtual buffer and constructs a basic LLI array, where each entry
 * holds the physical page address and the size of the application
 * data held in that physical page
 */
static int sep_lock_user_pages(struct sep_device *sep,
        u32 app_virt_addr,
        u32 data_size,
        struct sep_lli_entry **lli_array_ptr,
        int in_out_flag,
        struct sep_dma_context *dma_ctx)
{
        int error = 0;
        u32 count;
        int result;
        /* The page of the end address of the user space buffer */
        u32 end_page;
        /* The page of the start address of the user space buffer */
        u32 start_page;
        /* The range in pages */
        u32 num_pages;
        /* Array of pointers to page */
        struct page **page_array;
        /* Array of lli */
        struct sep_lli_entry *lli_array;
        /* Map array */
        struct sep_dma_map *map_array;

        /* Set start and end pages and num pages */
        end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
        start_page = app_virt_addr >> PAGE_SHIFT;
        num_pages = end_page - start_page + 1;

        dev_dbg(&sep->pdev->dev,
                "[PID%d] lock user pages app_virt_addr is %x\n",
                        current->pid, app_virt_addr);

        dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
                                        current->pid, data_size);
        dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
                                        current->pid, start_page);
        dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
                                        current->pid, end_page);
        dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
                                        current->pid, num_pages);

        /* Allocate array of pages structure pointers */
        page_array = kmalloc_array(num_pages, sizeof(struct page *),
                                   GFP_ATOMIC);
        if (!page_array) {
                error = -ENOMEM;
                goto end_function;
        }

        map_array = kmalloc_array(num_pages, sizeof(struct sep_dma_map),
                                  GFP_ATOMIC);
        if (!map_array) {
                error = -ENOMEM;
                goto end_function_with_error1;
        }

        lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
                                  GFP_ATOMIC);
        if (!lli_array) {
                error = -ENOMEM;
                goto end_function_with_error2;
        }

        /* Convert the application virtual address into a set of physical pages */
        result = get_user_pages_fast(app_virt_addr, num_pages,
                ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1), page_array);

        /* Check the number of pages locked - if not all then exit with error */
        if (result != num_pages) {
                dev_warn(&sep->pdev->dev,
                        "[PID%d] not all pages locked by get_user_pages, result 0x%X, num_pages 0x%X\n",
                        current->pid, result, num_pages);
                error = -ENOMEM;
                goto end_function_with_error3;
        }

        dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n",
                                        current->pid);

        /*
         * Fill the array using page array data and
         * map the pages - this action will also flush the cache as needed
         */
        for (count = 0; count < num_pages; count++) {
                /* Fill the map array */
                map_array[count].dma_addr =
                        dma_map_page(&sep->pdev->dev, page_array[count],
                        0, PAGE_SIZE, DMA_BIDIRECTIONAL);

                map_array[count].size = PAGE_SIZE;

                /* Fill the lli array entry */
                lli_array[count].bus_address = (u32)map_array[count].dma_addr;
                lli_array[count].block_size = PAGE_SIZE;

                dev_dbg(&sep->pdev->dev,
                        "[PID%d] lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is (hex) %x\n",
                        current->pid, count,
                        (unsigned long)lli_array[count].bus_address,
                        count, lli_array[count].block_size);
        }

        /* Adjust the offset for the first page */
        lli_array[0].bus_address =
                lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

        /* Check that not all the data is in the first page only */
        if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
                lli_array[0].block_size = data_size;
        else
                lli_array[0].block_size =
                        PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

        dev_dbg(&sep->pdev->dev,
                "[PID%d] After check if page 0 has all data\n",
                current->pid);
        dev_dbg(&sep->pdev->dev,
                "[PID%d] lli_array[0].bus_address is (hex) %08lx, lli_array[0].block_size is (hex) %x\n",
                current->pid,
                (unsigned long)lli_array[0].bus_address,
                lli_array[0].block_size);

        /* Check the size of the last page */
        if (num_pages > 1) {
                lli_array[num_pages - 1].block_size =
                        (app_virt_addr + data_size) & (~PAGE_MASK);
                if (lli_array[num_pages - 1].block_size == 0)
                        lli_array[num_pages - 1].block_size = PAGE_SIZE;

                dev_dbg(&sep->pdev->dev,
                        "[PID%d] After last page size adjustment\n",
                        current->pid);
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] lli_array[%x].bus_address is (hex) %08lx, lli_array[%x].block_size is (hex) %x\n",
                        current->pid,
                        num_pages - 1,
                        (unsigned long)lli_array[num_pages - 1].bus_address,
                        num_pages - 1,
                        lli_array[num_pages - 1].block_size);
        }

        /* Set output params according to the in_out flag */
        if (in_out_flag == SEP_DRIVER_IN_FLAG) {
                *lli_array_ptr = lli_array;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
                                                                num_pages;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
                                                                page_array;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
                                                                map_array;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
                                                                num_pages;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL;
        } else {
                *lli_array_ptr = lli_array;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
                                                                num_pages;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
                                                                page_array;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
                                                                map_array;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
                                        out_map_num_entries = num_pages;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL;
        }
        goto end_function;

end_function_with_error3:
        /* Free lli array */
        kfree(lli_array);

end_function_with_error2:
        kfree(map_array);

end_function_with_error1:
        /* Free page array */
        kfree(page_array);

end_function:
        return error;
}
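
/*
 * Worked example (illustrative numbers, 4 KiB pages): for
 * app_virt_addr == 0x1000f00 and data_size == 0x300, the buffer spans
 * two pages (num_pages == 2) and the fixups above yield:
 *
 *      lli_array[0].bus_address += 0xf00;      // offset into first page
 *      lli_array[0].block_size   = 0x100;      // PAGE_SIZE - 0xf00
 *      lli_array[1].block_size   = 0x200;      // (addr + size) & ~PAGE_MASK
 *
 * 0x100 + 0x200 == 0x300 == data_size, as expected.
 */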
1381 
1382 /**
1383  *      sep_lli_table_secure_dma - get lli array for IMR addresses
1384  *      @sep: pointer to struct sep_device
1385  *      @app_virt_addr: user memory data buffer
1386  *      @data_size: size of data buffer
1387  *      @lli_array_ptr: lli array
1388  *      @in_out_flag: not used
1389  *      @dma_ctx: pointer to struct sep_dma_context
1390  *
1391  *      This function creates lli tables for outputting data to
1392  *      IMR memory, which is memory that cannot be accessed by the
1393  *      the x86 processor.
1394  */
1395 static int sep_lli_table_secure_dma(struct sep_device *sep,
1396         u32 app_virt_addr,
1397         u32 data_size,
1398         struct sep_lli_entry **lli_array_ptr,
1399         int in_out_flag,
1400         struct sep_dma_context *dma_ctx)
1401 
1402 {
1403         u32 count;
1404         /* The the page of the end address of the user space buffer */
1405         u32 end_page;
1406         /* The page of the start address of the user space buffer */
1407         u32 start_page;
1408         /* The range in pages */
1409         u32 num_pages;
1410         /* Array of lli */
1411         struct sep_lli_entry *lli_array;
1412 
1413         /* Set start and end pages and num pages */
1414         end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1415         start_page = app_virt_addr >> PAGE_SHIFT;
1416         num_pages = end_page - start_page + 1;
1417 
1418         dev_dbg(&sep->pdev->dev,
1419                 "[PID%d] lock user pages  app_virt_addr is %x\n",
1420                 current->pid, app_virt_addr);
1421 
1422         dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
1423                 current->pid, data_size);
1424         dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
1425                 current->pid, start_page);
1426         dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
1427                 current->pid, end_page);
1428         dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
1429                 current->pid, num_pages);
1430 
1431         lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
1432                                   GFP_ATOMIC);
1433         if (!lli_array)
1434                 return -ENOMEM;
1435 
1436         /*
1437          * Fill the lli_array
1438          */
1439         start_page = start_page << PAGE_SHIFT;
1440         for (count = 0; count < num_pages; count++) {
1441                 /* Fill the lli array entry */
1442                 lli_array[count].bus_address = start_page;
1443                 lli_array[count].block_size = PAGE_SIZE;
1444 
1445                 start_page += PAGE_SIZE;
1446 
1447                 dev_dbg(&sep->pdev->dev,
1448                         "[PID%d] lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is (hex) %x\n",
1449                         current->pid,
1450                         count, (unsigned long)lli_array[count].bus_address,
1451                         count, lli_array[count].block_size);
1452         }
1453 
1454         /* Check the offset for the first page */
1455         lli_array[0].bus_address =
1456                 lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
1457 
1458         /* Check whether all the data fits in the first page */
1459         if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1460                 lli_array[0].block_size = data_size;
1461         else
1462                 lli_array[0].block_size =
1463                         PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1464 
1465         dev_dbg(&sep->pdev->dev,
1466                 "[PID%d] After check if page 0 has all data\n"
1467                 "lli_array[0].bus_address is (hex) %08lx, lli_array[0].block_size is (hex) %x\n",
1468                 current->pid,
1469                 (unsigned long)lli_array[0].bus_address,
1470                 lli_array[0].block_size);
1471 
1472         /* Check the size of the last page */
1473         if (num_pages > 1) {
1474                 lli_array[num_pages - 1].block_size =
1475                         (app_virt_addr + data_size) & (~PAGE_MASK);
1476                 if (lli_array[num_pages - 1].block_size == 0)
1477                         lli_array[num_pages - 1].block_size = PAGE_SIZE;
1478 
1479                 dev_dbg(&sep->pdev->dev,
1480                         "[PID%d] After last page size adjustment\n"
1481                         "lli_array[%x].bus_address is (hex) %08lx, lli_array[%x].block_size is (hex) %x\n",
1482                         current->pid, num_pages - 1,
1483                         (unsigned long)lli_array[num_pages - 1].bus_address,
1484                         num_pages - 1,
1485                         lli_array[num_pages - 1].block_size);
1486         }
1487         *lli_array_ptr = lli_array;
1488         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages;
1489         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
1490         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
1491         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_num_entries = 0;
1492 
1493         return 0;
1494 }
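
/*
 * Worked example (hypothetical values, not taken from a live trace):
 * for a buffer at app_virt_addr 0x1234 with data_size 0x3000 and
 * 4 KB pages, start_page = 0x1, end_page = 0x4, num_pages = 4, and
 * the loop plus the first/last page fixups above yield
 *
 *	lli_array[0] = { .bus_address = 0x1234, .block_size = 0x0dcc }
 *	lli_array[1] = { .bus_address = 0x2000, .block_size = 0x1000 }
 *	lli_array[2] = { .bus_address = 0x3000, .block_size = 0x1000 }
 *	lli_array[3] = { .bus_address = 0x4000, .block_size = 0x0234 }
 *
 * Only the first and last entries are trimmed to the buffer offset
 * and tail; the block_size values sum to data_size.
 */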
1495 
1496 /**
1497  * sep_calculate_lli_table_max_size - size the LLI table
1498  * @sep: pointer to struct sep_device
1499  * @lli_in_array_ptr: array of input lli entries
1500  * @num_array_entries: number of entries left in the lli array
1501  * @last_table_flag: set when this is the last table that can be built
1502  *
1503  * This function calculates the size of data that can be inserted into
1504  * the lli table from this array, such that either the table is full
1505  * (all entries are entered), or there are no more entries in the
1506  * lli array
1507  */
1508 static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
1509         struct sep_lli_entry *lli_in_array_ptr,
1510         u32 num_array_entries,
1511         u32 *last_table_flag)
1512 {
1513         u32 counter;
1514         /* Table data size */
1515         u32 table_data_size = 0;
1516         /* Data size for the next table */
1517         u32 next_table_data_size;
1518 
1519         *last_table_flag = 0;
1520 
1521         /*
1522          * Calculate the data in the out lli table till we fill the whole
1523          * table or till the data has ended
1524          */
1525         for (counter = 0;
1526                 (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
1527                         (counter < num_array_entries); counter++)
1528                 table_data_size += lli_in_array_ptr[counter].block_size;
1529 
1530         /*
1531          * Check if we reached the last entry,
1532          * meaning this is the last table to build,
1533          * and no need to check the block alignment
1534          */
1535         if (counter == num_array_entries) {
1536                 /* Set the last table flag */
1537                 *last_table_flag = 1;
1538                 goto end_function;
1539         }
1540 
1541         /*
1542          * Calculate the data size of the next table.
1543          * Stop if no entries are left or if the data size exceeds the DMA restriction
1544          */
1545         next_table_data_size = 0;
1546         for (; counter < num_array_entries; counter++) {
1547                 next_table_data_size += lli_in_array_ptr[counter].block_size;
1548                 if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1549                         break;
1550         }
1551 
1552         /*
1553          * Check if the next table data size is less than the DMA restriction.
1554          * If it is - recalculate the current table size, so that the next
1555          * table data size will be adequate for DMA
1556          */
1557         if (next_table_data_size &&
1558                 next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1559 
1560                 table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
1561                         next_table_data_size);
1562 
1563 end_function:
1564         return table_data_size;
1565 }
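
/*
 * Numeric sketch of the lookahead above (hypothetical values; the
 * real minimum is SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE from
 * sep_driver_config.h): with a minimum of 0x1000, if the current
 * table sums to 0x5000 but the remaining entries hold only 0x0c00,
 * the function returns 0x5000 - (0x1000 - 0x0c00) = 0x4c00, pushing
 * 0x400 bytes into the next table so that it too meets the DMA
 * minimum.
 */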
1566 
1567 /**
1568  * sep_build_lli_table - build an lli array for the given table
1569  * @sep: pointer to struct sep_device
1570  * @lli_array_ptr: pointer to lli array
1571  * @lli_table_ptr: pointer to lli table
1572  * @num_processed_entries_ptr: pointer to number of array entries consumed
1573  * @num_table_entries_ptr: pointer to number of entries in the built table
1574  * @table_data_size: total data size
1575  *
1576  * Builds an lli table from the lli_array according to
1577  * the given size of data
1578  */
1579 static void sep_build_lli_table(struct sep_device *sep,
1580         struct sep_lli_entry    *lli_array_ptr,
1581         struct sep_lli_entry    *lli_table_ptr,
1582         u32 *num_processed_entries_ptr,
1583         u32 *num_table_entries_ptr,
1584         u32 table_data_size)
1585 {
1586         /* Current table data size */
1587         u32 curr_table_data_size;
1588         /* Counter of lli array entry */
1589         u32 array_counter;
1590 
1591         /* Init current table data size and lli array entry counter */
1592         curr_table_data_size = 0;
1593         array_counter = 0;
1594         *num_table_entries_ptr = 1;
1595 
1596         dev_dbg(&sep->pdev->dev,
1597                 "[PID%d] build lli table table_data_size: (hex) %x\n",
1598                         current->pid, table_data_size);
1599 
1600         /* Fill the table till table size reaches the needed amount */
1601         while (curr_table_data_size < table_data_size) {
1602                 /* Update the number of entries in table */
1603                 (*num_table_entries_ptr)++;
1604 
1605                 lli_table_ptr->bus_address =
1606                         cpu_to_le32(lli_array_ptr[array_counter].bus_address);
1607 
1608                 lli_table_ptr->block_size =
1609                         cpu_to_le32(lli_array_ptr[array_counter].block_size);
1610 
1611                 curr_table_data_size += lli_array_ptr[array_counter].block_size;
1612 
1613                 dev_dbg(&sep->pdev->dev,
1614                         "[PID%d] lli_table_ptr is %p\n",
1615                                 current->pid, lli_table_ptr);
1616                 dev_dbg(&sep->pdev->dev,
1617                         "[PID%d] lli_table_ptr->bus_address: %08lx\n",
1618                                 current->pid,
1619                                 (unsigned long)lli_table_ptr->bus_address);
1620 
1621                 dev_dbg(&sep->pdev->dev,
1622                         "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
1623                                 current->pid, lli_table_ptr->block_size);
1624 
1625                 /* Check for overflow of the table data */
1626                 if (curr_table_data_size > table_data_size) {
1627                         dev_dbg(&sep->pdev->dev,
1628                                 "[PID%d] curr_table_data_size too large\n",
1629                                         current->pid);
1630 
1631                         /* Update the size of block in the table */
1632                         lli_table_ptr->block_size =
1633                                 cpu_to_le32(le32_to_cpu(lli_table_ptr->block_size) -
1634                                 (curr_table_data_size - table_data_size));
1635 
1636                         /* Update the physical address in the lli array */
1637                         lli_array_ptr[array_counter].bus_address +=
1638                                 le32_to_cpu(lli_table_ptr->block_size);
1639 
1640                         /* Update the block size left in the lli array */
1641                         lli_array_ptr[array_counter].block_size =
1642                                 (curr_table_data_size - table_data_size);
1643                 } else
1644                         /* Advance to the next entry in the lli_array */
1645                         array_counter++;
1646 
1647                 dev_dbg(&sep->pdev->dev,
1648                         "[PID%d] lli_table_ptr->bus_address is %08lx\n",
1649                                 current->pid,
1650                                 (unsigned long)lli_table_ptr->bus_address);
1651                 dev_dbg(&sep->pdev->dev,
1652                         "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
1653                                 current->pid,
1654                                 lli_table_ptr->block_size);
1655 
1656                 /* Move to the next entry in table */
1657                 lli_table_ptr++;
1658         }
1659 
1660         /* Set the info entry to default */
1661         lli_table_ptr->bus_address = 0xffffffff;
1662         lli_table_ptr->block_size = 0;
1663 
1664         /* Set the output parameter */
1665         *num_processed_entries_ptr += array_counter;
1666 
1667 }
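
/*
 * Layout sketch of a table produced above (illustrative, not a dump):
 * for three lli array entries covering table_data_size bytes, the
 * memory at the original lli_table_ptr ends up as
 *
 *	entry 0: { bus_address chunk 0, block_size chunk 0 }  (le32)
 *	entry 1: { bus_address chunk 1, block_size chunk 1 }
 *	entry 2: { bus_address chunk 2, block_size chunk 2 }
 *	entry 3: { 0xffffffff, 0 }    <- info/terminator entry
 *
 * A caller chaining tables later rewrites the info entry so that
 * bus_address points at the next table and block_size packs
 * (num_entries << 24) | table_data_size, as done in
 * sep_prepare_input_dma_table() below.
 */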
1668 
1669 /**
1670  * sep_shared_area_virt_to_bus - map shared area to bus address
1671  * @sep: pointer to struct sep_device
1672  * @virt_address: virtual address to convert
1673  *
1674  * This function returns the bus (physical) address inside the shared area
1675  * corresponding to the given virtual address. The area can be either on the
1676  * external RAM device (ioremapped), or on the system RAM.
1677  * This implementation is for the external RAM.
1678  */
1679 static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
1680         void *virt_address)
1681 {
1682         dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n",
1683                                         current->pid, virt_address);
1684         dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n",
1685                 current->pid,
1686                 (unsigned long)
1687                 sep->shared_bus + (virt_address - sep->shared_addr));
1688 
1689         return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
1690 }
1691 
1692 /**
1693  * sep_shared_area_bus_to_virt - map shared area bus address to kernel
1694  * @sep: pointer to struct sep_device
1695  * @bus_address: bus address to convert
1696  *
1697  * This function returns the virtual address inside the shared area
1698  * corresponding to the given bus (physical) address. The area can be
1699  * either on the external RAM device (ioremapped), or on the system RAM.
1700  * This implementation is for the external RAM.
1701  */
1702 static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
1703         dma_addr_t bus_address)
1704 {
1705         dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n",
1706                 current->pid,
1707                 (unsigned long)bus_address, (unsigned long)(sep->shared_addr +
1708                         (size_t)(bus_address - sep->shared_bus)));
1709 
1710         return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
1711 }
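
/*
 * Minimal usage sketch (assuming a mapped shared area and a pointer
 * tbl somewhere inside it): the two helpers above are inverses, so
 * converting to a bus address for the SEP and back recovers the
 * original kernel pointer.
 *
 *	dma_addr_t bus = sep_shared_area_virt_to_bus(sep, tbl);
 *	WARN_ON(sep_shared_area_bus_to_virt(sep, bus) != (void *)tbl);
 */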
1712 
1713 /**
1714  * sep_debug_print_lli_tables - dump LLI table
1715  * @sep: pointer to struct sep_device
1716  * @lli_table_ptr: pointer to sep_lli_entry
1717  * @num_table_entries: number of entries
1718  * @table_data_size: total data size
1719  *
1720  * Walk the list of the created tables and print all the data
1721  */
1722 static void sep_debug_print_lli_tables(struct sep_device *sep,
1723         struct sep_lli_entry *lli_table_ptr,
1724         unsigned long num_table_entries,
1725         unsigned long table_data_size)
1726 {
1727 #ifdef DEBUG
1728         unsigned long table_count = 1;
1729         unsigned long entries_count = 0;
1730 
1731         dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n",
1732                                         current->pid);
1733         if (num_table_entries == 0) {
1734                 dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n",
1735                         current->pid);
1736                 return;
1737         }
1738 
1739         while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
1740                 dev_dbg(&sep->pdev->dev,
1741                         "[PID%d] lli table %08lx, table_data_size is (hex) %lx\n",
1742                         current->pid, table_count, table_data_size);
1743                 dev_dbg(&sep->pdev->dev,
1744                         "[PID%d] num_table_entries is (hex) %lx\n",
1745                                 current->pid, num_table_entries);
1746 
1747                 /* Print entries of the table (without info entry) */
1748                 for (entries_count = 0; entries_count < num_table_entries;
1749                         entries_count++, lli_table_ptr++) {
1750 
1751                         dev_dbg(&sep->pdev->dev,
1752                                 "[PID%d] lli_table_ptr address is %08lx\n",
1753                                 current->pid,
1754                                 (unsigned long) lli_table_ptr);
1755 
1756                         dev_dbg(&sep->pdev->dev,
1757                                 "[PID%d] phys address is %08lx block size is (hex) %x\n",
1758                                 current->pid,
1759                                 (unsigned long)lli_table_ptr->bus_address,
1760                                 lli_table_ptr->block_size);
1761                 }
1762 
1763                 /* Point to the info entry */
1764                 lli_table_ptr--;
1765 
1766                 dev_dbg(&sep->pdev->dev,
1767                         "[PID%d] phys lli_table_ptr->block_size is (hex) %x\n",
1768                         current->pid,
1769                         lli_table_ptr->block_size);
1770 
1771                 dev_dbg(&sep->pdev->dev,
1772                         "[PID%d] phys lli_table_ptr->physical_address is %08lx\n",
1773                         current->pid,
1774                         (unsigned long)lli_table_ptr->bus_address);
1775 
1776 
1777                 table_data_size = lli_table_ptr->block_size & 0xffffff;
1778                 num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
1779 
1780                 dev_dbg(&sep->pdev->dev,
1781                         "[PID%d] phys table_data_size is (hex) %lx num_table_entries is %lx bus_address is %lx\n",
1782                         current->pid,
1783                         table_data_size,
1784                         num_table_entries,
1785                         (unsigned long)lli_table_ptr->bus_address);
1786 
1787                 if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
1788                         lli_table_ptr = (struct sep_lli_entry *)
1789                                 sep_shared_bus_to_virt(sep,
1790                                 (unsigned long)lli_table_ptr->bus_address);
1791 
1792                 table_count++;
1793         }
1794         dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n",
1795                                         current->pid);
1796 #endif
1797 }
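
/*
 * The walk above depends on the info-entry packing used throughout
 * this file: block_size carries (num_entries << 24) | data_size and
 * bus_address carries the next table (0xffffffff terminates the
 * chain). A decode sketch:
 *
 *	u32 data_size   = info->block_size & 0x00ffffff;
 *	u32 num_entries = (info->block_size >> 24) & 0xff;
 *	bool last       = (info->bus_address == 0xffffffff);
 */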
1798 
1799 
1800 /**
1801  * sep_prepare_empty_lli_table - create a blank LLI table
1802  * @sep: pointer to struct sep_device
1803  * @lli_table_addr_ptr: pointer to lli table
1804  * @num_entries_ptr: pointer to number of entries
1805  * @table_data_size_ptr: point to table data size
1806  * @dmatables_region: Optional buffer for DMA tables
1807  * @dma_ctx: DMA context
1808  *
1809  * This function creates empty lli tables when there is no data
1810  */
1811 static void sep_prepare_empty_lli_table(struct sep_device *sep,
1812                 dma_addr_t *lli_table_addr_ptr,
1813                 u32 *num_entries_ptr,
1814                 u32 *table_data_size_ptr,
1815                 void **dmatables_region,
1816                 struct sep_dma_context *dma_ctx)
1817 {
1818         struct sep_lli_entry *lli_table_ptr;
1819 
1820         /* Find the area for new table */
1821         lli_table_ptr =
1822                 (struct sep_lli_entry *)(sep->shared_addr +
1823                 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1824                 dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1825                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1826 
1827         if (dmatables_region && *dmatables_region)
1828                 lli_table_ptr = *dmatables_region;
1829 
1830         lli_table_ptr->bus_address = 0;
1831         lli_table_ptr->block_size = 0;
1832 
1833         lli_table_ptr++;
1834         lli_table_ptr->bus_address = 0xFFFFFFFF;
1835         lli_table_ptr->block_size = 0;
1836 
1837         /* Set the output parameter value */
1838         *lli_table_addr_ptr = sep->shared_bus +
1839                 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1840                 dma_ctx->num_lli_tables_created *
1841                 sizeof(struct sep_lli_entry) *
1842                 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1843 
1844         /* Set the num of entries and table data size for empty table */
1845         *num_entries_ptr = 2;
1846         *table_data_size_ptr = 0;
1847 
1848         /* Update the number of created tables */
1849         dma_ctx->num_lli_tables_created++;
1850 }
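
/*
 * The resulting "empty" table is just two entries in the shared area:
 *
 *	entry 0: { .bus_address = 0x00000000, .block_size = 0 }
 *	entry 1: { .bus_address = 0xffffffff, .block_size = 0 }  <- info
 *
 * so *num_entries_ptr is 2 and *table_data_size_ptr is 0, giving the
 * SEP firmware a well-formed table that describes no data.
 */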
1851 
1852 /**
1853  * sep_prepare_input_dma_table - prepare input DMA mappings
1854  * @sep: pointer to struct sep_device
1855  * @data_size: size of the data buffer
1856  * @block_size: block size of the symmetric operation
1857  * @lli_table_ptr: returned bus address of the first lli table
1858  * @num_entries_ptr: returned number of entries in the first table
1859  * @table_data_size_ptr: returned data size of the first table
1860  * @is_kva: set for kernel data (kernel crypto call)
1861  *
1862  * This function prepares only the input DMA table for synchronous symmetric
1863  * operations (HASH)
1864  * Note that all bus addresses that are passed to the SEP
1865  * are in 32 bit format; the SEP is a 32 bit device
1866  */
1867 static int sep_prepare_input_dma_table(struct sep_device *sep,
1868         unsigned long app_virt_addr,
1869         u32 data_size,
1870         u32 block_size,
1871         dma_addr_t *lli_table_ptr,
1872         u32 *num_entries_ptr,
1873         u32 *table_data_size_ptr,
1874         bool is_kva,
1875         void **dmatables_region,
1876         struct sep_dma_context *dma_ctx
1877 )
1878 {
1879         int error = 0;
1880         /* Pointer to the info entry of the table - the last entry */
1881         struct sep_lli_entry *info_entry_ptr;
1882         /* Array of pointers to page */
1883         struct sep_lli_entry *lli_array_ptr;
1884         /* Points to the first entry to be processed in the lli_in_array */
1885         u32 current_entry = 0;
1886         /* Num entries in the virtual buffer */
1887         u32 sep_lli_entries = 0;
1888         /* Lli table pointer */
1889         struct sep_lli_entry *in_lli_table_ptr;
1890         /* The total data in one table */
1891         u32 table_data_size = 0;
1892         /* Flag for last table */
1893         u32 last_table_flag = 0;
1894         /* Number of entries in lli table */
1895         u32 num_entries_in_table = 0;
1896         /* Next table address */
1897         void *lli_table_alloc_addr = NULL;
1898         void *dma_lli_table_alloc_addr = NULL;
1899         void *dma_in_lli_table_ptr = NULL;
1900 
1901         dev_dbg(&sep->pdev->dev,
1902                 "[PID%d] prepare input dma tbl data size: (hex) %x\n",
1903                 current->pid, data_size);
1904 
1905         dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n",
1906                                         current->pid, block_size);
1907 
1908         /* Initialize the pages pointers */
1909         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
1910         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0;
1911 
1912         /* Set the kernel address for first table to be allocated */
1913         lli_table_alloc_addr = (void *)(sep->shared_addr +
1914                 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1915                 dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1916                 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1917 
1918         if (data_size == 0) {
1919                 if (dmatables_region) {
1920                         error = sep_allocate_dmatables_region(sep,
1921                                                 dmatables_region,
1922                                                 dma_ctx,
1923                                                 1);
1924                         if (error)
1925                                 return error;
1926                 }
1927                 /* Special case - create empty table - 2 entries, zero data */
1928                 sep_prepare_empty_lli_table(sep, lli_table_ptr,
1929                                 num_entries_ptr, table_data_size_ptr,
1930                                 dmatables_region, dma_ctx);
1931                 goto update_dcb_counter;
1932         }
1933 
1934         /* Check if the pages are in Kernel Virtual Address layout */
1935         if (is_kva)
1936                 error = sep_lock_kernel_pages(sep, app_virt_addr,
1937                         data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
1938                         dma_ctx);
1939         else
1940                 /*
1941                  * Lock the pages of the user buffer
1942                  * and translate them to bus addresses
1943                  */
1944                 error = sep_lock_user_pages(sep, app_virt_addr,
1945                         data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
1946                         dma_ctx);
1947 
1948         if (error)
1949                 goto end_function;
1950 
1951         dev_dbg(&sep->pdev->dev,
1952                 "[PID%d] output sep_in_num_pages is (hex) %x\n",
1953                 current->pid,
1954                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
1955 
1956         current_entry = 0;
1957         info_entry_ptr = NULL;
1958 
1959         sep_lli_entries =
1960                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages;
1961 
1962         dma_lli_table_alloc_addr = lli_table_alloc_addr;
1963         if (dmatables_region) {
1964                 error = sep_allocate_dmatables_region(sep,
1965                                         dmatables_region,
1966                                         dma_ctx,
1967                                         sep_lli_entries);
1968                 if (error)
1969                         goto end_function_error;
1970                 lli_table_alloc_addr = *dmatables_region;
1971         }
1972 
1973         /* Loop till all the entries in the input array are processed */
1974         while (current_entry < sep_lli_entries) {
1975 
1976                 /* Set the new input and output tables */
1977                 in_lli_table_ptr =
1978                         (struct sep_lli_entry *)lli_table_alloc_addr;
1979                 dma_in_lli_table_ptr =
1980                         (struct sep_lli_entry *)dma_lli_table_alloc_addr;
1981 
1982                 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1983                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1984                 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1985                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1986 
1987                 if (dma_lli_table_alloc_addr >
1988                         ((void *)sep->shared_addr +
1989                         SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1990                         SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
1991 
1992                         error = -ENOMEM;
1993                         goto end_function_error;
1994 
1995                 }
1996 
1997                 /* Update the number of created tables */
1998                 dma_ctx->num_lli_tables_created++;
1999 
2000                 /* Calculate the maximum size of data for input table */
2001                 table_data_size = sep_calculate_lli_table_max_size(sep,
2002                         &lli_array_ptr[current_entry],
2003                         (sep_lli_entries - current_entry),
2004                         &last_table_flag);
2005 
2006                 /*
2007                  * If this is not the last table -
2008                  * then align it to the block size
2009                  */
2010                 if (!last_table_flag)
2011                         table_data_size =
2012                                 (table_data_size / block_size) * block_size;
2013 
2014                 dev_dbg(&sep->pdev->dev,
2015                         "[PID%d] output table_data_size is (hex) %x\n",
2016                                 current->pid,
2017                                 table_data_size);
2018 
2019                 /* Construct input lli table */
2020                 sep_build_lli_table(sep, &lli_array_ptr[current_entry],
2021                         in_lli_table_ptr,
2022                         &current_entry, &num_entries_in_table, table_data_size);
2023 
2024                 if (info_entry_ptr == NULL) {
2025 
2026                         /* Set the output parameters to physical addresses */
2027                         *lli_table_ptr = sep_shared_area_virt_to_bus(sep,
2028                                 dma_in_lli_table_ptr);
2029                         *num_entries_ptr = num_entries_in_table;
2030                         *table_data_size_ptr = table_data_size;
2031 
2032                         dev_dbg(&sep->pdev->dev,
2033                                 "[PID%d] output lli_table_in_ptr is %08lx\n",
2034                                 current->pid,
2035                                 (unsigned long)*lli_table_ptr);
2036 
2037                 } else {
2038                         /* Update the info entry of the previous in table */
2039                         info_entry_ptr->bus_address =
2040                                 sep_shared_area_virt_to_bus(sep,
2041                                                         dma_in_lli_table_ptr);
2042                         info_entry_ptr->block_size =
2043                                 ((num_entries_in_table) << 24) |
2044                                 (table_data_size);
2045                 }
2046                 /* Save the pointer to the info entry of the current tables */
2047                 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
2048         }
2049         /* Print input tables */
2050         if (!dmatables_region) {
2051                 sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
2052                         sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
2053                         *num_entries_ptr, *table_data_size_ptr);
2054         }
2055 
2056         /* The array of the pages */
2057         kfree(lli_array_ptr);
2058 
2059 update_dcb_counter:
2060         /* Update DCB counter */
2061         dma_ctx->nr_dcb_creat++;
2062         goto end_function;
2063 
2064 end_function_error:
2065         /* Free all the allocated resources */
2066         kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
2067         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
2068         kfree(lli_array_ptr);
2069         kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
2070         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2071 
2072 end_function:
2073         return error;
2074 
2075 }
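
/*
 * Call sketch (hypothetical user buffer, no external DMA-tables
 * region): preparing a HASH input table for one 8 KB user buffer
 * might look like
 *
 *	dma_addr_t table_bus;
 *	u32 entries, size;
 *	int rc;
 *
 *	rc = sep_prepare_input_dma_table(sep, user_addr, 0x2000,
 *			SHA256_BLOCK_SIZE, &table_bus, &entries,
 *			&size, false, NULL, dma_ctx);
 *
 * On success, table_bus holds the 32-bit bus address of the first
 * table in the shared area, ready to be written into a DCB.
 */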
2076 
2077 /**
2078  * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
2079  * @sep: pointer to struct sep_device
2080  * @lli_in_array: array of input lli entries
2081  * @sep_in_lli_entries: number of entries in lli_in_array
2082  * @lli_out_array: array of output lli entries
2083  * @sep_out_lli_entries: number of entries in lli_out_array
2084  * @block_size: block size of the symmetric operation
2085  * @lli_table_in_ptr: returned bus address of the first input table
2086  * @lli_table_out_ptr: returned bus address of the first output table
2087  * @in_num_entries_ptr: returned number of entries in the first input table
2088  * @out_num_entries_ptr: returned number of entries in the first output table
2089  * @table_data_size_ptr: returned data size common to both first tables
2090  *
2091  * This function creates the input and output DMA tables for
2092  * symmetric operations (AES/DES) according to the block
2093  * size from LLI arrays
2094  * Note that all bus addresses that are passed to the SEP
2095  * are in 32 bit format; the SEP is a 32 bit device
2096  */
2097 static int sep_construct_dma_tables_from_lli(
2098         struct sep_device *sep,
2099         struct sep_lli_entry *lli_in_array,
2100         u32     sep_in_lli_entries,
2101         struct sep_lli_entry *lli_out_array,
2102         u32     sep_out_lli_entries,
2103         u32     block_size,
2104         dma_addr_t *lli_table_in_ptr,
2105         dma_addr_t *lli_table_out_ptr,
2106         u32     *in_num_entries_ptr,
2107         u32     *out_num_entries_ptr,
2108         u32     *table_data_size_ptr,
2109         void    **dmatables_region,
2110         struct sep_dma_context *dma_ctx)
2111 {
2112         /* Points to the area where next lli table can be allocated */
2113         void *lli_table_alloc_addr = NULL;
2114         /*
2115          * Points to the area in shared region where next lli table
2116          * can be allocated
2117          */
2118         void *dma_lli_table_alloc_addr = NULL;
2119         /* Input lli table in dmatables_region or shared region */
2120         struct sep_lli_entry *in_lli_table_ptr = NULL;
2121         /* Input lli table location in the shared region */
2122         struct sep_lli_entry *dma_in_lli_table_ptr = NULL;
2123         /* Output lli table in dmatables_region or shared region */
2124         struct sep_lli_entry *out_lli_table_ptr = NULL;
2125         /* Output lli table location in the shared region */
2126         struct sep_lli_entry *dma_out_lli_table_ptr = NULL;
2127         /* Pointer to the info entry of the table - the last entry */
2128         struct sep_lli_entry *info_in_entry_ptr = NULL;
2129         /* Pointer to the info entry of the table - the last entry */
2130         struct sep_lli_entry *info_out_entry_ptr = NULL;
2131         /* Points to the first entry to be processed in the lli_in_array */
2132         u32 current_in_entry = 0;
2133         /* Points to the first entry to be processed in the lli_out_array */
2134         u32 current_out_entry = 0;
2135         /* Max size of the input table */
2136         u32 in_table_data_size = 0;
2137         /* Max size of the output table */
2138         u32 out_table_data_size = 0;
2139         /* Flag that signifies whether this is the last table build */
2140         u32 last_table_flag = 0;
2141         /* The data size that should be in table */
2142         u32 table_data_size = 0;
2143         /* Number of entries in the input table */
2144         u32 num_entries_in_table = 0;
2145         /* Number of entries in the output table */
2146         u32 num_entries_out_table = 0;
2147 
2148         if (!dma_ctx) {
2149                 dev_warn(&sep->pdev->dev, "DMA context uninitialized\n");
2150                 return -EINVAL;
2151         }
2152 
2153         /* Initiate to point after the message area */
2154         lli_table_alloc_addr = (void *)(sep->shared_addr +
2155                 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2156                 (dma_ctx->num_lli_tables_created *
2157                 (sizeof(struct sep_lli_entry) *
2158                 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
2159         dma_lli_table_alloc_addr = lli_table_alloc_addr;
2160 
2161         if (dmatables_region) {
2162                 /* Twice the entries, to cover both the in and out tables */
2163                 if (sep_allocate_dmatables_region(sep,
2164                                         dmatables_region,
2165                                         dma_ctx,
2166                                         2*sep_in_lli_entries))
2167                         return -ENOMEM;
2168                 lli_table_alloc_addr = *dmatables_region;
2169         }
2170 
2171         /* Loop till all the entries in the input array are processed */
2172         while (current_in_entry < sep_in_lli_entries) {
2173                 /* Set the new input and output tables */
2174                 in_lli_table_ptr =
2175                         (struct sep_lli_entry *)lli_table_alloc_addr;
2176                 dma_in_lli_table_ptr =
2177                         (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2178 
2179                 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2180                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2181                 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2182                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2183 
2184                 /* Set the first output tables */
2185                 out_lli_table_ptr =
2186                         (struct sep_lli_entry *)lli_table_alloc_addr;
2187                 dma_out_lli_table_ptr =
2188                         (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2189 
2190                 /* Check if the DMA table area limit was overrun */
2191                 if ((dma_lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
2192                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
2193                         ((void *)sep->shared_addr +
2194                         SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2195                         SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2196 
2197                         dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
2198                         return -ENOMEM;
2199                 }
2200 
2201                 /* Update the number of the lli tables created */
2202                 dma_ctx->num_lli_tables_created += 2;
2203 
2204                 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2205                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2206                 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2207                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2208 
2209                 /* Calculate the maximum size of data for input table */
2210                 in_table_data_size =
2211                         sep_calculate_lli_table_max_size(sep,
2212                         &lli_in_array[current_in_entry],
2213                         (sep_in_lli_entries - current_in_entry),
2214                         &last_table_flag);
2215 
2216                 /* Calculate the maximum size of data for output table */
2217                 out_table_data_size =
2218                         sep_calculate_lli_table_max_size(sep,
2219                         &lli_out_array[current_out_entry],
2220                         (sep_out_lli_entries - current_out_entry),
2221                         &last_table_flag);
2222 
2223                 if (!last_table_flag) {
2224                         in_table_data_size = (in_table_data_size /
2225                                 block_size) * block_size;
2226                         out_table_data_size = (out_table_data_size /
2227                                 block_size) * block_size;
2228                 }
2229 
2230                 table_data_size = in_table_data_size;
2231                 if (table_data_size > out_table_data_size)
2232                         table_data_size = out_table_data_size;
2233 
2234                 dev_dbg(&sep->pdev->dev,
2235                         "[PID%d] construct tables from lli in_table_data_size is (hex) %x\n",
2236                         current->pid, in_table_data_size);
2237 
2238                 dev_dbg(&sep->pdev->dev,
2239                         "[PID%d] construct tables from lli out_table_data_size is (hex) %x\n",
2240                         current->pid, out_table_data_size);
2241 
2242                 /* Construct input lli table */
2243                 sep_build_lli_table(sep, &lli_in_array[current_in_entry],
2244                         in_lli_table_ptr,
2245                         &current_in_entry,
2246                         &num_entries_in_table,
2247                         table_data_size);
2248 
2249                 /* Construct output lli table */
2250                 sep_build_lli_table(sep, &lli_out_array[current_out_entry],
2251                         out_lli_table_ptr,
2252                         &current_out_entry,
2253                         &num_entries_out_table,
2254                         table_data_size);
2255 
2256                 /* If info entry is null - this is the first table built */
2257                 if (info_in_entry_ptr == NULL || info_out_entry_ptr == NULL) {
2258                         /* Set the output parameters to physical addresses */
2259                         *lli_table_in_ptr =
2260                         sep_shared_area_virt_to_bus(sep, dma_in_lli_table_ptr);
2261 
2262                         *in_num_entries_ptr = num_entries_in_table;
2263 
2264                         *lli_table_out_ptr =
2265                                 sep_shared_area_virt_to_bus(sep,
2266                                 dma_out_lli_table_ptr);
2267 
2268                         *out_num_entries_ptr = num_entries_out_table;
2269                         *table_data_size_ptr = table_data_size;
2270 
2271                         dev_dbg(&sep->pdev->dev,
2272                                 "[PID%d] output lli_table_in_ptr is %08lx\n",
2273                                 current->pid,
2274                                 (unsigned long)*lli_table_in_ptr);
2275                         dev_dbg(&sep->pdev->dev,
2276                                 "[PID%d] output lli_table_out_ptr is %08lx\n",
2277                                 current->pid,
2278                                 (unsigned long)*lli_table_out_ptr);
2279                 } else {
2280                         /* Update the info entry of the previous in table */
2281                         info_in_entry_ptr->bus_address =
2282                                 sep_shared_area_virt_to_bus(sep,
2283                                 dma_in_lli_table_ptr);
2284 
2285                         info_in_entry_ptr->block_size =
2286                                 ((num_entries_in_table) << 24) |
2287                                 (table_data_size);
2288 
2289                         /* Update the info entry of the previous out table */
2290                         info_out_entry_ptr->bus_address =
2291                                 sep_shared_area_virt_to_bus(sep,
2292                                 dma_out_lli_table_ptr);
2293 
2294                         info_out_entry_ptr->block_size =
2295                                 ((num_entries_out_table) << 24) |
2296                                 (table_data_size);
2297 
2298                         dev_dbg(&sep->pdev->dev,
2299                                 "[PID%d] output lli_table_in_ptr:%08lx %08x\n",
2300                                 current->pid,
2301                                 (unsigned long)info_in_entry_ptr->bus_address,
2302                                 info_in_entry_ptr->block_size);
2303 
2304                         dev_dbg(&sep->pdev->dev,
2305                                 "[PID%d] output lli_table_out_ptr: %08lx  %08x\n",
2306                                 current->pid,
2307                                 (unsigned long)info_out_entry_ptr->bus_address,
2308                                 info_out_entry_ptr->block_size);
2309                 }
2310 
2311                 /* Save the pointer to the info entry of the current tables */
2312                 info_in_entry_ptr = in_lli_table_ptr +
2313                         num_entries_in_table - 1;
2314                 info_out_entry_ptr = out_lli_table_ptr +
2315                         num_entries_out_table - 1;
2316 
2317                 dev_dbg(&sep->pdev->dev,
2318                         "[PID%d] output num_entries_out_table is %x\n",
2319                         current->pid,
2320                         (u32)num_entries_out_table);
2321                 dev_dbg(&sep->pdev->dev,
2322                         "[PID%d] output info_in_entry_ptr is %lx\n",
2323                         current->pid,
2324                         (unsigned long)info_in_entry_ptr);
2325                 dev_dbg(&sep->pdev->dev,
2326                         "[PID%d] output info_out_entry_ptr is %lx\n",
2327                         current->pid,
2328                         (unsigned long)info_out_entry_ptr);
2329         }
2330 
2331         /* Print input tables */
2332         if (!dmatables_region) {
2333                 sep_debug_print_lli_tables(
2334                         sep,
2335                         (struct sep_lli_entry *)
2336                         sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
2337                         *in_num_entries_ptr,
2338                         *table_data_size_ptr);
2339         }
2340 
2341         /* Print output tables */
2342         if (!dmatables_region) {
2343                 sep_debug_print_lli_tables(
2344                         sep,
2345                         (struct sep_lli_entry *)
2346                         sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
2347                         *out_num_entries_ptr,
2348                         *table_data_size_ptr);
2349         }
2350 
2351         return 0;
2352 }
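
/*
 * Numeric sketch of the size negotiation above (hypothetical values):
 * with block_size 16, in_table_data_size 0x1030 and
 * out_table_data_size 0x0ff8, the sizes are first rounded down to the
 * block size (0x1030 and 0x0ff0) and the common table_data_size
 * becomes min(0x1030, 0x0ff0) = 0x0ff0, so each input/output table
 * pair always describes the same number of bytes.
 */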
2353 
2354 /**
2355  * sep_prepare_input_output_dma_table - prepare DMA I/O table
2356  * @app_virt_in_addr: virtual address of the input data buffer
2357  * @app_virt_out_addr: virtual address of the output data buffer
2358  * @data_size: size of the data
2359  * @block_size: block size of the symmetric operation
2360  * @lli_table_in_ptr: returned bus address of the first input table
2361  * @lli_table_out_ptr: returned bus address of the first output table
2362  * @in_num_entries_ptr: returned number of entries in the first input table
2363  * @out_num_entries_ptr: returned number of entries in the first output table
2364  * @table_data_size_ptr: returned data size common to both first tables
2365  * @is_kva: set for kernel data; used only for kernel crypto module
2366  *
2367  * This function builds input and output DMA tables for synchronous
2368  * symmetric operations (AES, DES, HASH). It also checks that each table
2369  * is of the modular block size
2370  * Note that all bus addresses that are passed to the SEP
2371  * are in 32 bit format; the SEP is a 32 bit device
2372  */
2373 static int sep_prepare_input_output_dma_table(struct sep_device *sep,
2374         unsigned long app_virt_in_addr,
2375         unsigned long app_virt_out_addr,
2376         u32 data_size,
2377         u32 block_size,
2378         dma_addr_t *lli_table_in_ptr,
2379         dma_addr_t *lli_table_out_ptr,
2380         u32 *in_num_entries_ptr,
2381         u32 *out_num_entries_ptr,
2382         u32 *table_data_size_ptr,
2383         bool is_kva,
2384         void **dmatables_region,
2385         struct sep_dma_context *dma_ctx)
2386 
2387 {
2388         int error = 0;
2389         /* Array of pointers of page */
2390         struct sep_lli_entry *lli_in_array;
2391         /* Array of pointers of page */
2392         struct sep_lli_entry *lli_out_array;
2393 
2394         if (!dma_ctx) {
2395                 error = -EINVAL;
2396                 goto end_function;
2397         }
2398 
2399         if (data_size == 0) {
2400                 /* Prepare empty table for input and output */
2401                 if (dmatables_region) {
2402                         error = sep_allocate_dmatables_region(
2403                                         sep,
2404                                         dmatables_region,
2405                                         dma_ctx,
2406                                         2);
2407                         if (error)
2408                                 goto end_function;
2409                 }
2410                 sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
2411                         in_num_entries_ptr, table_data_size_ptr,
2412                         dmatables_region, dma_ctx);
2413 
2414                 sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
2415                         out_num_entries_ptr, table_data_size_ptr,
2416                         dmatables_region, dma_ctx);
2417 
2418                 goto update_dcb_counter;
2419         }
2420 
2421         /* Initialize the pages pointers */
2422         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2423         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2424 
2425         /* Lock the pages of the buffer and translate them to pages */
2426         if (is_kva) {
2427                 dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n",
2428                                                 current->pid);
2429                 error = sep_lock_kernel_pages(sep, app_virt_in_addr,
2430                                 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2431                                 dma_ctx);
2432                 if (error) {
2433                         dev_warn(&sep->pdev->dev,
2434                                 "[PID%d] sep_lock_kernel_pages for input virtual buffer failed\n",
2435                                 current->pid);
2436 
2437                         goto end_function;
2438                 }
2439 
2440                 dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel output pages\n",
2441                                                 current->pid);
2442                 error = sep_lock_kernel_pages(sep, app_virt_out_addr,
2443                                 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2444                                 dma_ctx);
2445 
2446                 if (error) {
2447                         dev_warn(&sep->pdev->dev,
2448                                 "[PID%d] sep_lock_kernel_pages for output virtual buffer failed\n",
2449                                 current->pid);
2450 
2451                         goto end_function_free_lli_in;
2452                 }
2453 
2454         }
2455 
2456         else {
2457                 dev_dbg(&sep->pdev->dev, "[PID%d] Locking user input pages\n",
2458                                                 current->pid);
2459                 error = sep_lock_user_pages(sep, app_virt_in_addr,
2460                                 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2461                                 dma_ctx);
2462                 if (error) {
2463                         dev_warn(&sep->pdev->dev,
2464                                 "[PID%d] sep_lock_user_pages for input virtual buffer failed\n",
2465                                 current->pid);
2466 
2467                         goto end_function;
2468                 }
2469 
2470                 if (dma_ctx->secure_dma) {
2471                         /* secure_dma requires use of non accessible memory */
2472                         dev_dbg(&sep->pdev->dev, "[PID%d] in secure_dma\n",
2473                                 current->pid);
2474                         error = sep_lli_table_secure_dma(sep,
2475                                 app_virt_out_addr, data_size, &lli_out_array,
2476                                 SEP_DRIVER_OUT_FLAG, dma_ctx);
2477                         if (error) {
2478                                 dev_warn(&sep->pdev->dev,
2479                                         "[PID%d] secure dma table setup for output virtual buffer failed\n",
2480                                         current->pid);
2481 
2482                                 goto end_function_free_lli_in;
2483                         }
2484                 } else {
2485                         /* For normal, non-secure dma */
2486                         dev_dbg(&sep->pdev->dev, "[PID%d] not in secure_dma\n",
2487                                 current->pid);
2488 
2489                         dev_dbg(&sep->pdev->dev,
2490                                 "[PID%d] Locking user output pages\n",
2491                                 current->pid);
2492 
2493                         error = sep_lock_user_pages(sep, app_virt_out_addr,
2494                                 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2495                                 dma_ctx);
2496 
2497                         if (error) {
2498                                 dev_warn(&sep->pdev->dev,
2499                                         "[PID%d] sep_lock_user_pages for output virtual buffer failed\n",
2500                                         current->pid);
2501 
2502                                 goto end_function_free_lli_in;
2503                         }
2504                 }
2505         }
2506 
2507         dev_dbg(&sep->pdev->dev,
2508                 "[PID%d] After lock; prep input output dma table sep_in_num_pages is (hex) %x\n",
2509                 current->pid,
2510                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
2511 
2512         dev_dbg(&sep->pdev->dev, "[PID%d] sep_out_num_pages is (hex) %x\n",
2513                 current->pid,
2514                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages);
2515 
2516         dev_dbg(&sep->pdev->dev,
2517                 "[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is (hex) %x\n",
2518                 current->pid, SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
2519 
2520         /* Call the function that creates table from the lli arrays */
2521         dev_dbg(&sep->pdev->dev, "[PID%d] calling create table from lli\n",
2522                                         current->pid);
2523         error = sep_construct_dma_tables_from_lli(
2524                         sep, lli_in_array,
2525                         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2526                                                                 in_num_pages,
2527                         lli_out_array,
2528                         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2529                                                                 out_num_pages,
2530                         block_size, lli_table_in_ptr, lli_table_out_ptr,
2531                         in_num_entries_ptr, out_num_entries_ptr,
2532                         table_data_size_ptr, dmatables_region, dma_ctx);
2533 
2534         if (error) {
2535                 dev_warn(&sep->pdev->dev,
2536                         "[PID%d] sep_construct_dma_tables_from_lli failed\n",
2537                         current->pid);
2538                 goto end_function_with_error;
2539         }
2540 
2541         kfree(lli_out_array);
2542         kfree(lli_in_array);
2543 
2544 update_dcb_counter:
2545         /* Update DCB counter */
2546         dma_ctx->nr_dcb_creat++;
2547 
2548         goto end_function;
2549 
2550 end_function_with_error:
2551         kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array);
2552         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
2553         kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array);
2554         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2555         kfree(lli_out_array);
2556 
2557 
2558 end_function_free_lli_in:
2559         kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
2560         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
2561         kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
2562         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2563         kfree(lli_in_array);
2564 
2565 end_function:
2566 
2567         return error;
2568 
2569 }
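
/*
 * Call sketch (hypothetical addresses, non-secure user buffers):
 * building matching AES input/output tables for one 4 KB request
 * might look like
 *
 *	dma_addr_t in_bus, out_bus;
 *	u32 in_ents, out_ents, size;
 *	int rc;
 *
 *	rc = sep_prepare_input_output_dma_table(sep, in_addr, out_addr,
 *			0x1000, AES_BLOCK_SIZE, &in_bus, &out_bus,
 *			&in_ents, &out_ents, &size, false, NULL,
 *			dma_ctx);
 *
 * Both resulting table chains describe the same data size, as the
 * symmetric engines require.
 */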
2570 
2571 /**
2572  * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
2573  * @app_in_address: unsigned long; for data buffer in (user space)
2574  * @app_out_address: unsigned long; for data buffer out (user space)
2575  * @data_in_size: u32; for size of data
2576  * @block_size: u32; for block size
2577  * @tail_block_size: u32; for size of tail block
2578  * @isapplet: bool; to indicate external app
2579  * @is_kva: bool; kernel buffer; only used for kernel crypto module
2580  * @secure_dma: indicates whether this is secure_dma using IMR
2581  *
2582  * This function prepares the linked DMA tables and puts the
2583  * address for the linked list of tables into a DCB (data control
2584  * block), the address of which is known by the SEP hardware
2585  * Note that all bus addresses that are passed to the SEP
2586  * are in 32 bit format; the SEP is a 32 bit device
2587  */
2588 int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
2589         unsigned long  app_in_address,
2590         unsigned long  app_out_address,
2591         u32  data_in_size,
2592         u32  block_size,
2593         u32  tail_block_size,
2594         bool isapplet,
2595         bool    is_kva,
2596         bool    secure_dma,
2597         struct sep_dcblock *dcb_region,
2598         void **dmatables_region,
2599         struct sep_dma_context **dma_ctx,
2600         struct scatterlist *src_sg,
2601         struct scatterlist *dst_sg)
2602 {
2603         int error = 0;
2604         /* Size of tail */
2605         u32 tail_size = 0;
2606         /* Address of the created DCB table */
2607         struct sep_dcblock *dcb_table_ptr = NULL;
2608         /* The physical address of the first input DMA table */
2609         dma_addr_t in_first_mlli_address = 0;
2610         /* Number of entries in the first input DMA table */
2611         u32  in_first_num_entries = 0;
2612         /* The physical address of the first output DMA table */
2613         dma_addr_t  out_first_mlli_address = 0;
2614         /* Number of entries in the first output DMA table */
2615         u32  out_first_num_entries = 0;
2616         /* Data in the first input/output table */
2617         u32  first_data_size = 0;
2618 
2619         dev_dbg(&sep->pdev->dev, "[PID%d] app_in_address %lx\n",
2620                 current->pid, app_in_address);
2621 
2622         dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address %lx\n",
2623                 current->pid, app_out_address);
2624 
2625         dev_dbg(&sep->pdev->dev, "[PID%d] data_in_size %x\n",
2626                 current->pid, data_in_size);
2627 
2628         dev_dbg(&sep->pdev->dev, "[PID%d] block_size %x\n",
2629                 current->pid, block_size);
2630 
2631         dev_dbg(&sep->pdev->dev, "[PID%d] tail_block_size %x\n",
2632                 current->pid, tail_block_size);
2633 
2634         dev_dbg(&sep->pdev->dev, "[PID%d] isapplet %x\n",
2635                 current->pid, isapplet);
2636 
2637         dev_dbg(&sep->pdev->dev, "[PID%d] is_kva %x\n",
2638                 current->pid, is_kva);
2639 
2640         dev_dbg(&sep->pdev->dev, "[PID%d] src_sg %p\n",
2641                 current->pid, src_sg);
2642 
2643         dev_dbg(&sep->pdev->dev, "[PID%d] dst_sg %p\n",
2644                 current->pid, dst_sg);
2645 
2646         if (!dma_ctx) {
2647                 dev_warn(&sep->pdev->dev, "[PID%d] no DMA context pointer\n",
2648                                                 current->pid);
2649                 error = -EINVAL;
2650                 goto end_function;
2651         }
2652 
2653         if (*dma_ctx) {
2654                 /* In case there are multiple DCBs for this transaction */
2655                 dev_dbg(&sep->pdev->dev, "[PID%d] DMA context already set\n",
2656                                                 current->pid);
2657         } else {
2658                 *dma_ctx = kzalloc(sizeof(**dma_ctx), GFP_KERNEL);
2659                 if (!(*dma_ctx)) {
2660                         dev_dbg(&sep->pdev->dev,
2661                                 "[PID%d] Not enough memory for DMA context\n",
2662                                 current->pid);
2663                         error = -ENOMEM;
2664                         goto end_function;
2665                 }
2666                 dev_dbg(&sep->pdev->dev,
2667                         "[PID%d] Created DMA context addr at 0x%p\n",
2668                         current->pid, *dma_ctx);
2669         }
2670 
2671         (*dma_ctx)->secure_dma = secure_dma;
2672 
2673         /* these are for kernel crypto only */
2674         (*dma_ctx)->src_sg = src_sg;
2675         (*dma_ctx)->dst_sg = dst_sg;
2676 
2677         if ((*dma_ctx)->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
2678                 /* No more DCBs to allocate */
2679                 dev_dbg(&sep->pdev->dev, "[PID%d] no more DCBs available\n",
2680                                                 current->pid);
2681                 error = -ENOSPC;
2682                 goto end_function_error;
2683         }
2684 
2685         /* Use the caller-supplied DCB or the next free shared-area slot */
2686         if (dcb_region) {
2687                 dcb_table_ptr = dcb_region;
2688         } else {
2689                 dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2690                         SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2691                         ((*dma_ctx)->nr_dcb_creat *
2692                                                 sizeof(struct sep_dcblock)));
2693         }
2694 
2695         /* Set the default values in the DCB */
2696         dcb_table_ptr->input_mlli_address = 0;
2697         dcb_table_ptr->input_mlli_num_entries = 0;
2698         dcb_table_ptr->input_mlli_data_size = 0;
2699         dcb_table_ptr->output_mlli_address = 0;
2700         dcb_table_ptr->output_mlli_num_entries = 0;
2701         dcb_table_ptr->output_mlli_data_size = 0;
2702         dcb_table_ptr->tail_data_size = 0;
2703         dcb_table_ptr->out_vr_tail_pt = 0;
2704 
2705         if (isapplet) {
2706 
2707                 /* Check if there is enough data for DMA operation */
2708                 if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2709                         if (is_kva) {
2710                                 error = -ENODEV;
2711                                 goto end_function_error;
2712                         } else {
2713                                 if (copy_from_user(dcb_table_ptr->tail_data,
2714                                         (void __user *)app_in_address,
2715                                         data_in_size)) {
2716                                         error = -EFAULT;
2717                                         goto end_function_error;
2718                                 }
2719                         }
2720 
2721                         dcb_table_ptr->tail_data_size = data_in_size;
2722 
2723                         /* Set the output user-space address for mem2mem op */
2724                         if (app_out_address)
2725                                 dcb_table_ptr->out_vr_tail_pt =
2726                                 (aligned_u64)app_out_address;
2727 
2728                         /*
2729                          * Update both data length parameters in order to avoid
2730                          * second data copy and allow building of empty mlli
2731                          * tables
2732                          */
2733                         tail_size = 0x0;
2734                         data_in_size = 0x0;
2735 
2736                 } else {
2737                         if (!app_out_address) {
2738                                 tail_size = data_in_size % block_size;
2739                                 if (!tail_size) {
2740                                         if (tail_block_size == block_size)
2741                                                 tail_size = block_size;
2742                                 }
2743                         } else {
2744                                 tail_size = 0;
2745                         }
2746                 }
2747                 if (tail_size) {
2748                         if (tail_size > sizeof(dcb_table_ptr->tail_data)) {
2749                                 /* Tail would overflow the DCB tail buffer */
                                      error = -EINVAL;
                                      goto end_function_error;
                              }
2750                         if (is_kva) {
2751                                 error = -ENODEV;
2752                                 goto end_function_error;
2753                         } else {
2754                                 /* We have tail data - copy it to DCB */
2755                                 if (copy_from_user(dcb_table_ptr->tail_data,
2756                                         (void __user *)(app_in_address +
2757                                         data_in_size - tail_size), tail_size)) {
2758                                         error = -EFAULT;
2759                                         goto end_function_error;
2760                                 }
2761                         }
2762                         if (app_out_address)
2763                                 /*
2764                                  * Calculate the output address
2765                                  * according to tail data size
2766                                  */
2767                                 dcb_table_ptr->out_vr_tail_pt =
2768                                         (aligned_u64)app_out_address +
2769                                         data_in_size - tail_size;
2770 
2771                         /* Save the real tail data size */
2772                         dcb_table_ptr->tail_data_size = tail_size;
2773                         /*
2774                          * Update the data size without the tail
2775                          * data size AKA data for the dma
2776                          */
2777                         data_in_size = (data_in_size - tail_size);
2778                 }
2779         }
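        /*
         * Worked example (illustrative): for an applet transfer with
         * data_in_size = 100, block_size = 16 and no output address,
         * tail_size = 100 % 16 = 4, so the last 4 bytes land in the DCB
         * tail_data and the DMA tables below cover the remaining 96
         * bytes.  If the data were block aligned and tail_block_size ==
         * block_size, one full block would be held back as tail data.
         */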
2780         /* Check if we need to build only input table or input/output */
2781         if (app_out_address) {
2782                 /* Prepare input/output tables */
2783                 error = sep_prepare_input_output_dma_table(sep,
2784                                 app_in_address,
2785                                 app_out_address,
2786                                 data_in_size,
2787                                 block_size,
2788                                 &in_first_mlli_address,
2789                                 &out_first_mlli_address,
2790                                 &in_first_num_entries,
2791                                 &out_first_num_entries,
2792                                 &first_data_size,
2793                                 is_kva,
2794                                 dmatables_region,
2795                                 *dma_ctx);
2796         } else {
2797                 /* Prepare input tables */
2798                 error = sep_prepare_input_dma_table(sep,
2799                                 app_in_address,
2800                                 data_in_size,
2801                                 block_size,
2802                                 &in_first_mlli_address,
2803                                 &in_first_num_entries,
2804                                 &first_data_size,
2805                                 is_kva,
2806                                 dmatables_region,
2807                                 *dma_ctx);
2808         }
2809 
2810         if (error) {
2811                 dev_warn(&sep->pdev->dev,
2812                         "DMA table preparation failed during DCB setup\n");
2813                 goto end_function_error;
2814         }
2815 
2816         /* Set the DCB values */
2817         dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2818         dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2819         dcb_table_ptr->input_mlli_data_size = first_data_size;
2820         dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2821         dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2822         dcb_table_ptr->output_mlli_data_size = first_data_size;
2823 
2824         goto end_function;
2825 
2826 end_function_error:
2827         kfree(*dma_ctx);
2828         *dma_ctx = NULL;
2829 
2830 end_function:
2831         return error;
2832 
2833 }
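
/*
 * Illustrative caller sketch (error handling elided; not driver code):
 * building a single user-space DCB without secure DMA, the same way
 * sep_prepare_dcb_handler() below drives this function:
 *
 *	struct sep_dma_context *dma_ctx = NULL;
 *	int error;
 *
 *	error = sep_prepare_input_output_dma_table_in_dcb(sep,
 *			in_addr, out_addr, data_size, block_size,
 *			tail_block_size, false, false, false,
 *			NULL, NULL, &dma_ctx, NULL, NULL);
 *
 * The NULL dcb_region/dmatables_region select the shared area, and the
 * NULL scatterlists mark a non kernel-crypto transfer.
 */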
2834 
2835 
2836 /**
2837  * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2838  * @sep: pointer to struct sep_device
2839  * @isapplet: indicates external application (used for kernel access)
2840  * @is_kva: indicates kernel addresses (only used for kernel crypto)
      * @dma_ctx: DMA context for the transaction being torn down
2841  *
2842  * This function frees the DMA tables and DCB
2843  */
2844 static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2845         bool is_kva, struct sep_dma_context **dma_ctx)
2846 {
2847         struct sep_dcblock *dcb_table_ptr;
2848         unsigned long pt_hold;
2849         void *tail_pt;
2850 
2851         int i = 0;
2852         int error = 0;
2853         int error_temp = 0;
2854 
2855         dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n",
2856                                         current->pid);
2857         if (!dma_ctx || !*dma_ctx) /* nothing to be done here*/
2858                 return 0;
2859 
2860         if (!(*dma_ctx)->secure_dma && isapplet) {
2861                 dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n",
2862                         current->pid);
2863 
2864                 /* Tail stuff is only for non secure_dma */
2865                 /* Set pointer to first DCB table */
2866                 dcb_table_ptr = (struct sep_dcblock *)
2867                         (sep->shared_addr +
2868                         SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2869 
2870                 /*
2871                  * Go over each DCB and see if
2872                  * tail pointer must be updated
2873                  */
2874                 for (i = 0; i < (*dma_ctx)->nr_dcb_creat;
2875                      i++, dcb_table_ptr++) {
2876                         if (dcb_table_ptr->out_vr_tail_pt) {
2877                                 pt_hold = (unsigned long)dcb_table_ptr->
2878                                         out_vr_tail_pt;
2879                                 tail_pt = (void *)pt_hold;
2880                                 if (is_kva) {
2881                                         error = -ENODEV;
2882                                         break;
2883                                 } else {
2884                                         error_temp = copy_to_user(
2885                                                 (void __user *)tail_pt,
2886                                                 dcb_table_ptr->tail_data,
2887                                                 dcb_table_ptr->tail_data_size);
2888                                 }
2889                                 if (error_temp) {
2890                                         /* Release the DMA resource */
2891                                         error = -EFAULT;
2892                                         break;
2893                                 }
2894                         }
2895                 }
2896         }
2897 
2898         /* Free the output pages, if any */
2899         sep_free_dma_table_data_handler(sep, dma_ctx);
2900 
2901         dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n",
2902                                         current->pid);
2903 
2904         return error;
2905 }
2906 
2907 /**
2908  * sep_prepare_dcb_handler - prepare a control block
2909  * @sep: pointer to struct sep_device
2910  * @arg: pointer to user parameters
2911  * @secure_dma: indicate whether we are using secure_dma on IMR
      * @dma_ctx: DMA context to fill with the prepared DCB state
2912  *
2913  * This function copies the DCB build arguments from user space and
2914  * prepares the DMA tables and the DCB for the transaction.
2915  */
2916 static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg,
2917                                    bool secure_dma,
2918                                    struct sep_dma_context **dma_ctx)
2919 {
2920         int error;
2921         /* Command arguments */
2922         struct build_dcb_struct command_args;
2923 
2924         /* Get the command arguments */
2925         if (copy_from_user(&command_args, (void __user *)arg,
2926                                         sizeof(struct build_dcb_struct))) {
2927                 error = -EFAULT;
2928                 goto end_function;
2929         }
2930 
2931         dev_dbg(&sep->pdev->dev,
2932                 "[PID%d] prep dcb handler app_in_address is %08llx\n",
2933                         current->pid, command_args.app_in_address);
2934         dev_dbg(&sep->pdev->dev,
2935                 "[PID%d] app_out_address is %08llx\n",
2936                         current->pid, command_args.app_out_address);
2937         dev_dbg(&sep->pdev->dev,
2938                 "[PID%d] data_size is %x\n",
2939                         current->pid, command_args.data_in_size);
2940         dev_dbg(&sep->pdev->dev,
2941                 "[PID%d] block_size is %x\n",
2942                         current->pid, command_args.block_size);
2943         dev_dbg(&sep->pdev->dev,
2944                 "[PID%d] tail block_size is %x\n",
2945                         current->pid, command_args.tail_block_size);
2946         dev_dbg(&sep->pdev->dev,
2947                 "[PID%d] is_applet is %x\n",
2948                         current->pid, command_args.is_applet);
2949 
2950         if (!command_args.app_in_address) {
2951                 dev_warn(&sep->pdev->dev,
2952                         "[PID%d] null app_in_address\n", current->pid);
2953                 error = -EINVAL;
2954                 goto end_function;
2955         }
2956 
2957         error = sep_prepare_input_output_dma_table_in_dcb(sep,
2958                         (unsigned long)command_args.app_in_address,
2959                         (unsigned long)command_args.app_out_address,
2960                         command_args.data_in_size, command_args.block_size,
2961                         command_args.tail_block_size,
2962                         command_args.is_applet, false,
2963                         secure_dma, NULL, NULL, dma_ctx, NULL, NULL);
2964 
2965 end_function:
2966         return error;
2967 
2968 }
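
/*
 * Illustrative user-space sketch (assumes the SEP ioctl numbers and
 * struct build_dcb_struct are visible to user space; fd, in_buf,
 * out_buf and len are hypothetical):
 *
 *	struct build_dcb_struct args = {
 *		.app_in_address  = (unsigned long)in_buf,
 *		.app_out_address = (unsigned long)out_buf,
 *		.data_in_size    = len,
 *		.block_size      = 16,
 *		.tail_block_size = 16,
 *		.is_applet       = 0,
 *	};
 *
 *	if (ioctl(fd, SEP_IOCPREPAREDCB, &args) < 0)
 *		perror("SEP_IOCPREPAREDCB");
 */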
2969 
2970 /**
2971  * sep_free_dcb_handler - free control block resources
2972  * @sep: pointer to struct sep_device
      * @dma_ctx: DMA context to free
2973  *
2974  * This function frees the DCB resources and updates the needed
2975  * user-space buffers.
2976  */
2977 static int sep_free_dcb_handler(struct sep_device *sep,
2978                                 struct sep_dma_context **dma_ctx)
2979 {
2980         if (!dma_ctx || !(*dma_ctx)) {
2981                 dev_dbg(&sep->pdev->dev,
2982                         "[PID%d] no dma context defined, nothing to free\n",
2983                         current->pid);
2984                 return -EINVAL;
2985         }
2986 
2987         dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n",
2988                 current->pid,
2989                 (*dma_ctx)->nr_dcb_creat);
2990 
2991         return sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx);
2992 }
2993 
2994 /**
2995  * sep_ioctl - ioctl handler for sep device
2996  * @filp: pointer to struct file
2997  * @cmd: command
2998  * @arg: pointer to argument structure
2999  *
3000  * Implement the ioctl methods available on the SEP device.
3001  */
3002 static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3003 {
3004         struct sep_private_data * const private_data = filp->private_data;
3005         struct sep_call_status *call_status = &private_data->call_status;
3006         struct sep_device *sep = private_data->device;
3007         struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
3008         struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
3009         int error = 0;
3010 
3011         dev_dbg(&sep->pdev->dev, "[PID%d] ioctl cmd 0x%x\n",
3012                 current->pid, cmd);
3013         dev_dbg(&sep->pdev->dev, "[PID%d] dma context addr 0x%p\n",
3014                 current->pid, *dma_ctx);
3015 
3016         /* Make sure we own this device */
3017         error = sep_check_transaction_owner(sep);
3018         if (error) {
3019                 dev_dbg(&sep->pdev->dev, "[PID%d] ioctl pid is not owner\n",
3020                         current->pid);
3021                 goto end_function;
3022         }
3023 
3024         /* Check that sep_mmap has been called before */
3025         if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET,
3026                                 &call_status->status)) {
3027                 dev_dbg(&sep->pdev->dev,
3028                         "[PID%d] mmap not called\n", current->pid);
3029                 error = -EPROTO;
3030                 goto end_function;
3031         }
3032 
3033         /* Check that the command is for SEP device */
3034         if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
3035                 error = -ENOTTY;
3036                 goto end_function;
3037         }
3038 
3039         switch (cmd) {
3040         case SEP_IOCSENDSEPCOMMAND:
3041                 dev_dbg(&sep->pdev->dev,
3042                         "[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
3043                         current->pid);
3044                 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3045                                   &call_status->status)) {
3046                         dev_warn(&sep->pdev->dev,
3047                                 "[PID%d] send msg already done\n",
3048                                 current->pid);
3049                         error = -EPROTO;
3050                         goto end_function;
3051                 }
3052                 /* Send command to SEP */
3053                 error = sep_send_command_handler(sep);
3054                 if (!error)
3055                         set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3056                                 &call_status->status);
3057                 dev_dbg(&sep->pdev->dev,
3058                         "[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
3059                         current->pid);
3060                 break;
3061         case SEP_IOCENDTRANSACTION:
3062                 dev_dbg(&sep->pdev->dev,
3063                         "[PID%d] SEP_IOCENDTRANSACTION start\n",
3064                         current->pid);
3065                 error = sep_end_transaction_handler(sep, dma_ctx, call_status,
3066                                                     my_queue_elem);
3067                 dev_dbg(&sep->pdev->dev,
3068                         "[PID%d] SEP_IOCENDTRANSACTION end\n",
3069                         current->pid);
3070                 break;
3071         case SEP_IOCPREPAREDCB:
3072                 dev_dbg(&sep->pdev->dev,
3073                         "[PID%d] SEP_IOCPREPAREDCB start\n",
3074                         current->pid);
3075                 /* fall-through */
3076         case SEP_IOCPREPAREDCB_SECURE_DMA:
3077                 dev_dbg(&sep->pdev->dev,
3078                         "[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
3079                         current->pid);
3080                 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3081                                   &call_status->status)) {
3082                         dev_dbg(&sep->pdev->dev,
3083                                 "[PID%d] dcb prep needed before send msg\n",
3084                                 current->pid);
3085                         error = -EPROTO;
3086                         goto end_function;
3087                 }
3088 
3089                 if (!arg) {
3090                         dev_dbg(&sep->pdev->dev,
3091                                 "[PID%d] dcb null arg\n", current->pid);
3092                         error = -EINVAL;
3093                         goto end_function;
3094                 }
3095 
3096                 if (cmd == SEP_IOCPREPAREDCB) {
3097                         /* No secure dma */
3098                         dev_dbg(&sep->pdev->dev,
3099                                 "[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
3100                                 current->pid);
3101 
3102                         error = sep_prepare_dcb_handler(sep, arg, false,
3103                                 dma_ctx);
3104                 } else {
3105                         /* Secure dma */
3106                         dev_dbg(&sep->pdev->dev,
3107                                 "[PID%d] SEP_IOCPREPAREDCB (with secure_dma)\n",
3108                                 current->pid);
3109 
3110                         error = sep_prepare_dcb_handler(sep, arg, true,
3111                                 dma_ctx);
3112                 }
3113                 dev_dbg(&sep->pdev->dev, "[PID%d] dcb's end\n",
3114                         current->pid);
3115                 break;
3116         case SEP_IOCFREEDCB:
3117                 dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB start\n",
3118                         current->pid);
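                     /* fall-through */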
3119         case SEP_IOCFREEDCB_SECURE_DMA:
3120                 dev_dbg(&sep->pdev->dev,
3121                         "[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
3122                         current->pid);
3123                 error = sep_free_dcb_handler(sep, dma_ctx);
3124                 dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB end\n",
3125                         current->pid);
3126                 break;
3127         default:
3128                 error = -ENOTTY;
3129                 dev_dbg(&sep->pdev->dev, "[PID%d] default end\n",
3130                         current->pid);
3131                 break;
3132         }
3133 
3134 end_function:
3135         dev_dbg(&sep->pdev->dev, "[PID%d] ioctl end\n", current->pid);
3136 
3137         return error;
3138 }
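
/*
 * Illustrative legacy call sequence enforced by sep_ioctl() above
 * (user-space sketch; the device path and mmap length are assumptions,
 * error handling elided):
 *
 *	fd = open("/dev/sep", O_RDWR);
 *	msg = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, SEP_IOCPREPAREDCB, &dcb_args);     (before send msg)
 *	ioctl(fd, SEP_IOCSENDSEPCOMMAND, 0);
 *	poll(...);                                   (wait for SEP reply)
 *	ioctl(fd, SEP_IOCFREEDCB, 0);
 *	ioctl(fd, SEP_IOCENDTRANSACTION, 0);
 */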
3139 
3140 /**
3141  * sep_inthandler - interrupt handler for sep device
3142  * @irq: interrupt
3143  * @dev_id: device id
3144  */
3145 static irqreturn_t sep_inthandler(int irq, void *dev_id)
3146 {
3147         unsigned long lock_irq_flag;
3148         u32 reg_val, reg_val2 = 0;
3149         struct sep_device *sep = dev_id;
3150         irqreturn_t int_error = IRQ_HANDLED;
3151 
3152         /* Are we in power save? */
3153 #if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
3154         if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) {
3155                 dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n");
3156                 return IRQ_NONE;
3157         }
3158 #endif
3159 
3160         if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) {
3161                 dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n");
3162                 return IRQ_NONE;
3163         }
3164 
3165         /* Read the IRR register to check if this is SEP interrupt */
3166         reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
3167 
3168         dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val);
3169 
3170         if (reg_val & (0x1 << 13)) {
3171 
3172                 /* Lock and update the counter of reply messages */
3173                 spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
3174                 sep->reply_ct++;
3175                 spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
3176 
3177                 dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
3178                                         sep->send_ct, sep->reply_ct);
3179 
3180                 /* Is this a kernel client request */
3181                 if (sep->in_kernel) {
3182                         tasklet_schedule(&sep->finish_tasklet);
3183                         goto finished_interrupt;
3184                 }
3185 
3186                 /* Is this printf or daemon request? */
3187                 reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
3188                 dev_dbg(&sep->pdev->dev,
3189                         "SEP Interrupt - GPR2 is %08x\n", reg_val2);
3190 
3191                 clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
3192 
3193                 if ((reg_val2 >> 30) & 0x1) {
3194                         dev_dbg(&sep->pdev->dev, "int: printf request\n");
3195                 } else if (reg_val2 >> 31) {
3196                         dev_dbg(&sep->pdev->dev, "int: daemon request\n");
3197                 } else {
3198                         dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
3199                         wake_up(&sep->event_interrupt);
3200                 }
3201         } else {
3202                 dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
3203                 int_error = IRQ_NONE;
3204         }
3205 
3206 finished_interrupt:
3207 
3208         if (int_error == IRQ_HANDLED)
3209                 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
3210 
3211         return int_error;
3212 }
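
/*
 * Interrupt decoding summary for sep_inthandler() above:
 *
 *	IRR bit 13 clear  - not a SEP interrupt, return IRQ_NONE
 *	IRR bit 13 set    - SEP interrupt; the reply counter is bumped
 *	                    and GPR2 is consulted:
 *	GPR2 bit 30 set   - SEP printf request
 *	GPR2 bit 31 set   - SEP daemon request
 *	otherwise         - reply to a host message; event_interrupt
 *	                    waiters are woken
 *
 * Handled interrupts are acknowledged by writing the IRR value back
 * to the ICR register.
 */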
3213 
3214 /**
3215  * sep_reconfig_shared_area - reconfigure shared area
3216  * @sep: pointer to struct sep_device
3217  *
3218  * Reconfig the shared area between HOST and SEP - needed in case
3219  * the DX_CC_Init function was called before OS loading.
3220  */
3221 static int sep_reconfig_shared_area(struct sep_device *sep)
3222 {
3223         int ret_val;
3224 
3225         /* use to limit waiting for SEP */
3226         unsigned long end_time;
3227 
3228         /* Send the new SHARED MESSAGE AREA to the SEP */
3229         dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
3230                                 (unsigned long long)sep->shared_bus);
3231 
3232         sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
3233 
3234         /* Poll for SEP response */
3235         ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3236 
3237         end_time = jiffies + (WAIT_TIME * HZ);
3238 
3239         while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
3240                 (ret_val != sep->shared_bus))
3241                 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3242 
3243         /* Check the return value (register) */
3244         if (ret_val != sep->shared_bus) {
3245                 dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
3246                 dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
3247                 ret_val = -ENOMEM;
3248         } else
3249                 ret_val = 0;
3250 
3251         dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
3252 
3253         return ret_val;
3254 }
3255 
3256 /**
3257  *      sep_activate_dcb_dmatables_context - Takes DCB & DMA tables
3258  *                                              contexts into use
3259  *      @sep: SEP device
3260  *      @dcb_region: DCB region copy
3261  *      @dmatables_region: MLLI/DMA tables copy
3262  *      @dma_ctx: DMA context for current transaction
3263  */
3264 ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
3265                                         struct sep_dcblock **dcb_region,
3266                                         void **dmatables_region,
3267                                         struct sep_dma_context *dma_ctx)
3268 {
3269         void *dmaregion_free_start = NULL;
3270         void *dmaregion_free_end = NULL;
3271         void *dcbregion_free_start = NULL;
3272         void *dcbregion_free_end = NULL;
3273         ssize_t error = 0;
3274 
3275         dev_dbg(&sep->pdev->dev, "[PID%d] activating dcb/dma region\n",
3276                 current->pid);
3277 
3278         if (1 > dma_ctx->nr_dcb_creat) {
3279                 dev_warn(&sep->pdev->dev,
3280                          "[PID%d] invalid number of dcbs to activate 0x%08X\n",
3281                          current->pid, dma_ctx->nr_dcb_creat);
3282                 error = -EINVAL;
3283                 goto end_function;
3284         }
3285 
3286         dmaregion_free_start = sep->shared_addr
3287                                 + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES;
3288         dmaregion_free_end = dmaregion_free_start
3289                                 + SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
3290 
3291         if (dmaregion_free_start
3292              + dma_ctx->dmatables_len > dmaregion_free_end) {
3293                 error = -ENOMEM;
3294                 goto end_function;
3295         }
3296         memcpy(dmaregion_free_start,
3297                *dmatables_region,
3298                dma_ctx->dmatables_len);
3299         /* Free MLLI table copy */
3300         kfree(*dmatables_region);
3301         *dmatables_region = NULL;
3302 
3303         /* Copy thread's DCB  table copy to DCB table region */
3304         dcbregion_free_start = sep->shared_addr +
3305                                 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES;
3306         dcbregion_free_end = dcbregion_free_start +
3307                                 (SEP_MAX_NUM_SYNC_DMA_OPS *
3308                                         sizeof(struct sep_dcblock)) - 1;
3309 
3310         if (dcbregion_free_start
3311              + (dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock))
3312              > dcbregion_free_end) {
3313                 error = -ENOMEM;
3314                 goto end_function;
3315         }
3316 
3317         memcpy(dcbregion_free_start,
3318                *dcb_region,
3319                dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock));
3320 
3321         /* Print the tables */
3322         dev_dbg(&sep->pdev->dev, "activate: input table\n");
3323         sep_debug_print_lli_tables(sep,
3324                 (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3325                 (*dcb_region)->input_mlli_address),
3326                 (*dcb_region)->input_mlli_num_entries,
3327                 (*dcb_region)->input_mlli_data_size);
3328 
3329         dev_dbg(&sep->pdev->dev, "activate: output table\n");
3330         sep_debug_print_lli_tables(sep,
3331                 (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3332                 (*dcb_region)->output_mlli_address),
3333                 (*dcb_region)->output_mlli_num_entries,
3334                 (*dcb_region)->output_mlli_data_size);
3335 
3336         dev_dbg(&sep->pdev->dev,
3337                  "[PID%d] printing activated tables\n", current->pid);
3338 
3339 end_function:
3340         kfree(*dmatables_region);
3341         *dmatables_region = NULL;
3342 
3343         kfree(*dcb_region);
3344         *dcb_region = NULL;
3345 
3346         return error;
3347 }
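
/*
 * Shared-area regions used above, relative to sep->shared_addr (offset
 * and size constants come from the driver config headers):
 *
 *	0                                            - message area
 *	SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES      - MLLI/DMA tables
 *	    (SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES bytes)
 *	SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES - DCB slots
 *	    (SEP_MAX_NUM_SYNC_DMA_OPS * sizeof(struct sep_dcblock) bytes)
 */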
3348 
3349 /**
3350  *      sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
3351  *      @sep: SEP device
3352  *      @dcb_region: DCB region buf to create for current transaction
3353  *      @dmatables_region: MLLI/DMA tables buf to create for current transaction
3354  *      @dma_ctx: DMA context buf to create for current transaction
3355  *      @user_dcb_args: User arguments for DCB/MLLI creation
3356  *      @num_dcbs: Number of DCBs to create
3357  *      @secure_dma: Indicate use of IMR restricted memory secure dma
3358  */
3359 static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep,
3360                         struct sep_dcblock **dcb_region,
3361                         void **dmatables_region,
3362                         struct sep_dma_context **dma_ctx,
3363                         const struct build_dcb_struct __user *user_dcb_args,
3364                         const u32 num_dcbs, bool secure_dma)
3365 {
3366         int error = 0;
3367         int i = 0;
3368         struct build_dcb_struct *dcb_args = NULL;
3369 
3370         dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3371                 current->pid);
3372 
3373         if (!dcb_region || !dma_ctx || !dmatables_region || !user_dcb_args) {
3374                 error = -EINVAL;
3375                 goto end_function;
3376         }
3377 
3378         if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3379                 dev_warn(&sep->pdev->dev,
3380                          "[PID%d] invalid number of dcbs 0x%08X\n",
3381                          current->pid, num_dcbs);
3382                 error = -EINVAL;
3383                 goto end_function;
3384         }
3385 
3386         dcb_args = kcalloc(num_dcbs, sizeof(struct build_dcb_struct),
3387                            GFP_KERNEL);
3388         if (!dcb_args) {
3389                 error = -ENOMEM;
3390                 goto end_function;
3391         }
3392 
3393         if (copy_from_user(dcb_args,
3394                         user_dcb_args,
3395                         num_dcbs * sizeof(struct build_dcb_struct))) {
3396                 error = -EFAULT;
3397                 goto end_function;
3398         }
3399 
3400         /* Allocate thread-specific memory for DCB */
3401         *dcb_region = kcalloc(num_dcbs, sizeof(struct sep_dcblock),
3402                               GFP_KERNEL);
3403         if (!(*dcb_region)) {
3404                 error = -ENOMEM;
3405                 goto end_function;
3406         }
3407 
3408         /* Prepare DCB and MLLI table into the allocated regions */
3409         for (i = 0; i < num_dcbs; i++) {
3410                 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3411                                 (unsigned long)dcb_args[i].app_in_address,
3412                                 (unsigned long)dcb_args[i].app_out_address,
3413                                 dcb_args[i].data_in_size,
3414                                 dcb_args[i].block_size,
3415                                 dcb_args[i].tail_block_size,
3416                                 dcb_args[i].is_applet,
3417                                 false, secure_dma,
3418                                 *dcb_region, dmatables_region,
3419                                 dma_ctx,
3420                                 NULL,
3421                                 NULL);
3422                 if (error) {
3423                         dev_warn(&sep->pdev->dev,
3424                                  "[PID%d] dma table creation failed\n",
3425                                  current->pid);
3426                         goto end_function;
3427                 }
3428 
3429                 if (dcb_args[i].app_in_address != 0)
3430                         (*dma_ctx)->input_data_len += dcb_args[i].data_in_size;
3431         }
3432 
3433 end_function:
3434         kfree(dcb_args);
3435         return error;
3436 
3437 }
3438 
3439 /**
3440  *      sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
3441  *      for kernel crypto
3442  *      @sep: SEP device
3443  *      @dcb_region: DCB region buf to create for current transaction
3444  *      @dmatables_region: MLLI/DMA tables buf to create for current transaction
3445  *      @dma_ctx: DMA context buf to create for current transaction
3446  *      @dcb_data: Kernel-space arguments for DCB/MLLI creation
3447  *      @num_dcbs: Number of DCBs to create
      *
3448  *      This does the same thing as sep_create_dcb_dmatables_context
3449  *      except that it is used only for the kernel crypto operation. It is
3450  *      separate because there is no user data involved; the dcb data structure
3451  *      is specific for kernel crypto (build_dcb_struct_kernel)
3452  */
3453 int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
3454                         struct sep_dcblock **dcb_region,
3455                         void **dmatables_region,
3456                         struct sep_dma_context **dma_ctx,
3457                         const struct build_dcb_struct_kernel *dcb_data,
3458                         const u32 num_dcbs)
3459 {
3460         int error = 0;
3461         int i = 0;
3462 
3463         dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3464                 current->pid);
3465 
3466         if (!dcb_region || !dma_ctx || !dmatables_region || !dcb_data) {
3467                 error = -EINVAL;
3468                 goto end_function;
3469         }
3470 
3471         if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3472                 dev_warn(&sep->pdev->dev,
3473                          "[PID%d] invalid number of dcbs 0x%08X\n",
3474                          current->pid, num_dcbs);
3475                 error = -EINVAL;
3476                 goto end_function;
3477         }
3478 
3479         dev_dbg(&sep->pdev->dev, "[PID%d] num_dcbs is %d\n",
3480                 current->pid, num_dcbs);
3481 
3482         /* Allocate thread-specific memory for DCB */
3483         *dcb_region = kcalloc(num_dcbs, sizeof(struct sep_dcblock),
3484                               GFP_KERNEL);
3485         if (!(*dcb_region)) {
3486                 error = -ENOMEM;
3487                 goto end_function;
3488         }
3489 
3490         /* Prepare DCB and MLLI table into the allocated regions */
3491         for (i = 0; i < num_dcbs; i++) {
3492                 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3493                                 (unsigned long)dcb_data->app_in_address,
3494                                 (unsigned long)dcb_data->app_out_address,
3495                                 dcb_data->data_in_size,
3496                                 dcb_data->block_size,
3497                                 dcb_data->tail_block_size,
3498                                 dcb_data->is_applet,
3499                                 true,
3500                                 false,
3501                                 *dcb_region, dmatables_region,
3502                                 dma_ctx,
3503                                 dcb_data->src_sg,
3504                                 dcb_data->dst_sg);
3505                 if (error) {
3506                         dev_warn(&sep->pdev->dev,
3507                                  "[PID%d] dma table creation failed\n",
3508                                  current->pid);
3509                         goto end_function;
3510                 }
3511         }
3512 
3513 end_function:
3514         return error;
3515 
3516 }
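
/*
 * Illustrative kernel-crypto caller sketch (sep_crypto.c is the real
 * consumer; the local variable names are hypothetical).  Field names
 * follow struct build_dcb_struct_kernel as consumed above:
 *
 *	struct build_dcb_struct_kernel dcb_data = {
 *		.app_in_address  = (unsigned long)src_virt,
 *		.app_out_address = (unsigned long)dst_virt,
 *		.data_in_size    = nbytes,
 *		.block_size      = AES_BLOCK_SIZE,
 *		.tail_block_size = 0,
 *		.is_applet       = false,
 *		.src_sg          = src_sg,
 *		.dst_sg          = dst_sg,
 *	};
 *
 *	error = sep_create_dcb_dmatables_context_kernel(sep, &dcb_region,
 *			&dmatables_region, &dma_ctx, &dcb_data, 1);
 */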
3517 
3518 /**
3519  *      sep_activate_msgarea_context - Takes the message area context into use
3520  *      @sep: SEP device
3521  *      @msg_region: Message area context buf
3522  *      @msg_len: Message area context buffer size
3523  */
3524 static ssize_t sep_activate_msgarea_context(struct sep_device *sep,
3525                                             void **msg_region,
3526                                             const size_t msg_len)
3527 {
3528         dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n",
3529                 current->pid);
3530 
3531         if (!msg_region || !(*msg_region) ||
3532             SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES < msg_len) {
3533                 dev_warn(&sep->pdev->dev,
3534                          "[PID%d] invalid act msgarea len 0x%08zX\n",
3535                          current->pid, msg_len);
3536                 return -EINVAL;
3537         }
3538 
3539         memcpy(sep->shared_addr, *msg_region, msg_len);
3540 
3541         return 0;
3542 }
3543 
3544 /**
3545  *      sep_create_msgarea_context - Creates message area context
3546  *      @sep: SEP device
3547  *      @msg_region: Msg area region buf to create for current transaction
3548  *      @msg_user: Content for msg area region from user
3549  *      @msg_len: Message area size
3550  */
3551 static ssize_t sep_create_msgarea_context(struct sep_device *sep,
3552                                           void **msg_region,
3553                                           const void __user *msg_user,
3554                                           const size_t msg_len)
3555 {
3556         int error = 0;
3557 
3558         dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n",
3559                 current->pid);
3560 
3561         if (!msg_region ||
3562             !msg_user ||
3563             SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len ||
3564             SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len) {
3565                 dev_warn(&sep->pdev->dev,
3566                          "[PID%d] invalid creat msgarea len 0x%08zX\n",
3567                          current->pid, msg_len);
3568                 error = -EINVAL;
3569                 goto end_function;
3570         }
3571 
3572         /* Allocate thread-specific memory for message buffer */
3573         *msg_region = kzalloc(msg_len, GFP_KERNEL);
3574         if (!(*msg_region)) {
3575                 error = -ENOMEM;
3576                 goto end_function;
3577         }
3578 
3579         /* Copy input data to write() to allocated message buffer */
3580         if (copy_from_user(*msg_region, msg_user, msg_len)) {
3581                 error = -EFAULT;
3582                 goto end_function;
3583         }
3584 
3585 end_function:
3586         if (error && msg_region) {
3587                 kfree(*msg_region);
3588                 *msg_region = NULL;
3589         }
3590 
3591         return error;
3592 }
3593 
3594 
3595 /**
3596  *      sep_read - Returns results of an operation for fastcall interface
3597  *      @filp: File pointer
3598  *      @buf_user: User buffer for storing results
3599  *      @count_user: User buffer size
3600  *      @offset: File offset, not supported
3601  *
3602  *      The implementation does not support reading in chunks, all data must be
3603  *      consumed during a single read system call.
3604  */
3605 static ssize_t sep_read(struct file *filp,
3606                         char __user *buf_user, size_t count_user,
3607                         loff_t *offset)
3608 {
3609         struct sep_private_data * const private_data = filp->private_data;
3610         struct sep_call_status *call_status = &private_data->call_status;
3611         struct sep_device *sep = private_data->device;
3612         struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
3613         struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
3614         ssize_t error = 0, error_tmp = 0;
3615 
3616         /* Am I the process that owns the transaction? */
3617         error = sep_check_transaction_owner(sep);
3618         if (error) {
3619                 dev_dbg(&sep->pdev->dev, "[PID%d] read pid is not owner\n",
3620                         current->pid);
3621                 goto end_function;
3622         }
3623 
3624         /* Check that the user has called the necessary APIs */
3625         if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET,
3626                         &call_status->status)) {
3627                 dev_warn(&sep->pdev->dev,
3628                          "[PID%d] fastcall write not called\n",
3629                          current->pid);
3630                 error = -EPROTO;
3631                 goto end_function_error;
3632         }
3633 
3634         if (!buf_user) {
3635                 dev_warn(&sep->pdev->dev,
3636                          "[PID%d] null user buffer\n",
3637                          current->pid);
3638                 error = -EINVAL;
3639                 goto end_function_error;
3640         }
3641 
3642 
3643         /* Wait for SEP to finish */
3644         wait_event(sep->event_interrupt,
3645                    test_bit(SEP_WORKING_LOCK_BIT,
3646                             &sep->in_use_flags) == 0);
3647 
3648         sep_dump_message(sep);
3649 
3650         dev_dbg(&sep->pdev->dev, "[PID%d] count_user = 0x%08zX\n",
3651                 current->pid, count_user);
3652 
3653         /* In case user has allocated bigger buffer */
3654         if (count_user > SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
3655                 count_user = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES;
3656 
3657         if (copy_to_user(buf_user, sep->shared_addr, count_user)) {
3658                 error = -EFAULT;
3659                 goto end_function_error;
3660         }
3661 
3662         dev_dbg(&sep->pdev->dev, "[PID%d] read succeeded\n", current->pid);
3663         error = count_user;
3664 
3665 end_function_error:
3666         /* Copy possible tail data to user and free DCB and MLLIs */
3667         error_tmp = sep_free_dcb_handler(sep, dma_ctx);
3668         if (error_tmp)
3669                 dev_warn(&sep->pdev->dev, "[PID%d] dcb free failed\n",
3670                         current->pid);
3671 
3672         /* End the transaction, wakeup pending ones */
3673         error_tmp = sep_end_transaction_handler(sep, dma_ctx, call_status,
3674                 my_queue_elem);
3675         if (error_tmp)
3676                 dev_warn(&sep->pdev->dev,
3677                          "[PID%d] ending transaction failed\n",
3678                          current->pid);
3679 
3680 end_function:
3681         return error;
3682 }
3683 
3684 /**
3685  *      sep_fastcall_args_get - Gets fastcall params from user
3686  *      @sep: SEP device
3687  *      @args: Parameters buffer
3688  *      @buf_user: User buffer for operation parameters
3689  *      @count_user: User buffer size
3690  */
3691 static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
3692                                             struct sep_fastcall_hdr *args,
3693                                             const char __user *buf_user,
3694                                             const size_t count_user)
3695 {
3696         ssize_t error = 0;
3697         size_t actual_count = 0;
3698 
3699         if (!buf_user) {
3700                 dev_warn(&sep->pdev->dev,
3701                          "[PID%d] null user buffer\n",
3702                          current->pid);
3703                 error = -EINVAL;
3704                 goto end_function;
3705         }
3706 
3707         if (count_user < sizeof(struct sep_fastcall_hdr)) {
3708                 dev_warn(&sep->pdev->dev,
3709                          "[PID%d] too small message size 0x%08zX\n",
3710                          current->pid, count_user);
3711                 error = -EINVAL;
3712                 goto end_function;
3713         }
3714 
3715 
3716         if (copy_from_user(args, buf_user, sizeof(struct sep_fastcall_hdr))) {
3717                 error = -EFAULT;
3718                 goto end_function;
3719         }
3720 
3721         if (SEP_FC_MAGIC != args->magic) {
3722                 dev_warn(&sep->pdev->dev,
3723                          "[PID%d] invalid fastcall magic 0x%08X\n",
3724                          current->pid, args->magic);
3725                 error = -EINVAL;
3726                 goto end_function;
3727         }
3728 
3729         dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
3730                 current->pid, args->num_dcbs);
3731         dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n",
3732                 current->pid, args->msg_len);
3733 
3734         if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len ||
3735             SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) {
3736                 dev_warn(&sep->pdev->dev,
3737                          "[PID%d] invalid message length\n",
3738                          current->pid);
3739                 error = -EINVAL;
3740                 goto end_function;
3741         }
3742 
3743         actual_count = sizeof(struct sep_fastcall_hdr)
3744                         + args->msg_len
3745                         + (args->num_dcbs * sizeof(struct build_dcb_struct));
3746 
3747         if (actual_count != count_user) {
3748                 dev_warn(&sep->pdev->dev,
3749                          "[PID%d] inconsistent message sizes 0x%08zX vs 0x%08zX\n",
3750                          current->pid, actual_count, count_user);
3751                 error = -EMSGSIZE;
3752                 goto end_function;
3753         }
3754 
3755 end_function:
3756         return error;
3757 }
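
/*
 * Layout of a fastcall write() buffer as validated above (the total
 * size must equal count_user exactly):
 *
 *	+------------------------------+  offset 0
 *	| struct sep_fastcall_hdr      |  .magic = SEP_FC_MAGIC,
 *	|                              |  .num_dcbs, .msg_len, .secure_dma
 *	+------------------------------+  sizeof(struct sep_fastcall_hdr)
 *	| num_dcbs *                   |  DCB build arguments
 *	| struct build_dcb_struct      |  (absent when num_dcbs == 0)
 *	+------------------------------+
 *	| message area, msg_len bytes  |
 *	+------------------------------+  == count_user
 */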
3758 
3759 /**
3760  *      sep_write - Starts an operation for fastcall interface
3761  *      @filp: File pointer
3762  *      @buf_user: User buffer for operation parameters
3763  *      @count_user: User buffer size
3764  *      @offset: File offset, not supported
3765  *
3766  *      The implementation does not support writing in chunks,
3767  *      all data must be given during a single write system call.
3768  */
3769 static ssize_t sep_write(struct file *filp,
3770                          const char __user *buf_user, size_t count_user,
3771                          loff_t *offset)
3772 {
3773         struct sep_private_data * const private_data = filp->private_data;
3774         struct sep_call_status *call_status = &private_data->call_status;
3775         struct sep_device *sep = private_data->device;
3776         struct sep_dma_context *dma_ctx = NULL;
3777         struct sep_fastcall_hdr call_hdr = {0};
3778         void *msg_region = NULL;
3779         void *dmatables_region = NULL;
3780         struct sep_dcblock *dcb_region = NULL;
3781         ssize_t error = 0;
3782         struct sep_queue_info *my_queue_elem = NULL;
3783         bool my_secure_dma; /* are we using secure_dma (IMR)? */
3784 
3785         dev_dbg(&sep->pdev->dev, "[PID%d] sep dev is 0x%p\n",
3786                 current->pid, sep);
3787         dev_dbg(&sep->pdev->dev, "[PID%d] private_data is 0x%p\n",
3788                 current->pid, private_data);
3789 
3790         error = sep_fastcall_args_get(sep, &call_hdr, buf_user, count_user);
3791         if (error)
3792                 goto end_function;
3793 
3794         buf_user += sizeof(struct sep_fastcall_hdr);
3795 
3796         my_secure_dma = (call_hdr.secure_dma != 0);
3800 
3801         /*
3802          * Control driver memory usage by limiting the number of
3803          * buffers created; only SEP_DOUBLEBUF_USERS_LIMIT threads
3804          * can progress further at a time
3805          */
3806         dev_dbg(&sep->pdev->dev,
3807                 "[PID%d] waiting for double buffering region access\n",
3808                 current->pid);
3809         error = down_interruptible(&sep->sep_doublebuf);
3810         dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region start\n",
3811                                         current->pid);
3812         if (error) {
3813                 /* Signal received */
3814                 goto end_function_error;
3815         }
3816 
3817 
3818         /*
3819          * Prepare contents of the shared area regions for
3820          * the operation into temporary buffers
3821          */
3822         if (0 < call_hdr.num_dcbs) {
3823                 error = sep_create_dcb_dmatables_context(sep,
3824                                 &dcb_region,
3825                                 &dmatables_region,
3826                                 &dma_ctx,
3827                                 (const struct build_dcb_struct __user *)
3828                                         buf_user,
3829                                 call_hdr.num_dcbs, my_secure_dma);
3830                 if (error)
3831                         goto end_function_error_doublebuf;
3832 
3833                 buf_user += call_hdr.num_dcbs * sizeof(struct build_dcb_struct);
3834         }
3835 
3836         error = sep_create_msgarea_context(sep,
3837                                            &msg_region,
3838                                            buf_user,
3839                                            call_hdr.msg_len);
3840         if (error)
3841                 goto end_function_error_doublebuf;
3842 
3843         dev_dbg(&sep->pdev->dev, "[PID%d] updating queue status\n",
3844                                                         current->pid);
3845         my_queue_elem = sep_queue_status_add(sep,
3846                                 ((struct sep_msgarea_hdr *)msg_region)->opcode,
3847                                 (dma_ctx) ? dma_ctx->input_data_len : 0,
3848                                      current->pid,
3849                                      current->comm, sizeof(current->comm));
3850 
3851         if (!my_queue_elem) {
3852                 dev_dbg(&sep->pdev->dev,
3853                         "[PID%d] updating queue status error\n", current->pid);
3854                 error = -ENOMEM;
3855                 goto end_function_error_doublebuf;
3856         }
3857 
3858         /* Wait until current process gets the transaction */
3859         error = sep_wait_transaction(sep);
3860 
3861         if (error) {
3862                 /* Interrupted by signal, don't clear transaction */
3863                 dev_dbg(&sep->pdev->dev, "[PID%d] interrupted by signal\n",
3864                         current->pid);
3865                 sep_queue_status_remove(sep, &my_queue_elem);
3866                 goto end_function_error_doublebuf;
3867         }
3868 
3869         dev_dbg(&sep->pdev->dev, "[PID%d] saving queue element\n",
3870                 current->pid);
3871         private_data->my_queue_elem = my_queue_elem;
3872 
3873         /* Activate shared area regions for the transaction */
3874         error = sep_activate_msgarea_context(sep, &msg_region,
3875                                              call_hdr.msg_len);
3876         if (error)
3877                 goto end_function_error_clear_transact;
3878 
3879         sep_dump_message(sep);
3880 
3881         if (0 < call_hdr.num_dcbs) {
3882                 error = sep_activate_dcb_dmatables_context(sep,
3883                                 &dcb_region,
3884                                 &dmatables_region,
3885                                 dma_ctx);
3886                 if (error)
3887                         goto end_function_error_clear_transact;
3888         }
3889 
3890         /* Send command to SEP */
3891         error = sep_send_command_handler(sep);
3892         if (error)
3893                 goto end_function_error_clear_transact;
3894 
3895         /* Store DMA context for the transaction */
3896         private_data->dma_ctx = dma_ctx;
3897         /* Update call status */
3898         set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status);
3899         error = count_user;
3900 
3901         up(&sep->sep_doublebuf);
3902         dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3903                 current->pid);
3904 
3905         goto end_function;
3906 
3907 end_function_error_clear_transact:
3908         sep_end_transaction_handler(sep, &dma_ctx, call_status,
3909                                                 &private_data->my_queue_elem);
3910 
3911 end_function_error_doublebuf:
3912         up(&sep->sep_doublebuf);
3913         dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3914                 current->pid);
3915 
3916 end_function_error:
3917         if (dma_ctx)
3918                 sep_free_dma_table_data_handler(sep, &dma_ctx);
3919 
3920 end_function:
3921         kfree(dcb_region);
3922         kfree(dmatables_region);
3923         kfree(msg_region);
3924 
3925         return error;
3926 }
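
/*
 * Illustrative fastcall round trip from user space (sketch; buffer
 * assembly follows the layout documented above sep_fastcall_args_get(),
 * error handling elided):
 *
 *	write(fd, buf, sizeof(hdr) + hdr.num_dcbs * sizeof(dcb) + hdr.msg_len);
 *	read(fd, reply, reply_len);
 *
 * The read() waits until the SEP finishes, copies the reply message
 * (and any tail data) back to user space, frees the DCBs/MLLI tables
 * and ends the transaction.
 */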
3927 /**
3928  *      sep_seek - Handler for seek system call
3929  *      @filp: File pointer
3930  *      @offset: File offset
3931  *      @origin: Options for offset
3932  *
3933  *      Fastcall interface does not support seeking, all reads
3934  *      and writes are from/to offset zero
3935  */
3936 static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
3937 {
3938         return -ENOSYS;
3939 }
3940 
3941 
3942 
3943 /**
3944  * sep_file_operations - file operation on sep device
3945  * @sep_ioctl:  ioctl handler from user space call
3946  * @sep_poll:   poll handler
3947  * @sep_open:   handles sep device open request
3948  * @sep_release: handles sep device release request
3949  * @sep_mmap:   handles memory mapping requests
3950  * @sep_read:   handles read request on sep device
3951  * @sep_write:  handles write request on sep device
3952  * @sep_seek:   handles seek request on sep device
3953  */
3954 static const struct file_operations sep_file_operations = {
3955         .owner = THIS_MODULE,
3956         .unlocked_ioctl = sep_ioctl,
3957         .poll = sep_poll,
3958         .open = sep_open,
3959         .release = sep_release,
3960         .mmap = sep_mmap,
3961         .read = sep_read,
3962         .write = sep_write,
3963         .llseek = sep_seek,
3964 };
3965 
3966 /**
3967  * sep_sysfs_read - read sysfs entry per given arguments
3968  * @filp: file pointer
3969  * @kobj: kobject pointer
3970  * @attr: binary file attributes
3971  * @buf: read to this buffer
3972  * @pos: offset to read
3973  * @count: amount of data to read
3974  *
3975  * This function is to read sysfs entries for sep driver per given arguments.
3976  */
3977 static ssize_t
3978 sep_sysfs_read(struct file *filp, struct kobject *kobj,
3979                 struct bin_attribute *attr,
3980                 char *buf, loff_t pos, size_t count)
3981 {
3982         unsigned long lck_flags;
3983         size_t nleft = count;
3984         struct sep_device *sep = sep_dev;
3985         struct sep_queue_info *queue_elem = NULL;
3986         u32 queue_num = 0;
3987         u32 i = 1;
3988 
3989         spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
3990 
3991         queue_num = sep->sep_queue_num;
3992         if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT)
3993                 queue_num = SEP_DOUBLEBUF_USERS_LIMIT;
3994 
3995 
3996         if (count < sizeof(queue_num)
3997                         + (queue_num * sizeof(struct sep_queue_data))) {
3998                 spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
3999                 return -EINVAL;
4000         }
4001 
4002         memcpy(buf, &queue_num, sizeof(queue_num));
4003         buf += sizeof(queue_num);
4004         nleft -= sizeof(queue_num);
4005 
4006         list_for_each_entry(queue_elem, &sep->sep_queue_status, list) {
4007                 if (i++ > queue_num)
4008                         break;
4009 
4010                 memcpy(buf, &queue_elem->data, sizeof(queue_elem->data));
4011                 nleft -= sizeof(queue_elem->data);
4012                 buf += sizeof(queue_elem->data);
4013         }
4014         spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4015 
4016         return count - nleft;
4017 }

/**
 * queue_status - binary sysfs attribute for the transaction queue
 * @attr: attributes (name & permissions)
 * @read: function pointer to read this file
 * @size: maximum size of binary attribute
 */
static const struct bin_attribute queue_status = {
	.attr = {.name = "queue_status", .mode = 0444},
	.read = sep_sysfs_read,
	.size = sizeof(u32)
		+ (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)),
};
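
/*
 * Illustrative only -- a sketch of a user-space reader for the binary
 * attribute above: a u32 element count followed by that many
 * struct sep_queue_data records. The sysfs path is an assumption (the
 * attribute hangs off the misc device registered below), and the records
 * are treated as opaque bytes since their layout lives in the driver
 * headers. Compiled out; not part of the driver.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int sep_read_queue_status(void)
{
	/* Path assumed; "sep" stands in for SEP_DEV_NAME. */
	FILE *f = fopen("/sys/class/misc/sep/queue_status", "rb");
	unsigned char buf[4096];
	uint32_t queue_num;
	size_t n;

	if (!f)
		return -1;

	/* One read returns the whole snapshot taken under the queue lock. */
	n = fread(buf, 1, sizeof(buf), f);
	fclose(f);
	if (n < sizeof(queue_num))
		return -1;

	memcpy(&queue_num, buf, sizeof(queue_num));
	printf("%u transactions queued (%zu payload bytes)\n",
	       (unsigned int)queue_num, n - sizeof(queue_num));
	return 0;
}
#endif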

/**
 * sep_register_driver_with_fs - register misc devices
 * @sep: pointer to struct sep_device
 *
 * This function registers the driver with the file system.
 */
static int sep_register_driver_with_fs(struct sep_device *sep)
{
	int ret_val;

	sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
	sep->miscdev_sep.name = SEP_DEV_NAME;
	sep->miscdev_sep.fops = &sep_file_operations;

	ret_val = misc_register(&sep->miscdev_sep);
	if (ret_val) {
		dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
			ret_val);
		return ret_val;
	}

	ret_val = device_create_bin_file(sep->miscdev_sep.this_device,
								&queue_status);
	if (ret_val) {
		dev_warn(&sep->pdev->dev, "sysfs attribute fails for SEP %x\n",
			ret_val);
		misc_deregister(&sep->miscdev_sep);
		return ret_val;
	}

	return ret_val;
}
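
/*
 * After a successful registration the device shows up as a misc device
 * with a dynamic minor (typically /dev/<SEP_DEV_NAME>) and the
 * queue_status binary attribute appears under the corresponding sysfs
 * device directory; the exact paths depend on SEP_DEV_NAME and udev
 * configuration.
 */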

/**
 * sep_probe - probe a matching PCI device
 * @pdev: pci_device
 * @ent: pci_device_id
 *
 * Attempt to set up and configure a SEP device that has been
 * discovered by the PCI layer. Allocates all required resources.
 */
static int sep_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	int error = 0;
	struct sep_device *sep = NULL;

	if (sep_dev != NULL) {
		dev_dbg(&pdev->dev, "only one SEP supported.\n");
		return -EBUSY;
	}

	/* Enable the device */
	error = pci_enable_device(pdev);
	if (error) {
		dev_warn(&pdev->dev, "error enabling pci device\n");
		goto end_function;
	}

	/* Allocate the sep_device structure for this device */
	sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
	if (sep_dev == NULL) {
		error = -ENOMEM;
		goto end_function_disable_device;
	}

	/*
	 * We're going to use another variable for actually
	 * working with the device; this way, if we have
	 * multiple devices in the future, it would be easier
	 * to make appropriate changes
	 */
	sep = sep_dev;

	sep->pdev = pci_dev_get(pdev);

	init_waitqueue_head(&sep->event_transactions);
	init_waitqueue_head(&sep->event_interrupt);
	spin_lock_init(&sep->snd_rply_lck);
	spin_lock_init(&sep->sep_queue_lock);
	sema_init(&sep->sep_doublebuf, SEP_DOUBLEBUF_USERS_LIMIT);

	INIT_LIST_HEAD(&sep->sep_queue_status);

	dev_dbg(&sep->pdev->dev,
		"sep probe: PCI obtained, device being prepared\n");

	/* Set up our register area */
	sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
	if (!sep->reg_physical_addr) {
		dev_warn(&sep->pdev->dev, "Error getting register start\n");
		error = -ENODEV;
		goto end_function_free_sep_dev;
	}

	sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
	if (!sep->reg_physical_end) {
		dev_warn(&sep->pdev->dev, "Error getting register end\n");
		error = -ENODEV;
		goto end_function_free_sep_dev;
	}

	sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
		(size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
	if (!sep->reg_addr) {
		dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
		error = -ENODEV;
		goto end_function_free_sep_dev;
	}

	dev_dbg(&sep->pdev->dev,
		"Register area start %llx end %llx virtual %p\n",
		(unsigned long long)sep->reg_physical_addr,
		(unsigned long long)sep->reg_physical_end,
		sep->reg_addr);

	/* Allocate the shared area */
	sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
		SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
		SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
		SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
		SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;

	if (sep_map_and_alloc_shared_area(sep)) {
		error = -ENOMEM;
		/* Allocation failed */
		goto end_function_error;
	}

	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);

	/* Set the IMR register - open only GPR 2 */
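	/*
	 * IMR bits that are set mask their interrupt sources, so writing
	 * ~(0x1 << 13) leaves only bit 13 unmasked -- assumed here to be
	 * the GPR2 "reply from SEP" interrupt serviced by sep_inthandler.
	 */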
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));

	/* Read send/receive counters from SEP */
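	/*
	 * Only the low 30 bits of GPR2 are assumed to hold the counter;
	 * the two top bits carry status flags, hence the 0x3FFFFFFF mask.
	 */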
	sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
	sep->reply_ct &= 0x3FFFFFFF;
	sep->send_ct = sep->reply_ct;

	/* Get the interrupt line */
	error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
		"sep_driver", sep);

	if (error)
		goto end_function_deallocate_sep_shared_area;

	/* The new chip requires a shared area reconfigure */
	error = sep_reconfig_shared_area(sep);
	if (error)
		goto end_function_free_irq;

	sep->in_use = 1;

	/* Finally magic up the device nodes */
	/* Register driver with the fs */
	error = sep_register_driver_with_fs(sep);

	if (error) {
		dev_err(&sep->pdev->dev, "error registering dev file\n");
		goto end_function_free_irq;
	}

	sep->in_use = 0; /* done touching the device */
#ifdef SEP_ENABLE_RUNTIME_PM
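	/*
	 * Drop the usage count taken by the PCI core without an immediate
	 * idle check, then allow runtime PM and let the device autosuspend
	 * after SUSPEND_DELAY ms of inactivity.
	 */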
	pm_runtime_put_noidle(&sep->pdev->dev);
	pm_runtime_allow(&sep->pdev->dev);
	pm_runtime_set_autosuspend_delay(&sep->pdev->dev,
		SUSPEND_DELAY);
	pm_runtime_use_autosuspend(&sep->pdev->dev);
	pm_runtime_mark_last_busy(&sep->pdev->dev);
	sep->power_save_setup = 1;
#endif
	/* register kernel crypto driver */
#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
	error = sep_crypto_setup();
	if (error) {
		dev_err(&sep->pdev->dev, "crypto setup failed\n");
		/* Undo the fs registration before tearing down */
		misc_deregister(&sep->miscdev_sep);
		goto end_function_free_irq;
	}
#endif
	goto end_function;

end_function_free_irq:
	free_irq(pdev->irq, sep);

end_function_deallocate_sep_shared_area:
	/* De-allocate shared area */
	sep_unmap_and_free_shared_area(sep);

end_function_error:
	iounmap(sep->reg_addr);

end_function_free_sep_dev:
	pci_dev_put(sep_dev->pdev);
	kfree(sep_dev);
	sep_dev = NULL;

end_function_disable_device:
	pci_disable_device(pdev);

end_function:
	return error;
}

/**
 * sep_remove - handles removing device from pci subsystem
 * @pdev: pointer to pci device
 *
 * This function handles removing our sep device from the pci subsystem
 * on exit or when unloading this module. It frees up all used resources
 * and unmaps any mapped memory regions.
 */
static void sep_remove(struct pci_dev *pdev)
{
	struct sep_device *sep = sep_dev;

	/* Unregister from fs */
	misc_deregister(&sep->miscdev_sep);

	/* Unregister from kernel crypto */
#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
	sep_crypto_takedown();
#endif
	/* Free the irq */
	free_irq(sep->pdev->irq, sep);

	/* Free the shared area */
	sep_unmap_and_free_shared_area(sep_dev);
	iounmap(sep_dev->reg_addr);

#ifdef SEP_ENABLE_RUNTIME_PM
	if (sep->in_use) {
		sep->in_use = 0;
		pm_runtime_forbid(&sep->pdev->dev);
		pm_runtime_get_noresume(&sep->pdev->dev);
	}
#endif
	pci_dev_put(sep_dev->pdev);
	kfree(sep_dev);
	sep_dev = NULL;
}

/* Initialize struct pci_device_id for our driver */
static const struct pci_device_id sep_pci_id_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)},
	{0}
};

/* Export our pci_device_id structure to user space */
MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);

#ifdef SEP_ENABLE_RUNTIME_PM

/**
 * sep_pci_resume - resume routine while waking up from S3 state
 * @dev: pointer to sep device
 *
 * Wakes up the sep driver when the system resumes from the S3
 * (suspend-to-RAM) state, in which RAM contents stay intact.
 * Notes - revisit with more understanding of pm, ICR/IMR & counters.
 */
static int sep_pci_resume(struct device *dev)
{
	struct sep_device *sep = sep_dev;

	dev_dbg(&sep->pdev->dev, "pci resume called\n");

	if (sep->power_state == SEP_DRIVER_POWERON)
		return 0;

	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);

	/* Set the IMR register - open only GPR 2 */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));

	/* Read send/receive counters from SEP */
	sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
	sep->reply_ct &= 0x3FFFFFFF;
	sep->send_ct = sep->reply_ct;

	sep->power_state = SEP_DRIVER_POWERON;

	return 0;
}

/**
 * sep_pci_suspend - suspend routine while going to S3 state
 * @dev: pointer to sep device
 *
 * Suspends the sep driver when the system enters the S3
 * (suspend-to-RAM) state; RAM stays intact and powered during this
 * suspend.
 * Notes - revisit with more understanding of pm, ICR/IMR
 */
static int sep_pci_suspend(struct device *dev)
{
	struct sep_device *sep = sep_dev;

	dev_dbg(&sep->pdev->dev, "pci suspend called\n");
	if (sep->in_use == 1)
		return -EAGAIN;

	sep->power_state = SEP_DRIVER_POWEROFF;

	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);

	/* Set the IMR to block all */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF);

	return 0;
}

/**
 * sep_pm_runtime_resume - runtime resume routine
 * @dev: pointer to sep device
 *
 * Notes - revisit with more understanding of pm, ICR/IMR & counters
 */
static int sep_pm_runtime_resume(struct device *dev)
{
	u32 retval2;
	u32 delay_count;
	struct sep_device *sep = sep_dev;

	dev_dbg(&sep->pdev->dev, "pm runtime resume called\n");

	/*
	 * Wait until the SCU boot is ready.
	 * This is done by iterating SCU_DELAY_ITERATION (10
	 * microseconds each) up to SCU_DELAY_MAX (50) times.
	 * The bit can be set at any time within 500 microseconds
	 * after each power resume.
	 */
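	/* Bit 3 (0x8) of GPR3 is assumed to be the SCU "boot done" flag. */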
	retval2 = 0;
	delay_count = 0;
	while ((!retval2) && (delay_count < SCU_DELAY_MAX)) {
		retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
		retval2 &= 0x00000008;
		if (!retval2) {
			udelay(SCU_DELAY_ITERATION);
			delay_count += 1;
		}
	}

	if (!retval2) {
		dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n");
		return -EINVAL;
	}

	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);

	/* Set the IMR register - open only GPR 2 */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));

	/* Read send/receive counters from SEP */
	sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
	sep->reply_ct &= 0x3FFFFFFF;
	sep->send_ct = sep->reply_ct;

	return 0;
}

/**
 * sep_pm_runtime_suspend - runtime suspend routine
 * @dev: pointer to sep device
 *
 * Notes - revisit with more understanding of pm
 */
static int sep_pm_runtime_suspend(struct device *dev)
{
	struct sep_device *sep = sep_dev;

	dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n");

	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
	return 0;
}

/**
 * sep_pm - power management for sep driver
 * @sep_pm_runtime_resume:  resume - no communication with cpu & main memory
 * @sep_pm_runtime_suspend: suspend - no communication with cpu & main memory
 * @sep_pci_suspend:        suspend - main memory is still ON
 * @sep_pci_resume:         resume - main memory is still ON
 */
static const struct dev_pm_ops sep_pm = {
	.runtime_resume = sep_pm_runtime_resume,
	.runtime_suspend = sep_pm_runtime_suspend,
	.resume = sep_pci_resume,
	.suspend = sep_pci_suspend,
};
#endif /* SEP_ENABLE_RUNTIME_PM */

/**
 * sep_pci_driver - registers this device with pci subsystem
 * @name: name identifier for this driver
 * @sep_pci_id_tbl: pointer to struct pci_device_id table
 * @sep_probe: pointer to probe function in PCI driver
 * @sep_remove: pointer to remove function in PCI driver
 */
static struct pci_driver sep_pci_driver = {
#ifdef SEP_ENABLE_RUNTIME_PM
	.driver = {
		.pm = &sep_pm,
	},
#endif
	.name = "sep_sec_driver",
	.id_table = sep_pci_id_tbl,
	.probe = sep_probe,
	.remove = sep_remove,
};

module_pci_driver(sep_pci_driver);
MODULE_LICENSE("GPL");